Skip to content

Commit f2a6ceb

Browse files
committed
Disable litellm telemetry across multiple modules for improved performance and consistency
- Added environment variable to disable litellm telemetry in __init__.py, llm.py, and memory.py.
- Ensured telemetry is disabled after importing litellm to prevent unnecessary data collection.

This change enhances control over telemetry features while maintaining existing functionality.
1 parent 2c4af80 commit f2a6ceb

File tree

5 files changed

+118
-0
lines changed

5 files changed

+118
-0
lines changed

src/praisonai-agents/praisonaiagents/llm/__init__.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,9 @@
11
import logging
22
import warnings
3+
import os
4+
5+
# Disable litellm telemetry before any imports
6+
os.environ["LITELLM_TELEMETRY"] = "False"
37

48
# Suppress all relevant logs at module level
59
logging.getLogger("litellm").setLevel(logging.ERROR)
@@ -17,4 +21,11 @@
1721
# Import after suppressing warnings
1822
from .llm import LLM, LLMContextLengthExceededException
1923

24+
# Ensure telemetry is disabled after import as well
25+
try:
26+
import litellm
27+
litellm.telemetry = False
28+
except ImportError:
29+
pass
30+
2031
__all__ = ["LLM", "LLMContextLengthExceededException"]

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,9 @@
1717
from rich.console import Console
1818
from rich.live import Live
1919

20+
# Disable litellm telemetry before any imports
21+
os.environ["LITELLM_TELEMETRY"] = "False"
22+
2023
# TODO: Include in-build tool calling in LLM class
2124
# TODO: Restructure so that duplicate calls are not made (Sync with agent.py)
2225
class LLMContextLengthExceededException(Exception):
@@ -108,6 +111,9 @@ def __init__(
108111
):
109112
try:
110113
import litellm
114+
# Disable telemetry
115+
litellm.telemetry = False
116+
111117
# Set litellm options globally
112118
litellm.set_verbose = False
113119
litellm.success_callback = []

src/praisonai-agents/praisonaiagents/memory/memory.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,9 @@
66
from typing import Any, Dict, List, Optional, Union, Literal
77
import logging
88

9+
# Disable litellm telemetry before any imports
10+
os.environ["LITELLM_TELEMETRY"] = "False"
11+
912
# Set up logger
1013
logger = logging.getLogger(__name__)
1114

@@ -31,6 +34,7 @@
3134

3235
try:
3336
import litellm
37+
litellm.telemetry = False # Disable telemetry
3438
LITELLM_AVAILABLE = True
3539
except ImportError:
3640
LITELLM_AVAILABLE = False
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
#!/usr/bin/env python3
"""Exploratory script: identify litellm telemetry behavior.

Sets telemetry-related environment variables before importing litellm,
inspects the library's telemetry flag and callback lists, then runs a
mocked completion to observe whether any telemetry path is exercised.
(Removed an unused `import sys` from the original version.)
"""

import os

# Disable litellm telemetry before importing the library — litellm is
# presumed to read LITELLM_TELEMETRY at import time (TODO confirm against
# the installed litellm version).
os.environ["LITELLM_TELEMETRY"] = "False"
os.environ["LITELLM_LOG"] = "ERROR"

print("Environment variables set:")
print(f"LITELLM_TELEMETRY: {os.environ.get('LITELLM_TELEMETRY')}")
print(f"LITELLM_LOG: {os.environ.get('LITELLM_LOG')}")

print("\nImporting litellm...")
import litellm

print("\nChecking litellm telemetry status:")
print(f"Has telemetry attribute: {hasattr(litellm, 'telemetry')}")
if hasattr(litellm, 'telemetry'):
    print(f"Telemetry value: {litellm.telemetry}")

# Try to disable telemetry programmatically as a belt-and-braces measure
# in case the environment variable alone is not honored.
if hasattr(litellm, 'telemetry'):
    litellm.telemetry = False
    print("\nTelemetry disabled programmatically")

# Inspect callback lists — telemetry may be wired through callbacks.
# NOTE(review): `_async_success_callback` is a private attribute; this may
# break across litellm versions.
print(f"\nCallbacks: {litellm.callbacks}")
print(f"Success callbacks: {litellm.success_callback}")
print(f"Async success callbacks: {litellm._async_success_callback}")

# Run a mocked completion (no network call to the model provider) to see
# whether completion-time telemetry hooks fire.
print("\n\nTesting completion (this might trigger telemetry)...")
try:
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hi"}],
        mock_response="Hi there!"
    )
    print("Completion successful")
except Exception as e:
    # Best-effort probe script: report and continue rather than crash.
    print(f"Error: {e}")

print("\nDone. Check if any network requests were made to BerriAI/litellm")
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
#!/usr/bin/env python3
"""Verification script: confirm litellm telemetry is disabled by praisonaiagents.

Imports the project's LLM wrapper (which is expected to disable telemetry as a
side effect), creates an instance, runs a mocked completion, and exits with
status 1 if telemetry is still enabled or any step fails.
"""

import sys
import os

print("Testing litellm telemetry is disabled...")

# Test 1: Check environment variable is set
print(f"\n1. Environment variable LITELLM_TELEMETRY: {os.environ.get('LITELLM_TELEMETRY', 'NOT SET')}")

# Test 2: Import praisonaiagents and check if telemetry is disabled
try:
    from praisonaiagents.llm import LLM
    print("2. Successfully imported LLM from praisonaiagents")

    # Check if litellm was imported and telemetry is disabled
    import litellm
    print(f"3. litellm.telemetry = {litellm.telemetry}")

    # Test 3: Create an LLM instance
    llm = LLM(model="gpt-3.5-turbo")
    print("4. Successfully created LLM instance")

    # Check telemetry again after instance creation — instance construction
    # must not re-enable telemetry.
    print(f"5. After LLM creation, litellm.telemetry = {litellm.telemetry}")

    # Test 4: Try a mock completion (mock_response avoids any real API call)
    print("\n6. Testing mock completion...")
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "test"}],
        mock_response="test response"
    )
    print("   Mock completion successful")

    # Final check — idiomatic falsy test instead of the original
    # `== False` boolean comparison (PEP 8 discourages comparing to
    # True/False with `==`).
    print(f"\n7. Final check: litellm.telemetry = {litellm.telemetry}")

    if not litellm.telemetry:
        print("\n✅ SUCCESS: Telemetry is properly disabled!")
    else:
        print("\n❌ FAILURE: Telemetry is still enabled!")
        sys.exit(1)

except Exception as e:
    # Broad catch is intentional: any failure in this verification script
    # should be reported with a traceback and a non-zero exit status.
    print(f"\n❌ ERROR: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)

print("\nAll tests passed! Telemetry should be disabled.")

0 commit comments

Comments (0)