Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00

Commit: adding telemetry to litellm
This commit is contained in: parent 4323b27c16, commit 11f9950f1e
8 changed files with 90 additions and 43 deletions
litellm/__init__.py (file name inferred from contents; the per-file headers were lost in extraction)

@@ -1,7 +1,7 @@
 success_callback = []
 failure_callback = []
 set_verbose=False
-
+telemetry=True
 ####### COMPLETION MODELS ###################
 open_ai_chat_completion_models = [
     'gpt-3.5-turbo',
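The new module-level telemetry flag defaults to on. Per the comment this commit adds to logging() in utils below, it can be switched off at runtime; a minimal sketch:

import litellm

# Opt out of crash reporting before issuing any completion/embedding calls
litellm.telemetry = False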
4 binary files not shown.
litellm/main.py (file name inferred from contents)

@@ -252,6 +252,7 @@ def completion(
 @client
 @timeout(60) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
 def embedding(model, input=[], azure=False, force_timeout=60, logger_fn=None):
+    try:
     response = None
     if azure == True:
         # azure configs

@@ -280,7 +281,11 @@ def embedding(model, input=[], azure=False, force_timeout=60, logger_fn=None):
         raise ValueError(f"No valid embedding model args passed in - {args}")
 
-
     return response
+    except Exception as e:
+        # log the original exception
+        logging(model=model, input=input, azure=azure, logger_fn=logger_fn, exception=e)
+        ## Map to OpenAI Exception
+        raise exception_type(model=model, original_exception=e)
 ####### HELPER FUNCTIONS ################
 ## Set verbose to true -> ```litellm.set_verbose = True```
 def print_verbose(print_statement):
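The shape of this change: the entire body of embedding() moves inside a try, any exception is logged once with the original details, then re-raised as a mapped OpenAI-style error via exception_type(). A self-contained sketch of the same wrap-log-remap pattern, with hypothetical log_event/map_exception helpers standing in for litellm's logging() and exception_type():

class MappedError(Exception):
    pass

def log_event(**details):
    # stand-in for litellm's logging(): record the original call + exception
    print(f"call details: {details}")

def map_exception(model, original_exception):
    # stand-in for exception_type(): normalize provider errors to one type
    return MappedError(f"{model}: {original_exception}")

def embedding(model, input=[]):
    try:
        if input == []:
            raise ValueError(f"No valid embedding model args passed in - {input}")
        response = {"model": model, "embedding": [0.0] * 3}  # placeholder provider call
        return response
    except Exception as e:
        log_event(model=model, input=input, exception=e)  # log the original exception
        raise map_exception(model, e)  # surface a normalized error to the caller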
litellm/tests/litellm_uuid.txt (new file, 1 line)

@@ -0,0 +1 @@
+80888ede-4881-4876-ab3f-765d47282e66
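The checked-in UUID file is a side effect of litellm_telemetry() below, which reads and writes litellm_uuid.txt relative to the current working directory; running the test suite from litellm/tests/ presumably created it there.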
litellm/utils.py (file name inferred from contents)

@@ -1,14 +1,7 @@
-import dotenv
-import json
-import traceback
-import threading
-import traceback
-import subprocess
-import uuid
-import litellm
-import os
-import openai
-import random
+import dotenv, json, traceback, threading
+import subprocess, os
+import litellm, openai
+import random, uuid, requests
 from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, ServiceUnavailableError, OpenAIError
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv
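Besides consolidating the one-import-per-line block (which carried a duplicate import traceback), the new imports pull in requests, required by the telemetry POST in litellm_telemetry() below.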
@@ -34,11 +27,15 @@ def logging(model, input, azure=False, additional_args={}, logger_fn=None, excep
     try:
         model_call_details = {}
         model_call_details["model"] = model
-        model_call_details["input"] = input
         model_call_details["azure"] = azure
         # log exception details
         if exception:
             model_call_details["original_exception"] = exception
+
+        if litellm.telemetry:
+            safe_crash_reporting(model=model, exception=exception, azure=azure) # log usage-crash details. Do not log any user details. If you want to turn this off, set `litellm.telemetry=False`.
+
+        model_call_details["input"] = input
         # log additional call details -> api key, etc.
         if azure == True or model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_chat_completion_models or model in litellm.open_ai_embedding_models:
             model_call_details["api_type"] = openai.api_type
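Moving model_call_details["input"] = input below the telemetry branch means safe_crash_reporting() fires before any user input is attached, matching the added comment's no-user-details promise; a crash report carries only the three fields below. (The doubled model in litellm.open_ai_chat_completion_models test in the surrounding condition looks like a pre-existing typo this commit leaves untouched.)

# Everything safe_crash_reporting() ever sees -- model name, stringified
# exception, and the azure flag; the user's input never reaches it.
data = {
    "model": model,
    "exception": str(exception),
    "azure": azure
}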
@@ -274,3 +271,47 @@ def exception_type(model, original_exception):
     else:
         raise original_exception
+
+def safe_crash_reporting(model=None, exception=None, azure=None):
+    data = {
+        "model": model,
+        "exception": str(exception),
+        "azure": azure
+    }
+    print(f"data in crash reporting: {data}")
+    threading.Thread(target=litellm_telemetry, args=(data,), daemon=True).start()
+
+def litellm_telemetry(data):
+    print(f"data in litellm telemetry: {data}")
+    # Load or generate the UUID
+    uuid_file = 'litellm_uuid.txt'
+    try:
+        # Try to open the file and load the UUID
+        with open(uuid_file, 'r') as file:
+            uuid_value = file.read()
+            if uuid_value:
+                uuid_value = uuid_value.strip()
+                print(f"Loaded UUID: {uuid_value}")
+            else:
+                raise FileNotFoundError
+    except FileNotFoundError:
+        # Generate a new UUID if the file doesn't exist or is empty
+        new_uuid = uuid.uuid4()
+        uuid_value = str(new_uuid)
+        with open(uuid_file, 'w') as file:
+            file.write(uuid_value)
+        print(f"Generated and stored UUID: {uuid_value}")
+
+    # Prepare the telemetry payload
+    payload = {
+        'uuid': uuid_value,
+        'data': data
+    }
+    print_verbose(f"payload: {payload}")
+    try:
+        # POST the payload to the telemetry endpoint
+        response = requests.post('https://litellm.berri.ai/logging', json=payload)
+        response.raise_for_status()  # Raise an exception for HTTP errors
+        print('Request successfully sent!')
+    except requests.exceptions.RequestException as e:
+        # Handle any errors in the request
+        print(f'Error: {e}')
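Two design points worth noting: the POST runs on a daemon thread, so a slow or unreachable endpoint can never block or outlive the calling process, and the anonymous UUID is persisted so repeat reports from one install correlate without identifying a user. A condensed, self-contained sketch of that pattern (hypothetical names, placeholder endpoint):

import threading, uuid, requests

def load_or_create_uuid(path='litellm_uuid.txt'):
    # Reuse a persisted anonymous id so reports from one install correlate
    try:
        with open(path, 'r') as f:
            value = f.read().strip()
            if value:
                return value
    except FileNotFoundError:
        pass
    value = str(uuid.uuid4())
    with open(path, 'w') as f:
        f.write(value)
    return value

def report(data, endpoint='https://example.invalid/logging'):  # placeholder URL
    try:
        response = requests.post(endpoint, json={'uuid': load_or_create_uuid(), 'data': data})
        response.raise_for_status()
    except requests.exceptions.RequestException:
        pass  # unlike the code above, this sketch swallows request errors silently

# fire-and-forget: the daemon thread dies with the main program
threading.Thread(target=report, args=({'exception': 'example'},), daemon=True).start()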