updates to litedebugger dashboard

This commit is contained in:
Krrish Dholakia 2023-08-23 11:51:14 -07:00
parent 943cd26288
commit e659c66a75
8 changed files with 13 additions and 10 deletions

View file

@@ -6,7 +6,8 @@ class LiteDebugger:
dashboard_url = None dashboard_url = None
def __init__(self, email=None): def __init__(self, email=None):
self.api_url = "https://api.litellm.ai/debugger" # self.api_url = "https://api.litellm.ai/debugger"
self.api_url = "http://0.0.0.0:4000/debugger"
self.validate_environment(email) self.validate_environment(email)
pass pass
@@ -88,13 +89,14 @@ class LiteDebugger:
headers={"content-type": "application/json"}, headers={"content-type": "application/json"},
data=json.dumps(litellm_data_obj), data=json.dumps(litellm_data_obj),
) )
elif "embedding" in response_obj: elif "data" in response_obj and isinstance(response_obj["data"], list) and len(response_obj["data"]) > 0 and "embedding" in response_obj["data"][0]:
print(f"messages: {messages}")
litellm_data_obj = { litellm_data_obj = {
"response_time": response_time, "response_time": response_time,
"model": response_obj["model"], "model": response_obj["model"],
"total_cost": total_cost, "total_cost": total_cost,
"messages": messages, "messages": messages,
"response": response_obj["embedding"][:5], "response": str(response_obj["data"][0]["embedding"][:5]),
"end_user": end_user, "end_user": end_user,
"litellm_call_id": litellm_call_id, "litellm_call_id": litellm_call_id,
"status": "success", "status": "success",

View file

@@ -815,7 +815,7 @@ def embedding(
) )
## EMBEDDING CALL ## EMBEDDING CALL
response = openai.Embedding.create(input=input, engine=model) response = openai.Embedding.create(input=input, engine=model)
print_verbose(f"response_value: {str(response)[:50]}") print_verbose(f"response_value: {str(response)[:100]}")
elif model in litellm.open_ai_embedding_models: elif model in litellm.open_ai_embedding_models:
openai.api_type = "openai" openai.api_type = "openai"
openai.api_base = "https://api.openai.com/v1" openai.api_base = "https://api.openai.com/v1"
@@ -833,7 +833,7 @@ def embedding(
) )
## EMBEDDING CALL ## EMBEDDING CALL
response = openai.Embedding.create(input=input, model=model) response = openai.Embedding.create(input=input, model=model)
print_verbose(f"response_value: {str(response)[:50]}") print_verbose(f"response_value: {str(response)[:100]}")
else: else:
args = locals() args = locals()
raise ValueError(f"No valid embedding model args passed in - {args}") raise ValueError(f"No valid embedding model args passed in - {args}")

View file

@@ -9,7 +9,7 @@ import litellm
from litellm import embedding, completion from litellm import embedding, completion
from infisical import InfisicalClient from infisical import InfisicalClient
# # litellm.set_verbose = True litellm.set_verbose = True
# litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"]) # litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"])
@@ -19,6 +19,7 @@ def test_openai_embedding():
model="text-embedding-ada-002", input=["good morning from litellm"] model="text-embedding-ada-002", input=["good morning from litellm"]
) )
# Add any assertions here to check the response # Add any assertions here to check the response
print(f"response: {str(response)}") # print(f"response: {str(response)}")
except Exception as e: except Exception as e:
pytest.fail(f"Error occurred: {e}") pytest.fail(f"Error occurred: {e}")
test_openai_embedding()

View file

@@ -899,7 +899,7 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args,
print_verbose("reaches lite_debugger for logging!") print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}") print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
model = args[0] if len(args) > 0 else kwargs["model"] model = args[0] if len(args) > 0 else kwargs["model"]
messages = args[1] if len(args) > 1 else kwargs.get("messages", {"role": "user", "content": kwargs.get("input", "")}) messages = args[1] if len(args) > 1 else kwargs.get("messages", [{"role": "user", "content": ' '.join(kwargs.get("input", ""))}])
result = { result = {
"model": model, "model": model,
"created": time.time(), "created": time.time(),
@@ -1031,7 +1031,7 @@ def handle_success(args, kwargs, result, start_time, end_time):
elif callback == "lite_debugger": elif callback == "lite_debugger":
print_verbose("reaches lite_debugger for logging!") print_verbose("reaches lite_debugger for logging!")
print_verbose(f"liteDebuggerClient: {liteDebuggerClient}") print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
messages = args[1] if len(args) > 1 else kwargs.get("messages", {"role": "user", "content": kwargs.get("input")}) messages = args[1] if len(args) > 1 else kwargs.get("messages", [{"role": "user", "content": ' '.join(kwargs.get("input", ""))}])
liteDebuggerClient.log_event( liteDebuggerClient.log_event(
model=model, model=model,
messages=messages, messages=messages,

View file

@@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "litellm" name = "litellm"
version = "0.1.459" version = "0.1.460"
description = "Library to easily interface with LLM API providers" description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"] authors = ["BerriAI"]
license = "MIT License" license = "MIT License"