diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index beacda5273..2d7525526a
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 149cc4e417..3f89800a7c
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index ec52f52a65..4e7d6c2d18
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/integrations/litedebugger.py b/litellm/integrations/litedebugger.py
index ca695641bb..4779f62350 100644
--- a/litellm/integrations/litedebugger.py
+++ b/litellm/integrations/litedebugger.py
@@ -6,7 +6,8 @@ class LiteDebugger:
     dashboard_url = None

     def __init__(self, email=None):
-        self.api_url = "https://api.litellm.ai/debugger"
+        # self.api_url = "https://api.litellm.ai/debugger"
+        self.api_url = "http://0.0.0.0:4000/debugger"
         self.validate_environment(email)
         pass

@@ -88,13 +89,14 @@ class LiteDebugger:
                     headers={"content-type": "application/json"},
                     data=json.dumps(litellm_data_obj),
                 )
-            elif "embedding" in response_obj:
+            elif "data" in response_obj and isinstance(response_obj["data"], list) and len(response_obj["data"]) > 0 and "embedding" in response_obj["data"][0]:
+                print(f"messages: {messages}")
                 litellm_data_obj = {
                     "response_time": response_time,
                     "model": response_obj["model"],
                     "total_cost": total_cost,
                     "messages": messages,
-                    "response": response_obj["embedding"][:5],
+                    "response": str(response_obj["data"][0]["embedding"][:5]),
                     "end_user": end_user,
                     "litellm_call_id": litellm_call_id,
                     "status": "success",
diff --git a/litellm/main.py b/litellm/main.py
index 0e54af94e3..83889f8494 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -815,7 +815,7 @@ def embedding(
         )
         ## EMBEDDING CALL
         response = openai.Embedding.create(input=input, engine=model)
-        print_verbose(f"response_value: {str(response)[:50]}")
+        print_verbose(f"response_value: {str(response)[:100]}")
     elif model in litellm.open_ai_embedding_models:
         openai.api_type = "openai"
         openai.api_base = "https://api.openai.com/v1"
@@ -833,7 +833,7 @@ def embedding(
         )
         ## EMBEDDING CALL
         response = openai.Embedding.create(input=input, model=model)
-        print_verbose(f"response_value: {str(response)[:50]}")
+        print_verbose(f"response_value: {str(response)[:100]}")
     else:
         args = locals()
         raise ValueError(f"No valid embedding model args passed in - {args}")
diff --git a/litellm/tests/test_embedding.py b/litellm/tests/test_embedding.py
index a9b3f2b79e..faa5760b28 100644
--- a/litellm/tests/test_embedding.py
+++ b/litellm/tests/test_embedding.py
@@ -9,7 +9,7 @@ import litellm
 from litellm import embedding, completion
 from infisical import InfisicalClient

-# # litellm.set_verbose = True
+litellm.set_verbose = True
 # litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"])


@@ -19,6 +19,7 @@ def test_openai_embedding():
             model="text-embedding-ada-002", input=["good morning from litellm"]
         )
         # Add any assertions here to check the response
-        print(f"response: {str(response)}")
+        # print(f"response: {str(response)}")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+test_openai_embedding()
\ No newline at end of file
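Reviewer note on the litedebugger.py hunk above: the old branch checked for a top-level "embedding" key, but OpenAI-style embedding responses nest the vectors inside a "data" list, so the success log never matched for embedding calls; the new guard walks response_obj["data"][0]["embedding"] instead. A minimal sketch of the shape being handled — the response values below are fabricated for illustration, not taken from a real API call:

```python
# Illustrative OpenAI-style embedding response (values are made up).
response_obj = {
    "object": "list",
    "model": "text-embedding-ada-002",
    "data": [
        {"object": "embedding", "index": 0, "embedding": [0.012, -0.034, 0.056, 0.078, -0.091, 0.002]},
    ],
}

# Same guard as the new elif branch: verify the nested structure exists
# before indexing, so responses without embeddings fall through safely.
if (
    "data" in response_obj
    and isinstance(response_obj["data"], list)
    and len(response_obj["data"]) > 0
    and "embedding" in response_obj["data"][0]
):
    # Log only the first five dimensions, stringified to keep the payload small.
    preview = str(response_obj["data"][0]["embedding"][:5])
```

Truncating to five dimensions and stringifying keeps the logged "response" field compact; a full text-embedding-ada-002 vector is 1536 floats.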
diff --git a/litellm/utils.py b/litellm/utils.py
index 10022268cd..0a541c40b8 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -899,7 +899,7 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args,
             print_verbose("reaches lite_debugger for logging!")
             print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
             model = args[0] if len(args) > 0 else kwargs["model"]
-            messages = args[1] if len(args) > 1 else kwargs.get("messages", {"role": "user", "content": kwargs.get("input", "")})
+            messages = args[1] if len(args) > 1 else kwargs.get("messages", [{"role": "user", "content": ' '.join(kwargs.get("input", ""))}])
             result = {
                 "model": model,
                 "created": time.time(),
@@ -1031,7 +1031,7 @@ def handle_success(args, kwargs, result, start_time, end_time):
         elif callback == "lite_debugger":
             print_verbose("reaches lite_debugger for logging!")
             print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
-            messages = args[1] if len(args) > 1 else kwargs.get("messages", {"role": "user", "content": kwargs.get("input")})
+            messages = args[1] if len(args) > 1 else kwargs.get("messages", [{"role": "user", "content": ' '.join(kwargs.get("input", ""))}])
             liteDebuggerClient.log_event(
                 model=model,
                 messages=messages,
diff --git a/pyproject.toml b/pyproject.toml
index 1a444d0629..be3dc2a824 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.459"
+version = "0.1.460"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
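Note on the utils.py hunks above: embedding() passes input as a list of strings, so the old fallback stuffed that raw list (or nothing) into a single dict's "content", and the dict was never wrapped in a list the way chat messages are. The new fallback joins the strings and wraps the dict, producing a proper messages list. A quick sketch of the resulting behavior, reusing the input from the test file above:

```python
# Mirrors the new fallback in handle_success/handle_failure (illustrative kwargs).
kwargs = {"input": ["good morning from litellm"]}

messages = kwargs.get(
    "messages",
    [{"role": "user", "content": " ".join(kwargs.get("input", ""))}],
)
# -> [{"role": "user", "content": "good morning from litellm"}]
```

One caveat worth flagging: if "input" ever arrives as a bare string rather than a list, " ".join(...) will space-separate its characters ("hi" becomes "h i"), so this fallback assumes list-of-strings input.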