From fe2a3c02e5264fa1c69572b8b5fa36eca193cf1f Mon Sep 17 00:00:00 2001
From: Krish Dholakia
Date: Fri, 30 Aug 2024 08:54:42 -0700
Subject: [PATCH] - merge - fix TypeError: 'CompletionUsage' object is not
 subscriptable #5441 (#5448)

* fix TypeError: 'CompletionUsage' object is not subscriptable (#5441)

* test(test_team_logging.py): mark flaky test

---------

Co-authored-by: yafei lee
---
 litellm/integrations/langfuse.py | 8 ++++----
 tests/test_team_logging.py       | 1 +
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 0fb2ea1f7f..0b5e9a4aab 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -300,8 +300,8 @@ class LangFuseLogger:
                 prompt=input,
                 completion=output,
                 usage={
-                    "prompt_tokens": response_obj["usage"]["prompt_tokens"],
-                    "completion_tokens": response_obj["usage"]["completion_tokens"],
+                    "prompt_tokens": response_obj.usage.prompt_tokens,
+                    "completion_tokens": response_obj.usage.completion_tokens,
                 },
                 metadata=metadata,
             )
@@ -526,8 +526,8 @@ class LangFuseLogger:
             if response_obj is not None and response_obj.get("id", None) is not None:
                 generation_id = litellm.utils.get_logging_id(start_time, response_obj)
             usage = {
-                "prompt_tokens": response_obj["usage"]["prompt_tokens"],
-                "completion_tokens": response_obj["usage"]["completion_tokens"],
+                "prompt_tokens": response_obj.usage.prompt_tokens,
+                "completion_tokens": response_obj.usage.completion_tokens,
                 "total_cost": cost if supports_costs else None,
             }
             generation_name = clean_metadata.pop("generation_name", None)
diff --git a/tests/test_team_logging.py b/tests/test_team_logging.py
index d745a8d770..b4d12ad271 100644
--- a/tests/test_team_logging.py
+++ b/tests/test_team_logging.py
@@ -62,6 +62,7 @@ async def chat_completion(session, key, model="azure-gpt-3.5", request_metadata=
 
 
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_team_logging():
     """
     -> Team 1 logs to project 1
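
Note (not part of the patch): a minimal sketch of why the change above works. It assumes the response's `usage` field is an attribute-style pydantic object like `openai.types.CompletionUsage` rather than a plain dict, which is what makes dict-style subscripting raise the TypeError named in the subject. The helper below is hypothetical and only illustrates reading counts from either shape; it is not litellm's implementation.

# Illustrative sketch only; CompletionUsage import path is from the openai v1.x SDK.
from openai.types import CompletionUsage

usage = CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15)

print(usage.prompt_tokens)   # 10 -- attribute access works on the pydantic model
# usage["prompt_tokens"]     # raises TypeError: 'CompletionUsage' object is not subscriptable

def token_counts(usage_obj):
    """Hypothetical helper: read token counts from an attribute-style or dict-style usage object."""
    if hasattr(usage_obj, "prompt_tokens"):
        return usage_obj.prompt_tokens, usage_obj.completion_tokens
    return usage_obj["prompt_tokens"], usage_obj["completion_tokens"]

print(token_counts(usage))                                       # (10, 5)
print(token_counts({"prompt_tokens": 10, "completion_tokens": 5}))  # (10, 5)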