diff --git a/litellm/tests/test_utils.py b/litellm/tests/test_utils.py
index 976ded7f60..6b0148b4e0 100644
--- a/litellm/tests/test_utils.py
+++ b/litellm/tests/test_utils.py
@@ -761,3 +761,16 @@ def test_supports_response_schema(model, expected_bool):
     response = supports_response_schema(model=model, custom_llm_provider=None)
 
     assert expected_bool == response
+
+
+def test_usage_object_null_tokens():
+    """
+    Unit test.
+
+    Asserts Usage obj always returns int.
+
+    Fixes https://github.com/BerriAI/litellm/issues/5096
+    """
+    usage_obj = litellm.Usage(prompt_tokens=2, completion_tokens=None, total_tokens=2)
+
+    assert usage_obj.completion_tokens == 0
diff --git a/litellm/types/utils.py b/litellm/types/utils.py
index 1a05c1d2a0..361163e6a5 100644
--- a/litellm/types/utils.py
+++ b/litellm/types/utils.py
@@ -444,11 +444,10 @@ class Choices(OpenAIObject):
             setattr(self, key, value)
 
 
-class Usage(OpenAIObject):
-    prompt_tokens: Optional[int] = Field(default=None)
-    completion_tokens: Optional[int] = Field(default=None)
-    total_tokens: Optional[int] = Field(default=None)
+from openai.types.completion_usage import CompletionUsage
+
+class Usage(CompletionUsage):
 
     def __init__(
         self,
         prompt_tokens: Optional[int] = None,
@@ -457,14 +456,16 @@ class Usage(OpenAIObject):
         **params,
     ):
         data = {
-            "prompt_tokens": prompt_tokens,
-            "completion_tokens": completion_tokens,
-            "total_tokens": total_tokens,
-            **params,
+            "prompt_tokens": prompt_tokens or 0,
+            "completion_tokens": completion_tokens or 0,
+            "total_tokens": total_tokens or 0,
         }
         super().__init__(**data)
 
+        for k, v in params.items():
+            setattr(self, k, v)
+
     def __contains__(self, key):
         # Define custom behavior for the 'in' operator
         return hasattr(self, key)
 
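
A quick local check of the new behavior (a minimal sketch, assuming `litellm` with this patch applied; the `custom_field` keyword below is hypothetical, used only to illustrate the `**params` passthrough):

```python
import litellm

# None token counts are now coerced to 0 in Usage.__init__ via `or 0`,
# so downstream consumers can rely on ints (the case from issue #5096).
usage = litellm.Usage(prompt_tokens=2, completion_tokens=None, total_tokens=2)
assert usage.completion_tokens == 0  # previously stayed None

# Extra keyword arguments are no longer merged into the constructor payload;
# they are attached with setattr after CompletionUsage.__init__ runs.
usage = litellm.Usage(prompt_tokens=1, custom_field="x")  # hypothetical extra param
assert usage.custom_field == "x"
assert "custom_field" in usage  # __contains__ delegates to hasattr
```

The setattr loop relies on the OpenAI SDK's pydantic models permitting extra attributes; moving `**params` out of the `data` dict keeps unknown keys from being rejected by `CompletionUsage`'s constructor while still exposing them on the object.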