fix(types/utils.py): handle null completion tokens

Fixes https://github.com/BerriAI/litellm/issues/5096
Krrish Dholakia, 2024-08-10 09:23:03 -07:00
commit 1553f7fa48 (parent f4355d3e88)
2 changed files with 22 additions and 8 deletions


@@ -761,3 +761,16 @@ def test_supports_response_schema(model, expected_bool):
     response = supports_response_schema(model=model, custom_llm_provider=None)
     assert expected_bool == response
+
+
+def test_usage_object_null_tokens():
+    """
+    Unit test.
+
+    Asserts Usage obj always returns int.
+
+    Fixes https://github.com/BerriAI/litellm/issues/5096
+    """
+    usage_obj = litellm.Usage(prompt_tokens=2, completion_tokens=None, total_tokens=2)
+
+    assert usage_obj.completion_tokens == 0
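For context, the pre-fix failure mode: the old Usage model declared every token count as Optional[int] defaulting to None (see the removed lines in the next hunk), so completion_tokens could surface as None and break integer arithmetic downstream. A minimal sketch of that behavior, using plain pydantic and an illustrative OldUsage name:

from typing import Optional

from pydantic import BaseModel, Field


class OldUsage(BaseModel):
    # mirrors the removed field declarations: every token count may be None
    prompt_tokens: Optional[int] = Field(default=None)
    completion_tokens: Optional[int] = Field(default=None)
    total_tokens: Optional[int] = Field(default=None)


old = OldUsage(prompt_tokens=2, completion_tokens=None, total_tokens=2)
print(old.completion_tokens)  # None -> any later `completion_tokens + n` raises TypeError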

types/utils.py

@@ -444,11 +444,10 @@ class Choices(OpenAIObject):
             setattr(self, key, value)
 
 
-class Usage(OpenAIObject):
-    prompt_tokens: Optional[int] = Field(default=None)
-    completion_tokens: Optional[int] = Field(default=None)
-    total_tokens: Optional[int] = Field(default=None)
-
+from openai.types.completion_usage import CompletionUsage
+
+
+class Usage(CompletionUsage):
     def __init__(
         self,
         prompt_tokens: Optional[int] = None,
@@ -457,14 +456,16 @@ class Usage(OpenAIObject):
         **params,
     ):
         data = {
-            "prompt_tokens": prompt_tokens,
-            "completion_tokens": completion_tokens,
-            "total_tokens": total_tokens,
-            **params,
+            "prompt_tokens": prompt_tokens or 0,
+            "completion_tokens": completion_tokens or 0,
+            "total_tokens": total_tokens or 0,
         }
         super().__init__(**data)
+
+        for k, v in params.items():
+            setattr(self, k, v)
 
     def __contains__(self, key):
         # Define custom behavior for the 'in' operator
         return hasattr(self, key)
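Taken in isolation, the fix: Usage now subclasses OpenAI's CompletionUsage, whose three token fields are required ints, and coerces any None argument to 0 before delegating to the parent constructor; remaining keyword params are attached as attributes. A minimal self-contained sketch of the same pattern follows; the stand-in CompletionUsage class and its extra="allow" config are assumptions for illustration, not the openai package's actual definition:

from typing import Optional

from pydantic import BaseModel, ConfigDict


class CompletionUsage(BaseModel):
    # stand-in for openai.types.completion_usage.CompletionUsage,
    # where all three token counts are required ints
    model_config = ConfigDict(extra="allow")  # assumption: lets setattr attach extra params
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class Usage(CompletionUsage):
    def __init__(
        self,
        prompt_tokens: Optional[int] = None,
        completion_tokens: Optional[int] = None,
        total_tokens: Optional[int] = None,
        **params,
    ):
        # `value or 0` turns None into 0, so the int-typed parent fields always validate
        super().__init__(
            prompt_tokens=prompt_tokens or 0,
            completion_tokens=completion_tokens or 0,
            total_tokens=total_tokens or 0,
        )
        # provider-specific extras ride along as plain attributes
        for k, v in params.items():
            setattr(self, k, v)


usage = Usage(prompt_tokens=2, completion_tokens=None, total_tokens=2)
assert usage.completion_tokens == 0  # was None before this change

Note that `value or 0` maps an explicit 0 to 0 as well, so only genuinely missing counts are rewritten; the new unit test exercises exactly the completion_tokens=None path.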