Mirror of https://github.com/BerriAI/litellm.git — synced 2025-04-25 18:54:30 +00:00
fix test test_stream_chunk_builder_openai_audio_output_usage - use direct dict comparison
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 13s
This commit is contained in:
parent
2eaa0079f2
commit
74e332bfdd
1 changed file with 8 additions and 9 deletions
|
@ -721,15 +721,14 @@ def test_stream_chunk_builder_openai_audio_output_usage():
|
|||
print(f"response usage: {response.usage}")
|
||||
check_non_streaming_response(response)
|
||||
print(f"response: {response}")
|
||||
for k, v in usage_obj.model_dump(exclude_none=True).items():
|
||||
print(k, v)
|
||||
response_usage_value = getattr(response.usage, k) # type: ignore
|
||||
print(f"response_usage_value: {response_usage_value}")
|
||||
print(f"type: {type(response_usage_value)}")
|
||||
if isinstance(response_usage_value, BaseModel):
|
||||
assert response_usage_value.model_dump(exclude_none=True) == v
|
||||
else:
|
||||
assert response_usage_value == v
|
||||
# Convert both usage objects to dictionaries for easier comparison
|
||||
usage_dict = usage_obj.model_dump(exclude_none=True)
|
||||
response_usage_dict = response.usage.model_dump(exclude_none=True)
|
||||
|
||||
# Simple dictionary comparison
|
||||
assert (
|
||||
usage_dict == response_usage_dict
|
||||
), f"\nExpected: {usage_dict}\nGot: {response_usage_dict}"
|
||||
|
||||
|
||||
def test_stream_chunk_builder_empty_initial_chunk():
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue