From 9eb75cc15924f51d4104a1c5e14be4553db27f4f Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Thu, 25 Apr 2024 20:22:18 -0700
Subject: [PATCH] test(test_streaming.py): fix test

---
 litellm/tests/test_streaming.py | 14 ++++++++++++--
 litellm/utils.py                |  7 ++++---
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index 81e58a301..df759b0b9 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -2723,8 +2723,18 @@ def test_aamazing_unit_test_custom_stream_wrapper_n():
 
     chunk_list = []
     for chunk in chunks:
-        _chunk = litellm.ModelResponse(**chunk, stream=True)
-        chunk_list.append(_chunk)
+        new_chunk = litellm.ModelResponse(stream=True, id=chunk["id"])
+        if "choices" in chunk and isinstance(chunk["choices"], list):
+            print("INSIDE CHUNK CHOICES!")
+            new_choices = []
+            for choice in chunk["choices"]:
+                if isinstance(choice, litellm.utils.StreamingChoices):
+                    _new_choice = choice
+                elif isinstance(choice, dict):
+                    _new_choice = litellm.utils.StreamingChoices(**choice)
+                new_choices.append(_new_choice)
+            new_chunk.choices = new_choices
+        chunk_list.append(new_chunk)
 
     completion_stream = ModelResponseListIterator(model_responses=chunk_list)
 
diff --git a/litellm/utils.py b/litellm/utils.py
index 1185380c3..8c3863344 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -7103,9 +7103,10 @@ def convert_to_model_response_object(
            model_response_object.model = response_object["model"]
 
        if start_time is not None and end_time is not None:
-            model_response_object._response_ms = (  # type: ignore
-                end_time - start_time
-            ).total_seconds() * 1000
+            if isinstance(start_time, type(end_time)):
+                model_response_object._response_ms = (  # type: ignore
+                    end_time - start_time
+                ).total_seconds() * 1000
 
        if hidden_params is not None:
            model_response_object._hidden_params = hidden_params
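
The test change above rebuilds each streamed chunk by constructing the `ModelResponse` shell first and then attaching typed `StreamingChoices` objects, rather than splatting the raw dict via `ModelResponse(**chunk, stream=True)`. Below is a minimal standalone sketch of that pattern; the sample chunk dict is hypothetical, and only the `ModelResponse` / `StreamingChoices` calls are taken from the patch itself.

```python
# Sketch of the chunk-rebuild pattern from the test above.
# Assumes litellm is installed; the sample chunk dict is hypothetical.
import litellm

chunk = {
    "id": "chatcmpl-123",
    "choices": [{"index": 0, "delta": {"content": "hello"}, "finish_reason": None}],
}

# Build the response shell, then attach typed StreamingChoices objects
# instead of splatting the raw dict into ModelResponse(**chunk).
new_chunk = litellm.ModelResponse(stream=True, id=chunk["id"])
if "choices" in chunk and isinstance(chunk["choices"], list):
    new_chunk.choices = [
        choice
        if isinstance(choice, litellm.utils.StreamingChoices)
        else litellm.utils.StreamingChoices(**choice)
        for choice in chunk["choices"]
        if isinstance(choice, (litellm.utils.StreamingChoices, dict))
    ]

print(new_chunk)
```

One caveat in the committed loop: if a choice is neither a `StreamingChoices` nor a dict, `_new_choice` carries over from the previous iteration (or is unbound on the first), so the sketch filters such choices out instead of appending them.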
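
The utils.py change guards the `_response_ms` computation so it only runs when `start_time` and `end_time` are the same type: subtracting a `float` timestamp from a `datetime` raises `TypeError`, and a `float` difference has no `.total_seconds()`. A minimal sketch of the guard in isolation, with a hypothetical helper name, returning `None` where the patch simply skips the assignment:

```python
# Illustrates the isinstance(start_time, type(end_time)) guard from the
# utils.py hunk; response_ms is a hypothetical helper, not litellm API.
import time
from datetime import datetime
from typing import Optional, Union

def response_ms(
    start_time: Optional[Union[datetime, float]],
    end_time: Optional[Union[datetime, float]],
) -> Optional[float]:
    if start_time is None or end_time is None:
        return None
    # Mixed datetime/float inputs would raise TypeError on subtraction,
    # and a float difference has no .total_seconds(); guard first.
    if not isinstance(start_time, type(end_time)):
        return None
    if isinstance(start_time, datetime):
        return (end_time - start_time).total_seconds() * 1000
    return (end_time - start_time) * 1000

print(response_ms(datetime(2024, 4, 25, 20, 0, 0), datetime(2024, 4, 25, 20, 0, 1)))  # 1000.0
print(response_ms(time.time(), datetime.now()))  # None: mixed types, skipped
```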