From 1019355527a82f46f2701800a1fe53836da5ccbc Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Wed, 10 Jul 2024 21:56:47 -0700
Subject: [PATCH] fix(types/utils.py): fix streaming function name

---
 litellm/types/utils.py |  4 +++-
 litellm/utils.py       | 14 +++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/litellm/types/utils.py b/litellm/types/utils.py
index 5f31798da..4ae88a745 100644
--- a/litellm/types/utils.py
+++ b/litellm/types/utils.py
@@ -166,7 +166,9 @@ class FunctionCall(OpenAIObject):
 
 class Function(OpenAIObject):
     arguments: str
-    name: str
+    name: Optional[
+        str
+    ]  # can be None - openai e.g.: ChoiceDeltaToolCallFunction(arguments='{"', name=None), type=None)
 
     def __init__(
         self,
diff --git a/litellm/utils.py b/litellm/utils.py
index cf2c679a8..39ddc02ac 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -8126,7 +8126,7 @@ class CustomStreamWrapper:
 
             if chunk.startswith(self.complete_response):
                 # Remove last_sent_chunk only if it appears at the start of the new chunk
-                chunk = chunk[len(self.complete_response):]
+                chunk = chunk[len(self.complete_response) :]
 
             self.complete_response += chunk
             return chunk
@@ -9483,8 +9483,8 @@ class CustomStreamWrapper:
                     model_response.choices[0].delta = Delta(**_json_delta)
                 except Exception as e:
                     verbose_logger.error(
-                        "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format(
-                            str(e)
+                        "litellm.CustomStreamWrapper.chunk_creator(): Exception occurred - {}\n{}".format(
+                            str(e), traceback.format_exc()
                         )
                     )
                     verbose_logger.debug(traceback.format_exc())
@@ -10124,7 +10124,7 @@ def mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
@@ -10133,7 +10133,7 @@ def mock_completion_streaming_obj(
             _streaming_choice = litellm.utils.StreamingChoices(
                 index=j,
                 delta=litellm.utils.Delta(
-                    role="assistant", content=mock_response[i: i + 3]
+                    role="assistant", content=mock_response[i : i + 3]
                 ),
             )
             _all_choices.append(_streaming_choice)
@@ -10145,7 +10145,7 @@ async def async_mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
@@ -10154,7 +10154,7 @@ async def async_mock_completion_streaming_obj(
             _streaming_choice = litellm.utils.StreamingChoices(
                 index=j,
                 delta=litellm.utils.Delta(
-                    role="assistant", content=mock_response[i: i + 3]
+                    role="assistant", content=mock_response[i : i + 3]
                 ),
             )
             _all_choices.append(_streaming_choice)
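
Note: the `name: Optional[str]` change in litellm/types/utils.py is needed because, when
streaming tool calls, OpenAI sends the function name only in the first delta; every later
delta carries an arguments fragment with name=None. A minimal sketch of that behavior,
assuming the openai Python SDK (v1.x), a valid OPENAI_API_KEY, and a hypothetical
get_weather tool; the model name is illustrative:

    # Illustration only; the tool definition and model name are assumptions,
    # not part of this patch.
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "What's the weather in Boston?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "parameters": {
                        "type": "object",
                        "properties": {"location": {"type": "string"}},
                        "required": ["location"],
                    },
                },
            }
        ],
        stream=True,
    )

    for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.tool_calls:
            fn = delta.tool_calls[0].function
            # First delta:  name='get_weather', arguments=''
            # Later deltas: name=None, arguments='{"' (and further fragments)
            print(fn.name, repr(fn.arguments))

Any code that accumulates streamed tool calls therefore has to tolerate name=None on all
but the first fragment, which is what the widened type models.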
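
Note: the mock-streaming hunks only add Black-style spacing inside the slice
(mock_response[i : i + 3]); behavior is unchanged. For reference, a standalone sketch of
the chunking pattern those generators use, with a hypothetical chunk_text helper:

    # Hypothetical helper mirroring mock_response[i : i + 3] in the mock
    # streaming generators: emit the text three characters per delta.
    def chunk_text(text: str, size: int = 3):
        for i in range(0, len(text), size):
            yield text[i : i + size]

    print(list(chunk_text("hello world")))
    # -> ['hel', 'lo ', 'wor', 'ld']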