Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix(types/utils.py): fix streaming function name
This commit is contained in: parent 31829855c0, commit 1019355527.
2 changed files with 10 additions and 8 deletions
@@ -8126,7 +8126,7 @@ class CustomStreamWrapper:
         if chunk.startswith(self.complete_response):
             # Remove last_sent_chunk only if it appears at the start of the new chunk
-            chunk = chunk[len(self.complete_response):]
+            chunk = chunk[len(self.complete_response) :]
 
         self.complete_response += chunk
         return chunk
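This hunk is formatter-driven only: when a slice bound is an expression rather than a simple name, Black (following PEP 8's slice rule) puts a space on each side of the colon. A minimal, self-contained sketch of the prefix-dedup behavior the hunk touches, with hypothetical sample values (the real code operates on streamed model output):

```python
# Sketch of the dedup logic in this hunk; sample strings are hypothetical.
complete_response = "Hello"
chunk = "Hello, world"

if chunk.startswith(complete_response):
    # Drop the already-sent prefix so only new text is emitted.
    chunk = chunk[len(complete_response) :]  # Black style: spaces around ':' for expression bounds

complete_response += chunk
print(chunk)              # ", world"
print(complete_response)  # "Hello, world"
```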
@@ -9483,8 +9483,8 @@ class CustomStreamWrapper:
             model_response.choices[0].delta = Delta(**_json_delta)
         except Exception as e:
             verbose_logger.error(
-                "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format(
-                    str(e)
+                "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}\n{}".format(
+                    str(e), traceback.format_exc()
                 )
             )
             verbose_logger.debug(traceback.format_exc())
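This is the substantive change in the commit: the traceback moves from a separate debug-level line into the error message itself, so the stack trace is visible even when debug logging is disabled. A self-contained sketch of the same pattern, assuming a standard `logging` logger (the logger name and the failing call are illustrative stand-ins, and the message string, typo included, is copied verbatim from the diff):

```python
import logging
import traceback

logging.basicConfig(level=logging.ERROR)
verbose_logger = logging.getLogger("LiteLLM")  # illustrative logger name

def chunk_creator():
    try:
        raise ValueError("bad delta")  # stand-in for the failing Delta(**_json_delta) call
    except Exception as e:
        # After this commit, the traceback travels with the error record,
        # so it appears even when DEBUG logging is off.
        verbose_logger.error(
            "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}\n{}".format(
                str(e), traceback.format_exc()
            )
        )

chunk_creator()
```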
@@ -10124,7 +10124,7 @@ def mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
@@ -10133,7 +10133,7 @@ def mock_completion_streaming_obj(
                 _streaming_choice = litellm.utils.StreamingChoices(
                     index=j,
                     delta=litellm.utils.Delta(
-                        role="assistant", content=mock_response[i: i + 3]
+                        role="assistant", content=mock_response[i : i + 3]
                     ),
                 )
                 _all_choices.append(_streaming_choice)
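Both hunks above are slice-spacing fixes inside the mock streamer, which slices the canned response into fixed 3-character chunks. A standalone sketch of that chunking pattern (the function name and generator form are simplifications, not litellm's API):

```python
from typing import Iterator

def mock_stream_chunks(mock_response: str, step: int = 3) -> Iterator[str]:
    # Same slicing pattern as mock_completion_streaming_obj:
    # emit the canned response in fixed-size pieces.
    for i in range(0, len(mock_response), step):
        yield mock_response[i : i + step]

print(list(mock_stream_chunks("hello world")))
# ['hel', 'lo ', 'wor', 'ld']
```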
@@ -10145,7 +10145,7 @@ async def async_mock_completion_streaming_obj(
     model_response, mock_response, model, n: Optional[int] = None
 ):
     for i in range(0, len(mock_response), 3):
-        completion_obj = Delta(role="assistant", content=mock_response[i: i + 3])
+        completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
         if n is None:
             model_response.choices[0].delta = completion_obj
         else:
@@ -10154,7 +10154,7 @@ async def async_mock_completion_streaming_obj(
                 _streaming_choice = litellm.utils.StreamingChoices(
                     index=j,
                     delta=litellm.utils.Delta(
-                        role="assistant", content=mock_response[i: i + 3]
+                        role="assistant", content=mock_response[i : i + 3]
                     ),
                 )
                 _all_choices.append(_streaming_choice)
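The async variant receives the identical fix. For completeness, a hypothetical async analogue of the same chunking, written as an async generator (again a simplification, not litellm's actual interface):

```python
import asyncio
from typing import AsyncIterator

async def async_mock_stream_chunks(mock_response: str, step: int = 3) -> AsyncIterator[str]:
    # Async counterpart of the chunking loop above.
    for i in range(0, len(mock_response), step):
        yield mock_response[i : i + step]

async def main() -> None:
    chunks = [c async for c in async_mock_stream_chunks("hello world")]
    print(chunks)  # ['hel', 'lo ', 'wor', 'ld']

asyncio.run(main())
```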