Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fixes to mock completion
parent 1b2cf704af
commit e2ea4adb84
7 changed files with 26 additions and 25 deletions
@@ -2291,7 +2291,7 @@ class CustomStreamWrapper:
         # Log the type of the received item
         self.logging_obj.post_call(str(type(completion_stream)))
         if model in litellm.cohere_models:
-            # cohere does not return an iterator, so we need to wrap it in one
+            # these do not return an iterator, so we need to wrap it in one
             self.completion_stream = iter(completion_stream)
         else:
             self.completion_stream = completion_stream
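The reworded comment generalizes the original cohere-specific note: any provider whose SDK hands back a sequence of chunks rather than an iterator gets the same treatment. A minimal sketch of why the iter() wrap matters; the list standing in for such a response is an assumption, not litellm's actual return type:

# Sketch: some SDKs return a list-like object of chunks rather than a
# generator. Wrapping it in iter() lets the surrounding wrapper drive
# every provider's stream the same way, via next().
completion_stream = ["chunk-1", "chunk-2"]  # hypothetical non-iterator response

stream = iter(completion_stream)
print(next(stream))  # -> "chunk-1"
print(next(stream))  # -> "chunk-2"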
@@ -2461,6 +2461,12 @@ class CustomStreamWrapper:
             raise StopAsyncIteration


+def mock_completion_streaming_obj(model_response, mock_response, model):
+    for i in range(0, len(mock_response), 3):
+        completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
+        model_response.choices[0].delta = completion_obj
+        yield model_response
+
 ########## Reading Config File ############################
 def read_config_args(config_path):
     try:
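The new helper replays a canned response in three-character deltas, mimicking a real streaming API. A self-contained sketch of how it behaves; the SimpleNamespace stand-in for litellm's ModelResponse is an assumption made only to keep the example runnable:

from types import SimpleNamespace

def mock_completion_streaming_obj(model_response, mock_response, model):
    # Body as in the diff above: slice the mock text into 3-character
    # chunks and yield each one as an assistant delta.
    for i in range(0, len(mock_response), 3):
        completion_obj = {"role": "assistant", "content": mock_response[i: i+3]}
        model_response.choices[0].delta = completion_obj
        yield model_response

# Hypothetical stand-in for litellm's ModelResponse, just for this sketch.
response = SimpleNamespace(choices=[SimpleNamespace(delta=None)])
for chunk in mock_completion_streaming_obj(response, "hello world", model="gpt-3.5-turbo"):
    print(chunk.choices[0].delta["content"])  # prints "hel", "lo ", "wor", "ld"

Note that the generator mutates and re-yields the same model_response object (and never reads the model argument), so a consumer must use each delta before pulling the next chunk; collecting the yielded objects into a list would leave only the final delta visible.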