mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 10:14:26 +00:00)
fix(utils.py): return last streaming chunk

parent a9ed768991
commit 2b437a2699

2 changed files with 8 additions and 16 deletions
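For context, a minimal caller-side sketch of the behavior this commit targets (a sketch, not code from this commit; it assumes an OpenAI API key is configured, and the model/message mirror the test file changed below):

import litellm

# Assumption: OPENAI_API_KEY is set in the environment.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    stream=True,
)

chunks = list(response)

# The final chunk of a stream is the one carrying finish_reason; this commit
# makes CustomStreamWrapper return (and log) that chunk instead of dropping it.
print(chunks[-1].choices[0].finish_reason)  # e.g. "stop"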
@@ -37,15 +37,7 @@ def test_chat_openai():
                         "role": "user",
                         "content": "Hi 👋 - i'm openai"
                     }],
-                    stream=True,
-                    complete_response = True)
-        response2 = completion(model="gpt-3.5-turbo",
-                    messages=[{
-                        "role": "user",
-                        "content": "Hi 👋 - i'm not openai"
-                    }],
-                    stream=True,
-                    complete_response = True)
+                    stream=True)
         time.sleep(1)
         assert customHandler.success == True
     except Exception as e:
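The customHandler asserted on above is defined elsewhere in this test file; a hedged sketch of the pattern, assuming litellm's function-style success_callback hook accepts a callable (names here are illustrative, not the test's actual code):

import litellm

class MyCustomHandler:
    """Illustrative stand-in for the test's customHandler."""
    def __init__(self):
        self.success = False

    def __call__(self, kwargs, completion_response, start_time, end_time):
        # Invoked by litellm's success logging once a call (or stream) completes;
        # the utils.py change in this commit is what fires this for the last chunk.
        self.success = True

customHandler = MyCustomHandler()
litellm.success_callback = [customHandler]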
@@ -53,7 +45,7 @@ def test_chat_openai():
         pass


-# test_chat_openai()
+test_chat_openai()

 def test_completion_azure_stream_moderation_failure():
     try:
@@ -80,7 +72,7 @@ def test_completion_azure_stream_moderation_failure():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_completion_azure_stream_moderation_failure()
+# test_completion_azure_stream_moderation_failure()


 # def custom_callback(
@@ -5242,6 +5242,11 @@ class CustomStreamWrapper:
                     return model_response
                 else:
                     return
+            elif model_response.choices[0].finish_reason:
+                model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
+                # LOGGING
+                threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
+                return model_response
             elif response_obj is not None and response_obj.get("original_chunk", None) is not None: # function / tool calling branch - only set for openai/azure compatible endpoints
                 # enter this branch when no content has been passed in response
                 original_chunk = response_obj.get("original_chunk", None)
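map_finish_reason, called in the added branch, normalizes provider-specific finish reasons onto the values OpenAI clients expect. An illustrative stand-in (the mapping shown is hypothetical, not litellm's actual table):

def map_finish_reason_sketch(finish_reason: str) -> str:
    # Hypothetical provider-specific values, assumed for illustration only.
    mapping = {
        "stop_sequence": "stop",   # an Anthropic-style stop value
        "max_tokens": "length",    # a token-limit style value
    }
    # Values already in OpenAI's vocabulary pass through unchanged.
    return mapping.get(finish_reason, finish_reason)

assert map_finish_reason_sketch("stop_sequence") == "stop"
assert map_finish_reason_sketch("stop") == "stop"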
@@ -5263,11 +5268,6 @@ class CustomStreamWrapper:
                         self.sent_first_chunk = True
                     threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start() # log response
                     return model_response
-            elif model_response.choices[0].finish_reason:
-                model_response.choices[0].finish_reason = map_finish_reason(model_response.choices[0].finish_reason) # ensure consistent output to openai
-                # LOGGING
-                threading.Thread(target=self.logging_obj.success_handler, args=(model_response,)).start()
-                return model_response
             else:
                 return
         except StopIteration:
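Taken together, the two utils.py hunks move the finish_reason branch ahead of the tool-calling branch. A condensed sketch of the resulting dispatch order (assumed structure and names, not the real chunk_creator):

from types import SimpleNamespace

def dispatch_chunk(model_response, response_obj):
    choice = model_response.choices[0]
    if getattr(choice.delta, "content", None):     # ordinary content chunk
        return model_response
    elif choice.finish_reason:                     # checked first after this commit:
        return model_response                      # the last chunk is returned
    elif response_obj and response_obj.get("original_chunk") is not None:
        return model_response                      # tool / function-call chunk
    return None                                    # nothing to emit

# The last chunk of a stream typically has no content, only finish_reason; with
# the old ordering it could fall through the later branches and never be returned.
final = SimpleNamespace(
    choices=[SimpleNamespace(delta=SimpleNamespace(content=None), finish_reason="stop")]
)
assert dispatch_chunk(final, {"original_chunk": None}) is final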