mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
add finish reason to streamed responses

parent ca72ebd2df
commit 0f769c5417

3 changed files with 27 additions and 4 deletions
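In short: streamed responses now expose `finish_reason` on each chunk, alongside the OpenAI-style `delta`. Below is a minimal sketch of what a caller can do after this change, based on the calling pattern the tests in this commit exercise; the model name and messages are illustrative, not part of the commit.

```python
from litellm import completion

# Illustrative inputs; the tests below use this same calling pattern.
messages = [{"role": "user", "content": "Hey, how's it going?"}]
response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)

for chunk in response:
    print(chunk["choices"][0]["delta"])          # incremental content (OpenAI format)
    print(chunk["choices"][0]["finish_reason"])  # newly surfaced by this commit
```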
Changed file 1/3: completion tests.

```diff
@@ -32,6 +32,7 @@ def test_completion_custom_provider_model_name():
         )
         # Add any assertions here to check the response
         print(response)
+        print(response['choices'][0]['finish_reason'])
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
@@ -107,8 +108,10 @@ def test_completion_claude_stream():
         # Add any assertions here to check the response
         for chunk in response:
             print(chunk["choices"][0]["delta"]) # same as openai format
+            print(chunk["choices"][0]["finish_reason"])
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+# test_completion_claude_stream()
 
 
 
@@ -173,8 +176,10 @@ def test_completion_cohere_stream():
         # Add any assertions here to check the response
         for chunk in response:
             print(chunk["choices"][0]["delta"]) # same as openai format
+            print(chunk["choices"][0]["finish_reason"])
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+# test_completion_cohere_stream()
 
 
 def test_completion_openai():
@@ -293,8 +298,12 @@ def test_completion_openai_with_stream():
         )
         # Add any assertions here to check the response
         print(response)
+        for chunk in response:
+            print(chunk)
+            print(chunk["choices"][0]["finish_reason"])
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+# test_completion_openai_with_stream()
 
 
 def test_completion_openai_with_functions():
@@ -317,12 +326,16 @@ def test_completion_openai_with_functions():
     ]
     try:
         response = completion(
-            model="gpt-3.5-turbo", messages=messages, functions=function1
+            model="gpt-3.5-turbo", messages=messages, functions=function1, stream=True
         )
         # Add any assertions here to check the response
         print(response)
+        for chunk in response:
+            print(chunk)
+            print(chunk["choices"][0]["finish_reason"])
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
+# test_completion_openai_with_functions()
 
 
 def test_completion_azure():
```
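All five tests print each chunk's `finish_reason` rather than asserting on it. A stricter check is possible; the helper below is a hypothetical sketch of mine, not part of the commit, and it assumes chunks shaped like the wrapper change further down.

```python
# Hypothetical helper (not in the commit): drain a stream and check that
# the last chunk carries a finish_reason. Note that with the wrapper change
# below, non-OpenAI providers mark every chunk "stop", so only the weaker
# "last chunk is terminal" property is safe to assert across providers.
def assert_stream_finishes(response):
    finish_reasons = [chunk["choices"][0]["finish_reason"] for chunk in response]
    assert finish_reasons, "stream yielded no chunks"
    assert finish_reasons[-1] is not None, "last chunk should carry a finish_reason"
```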
Changed file 2/3: the streaming wrapper (class CustomStreamWrapper).

```diff
@@ -2198,12 +2198,22 @@ class CustomStreamWrapper:
                 completion_obj["content"] = self.handle_openai_text_completion_chunk(chunk)
             else: # openai chat/azure models
                 chunk = next(self.completion_stream)
-                completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
+                return chunk # open ai returns finish_reason, we should just return the openai chunk
+
+                #completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
             # LOGGING
             threading.Thread(target=self.logging_obj.success_handler, args=(completion_obj,)).start()
             # return this for all models
-            return {"choices": [{"delta": completion_obj}]}
+            return {
+                "choices":
+                    [
+                        {
+                            "delta": completion_obj,
+                            "finish_reason": "stop"
+                        },
+                    ]
+            }
         except Exception as e:
             raise StopIteration
```
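Two things worth noting, reading only this hunk: the early `return chunk` for OpenAI/Azure chat models means the logging thread below it no longer runs on that path, and for every other provider the synthesized chunk hardcodes `finish_reason: "stop"` on each chunk, whereas OpenAI's own stream sends null until the terminal chunk. Here is a minimal sketch of the dict shape the wrapper now yields for non-OpenAI providers, assuming only what the diff shows; the `content` argument is illustrative.

```python
# Minimal sketch of the chunk shape CustomStreamWrapper now yields for
# non-OpenAI providers, per the diff above. Only "content" is shown being
# set in the diff; any other keys on completion_obj are not assumed here.
def make_stream_chunk(content: str) -> dict:
    completion_obj = {"content": content}
    return {
        "choices": [
            {
                "delta": completion_obj,
                # Hardcoded by this commit on every chunk, unlike OpenAI,
                # which sends finish_reason = None until the final chunk.
                "finish_reason": "stop",
            },
        ]
    }
```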
Changed file 3/3: pyproject.toml.

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.580"
+version = "0.1.582"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
```
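The version jumps from 0.1.580 to 0.1.582, skipping 0.1.581, so an intermediate release was presumably cut elsewhere. With Poetry, a bump like this can be made either by editing `pyproject.toml` directly, as done here, or with `poetry version 0.1.582`.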