bump: version 1.7.0 → 1.7.1

Krrish Dholakia 2023-11-25 12:34:28 -08:00
parent 620633ec28
commit 30f47d3169
4 changed files with 33 additions and 33 deletions

View file

@@ -199,7 +199,7 @@ class OpenAIChatCompletion(BaseLLM):
api_key=api_key,
additional_args={"headers": headers, "api_base": api_base, "acompletion": acompletion, "complete_input_dict": data},
)
try:
if acompletion is True:
if optional_params.get("stream", False):

View file

@@ -229,7 +229,7 @@ def test_completion_azure_stream():
print(f"completion_response: {complete_response}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_completion_azure_stream()
# test_completion_azure_stream()
def test_completion_claude_stream():
try:
@@ -290,35 +290,35 @@ def test_completion_palm_stream():
pytest.fail(f"Error occurred: {e}")
# test_completion_palm_stream()
# def test_completion_deep_infra_stream():
# # deep infra currently includes role in the 2nd chunk
# # waiting for them to make a fix on this
# try:
# messages = [
# {"role": "system", "content": "You are a helpful assistant."},
# {
# "role": "user",
# "content": "how does a court case get to the Supreme Court?",
# },
# ]
# print("testing deep infra streaming")
# response = completion(
# model="deepinfra/meta-llama/Llama-2-70b-chat-hf", messages=messages, stream=True, max_tokens=80
# )
def test_completion_deep_infra_stream():
# deep infra currently includes role in the 2nd chunk
# waiting for them to make a fix on this
try:
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": "how does a court case get to the Supreme Court?",
},
]
print("testing deep infra streaming")
response = completion(
model="deepinfra/meta-llama/Llama-2-70b-chat-hf", messages=messages, stream=True, max_tokens=80
)
# complete_response = ""
# # Add any assertions here to check the response
# for idx, chunk in enumerate(response):
# chunk, finished = streaming_format_tests(idx, chunk)
# if finished:
# break
# complete_response += chunk
# if complete_response.strip() == "":
# raise Exception("Empty response received")
# print(f"completion_response: {complete_response}")
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_deep_infra_stream()
complete_response = ""
# Add any assertions here to check the response
for idx, chunk in enumerate(response):
chunk, finished = streaming_format_tests(idx, chunk)
if finished:
break
complete_response += chunk
if complete_response.strip() == "":
raise Exception("Empty response received")
print(f"completion_response: {complete_response}")
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_completion_deep_infra_stream()
def test_completion_claude_stream_bad_key():
try:

View file

@@ -2301,7 +2301,7 @@ def get_optional_params( # use the openai defaults
if n:
optional_params["n"] = n
if stream:
optional_params["stream"] = str
optional_params["stream"] = stream
if stop:
optional_params["stop"] = stop
if max_tokens:
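
Note: the change above fixes a bug where the builtin type str, rather than the caller's value, was assigned to optional_params["stream"]. A minimal sketch of the before/after behaviour, using a trimmed-down stand-in for get_optional_params that only handles the stream argument (illustrative only, not the real function):

# Stand-in functions showing why assigning the builtin `str` was wrong.
def get_optional_params_old(stream=None):
    optional_params = {}
    if stream:
        optional_params["stream"] = str      # bug: stores the type object <class 'str'>
    return optional_params

def get_optional_params_new(stream=None):
    optional_params = {}
    if stream:
        optional_params["stream"] = stream   # fix: stores the actual boolean flag
    return optional_params

print(get_optional_params_old(stream=True))  # {'stream': <class 'str'>}
print(get_optional_params_new(stream=True))  # {'stream': True}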

View file

@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "1.7.0"
version = "1.7.1"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"
@ -27,7 +27,7 @@ requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.commitizen]
version = "1.7.0"
version = "1.7.1"
version_files = [
"pyproject.toml:^version"
]