From 30f47d3169a8587fd54062d57fd75965b2427001 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 25 Nov 2023 12:34:28 -0800
Subject: [PATCH] =?UTF-8?q?bump:=20version=201.7.0=20=E2=86=92=201.7.1?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 litellm/llms/openai.py          |  2 +-
 litellm/tests/test_streaming.py | 58 ++++++++++++++++-----------------
 litellm/utils.py                |  2 +-
 pyproject.toml                  |  4 +--
 4 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index f30ed3a768..6d0809442c 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -199,7 +199,7 @@ class OpenAIChatCompletion(BaseLLM):
                 api_key=api_key,
                 additional_args={"headers": headers, "api_base": api_base, "acompletion": acompletion, "complete_input_dict": data},
             )
-        
+
         try:
             if acompletion is True:
                 if optional_params.get("stream", False):
diff --git a/litellm/tests/test_streaming.py b/litellm/tests/test_streaming.py
index 1a635460a2..137f38e73c 100644
--- a/litellm/tests/test_streaming.py
+++ b/litellm/tests/test_streaming.py
@@ -229,7 +229,7 @@ def test_completion_azure_stream():
         print(f"completion_response: {complete_response}")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_azure_stream()
+# test_completion_azure_stream()
 
 def test_completion_claude_stream():
     try:
@@ -290,35 +290,35 @@ def test_completion_palm_stream():
         pytest.fail(f"Error occurred: {e}")
 # test_completion_palm_stream()
 
-# def test_completion_deep_infra_stream():
-#     # deep infra currently includes role in the 2nd chunk
-#     # waiting for them to make a fix on this
-#     try:
-#         messages = [
-#             {"role": "system", "content": "You are a helpful assistant."},
-#             {
-#                 "role": "user",
-#                 "content": "how does a court case get to the Supreme Court?",
-#             },
-#         ]
-#         print("testing deep infra streaming")
-#         response = completion(
-#             model="deepinfra/meta-llama/Llama-2-70b-chat-hf", messages=messages, stream=True, max_tokens=80
-#         )
+def test_completion_deep_infra_stream():
+    # deep infra currently includes role in the 2nd chunk
+    # waiting for them to make a fix on this
+    try:
+        messages = [
+            {"role": "system", "content": "You are a helpful assistant."},
+            {
+                "role": "user",
+                "content": "how does a court case get to the Supreme Court?",
+            },
+        ]
+        print("testing deep infra streaming")
+        response = completion(
+            model="deepinfra/meta-llama/Llama-2-70b-chat-hf", messages=messages, stream=True, max_tokens=80
+        )
 
-#         complete_response = ""
-#         # Add any assertions here to check the response
-#         for idx, chunk in enumerate(response):
-#             chunk, finished = streaming_format_tests(idx, chunk)
-#             if finished:
-#                 break
-#             complete_response += chunk
-#         if complete_response.strip() == "":
-#             raise Exception("Empty response received")
-#         print(f"completion_response: {complete_response}")
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-# test_completion_deep_infra_stream()
+        complete_response = ""
+        # Add any assertions here to check the response
+        for idx, chunk in enumerate(response):
+            chunk, finished = streaming_format_tests(idx, chunk)
+            if finished:
+                break
+            complete_response += chunk
+        if complete_response.strip() == "":
+            raise Exception("Empty response received")
+        print(f"completion_response: {complete_response}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+test_completion_deep_infra_stream()
 
 def test_completion_claude_stream_bad_key():
     try:
diff --git a/litellm/utils.py b/litellm/utils.py
index 9220765751..3466e236a4 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -2301,7 +2301,7 @@ def get_optional_params( # use the openai defaults
         if n:
             optional_params["n"] = n
         if stream:
-            optional_params["stream"] = str
+            optional_params["stream"] = stream
         if stop:
             optional_params["stop"] = stop
         if max_tokens:
diff --git a/pyproject.toml b/pyproject.toml
index 92f89f907a..0acc5312ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "1.7.0"
+version = "1.7.1"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
@@ -27,7 +27,7 @@ requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.commitizen]
-version = "1.7.0"
+version = "1.7.1"
 version_files = [
     "pyproject.toml:^version"
 ]