From a2ea39818d062137c542aeccb1b3c2ae7e72abd4 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 17 Aug 2023 07:54:56 -0700
Subject: [PATCH] test_proxy_stream

---
 cookbook/proxy-server/test_proxy_stream.py | 32 +++++++++++-----------
 pyproject.toml                             |  2 +-
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/cookbook/proxy-server/test_proxy_stream.py b/cookbook/proxy-server/test_proxy_stream.py
index 8bd2d767c..8b358f058 100644
--- a/cookbook/proxy-server/test_proxy_stream.py
+++ b/cookbook/proxy-server/test_proxy_stream.py
@@ -1,21 +1,21 @@
-import openai
-import os
+# import openai
+# import os
 
-os.environ["OPENAI_API_KEY"] = ""
+# os.environ["OPENAI_API_KEY"] = ""
 
-openai.api_key = os.environ["OPENAI_API_KEY"]
-openai.api_base ="http://localhost:5000"
+# openai.api_key = os.environ["OPENAI_API_KEY"]
+# openai.api_base ="http://localhost:5000"
 
-messages = [
-    {
-        "role": "user",
-        "content": "write a 1 pg essay in liteLLM"
-    }
-]
+# messages = [
+#     {
+#         "role": "user",
+#         "content": "write a 1 pg essay in liteLLM"
+#     }
+# ]
 
-response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, stream=True)
-print("got response", response)
-# response is a generator
+# response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, stream=True)
+# print("got response", response)
+# # response is a generator
 
-for chunk in response:
-    print(chunk)
\ No newline at end of file
+# for chunk in response:
+#     print(chunk)
diff --git a/pyproject.toml b/pyproject.toml
index a83ff9bbc..7e047abee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.406"
+version = "0.1.408"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
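
For context, a minimal runnable sketch of the streaming test this patch comments out, reconstructed from the removed lines. It assumes the pre-1.0 openai SDK (which exposes openai.ChatCompletion) and a LiteLLM proxy listening on http://localhost:5000, both taken from the patch itself; reading the API key from the environment rather than hardcoding an empty string is my own substitution, not the original code.

# Streaming smoke test against a local LiteLLM proxy.
# Assumes: openai<1.0 (ChatCompletion API) and a proxy at
# http://localhost:5000 -- both from the patch above. Reading
# OPENAI_API_KEY from the environment (instead of the patch's
# hardcoded empty string) is an assumption, not the original code.
import os

import openai

openai.api_key = os.environ.get("OPENAI_API_KEY", "")
openai.api_base = "http://localhost:5000"

messages = [{"role": "user", "content": "write a 1 pg essay in liteLLM"}]

# stream=True makes the call return a generator of incremental chunks
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo", messages=messages, stream=True
)
for chunk in response:
    print(chunk)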