From a44aa8c10c5e3ce91449072575b6a1a01becdbcc Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 9 Nov 2023 10:22:59 -0800
Subject: [PATCH] (test) add proxy cli tests

---
 litellm/proxy/tests/test_async.py | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
 create mode 100644 litellm/proxy/tests/test_async.py

diff --git a/litellm/proxy/tests/test_async.py b/litellm/proxy/tests/test_async.py
new file mode 100644
index 0000000000..6c00cfeadf
--- /dev/null
+++ b/litellm/proxy/tests/test_async.py
@@ -0,0 +1,33 @@
+# This tests the litellm proxy
+# it makes async Completion requests, with and without streaming
+import asyncio
+
+import openai
+
+# point the openai client at the locally running proxy
+openai.api_base = "http://0.0.0.0:8000"
+openai.api_key = "temp-key"
+print(openai.api_base)
+
+
+async def test_async_completion():
+    # non-streaming async completion request
+    response = await openai.Completion.acreate(
+        model="gpt-3.5-turbo",
+        prompt='this is a test request, write a short poem',
+    )
+    print(response)
+
+    # streaming async completion request
+    print("test_streaming")
+    response = await openai.Completion.acreate(
+        model="gpt-3.5-turbo",
+        prompt='this is a test request, write a short poem',
+        stream=True,
+    )
+    print(response)  # with stream=True this is an async generator, not a full response
+    async for chunk in response:
+        print(chunk)
+
+
+asyncio.run(test_async_completion())
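
Usage note (a sketch, not part of the patch): the test assumes a LiteLLM proxy
is already listening on http://0.0.0.0:8000. Assuming the proxy CLI this
commit's subject refers to, one way to run the test locally might be:

    # start the proxy; port 8000 was the CLI default at the time of this patch
    litellm --model gpt-3.5-turbo

    # in a second shell, run the test against it
    python litellm/proxy/tests/test_async.py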