From 2a033fd8a24949aa448161268d3d08dea801f00f Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 24 Nov 2023 12:30:08 -0800
Subject: [PATCH] test(test_router_cooldowns.py): adding logging

---
 litellm/proxy/tests/test_async.py      | 46 +++++++++++++-------------
 litellm/tests/test_router_cooldowns.py |  2 +-
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/litellm/proxy/tests/test_async.py b/litellm/proxy/tests/test_async.py
index bd88649529..fab10ee08d 100644
--- a/litellm/proxy/tests/test_async.py
+++ b/litellm/proxy/tests/test_async.py
@@ -1,29 +1,29 @@
-# This tests the litelm proxy
-# it makes async Completion requests with streaming
-import openai
+# # This tests the litelm proxy
+# # it makes async Completion requests with streaming
+# import openai
 
-openai.base_url = "http://0.0.0.0:8000"
-openai.api_key = "temp-key"
-print(openai.base_url)
+# openai.base_url = "http://0.0.0.0:8000"
+# openai.api_key = "temp-key"
+# print(openai.base_url)
 
 
-async def test_async_completion():
-    response = await openai.Completion.acreate(
-        model="gpt-3.5-turbo",
-        prompt='this is a test request, write a short poem',
-    )
-    print(response)
+# async def test_async_completion():
+#     response = await (
+#         model="gpt-3.5-turbo",
+#         prompt='this is a test request, write a short poem',
+#     )
+#     print(response)
 
-    print("test_streaming")
-    response = await openai.Completion.acreate(
-        model="gpt-3.5-turbo",
-        prompt='this is a test request, write a short poem',
-        stream=True
-    )
-    print(response)
-    async for chunk in response:
-        print(chunk)
+#     print("test_streaming")
+#     response = await openai.chat.completions.create(
+#         model="gpt-3.5-turbo",
+#         prompt='this is a test request, write a short poem',
+#         stream=True
+#     )
+#     print(response)
+#     async for chunk in response:
+#         print(chunk)
 
 
-import asyncio
-asyncio.run(test_async_completion())
+# import asyncio
+# asyncio.run(test_async_completion())
diff --git a/litellm/tests/test_router_cooldowns.py b/litellm/tests/test_router_cooldowns.py
index 0c7079bd15..0fb1369c57 100644
--- a/litellm/tests/test_router_cooldowns.py
+++ b/litellm/tests/test_router_cooldowns.py
@@ -40,7 +40,7 @@ router = Router(model_list=model_list,
                 redis_password=os.getenv("REDIS_PASSWORD"),
                 redis_port=int(os.getenv("REDIS_PORT")), # type: ignore
                 routing_strategy="simple-shuffle",
-                set_verbose=False,
+                set_verbose=True,
                 num_retries=1) # type: ignore
 kwargs = {"model": "gpt-3.5-turbo",
          "messages": [{"role": "user", "content": "Hey, how's it going?"}],}