test(test_router_cooldowns.py): adding logging

Krrish Dholakia 2023-11-24 12:30:08 -08:00
parent 27fd144950
commit f630901a77
2 changed files with 24 additions and 24 deletions


@@ -1,29 +1,29 @@
-# This tests the litelm proxy
-# it makes async Completion requests with streaming
-import openai
-openai.base_url = "http://0.0.0.0:8000"
-openai.api_key = "temp-key"
-print(openai.base_url)
-async def test_async_completion():
-    response = await openai.Completion.acreate(
-        model="gpt-3.5-turbo",
-        prompt='this is a test request, write a short poem',
-    )
-    print(response)
-    print("test_streaming")
-    response = await openai.Completion.acreate(
-        model="gpt-3.5-turbo",
-        prompt='this is a test request, write a short poem',
-        stream=True
-    )
-    print(response)
-    async for chunk in response:
-        print(chunk)
-import asyncio
-asyncio.run(test_async_completion())
+# # This tests the litelm proxy
+# # it makes async Completion requests with streaming
+# import openai
+# openai.base_url = "http://0.0.0.0:8000"
+# openai.api_key = "temp-key"
+# print(openai.base_url)
+# async def test_async_completion():
+#     response = await (
+#         model="gpt-3.5-turbo",
+#         prompt='this is a test request, write a short poem',
+#     )
+#     print(response)
+#     print("test_streaming")
+#     response = await openai.chat.completions.create(
+#         model="gpt-3.5-turbo",
+#         prompt='this is a test request, write a short poem',
+#         stream=True
+#     )
+#     print(response)
+#     async for chunk in response:
+#         print(chunk)
+# import asyncio
+# asyncio.run(test_async_completion())
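
The new (right-hand) side of this hunk comments the whole script out, and the stray `response = await (` / `openai.chat.completions.create(` fragments suggest a half-finished move from the pre-1.0 `openai.Completion.acreate` API to the openai>=1.0 client. For reference only, a minimal sketch of what that migrated async test could look like against the same proxy endpoint; the AsyncOpenAI usage below is an assumption about where the commented code was headed, not part of this commit:

# Hedged sketch (not part of this commit): the commented-out test rewritten
# for the openai>=1.0 async client, pointed at the same local proxy.
import asyncio

from openai import AsyncOpenAI

# Same endpoint and placeholder key the old test used.
client = AsyncOpenAI(base_url="http://0.0.0.0:8000", api_key="temp-key")


async def test_async_completion():
    # Non-streaming request (replaces openai.Completion.acreate).
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "this is a test request, write a short poem"}],
    )
    print(response)

    print("test_streaming")
    # Streaming request: the awaited call returns an async iterator of chunks.
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "this is a test request, write a short poem"}],
        stream=True,
    )
    async for chunk in stream:
        print(chunk)


asyncio.run(test_async_completion())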

test_router_cooldowns.py

@@ -40,7 +40,7 @@ router = Router(model_list=model_list,
     redis_password=os.getenv("REDIS_PASSWORD"),
     redis_port=int(os.getenv("REDIS_PORT")), # type: ignore
     routing_strategy="simple-shuffle",
-    set_verbose=False,
+    set_verbose=True,
     num_retries=1) # type: ignore

 kwargs = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hey, how's it going?"}],}
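
Only `set_verbose` changes in this hunk, so the Router built by test_router_cooldowns.py now prints its debug logging while the cooldown test runs. For orientation, here is a rough sketch of a Router assembled the same way and invoked with the test's kwargs; the model_list entry, the `redis_host` parameter, and the environment variable names are illustrative assumptions, not values taken from the test:

# Illustrative sketch of a Router configured like the one in this test;
# the model_list entry, redis_host parameter, and env var names are assumptions.
import os

from litellm import Router

model_list = [
    {
        "model_name": "gpt-3.5-turbo",  # alias that incoming requests are routed by
        "litellm_params": {
            "model": "gpt-3.5-turbo",
            "api_key": os.getenv("OPENAI_API_KEY"),
        },
    },
]

router = Router(
    model_list=model_list,
    redis_host=os.getenv("REDIS_HOST"),
    redis_password=os.getenv("REDIS_PASSWORD"),
    redis_port=int(os.getenv("REDIS_PORT")),  # type: ignore
    routing_strategy="simple-shuffle",
    set_verbose=True,  # the flag this commit flips on
    num_retries=1,
)

kwargs = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hey, how's it going?"}]}
response = router.completion(**kwargs)  # routed, synchronous completion call
print(response)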