test(test_router_cooldowns.py): adding logging

This commit is contained in:
Krrish Dholakia 2023-11-24 12:30:08 -08:00
parent 12dbdc4c15
commit 2a033fd8a2
2 changed files with 24 additions and 24 deletions

View file

@ -1,29 +1,29 @@
# This tests the litelm proxy
# it makes async Completion requests with streaming
import openai
# # This tests the litelm proxy
# # it makes async Completion requests with streaming
# import openai
openai.base_url = "http://0.0.0.0:8000"
openai.api_key = "temp-key"
print(openai.base_url)
# openai.base_url = "http://0.0.0.0:8000"
# openai.api_key = "temp-key"
# print(openai.base_url)
async def test_async_completion():
response = await openai.Completion.acreate(
model="gpt-3.5-turbo",
prompt='this is a test request, write a short poem',
)
print(response)
# async def test_async_completion():
# response = await (
# model="gpt-3.5-turbo",
# prompt='this is a test request, write a short poem',
# )
# print(response)
print("test_streaming")
response = await openai.Completion.acreate(
model="gpt-3.5-turbo",
prompt='this is a test request, write a short poem',
stream=True
)
print(response)
async for chunk in response:
print(chunk)
# print("test_streaming")
# response = await openai.chat.completions.create(
# model="gpt-3.5-turbo",
# prompt='this is a test request, write a short poem',
# stream=True
# )
# print(response)
# async for chunk in response:
# print(chunk)
import asyncio
asyncio.run(test_async_completion())
# import asyncio
# asyncio.run(test_async_completion())

View file

@ -40,7 +40,7 @@ router = Router(model_list=model_list,
redis_password=os.getenv("REDIS_PASSWORD"),
redis_port=int(os.getenv("REDIS_PORT")), # type: ignore
routing_strategy="simple-shuffle",
set_verbose=False,
set_verbose=True,
num_retries=1) # type: ignore
kwargs = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hey, how's it going?"}],}