# mirror of https://github.com/BerriAI/litellm.git
# synced 2025-04-25 02:34:29 +00:00
# This tests the litellm proxy
# it makes async Completion requests with streaming
import openai

# Point the openai client at the locally running litellm proxy.
# NOTE(review): pre-1.0 openai-python configures the endpoint via
# `openai.api_base`, while `openai.base_url` is the 1.x style -- confirm
# which client version this script targets (`Completion.acreate` below
# is the pre-1.0 API).
openai.base_url = "http://0.0.0.0:8000"
openai.api_key = "temp-key"  # placeholder key; the proxy handles real auth
print(openai.base_url)
async def test_async_completion():
    """Exercise the proxy with one plain and one streaming completion.

    Sends two async Completion requests through the configured openai
    client and prints the responses; the streaming response is consumed
    chunk by chunk.

    NOTE(review): `openai.Completion.acreate` is the pre-1.0
    openai-python API -- confirm the pinned client version supports it.
    """
    # Phase 1: plain (non-streaming) completion.
    response = await openai.Completion.acreate(
        model="gpt-3.5-turbo",
        prompt='this is a test request, write a short poem',
    )
    print(response)

    print("test_streaming")
    # Phase 2: streaming completion -- the awaited result is an async
    # iterator of chunks rather than a finished completion object.
    response = await openai.Completion.acreate(
        model="gpt-3.5-turbo",
        prompt='this is a test request, write a short poem',
        stream=True,
    )
    print(response)
    async for chunk in response:
        print(chunk)
import asyncio

# Guard the entry point so importing this module does not fire requests.
if __name__ == "__main__":
    asyncio.run(test_async_completion())