litellm-mirror/cookbook/litellm-ollama-docker-image/test.py

import openai

# Point the (pre-1.0) openai SDK at the local LiteLLM proxy instead of api.openai.com
api_base = "http://0.0.0.0:8000"
openai.api_base = api_base
openai.api_key = "temp-key"
print(openai.api_base)

# Streaming request routed through the proxy to the Ollama-served llama2 model
print("LiteLLM: response from proxy with streaming")
response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it",
        }
    ],
    stream=True,
)

for chunk in response:
    print(f"LiteLLM: streaming response from proxy {chunk}")

# Same request without streaming; the full response comes back as a single object
response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it",
        }
    ],
)

print(f"LiteLLM: response from proxy {response}")