Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00.
(fix) proxy cli tests
This commit is contained in:
parent
2833564065
commit
c0a4881194
1 changed file with 3 additions and 3 deletions
|
@@ -176,13 +176,13 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
|
||||||
openai.api_key = "temp-key"
|
openai.api_key = "temp-key"
|
||||||
print(openai.api_base)
|
print(openai.api_base)
|
||||||
|
|
||||||
response = openai.Completion.create(model="gpt-3.5-turbo", prompt='hello who are you')
|
response = openai.Completion.create(model="gpt-3.5-turbo", prompt='this is a test request, write a short poem')
|
||||||
print(response)
|
print(response)
|
||||||
|
|
||||||
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages = [
|
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages = [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": "this is a test request, acknowledge that you got it"
|
"content": "this is a test request, write a short poem"
|
||||||
}
|
}
|
||||||
])
|
])
|
||||||
click.echo(f'LiteLLM: response from proxy {response}')
|
click.echo(f'LiteLLM: response from proxy {response}')
|
||||||
|
@@ -191,7 +191,7 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
|
||||||
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages = [
|
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages = [
|
||||||
{
|
{
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": "this is a test request, acknowledge that you got it"
|
"content": "this is a test request, write a short poem"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
stream=True,
|
stream=True,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue