forked from phoenix/litellm-mirror
build(openai_proxy/main.py): add support for routing between multiple azure deployments
This commit is contained in:
parent
f208a1231b
commit
b9a4bfc054
15 changed files with 159 additions and 1 deletions
38
openai_proxy/tests/test_openrouter.py
Normal file
38
openai_proxy/tests/test_openrouter.py
Normal file
|
@@ -0,0 +1,38 @@
|
|||
"""Smoke-test script for the LiteLLM OpenAI proxy.

Sends two chat-completion requests (one non-streaming, one streaming)
through a locally running proxy at http://0.0.0.0:8000, routed to an
OpenRouter-hosted model. The proxy-level ``openai.api_key`` can be any
string; the per-request ``api_key`` is forwarded upstream and must be a
real OpenRouter key for the calls to succeed (left empty here).
"""
import openai

# Point the OpenAI SDK at the local proxy instead of api.openai.com.
openai.api_base = "http://0.0.0.0:8000"
# NOTE(review): the proxy appears to accept any value here — confirm.
openai.api_key = "this can be anything"

print("making request")

# Per-request key forwarded to the upstream provider; fill in to run live.
api_key = ""

# Shared request payload — both calls below use the identical prompt.
_TEST_MESSAGES = [
    {
        "role": "user",
        "content": "this is a test message, what model / llm are you"
    }
]


def _create_completion(stream=False):
    """Issue one chat-completion request via the proxy.

    Args:
        stream: when True, ask for an incremental (chunked) response.

    Returns:
        The full response object, or an iterator of chunks when streaming.
    """
    return openai.ChatCompletion.create(
        model="openrouter/google/palm-2-chat-bison",
        messages=_TEST_MESSAGES,
        api_key=api_key,
        max_tokens=10,
        stream=stream,
    )


# Non-streaming request: the complete response arrives at once.
response = _create_completion()
print(response)

# Streaming request: print each chunk as it arrives.
for chunk in _create_completion(stream=True):
    print(chunk)
Loading…
Add table
Add a link
Reference in a new issue