mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
refactor: add black formatting

commit 4905929de3, parent b87d630b0a
156 changed files with 19723 additions and 10869 deletions
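The commit message carries no detail beyond the subject line. For context, a repo-wide Black pass is typically a single command; a minimal sketch, assuming Black is installed with its stock defaults (nothing below is taken from this commit):

# Reformat every Python file under the current directory in place.
# Black's defaults (double-quoted strings, magic trailing commas) match
# the changes visible in the diff below.
python -m black .

# Check-only mode, useful in CI: exits non-zero if any file would change.
python -m black --check .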
@@ -1,4 +1,3 @@
 import openai
 
 api_base = f"http://0.0.0.0:8000"
@@ -8,29 +7,29 @@ openai.api_key = "temp-key"
 print(openai.api_base)
 
-print(f'LiteLLM: response from proxy with streaming')
+print(f"LiteLLM: response from proxy with streaming")
 response = openai.ChatCompletion.create(
-    model="ollama/llama2",
-    messages = [
+    model="ollama/llama2",
+    messages=[
         {
             "role": "user",
-            "content": "this is a test request, acknowledge that you got it"
+            "content": "this is a test request, acknowledge that you got it",
         }
     ],
-    stream=True
+    stream=True,
 )
 
 for chunk in response:
-    print(f'LiteLLM: streaming response from proxy {chunk}')
+    print(f"LiteLLM: streaming response from proxy {chunk}")
 
 response = openai.ChatCompletion.create(
-    model="ollama/llama2",
-    messages = [
+    model="ollama/llama2",
+    messages=[
         {
             "role": "user",
-            "content": "this is a test request, acknowledge that you got it"
+            "content": "this is a test request, acknowledge that you got it",
         }
-    ]
+    ],
 )
 
-print(f'LiteLLM: response from proxy {response}')
+print(f"LiteLLM: response from proxy {response}")
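For readability, here is the file as it would read after this commit, assembled from the two hunks above. This is a sketch, not the verbatim file: the diff does not name the file, the line openai.api_base = api_base is an assumption inferred from the print(openai.api_base) context line, and openai.api_key = "temp-key" comes from the second hunk's header. It targets the pre-1.0 openai SDK (openai.api_base and openai.ChatCompletion were removed in openai>=1.0).

import openai

api_base = f"http://0.0.0.0:8000"

openai.api_base = api_base  # assumed: point the SDK at the local LiteLLM proxy
openai.api_key = "temp-key"  # placeholder key, from the hunk header

print(openai.api_base)

print(f"LiteLLM: response from proxy with streaming")
response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it",
        }
    ],
    stream=True,  # chunks arrive incrementally; iterate instead of reading one object
)

for chunk in response:
    print(f"LiteLLM: streaming response from proxy {chunk}")

response = openai.ChatCompletion.create(
    model="ollama/llama2",
    messages=[
        {
            "role": "user",
            "content": "this is a test request, acknowledge that you got it",
        }
    ],
)

print(f"LiteLLM: response from proxy {response}")

Running it assumes an OpenAI-compatible LiteLLM proxy is already listening on 0.0.0.0:8000 and can route requests for ollama/llama2.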