forked from phoenix/litellm-mirror
(docs) update how to use litellm streaming
This commit is contained in:
parent
37187e8e88
commit
f29a353796
2 changed files with 64 additions and 64 deletions
10
README.md
10
README.md
@@ -68,13 +68,13 @@ Streaming is supported for all models (Bedrock, Huggingface, TogetherAI, Azure, OpenAI, etc.)
 ```python
 from litellm import completion
 response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
-for chunk in response:
-    print(chunk['choices'][0]['delta'])
+for part in response:
+    print(part.choices[0].delta.content or "")

 # claude 2
-result = completion('claude-2', messages, stream=True)
-for chunk in result:
-    print(chunk['choices'][0]['delta'])
+response = completion('claude-2', messages, stream=True)
+for part in response:
+    print(part.choices[0].delta.content or "")
 ```

 # Router - load balancing([Docs](https://docs.litellm.ai/docs/routing))
|
Loading…
Add table
Add a link
Reference in a new issue