mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
docs(stream.md): add stream chunk builder helper function to docs
This commit is contained in:
parent 30b80afe31
commit cd2883065a
1 changed file with 15 additions and 0 deletions
@@ -9,10 +9,25 @@ LiteLLM supports streaming the model response back by passing `stream=True` as a

### Usage

```python
from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]

response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)

for part in response:
    print(part.choices[0].delta.content or "")
```
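
Each `part` is a streaming chunk whose `delta.content` carries the newly generated text; it can be `None` (typically on the final chunk, which only carries the finish reason), which is why the example falls back to an empty string.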

### Helper function

LiteLLM also exposes a helper function, `litellm.stream_chunk_builder`, to rebuild the complete streaming response from the list of collected chunks.

```python
import litellm
from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]

response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)

# collect the streamed chunks so the complete response can be rebuilt
chunks = []
for chunk in response:
    chunks.append(chunk)

print(litellm.stream_chunk_builder(chunks, messages=messages))
```
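
The rebuilt object can then be read like a regular, non-streaming completion response. A minimal sketch, assuming the helper returns a standard response object exposing the usual `choices[0].message.content` field:

```python
# a minimal sketch, assuming stream_chunk_builder returns a standard
# non-streaming response object with the usual choices[0].message.content layout
rebuilt = litellm.stream_chunk_builder(chunks, messages=messages)
print(rebuilt.choices[0].message.content)
```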
## Async Completion