forked from phoenix/litellm-mirror
Merge pull request #5402 from ajeetdsouza/patch-1
docs: add time.sleep() between streaming calls
This commit is contained in:
commit 3df1186d72
1 changed file with 4 additions and 1 deletion
````diff
@@ -51,8 +51,10 @@ LiteLLM can cache your streamed responses for you
 ### Usage
 ```python
 import litellm
+import time
 from litellm import completion
 from litellm.caching import Cache
 
 litellm.cache = Cache(type="hosted")
 
 # Make completion calls
@@ -64,6 +66,7 @@ response1 = completion(
 for chunk in response1:
     print(chunk)
 
+time.sleep(1) # cache is updated asynchronously
 
 response2 = completion(
     model="gpt-3.5-turbo",
@@ -72,4 +75,4 @@ response2 = completion(
     caching=True)
 for chunk in response2:
     print(chunk)
 ```
````
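For context, the docs snippet after this change would read roughly as follows. This is a reconstruction, not the verbatim file: the arguments to the first `completion()` call are truncated in the hunks above, so the `messages` content and the `stream=True` flag are filled in here as assumptions mirroring the visible parts of the `response2` call.

```python
import litellm
import time
from litellm import completion
from litellm.caching import Cache

# Use LiteLLM's hosted cache backend, as configured in the original snippet
litellm.cache = Cache(type="hosted")

# Make completion calls
# NOTE: the arguments below are assumed; only fragments of this call appear in the diff
response1 = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    stream=True,
    caching=True)
for chunk in response1:
    print(chunk)

time.sleep(1)  # cache is updated asynchronously

# An identical call, which should now be served from the cache
response2 = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    stream=True,
    caching=True)
for chunk in response2:
    print(chunk)
```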