forked from phoenix/litellm-mirror

Update README.md

commit ade1e6e5c0
parent 8acad78fb3

1 changed file with 2 additions and 9 deletions
README.md
@@ -78,21 +78,14 @@ LiteLLM supports caching `completion()` and `embedding()` calls for all LLMs
 ```python
 import litellm
 from litellm.caching import Cache
-litellm.cache = Cache(type="hosted") # init cache to use api.litellm.ai
+litellm.cache = Cache() # init cache to use api.litellm.ai
 
-# Make completion calls
+# stores this response in cache
 response1 = litellm.completion(
     model="gpt-3.5-turbo",
     messages=[{"role": "user", "content": "Tell me a joke."}]
     caching=True
 )
-
-response2 = litellm.completion(
-    model="gpt-3.5-turbo",
-    messages=[{"role": "user", "content": "Tell me a joke."}],
-    caching=True
-)
-# response1 == response2, response 1 is cached
 ```
 
 ## OpenAI Proxy Server ([Docs](https://docs.litellm.ai/docs/proxy_server))
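For context, the hunk header states that LiteLLM caches both `completion()` and `embedding()` calls. Below is a minimal sketch of how the updated snippet could be exercised end to end. It adds the comma after the `messages` argument that the committed snippet omits, and it extends the idea to `embedding()`; the embedding model name and the assumption that `embedding()` honors the same `caching=True` flag are not shown in this diff and are illustrative only.

```python
import litellm
from litellm.caching import Cache

litellm.cache = Cache()  # hosted cache at api.litellm.ai, per the updated README line

# first call is computed and stored in the cache
response1 = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],  # comma added; the committed snippet omits it
    caching=True,
)

# an identical call should be served from the cache
response2 = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    caching=True,
)

# embedding() sketch: assumes the same caching=True flag applies to embedding calls;
# the model name below is illustrative and not taken from this diff
emb1 = litellm.embedding(
    model="text-embedding-ada-002",
    input=["Tell me a joke."],
    caching=True,
)
```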