(docs) simplify caching docs
parent 4680a26e2e
commit 8bfaee9654

1 changed file with 31 additions and 1 deletion
@@ -1,7 +1,7 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# Redis Cache, s3 Cache
+# Caching - In-Memory, Redis, s3
 
 [**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/caching.py)
@@ -79,6 +79,36 @@ response2 = completion(
 </TabItem>
 
+<TabItem value="in-mem" label="in memory cache">
+
+### Quick Start
+
+```python
+import litellm
+from litellm import completion
+from litellm.caching import Cache
+
+# use LiteLLM's default in-memory cache
+litellm.cache = Cache()
+
+# Make completion calls
+response1 = completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Tell me a joke."}],
+    caching=True
+)
+response2 = completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Tell me a joke."}],
+    caching=True
+)
+
+# response1 == response2; the first call fills the cache, the second is served from it
+```
+
+</TabItem>
 
 </Tabs>
 
 ## Cache Context Manager - Enable, Disable, Update Cache
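The retitled page now covers in-memory, Redis, and s3 caching, while the hunk above only adds the in-memory tab. For orientation, the Redis tab elsewhere in this doc points the same `litellm.cache` object at a Redis server. A minimal sketch, assuming `Cache` accepts `type="redis"` with `host`, `port`, and `password` parameters; the connection values below are placeholders for your own instance:

```python
import litellm
from litellm import completion
from litellm.caching import Cache

# point the shared cache at a Redis server instead of process memory
# (host/port/password are placeholder values)
litellm.cache = Cache(type="redis", host="localhost", port="6379", password="")

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    caching=True
)
```

Unlike the per-process in-memory cache, a Redis-backed cache lets multiple workers or machines share hits for identical (model, messages) pairs.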
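The final context line shows the hunk lands just above the "Cache Context Manager - Enable, Disable, Update Cache" section. A rough sketch of the toggling pattern that heading describes, assuming the `litellm.enable_cache()` and `litellm.disable_cache()` helpers it names:

```python
import litellm
from litellm import completion

# assumed helpers, named by the section heading above
litellm.enable_cache()  # turn caching on (in-memory by default)

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    caching=True
)

litellm.disable_cache()  # later calls bypass the cache
```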