docs(caching): add custom key value pairs to caching docs
commit 3fac512b74 (parent 00a6b4067d)
2 changed files with 19 additions and 2 deletions
@@ -32,6 +32,18 @@ response2 = completion(
# response1 == response2, response 1 is cached
```

## Custom Key-Value Pairs
Add custom key-value pairs to your cache.

```python
from litellm.caching import Cache
cache = Cache()

cache.add_cache(cache_key="test-key", result="1234")

cache.get_cache(cache_key="test-key")
```

## Caching with Streaming
LiteLLM can cache your streamed responses for you
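For readers skimming the diff, the new section's API can be exercised end to end as below. This is a minimal sketch built only from the `Cache`, `add_cache`, and `get_cache` calls shown in the hunk above; the key name and stored value are placeholder choices, not part of the commit.

```python
# Sketch of the custom key-value flow documented in the hunk above.
# Cache, add_cache, and get_cache come from litellm.caching as shown there;
# "my-custom-key" and the stored string are illustrative placeholders.
from litellm.caching import Cache

cache = Cache()

# Store a value under a key you control.
cache.add_cache(cache_key="my-custom-key", result="cached response text")

# Later, read it back with the same key.
value = cache.get_cache(cache_key="my-custom-key")
print(value)  # prints: cached response text
```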
@@ -132,11 +132,16 @@ const sidebars = {
    {
      type: "category",
      label: "Caching",
      link: {
        type: 'generated-index',
        title: 'Providers',
        description: 'Learn how to deploy + call models from different providers on LiteLLM',
        slug: '/caching',
      },
      items: [
        "caching/caching",
        "caching/local_caching",
        "caching/redis_cache",
        "caching/caching_api",
        "caching/gpt_cache",
      ],
    },
    {