add gpt cache to docs
This commit is contained in:
parent 4e8bfeb6f1
commit 628cfa29f3

3 changed files with 34 additions and 1 deletion

docs/my-website/docs/caching/gpt_cache.md (new file, +26)

@@ -0,0 +1,26 @@

# Using GPTCache with LiteLLM

GPTCache is a library for creating a semantic cache for LLM queries.

GPTCache Docs: https://gptcache.readthedocs.io/en/latest/index.html#

GPTCache GitHub: https://github.com/zilliztech/GPTCache

## Usage

### Install GPTCache

pip install gptcache

### Using GPTCache with LiteLLM completion()

```python
import os

from gptcache import cache
from litellm.cache import completion

# Set your .env keys
os.environ['OPENAI_API_KEY'] = ""

# Initialize GPTCache and hand it the OpenAI key from the environment
cache.init()
cache.set_openai_key()

messages = [{"role": "user", "content": "what is litellm YC 22?"}]
```
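
To see the cache at work, call `completion()` twice with the same messages; the second call should be served from GPTCache instead of hitting the API. A minimal sketch continuing the snippet above (the model name `gpt-3.5-turbo` and the timing printout are illustrative assumptions, not part of the committed doc):

```python
import time

# Illustrative only: "gpt-3.5-turbo" is an assumed model name; use any model your key supports.
start = time.time()
first = completion(model="gpt-3.5-turbo", messages=messages)
print(f"first call: {time.time() - start:.2f}s")

# Identical prompt, so this call should be answered from the GPTCache layer and return much faster.
start = time.time()
second = completion(model="gpt-3.5-turbo", messages=messages)
print(f"second call: {time.time() - start:.2f}s")
```
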
@@ -58,7 +58,14 @@ const sidebars = {
       "observability/supabase_integration",
     ],
   },
-  'caching',
+  {
+    type: "category",
+    label: "Caching",
+    items: [
+      "caching/caching",
+      "caching/gpt_cache",
+    ],
+  },
   {
     type: 'category',
     label: 'Extras',