From 628cfa29f3da34732c3dc1e782c1a312faa9aa25 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Sat, 26 Aug 2023 16:30:32 -0700
Subject: [PATCH] add gpt cache to docs

---
 docs/my-website/docs/{ => caching}/caching.md |  0
 docs/my-website/docs/caching/gpt_cache.md     | 26 +++++++++++++++++++
 docs/my-website/sidebars.js                   |  9 ++++++-
 3 files changed, 34 insertions(+), 1 deletion(-)
 rename docs/my-website/docs/{ => caching}/caching.md (100%)
 create mode 100644 docs/my-website/docs/caching/gpt_cache.md

diff --git a/docs/my-website/docs/caching.md b/docs/my-website/docs/caching/caching.md
similarity index 100%
rename from docs/my-website/docs/caching.md
rename to docs/my-website/docs/caching/caching.md
diff --git a/docs/my-website/docs/caching/gpt_cache.md b/docs/my-website/docs/caching/gpt_cache.md
new file mode 100644
index 000000000..663011b52
--- /dev/null
+++ b/docs/my-website/docs/caching/gpt_cache.md
@@ -0,0 +1,26 @@
+# Using GPTCache with LiteLLM
+
+GPTCache is a library for creating a semantic cache for LLM queries.
+
+GPTCache Docs: https://gptcache.readthedocs.io/en/latest/index.html#
+GPTCache Github: https://github.com/zilliztech/GPTCache
+
+## Usage
+
+### Install GPTCache
+pip install gptcache
+
+### Using GPTCache with LiteLLM completion()
+
+```python
+import os
+from gptcache import cache
+from litellm.cache import completion
+
+# Set your .env keys
+os.environ['OPENAI_API_KEY'] = ""
+cache.init()
+cache.set_openai_key()
+messages = [{"role": "user", "content": "what is litellm YC 22?"}]
+response = completion(model="gpt-3.5-turbo", messages=messages)
+```
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 14af5f9d3..52aee02f3 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -58,7 +58,14 @@ const sidebars = {
         "observability/supabase_integration",
       ],
     },
-    'caching',
+    {
+      type: "category",
+      label: "Caching",
+      items: [
+        "caching/caching",
+        "caching/gpt_cache",
+      ],
+    },
     {
       type: 'category',
       label: 'Extras',