From 3fac512b7475b483fa610820b851561a6bd94615 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 21 Oct 2023 12:37:07 -0700
Subject: [PATCH] docs(caching): add custom key-value pairs to caching docs

---
 .../docs/caching/{caching.md => local_caching.md} | 12 ++++++++++++
 docs/my-website/sidebars.js                       |  9 +++++++--
 2 files changed, 19 insertions(+), 2 deletions(-)
 rename docs/my-website/docs/caching/{caching.md => local_caching.md} (90%)

diff --git a/docs/my-website/docs/caching/caching.md b/docs/my-website/docs/caching/local_caching.md
similarity index 90%
rename from docs/my-website/docs/caching/caching.md
rename to docs/my-website/docs/caching/local_caching.md
index 251b3e583..56425337b 100644
--- a/docs/my-website/docs/caching/caching.md
+++ b/docs/my-website/docs/caching/local_caching.md
@@ -32,6 +32,18 @@ response2 = completion(
 # response1 == response2, response 1 is cached
 ```
 
+## Custom Key-Value Pairs
+Add custom key-value pairs to your cache.
+
+```python
+from litellm.caching import Cache
+cache = Cache()
+
+cache.add_cache(cache_key="test-key", result="1234")
+
+cache.get_cache(cache_key="test-key")
+```
+
 ## Caching with Streaming
 LiteLLM can cache your streamed responses for you
 
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index ac35c4cd0..d8903b7ca 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -132,11 +132,16 @@ const sidebars = {
     {
       type: "category",
       label: "Caching",
+      link: {
+        type: 'generated-index',
+        title: 'Caching',
+        description: 'Learn how to cache LLM completion responses with LiteLLM',
+        slug: '/caching',
+      },
       items: [
-        "caching/caching",
+        "caching/local_caching",
         "caching/redis_cache",
         "caching/caching_api",
-        "caching/gpt_cache",
       ],
     },
     {