From 8bfaee96546fa0efc1ba356cb61fd3a18bf2def2 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Wed, 3 Jan 2024 16:20:19 +0530
Subject: [PATCH] (docs) simplify caching docs

---
 docs/my-website/docs/caching/redis_cache.md | 32 ++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/docs/my-website/docs/caching/redis_cache.md b/docs/my-website/docs/caching/redis_cache.md
index cc3a6b5cf..3d70c5e3d 100644
--- a/docs/my-website/docs/caching/redis_cache.md
+++ b/docs/my-website/docs/caching/redis_cache.md
@@ -1,7 +1,7 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

-# Redis Cache, s3 Cache
+# Caching - In-Memory, Redis, s3

[**See Code**](https://github.com/BerriAI/litellm/blob/main/litellm/caching.py)

@@ -79,6 +79,36 @@ response2 = completion(




### Quick Start

```python
import litellm
from litellm import completion
from litellm.caching import Cache
litellm.cache = Cache()

# Make completion calls
response1 = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    caching=True
)
response2 = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    caching=True
)

# response1 == response2, response1 is cached

```




## Cache Context Manager - Enable, Disable, Update Cache