(docs) caching

This commit is contained in:
ishaan-jaff 2023-12-30 19:04:33 +05:30
parent 8ff3bbcfee
commit 231148ed73

View file

@@ -4,7 +4,7 @@
### Pre-requisites
Install redis
``` ```shell
pip install redis
``` ```
For the hosted version you can setup your own Redis DB here: https://app.redislabs.com/
@@ -18,13 +18,11 @@ litellm.cache = Cache(type="redis", host=<host>, port=<port>, password=<password
# Make completion calls
response1 = completion(
    model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Tell me a joke."}], messages=[{"role": "user", "content": "Tell me a joke."}]
caching=True
) )
response2 = completion(
    model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Tell me a joke."}], messages=[{"role": "user", "content": "Tell me a joke."}]
caching=True
) )
# response1 == response2, response 1 is cached