diff --git a/docs/my-website/docs/caching/all_caches.md b/docs/my-website/docs/caching/all_caches.md index 8b4d7e863..d6ccb98a2 100644 --- a/docs/my-website/docs/caching/all_caches.md +++ b/docs/my-website/docs/caching/all_caches.md @@ -7,7 +7,10 @@ import TabItem from '@theme/TabItem'; :::info -Need to use Caching on LiteLLM Proxy Server? Doc here: [Caching Proxy Server](https://docs.litellm.ai/docs/proxy/caching) +- Need caching on the LiteLLM Proxy Server? Doc here: [Caching Proxy Server](https://docs.litellm.ai/docs/proxy/caching) + +- For OpenAI/Anthropic Prompt Caching, go [here](../completion/prompt_caching.md) + ::: diff --git a/docs/my-website/docs/proxy/caching.md b/docs/my-website/docs/proxy/caching.md index 4d44a4da0..533d1bd9f 100644 --- a/docs/my-website/docs/proxy/caching.md +++ b/docs/my-website/docs/proxy/caching.md @@ -4,6 +4,12 @@ import TabItem from '@theme/TabItem'; # Caching Cache LLM Responses +:::note + +For OpenAI/Anthropic Prompt Caching, go [here](../completion/prompt_caching.md) + +::: + LiteLLM supports: - In Memory Cache - Redis Cache