diff --git a/litellm/caching.py b/litellm/caching.py
index 7126f2e83..3b429bd52 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -414,6 +414,29 @@ def enable_cache(
     print_verbose(f"LiteLLM Cache: {vars(litellm.cache)}")
 
 
+def update_cache(
+    type: Optional[Literal["local", "redis"]] = "local",
+    host: Optional[str] = None,
+    port: Optional[str] = None,
+    password: Optional[str] = None,
+    supported_call_types: Optional[
+        List[Literal["completion", "acompletion", "embedding", "aembedding"]]
+    ] = ["completion", "acompletion", "embedding", "aembedding"],
+    **kwargs,
+):
+    print_verbose("LiteLLM: Updating Cache")
+    litellm.cache = Cache(
+        type=type,
+        host=host,
+        port=port,
+        password=password,
+        supported_call_types=supported_call_types,
+        **kwargs,
+    )
+    print_verbose(f"LiteLLM: Cache Updated, litellm.cache={litellm.cache}")
+    print_verbose(f"LiteLLM Cache: {vars(litellm.cache)}")
+
+
 def disable_cache():
     from contextlib import suppress
 
diff --git a/litellm/main.py b/litellm/main.py
index 50f39e549..befb2733e 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -71,7 +71,7 @@ from .llms.prompt_templates.factory import (
 import tiktoken
 from concurrent.futures import ThreadPoolExecutor
 from typing import Callable, List, Optional, Dict, Union, Mapping
-from .caching import enable_cache, disable_cache
+from .caching import enable_cache, disable_cache, update_cache
 
 encoding = tiktoken.get_encoding("cl100k_base")
 from litellm.utils import (