(feat) cache context manager - update cache

ishaan-jaff 2023-12-30 19:50:53 +05:30
parent bf4a9f40e8
commit 70cdc16d6f
2 changed files with 24 additions and 1 deletion


@@ -414,6 +414,29 @@ def enable_cache(
     print_verbose(f"LiteLLM Cache: {vars(litellm.cache)}")
+
+def update_cache(
+    type: Optional[Literal["local", "redis"]] = "local",
+    host: Optional[str] = None,
+    port: Optional[str] = None,
+    password: Optional[str] = None,
+    supported_call_types: Optional[
+        List[Literal["completion", "acompletion", "embedding", "aembedding"]]
+    ] = ["completion", "acompletion", "embedding", "aembedding"],
+    **kwargs,
+):
+    print_verbose("LiteLLM: Updating Cache")
+    litellm.cache = Cache(
+        type=type,
+        host=host,
+        port=port,
+        password=password,
+        supported_call_types=supported_call_types,
+        **kwargs,
+    )
+    print_verbose(f"LiteLLM: Cache Updated, litellm.cache={litellm.cache}")
+    print_verbose(f"LiteLLM Cache: {vars(litellm.cache)}")
+
 def disable_cache():
     from contextlib import suppress
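
A minimal usage sketch of the new helper (assuming `update_cache` is re-exported as `litellm.update_cache`, as the import change in the second file suggests; the Redis connection values below are placeholders):

import litellm

# Start with the default in-process cache
litellm.enable_cache(type="local")

# Swap the active cache for a Redis-backed one in place;
# extra keyword arguments are forwarded to the Cache() constructor
litellm.update_cache(
    type="redis",
    host="localhost",    # placeholder host
    port="6379",         # port is a string, matching the signature above
    password="example",  # placeholder credential
)

Note that update_cache rebuilds litellm.cache from scratch rather than mutating the existing object, so any setting not passed explicitly falls back to its default.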


@@ -71,7 +71,7 @@ from .llms.prompt_templates.factory import (
 import tiktoken
 from concurrent.futures import ThreadPoolExecutor
 from typing import Callable, List, Optional, Dict, Union, Mapping
-from .caching import enable_cache, disable_cache
+from .caching import enable_cache, disable_cache, update_cache
 encoding = tiktoken.get_encoding("cl100k_base")
 from litellm.utils import (
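
Combined with the existing helpers, the expected lifecycle is roughly as follows (a sketch using only parameters visible in this diff):

import litellm

litellm.enable_cache(type="local")  # create and install a cache

# Narrow caching to sync/async completion calls only
litellm.update_cache(
    type="local",
    supported_call_types=["completion", "acompletion"],
)

litellm.disable_cache()  # tear the cache down again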