Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 02:34:29 +00:00)
* use lru cache wrapper
* use lru_cache_wrapper for _cached_get_model_info_helper
* fix _get_traceback_str_for_error
* huggingface/mistralai/Mistral-7B-Instruct-v0.3
30 lines
794 B
Python
from functools import lru_cache
from typing import Callable, Optional, TypeVar

T = TypeVar("T")


def lru_cache_wrapper(
    maxsize: Optional[int] = None,
) -> Callable[[Callable[..., T]], Callable[..., T]]:
    """
    Wrapper around functools.lru_cache that caches both successful
    results and raised exceptions. Plain lru_cache does not cache a
    call that raises, so a repeatedly failing call would be re-executed
    on every invocation; this decorator memoizes the failure and
    re-raises it instead.
    """

    def decorator(f: Callable[..., T]) -> Callable[..., T]:
        @lru_cache(maxsize=maxsize)
        def wrapper(*args, **kwargs):
            # Tag the outcome so successes and failures share one cache
            # entry format; exceptions are stored as values, not raised.
            try:
                return ("success", f(*args, **kwargs))
            except Exception as e:
                return ("error", e)

        def wrapped(*args, **kwargs):
            result = wrapper(*args, **kwargs)
            if result[0] == "error":
                # Re-raise the cached exception on every repeated call.
                raise result[1]
            return result[1]

        return wrapped

    return decorator
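A minimal usage sketch, assuming the lru_cache_wrapper defined above is in scope. The function flaky_lookup and its failure mode are hypothetical, chosen only to show that both the return value and the exception are memoized, so the wrapped function body runs at most once per distinct argument:

call_count = 0

# Hypothetical example function; "lru_cache_wrapper" is the decorator
# defined in the file above.
@lru_cache_wrapper(maxsize=16)
def flaky_lookup(key: str) -> str:
    global call_count
    call_count += 1
    if key == "missing":
        raise KeyError(key)
    return key.upper()

print(flaky_lookup("model"))   # executes the body -> "MODEL"
print(flaky_lookup("model"))   # served from cache -> "MODEL"

try:
    flaky_lookup("missing")    # executes once, raises KeyError
except KeyError:
    pass
try:
    flaky_lookup("missing")    # re-raises the cached KeyError, no re-run
except KeyError:
    pass

print(call_count)              # 2: one real execution per distinct key

One consequence of this design worth noting: a transient failure (for example, a momentary network error inside the wrapped function) becomes sticky for the lifetime of the cache entry, since the cached exception is re-raised rather than the call being retried.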