(perf) Litellm redis router fix - ~100ms improvement (#6483)
* docs(exception_mapping.md): add missing exception types. Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183
* fix(main.py): register custom model pricing with a specific key, ensuring custom model pricing is registered to the specific model+provider key combination
* test: make testing more robust for custom pricing
* fix(redis_cache.py): instrument otel logging for sync redis calls, ensuring complete coverage for all redis cache calls
* refactor: pass parent_otel_span for redis caching calls in the router, allowing more observability into which calls are causing latency issues
* test: update tests with new params
* refactor: ensure e2e otel tracing for the router
* refactor(router.py): add more otel tracing across the router to catch all latency issues for router requests
* fix: fix linting error
* fix(router.py): fix linting error
* fix: fix test
* test: fix tests
* fix(dual_cache.py): pass ttl to the redis cache
* fix: fix param
* perf(cooldown_cache.py): improve the cooldown cache to store results in memory for 5s, preventing a redis call from being made on every request; with caching enabled on the router this saves ~100ms of latency per call (see the sketch below)
* fix: fix test
* fix(cooldown_cache.py): handle the case where a result is None
* fix(cooldown_cache.py): add debug statements
* refactor(dual_cache.py): move to an in-memory check for batch get cache, to prevent redis from being hit on every call
* fix(cooldown_cache.py): fix linting error
This commit is contained in:
parent: 134bd2cebb
commit: 44e7ffd05c
9 changed files with 144 additions and 45 deletions
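The headline win here is the perf(cooldown_cache.py) bullet: cooldown state lives in Redis, but reading it on every router request costs a network round trip, so the batch read is now fronted by a short-lived in-process cache. A minimal sketch of that pattern, assuming an async Redis client with an mget coroutine (the class name, the 5-second default TTL, and the wiring are illustrative, not litellm's actual API):

import time
from typing import Any, Dict, List, Optional, Tuple


class InMemoryFirstCache:
    """Serve batch reads from process memory for a short TTL; fall back
    to one Redis MGET only for keys whose local entry is missing or stale."""

    def __init__(self, redis_client: Any, ttl_seconds: float = 5.0) -> None:
        self.redis = redis_client
        self.ttl = ttl_seconds
        self._local: Dict[str, Tuple[float, Any]] = {}  # key -> (expires_at, value)

    async def batch_get(self, keys: List[str]) -> List[Optional[Any]]:
        now = time.monotonic()
        results: List[Optional[Any]] = [None] * len(keys)
        misses: List[int] = []
        for i, key in enumerate(keys):
            entry = self._local.get(key)
            if entry is not None and entry[0] > now:
                results[i] = entry[1]  # fresh local hit: no network round trip
            else:
                misses.append(i)
        if misses:
            # One MGET for all stale/missing keys instead of a Redis call per request
            fetched = await self.redis.mget([keys[i] for i in misses])
            for i, value in zip(misses, fetched):
                # Cache None results too: "no cooldown" is the common case, and
                # re-checking Redis for it on every request is the ~100ms tax
                self._local[keys[i]] = (now + self.ttl, value)
                results[i] = value
        return results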
cooldown_cache.py

@@ -7,7 +7,15 @@ import time
 from typing import TYPE_CHECKING, Any, List, Optional, Tuple, TypedDict
 
 from litellm import verbose_logger
-from litellm.caching.caching import DualCache
+from litellm.caching.caching import Cache, DualCache
+from litellm.caching.in_memory_cache import InMemoryCache
+
+if TYPE_CHECKING:
+    from opentelemetry.trace import Span as _Span
+
+    Span = _Span
+else:
+    Span = Any
 
 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span
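The added if TYPE_CHECKING: block is the standard way to type against an optional dependency: the real Span class is imported only when a static type checker runs, and at runtime the name collapses to Any, so opentelemetry never becomes a hard import. A generalized sketch of the idiom (not litellm code):

from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    # Seen only by mypy/pyright; never executed, so the package may be absent
    from opentelemetry.trace import Span as _Span

    Span = _Span
else:
    # At runtime the alias degrades to Any and the annotations stay harmless
    Span = Any


def get_active_deployments(parent_otel_span: Optional[Span] = None) -> list:
    """Annotations remain precise for the checker without requiring otel."""
    return []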
@@ -28,6 +36,7 @@ class CooldownCache:
     def __init__(self, cache: DualCache, default_cooldown_time: float):
         self.cache = cache
         self.default_cooldown_time = default_cooldown_time
+        self.in_memory_cache = InMemoryCache()
 
     def _common_add_cooldown_logic(
         self, model_id: str, original_exception, exception_status, cooldown_time: float
@@ -83,21 +92,32 @@ class CooldownCache:
             )
             raise e
 
+    @staticmethod
+    def get_cooldown_cache_key(model_id: str) -> str:
+        return f"deployment:{model_id}:cooldown"
+
     async def async_get_active_cooldowns(
         self, model_ids: List[str], parent_otel_span: Optional[Span]
     ) -> List[Tuple[str, CooldownCacheValue]]:
         # Generate the keys for the deployments
-        keys = [f"deployment:{model_id}:cooldown" for model_id in model_ids]
+        keys = [
+            CooldownCache.get_cooldown_cache_key(model_id) for model_id in model_ids
+        ]
 
         # Retrieve the values for the keys using mget
-        results = (
-            await self.cache.async_batch_get_cache(
-                keys=keys, parent_otel_span=parent_otel_span
-            )
-            or []
-        )
+        ## more likely to be none if no models ratelimited. So just check redis every 1s
+        ## each redis call adds ~100ms latency.
+
+        ## check in memory cache first
+        results = await self.cache.async_batch_get_cache(
+            keys=keys, parent_otel_span=parent_otel_span
+        )
+        active_cooldowns: List[Tuple[str, CooldownCacheValue]] = []
+
+        if results is None:
+            return active_cooldowns
+
 
-        active_cooldowns = []
         # Process the results
         for model_id, result in zip(model_ids, results):
             if result and isinstance(result, dict):