Litellm router max depth (#6501)
* feat(router.py): add check for max fallback depth, preventing an infinite loop for fallbacks (see the depth-guard sketch below). Closes https://github.com/BerriAI/litellm/issues/6498
* test: update test
* (fix) Prometheus - log Postgres DB latency, status on prometheus (#6484)
* fix logging DB fails on prometheus
* unit testing log to otel wrapper
* unit testing for service logger + prometheus
* use LATENCY buckets for service logging
* fix service logging
* docs: clarify vertex vs gemini
* (router_strategy/) ensure all async functions use async cache methods (#6489)
* fix router strategy
* use async set / get cache in router_strategy
* add coverage for router strategy
* fix imports
* fix batch_get_cache
* use async methods for least busy
* fix least busy use async methods
* fix test_dual_cache_increment
* test async_get_available_deployment when routing_strategy="least-busy"
* (fix) proxy - fix when `STORE_MODEL_IN_DB` should be set (#6492)
* set store_model_in_db at the top
* correctly use store_model_in_db global
* (fix) `PrometheusServicesLogger` `_get_metric` should return metric in Registry (#6486) (see the registry-lookup sketch below)
* fix logging DB fails on prometheus
* unit testing log to otel wrapper
* unit testing for service logger + prometheus
* use LATENCY buckets for service logging
* fix service logging
* fix _get_metric in prom services logger
* add clear doc string
* unit testing for prom service logger
* bump: version 1.51.0 → 1.51.1
* Add `azure/gpt-4o-mini-2024-07-18` to model_prices_and_context_window.json (#6477)
* Update utils.py (#6468): fixed missing keys
* (perf) LiteLLM redis router fix - ~100ms improvement (#6483)
* docs(exception_mapping.md): add missing exception types. Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183
* fix(main.py): register custom model pricing with specific key, ensuring custom model pricing is registered to the specific model+provider key combination
* test: make testing more robust for custom pricing
* fix(redis_cache.py): instrument otel logging for sync redis calls, ensuring complete coverage for all redis cache calls
* refactor: pass parent_otel_span for redis caching calls in router, allowing more observability into which calls cause latency issues
* test: update tests with new params
* refactor: ensure e2e otel tracing for router
* refactor(router.py): add more otel tracing across router to catch all latency issues for router requests
* fix: fix linting error
* fix(router.py): fix linting error
* fix: fix test
* test: fix tests
* fix(dual_cache.py): pass ttl to redis cache
* fix: fix param
* perf(cooldown_cache.py): improve cooldown cache to store cache results in memory for 5s, preventing a redis call on each request; reduces ~100ms latency per call with caching enabled on router (see the in-memory cache sketch below)
* fix: fix test
* fix(cooldown_cache.py): handle if a result is None
* fix(cooldown_cache.py): add debug statements
* refactor(dual_cache.py): move to an in-memory check for batch get cache, to prevent redis from being hit on every call
* fix(cooldown_cache.py): fix linting error
* build: merge main

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
Co-authored-by: vibhanshu-ob <115142120+vibhanshu-ob@users.noreply.github.com>
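The headline change guards fallback recursion with a depth counter; the new max_fallbacks / fallback_depth parameters threaded through the tests below are its visible surface. A minimal sketch of the idea, as a hypothetical helper rather than litellm's actual implementation:

# Hypothetical sketch of the depth-guard idea; not litellm's actual code.
# max_fallbacks / fallback_depth mirror the parameters in the diff below.
async def run_async_fallback(
    litellm_router,
    original_function,
    fallback_model_group: list,
    original_exception: Exception,
    max_fallbacks: int = 5,
    fallback_depth: int = 0,
    **kwargs,
):
    # Without this cap, a default fallback that points back at the failing
    # model group recurses forever (the bug in issue #6498).
    if fallback_depth >= max_fallbacks:
        raise original_exception

    error = original_exception
    for model_group in fallback_model_group:
        try:
            # Propagate the incremented depth so nested fallback runs
            # eventually hit the cap above.
            kwargs["fallback_depth"] = fallback_depth + 1
            return await original_function(model=model_group, **kwargs)
        except Exception as e:
            error = e  # remember the most recent failure

    # All fallbacks failed: raise the most recent exception, the behavior
    # test_failed_fallbacks_raise_most_recent_exception pins down.
    raise error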
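The bundled cooldown_cache.py / dual_cache.py perf commits share one pattern: front Redis with a short-lived in-memory layer so the router does not pay a network round trip on every request. A rough sketch of that pattern, with hypothetical names rather than litellm's actual CooldownCache / DualCache API:

import time
from typing import Any, Optional

class InMemoryFrontedCache:
    """Hypothetical sketch: serve repeated reads from a short in-memory
    TTL layer (5s here) so Redis is only hit once the entry goes stale."""

    def __init__(self, redis_client, in_memory_ttl: float = 5.0):
        self.redis = redis_client
        self.in_memory_ttl = in_memory_ttl
        self._store = {}  # key -> (fetched_at, value)

    async def async_get(self, key: str) -> Optional[Any]:
        hit = self._store.get(key)
        if hit is not None and (time.time() - hit[0]) < self.in_memory_ttl:
            return hit[1]  # fresh enough: skip the Redis round trip
        value = await self.redis.get(key)
        # Cache the result even when it is None, so a missing key does not
        # trigger a Redis call on every request (cf. "handle if a result
        # is None" above).
        self._store[key] = (time.time(), value)
        return value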
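The PrometheusServicesLogger._get_metric fix is about resolving collectors from the shared prometheus registry instead of a local mapping, so a metric created once is found on later lookups rather than re-registered (which raises a duplicate-timeseries error). A hedged sketch of that lookup; it relies on a private prometheus_client attribute, and the exact litellm implementation may differ:

from prometheus_client import REGISTRY, Histogram

def _get_metric(metric_name: str):
    # _names_to_collectors is private prometheus_client API: it maps every
    # registered metric name to its collector object.
    return REGISTRY._names_to_collectors.get(metric_name)

def _get_or_create_histogram(metric_name: str, buckets: tuple) -> Histogram:
    metric = _get_metric(metric_name)
    if metric is None:
        # Histogram auto-registers itself with REGISTRY on creation,
        # so the next _get_metric call will find it.
        metric = Histogram(metric_name, "service call latency", buckets=buckets)
    return metric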
parent 1e403a8447
commit 56e9047818
11 changed files with 165 additions and 235 deletions
@@ -88,12 +88,14 @@ async def test_run_async_fallback(original_function):
     request_kwargs["messages"] = [{"role": "user", "content": "Hello, world!"}]
 
     result = await run_async_fallback(
-        router,
+        litellm_router=router,
         original_function=original_function,
         num_retries=1,
         fallback_model_group=fallback_model_group,
         original_model_group=original_model_group,
         original_exception=original_exception,
+        max_fallbacks=5,
+        fallback_depth=0,
         **request_kwargs
     )
 
@@ -264,13 +266,15 @@ async def test_failed_fallbacks_raise_most_recent_exception(original_function):
 
     with pytest.raises(litellm.exceptions.RateLimitError):
         await run_async_fallback(
-            router,
+            litellm_router=router,
             original_function=original_function,
             num_retries=1,
             fallback_model_group=fallback_model_group,
             original_model_group=original_model_group,
             original_exception=original_exception,
             mock_response="litellm.RateLimitError",
+            max_fallbacks=5,
+            fallback_depth=0,
             **request_kwargs
         )
 
@@ -332,12 +336,14 @@ async def test_multiple_fallbacks(original_function):
     request_kwargs["messages"] = [{"role": "user", "content": "Hello, world!"}]
 
     result = await run_async_fallback(
-        router_2,
+        litellm_router=router_2,
         original_function=original_function,
         num_retries=1,
         fallback_model_group=fallback_model_group,
         original_model_group=original_model_group,
         original_exception=original_exception,
+        max_fallbacks=5,
+        fallback_depth=0,
         **request_kwargs
     )
 
@@ -1045,7 +1045,7 @@ async def test_default_model_fallbacks(sync_mode, litellm_module_fallbacks):
             },
         ],
         default_fallbacks=(
-            ["my-good-model"] if litellm_module_fallbacks == False else None
+            ["my-good-model"] if litellm_module_fallbacks is False else None
         ),
     )
 
@@ -1398,3 +1398,48 @@ def test_router_fallbacks_with_custom_model_costs():
 
     assert model_info["input_cost_per_token"] == 30
     assert model_info["output_cost_per_token"] == 60
+
+
+@pytest.mark.parametrize("sync_mode", [True, False])
+@pytest.mark.asyncio
+async def test_router_fallbacks_default_and_model_specific_fallbacks(sync_mode):
+    """
+    Tests to ensure there is not an infinite fallback loop when there is a default fallback and model specific fallback.
+    """
+    router = Router(
+        model_list=[
+            {
+                "model_name": "bad-model",
+                "litellm_params": {
+                    "model": "openai/my-bad-model",
+                    "api_key": "my-bad-api-key",
+                },
+            },
+            {
+                "model_name": "my-bad-model-2",
+                "litellm_params": {
+                    "model": "gpt-4o",
+                    "api_key": "bad-key",
+                },
+            },
+        ],
+        fallbacks=[{"bad-model": ["my-bad-model-2"]}],
+        default_fallbacks=["bad-model"],
+    )
+
+    with pytest.raises(Exception) as exc_info:
+        if sync_mode:
+            resp = router.completion(
+                model="bad-model",
+                messages=[{"role": "user", "content": "Hey, how's it going?"}],
+            )
+
+            print(f"resp: {resp}")
+        else:
+            await router.acompletion(
+                model="bad-model",
+                messages=[{"role": "user", "content": "Hey, how's it going?"}],
+            )
+    assert isinstance(
+        exc_info.value, litellm.AuthenticationError
+    ), f"Expected AuthenticationError, but got {type(exc_info.value).__name__}"
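For reference, the behavior the new test pins down, as a hedged usage sketch mirroring the test's setup: with the depth guard in place, the cyclic bad-model / my-bad-model-2 / default-fallback configuration surfaces the underlying auth failure instead of looping.

import litellm
from litellm import Router

# Mirrors the new test above: "bad-model" falls back to "my-bad-model-2",
# while default_fallbacks points back at "bad-model", forming a cycle.
router = Router(
    model_list=[
        {
            "model_name": "bad-model",
            "litellm_params": {"model": "openai/my-bad-model", "api_key": "my-bad-api-key"},
        },
        {
            "model_name": "my-bad-model-2",
            "litellm_params": {"model": "gpt-4o", "api_key": "bad-key"},
        },
    ],
    fallbacks=[{"bad-model": ["my-bad-model-2"]}],
    default_fallbacks=["bad-model"],
)

try:
    router.completion(
        model="bad-model",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
    )
except litellm.AuthenticationError as e:
    # Before #6501 this configuration could recurse indefinitely; now the
    # most recent failure is raised once the fallback budget is spent.
    print(f"fallbacks exhausted: {e}")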