redis otel tracing + async support for latency routing (#6452)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination
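
A minimal sketch of the registration this describes, using `litellm.register_model` (the model name and per-token prices below are made up): keying the entry by `provider/model` rather than the bare model name keeps two providers hosting the same model name from sharing one price.

```python
import litellm

# Register custom pricing under the provider-qualified key
# ("azure/my-gpt-4o", not just "my-gpt-4o") so the same model name on
# another provider does not inherit this pricing. Values are illustrative.
litellm.register_model(
    {
        "azure/my-gpt-4o": {
            "input_cost_per_token": 2.5e-06,
            "output_cost_per_token": 1e-05,
            "litellm_provider": "azure",
            "mode": "chat",
        }
    }
)
```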

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls
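
A minimal sketch of what instrumenting a sync call can look like with the plain OpenTelemetry API; the tracer name and the `sync_get_cache` helper are illustrative, not LiteLLM's actual wrapper:

```python
import redis
from opentelemetry import trace

tracer = trace.get_tracer("litellm.redis_cache")  # tracer name is illustrative
client = redis.Redis(host="localhost", port=6379)

def sync_get_cache(key: str):
    # Give each blocking redis call its own span so slow cache reads show
    # up in the trace instead of hiding inside the parent request span.
    with tracer.start_as_current_span("redis_cache.get") as span:
        span.set_attribute("redis.key", key)
        return client.get(key)
```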

* refactor: pass parent_otel_span for redis caching calls in router

allows for more observability into what calls are causing latency issues
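
The pattern, sketched with the plain OpenTelemetry API (function body and tracer name are illustrative): the caller hands its request span down, and the cache call parents its own span to it, so cache latency nests under the originating request in the trace.

```python
from typing import List, Optional

from opentelemetry import trace
from opentelemetry.trace import Span

tracer = trace.get_tracer("litellm.dual_cache")  # tracer name is illustrative

def batch_get_cache(keys: List[str], parent_otel_span: Optional[Span] = None):
    # Parent the cache span to the caller's request span when one exists;
    # with parent_otel_span=None we fall back to the current context.
    ctx = trace.set_span_in_context(parent_otel_span) if parent_otel_span else None
    with tracer.start_as_current_span("dual_cache.batch_get_cache", context=ctx) as span:
        span.set_attribute("cache.num_keys", len(keys))
        # in-memory lookup first, then redis for the misses (omitted here)
        return [None for _ in keys]  # placeholder result
```

This is also why the test diffs below gain a `parent_otel_span=None` argument: outside a traced request there is no parent span to pass.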

* test: update tests with new params

* refactor: ensure e2e otel tracing for router

* refactor(router.py): add more otel tracing across router

surfaces latency issues across all router request paths
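
A hedged sketch of what per-stage spans buy here; `pick_deployment` and the two helpers are trivial stand-ins for the router's real routing steps:

```python
from typing import List, Optional

from opentelemetry import trace
from opentelemetry.trace import Span

tracer = trace.get_tracer("litellm.router")  # tracer name is illustrative

def _cooldown_deployments(model: str) -> List[str]:  # hypothetical stand-in
    return []

def _healthy_deployments(model: str, cooldowns: List[str]) -> List[str]:  # hypothetical
    return ["deployment-1"]

def pick_deployment(model: str, parent_otel_span: Optional[Span] = None) -> List[str]:
    # One child span per routing stage: a trace viewer then shows whether
    # pre-call latency came from the cooldown check or the health lookup.
    ctx = trace.set_span_in_context(parent_otel_span) if parent_otel_span else None
    with tracer.start_as_current_span("router.pick_deployment", context=ctx):
        with tracer.start_as_current_span("router.cooldown_check"):
            cooldowns = _cooldown_deployments(model)
        with tracer.start_as_current_span("router.healthy_deployments"):
            return _healthy_deployments(model, cooldowns)
```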

* fix: fix linting error

* fix(router.py): fix linting error

* fix: fix test

* test: fix tests

* fix(dual_cache.py): pass ttl to redis cache
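
A minimal sketch of the ttl fix (a stand-in, not LiteLLM's actual DualCache): the ttl argument was not being forwarded to the redis layer.

```python
from typing import Optional

import redis

class DualCacheSketch:
    """Stand-in for a two-tier cache; not LiteLLM's actual DualCache."""

    def __init__(self) -> None:
        self.in_memory: dict = {}
        self.redis_client = redis.Redis(host="localhost", port=6379)

    def set_cache(self, key: str, value: str, ttl: Optional[int] = None) -> None:
        self.in_memory[key] = value  # a real impl would expire this entry too
        # The fix: forward ttl to redis instead of dropping it. `ex` is
        # redis-py's per-key expiry in seconds; ex=None means no expiry.
        self.redis_client.set(key, value, ex=ttl)
```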

* fix: fix param
Krish Dholakia authored on 2024-10-28 21:52:12 -07:00, committed by GitHub
parent d9e7818e6b, commit 4f8a3fd4cf
25 changed files with 559 additions and 147 deletions

```diff
@@ -83,7 +83,9 @@ def test_dual_cache_batch_get_cache():
     in_memory_cache.set_cache(key="test_value", value="hello world")
-    result = dual_cache.batch_get_cache(keys=["test_value", "test_value_2"])
+    result = dual_cache.batch_get_cache(
+        keys=["test_value", "test_value_2"], parent_otel_span=None
+    )
     assert result[0] == "hello world"
     assert result[1] == None
```

```diff
@@ -2447,11 +2447,11 @@ async def test_aaarouter_dynamic_cooldown_message_retry_time(sync_mode):
     if sync_mode:
         cooldown_deployments = _get_cooldown_deployments(
-            litellm_router_instance=router
+            litellm_router_instance=router, parent_otel_span=None
         )
     else:
         cooldown_deployments = await _async_get_cooldown_deployments(
-            litellm_router_instance=router
+            litellm_router_instance=router, parent_otel_span=None
         )
     print(
         "Cooldown deployments - {}\n{}".format(
```

```diff
@@ -242,12 +242,12 @@ async def test_single_deployment_no_cooldowns_test_prod_mock_completion_calls():
         pass
     cooldown_list = await _async_get_cooldown_deployments(
-        litellm_router_instance=router
+        litellm_router_instance=router, parent_otel_span=None
     )
     assert len(cooldown_list) == 0
     healthy_deployments, _ = await router._async_get_healthy_deployments(
-        model="gpt-3.5-turbo"
+        model="gpt-3.5-turbo", parent_otel_span=None
     )
     print("healthy_deployments: ", healthy_deployments)
@@ -351,7 +351,7 @@ async def test_high_traffic_cooldowns_all_healthy_deployments():
     print("model_stats: ", model_stats)
     cooldown_list = await _async_get_cooldown_deployments(
-        litellm_router_instance=router
+        litellm_router_instance=router, parent_otel_span=None
     )
     assert len(cooldown_list) == 0
@@ -449,7 +449,7 @@ async def test_high_traffic_cooldowns_one_bad_deployment():
     print("model_stats: ", model_stats)
     cooldown_list = await _async_get_cooldown_deployments(
-        litellm_router_instance=router
+        litellm_router_instance=router, parent_otel_span=None
     )
     assert len(cooldown_list) == 1
@@ -550,7 +550,7 @@ async def test_high_traffic_cooldowns_one_rate_limited_deployment():
     print("model_stats: ", model_stats)
     cooldown_list = await _async_get_cooldown_deployments(
-        litellm_router_instance=router
+        litellm_router_instance=router, parent_otel_span=None
     )
     assert len(cooldown_list) == 1
```

```diff
@@ -440,12 +440,12 @@ def test_update_usage(model_list):
     )
     deployment_id = deployment["model_info"]["id"]
     request_count = router._update_usage(
-        deployment_id=deployment_id,
+        deployment_id=deployment_id, parent_otel_span=None
     )
     assert request_count == 1
     request_count = router._update_usage(
-        deployment_id=deployment_id,
+        deployment_id=deployment_id, parent_otel_span=None
     )
     assert request_count == 2
@@ -482,7 +482,9 @@ def test_should_raise_content_policy_error(model_list, finish_reason, expected_e
 def test_get_healthy_deployments(model_list):
     """Test if the '_get_healthy_deployments' function is working correctly"""
     router = Router(model_list=model_list)
-    deployments = router._get_healthy_deployments(model="gpt-3.5-turbo")
+    deployments = router._get_healthy_deployments(
+        model="gpt-3.5-turbo", parent_otel_span=None
+    )
     assert len(deployments) > 0
@@ -756,6 +758,7 @@ def test_track_deployment_metrics(model_list):
             model="gpt-3.5-turbo",
             usage={"total_tokens": 100},
         ),
+        parent_otel_span=None,
     )
```