Litellm dev 11 02 2024 (#6561)

* fix(dual_cache.py): update in-memory check for redis batch get cache

Fixes the latency delay on async_batch_redis_cache calls
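A minimal sketch of the in-memory-first idea (illustrative names only, not the actual DualCache code; the redis client's async_batch_get method is assumed): serve whatever the in-memory cache already holds and only send the remaining misses to the Redis batch get, so warm keys never pay the network round trip.

from typing import Any, Dict, List, Optional


class DualCacheSketch:
    """Illustrative only: in-memory first, Redis only for the misses."""

    def __init__(self, redis_client: Any):
        self.in_memory: Dict[str, Any] = {}   # stand-in for the in-memory cache
        self.redis_client = redis_client      # assumed to expose async_batch_get()

    async def async_batch_get_cache(self, keys: List[str]) -> Dict[str, Optional[Any]]:
        # 1. answer from memory where possible
        result: Dict[str, Optional[Any]] = {k: self.in_memory.get(k) for k in keys}
        missing = [k for k, v in result.items() if v is None]
        if not missing:
            return result  # fully served locally, no Redis round trip

        # 2. batch-fetch only the misses from Redis (hypothetical client method)
        redis_values = await self.redis_client.async_batch_get(missing)
        for key, value in zip(missing, redis_values):
            if value is not None:
                result[key] = value
                self.in_memory[key] = value  # backfill for the next request
        return result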

* fix(service_logger.py): fix race condition causing otel service logging to be overwritten if service_callbacks set

* feat(user_api_key_auth.py): add parent otel component for auth

allows us to isolate how much latency is added by auth checks
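A rough sketch of what a dedicated auth span looks like with the standard opentelemetry-api package (the tracer name, span name, and function body are placeholders, not litellm's actual auth logic): checks performed during auth can emit child spans under this parent, so auth latency shows up as its own component in the trace.

from opentelemetry import trace

tracer = trace.get_tracer("litellm.proxy.auth")  # tracer name is illustrative


async def user_api_key_auth_sketch(api_key: str) -> dict:
    # one parent span wrapping the whole auth path; child spans created inside
    # (db lookups, cache reads, jwt checks) nest under it in the trace
    with tracer.start_as_current_span("user_api_key_auth") as span:
        span.set_attribute("auth.api_key_prefix", api_key[:6])
        # ... key / user / team checks would run here ...
        return {"api_key": api_key}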

* perf(parallel_request_limiter.py): move async_set_cache_pipeline (from max parallel request limiter) out of execution path (background task)

reduces latency by 200ms
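The pattern, sketched with placeholder names (the real limiter's hook and cache types differ): instead of awaiting the Redis pipeline write inside the request path, hand it to asyncio.create_task so the hook returns immediately and the write completes in the background.

import asyncio
from typing import Any, List, Tuple


async def _write_usage_to_cache(cache: Any, items: List[Tuple[str, Any]]) -> None:
    # stand-in for the real async_set_cache_pipeline call against Redis
    await cache.async_set_cache_pipeline(items)


async def pre_call_hook_sketch(cache: Any, updates: List[Tuple[str, Any]]) -> None:
    # before: `await _write_usage_to_cache(...)` blocked the request for a full
    # Redis round trip; now the write is scheduled and the hook returns at once
    asyncio.create_task(_write_usage_to_cache(cache, updates))

In real code you would typically keep a reference to the task (or attach a done callback) so failures get logged and the task is not garbage collected mid-flight.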

* feat(user_api_key_auth.py): have user api key auth object return user tpm/rpm limits - reduces redis calls in downstream task (parallel_request_limiter)

Reduces latency by 400-800ms
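A sketch of the shape of this change, with illustrative field names rather than the exact UserAPIKeyAuth schema: auth already has the user record in hand, so it can attach the tpm/rpm limits to the object it returns, and the parallel request limiter reads them from there instead of going back to Redis.

from dataclasses import dataclass
from typing import Optional


@dataclass
class AuthResultSketch:
    api_key: str
    user_id: Optional[str] = None
    # limits resolved once during auth; downstream hooks reuse them directly
    user_tpm_limit: Optional[int] = None
    user_rpm_limit: Optional[int] = None


def under_rpm_limit(auth: AuthResultSketch, current_rpm: int) -> bool:
    # no extra cache/Redis read: the limit travels with the auth object
    if auth.user_rpm_limit is None:
        return True
    return current_rpm < auth.user_rpm_limit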

* fix(parallel_request_limiter.py): use batch get cache to reduce user/key/team usage object calls

reduces latency by 50-100ms
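Illustrative only (the cache-key layout and method name are assumptions): one batch get fetches the key, user, and team usage objects together instead of three sequential lookups.

from typing import Any, Dict, Optional


async def get_usage_objects_sketch(
    cache: Any, api_key: str, user_id: str, team_id: str
) -> Dict[str, Optional[Any]]:
    # single round trip for all three usage records
    keys = [f"{api_key}::usage", f"{user_id}::usage", f"{team_id}::usage"]
    values = await cache.async_batch_get_cache(keys)  # assumed to return {key: value}
    return {
        "key": values.get(keys[0]),
        "user": values.get(keys[1]),
        "team": values.get(keys[2]),
    }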

* fix: fix linting error

* fix(_service_logger.py): fix import

* fix(user_api_key_auth.py): fix service logging

* fix(dual_cache.py): don't pass 'self'

* fix: fix python3.8 error

* fix: fix init
Krish Dholakia 2024-11-04 07:48:20 +05:30 committed by GitHub
parent e5b4a71c79
commit cc19a9f6a1
17 changed files with 303 additions and 157 deletions

litellm/_service_logger.py

@@ -13,9 +13,13 @@ from .types.services import ServiceLoggerPayload, ServiceTypes
 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span

+    from litellm.integrations.opentelemetry import OpenTelemetry
+
     Span = _Span
+    OTELClass = OpenTelemetry
 else:
     Span = Any
+    OTELClass = Any


 class ServiceLogging(CustomLogger):
@@ -111,6 +115,7 @@ class ServiceLogging(CustomLogger):
         """
         - For counting if the redis, postgres call is successful
         """
+        from litellm.integrations.opentelemetry import OpenTelemetry

         if self.mock_testing:
             self.mock_testing_async_success_hook += 1
@@ -122,6 +127,7 @@ class ServiceLogging(CustomLogger):
             duration=duration,
             call_type=call_type,
         )
+
         for callback in litellm.service_callback:
             if callback == "prometheus_system":
                 await self.init_prometheus_services_logger_if_none()
@@ -139,8 +145,7 @@ class ServiceLogging(CustomLogger):
                     end_time=end_time,
                     event_metadata=event_metadata,
                 )
-            elif callback == "otel":
-                from litellm.integrations.opentelemetry import OpenTelemetry
+            elif callback == "otel" or isinstance(callback, OpenTelemetry):
                 from litellm.proxy.proxy_server import open_telemetry_logger

                 await self.init_otel_logger_if_none()
@@ -214,6 +219,8 @@ class ServiceLogging(CustomLogger):
         """
         - For counting if the redis, postgres call is unsuccessful
         """
+        from litellm.integrations.opentelemetry import OpenTelemetry
+
         if self.mock_testing:
             self.mock_testing_async_failure_hook += 1
@@ -246,8 +253,7 @@ class ServiceLogging(CustomLogger):
                     end_time=end_time,
                     event_metadata=event_metadata,
                 )
-            elif callback == "otel":
-                from litellm.integrations.opentelemetry import OpenTelemetry
+            elif callback == "otel" or isinstance(callback, OpenTelemetry):
                 from litellm.proxy.proxy_server import open_telemetry_logger

                 await self.init_otel_logger_if_none()