Improve rpm check on keys (#8301)

* fix(parallel_request_limiter.py): initial commit that solves the rpm limit check on keys

Fixes https://github.com/BerriAI/litellm/issues/6938

* fix(parallel_request_limiter.py): simpler approach - just increment RPM in pre call hook instead of on success

* fix(parallel_request_limiter.py): pass testing

* fix: fix linting error

* fix(parallel_request_limiter.py): fix parallel request check for keys
Krish Dholakia, 2025-02-05 20:23:08 -08:00, committed by GitHub
parent 7e873538f6
commit b4e5c0de69
5 changed files with 92 additions and 140 deletions
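The gist of the change: previously, `current_rpm` was only incremented in the success-event hook, so a burst of concurrent requests could all clear the pre-call check before any of them finished and bumped the counter. Counting the request in the pre-call hook itself closes that window. A minimal sketch of the new accounting (illustrative only — the real logic lives in `check_key_in_limits` below, which raises an HTTPException with status 429):

```python
# Minimal sketch of the pre-call RPM accounting, assuming a cache value shaped
# like {"current_requests": int, "current_tpm": int, "current_rpm": int}.
# Illustrative only; not the exact LiteLLM implementation.
from typing import Optional


def pre_call_rpm_check(current: Optional[dict], rpm_limit: int) -> dict:
    if current is None:
        # First request in this minute window: it counts toward RPM
        # immediately, instead of waiting for the success hook.
        return {"current_requests": 1, "current_tpm": 0, "current_rpm": 1}
    if current["current_rpm"] < rpm_limit:
        return {
            "current_requests": current["current_requests"] + 1,
            "current_tpm": current["current_tpm"],
            "current_rpm": current["current_rpm"] + 1,  # counted up-front now
        }
    # The real handler raises HTTPException(status_code=429, ...) here.
    raise RuntimeError("429: RPM limit exceeded for this key")
```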

.gitignore

@@ -76,3 +76,4 @@ litellm/proxy/_experimental/out/404.html
 litellm/proxy/_experimental/out/404.html
 litellm/proxy/_experimental/out/model_hub.html
 .mypy_cache/*
+litellm/proxy/application.log

litellm/proxy/proxy_config.yaml

@@ -2,7 +2,9 @@ model_list:
   - model_name: gpt-3.5-turbo-testing
     litellm_params:
       model: gpt-3.5-turbo
+      rpm: 3
-  - model_name: gpt-4
-    litellm_params:
-      model: gpt-3.5-turbo
+  - model_name: o3-mini
+    litellm_params:
+      model: o3-mini
@@ -25,8 +27,3 @@ model_list:
       model: openai/fake
       api_key: fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
-litellm_settings:
-  callbacks: ["opik"]
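The `rpm: 3` on `gpt-3.5-turbo-testing` gives the proxy a deployment with a deliberately low limit to exercise. A hedged smoke test against a locally running proxy (the URL, key, and the exact point at which the 429 surfaces are assumptions):

```python
# Hypothetical smoke test against a local proxy loaded with the config above;
# base_url and api_key are placeholders.
import openai

client = openai.OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

for i in range(4):
    # With rpm: 3 on the deployment, the 4th call in the same minute window
    # should be rejected once the limit is enforced.
    try:
        client.chat.completions.create(
            model="gpt-3.5-turbo-testing",
            messages=[{"role": "user", "content": f"ping {i}"}],
        )
    except openai.RateLimitError as e:
        print("rate limited:", e)
```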

litellm/proxy/auth/auth_utils.py

@@ -414,7 +414,9 @@ def bytes_to_mb(bytes_value: int):

 # helpers used by parallel request limiter to handle model rpm/tpm limits for a given api key
-def get_key_model_rpm_limit(user_api_key_dict: UserAPIKeyAuth) -> Optional[dict]:
+def get_key_model_rpm_limit(
+    user_api_key_dict: UserAPIKeyAuth,
+) -> Optional[Dict[str, int]]:
     if user_api_key_dict.metadata:
         if "model_rpm_limit" in user_api_key_dict.metadata:
             return user_api_key_dict.metadata["model_rpm_limit"]
@@ -428,7 +430,9 @@ def get_key_model_rpm_limit(user_api_key_dict: UserAPIKeyAuth) -> Optional[dict]
     return None

-def get_key_model_tpm_limit(user_api_key_dict: UserAPIKeyAuth) -> Optional[dict]:
+def get_key_model_tpm_limit(
+    user_api_key_dict: UserAPIKeyAuth,
+) -> Optional[Dict[str, int]]:
     if user_api_key_dict.metadata:
         if "model_tpm_limit" in user_api_key_dict.metadata:
             return user_api_key_dict.metadata["model_tpm_limit"]
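The signature change is small but useful: both helpers now advertise the shape they actually return — a mapping of model name to an integer limit, read from the key's metadata. A hedged sketch of how such a key is minted (`model_rpm_limit`/`model_tpm_limit` follow LiteLLM's documented `/key/generate` parameters; the URL and master key are placeholders):

```python
# Hedged sketch: issue a virtual key whose metadata carries per-model limits
# in the Dict[str, int] shape the typed helpers now return.
import requests

resp = requests.post(
    "http://localhost:4000/key/generate",  # placeholder proxy URL
    headers={"Authorization": "Bearer sk-master"},  # placeholder master key
    json={
        "model_rpm_limit": {"gpt-3.5-turbo": 3},     # 3 requests/minute for this model
        "model_tpm_limit": {"gpt-3.5-turbo": 1000},  # 1000 tokens/minute for this model
    },
    timeout=10,
)
print(resp.json().get("key"))
```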

litellm/proxy/hooks/parallel_request_limiter.py

@@ -9,7 +9,6 @@ from pydantic import BaseModel
 import litellm
 from litellm import DualCache, ModelResponse
 from litellm._logging import verbose_proxy_logger
-from litellm.constants import RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY
 from litellm.integrations.custom_logger import CustomLogger
 from litellm.litellm_core_utils.core_helpers import _get_parent_otel_span_from_kwargs
 from litellm.proxy._types import CurrentItemRateLimit, UserAPIKeyAuth
@@ -33,6 +32,7 @@ else:
 class CacheObject(TypedDict):
     current_global_requests: Optional[dict]
     request_count_api_key: Optional[dict]
+    request_count_api_key_model: Optional[dict]
     request_count_user_id: Optional[dict]
     request_count_team_id: Optional[dict]
     request_count_end_user_id: Optional[dict]
@@ -62,23 +62,19 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
         rpm_limit: int,
         current: Optional[dict],
         request_count_api_key: str,
-        rate_limit_type: Literal["user", "customer", "team"],
+        rate_limit_type: Literal["key", "model_per_key", "user", "customer", "team"],
         values_to_update_in_cache: List[Tuple[Any, Any]],
-    ):
-        # current = await self.internal_usage_cache.async_get_cache(
-        #     key=request_count_api_key,
-        #     litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
-        # ) # {"current_requests": 1, "current_tpm": 1, "current_rpm": 10}
+    ) -> dict:
         if current is None:
             if max_parallel_requests == 0 or tpm_limit == 0 or rpm_limit == 0:
                 # base case
-                return self.raise_rate_limit_error(
+                raise self.raise_rate_limit_error(
                     additional_details=f"Hit limit for {rate_limit_type}. Current limits: max_parallel_requests: {max_parallel_requests}, tpm_limit: {tpm_limit}, rpm_limit: {rpm_limit}"
                 )
             new_val = {
                 "current_requests": 1,
                 "current_tpm": 0,
-                "current_rpm": 0,
+                "current_rpm": 1,
             }
             values_to_update_in_cache.append((request_count_api_key, new_val))
         elif (
@@ -90,15 +86,17 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             new_val = {
                 "current_requests": current["current_requests"] + 1,
                 "current_tpm": current["current_tpm"],
-                "current_rpm": current["current_rpm"],
+                "current_rpm": current["current_rpm"] + 1,
             }
             values_to_update_in_cache.append((request_count_api_key, new_val))
         else:
             raise HTTPException(
                 status_code=429,
                 detail=f"LiteLLM Rate Limit Handler for rate limit type = {rate_limit_type}. Crossed TPM, RPM Limit. current rpm: {current['current_rpm']}, rpm limit: {rpm_limit}, current tpm: {current['current_tpm']}, tpm limit: {tpm_limit}",
                 headers={"retry-after": str(self.time_to_next_minute())},
             )
+        return new_val

     def time_to_next_minute(self) -> float:
         # Get the current time
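The 429 also tells the client when to come back: the `retry-after` header is derived from `time_to_next_minute()`, which fits because the counters are bucketed per `precise_minute` and reset at the minute boundary. A sketch of that computation (an assumed equivalent, not the method's actual body):

```python
# Hedged sketch: seconds remaining until the current minute window rolls over,
# which is when a per-minute counter bucket resets.
from datetime import datetime, timedelta


def seconds_to_next_minute(now: datetime) -> float:
    next_minute = (now + timedelta(minutes=1)).replace(second=0, microsecond=0)
    return (next_minute - now).total_seconds()


print(seconds_to_next_minute(datetime.now()))  # e.g. 23.4
```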
@@ -131,6 +129,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
         self,
         current_global_requests: Optional[str],
         request_count_api_key: Optional[str],
+        request_count_api_key_model: Optional[str],
         request_count_user_id: Optional[str],
         request_count_team_id: Optional[str],
         request_count_end_user_id: Optional[str],
@@ -139,6 +138,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
         keys = [
             current_global_requests,
             request_count_api_key,
+            request_count_api_key_model,
             request_count_user_id,
             request_count_team_id,
             request_count_end_user_id,
@@ -152,6 +152,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             return CacheObject(
                 current_global_requests=None,
                 request_count_api_key=None,
+                request_count_api_key_model=None,
                 request_count_user_id=None,
                 request_count_team_id=None,
                 request_count_end_user_id=None,
@@ -160,9 +161,10 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
         return CacheObject(
             current_global_requests=results[0],
             request_count_api_key=results[1],
-            request_count_user_id=results[2],
-            request_count_team_id=results[3],
-            request_count_end_user_id=results[4],
+            request_count_api_key_model=results[2],
+            request_count_user_id=results[3],
+            request_count_team_id=results[4],
+            request_count_end_user_id=results[5],
         )

     async def async_pre_call_hook(  # noqa: PLR0915
@@ -222,6 +224,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             local_only=True,
             litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
         )
+        _model = data.get("model", None)

         current_date = datetime.now().strftime("%Y-%m-%d")
         current_hour = datetime.now().strftime("%H")
@@ -239,6 +242,11 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
                 if api_key is not None
                 else None
             ),
+            request_count_api_key_model=(
+                f"{api_key}::{_model}::{precise_minute}::request_count"
+                if api_key is not None and _model is not None
+                else None
+            ),
             request_count_user_id=(
                 f"{user_api_key_dict.user_id}::{precise_minute}::request_count"
                 if user_api_key_dict.user_id is not None
@@ -258,44 +266,20 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
         )

         if api_key is not None:
             request_count_api_key = f"{api_key}::{precise_minute}::request_count"
-            # CHECK IF REQUEST ALLOWED for key
-            current = cache_objects["request_count_api_key"]
-            self.print_verbose(f"current: {current}")
-            if (
-                max_parallel_requests == sys.maxsize
-                and tpm_limit == sys.maxsize
-                and rpm_limit == sys.maxsize
-            ):
-                pass
-            elif max_parallel_requests == 0 or tpm_limit == 0 or rpm_limit == 0:
-                return self.raise_rate_limit_error(
-                    additional_details=f"Hit limit for {RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY}: {api_key}. max_parallel_requests: {max_parallel_requests}, tpm_limit: {tpm_limit}, rpm_limit: {rpm_limit}"
-                )
-            elif current is None:
-                new_val = {
-                    "current_requests": 1,
-                    "current_tpm": 0,
-                    "current_rpm": 0,
-                }
-                values_to_update_in_cache.append((request_count_api_key, new_val))
-            elif (
-                int(current["current_requests"]) < max_parallel_requests
-                and current["current_tpm"] < tpm_limit
-                and current["current_rpm"] < rpm_limit
-            ):
-                # Increase count for this token
-                new_val = {
-                    "current_requests": current["current_requests"] + 1,
-                    "current_tpm": current["current_tpm"],
-                    "current_rpm": current["current_rpm"],
-                }
-                values_to_update_in_cache.append((request_count_api_key, new_val))
-            else:
-                return self.raise_rate_limit_error(
-                    additional_details=f"Hit limit for {RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY}: {api_key}. tpm_limit: {tpm_limit}, current_tpm {current['current_tpm']} , rpm_limit: {rpm_limit} current rpm {current['current_rpm']} "
-                )
+            await self.check_key_in_limits(
+                user_api_key_dict=user_api_key_dict,
+                cache=cache,
+                data=data,
+                call_type=call_type,
+                max_parallel_requests=max_parallel_requests,
+                current=cache_objects["request_count_api_key"],
+                request_count_api_key=request_count_api_key,
+                tpm_limit=tpm_limit,
+                rpm_limit=rpm_limit,
+                rate_limit_type="key",
+                values_to_update_in_cache=values_to_update_in_cache,
+            )

         # Check if request under RPM/TPM per model for a given API Key
         if (
@@ -306,56 +290,31 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             request_count_api_key = (
                 f"{api_key}::{_model}::{precise_minute}::request_count"
             )
-            current = await self.internal_usage_cache.async_get_cache(
-                key=request_count_api_key,
-                litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
-            )  # {"current_requests": 1, "current_tpm": 1, "current_rpm": 10}
-            _tpm_limit_for_key_model = get_key_model_tpm_limit(user_api_key_dict)
-            _rpm_limit_for_key_model = get_key_model_rpm_limit(user_api_key_dict)
             tpm_limit_for_model = None
             rpm_limit_for_model = None
+            _tpm_limit_for_key_model = get_key_model_tpm_limit(user_api_key_dict)
+            _rpm_limit_for_key_model = get_key_model_rpm_limit(user_api_key_dict)

             if _model is not None:
                 if _tpm_limit_for_key_model:
                     tpm_limit_for_model = _tpm_limit_for_key_model.get(_model)

                 if _rpm_limit_for_key_model:
                     rpm_limit_for_model = _rpm_limit_for_key_model.get(_model)

-            if current is None:
-                new_val = {
-                    "current_requests": 1,
-                    "current_tpm": 0,
-                    "current_rpm": 0,
-                }
-                values_to_update_in_cache.append((request_count_api_key, new_val))
-            elif tpm_limit_for_model is not None or rpm_limit_for_model is not None:
-                # Increase count for this token
-                new_val = {
-                    "current_requests": current["current_requests"] + 1,
-                    "current_tpm": current["current_tpm"],
-                    "current_rpm": current["current_rpm"],
-                }
-                if (
-                    tpm_limit_for_model is not None
-                    and current["current_tpm"] >= tpm_limit_for_model
-                ):
-                    return self.raise_rate_limit_error(
-                        additional_details=f"Hit TPM limit for model: {_model} on {RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY}: {api_key}. tpm_limit: {tpm_limit_for_model}, current_tpm {current['current_tpm']} "
-                    )
-                elif (
-                    rpm_limit_for_model is not None
-                    and current["current_rpm"] >= rpm_limit_for_model
-                ):
-                    return self.raise_rate_limit_error(
-                        additional_details=f"Hit RPM limit for model: {_model} on {RATE_LIMIT_ERROR_MESSAGE_FOR_VIRTUAL_KEY}: {api_key}. rpm_limit: {rpm_limit_for_model}, current_rpm {current['current_rpm']} "
-                    )
-                else:
-                    values_to_update_in_cache.append((request_count_api_key, new_val))
+            new_val = await self.check_key_in_limits(
+                user_api_key_dict=user_api_key_dict,
+                cache=cache,
+                data=data,
+                call_type=call_type,
+                max_parallel_requests=sys.maxsize,  # TODO: Support max parallel requests for a model
+                current=cache_objects["request_count_api_key_model"],
+                request_count_api_key=request_count_api_key,
+                tpm_limit=tpm_limit_for_model or sys.maxsize,
+                rpm_limit=rpm_limit_for_model or sys.maxsize,
+                rate_limit_type="model_per_key",
+                values_to_update_in_cache=values_to_update_in_cache,
+            )

         _remaining_tokens = None
         _remaining_requests = None
         # Add remaining tokens, requests to metadata
@@ -554,7 +513,7 @@
         new_val = {
             "current_requests": max(current["current_requests"] - 1, 0),
             "current_tpm": current["current_tpm"] + total_tokens,
-            "current_rpm": current["current_rpm"] + 1,
+            "current_rpm": current["current_rpm"],
         }

         self.print_verbose(
@@ -591,7 +550,7 @@
         new_val = {
             "current_requests": max(current["current_requests"] - 1, 0),
             "current_tpm": current["current_tpm"] + total_tokens,
-            "current_rpm": current["current_rpm"] + 1,
+            "current_rpm": current["current_rpm"],
         }

         self.print_verbose(
@@ -624,7 +583,7 @@
         new_val = {
             "current_requests": max(current["current_requests"] - 1, 0),
             "current_tpm": current["current_tpm"] + total_tokens,
-            "current_rpm": current["current_rpm"] + 1,
+            "current_rpm": current["current_rpm"],
         }

         self.print_verbose(
@@ -657,7 +616,7 @@
         new_val = {
             "current_requests": max(current["current_requests"] - 1, 0),
             "current_tpm": current["current_tpm"] + total_tokens,
-            "current_rpm": current["current_rpm"] + 1,
+            "current_rpm": current["current_rpm"],
         }

         self.print_verbose(
@@ -690,7 +649,7 @@
         new_val = {
             "current_requests": max(current["current_requests"] - 1, 0),
             "current_tpm": current["current_tpm"] + total_tokens,
-            "current_rpm": current["current_rpm"] + 1,
+            "current_rpm": current["current_rpm"],
         }

         self.print_verbose(
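Net effect in this file: the per-key and per-model-per-key checks now share one code path (`check_key_in_limits`), per-model counters get their own cache key, unset limits are widened so the shared comparison never trips on them, and the success/failure hooks stop incrementing `current_rpm` since the pre-call hook already counted the request. Two details worth sketching (helper names here are illustrative, not LiteLLM internals):

```python
# Hedged sketch of the consolidation's two conventions.
import sys
from datetime import datetime
from typing import Optional


def model_per_key_counter_key(api_key: str, model: str) -> str:
    # One counter bucket per key + model + minute, mirroring the
    # f"{api_key}::{_model}::{precise_minute}::request_count" format above.
    precise_minute = datetime.now().strftime("%Y-%m-%d-%H-%M")
    return f"{api_key}::{model}::{precise_minute}::request_count"


def effective_limit(limit: Optional[int]) -> int:
    # As in `rpm_limit_for_model or sys.maxsize`: a missing (or falsy) limit
    # becomes sys.maxsize, so the shared check treats it as unlimited.
    return limit or sys.maxsize
```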

tests/local_testing/test_parallel_request_limiter.py

@@ -126,14 +126,15 @@ async def test_pre_call_hook_rpm_limits():
     kwargs = {"litellm_params": {"metadata": {"user_api_key": _api_key}}}

-    await parallel_request_handler.async_log_success_event(
-        kwargs=kwargs,
-        response_obj="",
-        start_time="",
-        end_time="",
-    )
+    ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}
+    await parallel_request_handler.async_pre_call_hook(
+        user_api_key_dict=user_api_key_dict,
+        cache=local_cache,
+        data={},
+        call_type="",
+    )

     ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}
     await asyncio.sleep(2)

     try:
         await parallel_request_handler.async_pre_call_hook(
@@ -148,6 +149,7 @@
     assert e.status_code == 429


+@pytest.mark.flaky(retries=6, delay=1)
 @pytest.mark.asyncio
 async def test_pre_call_hook_rpm_limits_retry_after():
     """
@@ -169,13 +171,15 @@
     kwargs = {"litellm_params": {"metadata": {"user_api_key": _api_key}}}

-    await parallel_request_handler.async_log_success_event(
-        kwargs=kwargs,
-        response_obj="",
-        start_time="",
-        end_time="",
-    )
+    await parallel_request_handler.async_pre_call_hook(
+        user_api_key_dict=user_api_key_dict,
+        cache=local_cache,
+        data={},
+        call_type="",
+    )

     await asyncio.sleep(2)
     ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}

     try:
@@ -224,14 +228,14 @@
         }
     }

-    await parallel_request_handler.async_log_success_event(
-        kwargs=kwargs,
-        response_obj="",
-        start_time="",
-        end_time="",
-    )
+    await parallel_request_handler.async_pre_call_hook(
+        user_api_key_dict=user_api_key_dict,
+        cache=local_cache,
+        data={},
+        call_type="",
+    )

     print(f"local_cache: {local_cache}")
     await asyncio.sleep(2)
     ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}
@@ -1081,13 +1085,15 @@
         },
     }

-    await parallel_request_handler.async_log_success_event(
-        kwargs=kwargs,
-        response_obj="",
-        start_time="",
-        end_time="",
-    )
+    await parallel_request_handler.async_pre_call_hook(
+        user_api_key_dict=user_api_key_dict,
+        cache=local_cache,
+        data={"model": model},
+        call_type="",
+    )

     await asyncio.sleep(2)
     ## Expected cache val: {"current_requests": 0, "current_tpm": 0, "current_rpm": 1}

     try:
@@ -1102,10 +1108,6 @@
     except Exception as e:
         assert e.status_code == 429
         print("got error=", e)
-        assert (
-            "limit reached Hit RPM limit for model: azure-model on LiteLLM Virtual Key user_api_key_hash: c11e7177eb60c80cf983ddf8ca98f2dc1272d4c612204ce9bedd2460b18939cc"
-            in str(e)
-        )


 @pytest.mark.flaky(retries=6, delay=2)
@@ -1191,13 +1193,6 @@
         == 11
     )

-    assert (
-        parallel_request_handler.internal_usage_cache.get_cache(
-            key=request_count_api_key
-        )["current_rpm"]
-        == 1
-    )

     ## Expected cache val: {"current_requests": 0, "current_tpm": 11, "current_rpm": "1"}

     try:
@@ -1212,10 +1207,6 @@
     except Exception as e:
         assert e.status_code == 429
         print("got error=", e)
-        assert (
-            "request limit reached Hit TPM limit for model: azure-model on LiteLLM Virtual Key user_api_key_hash"
-            in str(e)
-        )


 @pytest.mark.asyncio
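The test updates mirror the new accounting: the first request is registered through `async_pre_call_hook` (which now increments `current_rpm` itself) rather than by faking a success event. A condensed version of the pattern (constructor arguments follow the existing tests in this file and may differ across versions; treat as a sketch):

```python
# Hedged, condensed version of the updated test pattern; rpm_limit=1 so the
# second call in the same minute should be rejected up-front.
import asyncio

import pytest
from fastapi import HTTPException

from litellm import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.proxy.hooks.parallel_request_limiter import (
    _PROXY_MaxParallelRequestsHandler,
)
from litellm.proxy.utils import InternalUsageCache


@pytest.mark.asyncio
async def test_second_call_trips_rpm_limit():
    user_api_key_dict = UserAPIKeyAuth(api_key="sk-test", rpm_limit=1)
    local_cache = DualCache()
    handler = _PROXY_MaxParallelRequestsHandler(
        internal_usage_cache=InternalUsageCache(dual_cache=local_cache)
    )

    # First request: allowed, and it counts toward current_rpm immediately.
    await handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
    )
    await asyncio.sleep(2)  # let the batched cache write land, as the tests above do

    # Second request in the same minute: rejected in the pre-call hook itself.
    with pytest.raises(HTTPException) as exc_info:
        await handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict, cache=local_cache, data={}, call_type=""
        )
    assert exc_info.value.status_code == 429
```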