feat - add end user rate limiting

Ishaan Jaff 2024-05-22 14:01:57 -07:00
parent eac7e70dca
commit 106910cecf
2 changed files with 74 additions and 1 deletion


@@ -938,6 +938,11 @@ class LiteLLM_VerificationTokenView(LiteLLM_VerificationToken):
     soft_budget: Optional[float] = None
     team_model_aliases: Optional[Dict] = None
+    # End User Params
+    end_user_id: Optional[str] = None
+    end_user_tpm_limit: Optional[int] = None
+    end_user_rpm_limit: Optional[int] = None
 
 
 class UserAPIKeyAuth(
     LiteLLM_VerificationTokenView
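The three new fields are Optional and default to None, so downstream consumers have to treat a missing limit as unlimited. A minimal sketch of that pattern, using an illustrative stand-in model rather than the real UserAPIKeyAuth class:

import sys
from typing import Optional
from pydantic import BaseModel

class AuthSketch(BaseModel):
    # mirrors the fields added above; illustrative stand-in, not the real class
    end_user_id: Optional[str] = None
    end_user_tpm_limit: Optional[int] = None
    end_user_rpm_limit: Optional[int] = None

auth = AuthSketch(end_user_id="customer-123", end_user_rpm_limit=10)

# treat an unset limit as effectively unlimited, as the handler below does
tpm_limit = auth.end_user_tpm_limit if auth.end_user_tpm_limit is not None else sys.maxsize
rpm_limit = auth.end_user_rpm_limit if auth.end_user_rpm_limit is not None else sys.maxsize
assert tpm_limit == sys.maxsize and rpm_limit == 10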


@@ -64,7 +64,8 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             cache.set_cache(request_count_api_key, new_val)
         else:
             raise HTTPException(
-                status_code=429, detail="Max parallel request limit reached."
+                status_code=429,
+                detail=f"LiteLLM Rate Limit Handler: Crossed TPM, RPM Limit. current rpm: {current['current_rpm']}, rpm limit: {rpm_limit}, current tpm: {current['current_tpm']}, tpm limit: {tpm_limit}",
             )
 
     async def async_pre_call_hook(
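With the richer detail string, a caller that is rejected can see which counter crossed its limit. A hedged sketch of surfacing that client-side; the proxy URL, port, and payload are assumptions for illustration, not part of this commit:

import httpx

resp = httpx.post(
    "http://localhost:4000/chat/completions",  # assumed local proxy address
    headers={"Authorization": "Bearer sk-..."},
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hi"}],
        "user": "customer-123",  # end-user id the limits apply to
    },
)
if resp.status_code == 429:
    # FastAPI renders HTTPException as {"detail": "..."}; the detail now reports
    # current rpm/tpm alongside the configured limits.
    print(resp.json()["detail"])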
@@ -223,6 +224,38 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
                 rpm_limit=team_rpm_limit,
             )
 
+        # End-User Rate Limits
+        # Only enforce if user passed `user` to /chat, /completions, /embeddings
+        if user_api_key_dict.end_user_id:
+            end_user_tpm_limit = getattr(
+                user_api_key_dict, "end_user_tpm_limit", sys.maxsize
+            )
+            end_user_rpm_limit = getattr(
+                user_api_key_dict, "end_user_rpm_limit", sys.maxsize
+            )
+
+            if end_user_tpm_limit is None:
+                end_user_tpm_limit = sys.maxsize
+            if end_user_rpm_limit is None:
+                end_user_rpm_limit = sys.maxsize
+
+            # now do the same tpm/rpm checks
+            request_count_api_key = (
+                f"{user_api_key_dict.end_user_id}::{precise_minute}::request_count"
+            )
+
+            # print(f"Checking if {request_count_api_key} is allowed to make request for minute {precise_minute}")
+            await self.check_key_in_limits(
+                user_api_key_dict=user_api_key_dict,
+                cache=cache,
+                data=data,
+                call_type=call_type,
+                max_parallel_requests=sys.maxsize,  # TODO: Support max parallel requests for an End-User
+                request_count_api_key=request_count_api_key,
+                tpm_limit=end_user_tpm_limit,
+                rpm_limit=end_user_rpm_limit,
+            )
+
         return
 
     async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
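The end-user check reuses the handler's fixed one-minute window: a cache entry keyed <end_user_id>::<precise_minute>::request_count holds current_requests, current_tpm and current_rpm, and the request is rejected once a counter crosses its limit. A rough sketch of that comparison under those assumptions, with a plain dict standing in for the cache and the minute format approximated:

import sys
from datetime import datetime
from fastapi import HTTPException

def sketch_check(cache: dict, end_user_id: str,
                 tpm_limit: int = sys.maxsize, rpm_limit: int = sys.maxsize) -> None:
    # fixed one-minute window, analogous to precise_minute in the handler (format assumed)
    precise_minute = datetime.now().strftime("%Y-%m-%d-%H-%M")
    key = f"{end_user_id}::{precise_minute}::request_count"
    current = cache.get(key) or {"current_requests": 0, "current_tpm": 0, "current_rpm": 0}

    if current["current_tpm"] >= tpm_limit or current["current_rpm"] >= rpm_limit:
        raise HTTPException(
            status_code=429,
            detail=f"LiteLLM Rate Limit Handler: Crossed TPM, RPM Limit. "
            f"current rpm: {current['current_rpm']}, rpm limit: {rpm_limit}, "
            f"current tpm: {current['current_tpm']}, tpm limit: {tpm_limit}",
        )

    # simplified pre-call bookkeeping: record the in-flight request
    current["current_requests"] += 1
    cache[key] = current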
@@ -238,6 +271,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             user_api_key_team_id = kwargs["litellm_params"]["metadata"].get(
                 "user_api_key_team_id", None
             )
+            user_api_key_end_user_id = kwargs.get("user")
 
             if self.user_api_key_cache is None:
                 return
@@ -362,6 +396,40 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
                     request_count_api_key, new_val, ttl=60
                 )  # store in cache for 1 min.
 
+            # ------------
+            # Update usage - End User
+            # ------------
+            if user_api_key_end_user_id is not None:
+                total_tokens = 0
+
+                if isinstance(response_obj, ModelResponse):
+                    total_tokens = response_obj.usage.total_tokens
+
+                request_count_api_key = (
+                    f"{user_api_key_end_user_id}::{precise_minute}::request_count"
+                )
+
+                current = self.user_api_key_cache.get_cache(
+                    key=request_count_api_key
+                ) or {
+                    "current_requests": 1,
+                    "current_tpm": total_tokens,
+                    "current_rpm": 1,
+                }
+
+                new_val = {
+                    "current_requests": max(current["current_requests"] - 1, 0),
+                    "current_tpm": current["current_tpm"] + total_tokens,
+                    "current_rpm": current["current_rpm"] + 1,
+                }
+
+                self.print_verbose(
+                    f"updated_value in success call: {new_val}, precise_minute: {precise_minute}"
+                )
+                self.user_api_key_cache.set_cache(
+                    request_count_api_key, new_val, ttl=60
+                )  # store in cache for 1 min.
+
         except Exception as e:
             self.print_verbose(e)  # noqa
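On success the same per-minute entry is folded forward: the in-flight request is released, the response's total tokens are added to current_tpm, and current_rpm is bumped by one, with a 60-second TTL so the window expires on its own. A small worked example of that arithmetic, with a plain dict standing in for the cache:

total_tokens = 87  # e.g. response_obj.usage.total_tokens

current = {"current_requests": 1, "current_tpm": 300, "current_rpm": 4}
new_val = {
    "current_requests": max(current["current_requests"] - 1, 0),  # release the in-flight slot
    "current_tpm": current["current_tpm"] + total_tokens,         # 300 + 87 = 387
    "current_rpm": current["current_rpm"] + 1,                    # 5th request this minute
}
assert new_val == {"current_requests": 0, "current_tpm": 387, "current_rpm": 5}
# stored with ttl=60, so the per-minute counters age out after the window closes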