feat(parallel_request_limiter.py): add support for tpm/rpm limits

Krrish Dholakia 2024-01-18 13:52:15 -08:00
parent 2e06e00413
commit aef59c554f
2 changed files with 166 additions and 50 deletions
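
In short: instead of a single integer counter keyed by {api_key}_request_count, the limiter now keeps a per-minute dict of in-flight requests, tokens per minute (TPM), and requests per minute (RPM). A minimal sketch of the new cache-key scheme; request_count_key is a hypothetical helper name (the commit inlines this logic), but the key format is taken from the diff below:

    from datetime import datetime

    def request_count_key(api_key: str) -> str:
        # Hypothetical helper, not part of the commit. The key embeds the
        # current wall-clock minute, so each minute gets a fresh counter.
        precise_minute = datetime.now().strftime("%Y-%m-%d-%H-%M")
        return f"{api_key}::{precise_minute}::request_count"

    # e.g. request_count_key("sk-123") -> "sk-123::2024-01-18-13-52::request_count"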

parallel_request_limiter.py

@@ -5,6 +5,8 @@ from litellm.proxy._types import UserAPIKeyAuth
 from litellm.integrations.custom_logger import CustomLogger
 from fastapi import HTTPException
 from litellm._logging import verbose_proxy_logger
+from litellm import ModelResponse
+from datetime import datetime


 class MaxParallelRequestsHandler(CustomLogger):
@@ -35,16 +37,37 @@ class MaxParallelRequestsHandler(CustomLogger):
             return

         self.user_api_key_cache = cache  # save the api key cache for updating the value

+        # ------------
+        # Setup values
+        # ------------
+        current_date = datetime.now().strftime("%Y-%m-%d")
+        current_hour = datetime.now().strftime("%H")
+        current_minute = datetime.now().strftime("%M")
+        precise_minute = f"{current_date}-{current_hour}-{current_minute}"
+
+        request_count_api_key = f"{api_key}::{precise_minute}::request_count"
+
         # CHECK IF REQUEST ALLOWED
-        request_count_api_key = f"{api_key}_request_count"
-        current = cache.get_cache(key=request_count_api_key)
+        current = cache.get_cache(
+            key=request_count_api_key
+        )  # {"current_requests": 1, "current_tpm": 1, "current_rpm": 10}
         self.print_verbose(f"current: {current}")
         if current is None:
-            cache.set_cache(request_count_api_key, 1)
-        elif int(current) < max_parallel_requests:
+            new_val = {
+                "current_requests": 1,
+                "current_tpm": 0,
+                "current_rpm": 0,
+            }
+            cache.set_cache(request_count_api_key, new_val)
+        elif int(current["current_requests"]) < max_parallel_requests:
             # Increase count for this token
-            cache.set_cache(request_count_api_key, int(current) + 1)
+            new_val = {
+                "current_requests": current["current_requests"] + 1,
+                "current_tpm": current["current_tpm"],
+                "current_rpm": current["current_rpm"],
+            }
+            cache.set_cache(request_count_api_key, new_val)
         else:
             raise HTTPException(
                 status_code=429, detail="Max parallel request limit reached."
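
The pre-call hook admits a request only while current_requests is below max_parallel_requests; TPM/RPM start at zero here and are filled in by the logging hooks. A self-contained sketch of that admission logic, with a plain dict standing in for the proxy cache (the real code uses cache.get_cache / cache.set_cache, and pre_call_check is a hypothetical name):

    from fastapi import HTTPException

    cache: dict = {}  # stand-in for the proxy's key/value cache

    def pre_call_check(request_count_api_key: str, max_parallel_requests: int) -> None:
        # Hypothetical free function mirroring the hook above.
        current = cache.get(request_count_api_key)
        if current is None:
            # First request this minute: one in flight, nothing recorded yet.
            cache[request_count_api_key] = {
                "current_requests": 1,
                "current_tpm": 0,
                "current_rpm": 0,
            }
        elif current["current_requests"] < max_parallel_requests:
            # Room left below the parallel-request cap: take a slot.
            cache[request_count_api_key] = {
                "current_requests": current["current_requests"] + 1,
                "current_tpm": current["current_tpm"],
                "current_rpm": current["current_rpm"],
            }
        else:
            raise HTTPException(
                status_code=429, detail="Max parallel request limit reached."
            )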
@@ -60,12 +83,42 @@ class MaxParallelRequestsHandler(CustomLogger):
             if self.user_api_key_cache is None:
                 return

-            request_count_api_key = f"{user_api_key}_request_count"
-            # Decrease count for this token
-            current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
-            new_val = current - 1
+            # ------------
+            # Setup values
+            # ------------
+            current_date = datetime.now().strftime("%Y-%m-%d")
+            current_hour = datetime.now().strftime("%H")
+            current_minute = datetime.now().strftime("%M")
+            precise_minute = f"{current_date}-{current_hour}-{current_minute}"
+
+            total_tokens = 0
+            if isinstance(response_obj, ModelResponse):
+                total_tokens = response_obj.usage.total_tokens
+
+            request_count_api_key = f"{user_api_key}::{precise_minute}::request_count"
+
+            current = self.user_api_key_cache.get_cache(key=request_count_api_key) or {
+                "current_requests": 1,
+                "current_tpm": total_tokens,
+                "current_rpm": 1,
+            }
+
+            # ------------
+            # Update usage
+            # ------------
+            new_val = {
+                "current_requests": current["current_requests"] - 1,
+                "current_tpm": current["current_tpm"] + total_tokens,
+                "current_rpm": current["current_rpm"] + 1,
+            }

             self.print_verbose(f"updated_value in success call: {new_val}")
-            self.user_api_key_cache.set_cache(request_count_api_key, new_val)
+            self.user_api_key_cache.set_cache(
+                request_count_api_key, new_val, ttl=60
+            )  # store in cache for 1 min.
         except Exception as e:
             self.print_verbose(e)  # noqa
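
On success, one request leaves the in-flight pool, the response's token usage is folded into the minute's TPM/RPM tally, and the entry is written back with ttl=60 so counters expire along with their minute. The arithmetic as a stand-alone sketch; record_success is a hypothetical name, and total_tokens comes from response_obj.usage.total_tokens as in the diff:

    from typing import Optional

    def record_success(current: Optional[dict], total_tokens: int) -> dict:
        # Fall back to "one request just finished" if the minute's entry is missing.
        current = current or {
            "current_requests": 1,
            "current_tpm": total_tokens,
            "current_rpm": 1,
        }
        return {
            "current_requests": current["current_requests"] - 1,  # request finished
            "current_tpm": current["current_tpm"] + total_tokens,  # tokens served this minute
            "current_rpm": current["current_rpm"] + 1,  # completed requests this minute
        }

    # record_success({"current_requests": 2, "current_tpm": 100, "current_rpm": 3}, 150)
    # -> {"current_requests": 1, "current_tpm": 250, "current_rpm": 4}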
@@ -87,13 +140,40 @@ class MaxParallelRequestsHandler(CustomLogger):
             ):
                 pass  # ignore failed calls due to max limit being reached
             else:
-                request_count_api_key = f"{user_api_key}_request_count"
-                # Decrease count for this token
-                current = (
-                    self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
-                )
-                new_val = current - 1
+                # ------------
+                # Setup values
+                # ------------
+                current_date = datetime.now().strftime("%Y-%m-%d")
+                current_hour = datetime.now().strftime("%H")
+                current_minute = datetime.now().strftime("%M")
+                precise_minute = f"{current_date}-{current_hour}-{current_minute}"
+
+                request_count_api_key = (
+                    f"{user_api_key}::{precise_minute}::request_count"
+                )
+
+                # ------------
+                # Update usage
+                # ------------
+                current = self.user_api_key_cache.get_cache(
+                    key=request_count_api_key
+                ) or {
+                    "current_requests": 1,
+                    "current_tpm": 0,
+                    "current_rpm": 0,
+                }
+
+                new_val = {
+                    "current_requests": current["current_requests"] - 1,
+                    "current_tpm": current["current_tpm"],
+                    "current_rpm": current["current_rpm"],
+                }

                 self.print_verbose(f"updated_value in failure call: {new_val}")
-                self.user_api_key_cache.set_cache(request_count_api_key, new_val)
+                self.user_api_key_cache.set_cache(
+                    request_count_api_key, new_val, ttl=60
+                )  # save in cache for up to 1 min.
         except Exception as e:
             self.print_verbose(f"An exception occurred - {str(e)}")  # noqa