track rpm/tpm usage per key+model

Ishaan Jaff 2024-08-16 18:28:58 -07:00
parent a6a4b944ad
commit 1ee33478c9
2 changed files with 177 additions and 0 deletions


@@ -202,6 +202,61 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
                    additional_details=f"Hit limit for api_key: {api_key}. tpm_limit: {tpm_limit}, current_tpm {current['current_tpm']} , rpm_limit: {rpm_limit} current rpm {current['current_rpm']} "
                )
            # Check if request under RPM/TPM per model for a given API Key
            if (
                user_api_key_dict.tpm_limit_per_model
                or user_api_key_dict.rpm_limit_per_model
            ):
                _model = data.get("model", None)
                request_count_api_key = (
                    f"{api_key}::{_model}::{precise_minute}::request_count"
                )

                current = await self.internal_usage_cache.async_get_cache(
                    key=request_count_api_key
                )  # {"current_requests": 1, "current_tpm": 1, "current_rpm": 10}

                tpm_limit_for_model = None
                rpm_limit_for_model = None

                if _model is not None:
                    if user_api_key_dict.tpm_limit_per_model:
                        tpm_limit_for_model = user_api_key_dict.tpm_limit_per_model.get(
                            _model
                        )

                    if user_api_key_dict.rpm_limit_per_model:
                        rpm_limit_for_model = user_api_key_dict.rpm_limit_per_model.get(
                            _model
                        )

                if current is None:
                    new_val = {
                        "current_requests": 1,
                        "current_tpm": 0,
                        "current_rpm": 0,
                    }
                    await self.internal_usage_cache.async_set_cache(
                        request_count_api_key, new_val
                    )
                elif tpm_limit_for_model is not None or rpm_limit_for_model is not None:
                    new_val = {
                        "current_requests": 1,
                        "current_tpm": current["current_tpm"],
                        "current_rpm": current["current_rpm"],
                    }
                    if (
                        tpm_limit_for_model is not None
                        and current["current_tpm"] >= tpm_limit_for_model
                    ):
                        return self.raise_rate_limit_error(
                            additional_details=f"Hit limit for model: {_model} on api_key: {api_key}. tpm_limit: {tpm_limit_for_model}, current_tpm {current['current_tpm']} "
                        )
                    elif (
                        rpm_limit_for_model is not None
                        and current["current_rpm"] >= rpm_limit_for_model
                    ):
                        return self.raise_rate_limit_error(
                            additional_details=f"Hit limit for model: {_model} on api_key: {api_key}. rpm_limit: {rpm_limit_for_model}, current_rpm {current['current_rpm']} "
                        )
        # check if REQUEST ALLOWED for user_id
        user_id = user_api_key_dict.user_id
        if user_id is not None:
@@ -365,6 +420,36 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
                request_count_api_key, new_val, ttl=60
            )  # store in cache for 1 min.
        # ------------
        # Update usage - model + API Key
        # ------------
        _model = kwargs.get("model")

        if user_api_key is not None and _model is not None:
            request_count_api_key = (
                f"{user_api_key}::{_model}::{precise_minute}::request_count"
            )

            current = await self.internal_usage_cache.async_get_cache(
                key=request_count_api_key
            ) or {
                "current_requests": 1,
                "current_tpm": total_tokens,
                "current_rpm": 1,
            }

            new_val = {
                "current_requests": max(current["current_requests"] - 1, 0),
                "current_tpm": current["current_tpm"] + total_tokens,
                "current_rpm": current["current_rpm"] + 1,
            }

            self.print_verbose(
                f"updated_value in success call: {new_val}, precise_minute: {precise_minute}"
            )
            await self.internal_usage_cache.async_set_cache(
                request_count_api_key, new_val, ttl=60
            )
        # ------------
        # Update usage - User
        # ------------
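On success, the handler folds the finished request back into the same per-key, per-model minute bucket: in-flight requests drop by one, current_tpm grows by the response's total_tokens, current_rpm grows by one, and the value is stored with a 60-second TTL so each minute bucket expires on its own. A small worked example of that arithmetic, using made-up numbers:

# Worked example of the success-hook update above; the numbers are illustrative.
total_tokens = 42  # tokens consumed by the completed request

current = {"current_requests": 1, "current_tpm": 100, "current_rpm": 3}
new_val = {
    "current_requests": max(current["current_requests"] - 1, 0),  # 1 -> 0, request finished
    "current_tpm": current["current_tpm"] + total_tokens,         # 100 + 42 = 142
    "current_rpm": current["current_rpm"] + 1,                    # 3 + 1 = 4
}
assert new_val == {"current_requests": 0, "current_tpm": 142, "current_rpm": 4}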


@@ -908,3 +908,95 @@ async def test_bad_router_tpm_limit():
        )["current_tpm"]
        == 0
    )
@pytest.mark.asyncio
async def test_bad_router_tpm_limit_per_model():
    model_list = [
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-turbo",
                "api_key": "os.environ/AZURE_FRANCE_API_KEY",
                "api_base": "https://openai-france-1234.openai.azure.com",
                "rpm": 1440,
            },
            "model_info": {"id": 1},
        },
        {
            "model_name": "azure-model",
            "litellm_params": {
                "model": "azure/gpt-35-turbo",
                "api_key": "os.environ/AZURE_EUROPE_API_KEY",
                "api_base": "https://my-endpoint-europe-berri-992.openai.azure.com",
                "rpm": 6,
            },
            "model_info": {"id": 2},
        },
    ]
    router = Router(
        model_list=model_list,
        set_verbose=False,
        num_retries=3,
    )  # type: ignore

    _api_key = "sk-12345"
    _api_key = hash_token(_api_key)
    model = "azure-model"
    user_api_key_dict = UserAPIKeyAuth(
        api_key=_api_key,
        max_parallel_requests=10,
        tpm_limit=10,
        tpm_limit_per_model={model: 5},
        rpm_limit_per_model={model: 5},
    )
    local_cache = DualCache()
    pl = ProxyLogging(user_api_key_cache=local_cache)
    pl._init_litellm_callbacks()
    print(f"litellm callbacks: {litellm.callbacks}")

    parallel_request_handler = pl.max_parallel_request_limiter

    await parallel_request_handler.async_pre_call_hook(
        user_api_key_dict=user_api_key_dict,
        cache=local_cache,
        data={"model": model},
        call_type="",
    )

    current_date = datetime.now().strftime("%Y-%m-%d")
    current_hour = datetime.now().strftime("%H")
    current_minute = datetime.now().strftime("%M")
    precise_minute = f"{current_date}-{current_hour}-{current_minute}"
    request_count_api_key = f"{_api_key}::{model}::{precise_minute}::request_count"

    print(
        "internal usage cache: ",
        parallel_request_handler.internal_usage_cache.in_memory_cache.cache_dict,
    )

    assert (
        parallel_request_handler.internal_usage_cache.get_cache(
            key=request_count_api_key
        )["current_requests"]
        == 1
    )

    # bad call
    try:
        response = await router.acompletion(
            model=model,
            messages=[{"role": "user2", "content": "Write me a paragraph on the moon"}],
            stream=True,
            metadata={"user_api_key": _api_key},
        )
    except:
        pass
    await asyncio.sleep(1)  # success is done in a separate thread

    assert (
        parallel_request_handler.internal_usage_cache.get_cache(
            key=request_count_api_key
        )["current_tpm"]
        == 0
    )
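The test above covers the bookkeeping path: a failed call must leave the per-model current_tpm at 0. A hedged companion check, not part of this commit, could be appended to the same test body to exercise the enforcement path: seed the per-model counters at the configured limit, then expect the pre-call hook to reject the next request. It assumes raise_rate_limit_error surfaces as an exception carrying a status_code of 429.

    # Hypothetical follow-up (not part of this commit): pin the per-model counters
    # at the configured limit, then expect the pre-call hook to reject the request.
    await parallel_request_handler.internal_usage_cache.async_set_cache(
        request_count_api_key,
        {"current_requests": 0, "current_tpm": 5, "current_rpm": 5},
    )
    with pytest.raises(Exception) as exc_info:
        await parallel_request_handler.async_pre_call_hook(
            user_api_key_dict=user_api_key_dict,
            cache=local_cache,
            data={"model": model},
            call_type="",
        )
    # assumption: the limiter raises an HTTP-style error with status_code 429
    assert getattr(exc_info.value, "status_code", None) == 429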