mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix(proxy_server.py): enable pre+post-call hooks and max parallel request limits
This commit is contained in:
parent
977bfaaab9
commit
5fa2b6e5ad
9 changed files with 213 additions and 130 deletions
33
litellm/proxy/hooks/parallel_request_limiter.py
Normal file
33
litellm/proxy/hooks/parallel_request_limiter.py
Normal file
|
@@ -0,0 +1,33 @@
|
|||
from typing import Optional
|
||||
from litellm.caching import DualCache
|
||||
from fastapi import HTTPException
|
||||
|
||||
async def max_parallel_request_allow_request(max_parallel_requests: Optional[int], api_key: Optional[str], user_api_key_cache: DualCache):
    """Admit a request only while the key's in-flight count is under its limit.

    No-op when there is no api key or no limit is configured. On admission the
    per-key counter stored in ``user_api_key_cache`` is incremented; once the
    counter has reached ``max_parallel_requests`` an HTTP 429 is raised instead.

    Raises:
        HTTPException: 429 when the parallel-request limit is already reached.
    """
    # Nothing to enforce without both a key and a configured limit.
    if api_key is None or max_parallel_requests is None:
        return

    # CHECK IF REQUEST ALLOWED
    counter_key = f"{api_key}_request_count"
    in_flight = user_api_key_cache.get_cache(key=counter_key)
    if in_flight is None:
        # First concurrent request seen for this key.
        user_api_key_cache.set_cache(counter_key, 1)
        return
    if int(in_flight) < max_parallel_requests:
        # Under the limit: record one more in-flight request for this token.
        user_api_key_cache.set_cache(counter_key, int(in_flight) + 1)
        return
    raise HTTPException(status_code=429, detail="Max parallel request limit reached.")
|
||||
|
||||
|
||||
async def max_parallel_request_update_count(api_key: Optional[str], user_api_key_cache: DualCache):
    """Release one in-flight slot for *api_key* once its request completes.

    Silently returns when no api key is available. A missing counter entry is
    treated as 1, so the stored value lands on 0 after the decrement.
    """
    if api_key is None:
        return

    counter_key = f"{api_key}_request_count"
    # Decrease count for this token; fall back to 1 if the entry is absent.
    in_flight = int(user_api_key_cache.get_cache(key=counter_key) or 1)
    user_api_key_cache.set_cache(counter_key, in_flight - 1)
    return
|
Loading…
Add table
Add a link
Reference in a new issue