refactor: add black formatting

parent b87d630b0a
commit 4905929de3

156 changed files with 19723 additions and 10869 deletions
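Black reformats every file to a uniform style at its default 88-character line length, so most of this commit's churn is mechanical: long def lines and calls are exploded to one argument per line with a trailing comma (black's "magic trailing comma", which keeps the expanded layout on later runs), and inline comments get two spaces before the #. A minimal sketch of the rewrap, using a hypothetical handle() function:

# Black sees a def line longer than 88 characters, e.g.
#   async def handle(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: str):
# and rewrites it to one parameter per line:
async def handle(
    self,
    user_api_key_dict: "UserAPIKeyAuth",  # string annotations stand in for imports here
    cache: "DualCache",
    data: dict,
    call_type: str,
):  # the trailing comma after call_type keeps black from re-collapsing this
    ...

The hunks below are from the proxy's max-parallel-requests hook (the file path is not preserved in this excerpt).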
@@ -5,18 +5,25 @@ from litellm.proxy._types import UserAPIKeyAuth
 from litellm.integrations.custom_logger import CustomLogger
 from fastapi import HTTPException


 class MaxParallelRequestsHandler(CustomLogger):
     user_api_key_cache = None

     # Class variables or attributes
     def __init__(self):
         pass

     def print_verbose(self, print_statement):
         if litellm.set_verbose is True:
-            print(print_statement) # noqa
+            print(print_statement)  # noqa

-    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: str):
+    async def async_pre_call_hook(
+        self,
+        user_api_key_dict: UserAPIKeyAuth,
+        cache: DualCache,
+        data: dict,
+        call_type: str,
+    ):
         self.print_verbose(f"Inside Max Parallel Request Pre-Call Hook")
         api_key = user_api_key_dict.api_key
         max_parallel_requests = user_api_key_dict.max_parallel_requests
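For orientation, a sketch of how a caller might exercise the pre-call hook. The DualCache() no-argument constructor, the UserAPIKeyAuth field names, and the module path below are assumptions about this commit's APIs, not verified signatures:

import asyncio

from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler  # assumed path


async def main():
    handler = MaxParallelRequestsHandler()
    cache = DualCache()  # assumed: defaults to an in-memory cache
    key = UserAPIKeyAuth(api_key="sk-1234", max_parallel_requests=2)  # assumed fields
    # Each allowed call bumps the "{api_key}_request_count" cache entry;
    # once it reaches max_parallel_requests the hook raises HTTPException(429).
    await handler.async_pre_call_hook(
        user_api_key_dict=key, cache=cache, data={}, call_type="completion"
    )


asyncio.run(main())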
@@ -26,8 +33,8 @@ class MaxParallelRequestsHandler(CustomLogger):
         if max_parallel_requests is None:
             return

-        self.user_api_key_cache = cache # save the api key cache for updating the value
+        self.user_api_key_cache = cache  # save the api key cache for updating the value

         # CHECK IF REQUEST ALLOWED
         request_count_api_key = f"{api_key}_request_count"
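One thing black cannot fix: get_cache() followed by set_cache() is a read-modify-write, so two requests that land together can both read the same count and each conclude they are within the limit. A minimal sketch (not part of this commit) of the same check serialized with an asyncio.Lock:

import asyncio

from fastapi import HTTPException

_counter_lock = asyncio.Lock()


async def check_and_increment(cache, api_key: str, max_parallel_requests: int):
    # Same counter scheme as the hook, but the read and the write happen
    # under one lock so concurrent requests cannot interleave.
    # NOTE: per-process only; a Redis-backed cache would need an atomic INCR instead.
    request_count_api_key = f"{api_key}_request_count"
    async with _counter_lock:
        current = cache.get_cache(key=request_count_api_key) or 0
        if current >= max_parallel_requests:
            raise HTTPException(
                status_code=429, detail="Max parallel request limit reached."
            )
        cache.set_cache(request_count_api_key, current + 1)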
@@ -35,56 +42,67 @@ class MaxParallelRequestsHandler(CustomLogger):
         self.print_verbose(f"current: {current}")
         if current is None:
             cache.set_cache(request_count_api_key, 1)
         elif int(current) < max_parallel_requests:
             # Increase count for this token
             cache.set_cache(request_count_api_key, int(current) + 1)
         else:
-            raise HTTPException(status_code=429, detail="Max parallel request limit reached.")
+            raise HTTPException(
+                status_code=429, detail="Max parallel request limit reached."
+            )

     async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
         try:
             self.print_verbose(f"INSIDE ASYNC SUCCESS LOGGING")
             user_api_key = kwargs["litellm_params"]["metadata"]["user_api_key"]
             if user_api_key is None:
                 return

             if self.user_api_key_cache is None:
                 return

             request_count_api_key = f"{user_api_key}_request_count"
             # check if it has collected an entire stream response
-            self.print_verbose(f"'complete_streaming_response' is in kwargs: {'complete_streaming_response' in kwargs}")
+            self.print_verbose(
+                f"'complete_streaming_response' is in kwargs: {'complete_streaming_response' in kwargs}"
+            )
             if "complete_streaming_response" in kwargs or kwargs["stream"] != True:
                 # Decrease count for this token
-                current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
+                current = (
+                    self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
+                )
                 new_val = current - 1
                 self.print_verbose(f"updated_value in success call: {new_val}")
                 self.user_api_key_cache.set_cache(request_count_api_key, new_val)
         except Exception as e:
-            self.print_verbose(e) # noqa
+            self.print_verbose(e)  # noqa

-    async def async_log_failure_call(self, user_api_key_dict: UserAPIKeyAuth, original_exception: Exception):
+    async def async_log_failure_call(
+        self, user_api_key_dict: UserAPIKeyAuth, original_exception: Exception
+    ):
         try:
             self.print_verbose(f"Inside Max Parallel Request Failure Hook")
             api_key = user_api_key_dict.api_key
             if api_key is None:
                 return

             if self.user_api_key_cache is None:
                 return

             ## decrement call count if call failed
-            if (hasattr(original_exception, "status_code")
-                and original_exception.status_code == 429
-                and "Max parallel request limit reached" in str(original_exception)):
-                pass # ignore failed calls due to max limit being reached
+            if (
+                hasattr(original_exception, "status_code")
+                and original_exception.status_code == 429
+                and "Max parallel request limit reached" in str(original_exception)
+            ):
+                pass  # ignore failed calls due to max limit being reached
             else:
                 request_count_api_key = f"{api_key}_request_count"
                 # Decrease count for this token
-                current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
+                current = (
+                    self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
+                )
                 new_val = current - 1
                 self.print_verbose(f"updated_value in failure call: {new_val}")
                 self.user_api_key_cache.set_cache(request_count_api_key, new_val)
         except Exception as e:
-            self.print_verbose(f"An exception occurred - {str(e)}") # noqa
+            self.print_verbose(f"An exception occurred - {str(e)}")  # noqa
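And the decrement side: on success (or any failure other than the limiter's own 429) the handler reads the counter back and writes counter - 1; streaming calls wait until the complete_streaming_response has been collected. A sketch of driving the success path, with the same caveat that DualCache() and the module path are assumed:

import asyncio

from litellm.caching import DualCache
from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler  # assumed path


async def main():
    handler = MaxParallelRequestsHandler()
    handler.user_api_key_cache = DualCache()  # normally saved by the pre-call hook
    handler.user_api_key_cache.set_cache("sk-1234_request_count", 1)
    kwargs = {
        "litellm_params": {"metadata": {"user_api_key": "sk-1234"}},
        "stream": False,  # non-streaming, so the decrement happens immediately
    }
    await handler.async_log_success_event(
        kwargs=kwargs, response_obj=None, start_time=None, end_time=None
    )
    print(handler.user_api_key_cache.get_cache(key="sk-1234_request_count"))  # -> 0


asyncio.run(main())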