fix(custom_logger.py): enable pre_call hooks to modify incoming data to proxy

Krrish Dholakia 2023-12-13 16:20:13 -08:00
parent 03d6dcefbb
commit effdddc1c8
4 changed files with 51 additions and 43 deletions

litellm/integrations/custom_logger.py

@@ -2,8 +2,9 @@
 # On success, logs events to Promptlayer
 import dotenv, os
 import requests
-import requests
+from litellm.proxy._types import UserAPIKeyAuth
+from litellm.caching import DualCache
+from typing import Literal
 dotenv.load_dotenv() # Loading env variables using dotenv
 import traceback
@@ -40,6 +41,16 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback
     async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
         pass

+    #### CALL HOOKS ####
+    """
+    Control / modify incoming and outgoing data before calling the model
+    """
+    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]):
+        pass
+
+    async def async_post_call_failure_hook(self, original_exception: Exception, user_api_key_dict: UserAPIKeyAuth):
+        pass
+
     #### SINGLE-USE #### - https://docs.litellm.ai/docs/observability/custom_callback#using-your-custom-callback-function
     def log_input_event(self, model, messages, kwargs, print_verbose, callback_func):
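
With this hook in place, a proxy admin's custom handler can rewrite the request body before it is forwarded to the model. Below is a minimal sketch, assuming the handler instance is registered in litellm.callbacks via the proxy config; the class name and the max_tokens cap are hypothetical:

from typing import Literal

from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import UserAPIKeyAuth

class MaxTokensEnforcer(CustomLogger):  # hypothetical handler
    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]):
        # Cap max_tokens on every completion request; the dict returned
        # here replaces the data the proxy forwards to the model.
        if call_type == "completion":
            data["max_tokens"] = min(data.get("max_tokens", 1024), 1024)
        return data

Returning None instead leaves the request untouched: the ProxyLogging.pre_call_hook loop (see the litellm/proxy/utils.py diff below) only swaps in the hook's return value when it is not None.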

litellm/proxy/hooks/parallel_request_limiter.py

@ -1,6 +1,7 @@
from typing import Optional from typing import Optional
import litellm import litellm
from litellm.caching import DualCache from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.integrations.custom_logger import CustomLogger from litellm.integrations.custom_logger import CustomLogger
from fastapi import HTTPException from fastapi import HTTPException
@@ -14,24 +15,27 @@ class MaxParallelRequestsHandler(CustomLogger):
             print(print_statement) # noqa

-    async def max_parallel_request_allow_request(self, max_parallel_requests: Optional[int], api_key: Optional[str], user_api_key_cache: DualCache):
+    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: str):
+        api_key = user_api_key_dict.api_key
+        max_parallel_requests = user_api_key_dict.max_parallel_requests
         if api_key is None:
             return

         if max_parallel_requests is None:
             return

-        self.user_api_key_cache = user_api_key_cache # save the api key cache for updating the value
+        self.user_api_key_cache = cache # save the api key cache for updating the value

         # CHECK IF REQUEST ALLOWED
         request_count_api_key = f"{api_key}_request_count"
-        current = user_api_key_cache.get_cache(key=request_count_api_key)
+        current = cache.get_cache(key=request_count_api_key)
         self.print_verbose(f"current: {current}")
         if current is None:
-            user_api_key_cache.set_cache(request_count_api_key, 1)
+            cache.set_cache(request_count_api_key, 1)
         elif int(current) < max_parallel_requests:
             # Increase count for this token
-            user_api_key_cache.set_cache(request_count_api_key, int(current) + 1)
+            cache.set_cache(request_count_api_key, int(current) + 1)
         else:
             raise HTTPException(status_code=429, detail="Max parallel request limit reached.")
@@ -55,16 +59,23 @@ class MaxParallelRequestsHandler(CustomLogger):
         except Exception as e:
             self.print_verbose(e) # noqa

-    async def async_log_failure_call(self, api_key, user_api_key_cache):
+    async def async_log_failure_call(self, user_api_key_dict: UserAPIKeyAuth, original_exception: Exception):
         try:
+            api_key = user_api_key_dict.api_key
             if api_key is None:
                 return
-            request_count_api_key = f"{api_key}_request_count"
-            # Decrease count for this token
-            current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
-            new_val = current - 1
-            self.print_verbose(f"updated_value in failure call: {new_val}")
-            self.user_api_key_cache.set_cache(request_count_api_key, new_val)
+            ## decrement call count if call failed
+            if (hasattr(original_exception, "status_code")
+                and original_exception.status_code == 429
+                and "Max parallel request limit reached" in str(original_exception)):
+                pass # ignore failed calls due to max limit being reached
+            else:
+                request_count_api_key = f"{api_key}_request_count"
+                # Decrease count for this token
+                current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
+                new_val = current - 1
+                self.print_verbose(f"updated_value in failure call: {new_val}")
+                self.user_api_key_cache.set_cache(request_count_api_key, new_val)
         except Exception as e:
             self.print_verbose(f"An exception occurred - {str(e)}") # noqa

litellm/proxy/proxy_server.py

@@ -264,7 +264,7 @@ async def user_api_key_auth(request: Request, api_key: str = fastapi.Security(ap
         if prisma_client is None: # if both master key + user key submitted, and user key != master key, and no db connected, raise an error
             raise Exception("No connected db.")
         ## check for cache hit (In-Memory Cache)
         valid_token = user_api_key_cache.get_cache(key=api_key)
         print(f"valid_token from cache: {valid_token}")
@@ -387,16 +387,11 @@ async def track_cost_callback(
                 response_cost = litellm.completion_cost(completion_response=completion_response)
                 print("streaming response_cost", response_cost)
                 user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None)
-                print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}")
                 if user_api_key and prisma_client:
                     await update_prisma_database(token=user_api_key, response_cost=response_cost)
         elif kwargs["stream"] == False: # for non streaming responses
             response_cost = litellm.completion_cost(completion_response=completion_response)
-            print(f"received completion response: {completion_response}")
-            print(f"regular response_cost: {response_cost}")
             user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None)
-            print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}")
             if user_api_key and prisma_client:
                 await update_prisma_database(token=user_api_key, response_cost=response_cost)
     except Exception as e:
@@ -1004,7 +999,6 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
         ### ROUTE THE REQUEST ###
         router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else []
         if llm_router is not None and data["model"] in router_model_names: # model in router model list
-            print(f"ENTERS LLM ROUTER ACOMPLETION")
             response = await llm_router.acompletion(**data)
         elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router
             response = await llm_router.acompletion(**data, specific_deployment = True)

litellm/proxy/utils.py

@@ -4,7 +4,7 @@ import litellm, backoff
 from litellm.proxy._types import UserAPIKeyAuth
 from litellm.caching import DualCache
 from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler
+from litellm.integrations.custom_logger import CustomLogger

 def print_verbose(print_statement):
     if litellm.set_verbose:
         print(print_statement) # noqa
@@ -65,16 +65,12 @@ class ProxyLogging:
         2. /embeddings
         """
         try:
-            self.call_details["data"] = data
-            self.call_details["call_type"] = call_type
-            ## check if max parallel requests set
-            if user_api_key_dict.max_parallel_requests is not None:
-                ## if set, check if request allowed
-                await self.max_parallel_request_limiter.max_parallel_request_allow_request(
-                    max_parallel_requests=user_api_key_dict.max_parallel_requests,
-                    api_key=user_api_key_dict.api_key,
-                    user_api_key_cache=self.call_details["user_api_key_cache"])
+            for callback in litellm.callbacks:
+                if isinstance(callback, CustomLogger) and 'async_pre_call_hook' in vars(callback.__class__):
+                    response = await callback.async_pre_call_hook(user_api_key_dict=user_api_key_dict, cache=self.call_details["user_api_key_cache"], data=data, call_type=call_type)
+                    if response is not None:
+                        data = response

             print_verbose(f'final data being sent to {call_type} call: {data}')
             return data
         except Exception as e:
@@ -103,17 +99,13 @@ class ProxyLogging:
         1. /chat/completions
         2. /embeddings
         """
-        # check if max parallel requests set
-        if user_api_key_dict is not None and user_api_key_dict.max_parallel_requests is not None:
-            ## decrement call count if call failed
-            if (hasattr(original_exception, "status_code")
-                and original_exception.status_code == 429
-                and "Max parallel request limit reached" in str(original_exception)):
-                pass # ignore failed calls due to max limit being reached
-            else:
-                await self.max_parallel_request_limiter.async_log_failure_call(
-                    api_key=user_api_key_dict.api_key,
-                    user_api_key_cache=self.call_details["user_api_key_cache"])
+        for callback in litellm.callbacks:
+            try:
+                if isinstance(callback, CustomLogger):
+                    await callback.async_post_call_failure_hook(user_api_key_dict=user_api_key_dict, original_exception=original_exception)
+            except Exception as e:
+                raise e
         return
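
One detail worth noting in the new pre_call_hook loop above: 'async_pre_call_hook' in vars(callback.__class__) checks only attributes defined directly on the callback's concrete class, so the no-op stub inherited from CustomLogger never triggers a call. A self-contained illustration (class names are made up):

class Base:
    async def async_pre_call_hook(self, data):  # no-op default, like CustomLogger's stub
        pass

class Overrides(Base):
    async def async_pre_call_hook(self, data):
        return {**data, "modified": True}

class Inherits(Base):  # relies on the inherited stub
    pass

# vars(cls) is cls.__dict__, which excludes inherited attributes.
print("async_pre_call_hook" in vars(Overrides))  # True  -> the proxy calls this hook
print("async_pre_call_hook" in vars(Inherits))   # False -> the proxy skips it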