forked from phoenix/litellm-mirror
fix(custom_logger.py): enable pre_call hooks to modify incoming data to proxy
parent 03d6dcefbb
commit effdddc1c8
4 changed files with 51 additions and 43 deletions
@@ -2,8 +2,9 @@
# On success, logs events to Promptlayer
import dotenv, os
import requests
import requests

from litellm.proxy._types import UserAPIKeyAuth
from litellm.caching import DualCache
from typing import Literal
dotenv.load_dotenv()  # Loading env variables using dotenv
import traceback
@@ -40,6 +41,16 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback
    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        pass

    #### CALL HOOKS ####
    """
    Control / modify incoming and outgoing data before calling the model
    """
    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]):
        pass

    async def async_post_call_failure_hook(self, original_exception: Exception, user_api_key_dict: UserAPIKeyAuth):
        pass

    #### SINGLE-USE #### - https://docs.litellm.ai/docs/observability/custom_callback#using-your-custom-callback-function

    def log_input_event(self, model, messages, kwargs, print_verbose, callback_func):
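The new async_pre_call_hook lets a callback rewrite the request before the proxy forwards it to the model. A minimal sketch of such a handler follows; the class name and the max_tokens cap are made up for illustration:

from typing import Literal

from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import UserAPIKeyAuth


class ExampleDataRewriteHandler(CustomLogger):  # hypothetical handler name
    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]):
        # e.g. cap max_tokens on every completion request before it reaches the model
        if call_type == "completion":
            data["max_tokens"] = min(data.get("max_tokens", 256), 256)
        return data  # a non-None return value replaces the request data in the proxy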
@@ -1,6 +1,7 @@
from typing import Optional
import litellm
from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.integrations.custom_logger import CustomLogger
from fastapi import HTTPException
@@ -14,24 +15,27 @@ class MaxParallelRequestsHandler(CustomLogger):
        print(print_statement) # noqa

    async def max_parallel_request_allow_request(self, max_parallel_requests: Optional[int], api_key: Optional[str], user_api_key_cache: DualCache):
    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: str):
        api_key = user_api_key_dict.api_key
        max_parallel_requests = user_api_key_dict.max_parallel_requests

        if api_key is None:
            return

        if max_parallel_requests is None:
            return

        self.user_api_key_cache = user_api_key_cache # save the api key cache for updating the value
        self.user_api_key_cache = cache # save the api key cache for updating the value

        # CHECK IF REQUEST ALLOWED
        request_count_api_key = f"{api_key}_request_count"
        current = user_api_key_cache.get_cache(key=request_count_api_key)
        current = cache.get_cache(key=request_count_api_key)
        self.print_verbose(f"current: {current}")
        if current is None:
            user_api_key_cache.set_cache(request_count_api_key, 1)
            cache.set_cache(request_count_api_key, 1)
        elif int(current) < max_parallel_requests:
            # Increase count for this token
            user_api_key_cache.set_cache(request_count_api_key, int(current) + 1)
            cache.set_cache(request_count_api_key, int(current) + 1)
        else:
            raise HTTPException(status_code=429, detail="Max parallel request limit reached.")
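The rework above turns the limiter into a pre_call hook: the old standalone max_parallel_request_allow_request becomes async_pre_call_hook, and the per-key counter lives under the cache passed in by the proxy. A minimal sketch of exercising it directly; the key value is a placeholder and the UserAPIKeyAuth fields are assumed to be constructible as shown:

import asyncio

from fastapi import HTTPException
from litellm.caching import DualCache
from litellm.proxy._types import UserAPIKeyAuth
from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler


async def main():
    handler = MaxParallelRequestsHandler()
    cache = DualCache()
    key = UserAPIKeyAuth(api_key="sk-test", max_parallel_requests=1)

    # first request increments the "{api_key}_request_count" counter
    await handler.async_pre_call_hook(user_api_key_dict=key, cache=cache, data={}, call_type="completion")
    try:
        # a second in-flight request for the same key exceeds max_parallel_requests=1
        await handler.async_pre_call_hook(user_api_key_dict=key, cache=cache, data={}, call_type="completion")
    except HTTPException as e:
        print(e.status_code, e.detail)  # expect 429 / "Max parallel request limit reached."


asyncio.run(main())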
@@ -55,16 +59,23 @@ class MaxParallelRequestsHandler(CustomLogger):
        except Exception as e:
            self.print_verbose(e) # noqa

    async def async_log_failure_call(self, api_key, user_api_key_cache):
    async def async_log_failure_call(self, user_api_key_dict: UserAPIKeyAuth, original_exception: Exception):
        try:
            api_key = user_api_key_dict.api_key
            if api_key is None:
                return

            request_count_api_key = f"{api_key}_request_count"
            # Decrease count for this token
            current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
            new_val = current - 1
            self.print_verbose(f"updated_value in failure call: {new_val}")
            self.user_api_key_cache.set_cache(request_count_api_key, new_val)
            ## decrement call count if call failed
            if (hasattr(original_exception, "status_code")
                and original_exception.status_code == 429
                and "Max parallel request limit reached" in str(original_exception)):
                pass # ignore failed calls due to max limit being reached
            else:
                request_count_api_key = f"{api_key}_request_count"
                # Decrease count for this token
                current = self.user_api_key_cache.get_cache(key=request_count_api_key) or 1
                new_val = current - 1
                self.print_verbose(f"updated_value in failure call: {new_val}")
                self.user_api_key_cache.set_cache(request_count_api_key, new_val)
        except Exception as e:
            self.print_verbose(f"An exception occurred - {str(e)}") # noqa
@@ -264,7 +264,7 @@ async def user_api_key_auth(request: Request, api_key: str = fastapi.Security(ap

        if prisma_client is None: # if both master key + user key submitted, and user key != master key, and no db connected, raise an error
            raise Exception("No connected db.")

        ## check for cache hit (In-Memory Cache)
        valid_token = user_api_key_cache.get_cache(key=api_key)
        print(f"valid_token from cache: {valid_token}")
@@ -387,16 +387,11 @@ async def track_cost_callback(
            response_cost = litellm.completion_cost(completion_response=completion_response)
            print("streaming response_cost", response_cost)
            user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None)
            print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}")
            if user_api_key and prisma_client:
                await update_prisma_database(token=user_api_key, response_cost=response_cost)
        elif kwargs["stream"] == False: # for non streaming responses
            response_cost = litellm.completion_cost(completion_response=completion_response)
            print(f"received completion response: {completion_response}")

            print(f"regular response_cost: {response_cost}")
            user_api_key = kwargs["litellm_params"]["metadata"].get("user_api_key", None)
            print(f"user_api_key - {user_api_key}; prisma_client - {prisma_client}")
            if user_api_key and prisma_client:
                await update_prisma_database(token=user_api_key, response_cost=response_cost)
    except Exception as e:
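For reference, the cost lookup this callback relies on can be reproduced on its own; a minimal sketch, with the model, message, and credentials treated as placeholders:

import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
)
# price the finished response the same way track_cost_callback does
response_cost = litellm.completion_cost(completion_response=response)
print(f"response_cost: {response_cost}")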
@@ -1004,7 +999,6 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
    ### ROUTE THE REQUEST ###
    router_model_names = [m["model_name"] for m in llm_model_list] if llm_model_list is not None else []
    if llm_router is not None and data["model"] in router_model_names: # model in router model list
        print(f"ENTERS LLM ROUTER ACOMPLETION")
        response = await llm_router.acompletion(**data)
    elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router
        response = await llm_router.acompletion(**data, specific_deployment = True)
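The router branch above dispatches to litellm's Router; a minimal sketch of the same call outside the proxy, with a single made-up deployment and a placeholder API key:

import asyncio
import litellm

llm_router = litellm.Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",  # what callers put in data["model"]
            "litellm_params": {"model": "gpt-3.5-turbo", "api_key": "sk-..."},  # placeholder key
        }
    ]
)


async def main():
    response = await llm_router.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hello"}],
    )
    print(response)


asyncio.run(main())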
@@ -4,7 +4,7 @@ import litellm, backoff
from litellm.proxy._types import UserAPIKeyAuth
from litellm.caching import DualCache
from litellm.proxy.hooks.parallel_request_limiter import MaxParallelRequestsHandler

from litellm.integrations.custom_logger import CustomLogger
def print_verbose(print_statement):
    if litellm.set_verbose:
        print(print_statement) # noqa
@@ -65,16 +65,12 @@ class ProxyLogging:
        2. /embeddings
        """
        try:
            self.call_details["data"] = data
            self.call_details["call_type"] = call_type
            ## check if max parallel requests set
            if user_api_key_dict.max_parallel_requests is not None:
                ## if set, check if request allowed
                await self.max_parallel_request_limiter.max_parallel_request_allow_request(
                    max_parallel_requests=user_api_key_dict.max_parallel_requests,
                    api_key=user_api_key_dict.api_key,
                    user_api_key_cache=self.call_details["user_api_key_cache"])

            for callback in litellm.callbacks:
                if isinstance(callback, CustomLogger) and 'async_pre_call_hook' in vars(callback.__class__):
                    response = await callback.async_pre_call_hook(user_api_key_dict=user_api_key_dict, cache=self.call_details["user_api_key_cache"], data=data, call_type=call_type)
                    if response is not None:
                        data = response

            print_verbose(f'final data being sent to {call_type} call: {data}')
            return data
        except Exception as e:
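For the loop above to find a hook, the handler instance has to be registered in litellm.callbacks. A sketch of that wiring, with a hypothetical module and class name (on the proxy this list is typically populated from the config file rather than by hand):

import litellm

from custom_callbacks import ExampleDataRewriteHandler  # hypothetical module / class

proxy_handler_instance = ExampleDataRewriteHandler()
litellm.callbacks = [proxy_handler_instance]  # ProxyLogging.pre_call_hook iterates this list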
@@ -103,17 +99,13 @@ class ProxyLogging:
        1. /chat/completions
        2. /embeddings
        """
        # check if max parallel requests set
        if user_api_key_dict is not None and user_api_key_dict.max_parallel_requests is not None:
            ## decrement call count if call failed
            if (hasattr(original_exception, "status_code")
                and original_exception.status_code == 429
                and "Max parallel request limit reached" in str(original_exception)):
                pass # ignore failed calls due to max limit being reached
            else:
                await self.max_parallel_request_limiter.async_log_failure_call(
                    api_key=user_api_key_dict.api_key,
                    user_api_key_cache=self.call_details["user_api_key_cache"])

        for callback in litellm.callbacks:
            try:
                if isinstance(callback, CustomLogger):
                    await callback.async_post_call_failure_hook(user_api_key_dict=user_api_key_dict, original_exception=original_exception)
            except Exception as e:
                raise e
        return
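A matching sketch for the failure side of the hook API driven by the loop above; the class name and the print sink are hypothetical:

from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import UserAPIKeyAuth


class FailureAuditHandler(CustomLogger):  # hypothetical handler name
    async def async_post_call_failure_hook(self, original_exception: Exception, user_api_key_dict: UserAPIKeyAuth):
        # record which virtual key hit which error; swap print for a real logging sink
        print(f"call failed for key={user_api_key_dict.api_key}: {original_exception}")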