forked from phoenix/litellm-mirror
(feat) stop eagerly evaluating fstring
parent 9e9de7f6e2
commit dad4bd58bc
3 changed files with 42 additions and 51 deletions
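Every hunk below makes the same change: log lines that interpolated an f-string up front now pass a %-style format string plus arguments, so `verbose_proxy_logger` defers formatting until a handler actually emits the record. A minimal, self-contained sketch of the difference, using the stdlib `logging` module directly rather than litellm's `verbose_proxy_logger`:

    import logging

    logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
    logger = logging.getLogger("demo")

    class Expensive:
        def __str__(self) -> str:
            print("__str__ called")  # side effect makes evaluation visible
            return "expensive"

    obj = Expensive()

    # Eager: the f-string (and obj.__str__) is evaluated before debug() runs,
    # even though the record is dropped at INFO level.
    logger.debug(f"value: {obj}")

    # Lazy: obj is stored on the LogRecord; __str__ only runs if the record
    # is actually emitted, so nothing is formatted here.
    logger.debug("value: %s", obj)

Running the sketch prints "__str__ called" once, for the eager call only; that saved work, multiplied across hot paths like `user_api_key_auth`, is the point of this commit.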
@@ -123,7 +123,9 @@ class DynamoDBWrapper(CustomDB):
         error_occurred = False
         verbose_proxy_logger.debug("DynamoDB Wrapper - Creating User Table")
         table = client.table(self.database_arguments.user_table_name)
-        verbose_proxy_logger.debug(f"DynamoDB Wrapper - Created Table, {table}")
+        verbose_proxy_logger.debug(
+            "DynamoDB Wrapper - Created Table, %s", table
+        )
         if not await table.exists():
             verbose_proxy_logger.debug(
                 f"DynamoDB Wrapper - {table} does not exist"
@@ -520,7 +520,7 @@ async def user_api_key_auth(
         valid_token = user_api_key_cache.get_cache(key=api_key)
         if valid_token is None:
             ## check db
-            verbose_proxy_logger.debug(f"api key: {api_key}")
+            verbose_proxy_logger.debug("api key: %s", api_key)
             if prisma_client is not None:
                 valid_token = await prisma_client.get_data(
                     token=api_key, table_name="combined_view"
@@ -535,9 +535,9 @@ async def user_api_key_auth(
                 valid_token = await custom_db_client.get_data(
                     key=original_api_key, table_name="key"
                 )
-            verbose_proxy_logger.debug(f"Token from db: {valid_token}")
+            verbose_proxy_logger.debug("Token from db: %s", valid_token)
         elif valid_token is not None:
-            verbose_proxy_logger.debug(f"API Key Cache Hit!")
+            verbose_proxy_logger.debug("API Key Cache Hit!")
         if valid_token:
             # Got Valid Token from Cache, DB
             # Run checks for
@@ -1003,7 +1003,6 @@ async def user_api_key_auth(
             else:
                 raise Exception()
     except Exception as e:
-        # verbose_proxy_logger.debug(f"An exception occurred - {traceback.format_exc()}")
         traceback.print_exc()
         if isinstance(e, HTTPException):
             raise ProxyException(
@@ -1091,11 +1090,11 @@ async def _PROXY_track_cost_callback(
     start_time=None,
     end_time=None,  # start/end time for completion
 ):
-    verbose_proxy_logger.debug(f"INSIDE _PROXY_track_cost_callback")
+    verbose_proxy_logger.debug("INSIDE _PROXY_track_cost_callback")
     global prisma_client, custom_db_client
     try:
         # check if it has collected an entire stream response
-        verbose_proxy_logger.debug(f"Proxy: In track_cost_callback for {kwargs}")
+        verbose_proxy_logger.debug("Proxy: In track_cost_callback for: %s", kwargs)
         verbose_proxy_logger.debug(
             f"kwargs stream: {kwargs.get('stream', None)} + complete streaming response: {kwargs.get('complete_streaming_response', None)}"
         )
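The hunk above hands the whole `kwargs` dict to the logger as a lazy argument. `%s` renders it with `str()` at emit time, so nothing is serialized when DEBUG is off; a sketch with a stand-in dict:

    import logging

    logger = logging.getLogger("proxy")

    kwargs = {"model": "gpt-3.5-turbo", "stream": True}  # stand-in payload

    # The dict rides along on the LogRecord; str(kwargs) is only computed
    # if a DEBUG-level handler actually formats the record.
    logger.debug("Proxy: In track_cost_callback for: %s", kwargs)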
@@ -1163,7 +1162,7 @@ async def _PROXY_track_cost_callback(
                     error_message=error_msg,
                 )
             )
-        verbose_proxy_logger.debug(f"error in tracking cost callback - {error_msg}")
+        verbose_proxy_logger.debug("error in tracking cost callback - %s", e)


 async def update_database(
@@ -1314,7 +1313,7 @@ async def update_database(
             # Calculate the new cost by adding the existing cost and response_cost
             new_spend = existing_spend + response_cost

-            verbose_proxy_logger.debug(f"new cost: {new_spend}")
+            verbose_proxy_logger.debug("new cost: %s", new_spend)
             # Update the cost column for the given token
             await custom_db_client.update_data(
                 key=token, value={"spend": new_spend}, table_name="key"
@@ -1381,7 +1380,7 @@ async def update_database(
             # Calculate the new cost by adding the existing cost and response_cost
             new_spend = existing_spend + response_cost

-            verbose_proxy_logger.debug(f"new cost: {new_spend}")
+            verbose_proxy_logger.debug("new cost: %s", new_spend)
             # Update the cost column for the given token
             await custom_db_client.update_data(
                 key=token, value={"spend": new_spend}, table_name="key"
@@ -1430,7 +1429,7 @@ async def update_cache(
             hashed_token = hash_token(token=token)
         else:
             hashed_token = token
-        verbose_proxy_logger.debug(f"_update_key_cache: hashed_token={hashed_token}")
+        verbose_proxy_logger.debug("_update_key_cache: hashed_token=%s", hashed_token)
         existing_spend_obj = await user_api_key_cache.async_get_cache(key=hashed_token)
         verbose_proxy_logger.debug(
             f"_update_key_cache: existing_spend_obj={existing_spend_obj}"
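Note that some context lines keep their f-strings (the `existing_spend_obj` message just above, and the `kwargs stream:` message earlier). Where such a message is genuinely expensive to build, the usual stdlib escape hatch is an explicit level guard; a sketch with a placeholder value, not code from this commit:

    import logging

    logger = logging.getLogger("proxy")
    existing_spend_obj = {"spend": 0.42}  # placeholder for the cached object

    # The f-string is only built when a DEBUG record would actually be emitted.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(f"_update_key_cache: existing_spend_obj={existing_spend_obj}")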
@@ -1805,7 +1804,7 @@ class ProxyConfig:

         cache_type = cache_params.get("type", "redis")

-        verbose_proxy_logger.debug(f"passed cache type={cache_type}")
+        verbose_proxy_logger.debug("passed cache type=%s", cache_type)

         if (
             cache_type == "redis" or cache_type == "redis-semantic"
@@ -2091,9 +2090,9 @@ class ProxyConfig:
         ### CONNECT TO DATABASE ###
         database_url = general_settings.get("database_url", None)
         if database_url and database_url.startswith("os.environ/"):
-            verbose_proxy_logger.debug(f"GOING INTO LITELLM.GET_SECRET!")
+            verbose_proxy_logger.debug("GOING INTO LITELLM.GET_SECRET!")
             database_url = litellm.get_secret(database_url)
-            verbose_proxy_logger.debug(f"RETRIEVED DB URL: {database_url}")
+            verbose_proxy_logger.debug("RETRIEVED DB URL: %s", database_url)
         ### MASTER KEY ###
         master_key = general_settings.get(
             "master_key", litellm.get_secret("LITELLM_MASTER_KEY", None)
@@ -2412,11 +2411,11 @@ async def generate_key_helper_fn(
             return key_data

         ## CREATE KEY
-        verbose_proxy_logger.debug(f"prisma_client: Creating Key={key_data}")
+        verbose_proxy_logger.debug("prisma_client: Creating Key= %s", key_data)
         await prisma_client.insert_data(data=key_data, table_name="key")
     elif custom_db_client is not None:
         ## CREATE USER (If necessary)
-        verbose_proxy_logger.debug(f"CustomDBClient: Creating User={user_data}")
+        verbose_proxy_logger.debug("CustomDBClient: Creating User= %s", user_data)
         user_row = await custom_db_client.insert_data(
             value=user_data, table_name="user"
         )
@@ -2582,7 +2581,7 @@ async def initialize(
 def data_generator(response):
     verbose_proxy_logger.debug("inside generator")
     for chunk in response:
-        verbose_proxy_logger.debug(f"returned chunk: {chunk}")
+        verbose_proxy_logger.debug("returned chunk: %s", chunk)
         try:
             yield f"data: {json.dumps(chunk.dict())}\n\n"
         except:
@@ -2686,7 +2685,7 @@ async def startup_event():

     ### LOAD CONFIG ###
     worker_config = litellm.get_secret("WORKER_CONFIG")
-    verbose_proxy_logger.debug(f"worker_config: {worker_config}")
+    verbose_proxy_logger.debug("worker_config: %s", worker_config)
     # check if it's a valid file path
     if os.path.isfile(worker_config):
         if proxy_config.is_yaml(config_file_path=worker_config):
@@ -2721,13 +2720,14 @@ async def startup_event():

     if prompt_injection_detection_obj is not None:
         prompt_injection_detection_obj.update_environment(router=llm_router)
-    verbose_proxy_logger.debug(f"prisma client - {prisma_client}")
+
+    verbose_proxy_logger.debug("prisma_client: %s", prisma_client)
     if prisma_client is not None:
         await prisma_client.connect()

-    verbose_proxy_logger.debug(f"custom_db_client client - {custom_db_client}")
+    verbose_proxy_logger.debug("custom_db_client client - %s", custom_db_client)
     if custom_db_client is not None:
-        verbose_proxy_logger.debug(f"custom_db_client connecting - {custom_db_client}")
+        verbose_proxy_logger.debug("custom_db_client: connecting %s", custom_db_client)
         await custom_db_client.connect()

     if prisma_client is not None and master_key is not None:
@@ -2969,7 +2969,7 @@ async def completion(
         else:
             model_id = ""

-        verbose_proxy_logger.debug(f"final response: {response}")
+        verbose_proxy_logger.debug("final response: %s", response)
         if (
             "stream" in data and data["stream"] == True
         ):  # use generate_responses to stream responses
@@ -2987,9 +2987,10 @@ async def completion(
             fastapi_response.headers["x-litellm-model-id"] = model_id
         return response
     except Exception as e:
-        verbose_proxy_logger.debug(f"EXCEPTION RAISED IN PROXY MAIN.PY")
+        verbose_proxy_logger.debug("EXCEPTION RAISED IN PROXY MAIN.PY")
         verbose_proxy_logger.debug(
-            f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
+            "\033[1;31mAn error occurred: %s\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`",
+            e,
         )
         traceback.print_exc()
         error_traceback = traceback.format_exc()
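The exception hunk above shows that %-style calls take the message and its arguments as separate parameters, which also keeps multi-line calls tidy. For completeness, `logging` can attach the traceback itself via `exc_info`; a sketch with stand-in names, not part of this commit:

    import logging

    logger = logging.getLogger("proxy")

    try:
        raise ValueError("bad request")  # stand-in failure
    except Exception as e:
        # Message and argument stay separate; formatting is deferred.
        logger.debug("An error occurred: %s", e)
        # Alternative: let logging capture the active exception's traceback.
        logger.debug("An error occurred", exc_info=True)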
@@ -3068,7 +3069,7 @@ async def chat_completion(
             cache_dict = parse_cache_control(cache_control_header)
             data["ttl"] = cache_dict.get("s-maxage")

-        verbose_proxy_logger.debug(f"receiving data: {data}")
+        verbose_proxy_logger.debug("receiving data: %s", data)
         data["model"] = (
             general_settings.get("completion_model", None)  # server default
             or user_model  # model name passed via cli args
@@ -3229,18 +3230,6 @@ async def chat_completion(
             if llm_model_list is not None
             else []
         )
-        if llm_router is not None and data.get("model", "") in router_model_names:
-            verbose_proxy_logger.debug("Results from router")
-            verbose_proxy_logger.debug("\nRouter stats")
-            verbose_proxy_logger.debug("\nTotal Calls made")
-            for key, value in llm_router.total_calls.items():
-                verbose_proxy_logger.debug(f"{key}: {value}")
-            verbose_proxy_logger.debug("\nSuccess Calls made")
-            for key, value in llm_router.success_calls.items():
-                verbose_proxy_logger.debug(f"{key}: {value}")
-            verbose_proxy_logger.debug("\nFail Calls made")
-            for key, value in llm_router.fail_calls.items():
-                verbose_proxy_logger.debug(f"{key}: {value}")
         if user_debug:
             traceback.print_exc()

@@ -6361,9 +6350,9 @@ async def add_new_model(model_params: ModelParams):
         # Load existing config
         config = await proxy_config.get_config()

-        verbose_proxy_logger.debug(f"User config path: {user_config_file_path}")
+        verbose_proxy_logger.debug("User config path: %s", user_config_file_path)

-        verbose_proxy_logger.debug(f"Loaded config: {config}")
+        verbose_proxy_logger.debug("Loaded config: %s", config)
         # Add the new model to the config
         model_info = model_params.model_info.json()
         model_info = {k: v for k, v in model_info.items() if v is not None}
@@ -6375,7 +6364,7 @@ async def add_new_model(model_params: ModelParams):
             }
         )

-        verbose_proxy_logger.debug(f"updated model list: {config['model_list']}")
+        verbose_proxy_logger.debug("updated model list: %s", config["model_list"])

         # Save new config
         await proxy_config.save_config(new_config=config)
@@ -6475,7 +6464,7 @@ async def model_info_v2(
         # don't return the api key
         model["litellm_params"].pop("api_key", None)

-    verbose_proxy_logger.debug(f"all_models: {all_models}")
+    verbose_proxy_logger.debug("all_models: %s", all_models)
     return {"data": all_models}

@@ -6668,7 +6657,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
             user_api_key_dict=user_api_key_dict, data=data, call_type="completion"
         )

-        verbose_proxy_logger.debug(f"_litellm_chat_completions_worker started")
+        verbose_proxy_logger.debug("_litellm_chat_completions_worker started")
         ### ROUTE THE REQUEST ###
         router_model_names = (
             [m["model_name"] for m in llm_model_list]
@@ -6694,7 +6683,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
         else:  # router is not set
             response = await litellm.acompletion(**data)

-        verbose_proxy_logger.debug(f"final response: {response}")
+        verbose_proxy_logger.debug("final response: %s", response)
         return response
     except HTTPException as e:
         verbose_proxy_logger.debug(
@@ -6704,7 +6693,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
             e.status_code == 429
             and "Max parallel request limit reached" in e.detail
         ):
-            verbose_proxy_logger.debug(f"Max parallel request limit reached!")
+            verbose_proxy_logger.debug("Max parallel request limit reached!")
             timeout = litellm._calculate_retry_after(
                 remaining_retries=3, max_retries=3, min_timeout=1
             )
@@ -7071,7 +7060,7 @@ def get_image():
     default_logo = os.path.join(current_dir, "logo.jpg")

     logo_path = os.getenv("UI_LOGO_PATH", default_logo)
-    verbose_proxy_logger.debug(f"Reading logo from {logo_path}")
+    verbose_proxy_logger.debug("Reading logo from path: %s", logo_path)

     # Check if the logo path is an HTTP/HTTPS URL
     if logo_path.startswith(("http://", "https://")):
@@ -7249,11 +7238,11 @@ async def auth_callback(request: Request):
             allow_insecure_http=True,
             scope=generic_scope,
         )
-        verbose_proxy_logger.debug(f"calling generic_sso.verify_and_process")
+        verbose_proxy_logger.debug("calling generic_sso.verify_and_process")
         result = await generic_sso.verify_and_process(
             request, params={"include_client_id": generic_include_client_id}
         )
-        verbose_proxy_logger.debug(f"generic result: {result}")
+        verbose_proxy_logger.debug("generic result: %s", result)

         # User is Authe'd in - generate key for the UI to access Proxy
         user_email = getattr(result, "email", None)
@@ -306,7 +306,7 @@ class ProxyLogging:

             # check if crossed budget
             if user_current_spend >= user_max_budget:
-                verbose_proxy_logger.debug(f"Budget Crossed for {user_info}")
+                verbose_proxy_logger.debug("Budget Crossed for %s", user_info)
                 message = "Budget Crossed for" + user_info
                 await self.alerting_handler(
                     message=message,
@@ -1061,7 +1061,7 @@ class PrismaClient:
         Add a key to the database. If it already exists, do nothing.
         """
         try:
-            verbose_proxy_logger.debug(f"PrismaClient: insert_data: {data}")
+            verbose_proxy_logger.debug("PrismaClient: insert_data: %s", data)
             if table_name == "key":
                 token = data["token"]
                 hashed_token = self.hash_token(token=token)
@@ -1405,7 +1405,7 @@ class PrismaClient:
                 deleted_tokens = await self.db.litellm_verificationtoken.delete_many(
                     where=filter_query  # type: ignore
                 )
-                verbose_proxy_logger.debug(f"deleted_tokens: {deleted_tokens}")
+                verbose_proxy_logger.debug("deleted_tokens: %s", deleted_tokens)
                 return {"deleted_keys": deleted_tokens}
             elif (
                 table_name == "team"
@@ -1768,7 +1768,7 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
         "api_base": litellm_params.get("api_base", ""),
     }

-    verbose_proxy_logger.debug(f"SpendTable: created payload - payload: {payload}\n\n")
+    verbose_proxy_logger.debug("SpendTable: created payload - payload: %s\n\n", payload)
     json_fields = [
         field
         for field, field_type in LiteLLM_SpendLogs.__annotations__.items()