(feat) stop eagerly evaluating f-strings

Ishaan Jaff 2024-03-25 09:01:42 -07:00
parent 9e9de7f6e2
commit dad4bd58bc
3 changed files with 42 additions and 51 deletions
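
The change applied throughout this diff is the standard library logging idiom: pass a "%s" template and its arguments to the logger separately instead of a pre-rendered f-string, so interpolation is deferred until a handler actually emits the record. A minimal sketch of the difference, using a throwaway logger rather than the project's verbose_proxy_logger:

import logging

logger = logging.getLogger("proxy_demo")  # stand-in for verbose_proxy_logger
logger.setLevel(logging.INFO)             # DEBUG records are filtered out here

table = {"rows": 0}  # placeholder for an object that is expensive to format

# Eager: the f-string is rendered before debug() runs, so the formatting
# (and any costly __repr__) happens even though the record is dropped.
logger.debug(f"DynamoDB Wrapper - Created Table, {table}")

# Lazy: debug() stores the template and argument and only interpolates
# them if the record passes the level and handler checks.
logger.debug("DynamoDB Wrapper - Created Table, %s", table)

The saving matters most for calls that log large objects (request kwargs, DB rows, full responses), where building the f-string forces a full repr on every request even with debug logging off. Note that "%s" only defers the formatting itself; an argument that is expensive to compute purely for logging would still need an explicit logger.isEnabledFor(logging.DEBUG) guard.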


@@ -123,7 +123,9 @@ class DynamoDBWrapper(CustomDB):
error_occurred = False
verbose_proxy_logger.debug("DynamoDB Wrapper - Creating User Table")
table = client.table(self.database_arguments.user_table_name)
verbose_proxy_logger.debug(f"DynamoDB Wrapper - Created Table, {table}")
verbose_proxy_logger.debug(
"DynamoDB Wrapper - Created Table, %s", table
)
if not await table.exists():
verbose_proxy_logger.debug(
f"DynamoDB Wrapper - {table} does not exist"


@@ -520,7 +520,7 @@ async def user_api_key_auth(
valid_token = user_api_key_cache.get_cache(key=api_key)
if valid_token is None:
## check db
verbose_proxy_logger.debug(f"api key: {api_key}")
verbose_proxy_logger.debug("api key: %s", api_key)
if prisma_client is not None:
valid_token = await prisma_client.get_data(
token=api_key, table_name="combined_view"
@@ -535,9 +535,9 @@ async def user_api_key_auth(
valid_token = await custom_db_client.get_data(
key=original_api_key, table_name="key"
)
verbose_proxy_logger.debug(f"Token from db: {valid_token}")
verbose_proxy_logger.debug("Token from db: %s", valid_token)
elif valid_token is not None:
verbose_proxy_logger.debug(f"API Key Cache Hit!")
verbose_proxy_logger.debug("API Key Cache Hit!")
if valid_token:
# Got Valid Token from Cache, DB
# Run checks for
@@ -1003,7 +1003,6 @@ async def user_api_key_auth(
else:
raise Exception()
except Exception as e:
# verbose_proxy_logger.debug(f"An exception occurred - {traceback.format_exc()}")
traceback.print_exc()
if isinstance(e, HTTPException):
raise ProxyException(
@@ -1091,11 +1090,11 @@ async def _PROXY_track_cost_callback(
start_time=None,
end_time=None, # start/end time for completion
):
verbose_proxy_logger.debug(f"INSIDE _PROXY_track_cost_callback")
verbose_proxy_logger.debug("INSIDE _PROXY_track_cost_callback")
global prisma_client, custom_db_client
try:
# check if it has collected an entire stream response
verbose_proxy_logger.debug(f"Proxy: In track_cost_callback for {kwargs}")
verbose_proxy_logger.debug("Proxy: In track_cost_callback for: ", extra=kwargs)
verbose_proxy_logger.debug(
f"kwargs stream: {kwargs.get('stream', None)} + complete streaming response: {kwargs.get('complete_streaming_response', None)}"
)
@@ -1163,7 +1162,7 @@ async def _PROXY_track_cost_callback(
error_message=error_msg,
)
)
verbose_proxy_logger.debug(f"error in tracking cost callback - {error_msg}")
verbose_proxy_logger.debug("error in tracking cost callback - %s", e)
async def update_database(
@@ -1314,7 +1313,7 @@ async def update_database(
# Calculate the new cost by adding the existing cost and response_cost
new_spend = existing_spend + response_cost
verbose_proxy_logger.debug(f"new cost: {new_spend}")
verbose_proxy_logger.debug("new cost: %s", new_spend)
# Update the cost column for the given token
await custom_db_client.update_data(
key=token, value={"spend": new_spend}, table_name="key"
@@ -1381,7 +1380,7 @@ async def update_database(
# Calculate the new cost by adding the existing cost and response_cost
new_spend = existing_spend + response_cost
verbose_proxy_logger.debug(f"new cost: {new_spend}")
verbose_proxy_logger.debug("new cost: %s", new_spend)
# Update the cost column for the given token
await custom_db_client.update_data(
key=token, value={"spend": new_spend}, table_name="key"
@@ -1430,7 +1429,7 @@ async def update_cache(
hashed_token = hash_token(token=token)
else:
hashed_token = token
verbose_proxy_logger.debug(f"_update_key_cache: hashed_token={hashed_token}")
verbose_proxy_logger.debug("_update_key_cache: hashed_token=%s", hashed_token)
existing_spend_obj = await user_api_key_cache.async_get_cache(key=hashed_token)
verbose_proxy_logger.debug(
f"_update_key_cache: existing_spend_obj={existing_spend_obj}"
@@ -1805,7 +1804,7 @@ class ProxyConfig:
cache_type = cache_params.get("type", "redis")
verbose_proxy_logger.debug(f"passed cache type={cache_type}")
verbose_proxy_logger.debug("passed cache type=%s", cache_type)
if (
cache_type == "redis" or cache_type == "redis-semantic"
@@ -2091,9 +2090,9 @@ class ProxyConfig:
### CONNECT TO DATABASE ###
database_url = general_settings.get("database_url", None)
if database_url and database_url.startswith("os.environ/"):
verbose_proxy_logger.debug(f"GOING INTO LITELLM.GET_SECRET!")
verbose_proxy_logger.debug("GOING INTO LITELLM.GET_SECRET!")
database_url = litellm.get_secret(database_url)
verbose_proxy_logger.debug(f"RETRIEVED DB URL: {database_url}")
verbose_proxy_logger.debug("RETRIEVED DB URL: %s", database_url)
### MASTER KEY ###
master_key = general_settings.get(
"master_key", litellm.get_secret("LITELLM_MASTER_KEY", None)
@@ -2412,11 +2411,11 @@ async def generate_key_helper_fn(
return key_data
## CREATE KEY
verbose_proxy_logger.debug(f"prisma_client: Creating Key={key_data}")
verbose_proxy_logger.debug("prisma_client: Creating Key= %s", key_data)
await prisma_client.insert_data(data=key_data, table_name="key")
elif custom_db_client is not None:
## CREATE USER (If necessary)
verbose_proxy_logger.debug(f"CustomDBClient: Creating User={user_data}")
verbose_proxy_logger.debug("CustomDBClient: Creating User= %s", user_data)
user_row = await custom_db_client.insert_data(
value=user_data, table_name="user"
)
@@ -2582,7 +2581,7 @@ async def initialize(
def data_generator(response):
verbose_proxy_logger.debug("inside generator")
for chunk in response:
verbose_proxy_logger.debug(f"returned chunk: {chunk}")
verbose_proxy_logger.debug("returned chunk: %s", chunk)
try:
yield f"data: {json.dumps(chunk.dict())}\n\n"
except:
@@ -2686,7 +2685,7 @@ async def startup_event():
### LOAD CONFIG ###
worker_config = litellm.get_secret("WORKER_CONFIG")
verbose_proxy_logger.debug(f"worker_config: {worker_config}")
verbose_proxy_logger.debug("worker_config: %s", worker_config)
# check if it's a valid file path
if os.path.isfile(worker_config):
if proxy_config.is_yaml(config_file_path=worker_config):
@@ -2721,13 +2720,14 @@ async def startup_event():
if prompt_injection_detection_obj is not None:
prompt_injection_detection_obj.update_environment(router=llm_router)
verbose_proxy_logger.debug(f"prisma client - {prisma_client}")
verbose_proxy_logger.debug("prisma_client: %s", prisma_client)
if prisma_client is not None:
await prisma_client.connect()
verbose_proxy_logger.debug(f"custom_db_client client - {custom_db_client}")
verbose_proxy_logger.debug("custom_db_client client - %s", custom_db_client)
if custom_db_client is not None:
verbose_proxy_logger.debug(f"custom_db_client connecting - {custom_db_client}")
verbose_proxy_logger.debug("custom_db_client: connecting %s", custom_db_client)
await custom_db_client.connect()
if prisma_client is not None and master_key is not None:
@@ -2969,7 +2969,7 @@ async def completion(
else:
model_id = ""
verbose_proxy_logger.debug(f"final response: {response}")
verbose_proxy_logger.debug("final response: %s", response)
if (
"stream" in data and data["stream"] == True
): # use generate_responses to stream responses
@@ -2987,9 +2987,10 @@ async def completion(
fastapi_response.headers["x-litellm-model-id"] = model_id
return response
except Exception as e:
verbose_proxy_logger.debug(f"EXCEPTION RAISED IN PROXY MAIN.PY")
verbose_proxy_logger.debug("EXCEPTION RAISED IN PROXY MAIN.PY")
verbose_proxy_logger.debug(
f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
"\033[1;31mAn error occurred: %s\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`",
e,
)
traceback.print_exc()
error_traceback = traceback.format_exc()
@@ -3068,7 +3069,7 @@ async def chat_completion(
cache_dict = parse_cache_control(cache_control_header)
data["ttl"] = cache_dict.get("s-maxage")
verbose_proxy_logger.debug(f"receiving data: {data}")
verbose_proxy_logger.debug("receiving data: %s", data)
data["model"] = (
general_settings.get("completion_model", None) # server default
or user_model # model name passed via cli args
@@ -3229,18 +3230,6 @@ async def chat_completion(
if llm_model_list is not None
else []
)
if llm_router is not None and data.get("model", "") in router_model_names:
verbose_proxy_logger.debug("Results from router")
verbose_proxy_logger.debug("\nRouter stats")
verbose_proxy_logger.debug("\nTotal Calls made")
for key, value in llm_router.total_calls.items():
verbose_proxy_logger.debug(f"{key}: {value}")
verbose_proxy_logger.debug("\nSuccess Calls made")
for key, value in llm_router.success_calls.items():
verbose_proxy_logger.debug(f"{key}: {value}")
verbose_proxy_logger.debug("\nFail Calls made")
for key, value in llm_router.fail_calls.items():
verbose_proxy_logger.debug(f"{key}: {value}")
if user_debug:
traceback.print_exc()
@@ -6361,9 +6350,9 @@ async def add_new_model(model_params: ModelParams):
# Load existing config
config = await proxy_config.get_config()
verbose_proxy_logger.debug(f"User config path: {user_config_file_path}")
verbose_proxy_logger.debug("User config path: %s", user_config_file_path)
verbose_proxy_logger.debug(f"Loaded config: {config}")
verbose_proxy_logger.debug(f"Loaded config: %s", config)
# Add the new model to the config
model_info = model_params.model_info.json()
model_info = {k: v for k, v in model_info.items() if v is not None}
@@ -6375,7 +6364,7 @@ async def add_new_model(model_params: ModelParams):
}
)
verbose_proxy_logger.debug(f"updated model list: {config['model_list']}")
verbose_proxy_logger.debug(f"updated model list: %s", config["model_list"])
# Save new config
await proxy_config.save_config(new_config=config)
@@ -6475,7 +6464,7 @@ async def model_info_v2(
# don't return the api key
model["litellm_params"].pop("api_key", None)
verbose_proxy_logger.debug(f"all_models: {all_models}")
verbose_proxy_logger.debug("all_models: %s", all_models)
return {"data": all_models}
@@ -6668,7 +6657,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
user_api_key_dict=user_api_key_dict, data=data, call_type="completion"
)
verbose_proxy_logger.debug(f"_litellm_chat_completions_worker started")
verbose_proxy_logger.debug("_litellm_chat_completions_worker started")
### ROUTE THE REQUEST ###
router_model_names = (
[m["model_name"] for m in llm_model_list]
@@ -6694,7 +6683,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
else: # router is not set
response = await litellm.acompletion(**data)
verbose_proxy_logger.debug(f"final response: {response}")
verbose_proxy_logger.debug("final response: {response}")
return response
except HTTPException as e:
verbose_proxy_logger.debug(
@@ -6704,7 +6693,7 @@ async def _litellm_chat_completions_worker(data, user_api_key_dict):
e.status_code == 429
and "Max parallel request limit reached" in e.detail
):
verbose_proxy_logger.debug(f"Max parallel request limit reached!")
verbose_proxy_logger.debug("Max parallel request limit reached!")
timeout = litellm._calculate_retry_after(
remaining_retries=3, max_retries=3, min_timeout=1
)
@@ -7071,7 +7060,7 @@ def get_image():
default_logo = os.path.join(current_dir, "logo.jpg")
logo_path = os.getenv("UI_LOGO_PATH", default_logo)
verbose_proxy_logger.debug(f"Reading logo from {logo_path}")
verbose_proxy_logger.debug("Reading logo from path: %s", logo_path)
# Check if the logo path is an HTTP/HTTPS URL
if logo_path.startswith(("http://", "https://")):
@@ -7249,11 +7238,11 @@ async def auth_callback(request: Request):
allow_insecure_http=True,
scope=generic_scope,
)
verbose_proxy_logger.debug(f"calling generic_sso.verify_and_process")
verbose_proxy_logger.debug("calling generic_sso.verify_and_process")
result = await generic_sso.verify_and_process(
request, params={"include_client_id": generic_include_client_id}
)
verbose_proxy_logger.debug(f"generic result: {result}")
verbose_proxy_logger.debug("generic result: %s", result)
# User is Authe'd in - generate key for the UI to access Proxy
user_email = getattr(result, "email", None)


@@ -306,7 +306,7 @@ class ProxyLogging:
# check if crossed budget
if user_current_spend >= user_max_budget:
verbose_proxy_logger.debug(f"Budget Crossed for {user_info}")
verbose_proxy_logger.debug("Budget Crossed for %s", user_info)
message = "Budget Crossed for" + user_info
await self.alerting_handler(
message=message,
@@ -1061,7 +1061,7 @@ class PrismaClient:
Add a key to the database. If it already exists, do nothing.
"""
try:
verbose_proxy_logger.debug(f"PrismaClient: insert_data: {data}")
verbose_proxy_logger.debug("PrismaClient: insert_data: %s", data)
if table_name == "key":
token = data["token"]
hashed_token = self.hash_token(token=token)
@@ -1405,7 +1405,7 @@ class PrismaClient:
deleted_tokens = await self.db.litellm_verificationtoken.delete_many(
where=filter_query # type: ignore
)
verbose_proxy_logger.debug(f"deleted_tokens: {deleted_tokens}")
verbose_proxy_logger.debug("deleted_tokens: %s", deleted_tokens)
return {"deleted_keys": deleted_tokens}
elif (
table_name == "team"
@@ -1768,7 +1768,7 @@ def get_logging_payload(kwargs, response_obj, start_time, end_time):
"api_base": litellm_params.get("api_base", ""),
}
verbose_proxy_logger.debug(f"SpendTable: created payload - payload: {payload}\n\n")
verbose_proxy_logger.debug("SpendTable: created payload - payload: %s\n\n", payload)
json_fields = [
field
for field, field_type in LiteLLM_SpendLogs.__annotations__.items()