Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
Merge branch 'main' into litellm_global_spend_updates

Commit 6501fdb76e
11 changed files with 166 additions and 24 deletions
@@ -626,6 +626,12 @@ async def track_cost_callback(
                 "user_api_key_user_id", None
             )

+            if kwargs.get("cache_hit", False) == True:
+                response_cost = 0.0
+                verbose_proxy_logger.info(
+                    f"Cache Hit: response_cost {response_cost}, for user_id {user_id}"
+                )
+
             verbose_proxy_logger.info(
                 f"response_cost {response_cost}, for user_id {user_id}"
             )
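Note on the hunk above: it zeroes out response_cost whenever the request was served from LiteLLM's cache, so cache hits stop counting against a user's spend. A minimal sketch of that billing rule, using a hypothetical billable_cost helper (LiteLLM does this inline in track_cost_callback, not in a helper):

    # Sketch only: the billing rule the hunk above adds. billable_cost is an
    # illustrative name, not LiteLLM's API.
    import logging

    verbose_proxy_logger = logging.getLogger("proxy")

    def billable_cost(response_cost: float, user_id: str, **kwargs) -> float:
        """Return the cost to record: 0.0 for cache hits, else the real cost."""
        if kwargs.get("cache_hit", False) is True:
            response_cost = 0.0
            verbose_proxy_logger.info(
                f"Cache Hit: response_cost {response_cost}, for user_id {user_id}"
            )
        verbose_proxy_logger.info(f"response_cost {response_cost}, for user_id {user_id}")
        return response_cost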
@@ -1429,8 +1435,6 @@ async def initialize(
         verbose_proxy_logger.setLevel(
             level=logging.DEBUG
         ) # set proxy logs to debug
-        litellm.set_verbose = True
-
     dynamic_config = {"general": {}, user_model: {}}
     if config:
         (
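Note on the initialize hunk: the header counts (8 lines before, 6 after) mean two lines were removed and none added, and the only removal that keeps the code syntactically valid is the litellm.set_verbose = True line plus its trailing blank. So --debug still raises the proxy logger to DEBUG but no longer forces global verbose mode. A sketch of the wiring that remains, assuming a standard logging.Logger:

    # Sketch of the surviving debug path: raise the proxy logger to DEBUG
    # when the server starts with --debug. Names mirror the diff above.
    import logging

    verbose_proxy_logger = logging.getLogger("litellm.proxy")

    def configure_debug(debug: bool) -> None:
        if debug:
            verbose_proxy_logger.setLevel(
                level=logging.DEBUG
            )  # set proxy logs to debug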
@@ -1956,6 +1960,8 @@ async def chat_completion(
         else: # router is not set
             response = await litellm.acompletion(**data)

+        # Post Call Processing
+        data["litellm_status"] = "success" # used for alerting
         if hasattr(response, "_hidden_params"):
             model_id = response._hidden_params.get("model_id", None) or ""
         else:
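Note on the litellm_status flag: chat_completion here, and the embeddings and image_generation hunks below, all mark the request's data dict as "success" once the call returns. A background alerting task scheduled at request start can then tell a finished request from a hung one by checking the flag. A hedged sketch of that pattern (illustrative names, not LiteLLM's internals):

    # Illustrative watchdog: alert only if the shared request dict was never
    # marked "success" before the timeout elapsed.
    import asyncio

    async def hanging_request_watchdog(data: dict, timeout: float) -> None:
        await asyncio.sleep(timeout)
        if data.get("litellm_status") != "success":
            print(f"ALERT: request still running after {timeout}s")

    async def handle(data: dict) -> str:
        asyncio.create_task(hanging_request_watchdog(data, timeout=2.0))
        await asyncio.sleep(0.1)  # stand-in for the actual model call
        data["litellm_status"] = "success"  # used for alerting
        return "ok"

    asyncio.run(handle({"model": "gpt-3.5-turbo"}))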
@@ -2141,6 +2147,7 @@ async def embeddings(
         response = await litellm.aembedding(**data)

         ### ALERTING ###
+        data["litellm_status"] = "success" # used for alerting
         end_time = time.time()
         asyncio.create_task(
             proxy_logging_obj.response_taking_too_long(
@@ -2256,6 +2263,7 @@ async def image_generation(
         response = await litellm.aimage_generation(**data)

         ### ALERTING ###
+        data["litellm_status"] = "success" # used for alerting
         end_time = time.time()
         asyncio.create_task(
             proxy_logging_obj.response_taking_too_long(
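Note on the two alerting hunks: both embeddings and image_generation schedule proxy_logging_obj.response_taking_too_long with asyncio.create_task, fire-and-forget, so the endpoint returns without waiting on the slow-response check. A minimal sketch of that scheduling; the real signature of response_taking_too_long lives in LiteLLM's proxy logging module, and the version below is an assumption for illustration:

    # Fire-and-forget sketch: schedule the slow-response check without
    # awaiting it, so alerting never delays returning the response.
    import asyncio
    import time

    async def response_taking_too_long(start_time: float, end_time: float) -> None:
        # Hypothetical stand-in for proxy_logging_obj.response_taking_too_long.
        if end_time - start_time > 10.0:
            print(f"ALERT: request took {end_time - start_time:.1f}s")

    async def endpoint() -> str:
        start_time = time.time()
        await asyncio.sleep(0.05)  # stand-in for litellm.aembedding(**data)
        end_time = time.time()
        asyncio.create_task(response_taking_too_long(start_time, end_time))
        return "response"

    asyncio.run(endpoint())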