mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00

test: fix tests

parent 3e8c8ef507
commit 11e3ee4411

4 changed files with 24 additions and 12 deletions

@@ -589,7 +589,7 @@ async def track_cost_callback(
                 )
             else:
                 raise Exception(
-                    f"Model={kwargs['model']} not in litellm model cost map. Add custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing"
+                    f"Model not in litellm model cost map. Add custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing"
                 )
     except Exception as e:
         verbose_proxy_logger.debug(f"error in tracking cost callback - {str(e)}")
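
The exception above points at litellm's custom-pricing docs. A minimal sketch of registering custom pricing so the cost-map lookup succeeds, assuming litellm.register_model accepts entries shaped like litellm.model_cost (the model name and prices here are hypothetical):

    # Hedged sketch: add a model to litellm's cost map so cost tracking
    # does not raise. Model name and per-token prices are made up.
    import litellm

    litellm.register_model(
        {
            "my-custom-model": {  # hypothetical model name
                "input_cost_per_token": 0.00002,
                "output_cost_per_token": 0.00002,
                "litellm_provider": "openai",
                "mode": "chat",
            }
        }
    )
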
@@ -179,9 +179,11 @@ def test_call_with_key_over_budget(custom_db_client):
     # 5. Make a call with a key over budget, expect to fail
     setattr(litellm.proxy.proxy_server, "custom_db_client", custom_db_client)
     setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
-    from litellm._logging import verbose_proxy_logger
+    from litellm._logging import verbose_proxy_logger, verbose_logger
     import logging

     litellm.set_verbose = True
+    verbose_logger.setLevel(logging.DEBUG)
     verbose_proxy_logger.setLevel(logging.DEBUG)
     try:
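
The test now raises verbosity on both loggers before exercising the over-budget path. The same debug setup in isolation, taken directly from the hunk above:

    # Debug-logging setup mirrored from the test; only the surrounding
    # script context is assumed, the calls themselves appear in the diff.
    import logging

    import litellm
    from litellm._logging import verbose_logger, verbose_proxy_logger

    litellm.set_verbose = True                    # litellm's own debug switch
    verbose_logger.setLevel(logging.DEBUG)        # core library logger
    verbose_proxy_logger.setLevel(logging.DEBUG)  # proxy-specific logger
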
@@ -229,6 +231,7 @@ def test_call_with_key_over_budget(custom_db_client):
                     "user_api_key_user_id": user_id,
                 }
             },
+            "response_cost": 0.00002,
         },
         completion_response=resp,
     )
@@ -301,6 +304,7 @@ def test_call_with_key_over_budget_stream(custom_db_client):
                     "user_api_key_user_id": user_id,
                 }
             },
+            "response_cost": 0.00002,
         },
         completion_response=ModelResponse(),
     )
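
Both custom_db_client tests now inject a fixed "response_cost" into the callback kwargs rather than relying on the model cost map. A hedged reconstruction of the kwargs shape; the key and user values are stand-ins for test fixtures not shown in the diff:

    # Hedged reconstruction of the kwargs these tests build; generated_key,
    # user_id, and the "user_api_key" field are assumed stand-ins.
    generated_key = "sk-test-key"  # assumed fixture value
    user_id = "test-user-id"       # assumed fixture value

    kwargs = {
        "litellm_params": {
            "metadata": {
                "user_api_key": generated_key,  # assumed metadata field
                "user_api_key_user_id": user_id,
            }
        },
        "response_cost": 0.00002,  # injected directly, bypassing the cost map
    }
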
@@ -256,6 +256,7 @@ def test_call_with_key_over_budget(prisma_client):
                     "user_api_key_user_id": user_id,
                 }
             },
+            "response_cost": 0.00002,
         },
         completion_response=resp,
         start_time=datetime.now(),
@@ -331,6 +332,7 @@ def test_call_with_key_over_budget_stream(prisma_client):
                     "user_api_key_user_id": user_id,
                 }
             },
+            "response_cost": 0.00002,
         },
         completion_response=ModelResponse(),
         start_time=datetime.now(),
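
The prisma_client tests make the same response_cost change and pass an explicit start_time. A sketch of driving the callback that way, assuming the async track_cost_callback from the first hunk lives in litellm.proxy.proxy_server and also takes an end_time (neither assumption is visible in this diff). Note the callback swallows its own errors in the except branch from the first hunk, so a failed cost lookup only logs at debug level:

    # Hedged sketch: invoking the cost-tracking callback like the tests do.
    # The import path and the end_time parameter are assumptions.
    import asyncio
    from datetime import datetime

    from litellm import ModelResponse
    from litellm.proxy.proxy_server import track_cost_callback

    kwargs = {"response_cost": 0.00002}  # minimal; fuller shape sketched earlier

    asyncio.run(
        track_cost_callback(
            kwargs=kwargs,
            completion_response=ModelResponse(),  # streaming test passes a bare response
            start_time=datetime.now(),
            end_time=datetime.now(),
        )
    )
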
@@ -1066,17 +1066,23 @@ class Logging:
             self.model_call_details["cache_hit"] = cache_hit
             ## if model in model cost map - log the response cost
             ## else set cost to None
-            if (
-                result is not None
-                and (
-                    isinstance(result, ModelResponse)
-                    or isinstance(result, EmbeddingResponse)
-                )
-                and result.model in litellm.model_cost
+            verbose_logger.debug(f"Model={self.model}; result={result}")
+            if result is not None and (
+                isinstance(result, ModelResponse)
+                or isinstance(result, EmbeddingResponse)
             ):
-                self.model_call_details["response_cost"] = litellm.completion_cost(
-                    completion_response=result,
-                )
+                try:
+                    self.model_call_details["response_cost"] = litellm.completion_cost(
+                        completion_response=result,
+                    )
+                    verbose_logger.debug(
+                        f"Model={self.model}; cost={self.model_call_details['response_cost']}"
+                    )
+                except litellm.NotFoundError as e:
+                    verbose_logger.debug(
+                        f"Model={self.model} not found in completion cost map."
+                    )
+                    self.model_call_details["response_cost"] = None
             else:  # streaming chunks + image gen.
                 self.model_call_details["response_cost"] = None
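
The Logging change replaces the up-front "result.model in litellm.model_cost" check with a try/except around litellm.completion_cost, so unpriced models degrade to a None response_cost instead of skipping cost computation entirely. The same fallback pattern in isolation, using only calls that appear in the hunk:

    # Minimal sketch of the fallback introduced above: return the computed
    # cost for priced models, None when litellm raises NotFoundError.
    from typing import Optional

    import litellm

    def safe_response_cost(result) -> Optional[float]:
        try:
            return litellm.completion_cost(completion_response=result)
        except litellm.NotFoundError:
            return None  # model absent from litellm's cost map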