test: fix tests

This commit is contained in:
Krrish Dholakia 2024-01-22 20:20:17 -08:00
parent 3e8c8ef507
commit 11e3ee4411
4 changed files with 24 additions and 12 deletions

View file

@@ -589,7 +589,7 @@ async def track_cost_callback(
) )
else: else:
raise Exception( raise Exception(
f"Model={kwargs['model']} not in litellm model cost map. Add custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing" f"Model not in litellm model cost map. Add custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing"
) )
except Exception as e: except Exception as e:
verbose_proxy_logger.debug(f"error in tracking cost callback - {str(e)}") verbose_proxy_logger.debug(f"error in tracking cost callback - {str(e)}")

View file

@@ -179,9 +179,11 @@ def test_call_with_key_over_budget(custom_db_client):
# 5. Make a call with a key over budget, expect to fail # 5. Make a call with a key over budget, expect to fail
setattr(litellm.proxy.proxy_server, "custom_db_client", custom_db_client) setattr(litellm.proxy.proxy_server, "custom_db_client", custom_db_client)
setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
from litellm._logging import verbose_proxy_logger from litellm._logging import verbose_proxy_logger, verbose_logger
import logging import logging
litellm.set_verbose = True
verbose_logger.setLevel(logging.DEBUG)
verbose_proxy_logger.setLevel(logging.DEBUG) verbose_proxy_logger.setLevel(logging.DEBUG)
try: try:
@@ -229,6 +231,7 @@ def test_call_with_key_over_budget(custom_db_client):
"user_api_key_user_id": user_id, "user_api_key_user_id": user_id,
} }
}, },
"response_cost": 0.00002,
}, },
completion_response=resp, completion_response=resp,
) )
@@ -301,6 +304,7 @@ def test_call_with_key_over_budget_stream(custom_db_client):
"user_api_key_user_id": user_id, "user_api_key_user_id": user_id,
} }
}, },
"response_cost": 0.00002,
}, },
completion_response=ModelResponse(), completion_response=ModelResponse(),
) )

View file

@@ -256,6 +256,7 @@ def test_call_with_key_over_budget(prisma_client):
"user_api_key_user_id": user_id, "user_api_key_user_id": user_id,
} }
}, },
"response_cost": 0.00002,
}, },
completion_response=resp, completion_response=resp,
start_time=datetime.now(), start_time=datetime.now(),
@@ -331,6 +332,7 @@ def test_call_with_key_over_budget_stream(prisma_client):
"user_api_key_user_id": user_id, "user_api_key_user_id": user_id,
} }
}, },
"response_cost": 0.00002,
}, },
completion_response=ModelResponse(), completion_response=ModelResponse(),
start_time=datetime.now(), start_time=datetime.now(),

View file

@@ -1066,17 +1066,23 @@ class Logging:
self.model_call_details["cache_hit"] = cache_hit self.model_call_details["cache_hit"] = cache_hit
## if model in model cost map - log the response cost ## if model in model cost map - log the response cost
## else set cost to None ## else set cost to None
if ( verbose_logger.debug(f"Model={self.model}; result={result}")
result is not None if result is not None and (
and ( isinstance(result, ModelResponse)
isinstance(result, ModelResponse) or isinstance(result, EmbeddingResponse)
or isinstance(result, EmbeddingResponse)
)
and result.model in litellm.model_cost
): ):
self.model_call_details["response_cost"] = litellm.completion_cost( try:
completion_response=result, self.model_call_details["response_cost"] = litellm.completion_cost(
) completion_response=result,
)
verbose_logger.debug(
f"Model={self.model}; cost={self.model_call_details['response_cost']}"
)
except litellm.NotFoundError as e:
verbose_logger.debug(
f"Model={self.model} not found in completion cost map."
)
self.model_call_details["response_cost"] = None
else: # streaming chunks + image gen. else: # streaming chunks + image gen.
self.model_call_details["response_cost"] = None self.model_call_details["response_cost"] = None