From 2c88ccd98da0ab0d764d26c394ec52a24e94e86f Mon Sep 17 00:00:00 2001
From: HikaruEgashira
Date: Wed, 16 Apr 2025 18:58:11 +0900
Subject: [PATCH] fix: map custom model_name to actual model in
 /spend/calculate for cost calculation

---
 .../spend_tracking/spend_management_endpoints.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/litellm/proxy/spend_tracking/spend_management_endpoints.py b/litellm/proxy/spend_tracking/spend_management_endpoints.py
index 4690b6cbd8..c65cd33c61 100644
--- a/litellm/proxy/spend_tracking/spend_management_endpoints.py
+++ b/litellm/proxy/spend_tracking/spend_management_endpoints.py
@@ -1580,7 +1580,18 @@ async def calculate_spend(request: SpendCalculateRequest):
             else:
                 _cost = completion_cost(model=request.model, messages=request.messages)
         elif request.completion_response is not None:
-            _completion_response = litellm.ModelResponse(**request.completion_response)
+            _completion_response_dict = dict(request.completion_response)
+            _model_name = _completion_response_dict.get("model")
+            _resolved_model = None
+            if llm_router is not None and llm_router.model_list is not None and _model_name is not None:
+                for model in llm_router.model_list:
+                    if model.get("model_name") == _model_name:
+                        litellm_params = model.get("litellm_params", {})
+                        _resolved_model = litellm_params.get("model")
+                        break
+            if _resolved_model:
+                _completion_response_dict["model"] = _resolved_model
+            _completion_response = litellm.ModelResponse(**_completion_response_dict)
             _cost = completion_cost(completion_response=_completion_response)
         else:
             raise HTTPException(