forked from phoenix/litellm-mirror

fix(utils.py): fix cost tracking for vertex ai partner models

parent 6b17e84204
commit 185a6857f9

3 changed files with 6 additions and 1 deletion
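Taken together, the hunks below make cost tracking work for Vertex AI partner models: the Logging setup records custom_llm_provider whenever it appears in model_call_details, get_model_info() normalizes partner-model names ("meta/" prefix for Llama 3, "@latest" suffix for Mistral) so the pricing lookup finds them, and the partner-model test now checks that response_cost is a positive float.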
@@ -231,6 +231,9 @@ class Logging:

        ):
            self.custom_pricing = True

        if "custom_llm_provider" in self.model_call_details:
            self.custom_llm_provider = self.model_call_details["custom_llm_provider"]

    def _pre_call(self, input, api_key, model=None, additional_args={}):
        """
        Common helper function across the sync + async pre-call function
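With custom_llm_provider stored on the logging object, the cost-tracking path can resolve provider-qualified pricing entries. The test hunk that follows exercises this end to end; here is a minimal usage sketch in the same spirit (the model name is illustrative, and the comment restates the test's assertions rather than guaranteed output):

import litellm

# Illustrative Vertex AI partner model; any Llama 3 / Mistral partner model applies.
response = litellm.completion(
    model="vertex_ai/meta/llama3-405b-instruct-maas",
    messages=[{"role": "user", "content": "hi"}],
)

# After this fix, the hidden params should carry a non-zero float cost.
print(response._hidden_params["response_cost"])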
@@ -940,7 +940,7 @@ async def test_partner_models_httpx(model, sync_mode):

        print(f"response: {response}")

        assert response._hidden_params["response_cost"] > 0
        assert isinstance(response._hidden_params["response_cost"], float)
    except litellm.RateLimitError as e:
        pass
    except Exception as e:
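The utils.py hunk below fixes the name mismatch that broke the pricing lookup: Vertex AI partner models are keyed in litellm's model map as "meta/<model>" (Llama 3) or "<model>@latest" (Mistral), while callers may pass the bare model name, so get_model_info() rewrites the name before looking it up.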
@@ -4938,6 +4938,8 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo:

    if custom_llm_provider is not None and custom_llm_provider == "vertex_ai":
        if "meta/" + model in litellm.vertex_llama3_models:
            model = "meta/" + model
        elif model + "@latest" in litellm.vertex_mistral_models:
            model = model + "@latest"
    ##########################
    if custom_llm_provider is None:
        # Get custom_llm_provider
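A self-contained sketch of that normalization, using stand-in registries (the real code consults litellm.vertex_llama3_models and litellm.vertex_mistral_models; the entries below are examples, not litellm's full lists):

# Stand-in registries for illustration only.
VERTEX_LLAMA3_MODELS = {"meta/llama3-405b-instruct-maas"}
VERTEX_MISTRAL_MODELS = {"mistral-large@latest"}

def normalize_vertex_partner_model(model: str) -> str:
    # Map a bare partner-model name onto the key used in the pricing map.
    if "meta/" + model in VERTEX_LLAMA3_MODELS:
        return "meta/" + model
    if model + "@latest" in VERTEX_MISTRAL_MODELS:
        return model + "@latest"
    return model

assert normalize_vertex_partner_model("llama3-405b-instruct-maas") == "meta/llama3-405b-instruct-maas"
assert normalize_vertex_partner_model("mistral-large") == "mistral-large@latest"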