fix(utils.py): fix cost tracking for vertex ai partner models

This commit is contained in:
Krrish Dholakia 2024-07-30 14:19:39 -07:00
parent 6b17e84204
commit 185a6857f9
3 changed files with 6 additions and 1 deletions

View file

@@ -231,6 +231,9 @@ class Logging:
             ):
                 self.custom_pricing = True
+        if "custom_llm_provider" in self.model_call_details:
+            self.custom_llm_provider = self.model_call_details["custom_llm_provider"]
+
     def _pre_call(self, input, api_key, model=None, additional_args={}):
         """
         Common helper function across the sync + async pre-call function

View file

@@ -940,7 +940,7 @@ async def test_partner_models_httpx(model, sync_mode):
         print(f"response: {response}")
-        assert response._hidden_params["response_cost"] > 0
+        assert isinstance(response._hidden_params["response_cost"], float)
     except litellm.RateLimitError as e:
         pass
     except Exception as e:

View file

@@ -4938,6 +4938,8 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo:
     if custom_llm_provider is not None and custom_llm_provider == "vertex_ai":
         if "meta/" + model in litellm.vertex_llama3_models:
             model = "meta/" + model
+        elif model + "@latest" in litellm.vertex_mistral_models:
+            model = model + "@latest"
     ##########################
     if custom_llm_provider is None:
         # Get custom_llm_provider