LiteLLM Minor Fixes & Improvements (01/10/2025) - p1 (#7670)

* test(test_get_model_info.py): add unit test confirming router deployment updates global 'get_model_info'
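
  For context, a minimal sketch of the behavior that test asserts (model name, key, and costs here are illustrative, not taken from the test itself): a deployment added to the Router with `model_info` should become visible through the global `litellm.get_model_info()` lookup.

  ```python
  import litellm
  from litellm import Router

  # Register a deployment with per-deployment model_info (illustrative values).
  router = Router(
      model_list=[
          {
              "model_name": "my-fake-model",
              "litellm_params": {"model": "openai/my-fake-model", "api_key": "fake-key"},
              "model_info": {
                  "input_cost_per_token": 1e-6,
                  "output_cost_per_token": 2e-6,
              },
          }
      ]
  )

  # Per the new test, the global lookup should now reflect the router deployment.
  info = litellm.get_model_info(model="my-fake-model")
  print(info["input_cost_per_token"])
  ```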

* fix(get_supported_openai_params.py): fix custom llm provider 'get_supported_openai_params'

Fixes https://github.com/BerriAI/litellm/issues/7668
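
  A rough sketch of the call path this touches, with placeholder provider and model names; the custom handler and its registration via `litellm.custom_provider_map` are assumptions for illustration, and the exact return value depends on the handler.

  ```python
  import litellm
  from litellm import CustomLLM, get_supported_openai_params

  class MyCustomLLM(CustomLLM):
      # Minimal stub handler; a real provider would implement completion(), etc.
      pass

  # Register the custom provider so litellm can resolve "my-custom-llm/*" models.
  litellm.custom_provider_map = [
      {"provider": "my-custom-llm", "custom_handler": MyCustomLLM()}
  ]

  # The call the fix targets: querying supported OpenAI params for a custom provider.
  params = get_supported_openai_params(
      model="my-custom-llm/my-model",
      custom_llm_provider="my-custom-llm",
  )
  print(params)
  ```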

* docs(azure.md): clarify how azure ad token refresh on proxy works

Closes https://github.com/BerriAI/litellm/issues/7665
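
  Roughly, the auth flow being documented, sketched with the standard `azure-identity` package; the deployment name, endpoint, and api_version are placeholders, and on the proxy the refresh is driven from config rather than from user code like this.

  ```python
  from azure.identity import DefaultAzureCredential, get_bearer_token_provider

  import litellm

  # get_bearer_token_provider returns a callable that fetches a fresh bearer
  # token for the given scope each time it is invoked (requires Azure creds).
  token_provider = get_bearer_token_provider(
      DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
  )

  response = litellm.completion(
      model="azure/my-gpt-4o-deployment",                # placeholder deployment
      api_base="https://my-endpoint.openai.azure.com",   # placeholder endpoint
      api_version="2024-02-15-preview",
      azure_ad_token=token_provider(),                   # pass the current AD token
      messages=[{"role": "user", "content": "hello"}],
  )
  ```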
Krish Dholakia 2025-01-10 17:49:05 -08:00 committed by GitHub
parent 8576ca8ccb
commit a3e65c9bcb
6 changed files with 112 additions and 5 deletions

@@ -1946,16 +1946,15 @@ def register_model(model_cost: Union[str, dict]): # noqa: PLR0915
     for key, value in loaded_model_cost.items():
         ## get model info ##
         try:
-            existing_model: Union[ModelInfo, dict] = get_model_info(model=key)
+            existing_model: dict = cast(dict, get_model_info(model=key))
             model_cost_key = existing_model["key"]
         except Exception:
             existing_model = {}
             model_cost_key = key
         ## override / add new keys to the existing model cost dictionary
-        litellm.model_cost.setdefault(model_cost_key, {}).update(
-            _update_dictionary(existing_model, value)  # type: ignore
-        )
-        verbose_logger.debug(f"{key} added to model cost map")
+        updated_dictionary = _update_dictionary(existing_model, value)
+        litellm.model_cost.setdefault(model_cost_key, {}).update(updated_dictionary)
+        verbose_logger.debug(f"{model_cost_key} added to model cost map")
         # add new model names to provider lists
         if value.get("litellm_provider") == "openai":
             if key not in litellm.open_ai_chat_completion_models:
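
For reference, a minimal sketch of how the patched path gets exercised, using an illustrative model name and prices: `register_model()` merges the entry into `litellm.model_cost` under the resolved key, and `get_model_info()` reads it back.

```python
import litellm

# Register a custom cost entry (values are illustrative).
litellm.register_model(
    {
        "my-custom-gpt": {
            "litellm_provider": "openai",
            "max_tokens": 8192,
            "input_cost_per_token": 1.5e-6,
            "output_cost_per_token": 2e-6,
        }
    }
)

# The entry is now visible via the global model info lookup.
info = litellm.get_model_info(model="my-custom-gpt")
print(info["input_cost_per_token"], info["litellm_provider"])
```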