fix(router.py): fix get_router_model_info for azure models

Krrish Dholakia 2024-06-28 22:13:29 -07:00
parent 9556bfda81
commit c9a424d28d
3 changed files with 5 additions and 4 deletions


@@ -3998,8 +3998,8 @@ class Router:
             verbose_router_logger.error(
                 "Could not identify azure model. Set azure 'base_model' for accurate max tokens, cost tracking, etc.- https://docs.litellm.ai/docs/proxy/cost_tracking#spend-tracking-for-azure-openai-models"
             )
-        else:
-            model = deployment.get("litellm_params", {}).get("model", None)
+        elif custom_llm_provider != "azure":
+            model = _model
         ## GET LITELLM MODEL INFO - raises exception, if model is not mapped
         model_info = litellm.get_model_info(model=model)

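For context, the branch this hunk rewrites behaves roughly as sketched below. This is a paraphrase, not the actual `Router.get_router_model_info` body: the helper name, the provider-parsing shortcut, and the fallback ordering are illustrative assumptions.

```python
def resolve_model_for_info(deployment: dict) -> str | None:
    """Hypothetical sketch of the branch fixed here; not litellm's code."""
    litellm_params = deployment.get("litellm_params", {})
    raw_model = litellm_params.get("model", "")
    # crude provider detection for the sketch: "azure/gpt-4" -> "azure"
    custom_llm_provider = raw_model.split("/", 1)[0] if "/" in raw_model else "openai"

    # base_model may be set under model_info (as the updated test does)
    # or under litellm_params
    base_model = deployment.get("model_info", {}).get("base_model") or litellm_params.get("base_model")

    if custom_llm_provider == "azure" and base_model is None:
        # azure deployment names are arbitrary, so without base_model there
        # is no reliable key into litellm's model cost/context-window map
        print("Could not identify azure model. Set azure 'base_model'.")
        return None
    if custom_llm_provider != "azure":
        # the fix: fall back to the raw litellm_params model only for
        # non-azure providers; the old `else:` branch also ran for azure
        # and replaced a valid base_model with the unmapped deployment name
        return raw_model
    return base_model

# azure deployment with base_model set now resolves to the mapped model
print(resolve_model_for_info({
    "litellm_params": {"model": "azure/chatgpt-v-2"},
    "model_info": {"base_model": "azure/gpt-35-turbo"},
}))  # -> azure/gpt-35-turbo
```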

@@ -812,6 +812,7 @@ def test_router_context_window_check_pre_call_check():
                 "base_model": "azure/gpt-35-turbo",
                 "mock_response": "Hello world 1!",
             },
+            "model_info": {"base_model": "azure/gpt-35-turbo"},
         },
         {
             "model_name": "gpt-3.5-turbo",  # openai model name

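The added `model_info` entry mirrors how azure deployments are expected to declare their underlying model. A minimal sketch of such a Router config, where the deployment name, api_key, and api_base are placeholders:

```python
import litellm
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "azure/chatgpt-v-2",  # placeholder deployment name
                "api_key": "my-azure-key",  # placeholder credential
                "api_base": "https://my-endpoint.openai.azure.com",
                "mock_response": "Hello world 1!",
            },
            # base_model maps the arbitrary azure deployment name onto a
            # known model for max-token and cost lookups
            "model_info": {"base_model": "azure/gpt-35-turbo"},
        }
    ]
)

# the base_model value is a key in litellm's model map, so this lookup
# succeeds instead of raising on the raw deployment name
info = litellm.get_model_info(model="azure/gpt-35-turbo")
print(info["max_input_tokens"])
```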

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "1.40.32"
+version = "1.41.0"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT"
@@ -90,7 +90,7 @@ requires = ["poetry-core", "wheel"]
 build-backend = "poetry.core.masonry.api"

 [tool.commitizen]
-version = "1.40.32"
+version = "1.41.0"
 version_files = [
     "pyproject.toml:^version"
 ]