fix(types/router.py): ModelGroupInfo to handle mode being None and supported_openai_params not being a list

Krrish Dholakia 2024-06-08 20:12:59 -07:00
parent 8c5802d506
commit 58cce8a922
3 changed files with 25 additions and 6 deletions


@@ -3610,7 +3610,7 @@ class Router:
                 # get model info
                 try:
                     model_info = litellm.get_model_info(model=litellm_params.model)
-                except Exception as e:
+                except Exception:
                     model_info = None
                 # get llm provider
                 try:
@@ -3619,7 +3619,9 @@
                         custom_llm_provider=litellm_params.custom_llm_provider,
                     )
                 except litellm.exceptions.BadRequestError as e:
-                    continue
+                    verbose_router_logger.error(
+                        "litellm.router.py::get_model_group_info() - {}".format(str(e))
+                    )
 
                 if model_info is None:
                     supported_openai_params = litellm.get_supported_openai_params(
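Both hunks above handle the same situation: the model's metadata lookup can fail, leaving model_info as None, and the provider lookup can raise. A minimal standalone sketch of that first lookup, using the Bedrock model name from the new test below (this is not the router's exact code path):

import litellm

# get_model_info raises for models that have no entry in litellm's
# cost/metadata map, so callers fall back to None.
try:
    model_info = litellm.get_model_info(model="cohere.command-r-plus-v1:0")
except Exception:
    model_info = None

print(model_info)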


@@ -1275,6 +1275,21 @@ def test_openai_completion_on_router():
 # test_openai_completion_on_router()
 
 
+def test_model_group_info():
+    router = Router(
+        model_list=[
+            {
+                "model_name": "command-r-plus",
+                "litellm_params": {"model": "cohere.command-r-plus-v1:0"},
+            }
+        ]
+    )
+
+    response = router.get_model_group_info(model_group="command-r-plus")
+
+    assert response is not None
+
+
 def test_consistent_model_id():
     """
     - For a given model group + litellm params, assert the model id is always the same
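The new test only asserts that a ModelGroupInfo object comes back. With the relaxed typing in the next hunk, that object's metadata-derived fields may simply be None instead of failing validation. A hedged sketch expanding on the test (the print lines are illustrative; whether the fields end up None or defaulted depends on what metadata litellm has for the model):

from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "command-r-plus",
            "litellm_params": {"model": "cohere.command-r-plus-v1:0"},
        }
    ]
)

info = router.get_model_group_info(model_group="command-r-plus")
assert info is not None
# After the types/router.py change these may legitimately be None:
print(info.mode)                     # may be None when model metadata is unknown
print(info.supported_openai_params)  # may be None instead of a list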


@@ -435,13 +435,15 @@ class ModelGroupInfo(BaseModel):
     max_output_tokens: Optional[float] = None
     input_cost_per_token: Optional[float] = None
     output_cost_per_token: Optional[float] = None
-    mode: Literal[
-        "chat", "embedding", "completion", "image_generation", "audio_transcription"
-    ] = Field(default="chat")
+    mode: Optional[
+        Literal[
+            "chat", "embedding", "completion", "image_generation", "audio_transcription"
+        ]
+    ] = Field(default="chat")
     supports_parallel_function_calling: bool = Field(default=False)
     supports_vision: bool = Field(default=False)
     supports_function_calling: bool = Field(default=False)
-    supported_openai_params: List[str] = Field(default=[])
+    supported_openai_params: Optional[List[str]] = Field(default=[])
 
 
 class AssistantsTypedDict(TypedDict):
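A minimal before/after sketch of what the Optional annotations allow; model_group and providers are assumed to be the remaining required fields on ModelGroupInfo (they are not part of this hunk):

from litellm.types.router import ModelGroupInfo

# Before this commit, mode=None failed the Literal check and
# supported_openai_params=None failed the List[str] check; both now validate.
info = ModelGroupInfo(
    model_group="command-r-plus",
    providers=["bedrock"],
    mode=None,
    supported_openai_params=None,
)
print(info.mode, info.supported_openai_params)  # None None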