fix(router.py): fix pre-call check

only check whether 'response_format' is supported by the model when the pre-call check is enabled; unsupported params outside special_params no longer filter a deployment out
Author: Krrish Dholakia
Date: 2024-05-24 20:09:15 -07:00
Parent: 9b90f91515
Commit: ae787645da


@@ -3324,7 +3324,7 @@ class Router:
                     invalid_model_indices.append(idx)
                     continue
 
-            ## INVALID PARAMS ## -> catch 'gpt-3.5-turbo-16k' not supporting 'response_object' param
+            ## INVALID PARAMS ## -> catch 'gpt-3.5-turbo-16k' not supporting 'response_format' param
             if request_kwargs is not None and litellm.drop_params == False:
                 # get supported params
                 model, custom_llm_provider, _, _ = litellm.get_llm_provider(
@@ -3342,10 +3342,10 @@ class Router:
                     non_default_params = litellm.utils.get_non_default_params(
                         passed_params=request_kwargs
                     )
-                    special_params = ["response_object"]
+                    special_params = ["response_format"]
                     # check if all params are supported
                     for k, v in non_default_params.items():
-                        if k not in supported_openai_params:
+                        if k not in supported_openai_params and k in special_params:
                             # if not -> invalid model
                             verbose_router_logger.debug(
                                 f"INVALID MODEL INDEX @ REQUEST KWARG FILTERING, k={k}"