Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
fix(router.py): fix pre call check
only check whether response_format is supported by the model when the pre-call check is enabled
This commit is contained in:
parent 9b90f91515
commit ae787645da
1 changed file with 3 additions and 3 deletions
@@ -3324,7 +3324,7 @@ class Router:
                         invalid_model_indices.append(idx)
                         continue

-                ## INVALID PARAMS ## -> catch 'gpt-3.5-turbo-16k' not supporting 'response_object' param
+                ## INVALID PARAMS ## -> catch 'gpt-3.5-turbo-16k' not supporting 'response_format' param
                 if request_kwargs is not None and litellm.drop_params == False:
                     # get supported params
                     model, custom_llm_provider, _, _ = litellm.get_llm_provider(
@@ -3342,10 +3342,10 @@ class Router:
                     non_default_params = litellm.utils.get_non_default_params(
                         passed_params=request_kwargs
                     )
-                    special_params = ["response_object"]
+                    special_params = ["response_format"]
                     # check if all params are supported
                     for k, v in non_default_params.items():
-                        if k not in supported_openai_params:
+                        if k not in supported_openai_params and k in special_params:
                             # if not -> invalid model
                             verbose_router_logger.debug(
                                 f"INVALID MODEL INDEX @ REQUEST KWARG FILTERING, k={k}"
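To make the behavioral change concrete, here is a minimal, hypothetical sketch of the post-fix filtering rule. The names mirror the diff, but this is a simplified standalone function, not the actual Router code: after the fix, a deployment is only filtered out when a requested param is both unsupported by the model and listed in special_params (currently just "response_format"); other unsupported params are left for downstream handling (e.g. litellm.drop_params).

# Hypothetical, simplified sketch of the post-fix pre-call check;
# the real logic lives inside litellm's Router (router.py).
def should_filter_deployment(non_default_params: dict, supported_openai_params: list) -> bool:
    special_params = ["response_format"]
    for k in non_default_params:
        # Post-fix condition: unsupported AND special -> filter this deployment.
        # Pre-fix, any unsupported param filtered the deployment out.
        if k not in supported_openai_params and k in special_params:
            return True
    return False

# A model without response_format support is skipped:
assert should_filter_deployment(
    {"response_format": {"type": "json_object"}},
    ["temperature", "max_tokens"],
)
# An unsupported but non-special param no longer disqualifies the model:
assert not should_filter_deployment(
    {"seed": 42},
    ["temperature", "max_tokens"],
)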