Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix: fix linting error

commit b59e54d835 (parent b56122b164)
4 changed files with 59 additions and 36 deletions
@@ -159,6 +159,51 @@ async def create_file_for_each_model(
     return response
 
 
+async def route_create_file(
+    llm_router: Optional[Router],
+    _create_file_request: CreateFileRequest,
+    purpose: OpenAIFilesPurpose,
+    proxy_logging_obj: ProxyLogging,
+    user_api_key_dict: UserAPIKeyAuth,
+    target_model_names_list: List[str],
+    is_router_model: bool,
+    router_model: Optional[str],
+    custom_llm_provider: str,
+) -> OpenAIFileObject:
+    if (
+        litellm.enable_loadbalancing_on_batch_endpoints is True
+        and is_router_model
+        and router_model is not None
+    ):
+        response = await _deprecated_loadbalanced_create_file(
+            llm_router=llm_router,
+            router_model=router_model,
+            _create_file_request=_create_file_request,
+        )
+    elif target_model_names_list:
+        response = await create_file_for_each_model(
+            llm_router=llm_router,
+            _create_file_request=_create_file_request,
+            target_model_names_list=target_model_names_list,
+            purpose=purpose,
+            proxy_logging_obj=proxy_logging_obj,
+            user_api_key_dict=user_api_key_dict,
+        )
+    else:
+        # get configs for custom_llm_provider
+        llm_provider_config = get_files_provider_config(
+            custom_llm_provider=custom_llm_provider
+        )
+        if llm_provider_config is not None:
+            # add llm_provider_config to data
+            _create_file_request.update(llm_provider_config)
+        _create_file_request.pop("custom_llm_provider", None)  # type: ignore
+        # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
+        response = await litellm.acreate_file(**_create_file_request, custom_llm_provider=custom_llm_provider)  # type: ignore
+
+    return response
+
+
 @router.post(
     "/{provider}/v1/files",
     dependencies=[Depends(user_api_key_auth)],
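Note: the new route_create_file helper centralizes a three-way precedence for file creation: the deprecated load-balanced path (only when litellm.enable_loadbalancing_on_batch_endpoints is set and a router model was resolved), then a fan-out over target_model_names_list, then a direct provider call. The sketch below isolates that ordering only; it is not the proxy's code, and pick_create_file_branch and the make_* callables are hypothetical stand-ins for the three branches in the diff.

from typing import Awaitable, Callable, List, Optional
import asyncio


async def pick_create_file_branch(
    enable_loadbalancing_on_batch_endpoints: bool,
    is_router_model: bool,
    router_model: Optional[str],
    target_model_names_list: List[str],
    make_loadbalanced: Callable[[], Awaitable[str]],
    make_per_model: Callable[[], Awaitable[str]],
    make_direct: Callable[[], Awaitable[str]],
) -> str:
    # 1. Deprecated load-balanced path: flag on AND a concrete router model resolved.
    if (
        enable_loadbalancing_on_batch_endpoints
        and is_router_model
        and router_model is not None
    ):
        return await make_loadbalanced()
    # 2. Fan-out: a non-empty target model list selects the per-model branch.
    elif target_model_names_list:
        return await make_per_model()
    # 3. Fallback: call the provider directly.
    return await make_direct()


async def _demo() -> None:
    async def lb() -> str:
        return "loadbalanced"

    async def per_model() -> str:
        return "per-model"

    async def direct() -> str:
        return "direct"

    # Flag off, non-empty target list -> the fan-out branch wins.
    print(await pick_create_file_branch(False, False, None, ["gpt-4o"], lb, per_model, direct))


asyncio.run(_demo())

With the load-balancing flag off, a non-empty target model list is what selects the fan-out branch; an empty list falls through to the direct provider call.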
@@ -267,37 +312,17 @@ async def create_file(
             file=file_data, purpose=cast(CREATE_FILE_REQUESTS_PURPOSE, purpose), **data
         )
 
-        response: Optional[OpenAIFileObject] = None
-        if (
-            litellm.enable_loadbalancing_on_batch_endpoints is True
-            and is_router_model
-            and router_model is not None
-        ):
-            response = await _deprecated_loadbalanced_create_file(
-                llm_router=llm_router,
-                router_model=router_model,
-                _create_file_request=_create_file_request,
-            )
-        elif target_model_names_list:
-            response = await create_file_for_each_model(
-                llm_router=llm_router,
-                _create_file_request=_create_file_request,
-                target_model_names_list=target_model_names_list,
-                purpose=purpose,
-                proxy_logging_obj=proxy_logging_obj,
-                user_api_key_dict=user_api_key_dict,
-            )
-        else:
-            # get configs for custom_llm_provider
-            llm_provider_config = get_files_provider_config(
-                custom_llm_provider=custom_llm_provider
-            )
-            if llm_provider_config is not None:
-                # add llm_provider_config to data
-                _create_file_request.update(llm_provider_config)
-            _create_file_request.pop("custom_llm_provider", None)  # type: ignore
-            # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
-            response = await litellm.acreate_file(**_create_file_request, custom_llm_provider=custom_llm_provider)  # type: ignore
+        response = await route_create_file(
+            llm_router=llm_router,
+            _create_file_request=_create_file_request,
+            purpose=purpose,
+            proxy_logging_obj=proxy_logging_obj,
+            user_api_key_dict=user_api_key_dict,
+            target_model_names_list=target_model_names_list,
+            is_router_model=is_router_model,
+            router_model=router_model,
+            custom_llm_provider=custom_llm_provider,
+        )
 
         if response is None:
             raise HTTPException(
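After the refactor, create_file simply delegates to route_create_file, so the endpoint's behavior from a client's point of view should be unchanged. Below is a minimal sketch of exercising it through the OpenAI SDK against a locally running proxy, assuming the default http://localhost:4000 address and a virtual key; the target_model_names extra-body field and the model aliases are assumptions inferred from the target_model_names_list parameter in the diff, so verify the exact field name against the LiteLLM docs.

from openai import OpenAI

# Assumes a LiteLLM proxy running locally on the default port with a virtual key.
client = OpenAI(base_url="http://localhost:4000/v1", api_key="sk-1234")

# Plain upload: no router model and no target model list, so route_create_file
# falls through to the direct litellm.acreate_file(...) branch.
uploaded = client.files.create(
    file=open("batch_requests.jsonl", "rb"),
    purpose="batch",
)
print(uploaded.id)

# Fan-out upload (assumption: field name inferred from target_model_names_list in
# the diff): intended to create the file once per listed model alias.
fanned_out = client.files.create(
    file=open("batch_requests.jsonl", "rb"),
    purpose="batch",
    extra_body={"target_model_names": "gpt-4o-mini, azure-gpt-4o-mini"},
)
print(fanned_out.id)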