mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
simplify logic for routing llm request
This commit is contained in:
parent
fdd6664420
commit
d50f26d73d
1 changed file with 10 additions and 10 deletions
|
@ -46,12 +46,12 @@ async def route_request(
|
|||
router_model_names = llm_router.model_names if llm_router is not None else []
|
||||
|
||||
if "api_key" in data:
|
||||
return await getattr(litellm, f"{route_type}")(**data)
|
||||
return getattr(litellm, f"{route_type}")(**data)
|
||||
|
||||
elif "user_config" in data:
|
||||
router_config = data.pop("user_config")
|
||||
user_router = litellm.Router(**router_config)
|
||||
return await getattr(user_router, f"{route_type}")(**data)
|
||||
return getattr(user_router, f"{route_type}")(**data)
|
||||
|
||||
elif (
|
||||
"," in data.get("model", "")
|
||||
|
@ -59,40 +59,40 @@ async def route_request(
|
|||
and route_type == "acompletion"
|
||||
):
|
||||
if data.get("fastest_response", False):
|
||||
return await llm_router.abatch_completion_fastest_response(**data)
|
||||
return llm_router.abatch_completion_fastest_response(**data)
|
||||
else:
|
||||
models = [model.strip() for model in data.pop("model").split(",")]
|
||||
return await llm_router.abatch_completion(models=models, **data)
|
||||
return llm_router.abatch_completion(models=models, **data)
|
||||
|
||||
elif llm_router is not None:
|
||||
if (
|
||||
data["model"] in router_model_names
|
||||
or data["model"] in llm_router.get_model_ids()
|
||||
):
|
||||
return await getattr(llm_router, f"{route_type}")(**data)
|
||||
return getattr(llm_router, f"{route_type}")(**data)
|
||||
|
||||
elif (
|
||||
llm_router.model_group_alias is not None
|
||||
and data["model"] in llm_router.model_group_alias
|
||||
):
|
||||
return await getattr(llm_router, f"{route_type}")(**data)
|
||||
return getattr(llm_router, f"{route_type}")(**data)
|
||||
|
||||
elif data["model"] in llm_router.deployment_names:
|
||||
return await getattr(llm_router, f"{route_type}")(
|
||||
return getattr(llm_router, f"{route_type}")(
|
||||
**data, specific_deployment=True
|
||||
)
|
||||
|
||||
elif data["model"] not in router_model_names:
|
||||
if llm_router.router_general_settings.pass_through_all_models:
|
||||
return await getattr(litellm, f"{route_type}")(**data)
|
||||
return getattr(litellm, f"{route_type}")(**data)
|
||||
elif (
|
||||
llm_router.default_deployment is not None
|
||||
or len(llm_router.provider_default_deployments) > 0
|
||||
):
|
||||
return await getattr(llm_router, f"{route_type}")(**data)
|
||||
return getattr(llm_router, f"{route_type}")(**data)
|
||||
|
||||
elif user_model is not None:
|
||||
return await getattr(litellm, f"{route_type}")(**data)
|
||||
return getattr(litellm, f"{route_type}")(**data)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue