mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix using pass_through_all_models
This commit is contained in:
parent
af1cd9e06f
commit
693bcfac39
2 changed files with 22 additions and 15 deletions
|
@@ -11,6 +11,10 @@ model_list:
  - model_name: "*"
    litellm_params:
      model: "*"
  - model_name: "*"
    litellm_params:
      model: openai/*
      api_key: os.environ/OPENAI_API_KEY

general_settings:
  master_key: sk-1234
  alerting: ["slack"]
|
|
@@ -2882,14 +2882,15 @@ async def chat_completion(
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and llm_router.default_deployment is not None
        ):  # model in router deployments, calling a specific deployment on the router
            tasks.append(llm_router.acompletion(**data))
        elif (
            llm_router is not None
            and llm_router.router_general_settings.pass_through_all_models is True
        ):
            tasks.append(litellm.acompletion(**data))
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and llm_router.default_deployment is not None
        ):  # model in router deployments, calling a specific deployment on the router
            tasks.append(llm_router.acompletion(**data))
        elif user_model is not None:  # `litellm --model <your-model-name>`
            tasks.append(litellm.acompletion(**data))
        else:
@@ -3144,6 +3145,12 @@ async def completion(
            llm_router is not None and data["model"] in llm_router.get_model_ids()
        ):  # model in router model list
            llm_response = asyncio.create_task(llm_router.atext_completion(**data))
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and llm_router.router_general_settings.pass_through_all_models is True
        ):
            llm_response = asyncio.create_task(litellm.atext_completion(**data))
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
@@ -3152,11 +3159,6 @@ async def completion(
            llm_response = asyncio.create_task(llm_router.atext_completion(**data))
        elif user_model is not None:  # `litellm --model <your-model-name>`
            llm_response = asyncio.create_task(litellm.atext_completion(**data))
        elif (
            llm_router is not None
            and llm_router.router_general_settings.pass_through_all_models is True
        ):
            llm_response = asyncio.create_task(litellm.atext_completion(**data))
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
@@ -3412,14 +3414,15 @@ async def embeddings(
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and llm_router.default_deployment is not None
        ):  # model in router deployments, calling a specific deployment on the router
            tasks.append(llm_router.aembedding(**data))
        elif (
            llm_router is not None
            and llm_router.router_general_settings.pass_through_all_models is True
        ):
            tasks.append(litellm.aembedding(**data))
        elif (
            llm_router is not None
            and data["model"] not in router_model_names
            and llm_router.default_deployment is not None
        ):  # model in router deployments, calling a specific deployment on the router
            tasks.append(llm_router.aembedding(**data))
        elif user_model is not None:  # `litellm --model <your-model-name>`
            tasks.append(litellm.aembedding(**data))
        else:
|
Loading…
Add table
Add a link
Reference in a new issue