Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
(feat) proxy use model_group_alias_map
This commit is contained in:
parent ee70c4e822, commit 900b8d66f3
2 changed files with 3 additions and 4 deletions
@@ -944,6 +944,8 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
             response = await llm_router.acompletion(**data)
         elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router
             response = await llm_router.acompletion(**data, specific_deployment = True)
+        elif llm_router is not None and data["model"] in litellm.model_group_alias_map: # model set in model_group_alias_map
+            response = await llm_router.acompletion(**data)
         else: # router is not set
             response = await litellm.acompletion(**data)
         if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
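For readers skimming the hunk: the new elif means a request whose model name is only an alias in litellm.model_group_alias_map (and matches neither a router model group nor a specific deployment) is still sent through the router instead of falling back to a direct litellm.acompletion call. The sketch below reproduces that decision order with stubbed stand-ins so it runs on its own; StubRouter, route_request, the module-level alias dict, and the direct-call fallback are illustrative names rather than litellm APIs, and the first condition is an assumption about the check that sits just above this hunk.

import asyncio

# Stand-in for litellm.model_group_alias_map (assumed shape: alias -> real model group).
model_group_alias_map = {"gpt-4": "openai-gpt-4-group"}

class StubRouter:
    """Illustrative stub exposing only the attributes the proxy branch checks."""
    def __init__(self, model_names, deployment_names):
        self.model_names = model_names            # model groups from the router config
        self.deployment_names = deployment_names  # ids of specific deployments

    async def acompletion(self, **data):
        return f"router handled {data['model']} (specific_deployment={data.get('specific_deployment', False)})"

async def route_request(llm_router, data):
    # Assumed condition for the context line above the hunk: model is a router model group.
    if llm_router is not None and data["model"] in llm_router.model_names:
        return await llm_router.acompletion(**data)
    # Model names a specific deployment on the router.
    elif llm_router is not None and data["model"] in llm_router.deployment_names:
        return await llm_router.acompletion(**data, specific_deployment=True)
    # New branch from this commit: model is only an alias, still routed through the router.
    elif llm_router is not None and data["model"] in model_group_alias_map:
        return await llm_router.acompletion(**data)
    # No router configured: the proxy would fall back to a direct litellm.acompletion call.
    else:
        return f"direct call for {data['model']}"

router = StubRouter(model_names=["openai-gpt-4-group"], deployment_names=["azure/gpt-4-eu"])
print(asyncio.run(route_request(router, {"model": "gpt-4", "messages": []})))

Running the sketch, a request for "gpt-4" matches neither the router's model groups nor its deployment names, hits the new alias branch, and is still served by the router, which is the behavior this commit adds.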