(feat) proxy use model_group_alias_map

ishaan-jaff 2023-12-06 20:23:24 -08:00
parent 1177c54636
commit 13a0a9f22c
2 changed files with 3 additions and 4 deletions


@@ -40,14 +40,11 @@ model_list:
     model_info:
       mode: completion
 litellm_settings:
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
+  model_group_alias_map: {"gpt-4": "openai-gpt-3.5"} # all requests with gpt-4 model_name, get sent to openai-gpt-3.5
-  # setting a callback function for success and failure
-  success_callback: [custom_callbacks.async_on_succes_logger]
-  failure_callback: [custom_callbacks.async_on_fail_logger]
 general_settings:
   # otel: True # OpenTelemetry Logger
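
The config hunk swaps the custom success/failure callback wiring for a model_group_alias_map entry, so any request naming "gpt-4" is served by the "openai-gpt-3.5" model group. A minimal illustrative sketch of that aliasing behavior as a plain dict lookup follows; the helper function is hypothetical and not part of litellm, only the mapping itself comes from the config above:

# Hedged sketch: mirrors the litellm_settings.model_group_alias_map value.
# resolve_model_group is an illustrative stand-in, not litellm's internals.
model_group_alias_map = {"gpt-4": "openai-gpt-3.5"}

def resolve_model_group(requested_model: str) -> str:
    # Aliased names map to their target group; anything else passes through.
    return model_group_alias_map.get(requested_model, requested_model)

assert resolve_model_group("gpt-4") == "openai-gpt-3.5"
assert resolve_model_group("openai-gpt-3.5") == "openai-gpt-3.5"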


@@ -944,6 +944,8 @@ async def chat_completion(request: Request, model: Optional[str] = None, user_ap
         response = await llm_router.acompletion(**data)
     elif llm_router is not None and data["model"] in llm_router.deployment_names: # model in router deployments, calling a specific deployment on the router
         response = await llm_router.acompletion(**data, specific_deployment = True)
+    elif llm_router is not None and data["model"] in litellm.model_group_alias_map: # model set in model_group_alias_map
+        response = await llm_router.acompletion(**data)
     else: # router is not set
         response = await litellm.acompletion(**data)
     if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses