fix(main.py): fix ahealth_check to infer mode when custom_llm_provider/model_name used

This commit is contained in:
Krrish Dholakia 2024-06-03 14:06:15 -07:00
parent 6441a1c398
commit 46a0ac7953
2 changed files with 9 additions and 0 deletions

View file

@@ -4333,6 +4333,10 @@ async def ahealth_check(
mode = litellm.model_cost[model]["mode"]
model, custom_llm_provider, _, _ = get_llm_provider(model=model)
if model in litellm.model_cost and mode is None:
mode = litellm.model_cost[model]["mode"]
mode = mode or "chat" # default to chat completion calls
if custom_llm_provider == "azure":

View file

@@ -34,6 +34,11 @@ model_list:
api_base: https://openai-france-1234.openai.azure.com
api_key: os.environ/AZURE_FRANCE_API_KEY
model: azure/gpt-turbo
- model_name: text-embedding
litellm_params:
model: textembedding-gecko-multilingual@001
vertex_project: my-project-9d5c
vertex_location: us-central1
router_settings:
enable_pre_call_checks: true