diff --git a/litellm/main.py b/litellm/main.py
index cf7a4a5e7e..eb975ca6c4 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -4779,7 +4779,9 @@ async def ahealth_check(
For azure/openai -> completion.with_raw_response
For rest -> litellm.acompletion()
"""
+ passed_in_mode: Optional[str] = None
try:
+
model: Optional[str] = model_params.get("model", None)
if model is None:
@@ -4793,7 +4795,9 @@ async def ahealth_check(
if model in litellm.model_cost and mode is None:
mode = litellm.model_cost[model].get("mode")
- mode = mode or "chat" # default to chat completion calls
+ passed_in_mode = mode
+ if mode is None:
+ mode = "chat" # default to chat completion calls
if custom_llm_provider == "azure":
api_key = (
@@ -4883,13 +4888,14 @@ async def ahealth_check(
response = {} # args like remaining ratelimit etc.
return response
except Exception as e:
- verbose_logger.error(
+ verbose_logger.exception(
"litellm.ahealth_check(): Exception occured - {}".format(str(e))
)
stack_trace = traceback.format_exc()
if isinstance(stack_trace, str):
stack_trace = stack_trace[:1000]
- if model not in litellm.model_cost and mode is None:
+
+ if passed_in_mode is None:
return {
"error": "Missing `mode`. Set the `mode` for the model - https://docs.litellm.ai/docs/proxy/health#embedding-models"
}
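
A rough sketch of the behavior this hunk targets, assuming `ahealth_check` stays importable from the top-level `litellm` package and keeps the `model_params` / `mode` keyword arguments shown above; the deployment name and key below are placeholders, not values from this PR:

```python
import asyncio
import litellm

async def main():
    # mode omitted: if the health call fails, the except-handler sees
    # passed_in_mode is None and returns the "Missing `mode`" hint,
    # even though mode itself was defaulted to "chat" for the actual call.
    resp = await litellm.ahealth_check(
        model_params={"model": "azure/azure-embedding-model", "api_key": "sk-placeholder"},
        mode=None,
    )
    print(resp)

    # mode supplied explicitly: passed_in_mode is not None, so a failure now
    # surfaces the real error and (truncated) stack trace instead of the hint.
    resp = await litellm.ahealth_check(
        model_params={"model": "azure/azure-embedding-model", "api_key": "sk-placeholder"},
        mode="embedding",
    )
    print(resp)

asyncio.run(main())
```

Tracking `passed_in_mode` separately is what lets the "Missing `mode`" message fire only when the caller genuinely omitted `mode`, rather than whenever the model is absent from `litellm.model_cost`.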
diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html
deleted file mode 100644
index eeffbc7ccd..0000000000
--- a/litellm/proxy/_experimental/out/404.html
+++ /dev/null
@@ -1 +0,0 @@
-404: This page could not be found.LiteLLM Dashboard404This page could not be found.
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/model_hub.html b/litellm/proxy/_experimental/out/model_hub.html
deleted file mode 100644
index 333dae1386..0000000000
--- a/litellm/proxy/_experimental/out/model_hub.html
+++ /dev/null
@@ -1 +0,0 @@
-LiteLLM Dashboard
\ No newline at end of file
diff --git a/litellm/proxy/_experimental/out/onboarding.html b/litellm/proxy/_experimental/out/onboarding.html
deleted file mode 100644
index 3187145b15..0000000000
--- a/litellm/proxy/_experimental/out/onboarding.html
+++ /dev/null
@@ -1 +0,0 @@
-LiteLLM Dashboard
\ No newline at end of file
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index f224253a79..fcbc4a1700 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -1,9 +1,6 @@
-# model_list:
-# - model_name: "gpt-4"
-# litellm_params:
-# model: "gpt-4"
-# model_info:
-# my_custom_key: "my_custom_value"
-
-general_settings:
- infer_model_from_keys: true
\ No newline at end of file
+model_list:
+ - model_name: "text-embedding-ada-002"
+ litellm_params:
+ model: "azure/azure-embedding-model"
+ api_base: os.environ/AZURE_API_BASE
+ api_key: os.environ/AZURE_API_KEY
\ No newline at end of file
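
A minimal sketch of health-checking the new config entry directly, under the same assumptions about the `ahealth_check` signature as above and with `AZURE_API_BASE` / `AZURE_API_KEY` set in the environment; for an embedding deployment, `mode` still has to be supplied explicitly (the docs link in the error message above covers the proxy-side equivalent):

```python
import asyncio
import os
import litellm

async def main():
    # Mirrors what a health check against the model_list entry above would do.
    response = await litellm.ahealth_check(
        model_params={
            "model": "azure/azure-embedding-model",
            "api_base": os.environ["AZURE_API_BASE"],
            "api_key": os.environ["AZURE_API_KEY"],
        },
        mode="embedding",  # without this, a failure returns the "Missing `mode`" hint
    )
    print(response)

asyncio.run(main())
```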
diff --git a/litellm/proxy/health_check.py b/litellm/proxy/health_check.py
index 5713fa782b..ff5ed7bfb7 100644
--- a/litellm/proxy/health_check.py
+++ b/litellm/proxy/health_check.py
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
ILLEGAL_DISPLAY_PARAMS = ["messages", "api_key", "prompt", "input"]
-MINIMAL_DISPLAY_PARAMS = ["model"]
+MINIMAL_DISPLAY_PARAMS = ["model", "mode_error"]
def _get_random_llm_message():
@@ -31,7 +31,7 @@ def _clean_endpoint_data(endpoint_data: dict, details: Optional[bool] = True):
"""
return (
{k: v for k, v in endpoint_data.items() if k not in ILLEGAL_DISPLAY_PARAMS}
- if details
+ if details is not False
else {k: v for k, v in endpoint_data.items() if k in MINIMAL_DISPLAY_PARAMS}
)