Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 11:14:04 +00:00
fix(health_check.py): return 'missing mode' error message, if error with health check, and mode is missing
This commit is contained in:
commit 9efe9982f5 (parent 937471223a)
6 changed files with 17 additions and 17 deletions
@@ -4779,7 +4779,9 @@ async def ahealth_check(
     For azure/openai -> completion.with_raw_response
     For rest -> litellm.acompletion()
     """
+    passed_in_mode: Optional[str] = None
     try:
+
         model: Optional[str] = model_params.get("model", None)

         if model is None:
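This first hunk only declares passed_in_mode ahead of the try block; the hunks below populate it and use it in the exception handler to decide whether a mode was ever resolved.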
@ -4793,7 +4795,10 @@ async def ahealth_check(
|
|||
if model in litellm.model_cost and mode is None:
|
||||
mode = litellm.model_cost[model].get("mode")
|
||||
|
||||
mode = mode or "chat" # default to chat completion calls
|
||||
mode = mode
|
||||
passed_in_mode = mode
|
||||
if mode is None:
|
||||
mode = "chat" # default to chat completion calls
|
||||
|
||||
if custom_llm_provider == "azure":
|
||||
api_key = (
|
||||
|
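Taken with the first hunk, this rewires how mode is resolved: the old one-liner defaulted straight to "chat", erasing the distinction between "a mode was resolved" and "no mode anywhere". The new code records the resolved value in passed_in_mode before applying the default. A compressed sketch of that flow (resolve_mode is a hypothetical helper for illustration, not a function in the codebase):

    from typing import Optional, Tuple

    def resolve_mode(
        model: str, mode: Optional[str], model_cost: dict
    ) -> Tuple[str, Optional[str]]:
        # Infer the mode from the cost map when the caller did not pass one.
        if model in model_cost and mode is None:
            mode = model_cost[model].get("mode")
        passed_in_mode = mode  # None here means "never determined anywhere"
        if mode is None:
            mode = "chat"  # default to chat completion calls
        return mode, passed_in_mode

For example, resolve_mode("azure/azure-embedding-model", None, {}) returns ("chat", None): the check proceeds as a chat call and, if it fails, the handler below knows no mode was ever set.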
@ -4883,13 +4888,14 @@ async def ahealth_check(
|
|||
response = {} # args like remaining ratelimit etc.
|
||||
return response
|
||||
except Exception as e:
|
||||
verbose_logger.error(
|
||||
verbose_logger.exception(
|
||||
"litellm.ahealth_check(): Exception occured - {}".format(str(e))
|
||||
)
|
||||
stack_trace = traceback.format_exc()
|
||||
if isinstance(stack_trace, str):
|
||||
stack_trace = stack_trace[:1000]
|
||||
if model not in litellm.model_cost and mode is None:
|
||||
|
||||
if passed_in_mode is None:
|
||||
return {
|
||||
"error": "Missing `mode`. Set the `mode` for the model - https://docs.litellm.ai/docs/proxy/health#embedding-models"
|
||||
}
|
||||
|
|
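Two things change in the handler: verbose_logger.error becomes verbose_logger.exception, so the stack trace is logged along with the message, and the "missing mode" guard now tests passed_in_mode directly instead of re-deriving the condition from litellm.model_cost. A condensed sketch of the resulting failure path (check_with_mode_hint and do_check are hypothetical names, not code from this commit):

    from typing import Awaitable, Callable, Optional

    async def check_with_mode_hint(
        do_check: Callable[[], Awaitable[dict]], passed_in_mode: Optional[str]
    ) -> dict:
        # When the underlying health call fails and no mode was ever resolved,
        # return the actionable hint instead of only the raw exception text.
        try:
            return await do_check()
        except Exception as e:
            if passed_in_mode is None:
                return {
                    "error": "Missing `mode`. Set the `mode` for the model - "
                    "https://docs.litellm.ai/docs/proxy/health#embedding-models"
                }
            return {"error": str(e)}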
3 file diffs suppressed because one or more lines are too long
@@ -1,9 +1,6 @@
-# model_list:
-#   - model_name: "gpt-4"
-#     litellm_params:
-#       model: "gpt-4"
-#     model_info:
-#       my_custom_key: "my_custom_value"
-
-general_settings:
-  infer_model_from_keys: true
+model_list:
+  - model_name: "text-embedding-ada-002"
+    litellm_params:
+      model: "azure/azure-embedding-model"
+      api_base: os.environ/AZURE_API_BASE
+      api_key: os.environ/AZURE_API_KEY
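The example config now reproduces exactly the case this commit targets: an Azure embedding deployment listed without a mode. A minimal way to exercise it against a running proxy (the port and master key below are assumptions, not values from this commit):

    import requests

    # Assumes `litellm --config proxy_config.yaml` is serving on localhost:4000.
    resp = requests.get(
        "http://localhost:4000/health",
        headers={"Authorization": "Bearer sk-1234"},  # hypothetical master key
    )
    print(resp.json())  # unhealthy endpoints now carry the "Missing `mode`" error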
@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
|
|||
|
||||
ILLEGAL_DISPLAY_PARAMS = ["messages", "api_key", "prompt", "input"]
|
||||
|
||||
MINIMAL_DISPLAY_PARAMS = ["model"]
|
||||
MINIMAL_DISPLAY_PARAMS = ["model", "mode_error"]
|
||||
|
||||
|
||||
def _get_random_llm_message():
|
||||
|
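Adding "mode_error" to MINIMAL_DISPLAY_PARAMS keeps the new error message visible even in the stripped-down health output; the sketch after the next hunk shows both constants in use.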
@ -31,7 +31,7 @@ def _clean_endpoint_data(endpoint_data: dict, details: Optional[bool] = True):
|
|||
"""
|
||||
return (
|
||||
{k: v for k, v in endpoint_data.items() if k not in ILLEGAL_DISPLAY_PARAMS}
|
||||
if details
|
||||
if details is not False
|
||||
else {k: v for k, v in endpoint_data.items() if k in MINIMAL_DISPLAY_PARAMS}
|
||||
)
|
||||
|
||||
|
|
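The old truthiness test meant a caller passing details=None fell through to the minimal view; `is not False` keeps None behaving like the declared default of True, so only an explicit details=False strips the output. A self-contained re-implementation for illustration (constants copied from the hunks above; the leading underscore is dropped to mark it as a sketch):

    from typing import Optional

    ILLEGAL_DISPLAY_PARAMS = ["messages", "api_key", "prompt", "input"]
    MINIMAL_DISPLAY_PARAMS = ["model", "mode_error"]

    def clean_endpoint_data(endpoint_data: dict, details: Optional[bool] = True) -> dict:
        # Full view strips secrets/payloads; minimal view keeps only the model
        # name and any mode_error. `is not False` makes None mean full details.
        return (
            {k: v for k, v in endpoint_data.items() if k not in ILLEGAL_DISPLAY_PARAMS}
            if details is not False
            else {k: v for k, v in endpoint_data.items() if k in MINIMAL_DISPLAY_PARAMS}
        )

    data = {"model": "gpt-4", "api_key": "sk-...", "mode_error": "Missing `mode`. ..."}
    print(clean_endpoint_data(data, details=None))   # full view (minimal before this fix)
    print(clean_endpoint_data(data, details=False))  # {'model': ..., 'mode_error': ...}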