From 7cb5d3c60fc39369fd734f536154f8b6d9d3b886 Mon Sep 17 00:00:00 2001 From: Rohan Awhad <30470101+RohanAwhad@users.noreply.github.com> Date: Fri, 27 Jun 2025 14:26:58 -0400 Subject: [PATCH] chore: standardize unsupported model error #2517 (#2518) # What does this PR do? - llama_stack/apis/common/errors.py: Add UnsupportedModelError class - remote inference ollama.py and utils/inference/model_registry.py: Changed ValueError in favor of UnsupportedModelError - utils/inference/litellm_openai_mixin.py: Changed ValueError in favor of UnsupportedModelError in `register_model` of the `LiteLLMOpenAIMixin` class, matching the parent class `ModelRegistryHelper`'s implementation Closes #2517 ## Test Plan 1. Create a new `test_run_openai.yaml` and paste the following config in it: ```yaml version: '2' image_name: test-image apis: - inference providers: inference: - provider_id: openai provider_type: remote::openai config: max_tokens: 8192 models: - metadata: {} model_id: "non-existent-model" provider_id: openai model_type: llm server: port: 8321 ``` And run the server with: ```bash uv run llama stack run test_run_openai.yaml ``` You should now get a `llama_stack.apis.common.errors.UnsupportedModelError` with the supported list of models in the error message. 
--- Tested for the following remote inference providers, and they all raise the `UnsupportedModelError`: - Anthropic - Cerebras - Fireworks - Gemini - Groq - Ollama - OpenAI - SambaNova - Together - Watsonx --------- Co-authored-by: Rohan Awhad --- llama_stack/apis/common/errors.py | 13 +++++++++++++ .../providers/remote/inference/ollama/ollama.py | 5 ++--- .../utils/inference/litellm_openai_mixin.py | 3 ++- .../providers/utils/inference/model_registry.py | 9 +++++---- 4 files changed, 22 insertions(+), 8 deletions(-) create mode 100644 llama_stack/apis/common/errors.py diff --git a/llama_stack/apis/common/errors.py b/llama_stack/apis/common/errors.py new file mode 100644 index 000000000..80f297bce --- /dev/null +++ b/llama_stack/apis/common/errors.py @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + + +class UnsupportedModelError(ValueError): + """raised when model is not present in the list of supported models""" + + def __init__(self, model_name: str, supported_models_list: list[str]): + message = f"'{model_name}' model is not supported. 
Supported models are: {', '.join(supported_models_list)}" + super().__init__(message) diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py index e9df0dcc8..2d83bf82b 100644 --- a/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/llama_stack/providers/remote/inference/ollama/ollama.py @@ -18,6 +18,7 @@ from llama_stack.apis.common.content_types import ( InterleavedContentItem, TextContentItem, ) +from llama_stack.apis.common.errors import UnsupportedModelError from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, @@ -376,9 +377,7 @@ class OllamaInferenceAdapter( f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'" ) return model - raise ValueError( - f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}" - ) + raise UnsupportedModelError(model.provider_resource_id, available_models) model.provider_resource_id = provider_resource_id return model diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py index d19908368..188e82125 100644 --- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py +++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py @@ -13,6 +13,7 @@ from llama_stack.apis.common.content_types import ( InterleavedContent, InterleavedContentItem, ) +from llama_stack.apis.common.errors import UnsupportedModelError from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, @@ -92,7 +93,7 @@ class LiteLLMOpenAIMixin( async def register_model(self, model: Model) -> Model: model_id = self.get_provider_model_id(model.provider_resource_id) if model_id is None: - raise ValueError(f"Unsupported model: {model.provider_resource_id}") + raise 
UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys()) return model def get_litellm_model_name(self, model_id: str) -> str: diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py index de67e5288..46c0ca7b5 100644 --- a/llama_stack/providers/utils/inference/model_registry.py +++ b/llama_stack/providers/utils/inference/model_registry.py @@ -8,6 +8,7 @@ from typing import Any from pydantic import BaseModel, Field +from llama_stack.apis.common.errors import UnsupportedModelError from llama_stack.apis.models import ModelType from llama_stack.models.llama.sku_list import all_registered_models from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate @@ -34,7 +35,9 @@ def get_huggingface_repo(model_descriptor: str) -> str | None: def build_hf_repo_model_entry( - provider_model_id: str, model_descriptor: str, additional_aliases: list[str] | None = None + provider_model_id: str, + model_descriptor: str, + additional_aliases: list[str] | None = None, ) -> ProviderModelEntry: aliases = [ get_huggingface_repo(model_descriptor), @@ -81,9 +84,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate): async def register_model(self, model: Model) -> Model: if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)): - raise ValueError( - f"Model '{model.provider_resource_id}' is not supported. Supported models are: {', '.join(self.alias_to_provider_id_map.keys())}" - ) + raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys()) provider_resource_id = self.get_provider_model_id(model.model_id) if model.model_type == ModelType.embedding: # embedding models are always registered by their provider model id and does not need to be mapped to a llama model