diff --git a/llama_stack/exceptions.py b/llama_stack/exceptions.py
new file mode 100644
index 000000000..80f297bce
--- /dev/null
+++ b/llama_stack/exceptions.py
@@ -0,0 +1,13 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+class UnsupportedModelError(ValueError):
+    """raised when model is not present in the list of supported models"""
+
+    def __init__(self, model_name: str, supported_models_list: list[str]):
+        message = f"'{model_name}' model is not supported. Supported models are: {', '.join(supported_models_list)}"
+        super().__init__(message)
diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index e9df0dcc8..e7c0d1e05 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -48,6 +48,7 @@ from llama_stack.apis.inference import (
     ToolPromptFormat,
 )
 from llama_stack.apis.models import Model, ModelType
+from llama_stack.exceptions import UnsupportedModelError
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import (
     HealthResponse,
@@ -376,9 +377,7 @@ class OllamaInferenceAdapter(
                     f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'"
                 )
                 return model
-            raise ValueError(
-                f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
-            )
+            raise UnsupportedModelError(model.provider_resource_id, available_models)
 
         model.provider_resource_id = provider_resource_id
         return model
diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
index d19908368..e511d1158 100644
--- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py
+++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -40,6 +40,7 @@ from llama_stack.apis.inference import (
 )
 from llama_stack.apis.models import Model
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
+from llama_stack.exceptions import UnsupportedModelError
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack.providers.utils.inference.openai_compat import (
@@ -92,7 +93,7 @@ class LiteLLMOpenAIMixin(
     async def register_model(self, model: Model) -> Model:
         model_id = self.get_provider_model_id(model.provider_resource_id)
         if model_id is None:
-            raise ValueError(f"Unsupported model: {model.provider_resource_id}")
+            raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys())
         return model
 
     def get_litellm_model_name(self, model_id: str) -> str:
diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py
index de67e5288..bbaf90779 100644
--- a/llama_stack/providers/utils/inference/model_registry.py
+++ b/llama_stack/providers/utils/inference/model_registry.py
@@ -9,6 +9,7 @@ from typing import Any
 from pydantic import BaseModel, Field
 
 from llama_stack.apis.models import ModelType
+from llama_stack.exceptions import UnsupportedModelError
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
 from llama_stack.providers.utils.inference import (
@@ -34,7 +35,9 @@ def get_huggingface_repo(model_descriptor: str) -> str | None:
 
 
 def build_hf_repo_model_entry(
-    provider_model_id: str, model_descriptor: str, additional_aliases: list[str] | None = None
+    provider_model_id: str,
+    model_descriptor: str,
+    additional_aliases: list[str] | None = None,
 ) -> ProviderModelEntry:
     aliases = [
         get_huggingface_repo(model_descriptor),
@@ -81,9 +84,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
 
     async def register_model(self, model: Model) -> Model:
         if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)):
-            raise ValueError(
-                f"Model '{model.provider_resource_id}' is not supported. Supported models are: {', '.join(self.alias_to_provider_id_map.keys())}"
-            )
+            raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys())
         provider_resource_id = self.get_provider_model_id(model.model_id)
         if model.model_type == ModelType.embedding:
             # embedding models are always registered by their provider model id and does not need to be mapped to a llama model
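
For illustration, a minimal sketch of how the new exception reads at a call site. The model names below are hypothetical; the printed message follows directly from the format string in exceptions.py above.

from llama_stack.exceptions import UnsupportedModelError

# Hypothetical model names, for illustration only.
supported = ["llama3.2:3b", "llama3.1:8b"]

try:
    raise UnsupportedModelError("llama2:7b", supported)
except ValueError as err:  # still catchable as ValueError, since the new class subclasses it
    print(err)
# 'llama2:7b' model is not supported. Supported models are: llama3.2:3b, llama3.1:8b

Because UnsupportedModelError subclasses ValueError, existing callers that catch ValueError keep working unchanged; the change only centralizes how the "unsupported model" message is constructed.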