Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-30 03:44:20 +00:00)
# What does this PR do?

- `llama_stack/apis/common/errors.py`: add an `UnsupportedModelError` class
- remote inference `ollama.py` and `utils/inference/model_registry.py`: raise `UnsupportedModelError` instead of `ValueError`
- `utils/inference/litellm_openai_mixin.py`: remove the `register_model` implementation from the `LiteLLMOpenAIMixin` class; it now uses the parent class `ModelRegistryHelper`'s implementation

Closes #2517

## Test Plan

1. Create a new `test_run_openai.yaml` and paste the following config in it:

```yaml
version: '2'
image_name: test-image
apis:
- inference
providers:
  inference:
  - provider_id: openai
    provider_type: remote::openai
    config:
      max_tokens: 8192
models:
- metadata: {}
  model_id: "non-existent-model"
  provider_id: openai
  model_type: llm
server:
  port: 8321
```

And run the server with:

```bash
uv run llama stack run test_run_openai.yaml
```

You should now get an `UnsupportedModelError` (from `llama_stack.apis.common.errors`) with the supported list of models in the error message.

---

Tested the following remote inference providers; they all raise `UnsupportedModelError`:

- Anthropic
- Cerebras
- Fireworks
- Gemini
- Groq
- Ollama
- OpenAI
- SambaNova
- Together
- Watsonx

---

Co-authored-by: Rohan Awhad <rawhad@redhat.com>
This commit is contained in:
parent 9baa16e498
commit 7cb5d3c60f
4 changed files with 22 additions and 8 deletions
llama_stack/apis/common/errors.py (new file, 13 additions)

```diff
@@ -0,0 +1,13 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+class UnsupportedModelError(ValueError):
+    """raised when model is not present in the list of supported models"""
+
+    def __init__(self, model_name: str, supported_models_list: list[str]):
+        message = f"'{model_name}' model is not supported. Supported models are: {', '.join(supported_models_list)}"
+        super().__init__(message)
```
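As a quick illustration (not part of the commit; the model ids below are made up), the new error behaves like a `ValueError` carrying a formatted message:

```python
from llama_stack.apis.common.errors import UnsupportedModelError

# Hypothetical model id and supported list, for illustration only.
err = UnsupportedModelError("non-existent-model", ["llama3.2:3b", "llama3.1:8b"])

# Subclasses ValueError, so existing `except ValueError` handlers still catch it.
assert isinstance(err, ValueError)

print(err)
# 'non-existent-model' model is not supported. Supported models are: llama3.2:3b, llama3.1:8b
```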
llama_stack/providers/remote/inference/ollama/ollama.py

```diff
@@ -18,6 +18,7 @@ from llama_stack.apis.common.content_types import (
     InterleavedContentItem,
     TextContentItem,
 )
+from llama_stack.apis.common.errors import UnsupportedModelError
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -376,9 +377,7 @@ class OllamaInferenceAdapter(
                     f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'"
                 )
                 return model
-            raise ValueError(
-                f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
-            )
+            raise UnsupportedModelError(model.provider_resource_id, available_models)
         model.provider_resource_id = provider_resource_id

         return model
```
llama_stack/providers/utils/inference/litellm_openai_mixin.py

```diff
@@ -13,6 +13,7 @@ from llama_stack.apis.common.content_types import (
     InterleavedContent,
     InterleavedContentItem,
 )
+from llama_stack.apis.common.errors import UnsupportedModelError
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -92,7 +93,7 @@ class LiteLLMOpenAIMixin(
     async def register_model(self, model: Model) -> Model:
         model_id = self.get_provider_model_id(model.provider_resource_id)
         if model_id is None:
-            raise ValueError(f"Unsupported model: {model.provider_resource_id}")
+            raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys())
         return model

     def get_litellm_model_name(self, model_id: str) -> str:
```
llama_stack/providers/utils/inference/model_registry.py

```diff
@@ -8,6 +8,7 @@ from typing import Any

 from pydantic import BaseModel, Field

+from llama_stack.apis.common.errors import UnsupportedModelError
 from llama_stack.apis.models import ModelType
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
@@ -34,7 +35,9 @@ def get_huggingface_repo(model_descriptor: str) -> str | None:


 def build_hf_repo_model_entry(
-    provider_model_id: str, model_descriptor: str, additional_aliases: list[str] | None = None
+    provider_model_id: str,
+    model_descriptor: str,
+    additional_aliases: list[str] | None = None,
 ) -> ProviderModelEntry:
     aliases = [
         get_huggingface_repo(model_descriptor),
@@ -81,9 +84,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):

     async def register_model(self, model: Model) -> Model:
         if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)):
-            raise ValueError(
-                f"Model '{model.provider_resource_id}' is not supported. Supported models are: {', '.join(self.alias_to_provider_id_map.keys())}"
-            )
+            raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys())
         provider_resource_id = self.get_provider_model_id(model.model_id)
         if model.model_type == ModelType.embedding:
             # embedding models are always registered by their provider model id and does not need to be mapped to a llama model
```
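The other remote providers listed in the test plan pick this behavior up through `ModelRegistryHelper` or the LiteLLM mixin above. For callers, the practical upshot is that an unsupported model can now be caught as a specific exception type; a minimal sketch (the `register_model_id` helper and the model ids are hypothetical, not part of this commit):

```python
from llama_stack.apis.common.errors import UnsupportedModelError


def register_model_id(model_id: str, alias_to_provider_id_map: dict[str, str]) -> str:
    # Hypothetical stand-in mirroring ModelRegistryHelper.register_model above:
    # unknown ids raise UnsupportedModelError carrying the full supported list.
    provider_model_id = alias_to_provider_id_map.get(model_id)
    if provider_model_id is None:
        raise UnsupportedModelError(model_id, list(alias_to_provider_id_map))
    return provider_model_id


try:
    register_model_id("non-existent-model", {"some-alias": "provider/some-model"})
except UnsupportedModelError as err:
    # `except ValueError` would also still catch this, so existing handlers keep working.
    print(f"registration rejected: {err}")
```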