mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-29 03:14:19 +00:00
chore: standardize unsupported model error #2517
- llama_stack/exceptions.py: add an `UnsupportedModelError` class.
- remote inference `ollama.py` and `utils/inference/model_registry.py`: raise `UnsupportedModelError` instead of a bare `ValueError`.
- `utils/inference/litellm_openai_mixin.py`: removed the `register_model` override; it now uses the parent class `ModelRegistryHelper`'s implementation.

Closes #2517
This commit is contained in:
parent
cfee63bd0d
commit
7ccf83fb74
4 changed files with 17 additions and 13 deletions
13
llama_stack/exceptions.py
Normal file
13
llama_stack/exceptions.py
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
|
# the root directory of this source tree.
|
||||||
|
|
||||||
|
|
||||||
|
class UnsupportedModelError(ValueError):
    """Raised when a requested model is not in a provider's list of supported models.

    Subclasses ValueError so existing ``except ValueError`` handlers at call
    sites keep working after the migration from bare ``ValueError`` (see #2517).

    Attributes:
        model_name: The model identifier that was requested but not supported.
        supported_models_list: The models the provider does support, as given
            by the caller (kept for programmatic inspection by handlers).
    """

    def __init__(self, model_name: str, supported_models_list: list[str]):
        # Store the raw inputs so callers can inspect them instead of
        # parsing the human-readable message string.
        self.model_name = model_name
        # NOTE(review): some call sites pass dict views (e.g. dict.keys()),
        # not a list — join() accepts any iterable of str, so this is safe,
        # but the annotation is looser in practice than declared.
        self.supported_models_list = supported_models_list
        message = f"'{model_name}' model is not supported. Supported models are: {', '.join(supported_models_list)}"
        super().__init__(message)
|
|
@ -51,6 +51,7 @@ from llama_stack.apis.inference.inference import (
|
||||||
OpenAIResponseFormatParam,
|
OpenAIResponseFormatParam,
|
||||||
)
|
)
|
||||||
from llama_stack.apis.models import Model, ModelType
|
from llama_stack.apis.models import Model, ModelType
|
||||||
|
from llama_stack.exceptions import UnsupportedModelError
|
||||||
from llama_stack.log import get_logger
|
from llama_stack.log import get_logger
|
||||||
from llama_stack.providers.datatypes import (
|
from llama_stack.providers.datatypes import (
|
||||||
HealthResponse,
|
HealthResponse,
|
||||||
|
@ -374,9 +375,7 @@ class OllamaInferenceAdapter(
|
||||||
f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'"
|
f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'"
|
||||||
)
|
)
|
||||||
return model
|
return model
|
||||||
raise ValueError(
|
raise UnsupportedModelError(model.provider_resource_id, available_models)
|
||||||
f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
|
|
||||||
)
|
|
||||||
model.provider_resource_id = provider_resource_id
|
model.provider_resource_id = provider_resource_id
|
||||||
|
|
||||||
return model
|
return model
|
||||||
|
|
|
@ -40,7 +40,6 @@ from llama_stack.apis.inference.inference import (
|
||||||
OpenAIMessageParam,
|
OpenAIMessageParam,
|
||||||
OpenAIResponseFormatParam,
|
OpenAIResponseFormatParam,
|
||||||
)
|
)
|
||||||
from llama_stack.apis.models.models import Model
|
|
||||||
from llama_stack.distribution.request_headers import NeedsRequestProviderData
|
from llama_stack.distribution.request_headers import NeedsRequestProviderData
|
||||||
from llama_stack.log import get_logger
|
from llama_stack.log import get_logger
|
||||||
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
|
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
|
||||||
|
@ -91,12 +90,6 @@ class LiteLLMOpenAIMixin(
|
||||||
async def shutdown(self):
|
async def shutdown(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
async def register_model(self, model: Model) -> Model:
|
|
||||||
model_id = self.get_provider_model_id(model.provider_resource_id)
|
|
||||||
if model_id is None:
|
|
||||||
raise ValueError(f"Unsupported model: {model.provider_resource_id}")
|
|
||||||
return model
|
|
||||||
|
|
||||||
def get_litellm_model_name(self, model_id: str) -> str:
|
def get_litellm_model_name(self, model_id: str) -> str:
|
||||||
# users may be using openai/ prefix in their model names. the openai/models.py did this by default.
|
# users may be using openai/ prefix in their model names. the openai/models.py did this by default.
|
||||||
# model_id.startswith("openai/") is for backwards compatibility.
|
# model_id.startswith("openai/") is for backwards compatibility.
|
||||||
|
|
|
@ -9,6 +9,7 @@ from typing import Any
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
from llama_stack.apis.models.models import ModelType
|
from llama_stack.apis.models.models import ModelType
|
||||||
|
from llama_stack.exceptions import UnsupportedModelError
|
||||||
from llama_stack.models.llama.sku_list import all_registered_models
|
from llama_stack.models.llama.sku_list import all_registered_models
|
||||||
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
|
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
|
||||||
from llama_stack.providers.utils.inference import (
|
from llama_stack.providers.utils.inference import (
|
||||||
|
@ -81,9 +82,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
|
||||||
|
|
||||||
async def register_model(self, model: Model) -> Model:
|
async def register_model(self, model: Model) -> Model:
|
||||||
if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)):
|
if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)):
|
||||||
raise ValueError(
|
raise UnsupportedModelError(model.provider_resource_id, self.alias_to_provider_id_map.keys())
|
||||||
f"Model '{model.provider_resource_id}' is not supported. Supported models are: {', '.join(self.alias_to_provider_id_map.keys())}"
|
|
||||||
)
|
|
||||||
provider_resource_id = self.get_provider_model_id(model.model_id)
|
provider_resource_id = self.get_provider_model_id(model.model_id)
|
||||||
if model.model_type == ModelType.embedding:
|
if model.model_type == ModelType.embedding:
|
||||||
# embedding models are always registered by their provider model id and does not need to be mapped to a llama model
|
# embedding models are always registered by their provider model id and does not need to be mapped to a llama model
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue