Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
# What does this PR do?

Rather than a single `LLAMA_STACK_VERSION`, we need separate `_V1`, `_V1ALPHA`, and `_V1BETA` constants. This also necessitated adding a `level` to `WebMethod` so that routing can be handled properly. For backwards compatibility, the `v1` routes are kept around and marked as `deprecated`; when used, the server logs a deprecation warning.

Deprecation log:

<img width="1224" height="134" alt="Screenshot 2025-09-25 at 2 43 36 PM" src="https://github.com/user-attachments/assets/0cc7c245-dafc-48f0-be99-269fb9a686f9" />

This PR moves:

1. `post_training` to `v1alpha`, as it is under heavy development and not near its final state.
2. `eval` to `v1alpha`: job scheduling is not implemented, and the API relies heavily on the `datasetio` API, which is itself under development; specific routes are missing implementations, indicating their structure might still change. Additionally, `eval` depends on the `inference` API, which is going to be deprecated, so `eval` will likely need a major API surface change to conform to using completions properly.

Implements the leveling proposed in #3317.

Note: integration tests will fail until the SDK is regenerated with `v1alpha/inference` as opposed to `v1/inference`.

## Test Plan

Existing tests should pass with the newly generated schema. Conformance will also pass, as these routes are not the ones we currently test for stability.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
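For illustration, here is a minimal sketch of what a moved route could look like under this scheme. The `LLAMA_STACK_API_V1ALPHA` constant and the `deprecated` marking are taken from the description above; the `PostTraining` protocol, its method, and the route shown here are hypothetical stand-ins, not the actual API surface:

```python
# Sketch only: a post_training-style route after leveling. Assumes
# LLAMA_STACK_API_V1ALPHA is exported from llama_stack.apis.version alongside
# LLAMA_STACK_API_V1, and that webmethod accepts a `deprecated` flag, per the
# PR description; the method and route here are illustrative.
from typing import Protocol

from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
from llama_stack.schema_utils import webmethod


class PostTraining(Protocol):
    # Preferred route, served under the /v1alpha prefix.
    @webmethod(route="/post-training/jobs", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def get_training_jobs(self) -> list[str]: ...

    # The old /v1 route is kept for backwards compatibility; the server logs
    # a deprecation warning whenever it is hit.
    @webmethod(route="/post-training/jobs", method="GET", level=LLAMA_STACK_API_V1, deprecated=True)
    async def get_training_jobs_deprecated(self) -> list[str]: ...
```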
163 lines
4.9 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import StrEnum
from typing import Any, Literal, Protocol, runtime_checkable

from pydantic import BaseModel, ConfigDict, Field, field_validator

from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.schema_utils import json_schema_type, webmethod


class CommonModelFields(BaseModel):
    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Any additional metadata for this model",
    )


@json_schema_type
class ModelType(StrEnum):
    """Enumeration of supported model types in Llama Stack.

    :cvar llm: Large language model for text generation and completion
    :cvar embedding: Embedding model for converting text to vector representations
    """

    llm = "llm"
    embedding = "embedding"


@json_schema_type
class Model(CommonModelFields, Resource):
    """A model resource representing an AI model registered in Llama Stack.

    :param type: The resource type, always 'model' for model resources
    :param model_type: The type of model (LLM or embedding model)
    :param metadata: Any additional metadata for this model
    :param identifier: Unique identifier for this resource in llama stack
    :param provider_resource_id: Unique identifier for this resource in the provider
    :param provider_id: ID of the provider that owns this resource
    """

    type: Literal[ResourceType.model] = ResourceType.model

    @property
    def model_id(self) -> str:
        return self.identifier

    @property
    def provider_model_id(self) -> str:
        assert self.provider_resource_id is not None, "Provider resource ID must be set"
        return self.provider_resource_id

    model_config = ConfigDict(protected_namespaces=())

    model_type: ModelType = Field(default=ModelType.llm)

    @field_validator("provider_resource_id")
    @classmethod
    def validate_provider_resource_id(cls, v):
        if v is None:
            raise ValueError("provider_resource_id cannot be None")
        return v


class ModelInput(CommonModelFields):
    model_id: str
    provider_id: str | None = None
    provider_model_id: str | None = None
    model_type: ModelType | None = ModelType.llm
    model_config = ConfigDict(protected_namespaces=())


class ListModelsResponse(BaseModel):
    data: list[Model]


@json_schema_type
class OpenAIModel(BaseModel):
    """A model from OpenAI.

    :param id: The ID of the model
    :param object: The object type, which will be "model"
    :param created: The Unix timestamp in seconds when the model was created
    :param owned_by: The owner of the model
    """

    id: str
    object: Literal["model"] = "model"
    created: int
    owned_by: str


class OpenAIListModelsResponse(BaseModel):
    data: list[OpenAIModel]


@runtime_checkable
@trace_protocol
class Models(Protocol):
    @webmethod(route="/models", method="GET", level=LLAMA_STACK_API_V1)
    async def list_models(self) -> ListModelsResponse:
        """List all models.

        :returns: A ListModelsResponse.
        """
        ...

    @webmethod(route="/openai/v1/models", method="GET", level=LLAMA_STACK_API_V1)
    async def openai_list_models(self) -> OpenAIListModelsResponse:
        """List models using the OpenAI API.

        :returns: An OpenAIListModelsResponse.
        """
        ...

    @webmethod(route="/models/{model_id:path}", method="GET", level=LLAMA_STACK_API_V1)
    async def get_model(
        self,
        model_id: str,
    ) -> Model:
        """Get a model by its identifier.

        :param model_id: The identifier of the model to get.
        :returns: A Model.
        """
        ...

    @webmethod(route="/models", method="POST", level=LLAMA_STACK_API_V1)
    async def register_model(
        self,
        model_id: str,
        provider_model_id: str | None = None,
        provider_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        model_type: ModelType | None = None,
    ) -> Model:
        """Register a model.

        :param model_id: The identifier of the model to register.
        :param provider_model_id: The identifier of the model in the provider.
        :param provider_id: The identifier of the provider.
        :param metadata: Any additional metadata for this model.
        :param model_type: The type of model to register.
        :returns: A Model.
        """
        ...

    @webmethod(route="/models/{model_id:path}", method="DELETE", level=LLAMA_STACK_API_V1)
    async def unregister_model(
        self,
        model_id: str,
    ) -> None:
        """Unregister a model.

        :param model_id: The identifier of the model to unregister.
        """
        ...
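For context, a minimal usage sketch of the routes this protocol defines, assuming the `llama-stack-client` SDK and a server on the default port 8321; the exact client method and attribute names are assumptions here, not taken from this file:

```python
# Usage sketch, not part of the file above: exercising the Models API through
# the llama-stack client SDK. Assumes a running server at the default port.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# GET /v1/models -> list_models
for model in client.models.list():
    print(model.identifier, model.model_type)

# POST /v1/models -> register_model
client.models.register(
    model_id="my-model",
    provider_id="ollama",
    model_type="llm",
)
```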