migrate model to Resource and new registration signature (#410)

* resource-oriented object design for models

* add back llama_model field

* working tests

* register signature fix

* address feedback

---------

Co-authored-by: Dinesh Yeduguru <dineshyv@fb.com>
Dinesh Yeduguru 2024-11-08 16:12:57 -08:00 committed by GitHub
parent bd0622ef10
commit ec644d3418
17 changed files with 99 additions and 90 deletions


@@ -216,7 +216,7 @@ class EmbeddingsResponse(BaseModel):
class ModelStore(Protocol):
def get_model(self, identifier: str) -> ModelDef: ...
def get_model(self, identifier: str) -> Model: ...
@runtime_checkable


@@ -26,16 +26,16 @@ class ModelsClient(Models):
async def shutdown(self) -> None:
pass
async def list_models(self) -> List[ModelDefWithProvider]:
async def list_models(self) -> List[Model]:
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}/models/list",
headers={"Content-Type": "application/json"},
)
response.raise_for_status()
return [ModelDefWithProvider(**x) for x in response.json()]
return [Model(**x) for x in response.json()]
async def register_model(self, model: ModelDefWithProvider) -> None:
async def register_model(self, model: Model) -> None:
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.base_url}/models/register",
@@ -46,7 +46,7 @@ class ModelsClient(Models):
)
response.raise_for_status()
async def get_model(self, identifier: str) -> Optional[ModelDefWithProvider]:
async def get_model(self, identifier: str) -> Optional[Model]:
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.base_url}/models/get",
@@ -59,7 +59,7 @@ class ModelsClient(Models):
j = response.json()
if j is None:
return None
return ModelDefWithProvider(**j)
return Model(**j)
async def run_main(host: str, port: int, stream: bool):
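For context, a minimal usage sketch of the updated client follows; the import path, the `base_url` constructor argument, and the server at `localhost:5000` are assumptions, since the client's constructor is not shown in this hunk.

```python
import asyncio

from llama_stack.apis.models.client import ModelsClient  # import path is an assumption


async def main() -> None:
    # Assumes a Llama Stack server is already running on localhost:5000.
    client = ModelsClient("http://localhost:5000")
    models = await client.list_models()
    for m in models:
        print(m.identifier, m.provider_id)


asyncio.run(main())
```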


@@ -7,37 +7,33 @@
from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from pydantic import Field
from llama_stack.apis.resource import Resource, ResourceType
class ModelDef(BaseModel):
identifier: str = Field(
description="A unique name for the model type",
)
llama_model: str = Field(
description="Pointer to the underlying core Llama family model. Each model served by Llama Stack must have a core Llama model.",
)
@json_schema_type
class Model(Resource):
type: Literal[ResourceType.model.value] = ResourceType.model.value
metadata: Dict[str, Any] = Field(
default_factory=dict,
description="Any additional metadata for this model",
)
@json_schema_type
class ModelDefWithProvider(ModelDef):
type: Literal["model"] = "model"
provider_id: str = Field(
description="The provider ID for this model",
)
@runtime_checkable
class Models(Protocol):
@webmethod(route="/models/list", method="GET")
async def list_models(self) -> List[ModelDefWithProvider]: ...
async def list_models(self) -> List[Model]: ...
@webmethod(route="/models/get", method="GET")
async def get_model(self, identifier: str) -> Optional[ModelDefWithProvider]: ...
async def get_model(self, identifier: str) -> Optional[Model]: ...
@webmethod(route="/models/register", method="POST")
async def register_model(self, model: ModelDefWithProvider) -> None: ...
async def register_model(
self,
model_id: str,
provider_model_id: Optional[str] = None,
provider_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> Model: ...
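The protocol change above replaces the old `register_model(model: ModelDefWithProvider)` call with keyword arguments and a `Model` return value. Below is a minimal, self-contained sketch of the new signature; `ToyModels` and its dict return value are illustrative stand-ins, not the real implementation from this diff.

```python
import asyncio
from typing import Any, Dict, Optional


class ToyModels:
    """Illustrative in-memory stand-in for the Models protocol above."""

    def __init__(self) -> None:
        self.registry: Dict[str, Dict[str, Any]] = {}

    async def register_model(
        self,
        model_id: str,
        provider_model_id: Optional[str] = None,
        provider_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        # Mirror the defaulting described in this change:
        # provider_model_id falls back to model_id, metadata to {}.
        entry = {
            "identifier": model_id,
            "provider_resource_id": provider_model_id or model_id,
            "provider_id": provider_id or "inline",
            "metadata": metadata or {},
        }
        self.registry[model_id] = entry
        return entry


async def main() -> None:
    models = ToyModels()
    registered = await models.register_model(
        model_id="Llama3.2-3B-Instruct",
        metadata={"description": "example"},
    )
    print(registered)


asyncio.run(main())
```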


@@ -31,7 +31,7 @@ RoutingKey = Union[str, List[str]]
RoutableObject = Union[
ModelDef,
Model,
Shield,
MemoryBankDef,
DatasetDef,
@@ -41,7 +41,7 @@ RoutableObject = Union[
RoutableObjectWithProvider = Annotated[
Union[
ModelDefWithProvider,
Model,
Shield,
MemoryBankDefWithProvider,
DatasetDefWithProvider,


@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, AsyncGenerator, Dict, List
from typing import Any, AsyncGenerator, Dict, List, Optional
from llama_stack.apis.datasetio.datasetio import DatasetIO
from llama_stack.distribution.datatypes import RoutingTable
@@ -71,8 +71,16 @@ class InferenceRouter(Inference):
async def shutdown(self) -> None:
pass
async def register_model(self, model: ModelDef) -> None:
await self.routing_table.register_model(model)
async def register_model(
self,
model_id: str,
provider_model_id: Optional[str] = None,
provider_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> None:
await self.routing_table.register_model(
model_id, provider_model_id, provider_id, metadata
)
async def chat_completion(
self,


@@ -84,8 +84,6 @@ class CommonRoutingTableImpl(RoutingTable):
api = get_impl_api(p)
if api == Api.inference:
p.model_store = self
models = await p.list_models()
await add_objects(models, pid, ModelDefWithProvider)
elif api == Api.safety:
p.shield_store = self
@@ -198,14 +196,39 @@ class CommonRoutingTableImpl(RoutingTable):
class ModelsRoutingTable(CommonRoutingTableImpl, Models):
async def list_models(self) -> List[ModelDefWithProvider]:
async def list_models(self) -> List[Model]:
return await self.get_all_with_type("model")
async def get_model(self, identifier: str) -> Optional[ModelDefWithProvider]:
async def get_model(self, identifier: str) -> Optional[Model]:
return await self.get_object_by_identifier(identifier)
async def register_model(self, model: ModelDefWithProvider) -> None:
async def register_model(
self,
model_id: str,
provider_model_id: Optional[str] = None,
provider_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
) -> Model:
if provider_model_id is None:
provider_model_id = model_id
if provider_id is None:
# If provider_id not specified, use the only provider if it supports this model
if len(self.impls_by_provider_id) == 1:
provider_id = list(self.impls_by_provider_id.keys())[0]
else:
raise ValueError(
"No provider specified and multiple providers available. Please specify a provider_id. Available providers: {self.impls_by_provider_id.keys()}"
)
if metadata is None:
metadata = {}
model = Model(
identifier=model_id,
provider_resource_id=provider_model_id,
provider_id=provider_id,
metadata=metadata,
)
await self.register_object(model)
return model
class ShieldsRoutingTable(CommonRoutingTableImpl, Shields):
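The defaulting rule added above (fall back to the single configured provider, otherwise require an explicit `provider_id`) can be restated in isolation; this standalone sketch uses illustrative names and is not part of the diff.

```python
from typing import Dict, Optional


def resolve_provider_id(
    provider_id: Optional[str], impls_by_provider_id: Dict[str, object]
) -> str:
    # An explicit provider wins; otherwise only a single configured provider
    # can serve as the implicit default.
    if provider_id is not None:
        return provider_id
    if len(impls_by_provider_id) == 1:
        return next(iter(impls_by_provider_id))
    raise ValueError(
        "No provider specified and multiple providers available. "
        f"Available providers: {list(impls_by_provider_id.keys())}"
    )


print(resolve_provider_id(None, {"ollama": object()}))  # -> ollama
# resolve_provider_id(None, {"ollama": object(), "vllm": object()})  # raises ValueError
```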


@@ -9,7 +9,7 @@ import os
import pytest
import pytest_asyncio
from llama_stack.distribution.store import * # noqa F403
from llama_stack.apis.inference import ModelDefWithProvider
from llama_stack.apis.inference import Model
from llama_stack.apis.memory_banks import VectorMemoryBankDef
from llama_stack.providers.utils.kvstore import kvstore_impl, SqliteKVStoreConfig
from llama_stack.distribution.datatypes import * # noqa F403
@@ -50,9 +50,8 @@ def sample_bank():
@pytest.fixture
def sample_model():
return ModelDefWithProvider(
return Model(
identifier="test_model",
llama_model="Llama3.2-3B-Instruct",
provider_id="test-provider",
)
@@ -84,7 +83,6 @@ async def test_basic_registration(registry, sample_bank, sample_model):
assert len(results) == 1
result_model = results[0]
assert result_model.identifier == sample_model.identifier
assert result_model.llama_model == sample_model.llama_model
assert result_model.provider_id == sample_model.provider_id
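For reference, constructing the new `Model` resource directly uses the same fields the routing table passes above; the exact field requirements and defaults of the `Resource` base class are an assumption, since it is not shown in this diff.

```python
from llama_stack.apis.models import Model

# Field values are placeholders; provider_resource_id mirrors the routing
# table's construction above.
model = Model(
    identifier="test_model",
    provider_resource_id="test_model",
    provider_id="test-provider",
    metadata={},
)
print(model.type)  # -> "model"
```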


@@ -14,7 +14,7 @@ from pydantic import BaseModel, Field
from llama_stack.apis.datasets import DatasetDef
from llama_stack.apis.eval_tasks import EvalTaskDef
from llama_stack.apis.memory_banks import MemoryBankDef
from llama_stack.apis.models import ModelDef
from llama_stack.apis.models import Model
from llama_stack.apis.scoring_functions import ScoringFnDef
from llama_stack.apis.shields import Shield
@@ -43,9 +43,7 @@ class Api(Enum):
class ModelsProtocolPrivate(Protocol):
async def list_models(self) -> List[ModelDef]: ...
async def register_model(self, model: ModelDef) -> None: ...
async def register_model(self, model: Model) -> None: ...
class ShieldsProtocolPrivate(Protocol):


@@ -12,7 +12,7 @@ from llama_models.sku_list import resolve_model
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.inference import * # noqa: F403
from llama_stack.providers.datatypes import ModelDef, ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
from llama_stack.providers.utils.inference.prompt_adapter import (
convert_image_media_to_url,
@@ -45,16 +45,11 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
else:
self.generator = Llama.build(self.config)
async def register_model(self, model: ModelDef) -> None:
raise ValueError("Dynamic model registration is not supported")
async def list_models(self) -> List[ModelDef]:
return [
ModelDef(
identifier=self.model.descriptor(),
llama_model=self.model.descriptor(),
async def register_model(self, model: Model) -> None:
if model.identifier != self.model.descriptor():
raise ValueError(
f"Model mismatch: {model.identifier} != {self.model.descriptor()}"
)
]
async def shutdown(self) -> None:
if self.config.create_distributed_process_group:
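The meta-reference provider now validates the registered identifier instead of listing models. A standalone restatement of that check (illustrative function, not part of the diff):

```python
def check_registration(requested: str, served: str) -> None:
    # A provider that serves exactly one model rejects any other identifier.
    if requested != served:
        raise ValueError(f"Model mismatch: {requested} != {served}")


check_registration("Llama3.1-8B-Instruct", "Llama3.1-8B-Instruct")  # ok
# check_registration("Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct")  # raises ValueError
```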


@@ -20,7 +20,7 @@ from vllm.sampling_params import SamplingParams as VLLMSamplingParams
from llama_stack.apis.inference import * # noqa: F403
from llama_stack.providers.datatypes import ModelDef, ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
from llama_stack.providers.utils.inference.openai_compat import (
OpenAICompatCompletionChoice,
OpenAICompatCompletionResponse,
@@ -83,19 +83,11 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
if self.engine:
self.engine.shutdown_background_loop()
async def register_model(self, model: ModelDef) -> None:
async def register_model(self, model: Model) -> None:
raise ValueError(
"You cannot dynamically add a model to a running vllm instance"
)
async def list_models(self) -> List[ModelDef]:
return [
ModelDef(
identifier=self.config.model,
llama_model=self.config.model,
)
]
def _sampling_params(self, sampling_params: SamplingParams) -> VLLMSamplingParams:
if sampling_params is None:
return VLLMSamplingParams(max_tokens=self.config.max_tokens)


@@ -15,7 +15,7 @@ from llama_models.llama3.api.tokenizer import Tokenizer
from ollama import AsyncClient
from llama_stack.apis.inference import * # noqa: F403
from llama_stack.providers.datatypes import ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
from llama_stack.providers.utils.inference.openai_compat import (
get_sampling_options,
@@ -65,10 +65,11 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
async def shutdown(self) -> None:
pass
async def register_model(self, model: ModelDef) -> None:
raise ValueError("Dynamic model registration is not supported")
async def register_model(self, model: Model) -> None:
if model.identifier not in OLLAMA_SUPPORTED_MODELS:
raise ValueError(f"Model {model.identifier} is not supported by Ollama")
async def list_models(self) -> List[ModelDef]:
async def list_models(self) -> List[Model]:
ollama_to_llama = {v: k for k, v in OLLAMA_SUPPORTED_MODELS.items()}
ret = []
@@ -80,9 +81,8 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
llama_model = ollama_to_llama[r["model"]]
ret.append(
ModelDef(
Model(
identifier=llama_model,
llama_model=llama_model,
metadata={
"ollama_model": r["model"],
},
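The Ollama adapter's `list_models` inverts its supported-models mapping so a served Ollama tag can be reported under its stack identifier. A standalone illustration (the dictionary contents are placeholders, not the real `OLLAMA_SUPPORTED_MODELS`):

```python
# Placeholder mapping from stack identifier to Ollama model tag.
OLLAMA_SUPPORTED_MODELS = {"Llama3.2-3B-Instruct": "llama3.2:3b-instruct-fp16"}

# Invert it so an Ollama tag maps back to the stack identifier.
ollama_to_llama = {v: k for k, v in OLLAMA_SUPPORTED_MODELS.items()}
print(ollama_to_llama["llama3.2:3b-instruct-fp16"])  # -> Llama3.2-3B-Instruct
```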


@@ -14,7 +14,7 @@ class SampleInferenceImpl(Inference):
def __init__(self, config: SampleConfig):
self.config = config
async def register_model(self, model: ModelDef) -> None:
async def register_model(self, model: Model) -> None:
# these are the model names the Llama Stack will use to route requests to this provider
# perform validation here if necessary
pass


@@ -16,7 +16,7 @@ from llama_models.sku_list import all_registered_models
from llama_stack.apis.inference import * # noqa: F403
from llama_stack.apis.models import * # noqa: F403
from llama_stack.providers.datatypes import ModelDef, ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
from llama_stack.providers.utils.inference.openai_compat import (
get_sampling_options,
@@ -50,14 +50,14 @@ class _HfAdapter(Inference, ModelsProtocolPrivate):
if model.huggingface_repo
}
async def register_model(self, model: ModelDef) -> None:
raise ValueError("Model registration is not supported for HuggingFace models")
async def register_model(self, model: Model) -> None:
pass
async def list_models(self) -> List[ModelDef]:
async def list_models(self) -> List[Model]:
repo = self.model_id
identifier = self.huggingface_repo_to_llama_model_id[repo]
return [
ModelDef(
Model(
identifier=identifier,
llama_model=identifier,
metadata={


@@ -13,7 +13,7 @@ from llama_models.sku_list import all_registered_models, resolve_model
from openai import OpenAI
from llama_stack.apis.inference import * # noqa: F403
from llama_stack.providers.datatypes import ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
from llama_stack.providers.utils.inference.openai_compat import (
get_sampling_options,
@@ -44,13 +44,13 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
async def initialize(self) -> None:
self.client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)
async def register_model(self, model: ModelDef) -> None:
async def register_model(self, model: Model) -> None:
raise ValueError("Model registration is not supported for vLLM models")
async def shutdown(self) -> None:
pass
async def list_models(self) -> List[ModelDef]:
async def list_models(self) -> List[Model]:
models = []
for model in self.client.models.list():
repo = model.id
@@ -60,7 +60,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
identifier = self.huggingface_repo_to_llama_model_id[repo]
models.append(
ModelDef(
Model(
identifier=identifier,
llama_model=identifier,
)


@@ -153,7 +153,7 @@ INFERENCE_FIXTURES = [
@pytest_asyncio.fixture(scope="session")
async def inference_stack(request):
async def inference_stack(request, inference_model):
fixture_name = request.param
inference_fixture = request.getfixturevalue(f"inference_{fixture_name}")
impls = await resolve_impls_for_test_v2(
@@ -162,4 +162,9 @@ async def inference_stack(request):
inference_fixture.provider_data,
)
await impls[Api.models].register_model(
model_id=inference_model,
provider_model_id=inference_fixture.providers[0].provider_id,
)
return (impls[Api.inference], impls[Api.models])


@@ -69,7 +69,7 @@ class TestInference:
response = await models_impl.list_models()
assert isinstance(response, list)
assert len(response) >= 1
assert all(isinstance(model, ModelDefWithProvider) for model in response)
assert all(isinstance(model, Model) for model in response)
model_def = None
for model in response:


@@ -4,11 +4,11 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Dict, List
from typing import Dict
from llama_models.sku_list import resolve_model
from llama_stack.providers.datatypes import ModelDef, ModelsProtocolPrivate
from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
class ModelRegistryHelper(ModelsProtocolPrivate):
@@ -28,14 +28,8 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
return self.stack_to_provider_models_map[identifier]
async def register_model(self, model: ModelDef) -> None:
async def register_model(self, model: Model) -> None:
if model.identifier not in self.stack_to_provider_models_map:
raise ValueError(
f"Unsupported model {model.identifier}. Supported models: {self.stack_to_provider_models_map.keys()}"
)
async def list_models(self) -> List[ModelDef]:
models = []
for llama_model, provider_model in self.stack_to_provider_models_map.items():
models.append(ModelDef(identifier=llama_model, llama_model=llama_model))
return models
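With `list_models` removed, `ModelRegistryHelper` keeps only the validation path in `register_model`. An illustrative standalone version of that check (the mapping contents are placeholders, not a real provider table):

```python
from typing import Dict

stack_to_provider_models_map: Dict[str, str] = {
    "Llama3.2-3B-Instruct": "provider/llama-3.2-3b-instruct",
}


def validate_registration(identifier: str) -> str:
    # Reject identifiers the provider has no mapping for, as register_model does above.
    if identifier not in stack_to_provider_models_map:
        raise ValueError(
            f"Unsupported model {identifier}. "
            f"Supported models: {list(stack_to_provider_models_map.keys())}"
        )
    return stack_to_provider_models_map[identifier]


print(validate_registration("Llama3.2-3B-Instruct"))
```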