From ce17f204686dd2bf1fdbbba1279baa03a854ace4 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Wed, 18 Sep 2024 15:22:36 -0700
Subject: [PATCH] wip models

---
 llama_stack/apis/models/models.py                           | 5 -----
 llama_stack/providers/impls/meta_reference/models/models.py | 2 ++
 2 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/llama_stack/apis/models/models.py b/llama_stack/apis/models/models.py
index bebd90bc1..5258063da 100644
--- a/llama_stack/apis/models/models.py
+++ b/llama_stack/apis/models/models.py
@@ -59,8 +59,3 @@ class Models(Protocol):
 
     @webmethod(route="/models/get", method="POST")
     async def get_model(self, model_id: str) -> ModelsGetResponse: ...
-
-    @webmethod(route="/models/register")
-    async def register_model(
-        self, model_id: str, api: str, provider_spec: Dict[str, str]
-    ) -> ModelsRegisterResponse: ...
diff --git a/llama_stack/providers/impls/meta_reference/models/models.py b/llama_stack/providers/impls/meta_reference/models/models.py
index a7843f0fc..c3a2048c0 100644
--- a/llama_stack/providers/impls/meta_reference/models/models.py
+++ b/llama_stack/providers/impls/meta_reference/models/models.py
@@ -39,6 +39,8 @@ class MetaReferenceModelsImpl(Models):
         self.safety_api = safety_api
         self.models_list = []
 
+        model = get_model_id_from_api(self.inference_api)
+
         # TODO, make the inference route provider and use router provider to do the lookup dynamically
         if isinstance(
             self.inference_api,