From 9e5b7d5c9e93dc9117d6647267fa58cbea6b25f5 Mon Sep 17 00:00:00 2001
From: Botao Chen
Date: Wed, 18 Dec 2024 14:32:23 -0800
Subject: [PATCH] address comment

---
 .../providers/inline/inference/meta_reference/__init__.py  | 1 +
 .../providers/inline/inference/meta_reference/inference.py | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/llama_stack/providers/inline/inference/meta_reference/__init__.py b/llama_stack/providers/inline/inference/meta_reference/__init__.py
index 18dc61d4a..2ae3e7a16 100644
--- a/llama_stack/providers/inline/inference/meta_reference/__init__.py
+++ b/llama_stack/providers/inline/inference/meta_reference/__init__.py
@@ -16,5 +16,6 @@ async def get_provider_impl(
     from .inference import MetaReferenceInferenceImpl
 
     impl = MetaReferenceInferenceImpl(config)
+    await impl.initialize()
     return impl
diff --git a/llama_stack/providers/inline/inference/meta_reference/inference.py b/llama_stack/providers/inline/inference/meta_reference/inference.py
index f2354aebb..d89bb21f7 100644
--- a/llama_stack/providers/inline/inference/meta_reference/inference.py
+++ b/llama_stack/providers/inline/inference/meta_reference/inference.py
@@ -73,6 +73,9 @@ class MetaReferenceInferenceImpl(
         self.model_id = None
         self.llama_model = None
 
+    async def initialize(self) -> None:
+        pass
+
     async def load_model(self, model_id, llama_model) -> None:
         log.info(f"Loading model `{model_id}`")
         if self.config.create_distributed_process_group: