From dfa11a1216a43652421b8a198f748675a06a6233 Mon Sep 17 00:00:00 2001
From: Sarthak Deshpande <60317842+cheesecake100201@users.noreply.github.com>
Date: Tue, 18 Mar 2025 02:34:47 +0530
Subject: [PATCH] fix: fixed import error (#1637)

# What does this PR do?
The `generate_prompt_format.py` script imported `Llama` from a module path that no longer exists, which raised an `ImportError` at startup. This PR updates the import to `Llama3` from the `llama3` submodule and adjusts the call site to match.

Co-authored-by: sarthakdeshpande
---
 llama_stack/scripts/generate_prompt_format.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama_stack/scripts/generate_prompt_format.py b/llama_stack/scripts/generate_prompt_format.py
index 338b23f3e..ec4c5e9be 100644
--- a/llama_stack/scripts/generate_prompt_format.py
+++ b/llama_stack/scripts/generate_prompt_format.py
@@ -18,7 +18,7 @@ import fire
 from llama_stack.models.llama.sku_list import resolve_model
 from llama_stack.providers.inline.inference.meta_reference.config import MetaReferenceInferenceConfig
-from llama_stack.providers.inline.inference.meta_reference.generation import Llama
+from llama_stack.providers.inline.inference.meta_reference.llama3.generation import Llama3

 THIS_DIR = Path(__file__).parent.resolve()
@@ -41,7 +41,7 @@ def run_main(
     llama_model = resolve_model(model_id)
     if not llama_model:
         raise ValueError(f"Model {model_id} not found")
-    generator = Llama.build(
+    generator = Llama3.build(
         config=config,
         model_id=model_id,
         llama_model=llama_model,
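
For reviewers, a minimal sketch of the corrected call site, using only the imports and the `build(...)` keyword arguments visible in the diff above; the `build_generator` helper is illustrative and not part of this patch:

```python
# Illustrative sketch only: mirrors the corrected import and call site from
# the diff above. Assumes a llama-stack checkout where these modules exist.
from llama_stack.models.llama.sku_list import resolve_model
from llama_stack.providers.inline.inference.meta_reference.config import MetaReferenceInferenceConfig
from llama_stack.providers.inline.inference.meta_reference.llama3.generation import Llama3


def build_generator(model_id: str, config: MetaReferenceInferenceConfig):
    # resolve_model() returns a falsy value for unknown model IDs, mirroring
    # the existing check in generate_prompt_format.py.
    llama_model = resolve_model(model_id)
    if not llama_model:
        raise ValueError(f"Model {model_id} not found")
    # Llama3.build replaces the removed Llama.build entry point; the keyword
    # arguments are taken verbatim from the patched script.
    return Llama3.build(config=config, model_id=model_id, llama_model=llama_model)
```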