mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
fix: fixed import error (#1637)
# What does this PR do? The generate_response_prompt script had an import error; this PR fixes that error. Co-authored-by: sarthakdeshpande <sarthak.deshpande@engati.com>
This commit is contained in:
parent
fb418813fc
commit
dfa11a1216
1 changed file with 2 additions and 2 deletions
|
@ -18,7 +18,7 @@ import fire
|
|||
|
||||
from llama_stack.models.llama.sku_list import resolve_model
|
||||
from llama_stack.providers.inline.inference.meta_reference.config import MetaReferenceInferenceConfig
|
||||
from llama_stack.providers.inline.inference.meta_reference.generation import Llama
|
||||
from llama_stack.providers.inline.inference.meta_reference.llama3.generation import Llama3
|
||||
|
||||
THIS_DIR = Path(__file__).parent.resolve()
|
||||
|
||||
|
@ -41,7 +41,7 @@ def run_main(
|
|||
llama_model = resolve_model(model_id)
|
||||
if not llama_model:
|
||||
raise ValueError(f"Model {model_id} not found")
|
||||
generator = Llama.build(
|
||||
generator = Llama3.build(
|
||||
config=config,
|
||||
model_id=model_id,
|
||||
llama_model=llama_model,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue