From 069d877210582bfe1aac1e5b1bbe62670d22507c Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Wed, 14 Aug 2024 13:47:27 -0700
Subject: [PATCH] Typo bugfix (rename variable x -> prompt)

See https://github.com/meta-llama/llama-stack/issues/16 for the report
---
 llama_toolchain/inference/meta_reference/generation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_toolchain/inference/meta_reference/generation.py b/llama_toolchain/inference/meta_reference/generation.py
index 23cdbc2f6..9594311ef 100644
--- a/llama_toolchain/inference/meta_reference/generation.py
+++ b/llama_toolchain/inference/meta_reference/generation.py
@@ -274,7 +274,7 @@ class Llama:
         ):
             max_gen_len = self.model.params.max_seq_len - 1
 
-        prompt_tokens = self.tokenizer.encode(x, bos=True, eos=False)
+        prompt_tokens = self.tokenizer.encode(prompt, bos=True, eos=False)
 
         yield from self.generate(
            model_input=ModelInput(tokens=prompt_tokens),
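
Note: a minimal repro sketch of the bug this patch fixes, for readers
outside the repo. The text_completion-style signature and the toy
tokenizer below are illustrative assumptions, not code quoted from
generation.py. Before the rename, `x` was an unbound name in the method
scope, so the encode call raised NameError on every invocation.

    class Tokenizer:
        def encode(self, s: str, bos: bool, eos: bool) -> list[int]:
            # Toy stand-in: a real tokenizer maps text to vocabulary ids.
            tokens = [ord(c) for c in s]
            return ([1] if bos else []) + tokens + ([2] if eos else [])

    class Llama:
        def __init__(self) -> None:
            self.tokenizer = Tokenizer()

        def text_completion(self, prompt: str) -> list[int]:
            # Pre-patch this line read `self.tokenizer.encode(x, ...)`;
            # no `x` exists in this scope, so the call raised NameError.
            return self.tokenizer.encode(prompt, bos=True, eos=False)

    print(Llama().text_completion("hi"))  # -> [1, 104, 105]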