Typo bugfix (rename variable x -> prompt)

See https://github.com/meta-llama/llama-stack/issues/16 for the report
Ashwin Bharambe 2024-08-14 13:47:27 -07:00
parent b6ccaf1778
commit 069d877210


@@ -274,7 +274,7 @@ class Llama:
         ):
             max_gen_len = self.model.params.max_seq_len - 1
 
-        prompt_tokens = self.tokenizer.encode(x, bos=True, eos=False)
+        prompt_tokens = self.tokenizer.encode(prompt, bos=True, eos=False)
 
         yield from self.generate(
             model_input=ModelInput(tokens=prompt_tokens),
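
For context: the removed line referenced a variable `x` that no longer exists in that scope, so calling the method raised NameError: name 'x' is not defined at runtime; an earlier rename of the parameter to `prompt` had missed this call site. Below is a minimal, self-contained sketch of the failure mode and the fix. The Tokenizer stub and the method name text_completion are hypothetical stand-ins for illustration, not the real llama-stack implementation; only the hunk above is from the actual source.

class Tokenizer:
    # Hypothetical stub: the real tokenizer maps text to token ids and
    # takes the same bos/eos flags seen in the diff above.
    def encode(self, s, bos, eos):
        ids = [ord(c) for c in s]
        return ([1] if bos else []) + ids + ([2] if eos else [])

class Llama:
    def __init__(self):
        self.tokenizer = Tokenizer()

    def text_completion(self, prompt):  # hypothetical method name
        # Before the fix, the call site still used the old variable name:
        #   prompt_tokens = self.tokenizer.encode(x, bos=True, eos=False)
        # which raises NameError: name 'x' is not defined, since no `x`
        # exists in this scope after the parameter was renamed.
        prompt_tokens = self.tokenizer.encode(prompt, bos=True, eos=False)
        return prompt_tokens

print(Llama().text_completion("hi"))  # -> [1, 104, 105]

Rename leftovers like this are purely static errors, so a linter such as pyflakes (or ruff's F821 "undefined name" check) would have flagged the stray `x` without running the code.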