forked from phoenix-oss/llama-stack-mirror
Typo bugfix (rename variable x -> prompt)
See https://github.com/meta-llama/llama-stack/issues/16 for the report
parent b6ccaf1778
commit 069d877210
1 changed file with 1 addition and 1 deletion
@@ -274,7 +274,7 @@ class Llama:
     ):
         max_gen_len = self.model.params.max_seq_len - 1

-        prompt_tokens = self.tokenizer.encode(x, bos=True, eos=False)
+        prompt_tokens = self.tokenizer.encode(prompt, bos=True, eos=False)

         yield from self.generate(
             model_input=ModelInput(tokens=prompt_tokens),
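For context, here is a minimal, self-contained sketch of the failure mode and the fix. The `completion` method name and the stub `Tokenizer`, `ModelInput`, and `generate` classes are stand-ins invented for illustration; only the `encode(...)` call mirrors the line touched by this commit. Before the fix, the method body referenced a name `x` that, assuming it was not bound anywhere else in scope, would make every call fail with `NameError: name 'x' is not defined`.

# Minimal sketch of the bug and the fix. Everything except the
# tokenizer.encode(...) line is a stand-in, not llama-stack's real API.
from dataclasses import dataclass
from typing import Iterator


@dataclass
class ModelInput:
    tokens: list[int]


class Tokenizer:
    def encode(self, text: str, bos: bool, eos: bool) -> list[int]:
        # Stand-in tokenizer: one "token" per character.
        return [ord(c) for c in text]


class Llama:
    def __init__(self) -> None:
        self.tokenizer = Tokenizer()

    def completion(self, prompt: str) -> Iterator[int]:
        # Before the fix this line read `self.tokenizer.encode(x, ...)`;
        # `x` was not a bound name in the method, so the call raised
        # NameError at runtime. Using the `prompt` parameter fixes it.
        prompt_tokens = self.tokenizer.encode(prompt, bos=True, eos=False)
        yield from self.generate(model_input=ModelInput(tokens=prompt_tokens))

    def generate(self, model_input: ModelInput) -> Iterator[int]:
        # Stand-in generator: echo the prompt tokens back.
        yield from model_input.tokens


if __name__ == "__main__":
    print(list(Llama().completion("hi")))  # [104, 105]

Running the sketch prints the echoed token ids; swapping `prompt` back to `x` in `completion` reproduces the NameError the commit fixes.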