fix: meta reference + llama4 tokenizer fix

This commit is contained in:
Ashwin Bharambe 2025-04-09 00:46:02 -07:00
parent 10882bf478
commit 8001c30a4f
2 changed files with 5 additions and 3 deletions

View file

@@ -259,7 +259,7 @@ class Llama3Generator:
temperature, top_p = _infer_sampling_params(sampling_params)
for result in self.inner_generator.generate(
-            llm_inputs=[self.formatter.encode_content(request.content)],
+            model_inputs=[self.formatter.encode_content(request.content)],
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
@@ -284,7 +284,7 @@ class Llama3Generator:
temperature, top_p = _infer_sampling_params(sampling_params)
for result in self.inner_generator.generate(
-            llm_inputs=[self.formatter.encode_dialog_prompt(request.messages, _infer_tool_prompt_format(request))],
+            model_inputs=[self.formatter.encode_dialog_prompt(request.messages, _infer_tool_prompt_format(request))],
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,