fix: meta ref inference (#2022)

The generator's completion API takes a batch of requests and yields batched token results; the single-request completion path now wraps its request in a one-element list and unwraps the first result per step. Tested with:

MAX_BATCH_SIZE=10 LLAMA_MODELS_DEBUG=1 LLAMA_STACK_PORT=5002 \
  LLAMA_STACK_LOGGING='all=info' \
  llama stack run meta-reference-gpu \
  --env INFERENCE_MODEL=meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --env INFERENCE_CHECKPOINT_DIR=...

LLAMA_STACK_CONFIG=http://localhost:5002/ pytest -s -v \
  tests/integration/inference \
  --safety-shield meta-llama/Llama-Guard-3-8B \
  --vision-model meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct

Co-authored-by: Eric Huang <erichuang@fb.com>
ehhuang committed 2025-04-24 13:03:35 -07:00 (via GitHub)
parent a5d6ab16b2
commit 7ed137e963
2 changed files with 11 additions and 4 deletions

@@ -253,7 +253,8 @@ class MetaReferenceInferenceImpl(
         def impl():
             stop_reason = None

-            for token_result in self.generator.completion(request):
+            for token_results in self.generator.completion([request]):
+                token_result = token_results[0]
                 if token_result.token == tokenizer.eot_id:
                     stop_reason = StopReason.end_of_turn
                     text = ""

@@ -69,7 +69,10 @@ class CancelSentinel(BaseModel):

 class TaskRequest(BaseModel):
     type: Literal[ProcessingMessageName.task_request] = ProcessingMessageName.task_request
-    task: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]]
+    task: Tuple[
+        str,
+        List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+    ]


 class TaskResponse(BaseModel):
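
The annotation reformat above is cosmetic; the payload is still a (task type, homogeneous batch) tuple. A minimal pydantic sketch of a conforming value, with the request classes stubbed out (the real ones carry raw content and more fields):

from typing import List, Tuple

from pydantic import BaseModel


class CompletionRequestWithRawContent(BaseModel):  # stub of the real model
    content: str


class ChatCompletionRequestWithRawContent(BaseModel):  # stub of the real model
    messages: List[str]


class TaskRequest(BaseModel):
    task: Tuple[
        str,
        List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
    ]


req = TaskRequest(task=("completion", [CompletionRequestWithRawContent(content="hi")]))
print(req.task[0])  # -> completion
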
@@ -234,7 +237,7 @@ def worker_process_entrypoint(
            if isinstance(task, EndSentinel):
                break

-            assert isinstance(task, TaskRequest)
+            assert isinstance(task, TaskRequest), task
            result = model(task.task)
        except StopIteration:
            break
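
A tiny illustration (not from the diff) of why the trailing ", task" was added: the value after the comma becomes the AssertionError message, so an unexpected message shows its repr in the worker's traceback instead of a bare assertion failure.

task = {"type": "unexpected"}
try:
    assert isinstance(task, str), task  # fails; repr of task becomes the message
except AssertionError as exc:
    print(exc)  # -> {'type': 'unexpected'}
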
@@ -331,7 +334,10 @@ class ModelParallelProcessGroup:

     def run_inference(
         self,
-        req: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]],
+        req: Tuple[
+            str,
+            List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+        ],
     ) -> Generator:
         assert not self.running, "inference already running"
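
Putting the pieces together, a simplified and runnable sketch of the run_inference contract as this diff leaves it (assumed shape, not the real ModelParallelProcessGroup; request types reduced to strings):

from typing import Generator, List, Tuple


class ProcessGroupSketch:
    def __init__(self) -> None:
        self.running = False

    def run_inference(
        self,
        req: Tuple[str, List[str]],  # (task type, batch), types simplified
    ) -> Generator[List[str], None, None]:
        # In a generator, the guard runs on first iteration, not at call time.
        assert not self.running, "inference already running"
        self.running = True
        try:
            task_type, batch = req
            for step in range(2):  # stand-in for real decoding steps
                # One result per request in the batch at every step.
                yield [f"{task_type}[{i}] step {step}" for i in range(len(batch))]
        finally:
            self.running = False


pg = ProcessGroupSketch()
for results in pg.run_inference(("completion", ["a prompt"])):
    print(results[0])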