forked from phoenix-oss/llama-stack-mirror
fix: meta ref inference (#2022)

MAX_BATCH_SIZE=10 LLAMA_MODELS_DEBUG=1 LLAMA_STACK_PORT=5002 LLAMA_STACK_LOGGING='all=info' llama stack run meta-reference-gpu --env INFERENCE_MODEL=meta-llama/Llama-4-Scout-17B-16E-Instruct --env INFERENCE_CHECKPOINT_DIR=...

LLAMA_STACK_CONFIG=http://localhost:5002/ pytest -s -v tests/integration/inference --safety-shield meta-llama/Llama-Guard-3-8B --vision-model meta-llama/Llama-4-Scout-17B-16E-Instruct --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct

Co-authored-by: Eric Huang <erichuang@fb.com>
parent a5d6ab16b2
commit 7ed137e963

2 changed files with 11 additions and 4 deletions
@@ -253,7 +253,8 @@ class MetaReferenceInferenceImpl(
         def impl():
             stop_reason = None

-            for token_result in self.generator.completion(request):
+            for token_results in self.generator.completion([request]):
+                token_result = token_results[0]
                 if token_result.token == tokenizer.eot_id:
                     stop_reason = StopReason.end_of_turn
                     text = ""
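
The hunk above moves the caller onto a batched generator interface: the single request is wrapped in a one-element list, each yielded item is a list of per-request token results, and the single-request path unwraps index 0. A minimal, self-contained sketch of that consumption pattern; generate_batch and TokenResult are hypothetical stand-ins, not the actual generator API:

from typing import Iterator, List, NamedTuple


class TokenResult(NamedTuple):
    token: int
    text: str


def generate_batch(requests: List[str]) -> Iterator[List[TokenResult]]:
    # Hypothetical batched generator: at each decoding step, yield one
    # TokenResult per request in the batch (dummy tokens here).
    for step in range(3):
        yield [TokenResult(token=step, text=f"t{step}") for _ in requests]


request = "hello"
for token_results in generate_batch([request]):  # batch of size one
    token_result = token_results[0]              # unwrap this request's result
    print(token_result.token, token_result.text)
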
@@ -69,7 +69,10 @@ class CancelSentinel(BaseModel):

 class TaskRequest(BaseModel):
     type: Literal[ProcessingMessageName.task_request] = ProcessingMessageName.task_request
-    task: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]]
+    task: Tuple[
+        str,
+        List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+    ]


 class TaskResponse(BaseModel):
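
After this change the task field is a two-element tuple: a string (presumably naming the task type; that reading is an assumption here) plus a whole list of requests, matching the batched interface above. A minimal pydantic sketch of the same shape, with SimpleRequest standing in for the request-with-raw-content types:

from typing import List, Tuple

from pydantic import BaseModel


class SimpleRequest(BaseModel):
    # Hypothetical stand-in for the *RequestWithRawContent types.
    prompt: str


class TaskRequestSketch(BaseModel):
    # (task name, batch of requests) -- mirrors the reshaped `task` field.
    task: Tuple[str, List[SimpleRequest]]


msg = TaskRequestSketch(task=("completion", [SimpleRequest(prompt="hi"), SimpleRequest(prompt="there")]))
print(msg.task[0], len(msg.task[1]))  # completion 2
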
@@ -234,7 +237,7 @@ def worker_process_entrypoint(
            if isinstance(task, EndSentinel):
                break

-            assert isinstance(task, TaskRequest)
+            assert isinstance(task, TaskRequest), task
            result = model(task.task)
        except StopIteration:
            break
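
Passing the offending task object as the assertion message means a failing worker reports what it actually received instead of a bare AssertionError. A quick illustration:

task = {"type": "unexpected"}
try:
    assert isinstance(task, str), task
except AssertionError as exc:
    print("AssertionError:", exc)  # the offending value is included in the error
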
@@ -331,7 +334,10 @@ class ModelParallelProcessGroup:

     def run_inference(
         self,
-        req: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]],
+        req: Tuple[
+            str,
+            List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+        ],
     ) -> Generator:
         assert not self.running, "inference already running"

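
run_inference now accepts the same (tag, batch-of-requests) tuple, so a whole batch is handed to the process group in one call. A hedged usage sketch, with FakeProcessGroup as a hypothetical stand-in for ModelParallelProcessGroup:

from typing import Generator, List, Tuple


class FakeProcessGroup:
    # Hypothetical stand-in: echoes one result per request instead of running a model.
    def __init__(self) -> None:
        self.running = False

    def run_inference(
        self,
        req: Tuple[str, List[str]],
    ) -> Generator[List[str], None, None]:
        assert not self.running, "inference already running"
        self.running = True
        try:
            task_name, batch = req
            yield [f"{task_name}:{prompt}" for prompt in batch]
        finally:
            self.running = False


group = FakeProcessGroup()
for results in group.run_inference(("completion", ["first", "second"])):
    print(results)  # ['completion:first', 'completion:second']
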