mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-08-03 01:03:59 +00:00

fixes

This commit is contained in:
parent a5d6ab16b2
commit 9b58624479

2 changed files with 11 additions and 4 deletions
@@ -253,7 +253,8 @@ class MetaReferenceInferenceImpl(
     def impl():
         stop_reason = None

-        for token_result in self.generator.completion(request):
+        for token_results in self.generator.completion([request]):
+            token_result = token_results[0]
             if token_result.token == tokenizer.eot_id:
                 stop_reason = StopReason.end_of_turn
                 text = ""
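The first hunk moves the caller onto a batched completion API: `completion()` now takes a list of requests and yields, per decoding step, a list of token results aligned with that batch, so the single-request path wraps its request in a one-element list and unwraps `token_results[0]`. A self-contained toy analogue of that calling convention (the generator body here is illustrative, not the real one):

    from typing import Iterator, List

    # Toy stand-in for self.generator.completion: yields one list of token
    # results per decoding step, index-aligned with the input batch.
    def completion(requests: List[str]) -> Iterator[List[str]]:
        for step in range(3):  # pretend there are three decoding steps
            yield [f"{req}-tok{step}" for req in requests]

    request = "prompt"
    for token_results in completion([request]):  # batch of one, as in the diff
        token_result = token_results[0]          # unwrap the sole entry
        print(token_result)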
@@ -69,7 +69,10 @@ class CancelSentinel(BaseModel):


 class TaskRequest(BaseModel):
     type: Literal[ProcessingMessageName.task_request] = ProcessingMessageName.task_request
-    task: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]]
+    task: Tuple[
+        str,
+        List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+    ]


 class TaskResponse(BaseModel):
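The second hunk only re-wraps the `task` annotation across lines; the shape is unchanged: a `(method-name, batch)` tuple whose batch is either a list of completion requests or a list of chat-completion requests, never a mix. A hedged re-creation with stand-in enum and request types, just to show construction:

    from enum import Enum
    from typing import List, Literal, Tuple

    from pydantic import BaseModel

    class ProcessingMessageName(str, Enum):  # stand-in for the real enum
        task_request = "task_request"

    class CompletionRequestWithRawContent(BaseModel):  # illustrative stub
        content: str

    class TaskRequest(BaseModel):
        type: Literal[ProcessingMessageName.task_request] = ProcessingMessageName.task_request
        # The real annotation also admits List[ChatCompletionRequestWithRawContent].
        task: Tuple[
            str,
            List[CompletionRequestWithRawContent],
        ]

    req = TaskRequest(task=("completion", [CompletionRequestWithRawContent(content="hi")]))
    print(req.type.value, req.task[0])  # -> task_request completion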
@@ -234,7 +237,7 @@ def worker_process_entrypoint(
             if isinstance(task, EndSentinel):
                 break

-            assert isinstance(task, TaskRequest)
+            assert isinstance(task, TaskRequest), task
             result = model(task.task)
         except StopIteration:
             break
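The third hunk attaches the offending value to the assertion, so a worker that receives an unexpected message reports what it actually got rather than a bare `AssertionError`. The idiom in isolation, with a stand-in class:

    class TaskRequest:  # stand-in for the real model, for illustration only
        pass

    task = "cancel"  # hypothetical stray message that is not a TaskRequest
    try:
        assert isinstance(task, TaskRequest), task
    except AssertionError as e:
        print(e)  # -> cancel  (the stray value itself, not an empty error)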
@@ -331,7 +334,10 @@ class ModelParallelProcessGroup:

     def run_inference(
         self,
-        req: Tuple[str, List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent]],
+        req: Tuple[
+            str,
+            List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+        ],
     ) -> Generator:
         assert not self.running, "inference already running"

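The last hunk applies the same line-wrapping reformat to `run_inference`, whose `req` parameter mirrors `TaskRequest.task`: a `(method-name, batch)` tuple, with results streamed back through a generator. A self-contained analogue of that shape, with an illustrative body:

    from typing import Generator, List, Tuple

    RawRequest = str  # stand-in for the *RequestWithRawContent types

    def run_inference(
        req: Tuple[
            str,
            List[RawRequest],
        ],
    ) -> Generator:
        method, batch = req  # unpack the (method-name, batch) pair
        for step in range(2):  # pretend decoding steps
            yield [f"{method}/{r}/tok{step}" for r in batch]

    for token_results in run_inference(("completion", ["a", "b"])):
        print(token_results)  # one list per step, aligned with the batch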