mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-29 15:23:51 +00:00

fix log probs

This commit is contained in:
parent 16ba0fa06f
commit 20e78ff241

5 changed files with 31 additions and 20 deletions
@@ -6,7 +6,6 @@
 import asyncio
 import json
-import sys
 from typing import Any, AsyncGenerator, List, Optional
 
 import fire
 
@@ -101,7 +100,9 @@ class InferenceClient(Inference):
             print(f"Error with parsing or validation: {e}")
 
 
-async def run_main(host: str, port: int, stream: bool, model: Optional[str]):
+async def run_main(
+    host: str, port: int, stream: bool, model: Optional[str], logprobs: bool
+):
     client = InferenceClient(f"http://{host}:{port}")
 
     if not model:
@@ -115,9 +116,15 @@ async def run_main(host: str, port: int, stream: bool, model: Optional[str]):
         model=model,
         messages=[message],
         stream=stream,
+        logprobs=logprobs,
     )
+
-    async for log in EventLogger().log(iterator):
-        log.print()
+    if logprobs:
+        async for chunk in iterator:
+            cprint(f"Response: {chunk}", "red")
+    else:
+        async for log in EventLogger().log(iterator):
+            log.print()
 
 
 async def run_mm_main(
@@ -149,13 +156,14 @@ def main(
     port: int,
     stream: bool = True,
     mm: bool = False,
+    logprobs: bool = False,
     file: Optional[str] = None,
     model: Optional[str] = None,
 ):
     if mm:
         asyncio.run(run_mm_main(host, port, stream, file, model))
     else:
-        asyncio.run(run_main(host, port, stream, model))
+        asyncio.run(run_main(host, port, stream, model, logprobs))
 
 
 if __name__ == "__main__":
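With this change the client CLI grows a logprobs flag that is threaded from main into run_main; when set, the raw stream chunks (which now carry logprobs) are printed instead of being rendered through EventLogger. A minimal runnable sketch of that branching, using a hypothetical fake_chat_completion generator as a stand-in for client.chat_completion(...):

import asyncio
from typing import Any, AsyncGenerator

async def fake_chat_completion() -> AsyncGenerator[Any, None]:
    # Stand-in for client.chat_completion(...); yields plain dicts instead of
    # ChatCompletionResponseStreamChunk objects.
    for delta, lps in [("Hello", [-0.12]), (" world", [-0.48])]:
        yield {"delta": delta, "logprobs": lps}

async def run(logprobs: bool) -> None:
    iterator = fake_chat_completion()
    if logprobs:
        async for chunk in iterator:
            print(f"Response: {chunk}")  # raw chunk, logprobs included
    else:
        async for chunk in iterator:
            print(chunk["delta"], end="")  # rendered text only
        print()

asyncio.run(run(logprobs=True))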
@@ -69,7 +69,7 @@ class ChatCompletionResponseEvent(BaseModel):
 
     event_type: ChatCompletionResponseEventType
     delta: Union[str, ToolCallDelta]
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
     stop_reason: Optional[StopReason] = None
 
@@ -80,7 +80,7 @@ class CompletionRequest(BaseModel):
     sampling_params: Optional[SamplingParams] = SamplingParams()
 
     stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
 
 
 @json_schema_type
@@ -88,7 +88,7 @@ class CompletionResponse(BaseModel):
     """Completion response."""
 
     completion_message: CompletionMessage
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
 
 
 @json_schema_type
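Across the API types the pattern is uniform: request-side logprobs goes from Optional[LogProbConfig] = None to a plain Optional[bool] = False, and response-side logprobs from Optional[List[TokenLogProbs]] to Optional[List[float]], one log-probability per generated token. A small self-contained sketch of the new shapes, using stand-in pydantic models rather than the real llama-stack classes:

from typing import List, Optional
from pydantic import BaseModel

class CompletionRequestSketch(BaseModel):
    model: str
    stream: Optional[bool] = False
    logprobs: Optional[bool] = False   # was: Optional[LogProbConfig] = None

class CompletionResponseSketch(BaseModel):
    logprobs: Optional[List[float]] = None  # was: Optional[List[TokenLogProbs]]

req = CompletionRequestSketch(model="example-model", logprobs=True)
resp = CompletionResponseSketch(logprobs=[-0.12, -0.48])
print(req, resp)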
@@ -97,7 +97,7 @@ class CompletionResponseStreamChunk(BaseModel):
 
     delta: str
     stop_reason: Optional[StopReason] = None
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
 
 
 @json_schema_type
@@ -105,7 +105,7 @@ class BatchCompletionRequest(BaseModel):
     model: str
     content_batch: List[InterleavedTextMedia]
     sampling_params: Optional[SamplingParams] = SamplingParams()
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
 
 
 @json_schema_type
@@ -129,7 +129,7 @@ class ChatCompletionRequest(BaseModel):
     )
 
     stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
 
 
 @json_schema_type
@@ -144,7 +144,7 @@ class ChatCompletionResponse(BaseModel):
     """Chat completion response."""
 
     completion_message: CompletionMessage
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
 
 
 @json_schema_type
@@ -159,7 +159,7 @@ class BatchChatCompletionRequest(BaseModel):
     tool_prompt_format: Optional[ToolPromptFormat] = Field(
         default=ToolPromptFormat.json
     )
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
 
 
 @json_schema_type
@@ -180,7 +180,7 @@ class Inference(Protocol):
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
     ) -> Union[CompletionResponse, CompletionResponseStreamChunk]: ...
 
     @webmethod(route="/inference/chat_completion")
@@ -194,7 +194,7 @@ class Inference(Protocol):
         tool_choice: Optional[ToolChoice] = ToolChoice.auto,
         tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
         stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
     ) -> Union[ChatCompletionResponse, ChatCompletionResponseStreamChunk]: ...
 
     @webmethod(route="/inference/embeddings")
@@ -101,7 +101,7 @@ class InferenceRouter(Inference):
         tool_choice: Optional[ToolChoice] = ToolChoice.auto,
         tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
         stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = False,
     ) -> AsyncGenerator:
         params = dict(
             model=model,
@@ -125,7 +125,7 @@ class InferenceRouter(Inference):
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = False,
     ) -> Union[CompletionResponse, CompletionResponseStreamChunk]:
         return await self.routing_table.get_provider_impl(model).completion(
            model=model,
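The router itself only needs to pass the flag through unchanged: it builds the keyword arguments once and hands them to whichever provider the routing table selects. A toy sketch of that pass-through pattern, with hypothetical FakeProvider/FakeRouter stand-ins for the real routing table machinery:

import asyncio
from typing import Any, Dict

class FakeProvider:
    # Hypothetical provider; the real one is resolved via the routing table.
    async def chat_completion(self, **params: Any) -> Dict[str, Any]:
        return {"logprobs_flag_received": params["logprobs"]}

class FakeRouter:
    def __init__(self) -> None:
        self._impl = FakeProvider()

    async def chat_completion(
        self, model: str, stream: bool = False, logprobs: bool = False
    ) -> Dict[str, Any]:
        # Mirrors the `params = dict(...)` pass-through in InferenceRouter.
        params = dict(model=model, stream=stream, logprobs=logprobs)
        return await self._impl.chat_completion(**params)

print(asyncio.run(FakeRouter().chat_completion("example-model", logprobs=True)))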
@@ -297,7 +297,7 @@ class Llama:
                     token=next_token[0].item(),
                     text=self.tokenizer.decode(next_token.tolist()),
                     logprobs=(
-                        token_logprobs[:, prev_pos + 1 : cur_pos + 1][0].tolist()
+                        token_logprobs[:, cur_pos : cur_pos + 1][0].tolist()
                         if logprobs
                         else None
                     ),
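The generator fix is the heart of the commit: token_logprobs has one column per sequence position, and each per-token result should carry exactly one value, the log-probability of the token just produced at cur_pos. The old slice prev_pos + 1 : cur_pos + 1 spans cur_pos - prev_pos positions, so whenever a whole prompt chunk was forwarded in one step it returned several values for a single token result. A toy illustration of the two slice widths with plain lists (the numbers are made up; only the widths matter):

# Batch size 1; one column per sequence position.
token_logprobs = [[-0.1, -0.2, -0.3, -0.4, -0.5]]

prev_pos, cur_pos = 0, 3  # e.g. a 3-token prompt processed in one forward pass

old = token_logprobs[0][prev_pos + 1 : cur_pos + 1]  # [-0.2, -0.3, -0.4] -> 3 values
new = token_logprobs[0][cur_pos : cur_pos + 1]       # [-0.4] -> exactly 1 value

print(len(old), len(new))  # 3 1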
@@ -58,7 +58,7 @@ class MetaReferenceInferenceImpl(Inference, RoutableProvider):
         tool_choice: Optional[ToolChoice] = ToolChoice.auto,
         tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
         stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
     ) -> AsyncIterator[
         Union[ChatCompletionResponseStreamChunk, ChatCompletionResponse]
     ]:
@@ -132,7 +132,10 @@ class MetaReferenceInferenceImpl(Inference, RoutableProvider):
 
             if not request.stream:
                 if request.logprobs:
-                    logprobs.append(token_result.logprob)
+                    assert (
+                        len(token_result.logprobs) == 1
+                    ), "Expected logprob to contain 1 result for the current token"
+                    logprobs.append(token_result.logprobs[0])
 
                 continue
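Together with the slice fix above, the provider can now assume each token_result.logprobs holds exactly one float, which the new assert enforces, and accumulate them into the flat List[float] the response types expect. A stand-alone sketch of that accumulation, using a hypothetical TokenResultSketch dataclass rather than the provider's real token result type:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class TokenResultSketch:
    # Stand-in per-token result; mirrors the invariant the new assert enforces.
    text: str
    logprobs: Optional[List[float]]

results = [TokenResultSketch("Hi", [-0.11]), TokenResultSketch("!", [-0.03])]

logprobs: List[float] = []
for token_result in results:
    assert len(token_result.logprobs) == 1, "Expected 1 logprob for the current token"
    logprobs.append(token_result.logprobs[0])

print(logprobs)  # [-0.11, -0.03] -> would populate the response's logprobs field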