fix log probs

Xi Yan 2024-10-07 16:45:06 -07:00
parent 16ba0fa06f
commit 20e78ff241
5 changed files with 31 additions and 20 deletions

View file

@@ -6,7 +6,6 @@
import asyncio
import json
import sys
from typing import Any, AsyncGenerator, List, Optional
import fire
@@ -101,7 +100,9 @@ class InferenceClient(Inference):
            print(f"Error with parsing or validation: {e}")

-async def run_main(host: str, port: int, stream: bool, model: Optional[str]):
+async def run_main(
+    host: str, port: int, stream: bool, model: Optional[str], logprobs: bool
+):
    client = InferenceClient(f"http://{host}:{port}")

    if not model:
@@ -115,7 +116,13 @@ async def run_main(host: str, port: int, stream: bool, model: Optional[str]):
        model=model,
        messages=[message],
        stream=stream,
+        logprobs=logprobs,
    )
+    if logprobs:
+        async for chunk in iterator:
+            cprint(f"Response: {chunk}", "red")
+    else:
        async for log in EventLogger().log(iterator):
            log.print()
@@ -149,13 +156,14 @@ def main(
    port: int,
    stream: bool = True,
    mm: bool = False,
+    logprobs: bool = False,
    file: Optional[str] = None,
    model: Optional[str] = None,
):
    if mm:
        asyncio.run(run_mm_main(host, port, stream, file, model))
    else:
-        asyncio.run(run_main(host, port, stream, model))
+        asyncio.run(run_main(host, port, stream, model, logprobs))

if __name__ == "__main__":
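
With the flag threaded through the CLI, a run that surfaces raw logprob chunks could look like this minimal sketch (host, port, and model name here are assumptions, not part of the diff):

    import asyncio

    # Drives the updated entry point from above; model name is illustrative.
    asyncio.run(
        run_main(
            host="localhost",
            port=5000,
            stream=False,
            model="Llama3.1-8B-Instruct",
            logprobs=True,  # new flag: print raw chunks so logprobs are visible
        )
    )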

View file

@@ -69,7 +69,7 @@ class ChatCompletionResponseEvent(BaseModel):
    event_type: ChatCompletionResponseEventType
    delta: Union[str, ToolCallDelta]
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
    stop_reason: Optional[StopReason] = None
@@ -80,7 +80,7 @@ class CompletionRequest(BaseModel):
    sampling_params: Optional[SamplingParams] = SamplingParams()
    stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
@json_schema_type
@@ -88,7 +88,7 @@ class CompletionResponse(BaseModel):
    """Completion response."""
    completion_message: CompletionMessage
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
@json_schema_type
@@ -97,7 +97,7 @@ class CompletionResponseStreamChunk(BaseModel):
    delta: str
    stop_reason: Optional[StopReason] = None
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
@json_schema_type
@@ -105,7 +105,7 @@ class BatchCompletionRequest(BaseModel):
    model: str
    content_batch: List[InterleavedTextMedia]
    sampling_params: Optional[SamplingParams] = SamplingParams()
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
@json_schema_type
@@ -129,7 +129,7 @@ class ChatCompletionRequest(BaseModel):
    )
    stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
@json_schema_type
@@ -144,7 +144,7 @@ class ChatCompletionResponse(BaseModel):
    """Chat completion response."""
    completion_message: CompletionMessage
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: Optional[List[float]] = None
@json_schema_type
@@ -159,7 +159,7 @@ class BatchChatCompletionRequest(BaseModel):
    tool_prompt_format: Optional[ToolPromptFormat] = Field(
        default=ToolPromptFormat.json
    )
-    logprobs: Optional[LogProbConfig] = None
+    logprobs: Optional[bool] = False
@json_schema_type
@@ -180,7 +180,7 @@ class Inference(Protocol):
        content: InterleavedTextMedia,
        sampling_params: Optional[SamplingParams] = SamplingParams(),
        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
    ) -> Union[CompletionResponse, CompletionResponseStreamChunk]: ...
@webmethod(route="/inference/chat_completion")
@@ -194,7 +194,7 @@ class Inference(Protocol):
        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
        tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
    ) -> Union[ChatCompletionResponse, ChatCompletionResponseStreamChunk]: ...
@webmethod(route="/inference/embeddings")
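
Taken together, these hunks swap the structured LogProbConfig / TokenLogProbs types for a plain boolean on requests and a flat list of floats on responses. A minimal sketch of the new shapes, assuming the ChatCompletionRequest and UserMessage types from this API (model name and message content are illustrative):

    # Request side: logprobs is now a plain bool (previously Optional[LogProbConfig]).
    request = ChatCompletionRequest(
        model="Llama3.1-8B-Instruct",
        messages=[UserMessage(content="Hello")],
        stream=False,
        logprobs=True,
    )

    # Response side: logprobs is now Optional[List[float]], one value per
    # generated token (previously Optional[List[TokenLogProbs]]), e.g.:
    # response.logprobs == [-0.12, -1.87, -0.04]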

View file

@@ -101,7 +101,7 @@ class InferenceRouter(Inference):
        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
        tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = False,
    ) -> AsyncGenerator:
        params = dict(
            model=model,
@@ -125,7 +125,7 @@ class InferenceRouter(Inference):
        content: InterleavedTextMedia,
        sampling_params: Optional[SamplingParams] = SamplingParams(),
        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = False,
    ) -> Union[CompletionResponse, CompletionResponseStreamChunk]:
        return await self.routing_table.get_provider_impl(model).completion(
            model=model,
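
Both router hunks are signature-only: the boolean rides along unchanged into the provider call. A condensed, self-contained sketch of that forwarding (values are illustrative):

    # Condensed from InferenceRouter.chat_completion; other params elided.
    model, logprobs = "Llama3.1-8B-Instruct", True
    params = dict(
        model=model,
        logprobs=logprobs,  # plain bool now, forwarded untouched to the provider
    )
    assert params["logprobs"] is True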

View file

@@ -297,7 +297,7 @@ class Llama:
                token=next_token[0].item(),
                text=self.tokenizer.decode(next_token.tolist()),
                logprobs=(
-                    token_logprobs[:, prev_pos + 1 : cur_pos + 1][0].tolist()
+                    token_logprobs[:, cur_pos : cur_pos + 1][0].tolist()
                    if logprobs
                    else None
                ),
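
This one-line slice change is the heart of the fix: token_logprobs holds one value per sequence position, and on the first decode step prev_pos still points at the start of the prompt, so the old slice returned logprobs for the entire prefilled prompt instead of only the token just sampled at cur_pos. A toy illustration with made-up shapes:

    import torch

    # Toy tensors: batch of 1, seq_len 8, a 5-token prompt, first decode step.
    token_logprobs = torch.randn(1, 8)
    prev_pos, cur_pos = 0, 5

    old = token_logprobs[:, prev_pos + 1 : cur_pos + 1][0].tolist()  # 5 values: whole prompt window
    new = token_logprobs[:, cur_pos : cur_pos + 1][0].tolist()       # 1 value: the sampled token
    assert len(old) == 5 and len(new) == 1

On every later step prev_pos equals cur_pos - 1, so the two slices agree; only the first yield after prefill was wrong.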

View file

@@ -58,7 +58,7 @@ class MetaReferenceInferenceImpl(Inference, RoutableProvider):
        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
        tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        logprobs: Optional[bool] = None,
    ) -> AsyncIterator[
        Union[ChatCompletionResponseStreamChunk, ChatCompletionResponse]
    ]:
@@ -132,7 +132,10 @@ class MetaReferenceInferenceImpl(Inference, RoutableProvider):
            if not request.stream:
                if request.logprobs:
-                    logprobs.append(token_result.logprob)
+                    assert (
+                        len(token_result.logprobs) == 1
+                    ), "Expected logprob to contain 1 result for the current token"
+                    logprobs.append(token_result.logprobs[0])
                continue
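
On the consumer side, each streamed token result now carries a one-element logprobs list rather than a scalar logprob attribute, which the new assert enforces. A self-contained sketch of the accumulation, using a hypothetical stand-in for the generator's per-token payload:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class TokenResult:  # hypothetical stand-in for the generator's per-token payload
        token: int
        text: str
        logprobs: List[float]  # expected to hold exactly one entry per step

    results = [TokenResult(1, "Hello", [-0.12]), TokenResult(2, "!", [-1.87])]
    logprobs: List[float] = []
    for token_result in results:
        assert len(token_result.logprobs) == 1
        logprobs.append(token_result.logprobs[0])
    # logprobs == [-0.12, -1.87]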