Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-07-29 07:14:20 +00:00
fix non-streaming api in inference server

This commit is contained in:
    parent 404af06e02
    commit cc98fbb058

3 changed files with 32 additions and 26 deletions
File 1 of 3 (inference client):

@@ -14,6 +14,7 @@ from termcolor import cprint
 
 from .api import (
     ChatCompletionRequest,
+    ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
     CompletionRequest,
     Inference,
@@ -50,35 +51,33 @@ class InferenceClient(Inference):
                 if line.startswith("data:"):
                     data = line[len("data: ") :]
                     try:
+                        if request.stream:
                             yield ChatCompletionResponseStreamChunk(**json.loads(data))
+                        else:
+                            yield ChatCompletionResponse(**json.loads(data))
                     except Exception as e:
                         print(data)
                         print(f"Error with parsing or validation: {e}")
 
 
-async def run_main(host: str, port: int):
+async def run_main(host: str, port: int, stream: bool):
     client = InferenceClient(f"http://{host}:{port}")
 
     message = UserMessage(content="hello world, help me out here")
     cprint(f"User>{message.content}", "green")
-    req = ChatCompletionRequest(
-        model=InstructModel.llama3_70b_chat,
-        messages=[message],
-        stream=True,
-    )
     iterator = client.chat_completion(
         ChatCompletionRequest(
             model=InstructModel.llama3_8b_chat,
             messages=[message],
-            stream=True,
+            stream=stream,
         )
     )
     async for log in EventLogger().log(iterator):
         log.print()
 
 
-def main(host: str, port: int):
-    asyncio.run(run_main(host, port))
+def main(host: str, port: int, stream: bool = True):
+    asyncio.run(run_main(host, port, stream))
 
 
 if __name__ == "__main__":
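For context, the updated entry point can now be exercised in either mode. A minimal usage sketch, assuming an inference server is already listening (the host and port below are placeholders, not part of this diff):

    # streaming: the client yields ChatCompletionResponseStreamChunk events as they arrive
    main("localhost", 5000, stream=True)

    # non-streaming (the path this commit fixes): the client yields a single ChatCompletionResponse
    main("localhost", 5000, stream=False)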
File 2 of 3 (event logger):

@@ -6,7 +6,10 @@
 
 from termcolor import cprint
 
-from llama_toolchain.inference.api import ChatCompletionResponseEventType
+from llama_toolchain.inference.api import (
+    ChatCompletionResponseEventType,
+    ChatCompletionResponseStreamChunk
+)
 
 
 class LogEvent:
@@ -25,8 +28,9 @@ class LogEvent:
 
 
 class EventLogger:
-    async def log(self, event_generator, stream=True):
+    async def log(self, event_generator):
         async for chunk in event_generator:
+            if isinstance(chunk, ChatCompletionResponseStreamChunk):
                 event = chunk.event
                 if event.event_type == ChatCompletionResponseEventType.start:
                     yield LogEvent("Assistant> ", color="cyan", end="")
@@ -34,3 +38,6 @@ class EventLogger:
                     yield LogEvent(event.delta, color="yellow", end="")
                 elif event.event_type == ChatCompletionResponseEventType.complete:
                     yield LogEvent("")
+            else:
+                yield LogEvent("Assistant> ", color="cyan", end="")
+                yield LogEvent(chunk.completion_message.content, color="yellow")
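With the isinstance() dispatch in place, callers no longer need to tell the logger whether streaming was requested; the logger inspects each chunk itself. The call site from the client diff above stays the same either way:

    # works for both streaming chunks and a single non-streaming response
    async for log in EventLogger().log(iterator):
        log.print()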
File 3 of 3 (inference server implementation):

@@ -16,6 +16,7 @@ from .api.datatypes import (
     ToolCallParseStatus,
 )
 from .api.endpoints import (
+    ChatCompletionResponse,
     ChatCompletionRequest,
     ChatCompletionResponseStreamChunk,
     CompletionRequest,
@@ -40,6 +41,7 @@ class InferenceImpl(Inference):
         raise NotImplementedError()
 
     async def chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
+        if request.stream:
             yield ChatCompletionResponseStreamChunk(
                 event=ChatCompletionResponseEvent(
                     event_type=ChatCompletionResponseEventType.start,
@@ -152,8 +154,6 @@ class InferenceImpl(Inference):
             # TODO(ashwin): what else do we need to send out here when everything finishes?
         else:
             yield ChatCompletionResponse(
-                content=message.content,
-                tool_calls=message.tool_calls,
-                stop_reason=stop_reason,
+                completion_message=message,
                 logprobs=logprobs if request.logprobs else None,
             )
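The net effect on the non-streaming path is a single response object end to end: InferenceImpl yields one ChatCompletionResponse carrying the full completion message, the client forwards it, and the logger prints it. A minimal consumer sketch, assuming an iterator obtained from client.chat_completion(...) with stream=False as in the client diff (the helper name is hypothetical, not part of this change):

    # hypothetical helper illustrating the non-streaming contract
    async def print_non_streaming(iterator):
        async for response in iterator:
            # exactly one item is yielded when stream is off
            assert isinstance(response, ChatCompletionResponse)
            print(response.completion_message.content)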