Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-12 04:50:39 +00:00)
send start event by default
This commit is contained in:
parent f55d812d8e
commit ea27ae56f8
1 changed file with 6 additions and 6 deletions
@@ -841,12 +841,12 @@ async def convert_openai_chat_completion_stream(
        """
        Convert a stream of OpenAI chat completion chunks into a stream
        of ChatCompletionResponseStreamChunk.
        """

        # generate a stream of ChatCompletionResponseEventType: start -> progress -> progress -> ...
        # def _event_type_generator() -> Generator[ChatCompletionResponseEventType, None, None]:
        #     yield ChatCompletionResponseEventType.start
        #     while True:
        #         yield ChatCompletionResponseEventType.progress
        yield ChatCompletionResponseStreamChunk(
            event=ChatCompletionResponseEvent(
                event_type=ChatCompletionResponseEventType.start,
                delta=TextDelta(text=""),
            )
        )
        event_type = ChatCompletionResponseEventType.progress

        stop_reason = None
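The effect of the change, as recovered from the hunk: the converter now yields a start chunk unconditionally before forwarding any progress chunks, instead of deriving the event type from a generator (the old approach survives as the commented-out `_event_type_generator`). Below is a minimal runnable sketch of that pattern; the event and chunk names mirror the identifiers in the diff, but these simplified type definitions and the `convert_stream` signature are assumptions for illustration, not the actual llama-stack code.

```python
import asyncio
from dataclasses import dataclass
from enum import Enum
from typing import AsyncIterator


# Simplified stand-ins for the types referenced in the diff (assumed shapes).
class ChatCompletionResponseEventType(Enum):
    start = "start"
    progress = "progress"


@dataclass
class TextDelta:
    text: str


@dataclass
class ChatCompletionResponseEvent:
    event_type: ChatCompletionResponseEventType
    delta: TextDelta


@dataclass
class ChatCompletionResponseStreamChunk:
    event: ChatCompletionResponseEvent


async def convert_stream(
    chunks: AsyncIterator[str],
) -> AsyncIterator[ChatCompletionResponseStreamChunk]:
    # Send the start event by default, before any upstream chunk arrives,
    # so consumers always see start -> progress -> ... even for streams
    # that produce no content.
    yield ChatCompletionResponseStreamChunk(
        event=ChatCompletionResponseEvent(
            event_type=ChatCompletionResponseEventType.start,
            delta=TextDelta(text=""),
        )
    )
    # Every subsequent chunk is a progress event carrying the text delta.
    async for text in chunks:
        yield ChatCompletionResponseStreamChunk(
            event=ChatCompletionResponseEvent(
                event_type=ChatCompletionResponseEventType.progress,
                delta=TextDelta(text=text),
            )
        )


async def main() -> None:
    # Hypothetical upstream stream standing in for OpenAI chunks.
    async def fake_openai_stream() -> AsyncIterator[str]:
        for piece in ["Hello", ", ", "world"]:
            yield piece

    async for chunk in convert_stream(fake_openai_stream()):
        print(chunk.event.event_type.value, repr(chunk.event.delta.text))


asyncio.run(main())
```

Emitting the start chunk eagerly, rather than lazily on the first upstream chunk, is what makes the event sequence well-formed even when the upstream stream yields nothing.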