From ea27ae56f8653bb6735621634a130a90033263c9 Mon Sep 17 00:00:00 2001
From: Hardik Shah
Date: Thu, 27 Feb 2025 20:50:17 -0800
Subject: [PATCH] send start event by default

---
 .../providers/utils/inference/openai_compat.py     | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py
index 545e080c2..d0fdf6385 100644
--- a/llama_stack/providers/utils/inference/openai_compat.py
+++ b/llama_stack/providers/utils/inference/openai_compat.py
@@ -841,12 +841,12 @@ async def convert_openai_chat_completion_stream(
     Convert a stream of OpenAI chat completion chunks into a stream of ChatCompletionResponseStreamChunk.
     """
-
-    # generate a stream of ChatCompletionResponseEventType: start -> progress -> progress -> ...
-    # def _event_type_generator() -> Generator[ChatCompletionResponseEventType, None, None]:
-    #     yield ChatCompletionResponseEventType.start
-    #     while True:
-    #         yield ChatCompletionResponseEventType.progress
+    yield ChatCompletionResponseStreamChunk(
+        event=ChatCompletionResponseEvent(
+            event_type=ChatCompletionResponseEventType.start,
+            delta=TextDelta(text=""),
+        )
+    )
     event_type = ChatCompletionResponseEventType.progress
     stop_reason = None
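
Note: after this change, every converted stream begins with a start chunk
carrying an empty TextDelta, emitted before the upstream OpenAI stream is
consumed, so clients always observe start -> progress -> ... without
special-casing empty streams. The following is a minimal, self-contained
sketch of that pattern; the dataclasses and the convert_stream helper are
simplified stand-ins that only mirror the names used in the patch, not the
real llama_stack types or the actual signature of
convert_openai_chat_completion_stream.

# Minimal sketch of the "start event by default" streaming pattern.
# All types here are simplified stand-ins (assumptions), not the real
# llama_stack definitions.
import asyncio
from dataclasses import dataclass
from enum import Enum
from typing import AsyncIterator


class ChatCompletionResponseEventType(Enum):
    start = "start"
    progress = "progress"


@dataclass
class TextDelta:
    text: str


@dataclass
class ChatCompletionResponseEvent:
    event_type: ChatCompletionResponseEventType
    delta: TextDelta


@dataclass
class ChatCompletionResponseStreamChunk:
    event: ChatCompletionResponseEvent


async def convert_stream(
    texts: AsyncIterator[str],
) -> AsyncIterator[ChatCompletionResponseStreamChunk]:
    # Send the start event by default, before touching the upstream
    # stream, so it is emitted even when that stream yields nothing.
    yield ChatCompletionResponseStreamChunk(
        event=ChatCompletionResponseEvent(
            event_type=ChatCompletionResponseEventType.start,
            delta=TextDelta(text=""),
        )
    )
    # Every subsequent upstream chunk becomes a progress event.
    async for text in texts:
        yield ChatCompletionResponseStreamChunk(
            event=ChatCompletionResponseEvent(
                event_type=ChatCompletionResponseEventType.progress,
                delta=TextDelta(text=text),
            )
        )


async def main() -> None:
    async def upstream() -> AsyncIterator[str]:
        for piece in ("Hello", ", world"):
            yield piece

    # Prints: start '' / progress 'Hello' / progress ', world'
    async for chunk in convert_stream(upstream()):
        print(chunk.event.event_type.value, repr(chunk.event.delta.text))


if __name__ == "__main__":
    asyncio.run(main())

Emitting the start event eagerly (rather than lazily via the previously
commented-out generator) means clients can render a "response started"
state as soon as the stream is opened, uniformly across providers.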