From 0cc072dcafb3426894eb77231b4d90b3edd06196 Mon Sep 17 00:00:00 2001
From: Eric Huang
Date: Tue, 30 Sep 2025 11:24:27 -0700
Subject: [PATCH] fix: don't pass default response format in Responses

# What does this PR do?

Stop passing the default "text" response format through to
`openai_chat_completion` in the Responses streaming orchestrator. Text is
already the default for chat completion, and some providers reject a
non-empty `response_format` when tools are present.

## Test Plan

---
 .../inline/agents/meta_reference/responses/streaming.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
index 2f45ad2a3..179f7f023 100644
--- a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
+++ b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
@@ -127,13 +127,16 @@ class StreamingResponseOrchestrator:
         messages = self.ctx.messages.copy()
 
         while True:
+            # Text is the default response format for chat completion, so we don't need to pass it
+            # (some providers don't support a non-empty response_format when tools are present)
+            response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
             completion_result = await self.inference_api.openai_chat_completion(
                 model=self.ctx.model,
                 messages=messages,
                 tools=self.ctx.chat_tools,
                 stream=True,
                 temperature=self.ctx.temperature,
-                response_format=self.ctx.response_format,
+                response_format=response_format,
             )
 
             # Process streaming chunks and build complete response
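
For reviewers, a minimal standalone sketch of the guard this patch adds. The `ResponseFormat` dataclass and `effective_response_format` helper below are illustrative stand-ins, not the actual llama_stack types:

```python
from dataclasses import dataclass


@dataclass
class ResponseFormat:
    # "text" is the chat-completion default; "json_schema" / "json_object"
    # are the structured-output variants.
    type: str


def effective_response_format(fmt: ResponseFormat) -> ResponseFormat | None:
    """Return None for the default "text" format so it is never sent upstream."""
    return None if fmt.type == "text" else fmt


# The default format is dropped; structured formats still pass through.
assert effective_response_format(ResponseFormat(type="text")) is None
assert effective_response_format(ResponseFormat(type="json_schema")).type == "json_schema"
```

With this guard, providers that reject a non-empty `response_format` alongside tools receive no `response_format` at all for plain-text requests, while JSON-schema requests are unaffected.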