From 93d5894a5e621d5c1ada618b789a323850300b45 Mon Sep 17 00:00:00 2001
From: Luis Tomas Bolivar
Date: Fri, 3 Oct 2025 15:11:26 +0200
Subject: [PATCH] Add protection to response_format

---
 .../inline/agents/meta_reference/responses/streaming.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
index 0bb524f5c..cabd267a2 100644
--- a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
+++ b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
@@ -125,7 +125,9 @@ class StreamingResponseOrchestrator:
         while True:
             # Text is the default response format for chat completion so don't need to pass it
             # (some providers don't support non-empty response_format when tools are present)
-            response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
+            response_format = (
+                None if getattr(self.ctx.response_format, "type", None) == "text" else self.ctx.response_format
+            )
             completion_result = await self.inference_api.openai_chat_completion(
                 model=self.ctx.model,
                 messages=messages,
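
A minimal standalone sketch (not part of the patch) of the guard the change introduces, assuming ctx.response_format may be None when no structured output is requested; the ResponseFormatText class here is a hypothetical stand-in for the real type:

    from dataclasses import dataclass

    @dataclass
    class ResponseFormatText:
        type: str = "text"

    for response_format in (ResponseFormatText(), None):
        # Before the patch: response_format.type raises AttributeError when
        # response_format is None. getattr(response_format, "type", None)
        # returns None instead, so the comparison evaluates to False and the
        # None value is passed through unchanged.
        effective = None if getattr(response_format, "type", None) == "text" else response_format
        print(effective)  # None in both cases: "text" is omitted, None stays None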