From 5ac7e1fa12ef471d42985a99dad09bee6312ba91 Mon Sep 17 00:00:00 2001
From: Gordon Sim
Date: Tue, 30 Sep 2025 21:58:09 +0100
Subject: [PATCH] address review comments

---
 .../responses/openai_responses.py             |  2 +-
 .../agents/meta_reference/responses/types.py  | 22 +++++++++++++++----
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
index a14d9267e..c27dc8467 100644
--- a/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
+++ b/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py
@@ -237,8 +237,8 @@ class OpenAIResponsesImpl:
             response_tools=tools,
             temperature=temperature,
             response_format=response_format,
+            inputs=input,
         )
-        ctx.extract_approvals(input)
 
         # Create orchestrator and delegate streaming logic
         response_id = f"resp-{uuid.uuid4()}"
diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/types.py b/llama_stack/providers/inline/agents/meta_reference/responses/types.py
index 5dcc9099d..d3b5a16bd 100644
--- a/llama_stack/providers/inline/agents/meta_reference/responses/types.py
+++ b/llama_stack/providers/inline/agents/meta_reference/responses/types.py
@@ -10,6 +10,7 @@ from openai.types.chat import ChatCompletionToolParam
 from pydantic import BaseModel
 
 from llama_stack.apis.agents.openai_responses import (
+    OpenAIResponseInput,
     OpenAIResponseInputTool,
     OpenAIResponseMCPApprovalRequest,
     OpenAIResponseMCPApprovalResponse,
@@ -63,7 +64,22 @@ class ChatCompletionContext(BaseModel):
     approval_requests: list[OpenAIResponseMCPApprovalRequest] = []
     approval_responses: dict[str, OpenAIResponseMCPApprovalResponse] = {}
 
-    def extract_approvals(self, inputs):
+    def __init__(
+        self,
+        model: str,
+        messages: list[OpenAIMessageParam],
+        response_tools: list[OpenAIResponseInputTool] | None,
+        temperature: float | None,
+        response_format: OpenAIResponseFormatParam,
+        inputs: list[OpenAIResponseInput] | str,
+    ):
+        super().__init__(
+            model=model,
+            messages=messages,
+            response_tools=response_tools,
+            temperature=temperature,
+            response_format=response_format,
+        )
         if not isinstance(inputs, str):
             self.approval_requests = [input for input in inputs if input.type == "mcp_approval_request"]
             self.approval_responses = {
@@ -72,9 +88,7 @@
 
     def approval_response(self, tool_name: str, arguments: str) -> OpenAIResponseMCPApprovalResponse | None:
         request = self._approval_request(tool_name, arguments)
-        if request and request.id in self.approval_responses:
-            return self.approval_responses[request.id]
-        return None
+        return self.approval_responses.get(request.id, None) if request else None
 
     def _approval_request(self, tool_name: str, arguments: str) -> OpenAIResponseMCPApprovalRequest | None:
         for request in self.approval_requests:
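
Reviewer note (not part of the patch): the change replaces the two-step "construct the
context, then call ctx.extract_approvals(input)" flow with a pydantic __init__ override,
so the approval state is derived at construction time. Below is a minimal, self-contained
sketch of that pattern; the ApprovalRequest/ApprovalResponse models and the Context class
are hypothetical stand-ins for llama_stack's OpenAIResponseMCPApprovalRequest,
OpenAIResponseMCPApprovalResponse, and ChatCompletionContext, not the real definitions.

from pydantic import BaseModel


class ApprovalRequest(BaseModel):
    # Hypothetical stand-in for OpenAIResponseMCPApprovalRequest.
    type: str = "mcp_approval_request"
    id: str
    name: str
    arguments: str


class ApprovalResponse(BaseModel):
    # Hypothetical stand-in for OpenAIResponseMCPApprovalResponse.
    type: str = "mcp_approval_response"
    approval_request_id: str
    approve: bool


class Context(BaseModel):
    # Hypothetical stand-in for ChatCompletionContext, reduced to the
    # fields relevant to approval handling.
    model: str
    approval_requests: list[ApprovalRequest] = []
    approval_responses: dict[str, ApprovalResponse] = {}

    def __init__(self, model: str, inputs: list[ApprovalRequest | ApprovalResponse] | str):
        # Initialize the declared fields first, then derive the approval
        # state, mirroring the super().__init__() ordering in the patch.
        super().__init__(model=model)
        if not isinstance(inputs, str):
            self.approval_requests = [i for i in inputs if i.type == "mcp_approval_request"]
            self.approval_responses = {
                i.approval_request_id: i for i in inputs if i.type == "mcp_approval_response"
            }


# Construction now mirrors the patched call site: approvals are extracted
# in one step instead of requiring a follow-up extract_approvals() call.
ctx = Context(
    model="demo",
    inputs=[
        ApprovalRequest(id="req-1", name="some_tool", arguments="{}"),
        ApprovalResponse(approval_request_id="req-1", approve=True),
    ],
)
assert ctx.approval_responses["req-1"].approve

Folding the extraction into the constructor keeps construction atomic: callers cannot
forget the extraction step, which appears to be the motivation for this review change.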