refactor: narrow mypy exclusions for meta_reference

Narrowed mypy exclusions from entire meta_reference/ directory to only
specific files that still have type errors. The responses/ subdirectory
(6 files) now passes full mypy type checking.

Fixed types.py by adding type annotations and strategic type ignores to work
around mypy's confusion between similar discriminated unions
(OpenAIResponseInputTool vs OpenAIResponseTool).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Ashwin Bharambe 2025-10-28 17:00:38 -07:00
parent 6fb7b9521f
commit 6530a7872a
2 changed files with 19 additions and 7 deletions


```diff
@@ -284,7 +284,12 @@ exclude = [
     "^src/llama_stack/models/llama/llama3/interface\\.py$",
     "^src/llama_stack/models/llama/llama3/tokenizer\\.py$",
     "^src/llama_stack/models/llama/llama3/tool_utils\\.py$",
-    "^src/llama_stack/providers/inline/agents/meta_reference/",
+    "^src/llama_stack/providers/inline/agents/meta_reference/agents\\.py$",
+    "^src/llama_stack/providers/inline/agents/meta_reference/agent_instance\\.py$",
+    "^src/llama_stack/providers/inline/agents/meta_reference/config\\.py$",
+    "^src/llama_stack/providers/inline/agents/meta_reference/persistence\\.py$",
+    "^src/llama_stack/providers/inline/agents/meta_reference/safety\\.py$",
+    "^src/llama_stack/providers/inline/agents/meta_reference/__init__\\.py$",
     "^src/llama_stack/providers/inline/datasetio/localfs/",
     "^src/llama_stack/providers/inline/eval/meta_reference/eval\\.py$",
     "^src/llama_stack/providers/inline/inference/meta_reference/inference\\.py$",
```


```diff
@@ -5,6 +5,7 @@
 # the root directory of this source tree.
 from dataclasses import dataclass
+from typing import cast

 from openai.types.chat import ChatCompletionToolParam
 from pydantic import BaseModel
```
```diff
@@ -100,17 +101,19 @@ class ToolContext(BaseModel):
             if isinstance(tool, OpenAIResponseToolMCP):
                 previous_tools_by_label[tool.server_label] = tool
         # collect tool definitions which are the same in current and previous requests:
-        tools_to_process = []
+        tools_to_process: list[OpenAIResponseInputTool] = []
         matched: dict[str, OpenAIResponseInputToolMCP] = {}
-        for tool in self.current_tools:
+        # Mypy confuses OpenAIResponseInputTool (Input union) with OpenAIResponseTool (output union)
+        # which differ only in MCP type (InputToolMCP vs ToolMCP). Code is correct.
+        for tool in cast(list[OpenAIResponseInputTool], self.current_tools):  # type: ignore[assignment]
             if isinstance(tool, OpenAIResponseInputToolMCP) and tool.server_label in previous_tools_by_label:
                 previous_tool = previous_tools_by_label[tool.server_label]
                 if previous_tool.allowed_tools == tool.allowed_tools:
                     matched[tool.server_label] = tool
                 else:
-                    tools_to_process.append(tool)
+                    tools_to_process.append(tool)  # type: ignore[arg-type]
             else:
-                tools_to_process.append(tool)
+                tools_to_process.append(tool)  # type: ignore[arg-type]
         # tools that are not the same or were not previously defined need to be processed:
         self.tools_to_process = tools_to_process
         # for all matched definitions, get the mcp_list_tools objects from the previous output:
```
```diff
@@ -119,9 +122,11 @@ class ToolContext(BaseModel):
         ]
         # reconstruct the tool to server mappings that can be reused:
         for listing in self.previous_tool_listings:
+            # listing is OpenAIResponseOutputMessageMCPListTools which has tools: list[MCPListToolsTool]
             definition = matched[listing.server_label]
-            for tool in listing.tools:
-                self.previous_tools[tool.name] = definition
+            for mcp_tool in listing.tools:
+                # mcp_tool is MCPListToolsTool which has a name: str field
+                self.previous_tools[mcp_tool.name] = definition

     def available_tools(self) -> list[OpenAIResponseTool]:
         if not self.current_tools:
```
```diff
@@ -139,6 +144,8 @@ class ToolContext(BaseModel):
                     server_label=tool.server_label,
                     allowed_tools=tool.allowed_tools,
                 )
+            # Exhaustive check - all tool types should be handled above
+            raise AssertionError(f"Unexpected tool type: {type(tool)}")

         return [convert_tool(tool) for tool in self.current_tools]
```
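The new `raise` gives `convert_tool` a terminal branch, so mypy no longer sees a code path that implicitly returns `None` when a union member goes unhandled. An alternative idiom, not used in this commit, is `typing.assert_never`, which moves the exhaustiveness check to type-check time. A sketch with hypothetical tool types:

```python
from dataclasses import dataclass
from typing import assert_never  # Python 3.11+; available in typing_extensions for older versions


@dataclass
class FileSearchTool:
    name: str


@dataclass
class MCPTool:
    server_label: str


Tool = FileSearchTool | MCPTool


def describe(tool: Tool) -> str:
    if isinstance(tool, FileSearchTool):
        return f"file search: {tool.name}"
    if isinstance(tool, MCPTool):
        return f"mcp server: {tool.server_label}"
    # After the isinstance checks, mypy narrows `tool` to Never; adding a new
    # member to Tool without handling it here becomes a type error rather than
    # a runtime AssertionError.
    assert_never(tool)


print(describe(MCPTool(server_label="files")))
```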