From 0c95140ca75349df2e582538ad98485fa3fe53c0 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Mon, 27 Oct 2025 22:53:48 -0700
Subject: [PATCH] fix(mypy): resolve provider utility and testing type issues

Fixes mypy type errors across provider utilities and testing
infrastructure (Phase 2e):

- mcp.py (2 errors fixed):
  - Cast sse_client to Any to handle incompatible signatures with
    streamablehttp_client
  - Wrap ImageContent data in _URLOrData(data=...) for proper
    ImageContentItem construction
- batches.py (1 error fixed):
  - Rename walrus operator variable from `body` to `request_body` to avoid
    shadowing the file content `body` variable (bytes | memoryview) defined
    earlier in scope
- api_recorder.py (1 error fixed):
  - Cast Pydantic field annotation assignment to proper type when
    monkey-patching OpenAI CompletionChoice model to accept None in
    finish_reason

Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 .../providers/inline/batches/reference/batches.py | 4 ++--
 src/llama_stack/providers/utils/tools/mcp.py      | 8 +++++---
 src/llama_stack/testing/api_recorder.py           | 4 +++-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/llama_stack/providers/inline/batches/reference/batches.py b/src/llama_stack/providers/inline/batches/reference/batches.py
index 79dc9c84c..b6f201fae 100644
--- a/src/llama_stack/providers/inline/batches/reference/batches.py
+++ b/src/llama_stack/providers/inline/batches/reference/batches.py
@@ -419,8 +419,8 @@ class ReferenceBatchesImpl(Batches):
                 )
                 valid = False
 
-            if (body := request.get("body")) and isinstance(body, dict):
-                if body.get("stream", False):
+            if (request_body := request.get("body")) and isinstance(request_body, dict):
+                if request_body.get("stream", False):
                     errors.append(
                         BatchError(
                             code="streaming_unsupported",
diff --git a/src/llama_stack/providers/utils/tools/mcp.py b/src/llama_stack/providers/utils/tools/mcp.py
index 48f07cb19..a271cb959 100644
--- a/src/llama_stack/providers/utils/tools/mcp.py
+++ b/src/llama_stack/providers/utils/tools/mcp.py
@@ -15,7 +15,7 @@ from mcp import types as mcp_types
 from mcp.client.sse import sse_client
 from mcp.client.streamable_http import streamablehttp_client
 
-from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem
+from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem, _URLOrData
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
     ToolDef,
@@ -49,7 +49,9 @@ async def client_wrapper(endpoint: str, headers: dict[str, str]) -> AsyncGenerat
     try:
         client = streamablehttp_client
         if strategy == MCPProtol.SSE:
-            client = sse_client
+            # sse_client and streamablehttp_client have different signatures, but both
+            # are called the same way here, so we cast to Any to avoid type errors
+            client = cast(Any, sse_client)
         async with client(endpoint, headers=headers) as client_streams:
             async with ClientSession(read_stream=client_streams[0], write_stream=client_streams[1]) as session:
                 await session.initialize()
@@ -137,7 +139,7 @@ async def invoke_mcp_tool(
            if isinstance(item, mcp_types.TextContent):
                content.append(TextContentItem(text=item.text))
            elif isinstance(item, mcp_types.ImageContent):
-               content.append(ImageContentItem(image=item.data))
+               content.append(ImageContentItem(image=_URLOrData(data=item.data)))
            elif isinstance(item, mcp_types.EmbeddedResource):
                logger.warning(f"EmbeddedResource is not supported: {item}")
            else:
diff --git a/src/llama_stack/testing/api_recorder.py b/src/llama_stack/testing/api_recorder.py
index 84407223c..e0c80d63c 100644
--- a/src/llama_stack/testing/api_recorder.py
+++ b/src/llama_stack/testing/api_recorder.py
@@ -40,7 +40,9 @@ from openai.types.completion_choice import CompletionChoice
 from llama_stack.core.testing_context import get_test_context, is_debug_mode
 
 # update the "finish_reason" field, since its type definition is wrong (no None is accepted)
-CompletionChoice.model_fields["finish_reason"].annotation = Literal["stop", "length", "content_filter"] | None
+CompletionChoice.model_fields["finish_reason"].annotation = cast(
+    type[Any] | None, Literal["stop", "length", "content_filter"] | None
+)
 CompletionChoice.model_rebuild()
 
 REPO_ROOT = Path(__file__).parent.parent.parent.parent
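
Note (illustrative, not part of the patch): the api_recorder.py cast is needed because
Pydantic v2 declares FieldInfo.annotation as `type[Any] | None`, so assigning a
`Literal[...] | None` union directly is rejected by mypy even though it works at runtime.
Below is a minimal, self-contained sketch of the same monkey-patching pattern, using a
hypothetical `Choice` model in place of OpenAI's CompletionChoice; it forces a rebuild
with `force=True` so the widened annotation should take effect.

    from typing import Any, Literal, cast

    from pydantic import BaseModel


    class Choice(BaseModel):
        # Upstream-style definition that is too strict: None is rejected at validation time.
        finish_reason: Literal["stop", "length", "content_filter"]


    # Widen the field's annotation in place; cast() satisfies mypy because
    # FieldInfo.annotation is declared as `type[Any] | None` in Pydantic v2.
    Choice.model_fields["finish_reason"].annotation = cast(
        type[Any] | None, Literal["stop", "length", "content_filter"] | None
    )
    # Rebuild the model schema so the widened annotation is picked up.
    Choice.model_rebuild(force=True)

    # finish_reason=None should now validate instead of raising a ValidationError.
    print(Choice(finish_reason=None))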