Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-11 19:56:03 +00:00
fix(mypy): resolve provider utility and testing type issues
Fixes mypy type errors across provider utilities and testing infrastructure (Phase 2e):

- mcp.py (2 errors fixed):
  - Cast sse_client to Any to handle incompatible signatures with streamablehttp_client
  - Wrap ImageContent data in _URLOrData(data=...) for proper ImageContentItem construction
- batches.py (1 error fixed):
  - Rename the walrus-operator variable from `body` to `request_body` to avoid shadowing
    the file-content `body` variable (bytes | memoryview) defined earlier in scope
- api_recorder.py (1 error fixed):
  - Cast the Pydantic field annotation assignment to the proper type when monkey-patching
    the OpenAI CompletionChoice model to accept None in finish_reason
Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
parent 257eaeb945
commit bcf3289785

3 changed files with 10 additions and 6 deletions
batches.py:

@@ -419,8 +419,8 @@ class ReferenceBatchesImpl(Batches):
             )
             valid = False
 
-        if (body := request.get("body")) and isinstance(body, dict):
-            if body.get("stream", False):
+        if (request_body := request.get("body")) and isinstance(request_body, dict):
+            if request_body.get("stream", False):
                 errors.append(
                     BatchError(
                         code="streaming_unsupported",
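For context, a minimal runnable sketch of the shadowing problem the rename avoids; the request shape and types below are simplified stand-ins for the real batches.py code:

```python
# Hedged sketch: reusing `body` in the walrus expression conflicts with the
# earlier `bytes | memoryview` binding, which is what mypy flagged.
def validate(request: dict[str, dict | bool]) -> list[str]:
    errors: list[str] = []
    body: bytes | memoryview = b"file contents"  # earlier binding: file content
    assert len(body) > 0  # pretend the file content is used later

    # Old code (mypy error -- incompatible rebinding of `body`):
    #     if (body := request.get("body")) and isinstance(body, dict): ...

    # Fix: a distinct name keeps each variable at one consistent type.
    if (request_body := request.get("body")) and isinstance(request_body, dict):
        if request_body.get("stream", False):
            errors.append("streaming_unsupported")
    return errors
```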
mcp.py:
@@ -15,7 +15,7 @@ from mcp import types as mcp_types
 from mcp.client.sse import sse_client
 from mcp.client.streamable_http import streamablehttp_client
 
-from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem
+from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem, _URLOrData
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
     ToolDef,
@@ -49,7 +49,9 @@ async def client_wrapper(endpoint: str, headers: dict[str, str]) -> AsyncGenerat
     try:
         client = streamablehttp_client
         if strategy == MCPProtol.SSE:
-            client = sse_client
+            # sse_client and streamablehttp_client have different signatures, but both
+            # are called the same way here, so we cast to Any to avoid type errors
+            client = cast(Any, sse_client)
         async with client(endpoint, headers=headers) as client_streams:
             async with ClientSession(read_stream=client_streams[0], write_stream=client_streams[1]) as session:
                 await session.initialize()
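As a standalone illustration of the `cast(Any, ...)` pattern, here is a hedged sketch with hypothetical client factories (not the real MCP clients) whose signatures differ the same way:

```python
from contextlib import asynccontextmanager
from typing import Any, cast

@asynccontextmanager
async def streaming_client(url: str, headers: dict[str, str], timeout: float = 30.0):
    yield ("read", "write", "get_session_id")  # hypothetical 3-tuple

@asynccontextmanager
async def sse_client(url: str, headers: dict[str, str]):
    yield ("read", "write")  # hypothetical 2-tuple

async def connect(url: str, use_sse: bool) -> None:
    client = streaming_client
    if use_sse:
        # Direct assignment fails mypy: the factories' signatures and yielded
        # tuple types are incompatible. Casting to Any sidesteps the mismatch,
        # since both are invoked identically below.
        client = cast(Any, sse_client)
    async with client("https://example.invalid", headers={}) as streams:
        read_stream, write_stream = streams[0], streams[1]
```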
@@ -137,7 +139,7 @@ async def invoke_mcp_tool(
         if isinstance(item, mcp_types.TextContent):
             content.append(TextContentItem(text=item.text))
         elif isinstance(item, mcp_types.ImageContent):
-            content.append(ImageContentItem(image=item.data))
+            content.append(ImageContentItem(image=_URLOrData(data=item.data)))
         elif isinstance(item, mcp_types.EmbeddedResource):
             logger.warning(f"EmbeddedResource is not supported: {item}")
         else:
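For reference, a self-contained sketch of why the wrapper is needed. The models below are simplified stand-ins for the real llama_stack content types:

```python
from pydantic import BaseModel

class _URLOrData(BaseModel):  # stand-in for the llama_stack model
    url: str | None = None
    data: str | None = None  # base64-encoded image bytes

class ImageContentItem(BaseModel):  # stand-in: `image` expects a _URLOrData
    type: str = "image"
    image: _URLOrData

item_data = "aGVsbG8="  # hypothetical base64 payload from mcp_types.ImageContent

# Before: ImageContentItem(image=item_data) passed a raw str where a
# _URLOrData is required -- the mypy error fixed above.
content = ImageContentItem(image=_URLOrData(data=item_data))
```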
api_recorder.py:
@@ -40,7 +40,9 @@ from openai.types.completion_choice import CompletionChoice
 from llama_stack.core.testing_context import get_test_context, is_debug_mode
 
 # update the "finish_reason" field, since its type definition is wrong (no None is accepted)
-CompletionChoice.model_fields["finish_reason"].annotation = Literal["stop", "length", "content_filter"] | None
+CompletionChoice.model_fields["finish_reason"].annotation = cast(
+    type[Any] | None, Literal["stop", "length", "content_filter"] | None
+)
 CompletionChoice.model_rebuild()
 
 REPO_ROOT = Path(__file__).parent.parent.parent.parent
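The same monkey-patching pattern, sketched on a local stand-in model instead of openai's CompletionChoice (`force=True` is an assumption added here so the rebuild picks up the mutated field in a standalone script):

```python
from typing import Any, Literal, cast

from pydantic import BaseModel

class Choice(BaseModel):  # stand-in for openai's CompletionChoice
    finish_reason: Literal["stop", "length", "content_filter"]

# FieldInfo.annotation is declared as `type[Any] | None`, so assigning the
# bare `Literal[...] | None` expression fails mypy without the cast.
Choice.model_fields["finish_reason"].annotation = cast(
    type[Any] | None, Literal["stop", "length", "content_filter"] | None
)
# Regenerate the validation schema from the mutated model_fields.
Choice.model_rebuild(force=True)

Choice(finish_reason=None)  # now accepted
```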