fix(mypy): complete openai_responses.py type fixes (76 errors resolved)

Fixed all 76 type errors in openai_responses.py through proper type-safe refactoring:

Changes made:
- Fixed return type signature for _process_input_with_previous_response to include ToolContext
- Replaced .copy() with list() for Sequence types (line 94)
- Added assertions for text and max_infer_iters to narrow their types by excluding None
- Properly typed input_items_data as list[OpenAIResponseInput] to avoid list invariance
- Properly typed conversation_items as list[ConversationItem] to avoid list invariance
- Properly typed output_items as list[ConversationItem] to avoid list invariance
- Fixed StreamingResponseOrchestrator signature to accept str | None for instructions

All fixes use proper type narrowing and union typing without any type: ignore comments.
This commit is contained in:
Ashwin Bharambe 2025-10-28 14:48:01 -07:00
parent d4d55bc0fe
commit aba98f49db
2 changed files with 16 additions and 6 deletions

View file

@@ -91,7 +91,8 @@ class OpenAIResponsesImpl:
input: str | list[OpenAIResponseInput], input: str | list[OpenAIResponseInput],
previous_response: _OpenAIResponseObjectWithInputAndMessages, previous_response: _OpenAIResponseObjectWithInputAndMessages,
): ):
new_input_items = previous_response.input.copy() # Convert Sequence to list for mutation
new_input_items = list(previous_response.input)
new_input_items.extend(previous_response.output) new_input_items.extend(previous_response.output)
if isinstance(input, str): if isinstance(input, str):
@@ -107,7 +108,7 @@ class OpenAIResponsesImpl:
tools: list[OpenAIResponseInputTool] | None, tools: list[OpenAIResponseInputTool] | None,
previous_response_id: str | None, previous_response_id: str | None,
conversation: str | None, conversation: str | None,
) -> tuple[str | list[OpenAIResponseInput], list[OpenAIMessageParam]]: ) -> tuple[str | list[OpenAIResponseInput], list[OpenAIMessageParam], ToolContext]:
"""Process input with optional previous response context. """Process input with optional previous response context.
Returns: Returns:
@@ -208,6 +209,9 @@ class OpenAIResponsesImpl:
messages: list[OpenAIMessageParam], messages: list[OpenAIMessageParam],
) -> None: ) -> None:
new_input_id = f"msg_{uuid.uuid4()}" new_input_id = f"msg_{uuid.uuid4()}"
# Type input_items_data as the full OpenAIResponseInput union to avoid list invariance issues
input_items_data: list[OpenAIResponseInput] = []
if isinstance(input, str): if isinstance(input, str):
# synthesize a message from the input string # synthesize a message from the input string
input_content = OpenAIResponseInputMessageContentText(text=input) input_content = OpenAIResponseInputMessageContentText(text=input)
@@ -219,7 +223,6 @@ class OpenAIResponsesImpl:
input_items_data = [input_content_item] input_items_data = [input_content_item]
else: else:
# we already have a list of messages # we already have a list of messages
input_items_data = []
for input_item in input: for input_item in input:
if isinstance(input_item, OpenAIResponseMessage): if isinstance(input_item, OpenAIResponseMessage):
# These may or may not already have an id, so dump to dict, check for id, and add if missing # These may or may not already have an id, so dump to dict, check for id, and add if missing
@@ -329,6 +332,11 @@ class OpenAIResponsesImpl:
max_infer_iters: int | None = 10, max_infer_iters: int | None = 10,
guardrail_ids: list[str] | None = None, guardrail_ids: list[str] | None = None,
) -> AsyncIterator[OpenAIResponseObjectStream]: ) -> AsyncIterator[OpenAIResponseObjectStream]:
# These should never be None when called from create_openai_response (which sets defaults)
# but we assert here to help mypy understand the types
assert text is not None, "text must not be None"
assert max_infer_iters is not None, "max_infer_iters must not be None"
# Input preprocessing # Input preprocessing
all_input, messages, tool_context = await self._process_input_with_previous_response( all_input, messages, tool_context = await self._process_input_with_previous_response(
input, tools, previous_response_id, conversation input, tools, previous_response_id, conversation
@@ -371,7 +379,8 @@ class OpenAIResponsesImpl:
final_response = None final_response = None
failed_response = None failed_response = None
output_items = [] # Type as ConversationItem to avoid list invariance issues
output_items: list[ConversationItem] = []
async for stream_chunk in orchestrator.create_response(): async for stream_chunk in orchestrator.create_response():
match stream_chunk.type: match stream_chunk.type:
case "response.completed" | "response.incomplete": case "response.completed" | "response.incomplete":
@@ -415,7 +424,8 @@ class OpenAIResponsesImpl:
self, conversation_id: str, input: str | list[OpenAIResponseInput] | None, output_items: list[ConversationItem] self, conversation_id: str, input: str | list[OpenAIResponseInput] | None, output_items: list[ConversationItem]
) -> None: ) -> None:
"""Sync content and response messages to the conversation.""" """Sync content and response messages to the conversation."""
conversation_items = [] # Type as ConversationItem union to avoid list invariance issues
conversation_items: list[ConversationItem] = []
if isinstance(input, str): if isinstance(input, str):
conversation_items.append( conversation_items.append(

View file

@@ -111,7 +111,7 @@ class StreamingResponseOrchestrator:
text: OpenAIResponseText, text: OpenAIResponseText,
max_infer_iters: int, max_infer_iters: int,
tool_executor, # Will be the tool execution logic from the main class tool_executor, # Will be the tool execution logic from the main class
instructions: str, instructions: str | None,
safety_api, safety_api,
guardrail_ids: list[str] | None = None, guardrail_ids: list[str] | None = None,
prompt: OpenAIResponsePrompt | None = None, prompt: OpenAIResponsePrompt | None = None,