Wire through parallel_tool_calls to Responses API

Signed-off-by: Anastas Stoyanovsky <astoyano@redhat.com>
Anastas Stoyanovsky 2025-11-11 08:54:02 -05:00
parent 7093978754
commit 7a9b7ecdc2
9 changed files with 159 additions and 20 deletions

@@ -72,6 +72,7 @@ class Agents(Protocol):
         model: str,
         prompt: OpenAIResponsePrompt | None = None,
         instructions: str | None = None,
+        parallel_tool_calls: bool | None = True,
         previous_response_id: str | None = None,
         conversation: str | None = None,
         store: bool | None = True,
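For context, here is a minimal sketch of how a client might exercise the newly wired-through parameter against an OpenAI-compatible Responses endpoint. The base_url, api_key, model name, and get_weather tool are illustrative assumptions, not part of this commit:

    # Hypothetical client-side usage of the new parameter.
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

    response = client.responses.create(
        model="llama3.2:3b",
        input="What's the weather in Paris and in Tokyo?",
        tools=[
            {
                "type": "function",
                "name": "get_weather",  # illustrative tool, not from the diff
                "description": "Get current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            }
        ],
        # With the parameter wired through, False asks the model to emit at
        # most one function tool call per turn; omitting it defaults to True.
        parallel_tool_calls=False,
    )
    print(response.parallel_tool_calls)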

@@ -585,7 +585,7 @@ class OpenAIResponseObject(BaseModel):
     :param model: Model identifier used for generation
     :param object: Object type identifier, always "response"
     :param output: List of generated output items (messages, tool calls, etc.)
-    :param parallel_tool_calls: Whether tool calls can be executed in parallel
+    :param parallel_tool_calls: (Optional) Whether to allow more than one function tool call generated per turn.
     :param previous_response_id: (Optional) ID of the previous response in a conversation
     :param prompt: (Optional) Reference to a prompt template and its variables.
     :param status: Current status of the response generation
@@ -605,7 +605,7 @@ class OpenAIResponseObject(BaseModel):
    model: str
    object: Literal["response"] = "response"
    output: Sequence[OpenAIResponseOutput]
-    parallel_tool_calls: bool = False
+    parallel_tool_calls: bool | None = True
    previous_response_id: str | None = None
    prompt: OpenAIResponsePrompt | None = None
    status: str
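
The default flip from `bool = False` to `bool | None = True` matches OpenAI's documented default for this field. The following self-contained Pydantic sketch shows the effect of the changed default; the class below is a trimmed, hypothetical stand-in for the full model in the diff, keeping only the fields needed to demonstrate the behavior:

    from typing import Literal
    from pydantic import BaseModel

    # Trimmed stand-in for the OpenAIResponseObject excerpt above.
    class OpenAIResponseObject(BaseModel):
        id: str
        model: str
        object: Literal["response"] = "response"
        parallel_tool_calls: bool | None = True  # previously: bool = False
        previous_response_id: str | None = None
        status: str

    resp = OpenAIResponseObject(id="resp_abc", model="example-model", status="completed")
    assert resp.parallel_tool_calls is True  # new default when the caller omits it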