Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
feat!: Wire through parallel_tool_calls to Responses API (#4124)
# What does this PR do?

Initial PR against #4123.

Adds the `parallel_tool_calls` field to the Responses API spec, with a basic initial implementation: when it is set to `False`, no more than one function call is generated per turn.

## Test Plan

* Unit tests have been added to verify that no more than one function call is generated.
* A follow-up PR will verify passing `parallel_tool_calls` through to providers.
* A follow-up PR will address verification and/or implementation of incremental function calling across multiple conversational turns.

---

Signed-off-by: Anastas Stoyanovsky <astoyano@redhat.com>
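For context, a minimal sketch of how a client might exercise the new field once this lands. The base URL, model id, and `get_weather` tool below are illustrative placeholders, not part of this PR; the call shape follows the OpenAI-compatible Responses API:

```python
# Minimal sketch, assuming a locally running Llama Stack server exposing the
# OpenAI-compatible Responses endpoint; base URL, model id, and the
# get_weather tool are hypothetical placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1", api_key="not-needed")

response = client.responses.create(
    model="meta-llama/Llama-3.3-70B-Instruct",  # placeholder model id
    input="What is the weather in Paris and in Tokyo?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",  # hypothetical tool
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        }
    ],
    parallel_tool_calls=False,  # with this PR: at most one function call per turn
)

# With parallel_tool_calls=False, expect at most one function_call output item.
function_calls = [item for item in response.output if item.type == "function_call"]
assert len(function_calls) <= 1
```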
parent 7093978754, commit a3580e6bc0

10 changed files with 73 additions and 32 deletions
docs/static/deprecated-llama-stack-spec.yaml (vendored, 19 changes)

```diff
@@ -3566,9 +3566,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -3968,6 +3969,11 @@ components:
           anyOf:
           - type: string
           - type: 'null'
+        parallel_tool_calls:
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -4094,9 +4100,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
```
docs/static/experimental-llama-stack-spec.yaml (vendored, 14 changes)

```diff
@@ -3263,9 +3263,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -3662,9 +3663,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
```
docs/static/llama-stack-spec.yaml (vendored, 19 changes)

```diff
@@ -5744,9 +5744,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -6146,6 +6147,11 @@ components:
           anyOf:
           - type: string
           - type: 'null'
+        parallel_tool_calls:
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -6272,9 +6278,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
```
docs/static/stainless-llama-stack-spec.yaml (vendored, 19 changes)

```diff
@@ -6723,9 +6723,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -7125,6 +7126,11 @@ components:
           anyOf:
           - type: string
           - type: 'null'
+        parallel_tool_calls:
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
@@ -7251,9 +7257,10 @@ components:
           type: array
           title: Output
         parallel_tool_calls:
-          type: boolean
-          title: Parallel Tool Calls
-          default: false
+          anyOf:
+          - type: boolean
+          - type: 'null'
+          default: true
         previous_response_id:
           anyOf:
           - type: string
```
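The schema change is the same in all four vendored specs: on the response object, `parallel_tool_calls` changes from a plain boolean defaulting to `false` to a nullable boolean defaulting to `true`, and the same nullable field is added to the request schema. As a sketch of what `anyOf: [boolean, 'null']` with `default: true` means at the type level (hypothetical stand-in model, assumes Pydantic v2):

```python
from pydantic import BaseModel

class ResponseParams(BaseModel):  # stand-in, not the real request model
    # Mirrors the spec: anyOf [boolean, 'null'], default true.
    parallel_tool_calls: bool | None = True

assert ResponseParams.model_validate({}).parallel_tool_calls is True  # default applies
assert ResponseParams.model_validate({"parallel_tool_calls": None}).parallel_tool_calls is None
assert ResponseParams.model_validate({"parallel_tool_calls": False}).parallel_tool_calls is False
```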
```diff
@@ -92,6 +92,7 @@ class MetaReferenceAgentsImpl(Agents):
         model: str,
         prompt: OpenAIResponsePrompt | None = None,
         instructions: str | None = None,
+        parallel_tool_calls: bool | None = True,
         previous_response_id: str | None = None,
         conversation: str | None = None,
         store: bool | None = True,
@@ -120,6 +121,7 @@ class MetaReferenceAgentsImpl(Agents):
             include,
             max_infer_iters,
             guardrails,
+            parallel_tool_calls,
             max_tool_calls,
         )
         return result  # type: ignore[no-any-return]
```
```diff
@@ -252,6 +252,7 @@ class OpenAIResponsesImpl:
         include: list[str] | None = None,
         max_infer_iters: int | None = 10,
         guardrails: list[str | ResponseGuardrailSpec] | None = None,
+        parallel_tool_calls: bool | None = None,
         max_tool_calls: int | None = None,
     ):
         stream = bool(stream)
@@ -296,6 +297,7 @@ class OpenAIResponsesImpl:
             tools=tools,
             max_infer_iters=max_infer_iters,
             guardrail_ids=guardrail_ids,
+            parallel_tool_calls=parallel_tool_calls,
             max_tool_calls=max_tool_calls,
         )
@@ -346,6 +348,7 @@ class OpenAIResponsesImpl:
         tools: list[OpenAIResponseInputTool] | None = None,
         max_infer_iters: int | None = 10,
         guardrail_ids: list[str] | None = None,
+        parallel_tool_calls: bool | None = True,
         max_tool_calls: int | None = None,
     ) -> AsyncIterator[OpenAIResponseObjectStream]:
         # These should never be None when called from create_openai_response (which sets defaults)
@@ -385,6 +388,7 @@ class OpenAIResponsesImpl:
             created_at=created_at,
             text=text,
             max_infer_iters=max_infer_iters,
+            parallel_tool_calls=parallel_tool_calls,
             tool_executor=self.tool_executor,
             safety_api=self.safety_api,
             guardrail_ids=guardrail_ids,
```
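Note how the defaults vary along the chain: the `Agents` protocol and `MetaReferenceAgentsImpl` declare `parallel_tool_calls: bool | None = True`, `OpenAIResponsesImpl.create_openai_response` defaults it to `None`, and `_create_streaming_response` again defaults to `True`; per the PR description, behavior only changes when the caller passes an explicit `False`.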
```diff
@@ -114,6 +114,7 @@ class StreamingResponseOrchestrator:
         safety_api,
         guardrail_ids: list[str] | None = None,
         prompt: OpenAIResponsePrompt | None = None,
+        parallel_tool_calls: bool | None = None,
         max_tool_calls: int | None = None,
     ):
         self.inference_api = inference_api
@@ -128,6 +129,8 @@ class StreamingResponseOrchestrator:
         self.prompt = prompt
         # System message that is inserted into the model's context
         self.instructions = instructions
+        # Whether to allow more than one function tool call generated per turn.
+        self.parallel_tool_calls = parallel_tool_calls
         # Max number of total calls to built-in tools that can be processed in a response
         self.max_tool_calls = max_tool_calls
         self.sequence_number = 0
@@ -190,6 +193,7 @@ class StreamingResponseOrchestrator:
             usage=self.accumulated_usage,
             instructions=self.instructions,
             prompt=self.prompt,
+            parallel_tool_calls=self.parallel_tool_calls,
             max_tool_calls=self.max_tool_calls,
         )
```
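The single-call enforcement itself is not visible in these hunks. A hedged sketch of the kind of per-turn check the orchestrator could apply; the function name and call site are assumptions for illustration, not the PR's actual code:

```python
def limit_function_calls(tool_calls: list, parallel_tool_calls: bool | None) -> list:
    """Keep at most one generated function call when parallel_tool_calls is False.

    None is treated like the default (True): parallel calls stay allowed.
    """
    if parallel_tool_calls is False and len(tool_calls) > 1:
        return tool_calls[:1]
    return tool_calls

assert limit_function_calls(["call_a", "call_b"], False) == ["call_a"]
assert limit_function_calls(["call_a", "call_b"], None) == ["call_a", "call_b"]
```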
```diff
@@ -72,6 +72,7 @@ class Agents(Protocol):
         model: str,
         prompt: OpenAIResponsePrompt | None = None,
         instructions: str | None = None,
+        parallel_tool_calls: bool | None = True,
         previous_response_id: str | None = None,
         conversation: str | None = None,
         store: bool | None = True,
```
```diff
@@ -585,7 +585,7 @@ class OpenAIResponseObject(BaseModel):
     :param model: Model identifier used for generation
     :param object: Object type identifier, always "response"
     :param output: List of generated output items (messages, tool calls, etc.)
-    :param parallel_tool_calls: Whether tool calls can be executed in parallel
+    :param parallel_tool_calls: (Optional) Whether to allow more than one function tool call generated per turn.
     :param previous_response_id: (Optional) ID of the previous response in a conversation
     :param prompt: (Optional) Reference to a prompt template and its variables.
     :param status: Current status of the response generation
@@ -605,7 +605,7 @@ class OpenAIResponseObject(BaseModel):
     model: str
     object: Literal["response"] = "response"
     output: Sequence[OpenAIResponseOutput]
-    parallel_tool_calls: bool = False
+    parallel_tool_calls: bool | None = True
     previous_response_id: str | None = None
     prompt: OpenAIResponsePrompt | None = None
     status: str
```
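This hunk is where the `feat!` breaking change is most visible: both the type and the default of `parallel_tool_calls` change on the public response object. A small stand-in demo of the observable difference (the real `OpenAIResponseObject` has more required fields, so these are hypothetical minimal models):

```python
from pydantic import BaseModel

class Before(BaseModel):
    parallel_tool_calls: bool = False  # old field definition

class After(BaseModel):
    parallel_tool_calls: bool | None = True  # new field definition

assert Before().parallel_tool_calls is False
assert After().parallel_tool_calls is True  # default flipped
assert After(parallel_tool_calls=None).parallel_tool_calls is None  # null now accepted
```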