Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-28 04:51:59 +00:00)
Responses API: Finish wiring up function tool calls
This finishes the plumbing for function tool calls and adds a basic verification test (which passes for me locally against Llama 4 Scout in vLLM).

Signed-off-by: Ben Browning <bbrownin@redhat.com>
This commit is contained in:
parent 1990df2c50
commit 924213a689
6 changed files with 148 additions and 6 deletions
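The commit message mentions a basic verification test exercised against Llama 4 Scout in vLLM. Below is a hedged sketch of what that flow looks like from an OpenAI-compatible client pointed at a llama-stack server; the base URL, model name, and tool schema are assumptions for illustration, and the test actually added by this commit may differ.

# Sketch only: ask the Responses API to call a function tool and check that a
# "function_call" output item comes back. Base URL and model name are
# assumptions, not taken from this commit.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

response = client.responses.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    input="What is the weather in Tokyo?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        }
    ],
)

# With function tool calls wired up, the model should answer with a
# function_call output item rather than a plain message.
call = response.output[0]
assert call.type == "function_call"
assert call.name == "get_weather"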
@@ -77,8 +77,20 @@ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
     type: Literal["web_search_call"] = "web_search_call"
 
 
+@json_schema_type
+class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    arguments: str
+    call_id: str
+    name: str
+    type: Literal["function_call"] = "function_call"
+    id: str
+    status: str
+
+
 OpenAIResponseOutput = Annotated[
-    OpenAIResponseMessage | OpenAIResponseOutputMessageWebSearchToolCall,
+    OpenAIResponseMessage
+    | OpenAIResponseOutputMessageWebSearchToolCall
+    | OpenAIResponseOutputMessageFunctionToolCall,
     Field(discriminator="type"),
 ]
 register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
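For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the discriminated union this hunk extends. It uses plain pydantic only; llama-stack's @json_schema_type and register_schema helpers are omitted, and the web-search model is trimmed to the single field visible in this hunk, so this is an illustration rather than the project's actual module.

from typing import Annotated, Literal

from pydantic import BaseModel, Field, TypeAdapter


class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
    # Trimmed stand-in; the real model has more fields than this hunk shows.
    type: Literal["web_search_call"] = "web_search_call"


class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
    # Mirrors the fields added in this commit.
    arguments: str
    call_id: str
    name: str
    type: Literal["function_call"] = "function_call"
    id: str
    status: str


# "type" is the discriminator, so pydantic picks the right model from a raw dict.
OpenAIResponseOutput = Annotated[
    OpenAIResponseOutputMessageWebSearchToolCall
    | OpenAIResponseOutputMessageFunctionToolCall,
    Field(discriminator="type"),
]

adapter = TypeAdapter(OpenAIResponseOutput)
parsed = adapter.validate_python(
    {
        "arguments": '{"city": "Tokyo"}',
        "call_id": "call_123",
        "name": "get_weather",
        "type": "function_call",
        "id": "fc_abc",
        "status": "completed",
    }
)
assert isinstance(parsed, OpenAIResponseOutputMessageFunctionToolCall)

Consumers of a response's output list can then branch on the item's type field instead of isinstance-checking every model.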