Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-02 08:44:44 +00:00.
fix: Restore previous responses to input list, not messages
This adjusts the restoration of previous responses to prepend them to the list of Responses API inputs, instead of to our converted list of Chat Completion messages. That matches the expected behavior of the Responses API; I had misinterpreted this nuance in the initial implementation.

Signed-off-by: Ben Browning <bbrownin@redhat.com>
This commit is contained in:
parent 467fb19345
commit 263eb6fd37

7 changed files with 400 additions and 303 deletions
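The gist of the change, as a minimal sketch in plain Python (simplified names, not the actual implementation): before, the prior turn was converted into Chat Completion messages up front, which had no way to carry non-message items such as web search tool calls; after, the prior turn's input and output items are prepended to the Responses API input list itself, and conversion to chat messages happens once, later.

# Sketch only: `previous` stands for a stored response with `input_items`
# (what the caller sent) and `response.output` (what the model produced).
def prepend_previous(new_input, previous):
    items = list(previous.input_items.data)    # previous request's input items
    items.extend(previous.response.output)     # previous response's output items
    items.extend(new_input)                    # this request's new input items
    return items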
docs/_static/llama-stack-spec.html (vendored, 242 lines changed)
@@ -6283,54 +6283,15 @@
             ],
             "title": "AgentTurnResponseTurnStartPayload"
         },
-        "OpenAIResponseInputMessage": {
-            "type": "object",
-            "properties": {
-                "content": {
-                    "oneOf": [
-                        {
-                            "type": "string"
-                        },
-                        {
-                            "type": "array",
-                            "items": {
-                                "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
-                            }
-                        }
-                    ]
+        "OpenAIResponseInput": {
+            "oneOf": [
+                {
+                    "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
                 },
-                "role": {
-                    "oneOf": [
-                        {
-                            "type": "string",
-                            "const": "system"
-                        },
-                        {
-                            "type": "string",
-                            "const": "developer"
-                        },
-                        {
-                            "type": "string",
-                            "const": "user"
-                        },
-                        {
-                            "type": "string",
-                            "const": "assistant"
-                        }
-                    ]
-                },
-                "type": {
-                    "type": "string",
-                    "const": "message",
-                    "default": "message"
+                {
+                    "$ref": "#/components/schemas/OpenAIResponseMessage"
                 }
-            },
-            "additionalProperties": false,
-            "required": [
-                "content",
-                "role"
-            ],
-            "title": "OpenAIResponseInputMessage"
+            ]
         },
         "OpenAIResponseInputMessageContent": {
             "oneOf": [
@@ -6431,6 +6392,111 @@
             ],
             "title": "OpenAIResponseInputToolWebSearch"
         },
+        "OpenAIResponseMessage": {
+            "type": "object",
+            "properties": {
+                "content": {
+                    "oneOf": [
+                        {
+                            "type": "string"
+                        },
+                        {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
+                            }
+                        },
+                        {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
+                            }
+                        }
+                    ]
+                },
+                "role": {
+                    "oneOf": [
+                        {
+                            "type": "string",
+                            "const": "system"
+                        },
+                        {
+                            "type": "string",
+                            "const": "developer"
+                        },
+                        {
+                            "type": "string",
+                            "const": "user"
+                        },
+                        {
+                            "type": "string",
+                            "const": "assistant"
+                        }
+                    ]
+                },
+                "type": {
+                    "type": "string",
+                    "const": "message",
+                    "default": "message"
+                },
+                "id": {
+                    "type": "string"
+                },
+                "status": {
+                    "type": "string"
+                }
+            },
+            "additionalProperties": false,
+            "required": [
+                "content",
+                "role",
+                "type"
+            ],
+            "title": "OpenAIResponseMessage",
+            "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios."
+        },
+        "OpenAIResponseOutputMessageContent": {
+            "type": "object",
+            "properties": {
+                "text": {
+                    "type": "string"
+                },
+                "type": {
+                    "type": "string",
+                    "const": "output_text",
+                    "default": "output_text"
+                }
+            },
+            "additionalProperties": false,
+            "required": [
+                "text",
+                "type"
+            ],
+            "title": "OpenAIResponseOutputMessageContentOutputText"
+        },
+        "OpenAIResponseOutputMessageWebSearchToolCall": {
+            "type": "object",
+            "properties": {
+                "id": {
+                    "type": "string"
+                },
+                "status": {
+                    "type": "string"
+                },
+                "type": {
+                    "type": "string",
+                    "const": "web_search_call",
+                    "default": "web_search_call"
+                }
+            },
+            "additionalProperties": false,
+            "required": [
+                "id",
+                "status",
+                "type"
+            ],
+            "title": "OpenAIResponseOutputMessageWebSearchToolCall"
+        },
         "CreateOpenaiResponseRequest": {
             "type": "object",
             "properties": {
@@ -6442,7 +6508,7 @@
                 {
                     "type": "array",
                     "items": {
-                        "$ref": "#/components/schemas/OpenAIResponseInputMessage"
+                        "$ref": "#/components/schemas/OpenAIResponseInput"
                     }
                 }
             ],
@@ -6560,7 +6626,7 @@
         "OpenAIResponseOutput": {
             "oneOf": [
                 {
-                    "$ref": "#/components/schemas/OpenAIResponseOutputMessage"
+                    "$ref": "#/components/schemas/OpenAIResponseMessage"
                 },
                 {
                     "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
@@ -6569,89 +6635,11 @@
             "discriminator": {
                 "propertyName": "type",
                 "mapping": {
-                    "message": "#/components/schemas/OpenAIResponseOutputMessage",
+                    "message": "#/components/schemas/OpenAIResponseMessage",
                     "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall"
                 }
             }
         },
-        "OpenAIResponseOutputMessage": {
-            "type": "object",
-            "properties": {
-                "id": {
-                    "type": "string"
-                },
-                "content": {
-                    "type": "array",
-                    "items": {
-                        "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent"
-                    }
-                },
-                "role": {
-                    "type": "string",
-                    "const": "assistant",
-                    "default": "assistant"
-                },
-                "status": {
-                    "type": "string"
-                },
-                "type": {
-                    "type": "string",
-                    "const": "message",
-                    "default": "message"
-                }
-            },
-            "additionalProperties": false,
-            "required": [
-                "id",
-                "content",
-                "role",
-                "status",
-                "type"
-            ],
-            "title": "OpenAIResponseOutputMessage"
-        },
-        "OpenAIResponseOutputMessageContent": {
-            "type": "object",
-            "properties": {
-                "text": {
-                    "type": "string"
-                },
-                "type": {
-                    "type": "string",
-                    "const": "output_text",
-                    "default": "output_text"
-                }
-            },
-            "additionalProperties": false,
-            "required": [
-                "text",
-                "type"
-            ],
-            "title": "OpenAIResponseOutputMessageContentOutputText"
-        },
-        "OpenAIResponseOutputMessageWebSearchToolCall": {
-            "type": "object",
-            "properties": {
-                "id": {
-                    "type": "string"
-                },
-                "status": {
-                    "type": "string"
-                },
-                "type": {
-                    "type": "string",
-                    "const": "web_search_call",
-                    "default": "web_search_call"
-                }
-            },
-            "additionalProperties": false,
-            "required": [
-                "id",
-                "status",
-                "type"
-            ],
-            "title": "OpenAIResponseOutputMessageWebSearchToolCall"
-        },
         "OpenAIResponseObjectStream": {
             "oneOf": [
                 {
docs/_static/llama-stack-spec.yaml (vendored, 171 lines changed)
@@ -4392,34 +4392,10 @@ components:
         - event_type
         - turn_id
       title: AgentTurnResponseTurnStartPayload
-    OpenAIResponseInputMessage:
-      type: object
-      properties:
-        content:
-          oneOf:
-            - type: string
-            - type: array
-              items:
-                $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
-        role:
-          oneOf:
-            - type: string
-              const: system
-            - type: string
-              const: developer
-            - type: string
-              const: user
-            - type: string
-              const: assistant
-        type:
-          type: string
-          const: message
-          default: message
-      additionalProperties: false
-      required:
-        - content
-        - role
-      title: OpenAIResponseInputMessage
+    OpenAIResponseInput:
+      oneOf:
+        - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
+        - $ref: '#/components/schemas/OpenAIResponseMessage'
     OpenAIResponseInputMessageContent:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
@@ -4483,6 +4459,79 @@ components:
       required:
         - type
       title: OpenAIResponseInputToolWebSearch
+    OpenAIResponseMessage:
+      type: object
+      properties:
+        content:
+          oneOf:
+            - type: string
+            - type: array
+              items:
+                $ref: '#/components/schemas/OpenAIResponseInputMessageContent'
+            - type: array
+              items:
+                $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
+        role:
+          oneOf:
+            - type: string
+              const: system
+            - type: string
+              const: developer
+            - type: string
+              const: user
+            - type: string
+              const: assistant
+        type:
+          type: string
+          const: message
+          default: message
+        id:
+          type: string
+        status:
+          type: string
+      additionalProperties: false
+      required:
+        - content
+        - role
+        - type
+      title: OpenAIResponseMessage
+      description: >-
+        Corresponds to the various Message types in the Responses API. They are all
+        under one type because the Responses API gives them all the same "type" value,
+        and there is no way to tell them apart in certain scenarios.
+    OpenAIResponseOutputMessageContent:
+      type: object
+      properties:
+        text:
+          type: string
+        type:
+          type: string
+          const: output_text
+          default: output_text
+      additionalProperties: false
+      required:
+        - text
+        - type
+      title: >-
+        OpenAIResponseOutputMessageContentOutputText
+    "OpenAIResponseOutputMessageWebSearchToolCall":
+      type: object
+      properties:
+        id:
+          type: string
+        status:
+          type: string
+        type:
+          type: string
+          const: web_search_call
+          default: web_search_call
+      additionalProperties: false
+      required:
+        - id
+        - status
+        - type
+      title: >-
+        OpenAIResponseOutputMessageWebSearchToolCall
     CreateOpenaiResponseRequest:
       type: object
       properties:
@@ -4491,7 +4540,7 @@ components:
           - type: string
           - type: array
             items:
-              $ref: '#/components/schemas/OpenAIResponseInputMessage'
+              $ref: '#/components/schemas/OpenAIResponseInput'
         description: Input message(s) to create the response.
     model:
       type: string
@@ -4575,73 +4624,13 @@ components:
       title: OpenAIResponseObject
     OpenAIResponseOutput:
       oneOf:
-        - $ref: '#/components/schemas/OpenAIResponseOutputMessage'
+        - $ref: '#/components/schemas/OpenAIResponseMessage'
         - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
       discriminator:
         propertyName: type
         mapping:
-          message: '#/components/schemas/OpenAIResponseOutputMessage'
+          message: '#/components/schemas/OpenAIResponseMessage'
           web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
-    OpenAIResponseOutputMessage:
-      type: object
-      properties:
-        id:
-          type: string
-        content:
-          type: array
-          items:
-            $ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
-        role:
-          type: string
-          const: assistant
-          default: assistant
-        status:
-          type: string
-        type:
-          type: string
-          const: message
-          default: message
-      additionalProperties: false
-      required:
-        - id
-        - content
-        - role
-        - status
-        - type
-      title: OpenAIResponseOutputMessage
-    OpenAIResponseOutputMessageContent:
-      type: object
-      properties:
-        text:
-          type: string
-        type:
-          type: string
-          const: output_text
-          default: output_text
-      additionalProperties: false
-      required:
-        - text
-        - type
-      title: >-
-        OpenAIResponseOutputMessageContentOutputText
-    "OpenAIResponseOutputMessageWebSearchToolCall":
-      type: object
-      properties:
-        id:
-          type: string
-        status:
-          type: string
-        type:
-          type: string
-          const: web_search_call
-          default: web_search_call
-      additionalProperties: false
-      required:
-        - id
-        - status
-        - type
-      title: >-
-        OpenAIResponseOutputMessageWebSearchToolCall
     OpenAIResponseObjectStream:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
@@ -29,7 +29,7 @@ from llama_stack.apis.tools import ToolDef
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
 
 from .openai_responses import (
-    OpenAIResponseInputMessage,
+    OpenAIResponseInput,
     OpenAIResponseInputTool,
     OpenAIResponseObject,
     OpenAIResponseObjectStream,
@@ -588,7 +588,7 @@ class Agents(Protocol):
     @webmethod(route="/openai/v1/responses", method="POST")
     async def create_openai_response(
         self,
-        input: str | list[OpenAIResponseInputMessage],
+        input: str | list[OpenAIResponseInput],
         model: str,
         previous_response_id: str | None = None,
         store: bool | None = True,
@@ -17,6 +17,28 @@ class OpenAIResponseError(BaseModel):
     message: str
 
 
+@json_schema_type
+class OpenAIResponseInputMessageContentText(BaseModel):
+    text: str
+    type: Literal["input_text"] = "input_text"
+
+
+@json_schema_type
+class OpenAIResponseInputMessageContentImage(BaseModel):
+    detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
+    type: Literal["input_image"] = "input_image"
+    # TODO: handle file_id
+    image_url: str | None = None
+
+
+# TODO: handle file content types
+OpenAIResponseInputMessageContent = Annotated[
+    OpenAIResponseInputMessageContentText | OpenAIResponseInputMessageContentImage,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
+
+
 @json_schema_type
 class OpenAIResponseOutputMessageContentOutputText(BaseModel):
     text: str
@@ -31,13 +53,22 @@ register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMe
 
 
 @json_schema_type
-class OpenAIResponseOutputMessage(BaseModel):
-    id: str
-    content: list[OpenAIResponseOutputMessageContent]
-    role: Literal["assistant"] = "assistant"
-    status: str
+class OpenAIResponseMessage(BaseModel):
+    """
+    Corresponds to the various Message types in the Responses API.
+    They are all under one type because the Responses API gives them all
+    the same "type" value, and there is no way to tell them apart in certain
+    scenarios.
+    """
+
+    content: str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent]
+    role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
     type: Literal["message"] = "message"
+
+    # The fields below are not used in all scenarios, but are required in others.
+    id: str | None = None
+    status: str | None = None
 
 
 @json_schema_type
 class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
@@ -47,7 +78,7 @@ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
 
 
 OpenAIResponseOutput = Annotated[
-    OpenAIResponseOutputMessage | OpenAIResponseOutputMessageWebSearchToolCall,
+    OpenAIResponseMessage | OpenAIResponseOutputMessageWebSearchToolCall,
     Field(discriminator="type"),
 ]
 register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
@@ -89,33 +120,15 @@ OpenAIResponseObjectStream = Annotated[
 register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream")
 
 
-@json_schema_type
-class OpenAIResponseInputMessageContentText(BaseModel):
-    text: str
-    type: Literal["input_text"] = "input_text"
-
-
-@json_schema_type
-class OpenAIResponseInputMessageContentImage(BaseModel):
-    detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
-    type: Literal["input_image"] = "input_image"
-    # TODO: handle file_id
-    image_url: str | None = None
-
-
-# TODO: handle file content types
-OpenAIResponseInputMessageContent = Annotated[
-    OpenAIResponseInputMessageContentText | OpenAIResponseInputMessageContentImage,
-    Field(discriminator="type"),
+OpenAIResponseInput = Annotated[
+    # Responses API allows output messages to be passed in as input
+    OpenAIResponseOutputMessageWebSearchToolCall
+    |
+    # Fallback to the generic message type as a last resort
+    OpenAIResponseMessage,
+    Field(union_mode="left_to_right"),
 ]
-register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
-
-
-@json_schema_type
-class OpenAIResponseInputMessage(BaseModel):
-    content: str | list[OpenAIResponseInputMessageContent]
-    role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
-    type: Literal["message"] | None = "message"
+register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
 
 
 @json_schema_type
@@ -133,18 +146,11 @@ OpenAIResponseInputTool = Annotated[
 register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
 
 
-@json_schema_type
-class OpenAIResponseInputItemMessage(OpenAIResponseInputMessage):
-    id: str
-
-
-@json_schema_type
 class OpenAIResponseInputItemList(BaseModel):
-    data: list[OpenAIResponseInputItemMessage]
+    data: list[OpenAIResponseInput]
     object: Literal["list"] = "list"
 
 
-@json_schema_type
 class OpenAIResponsePreviousResponseWithInputItems(BaseModel):
     input_items: OpenAIResponseInputItemList
     response: OpenAIResponseObject
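The OpenAIResponseInput union above deliberately avoids a discriminator: per the code comments, the more specific web search tool call type is tried first and the generic message type is the fallback, using Pydantic's left-to-right union mode. A minimal standalone sketch of that behavior (simplified models, not the ones from this file; assumes Pydantic v2):

from typing import Annotated, Literal

from pydantic import BaseModel, Field, TypeAdapter


class WebSearchCall(BaseModel):
    id: str
    status: str
    type: Literal["web_search_call"] = "web_search_call"


class Message(BaseModel):
    content: str
    role: str
    type: Literal["message"] = "message"


# Members are tried in declaration order; the first that validates wins.
Item = Annotated[WebSearchCall | Message, Field(union_mode="left_to_right")]
adapter = TypeAdapter(Item)

ws = adapter.validate_python({"id": "ws_1", "status": "completed", "type": "web_search_call"})
print(type(ws).__name__)  # WebSearchCall: matches the first member
msg = adapter.validate_python({"content": "hi", "role": "user"})
print(type(msg).__name__)  # Message: the generic fallback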
@@ -22,7 +22,7 @@ from llama_stack.apis.agents import (
     Document,
     ListAgentSessionsResponse,
     ListAgentsResponse,
-    OpenAIResponseInputMessage,
+    OpenAIResponseInput,
     OpenAIResponseInputTool,
     OpenAIResponseObject,
     Session,
@@ -255,7 +255,7 @@ class MetaReferenceAgentsImpl(Agents):
 
     async def create_openai_response(
         self,
-        input: str | list[OpenAIResponseInputMessage],
+        input: str | list[OpenAIResponseInput],
         model: str,
         previous_response_id: str | None = None,
         store: bool | None = True,
@@ -12,19 +12,18 @@ from typing import cast
 from openai.types.chat import ChatCompletionToolParam
 
 from llama_stack.apis.agents.openai_responses import (
+    OpenAIResponseInput,
     OpenAIResponseInputItemList,
-    OpenAIResponseInputItemMessage,
-    OpenAIResponseInputMessage,
     OpenAIResponseInputMessageContent,
     OpenAIResponseInputMessageContentImage,
     OpenAIResponseInputMessageContentText,
     OpenAIResponseInputTool,
+    OpenAIResponseMessage,
     OpenAIResponseObject,
     OpenAIResponseObjectStream,
     OpenAIResponseObjectStreamResponseCompleted,
     OpenAIResponseObjectStreamResponseCreated,
     OpenAIResponseOutput,
-    OpenAIResponseOutputMessage,
     OpenAIResponseOutputMessageContentOutputText,
     OpenAIResponseOutputMessageWebSearchToolCall,
     OpenAIResponsePreviousResponseWithInputItems,
@@ -72,7 +71,7 @@ async def _convert_response_input_content_to_chat_content_parts(
 
 
 async def _convert_response_input_to_chat_user_content(
-    input: str | list[OpenAIResponseInputMessage],
+    input: str | list[OpenAIResponseInput],
 ) -> str | list[OpenAIChatCompletionContentPartParam]:
     user_content: str | list[OpenAIChatCompletionContentPartParam] = ""
     if isinstance(input, list):
@@ -87,29 +86,7 @@ async def _convert_response_input_to_chat_user_content(
     return user_content
 
 
-async def _previous_response_to_messages(
-    previous_response: OpenAIResponsePreviousResponseWithInputItems,
-) -> list[OpenAIMessageParam]:
-    messages: list[OpenAIMessageParam] = []
-    for previous_message in previous_response.input_items.data:
-        previous_content = await _convert_response_input_content_to_chat_content_parts(previous_message.content)
-        if previous_message.role == "user":
-            converted_message = OpenAIUserMessageParam(content=previous_content)
-        elif previous_message.role == "assistant":
-            converted_message = OpenAIAssistantMessageParam(content=previous_content)
-        else:
-            # TODO: handle other message roles? unclear if system/developer roles are
-            # used in previous responses
-            continue
-        messages.append(converted_message)
-
-    for output_message in previous_response.response.output:
-        if isinstance(output_message, OpenAIResponseOutputMessage):
-            messages.append(OpenAIAssistantMessageParam(content=output_message.content[0].text))
-    return messages
-
-
-async def _openai_choices_to_output_messages(choices: list[OpenAIChoice]) -> list[OpenAIResponseOutputMessage]:
+async def _openai_choices_to_output_messages(choices: list[OpenAIChoice]) -> list[OpenAIResponseMessage]:
     output_messages = []
     for choice in choices:
         output_content = ""
@@ -119,10 +96,11 @@ async def _openai_choices_to_output_messages(choices: list[OpenAIChoice]) -> lis
             output_content = choice.message.content.text
         # TODO: handle image content
         output_messages.append(
-            OpenAIResponseOutputMessage(
+            OpenAIResponseMessage(
                 id=f"msg_{uuid.uuid4()}",
                 content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
                 status="completed",
+                role="assistant",
             )
         )
     return output_messages
@@ -148,6 +126,27 @@ class OpenAIResponsesImpl:
             raise ValueError(f"OpenAI response with id '{id}' not found")
         return OpenAIResponsePreviousResponseWithInputItems.model_validate_json(response_json)
 
+    async def _prepend_previous_response(
+        self, input: str | list[OpenAIResponseInput], previous_response_id: str | None = None
+    ):
+        if previous_response_id:
+            previous_response_with_input = await self._get_previous_response_with_input(previous_response_id)
+
+            # previous response input items
+            new_input_items = previous_response_with_input.input_items.data
+
+            # previous response output items
+            new_input_items.extend(previous_response_with_input.response.output)
+
+            # new input items from the current request
+            if isinstance(input, str):
+                # Normalize input to a list of OpenAIResponseInputMessage objects
+                input = [OpenAIResponseMessage(content=input, role="user")]
+            new_input_items.extend(input)
+            input = new_input_items
+
+        return input
+
     async def get_openai_response(
         self,
         id: str,
@@ -157,7 +156,7 @@ class OpenAIResponsesImpl:
 
     async def create_openai_response(
         self,
-        input: str | list[OpenAIResponseInputMessage],
+        input: str | list[OpenAIResponseInput],
         model: str,
         previous_response_id: str | None = None,
         store: bool | None = True,
@@ -167,10 +166,9 @@ class OpenAIResponsesImpl:
     ):
         stream = False if stream is None else stream
 
+        input = await self._prepend_previous_response(input, previous_response_id)
+
         messages: list[OpenAIMessageParam] = []
-        if previous_response_id:
-            previous_response_with_input = await self._get_previous_response_with_input(previous_response_id)
-            messages.extend(await _previous_response_to_messages(previous_response_with_input))
-
         user_content = await _convert_response_input_to_chat_user_content(input)
         messages.append(OpenAIUserMessageParam(content=user_content))
@@ -237,22 +235,29 @@ class OpenAIResponsesImpl:
         if store:
             # Store in kvstore
+
+            new_input_id = f"msg_{uuid.uuid4()}"
             if isinstance(input, str):
                 # synthesize a message from the input string
                 input_content = OpenAIResponseInputMessageContentText(text=input)
-                input_content_item = OpenAIResponseInputItemMessage(
+                input_content_item = OpenAIResponseMessage(
                     role="user",
                     content=[input_content],
-                    id=f"msg_{uuid.uuid4()}",
+                    id=new_input_id,
                 )
                 input_items_data = [input_content_item]
             else:
                 # we already have a list of messages
                 input_items_data = []
                 for input_item in input:
-                    input_items_data.append(
-                        OpenAIResponseInputItemMessage(id=f"msg_{uuid.uuid4()}", **input_item.model_dump())
-                    )
+                    if isinstance(input_item, OpenAIResponseMessage):
+                        # These may or may not already have an id, so dump to dict, check for id, and add if missing
+                        input_item_dict = input_item.model_dump()
+                        if "id" not in input_item_dict:
+                            input_item_dict["id"] = new_input_id
+                        input_items_data.append(OpenAIResponseMessage(**input_item_dict))
+                    else:
+                        input_items_data.append(input_item)
 
             input_items = OpenAIResponseInputItemList(data=input_items_data)
             prev_response = OpenAIResponsePreviousResponseWithInputItems(
                 input_items=input_items,
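A hedged usage sketch of the resulting flow (`impl` stands in for an OpenAIResponsesImpl instance; the model name and ids are hypothetical):

# First turn: stored under its response id when store=True.
first = await impl.create_openai_response(input="What was the score?", model="some-model")

# Follow-up turn: the stored turn is restored as Responses API input items,
# not as pre-converted Chat Completion messages.
second = await impl.create_openai_response(
    input="Say that again, please.",
    model="some-model",
    previous_response_id=first.id,
)
# Internally, the input list for the second call becomes:
#   [previous input items..., previous output items (including any
#    web search tool calls)..., the new user message]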
@@ -4,13 +4,19 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from unittest.mock import AsyncMock
+from unittest.mock import AsyncMock, patch
 
 import pytest
 
 from llama_stack.apis.agents.openai_responses import (
+    OpenAIResponseInputItemList,
+    OpenAIResponseInputMessageContentText,
     OpenAIResponseInputToolWebSearch,
-    OpenAIResponseOutputMessage,
+    OpenAIResponseMessage,
+    OpenAIResponseObject,
+    OpenAIResponseOutputMessageContentOutputText,
+    OpenAIResponseOutputMessageWebSearchToolCall,
+    OpenAIResponsePreviousResponseWithInputItems,
 )
 from llama_stack.apis.inference.inference import (
     OpenAIAssistantMessageParam,
@@ -100,7 +106,7 @@ async def test_create_openai_response_with_string_input(openai_responses_impl, m
     openai_responses_impl.persistence_store.set.assert_called_once()
     assert result.model == model
     assert len(result.output) == 1
-    assert isinstance(result.output[0], OpenAIResponseOutputMessage)
+    assert isinstance(result.output[0], OpenAIResponseMessage)
     assert result.output[0].content[0].text == "Hello! How can I help you?"
 
 
@@ -198,5 +204,108 @@ async def test_create_openai_response_with_string_input_with_tools(openai_respon
 
     # Check that we got the content from our mocked tool execution result
     assert len(result.output) >= 1
-    assert isinstance(result.output[1], OpenAIResponseOutputMessage)
+    assert isinstance(result.output[1], OpenAIResponseMessage)
     assert result.output[1].content[0].text == "The score of todays game was 10-12"
+
+
+@pytest.mark.asyncio
+async def test_prepend_previous_response_none(openai_responses_impl):
+    """Test prepending no previous response to a new response."""
+
+    input = await openai_responses_impl._prepend_previous_response("fake_input", None)
+    assert input == "fake_input"
+
+
+@pytest.mark.asyncio
+@patch.object(OpenAIResponsesImpl, "_get_previous_response_with_input")
+async def test_prepend_previous_response_basic(get_previous_response_with_input, openai_responses_impl):
+    """Test prepending a basic previous response to a new response."""
+
+    input_item_message = OpenAIResponseMessage(
+        id="123",
+        content=[OpenAIResponseInputMessageContentText(text="fake_previous_input")],
+        role="user",
+    )
+    input_items = OpenAIResponseInputItemList(data=[input_item_message])
+    response_output_message = OpenAIResponseMessage(
+        id="123",
+        content=[OpenAIResponseOutputMessageContentOutputText(text="fake_response")],
+        status="completed",
+        role="assistant",
+    )
+    response = OpenAIResponseObject(
+        created_at=1,
+        id="resp_123",
+        model="fake_model",
+        output=[response_output_message],
+        status="completed",
+    )
+    previous_response = OpenAIResponsePreviousResponseWithInputItems(
+        input_items=input_items,
+        response=response,
+    )
+    get_previous_response_with_input.return_value = previous_response
+
+    input = await openai_responses_impl._prepend_previous_response("fake_input", "resp_123")
+
+    assert len(input) == 3
+    # Check for previous input
+    assert isinstance(input[0], OpenAIResponseMessage)
+    assert input[0].content[0].text == "fake_previous_input"
+    # Check for previous output
+    assert isinstance(input[1], OpenAIResponseMessage)
+    assert input[1].content[0].text == "fake_response"
+    # Check for new input
+    assert isinstance(input[2], OpenAIResponseMessage)
+    assert input[2].content == "fake_input"
+
+
+@pytest.mark.asyncio
+@patch.object(OpenAIResponsesImpl, "_get_previous_response_with_input")
+async def test_prepend_previous_response_web_search(get_previous_response_with_input, openai_responses_impl):
+    """Test prepending a web search previous response to a new response."""
+
+    input_item_message = OpenAIResponseMessage(
+        id="123",
+        content=[OpenAIResponseInputMessageContentText(text="fake_previous_input")],
+        role="user",
+    )
+    input_items = OpenAIResponseInputItemList(data=[input_item_message])
+    output_web_search = OpenAIResponseOutputMessageWebSearchToolCall(
+        id="ws_123",
+        status="completed",
+    )
+    output_message = OpenAIResponseMessage(
+        id="123",
+        content=[OpenAIResponseOutputMessageContentOutputText(text="fake_web_search_response")],
+        status="completed",
+        role="assistant",
+    )
+    response = OpenAIResponseObject(
+        created_at=1,
+        id="resp_123",
+        model="fake_model",
+        output=[output_web_search, output_message],
+        status="completed",
+    )
+    previous_response = OpenAIResponsePreviousResponseWithInputItems(
+        input_items=input_items,
+        response=response,
+    )
+    get_previous_response_with_input.return_value = previous_response
+
+    input_messages = [OpenAIResponseMessage(content="fake_input", role="user")]
+    input = await openai_responses_impl._prepend_previous_response(input_messages, "resp_123")
+
+    assert len(input) == 4
+    # Check for previous input
+    assert isinstance(input[0], OpenAIResponseMessage)
+    assert input[0].content[0].text == "fake_previous_input"
+    # Check for previous output web search tool call
+    assert isinstance(input[1], OpenAIResponseOutputMessageWebSearchToolCall)
+    # Check for previous output web search response
+    assert isinstance(input[2], OpenAIResponseMessage)
+    assert input[2].content[0].text == "fake_web_search_response"
+    # Check for new input
+    assert isinstance(input[3], OpenAIResponseMessage)
+    assert input[3].content == "fake_input"