From e7d21e1ee31210639d98f617804b4ea9a7ad48dc Mon Sep 17 00:00:00 2001 From: Francisco Arceo Date: Fri, 10 Oct 2025 14:57:40 -0400 Subject: [PATCH] feat: Add support for Conversations in Responses API (#3743) # What does this PR do? This PR adds support for Conversations in Responses. ## Test Plan Unit tests Integration tests
Manual testing with this script: (click to expand) ```python from openai import OpenAI client = OpenAI() client = OpenAI(base_url="http://localhost:8321/v1/", api_key="none") def test_conversation_create(): print("Testing conversation create...") conversation = client.conversations.create( metadata={"topic": "demo"}, items=[ {"type": "message", "role": "user", "content": "Hello!"} ] ) print(f"Created: {conversation}") return conversation def test_conversation_retrieve(conv_id): print(f"Testing conversation retrieve for {conv_id}...") retrieved = client.conversations.retrieve(conv_id) print(f"Retrieved: {retrieved}") return retrieved def test_conversation_update(conv_id): print(f"Testing conversation update for {conv_id}...") updated = client.conversations.update( conv_id, metadata={"topic": "project-x"} ) print(f"Updated: {updated}") return updated def test_conversation_delete(conv_id): print(f"Testing conversation delete for {conv_id}...") deleted = client.conversations.delete(conv_id) print(f"Deleted: {deleted}") return deleted def test_conversation_items_create(conv_id): print(f"Testing conversation items create for {conv_id}...") items = client.conversations.items.create( conv_id, items=[ { "type": "message", "role": "user", "content": [{"type": "input_text", "text": "Hello!"}] }, { "type": "message", "role": "user", "content": [{"type": "input_text", "text": "How are you?"}] } ] ) print(f"Items created: {items}") return items def test_conversation_items_list(conv_id): print(f"Testing conversation items list for {conv_id}...") items = client.conversations.items.list(conv_id, limit=10) print(f"Items list: {items}") return items def test_conversation_item_retrieve(conv_id, item_id): print(f"Testing conversation item retrieve for {conv_id}/{item_id}...") item = client.conversations.items.retrieve(conversation_id=conv_id, item_id=item_id) print(f"Item retrieved: {item}") return item def test_conversation_item_delete(conv_id, item_id): print(f"Testing conversation 
item delete for {conv_id}/{item_id}...") deleted = client.conversations.items.delete(conversation_id=conv_id, item_id=item_id) print(f"Item deleted: {deleted}") return deleted def test_conversation_responses_create(): print("\nTesting conversation create for a responses example...") conversation = client.conversations.create() print(f"Created: {conversation}") response = client.responses.create( model="gpt-4.1", input=[{"role": "user", "content": "What are the 5 Ds of dodgeball?"}], conversation=conversation.id, ) print(f"Created response: {response} for conversation {conversation.id}") return response, conversation def test_conversations_responses_create_followup( conversation, content="Repeat what you just said but add 'this is my second time saying this'", ): print(f"Using: {conversation.id}") response = client.responses.create( model="gpt-4.1", input=[{"role": "user", "content": content}], conversation=conversation.id, ) print(f"Created response: {response} for conversation {conversation.id}") conv_items = client.conversations.items.list(conversation.id) print(f"\nRetrieving list of items for conversation {conversation.id}:") print(conv_items.model_dump_json(indent=2)) def test_response_with_fake_conv_id(): fake_conv_id = "conv_zzzzzzzzz5dc81908289d62779d2ac510a2b0b602ef00a44" print(f"Using {fake_conv_id}") try: response = client.responses.create( model="gpt-4.1", input=[{"role": "user", "content": "say hello"}], conversation=fake_conv_id, ) print(f"Created response: {response} for conversation {fake_conv_id}") except Exception as e: print(f"failed to create response for conversation {fake_conv_id} with error {e}") def main(): print("Testing OpenAI Conversations API...") # Create conversation conversation = test_conversation_create() conv_id = conversation.id # Retrieve conversation test_conversation_retrieve(conv_id) # Update conversation test_conversation_update(conv_id) # Create items items = test_conversation_items_create(conv_id) # List items items_list = 
test_conversation_items_list(conv_id) # Retrieve specific item if items_list.data: item_id = items_list.data[0].id test_conversation_item_retrieve(conv_id, item_id) # Delete item test_conversation_item_delete(conv_id, item_id) # Delete conversation test_conversation_delete(conv_id) response, conversation2 = test_conversation_responses_create() print('\ntesting response retrieval') test_conversation_retrieve(conversation2.id) print('\ntesting responses follow up') test_conversations_responses_create_followup(conversation2) print('\ntesting responses follow up x2!') test_conversations_responses_create_followup( conversation2, content="Repeat what you just said but add 'this is my third time saying this'", ) test_response_with_fake_conv_id() print("All tests completed!") if __name__ == "__main__": main() ```
--------- Signed-off-by: Francisco Javier Arceo Co-authored-by: Ashwin Bharambe --- docs/static/deprecated-llama-stack-spec.html | 4 + docs/static/deprecated-llama-stack-spec.yaml | 6 + docs/static/llama-stack-spec.html | 4 + docs/static/llama-stack-spec.yaml | 6 + docs/static/stainless-llama-stack-spec.html | 4 + docs/static/stainless-llama-stack-spec.yaml | 6 + llama_stack/apis/agents/agents.py | 2 + llama_stack/apis/common/errors.py | 15 + .../core/conversations/conversations.py | 7 +- llama_stack/core/resolver.py | 6 +- llama_stack/core/stack.py | 15 +- llama_stack/distributions/ci-tests/run.yaml | 3 + .../distributions/dell/run-with-safety.yaml | 3 + llama_stack/distributions/dell/run.yaml | 3 + .../meta-reference-gpu/run-with-safety.yaml | 3 + .../distributions/meta-reference-gpu/run.yaml | 3 + .../distributions/nvidia/run-with-safety.yaml | 3 + llama_stack/distributions/nvidia/run.yaml | 3 + .../distributions/open-benchmark/run.yaml | 3 + .../distributions/postgres-demo/run.yaml | 3 + .../distributions/starter-gpu/run.yaml | 3 + llama_stack/distributions/starter/run.yaml | 3 + llama_stack/distributions/template.py | 6 + llama_stack/distributions/watsonx/run.yaml | 3 + llama_stack/log.py | 1 + .../inline/agents/meta_reference/__init__.py | 1 + .../inline/agents/meta_reference/agents.py | 6 + .../responses/openai_responses.py | 131 +- llama_stack/providers/registry/agents.py | 1 + ...c18360a07bb3dda397579e25c27b-fb8ebeef.json | 687 +++++ ...b47d2d30f85486facb326c6566433a1a7937f.json | 638 +++++ ...65fc6e439e8a9d9c6529452347eb946d6e227.json | 323 +++ ...7425b4026d50c61ddf894b8e9dfa73a4e533f.json | 2483 +++++++++++++++++ ...0978c217fd96c5e18980dab9eef9bb4181763.json | 323 +++ ...1fe624fdc346efd40a6b0408be9e159bb741b.json | 485 ++++ ...4f189d73428e24ca646aa56d0030a5269cc52.json | 323 +++ ...26cdda9768f8cac1c13510d9d5a73de1f33c3.json | 223 ++ .../responses/test_conversation_responses.py | 147 + .../agent/test_meta_reference_agent.py | 5 +- 
.../meta_reference/test_openai_responses.py | 15 +- .../test_openai_responses_conversations.py | 331 +++ 41 files changed, 6221 insertions(+), 19 deletions(-) create mode 100644 tests/integration/common/recordings/models-d98e7566147f9d534bc0461f2efe61e3f525c18360a07bb3dda397579e25c27b-fb8ebeef.json create mode 100644 tests/integration/responses/recordings/2c7ef062cd359c27bf13edcc081b47d2d30f85486facb326c6566433a1a7937f.json create mode 100644 tests/integration/responses/recordings/2e1939c376f00646141ffc9896f65fc6e439e8a9d9c6529452347eb946d6e227.json create mode 100644 tests/integration/responses/recordings/302b9a7e33b5e584ddffd841d097425b4026d50c61ddf894b8e9dfa73a4e533f.json create mode 100644 tests/integration/responses/recordings/8fc2e03221ae4e60eae7084fffd0978c217fd96c5e18980dab9eef9bb4181763.json create mode 100644 tests/integration/responses/recordings/9d4488f127623f1ac2b73fd88ba1fe624fdc346efd40a6b0408be9e159bb741b.json create mode 100644 tests/integration/responses/recordings/a3570859ba5d7ed3933303d95564f189d73428e24ca646aa56d0030a5269cc52.json create mode 100644 tests/integration/responses/recordings/f46d73788d572bd21e0fc6f30ec26cdda9768f8cac1c13510d9d5a73de1f33c3.json create mode 100644 tests/integration/responses/test_conversation_responses.py create mode 100644 tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py diff --git a/docs/static/deprecated-llama-stack-spec.html b/docs/static/deprecated-llama-stack-spec.html index 2fa339eeb..0ea2e8c43 100644 --- a/docs/static/deprecated-llama-stack-spec.html +++ b/docs/static/deprecated-llama-stack-spec.html @@ -10083,6 +10083,10 @@ "type": "string", "description": "(Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses." }, + "conversation": { + "type": "string", + "description": "(Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. 
Input and output messages will be automatically added to the conversation." + }, "store": { "type": "boolean" }, diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml index 98af89fa8..008cd8673 100644 --- a/docs/static/deprecated-llama-stack-spec.yaml +++ b/docs/static/deprecated-llama-stack-spec.yaml @@ -7493,6 +7493,12 @@ components: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + conversation: + type: string + description: >- + (Optional) The ID of a conversation to add the response to. Must begin + with 'conv_'. Input and output messages will be automatically added to + the conversation. store: type: boolean stream: diff --git a/docs/static/llama-stack-spec.html b/docs/static/llama-stack-spec.html index 1064c1433..7e534f995 100644 --- a/docs/static/llama-stack-spec.html +++ b/docs/static/llama-stack-spec.html @@ -8178,6 +8178,10 @@ "type": "string", "description": "(Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses." }, + "conversation": { + "type": "string", + "description": "(Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation." + }, "store": { "type": "boolean" }, diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml index f36d69e3a..bad40c87d 100644 --- a/docs/static/llama-stack-spec.yaml +++ b/docs/static/llama-stack-spec.yaml @@ -6189,6 +6189,12 @@ components: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + conversation: + type: string + description: >- + (Optional) The ID of a conversation to add the response to. 
Must begin + with 'conv_'. Input and output messages will be automatically added to + the conversation. store: type: boolean stream: diff --git a/docs/static/stainless-llama-stack-spec.html b/docs/static/stainless-llama-stack-spec.html index 25fa2bc03..36c63367c 100644 --- a/docs/static/stainless-llama-stack-spec.html +++ b/docs/static/stainless-llama-stack-spec.html @@ -10187,6 +10187,10 @@ "type": "string", "description": "(Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses." }, + "conversation": { + "type": "string", + "description": "(Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation." + }, "store": { "type": "boolean" }, diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml index df0112be7..4475cc8f0 100644 --- a/docs/static/stainless-llama-stack-spec.yaml +++ b/docs/static/stainless-llama-stack-spec.yaml @@ -7634,6 +7634,12 @@ components: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + conversation: + type: string + description: >- + (Optional) The ID of a conversation to add the response to. Must begin + with 'conv_'. Input and output messages will be automatically added to + the conversation. 
store: type: boolean stream: diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py index 5983b5c45..ff4412c12 100644 --- a/llama_stack/apis/agents/agents.py +++ b/llama_stack/apis/agents/agents.py @@ -812,6 +812,7 @@ class Agents(Protocol): model: str, instructions: str | None = None, previous_response_id: str | None = None, + conversation: str | None = None, store: bool | None = True, stream: bool | None = False, temperature: float | None = None, @@ -831,6 +832,7 @@ class Agents(Protocol): :param input: Input message(s) to create the response. :param model: The underlying LLM used for completions. :param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses. + :param conversation: (Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation. :param include: (Optional) Additional fields to include in the response. :param shields: (Optional) List of shields to apply during response generation. Can be shield IDs (strings) or shield specifications. :returns: An OpenAIResponseObject. 
diff --git a/llama_stack/apis/common/errors.py b/llama_stack/apis/common/errors.py index 4c9c0a818..a421d0c6f 100644 --- a/llama_stack/apis/common/errors.py +++ b/llama_stack/apis/common/errors.py @@ -86,3 +86,18 @@ class TokenValidationError(ValueError): def __init__(self, message: str) -> None: super().__init__(message) + + +class ConversationNotFoundError(ResourceNotFoundError): + """raised when Llama Stack cannot find a referenced conversation""" + + def __init__(self, conversation_id: str) -> None: + super().__init__(conversation_id, "Conversation", "client.conversations.list()") + + +class InvalidConversationIdError(ValueError): + """raised when a conversation ID has an invalid format""" + + def __init__(self, conversation_id: str) -> None: + message = f"Invalid conversation ID '{conversation_id}'. Expected an ID that begins with 'conv_'." + super().__init__(message) diff --git a/llama_stack/core/conversations/conversations.py b/llama_stack/core/conversations/conversations.py index 612b2f68e..04441054d 100644 --- a/llama_stack/core/conversations/conversations.py +++ b/llama_stack/core/conversations/conversations.py @@ -193,12 +193,15 @@ class ConversationServiceImpl(Conversations): await self._get_validated_conversation(conversation_id) created_items = [] - created_at = int(time.time()) + base_time = int(time.time()) - for item in items: + for i, item in enumerate(items): item_dict = item.model_dump() item_id = self._get_or_generate_item_id(item, item_dict) + # make each timestamp unique to maintain order + created_at = base_time + i + item_record = { "id": item_id, "conversation_id": conversation_id, diff --git a/llama_stack/core/resolver.py b/llama_stack/core/resolver.py index 0d6f54f9e..749253865 100644 --- a/llama_stack/core/resolver.py +++ b/llama_stack/core/resolver.py @@ -150,6 +150,7 @@ async def resolve_impls( provider_registry: ProviderRegistry, dist_registry: DistributionRegistry, policy: list[AccessRule], + internal_impls: dict[Api, Any] | None = 
None, ) -> dict[Api, Any]: """ Resolves provider implementations by: @@ -172,7 +173,7 @@ async def resolve_impls( sorted_providers = sort_providers_by_deps(providers_with_specs, run_config) - return await instantiate_providers(sorted_providers, router_apis, dist_registry, run_config, policy) + return await instantiate_providers(sorted_providers, router_apis, dist_registry, run_config, policy, internal_impls) def specs_for_autorouted_apis(apis_to_serve: list[str] | set[str]) -> dict[str, dict[str, ProviderWithSpec]]: @@ -280,9 +281,10 @@ async def instantiate_providers( dist_registry: DistributionRegistry, run_config: StackRunConfig, policy: list[AccessRule], + internal_impls: dict[Api, Any] | None = None, ) -> dict[Api, Any]: """Instantiates providers asynchronously while managing dependencies.""" - impls: dict[Api, Any] = {} + impls: dict[Api, Any] = internal_impls.copy() if internal_impls else {} inner_impls_by_provider_id: dict[str, dict[str, Any]] = {f"inner-{x.value}": {} for x in router_apis} for api_str, provider in sorted_providers: # Skip providers that are not enabled diff --git a/llama_stack/core/stack.py b/llama_stack/core/stack.py index 49f6b9cc9..2eab9344f 100644 --- a/llama_stack/core/stack.py +++ b/llama_stack/core/stack.py @@ -326,12 +326,17 @@ class Stack: dist_registry, _ = await create_dist_registry(self.run_config.metadata_store, self.run_config.image_name) policy = self.run_config.server.auth.access_policy if self.run_config.server.auth else [] - impls = await resolve_impls( - self.run_config, self.provider_registry or get_provider_registry(self.run_config), dist_registry, policy - ) - # Add internal implementations after all other providers are resolved - add_internal_implementations(impls, self.run_config) + internal_impls = {} + add_internal_implementations(internal_impls, self.run_config) + + impls = await resolve_impls( + self.run_config, + self.provider_registry or get_provider_registry(self.run_config), + dist_registry, + policy, + 
internal_impls, + ) if Api.prompts in impls: await impls[Api.prompts].initialize() diff --git a/llama_stack/distributions/ci-tests/run.yaml b/llama_stack/distributions/ci-tests/run.yaml index b14477a9a..40f4d8a0a 100644 --- a/llama_stack/distributions/ci-tests/run.yaml +++ b/llama_stack/distributions/ci-tests/run.yaml @@ -224,6 +224,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ci-tests}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ci-tests}/conversations.db models: [] shields: - shield_id: llama-guard diff --git a/llama_stack/distributions/dell/run-with-safety.yaml b/llama_stack/distributions/dell/run-with-safety.yaml index f52a0e86a..9c140d0a3 100644 --- a/llama_stack/distributions/dell/run-with-safety.yaml +++ b/llama_stack/distributions/dell/run-with-safety.yaml @@ -101,6 +101,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/dell/run.yaml b/llama_stack/distributions/dell/run.yaml index 322cd51d1..d6f4e967f 100644 --- a/llama_stack/distributions/dell/run.yaml +++ b/llama_stack/distributions/dell/run.yaml @@ -97,6 +97,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml index dfa1754ab..1078192b9 100644 --- 
a/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml +++ b/llama_stack/distributions/meta-reference-gpu/run-with-safety.yaml @@ -114,6 +114,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/meta-reference-gpu/run.yaml b/llama_stack/distributions/meta-reference-gpu/run.yaml index ab53f3b26..57b00958d 100644 --- a/llama_stack/distributions/meta-reference-gpu/run.yaml +++ b/llama_stack/distributions/meta-reference-gpu/run.yaml @@ -104,6 +104,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/nvidia/run-with-safety.yaml b/llama_stack/distributions/nvidia/run-with-safety.yaml index d383fa078..edd258ee4 100644 --- a/llama_stack/distributions/nvidia/run-with-safety.yaml +++ b/llama_stack/distributions/nvidia/run-with-safety.yaml @@ -103,6 +103,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/nvidia/run.yaml b/llama_stack/distributions/nvidia/run.yaml index 40913cf39..daa93093b 100644 --- a/llama_stack/distributions/nvidia/run.yaml +++ b/llama_stack/distributions/nvidia/run.yaml @@ -92,6 +92,9 @@ metadata_store: inference_store: 
type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/conversations.db models: [] shields: [] vector_dbs: [] diff --git a/llama_stack/distributions/open-benchmark/run.yaml b/llama_stack/distributions/open-benchmark/run.yaml index 68efa6e89..89442d502 100644 --- a/llama_stack/distributions/open-benchmark/run.yaml +++ b/llama_stack/distributions/open-benchmark/run.yaml @@ -134,6 +134,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/conversations.db models: - metadata: {} model_id: gpt-4o diff --git a/llama_stack/distributions/postgres-demo/run.yaml b/llama_stack/distributions/postgres-demo/run.yaml index 0cf0e82e6..6af00d2d6 100644 --- a/llama_stack/distributions/postgres-demo/run.yaml +++ b/llama_stack/distributions/postgres-demo/run.yaml @@ -86,6 +86,9 @@ inference_store: db: ${env.POSTGRES_DB:=llamastack} user: ${env.POSTGRES_USER:=llamastack} password: ${env.POSTGRES_PASSWORD:=llamastack} +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/postgres-demo}/conversations.db models: - metadata: {} model_id: ${env.INFERENCE_MODEL} diff --git a/llama_stack/distributions/starter-gpu/run.yaml b/llama_stack/distributions/starter-gpu/run.yaml index de5fe5681..b28121815 100644 --- a/llama_stack/distributions/starter-gpu/run.yaml +++ b/llama_stack/distributions/starter-gpu/run.yaml @@ -227,6 +227,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/conversations.db models: [] shields: - shield_id: 
llama-guard diff --git a/llama_stack/distributions/starter/run.yaml b/llama_stack/distributions/starter/run.yaml index c440e4e4b..341b51a97 100644 --- a/llama_stack/distributions/starter/run.yaml +++ b/llama_stack/distributions/starter/run.yaml @@ -224,6 +224,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/inference_store.db +conversations_store: + type: sqlite + db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/conversations.db models: [] shields: - shield_id: llama-guard diff --git a/llama_stack/distributions/template.py b/llama_stack/distributions/template.py index d564312dc..59beb8a8a 100644 --- a/llama_stack/distributions/template.py +++ b/llama_stack/distributions/template.py @@ -181,6 +181,7 @@ class RunConfigSettings(BaseModel): default_benchmarks: list[BenchmarkInput] | None = None metadata_store: dict | None = None inference_store: dict | None = None + conversations_store: dict | None = None def run_config( self, @@ -240,6 +241,11 @@ class RunConfigSettings(BaseModel): __distro_dir__=f"~/.llama/distributions/{name}", db_name="inference_store.db", ), + "conversations_store": self.conversations_store + or SqliteSqlStoreConfig.sample_run_config( + __distro_dir__=f"~/.llama/distributions/{name}", + db_name="conversations.db", + ), "models": [m.model_dump(exclude_none=True) for m in (self.default_models or [])], "shields": [s.model_dump(exclude_none=True) for s in (self.default_shields or [])], "vector_dbs": [], diff --git a/llama_stack/distributions/watsonx/run.yaml b/llama_stack/distributions/watsonx/run.yaml index e0c337f9d..aea2189bc 100644 --- a/llama_stack/distributions/watsonx/run.yaml +++ b/llama_stack/distributions/watsonx/run.yaml @@ -107,6 +107,9 @@ metadata_store: inference_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/inference_store.db +conversations_store: + type: sqlite + db_path: 
${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/conversations.db models: [] shields: [] vector_dbs: [] diff --git a/llama_stack/log.py b/llama_stack/log.py index ce92219f4..ff54b2f7c 100644 --- a/llama_stack/log.py +++ b/llama_stack/log.py @@ -30,6 +30,7 @@ CATEGORIES = [ "tools", "client", "telemetry", + "openai", "openai_responses", "openai_conversations", "testing", diff --git a/llama_stack/providers/inline/agents/meta_reference/__init__.py b/llama_stack/providers/inline/agents/meta_reference/__init__.py index 37b0b50c8..d5cfd2e5b 100644 --- a/llama_stack/providers/inline/agents/meta_reference/__init__.py +++ b/llama_stack/providers/inline/agents/meta_reference/__init__.py @@ -21,6 +21,7 @@ async def get_provider_impl(config: MetaReferenceAgentsImplConfig, deps: dict[Ap deps[Api.safety], deps[Api.tool_runtime], deps[Api.tool_groups], + deps[Api.conversations], policy, Api.telemetry in deps, ) diff --git a/llama_stack/providers/inline/agents/meta_reference/agents.py b/llama_stack/providers/inline/agents/meta_reference/agents.py index cfaf56a34..27d3a94cc 100644 --- a/llama_stack/providers/inline/agents/meta_reference/agents.py +++ b/llama_stack/providers/inline/agents/meta_reference/agents.py @@ -30,6 +30,7 @@ from llama_stack.apis.agents import ( ) from llama_stack.apis.agents.openai_responses import OpenAIResponseText from llama_stack.apis.common.responses import PaginatedResponse +from llama_stack.apis.conversations import Conversations from llama_stack.apis.inference import ( Inference, ToolConfig, @@ -63,6 +64,7 @@ class MetaReferenceAgentsImpl(Agents): safety_api: Safety, tool_runtime_api: ToolRuntime, tool_groups_api: ToolGroups, + conversations_api: Conversations, policy: list[AccessRule], telemetry_enabled: bool = False, ): @@ -72,6 +74,7 @@ class MetaReferenceAgentsImpl(Agents): self.safety_api = safety_api self.tool_runtime_api = tool_runtime_api self.tool_groups_api = tool_groups_api + self.conversations_api = conversations_api 
self.telemetry_enabled = telemetry_enabled self.in_memory_store = InmemoryKVStoreImpl() @@ -88,6 +91,7 @@ class MetaReferenceAgentsImpl(Agents): tool_runtime_api=self.tool_runtime_api, responses_store=self.responses_store, vector_io_api=self.vector_io_api, + conversations_api=self.conversations_api, ) async def create_agent( @@ -325,6 +329,7 @@ class MetaReferenceAgentsImpl(Agents): model: str, instructions: str | None = None, previous_response_id: str | None = None, + conversation: str | None = None, store: bool | None = True, stream: bool | None = False, temperature: float | None = None, @@ -339,6 +344,7 @@ class MetaReferenceAgentsImpl(Agents): model, instructions, previous_response_id, + conversation, store, stream, temperature, diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py index fabe46f43..e459b0232 100644 --- a/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +++ b/llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py @@ -24,6 +24,11 @@ from llama_stack.apis.agents.openai_responses import ( OpenAIResponseText, OpenAIResponseTextFormat, ) +from llama_stack.apis.common.errors import ( + InvalidConversationIdError, +) +from llama_stack.apis.conversations import Conversations +from llama_stack.apis.conversations.conversations import ConversationItem from llama_stack.apis.inference import ( Inference, OpenAIMessageParam, @@ -61,12 +66,14 @@ class OpenAIResponsesImpl: tool_runtime_api: ToolRuntime, responses_store: ResponsesStore, vector_io_api: VectorIO, # VectorIO + conversations_api: Conversations, ): self.inference_api = inference_api self.tool_groups_api = tool_groups_api self.tool_runtime_api = tool_runtime_api self.responses_store = responses_store self.vector_io_api = vector_io_api + self.conversations_api = conversations_api self.tool_executor = ToolExecutor( 
tool_groups_api=tool_groups_api, tool_runtime_api=tool_runtime_api, @@ -205,6 +212,7 @@ class OpenAIResponsesImpl: model: str, instructions: str | None = None, previous_response_id: str | None = None, + conversation: str | None = None, store: bool | None = True, stream: bool | None = False, temperature: float | None = None, @@ -221,11 +229,27 @@ class OpenAIResponsesImpl: if shields is not None: raise NotImplementedError("Shields parameter is not yet implemented in the meta-reference provider") + if conversation is not None and previous_response_id is not None: + raise ValueError( + "Mutually exclusive parameters: 'previous_response_id' and 'conversation'. Ensure you are only providing one of these parameters." + ) + + original_input = input # needed for syncing to Conversations + if conversation is not None: + if not conversation.startswith("conv_"): + raise InvalidConversationIdError(conversation) + + # Check conversation exists (raises ConversationNotFoundError if not) + _ = await self.conversations_api.get_conversation(conversation) + input = await self._load_conversation_context(conversation, input) + stream_gen = self._create_streaming_response( input=input, + original_input=original_input, model=model, instructions=instructions, previous_response_id=previous_response_id, + conversation=conversation, store=store, temperature=temperature, text=text, @@ -268,8 +292,10 @@ class OpenAIResponsesImpl: self, input: str | list[OpenAIResponseInput], model: str, + original_input: str | list[OpenAIResponseInput] | None = None, instructions: str | None = None, previous_response_id: str | None = None, + conversation: str | None = None, store: bool | None = True, temperature: float | None = None, text: OpenAIResponseText | None = None, @@ -296,7 +322,7 @@ class OpenAIResponsesImpl: ) # Create orchestrator and delegate streaming logic - response_id = f"resp-{uuid.uuid4()}" + response_id = f"resp_{uuid.uuid4()}" created_at = int(time.time()) orchestrator = 
StreamingResponseOrchestrator( @@ -319,13 +345,102 @@ class OpenAIResponsesImpl: failed_response = stream_chunk.response yield stream_chunk - # Store the response if requested - if store and final_response and failed_response is None: - await self._store_response( - response=final_response, - input=all_input, - messages=orchestrator.final_messages, - ) + # Store and sync immediately after yielding terminal events + # This ensures the storage/syncing happens even if the consumer breaks early + if ( + stream_chunk.type in {"response.completed", "response.incomplete"} + and store + and final_response + and failed_response is None + ): + await self._store_response( + response=final_response, + input=all_input, + messages=orchestrator.final_messages, + ) + + if stream_chunk.type in {"response.completed", "response.incomplete"} and conversation and final_response: + # for Conversations, we need to use the original_input if it's available, otherwise use input + sync_input = original_input if original_input is not None else input + await self._sync_response_to_conversation(conversation, sync_input, final_response) async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject: return await self.responses_store.delete_response_object(response_id) + + async def _load_conversation_context( + self, conversation_id: str, content: str | list[OpenAIResponseInput] + ) -> list[OpenAIResponseInput]: + """Load conversation history and merge with provided content.""" + conversation_items = await self.conversations_api.list(conversation_id, order="asc") + + context_messages = [] + for item in conversation_items.data: + if isinstance(item, OpenAIResponseMessage): + if item.role == "user": + context_messages.append( + OpenAIResponseMessage( + role="user", content=item.content, id=item.id if hasattr(item, "id") else None + ) + ) + elif item.role == "assistant": + context_messages.append( + OpenAIResponseMessage( + role="assistant", content=item.content, id=item.id 
if hasattr(item, "id") else None + ) + ) + + # add new content to context + if isinstance(content, str): + context_messages.append(OpenAIResponseMessage(role="user", content=content)) + elif isinstance(content, list): + context_messages.extend(content) + + return context_messages + + async def _sync_response_to_conversation( + self, conversation_id: str, content: str | list[OpenAIResponseInput], response: OpenAIResponseObject + ) -> None: + """Sync content and response messages to the conversation.""" + conversation_items = [] + + # add user content message(s) + if isinstance(content, str): + conversation_items.append( + {"type": "message", "role": "user", "content": [{"type": "input_text", "text": content}]} + ) + elif isinstance(content, list): + for item in content: + if not isinstance(item, OpenAIResponseMessage): + raise NotImplementedError(f"Unsupported input item type: {type(item)}") + + if item.role == "user": + if isinstance(item.content, str): + conversation_items.append( + { + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": item.content}], + } + ) + elif isinstance(item.content, list): + conversation_items.append({"type": "message", "role": "user", "content": item.content}) + else: + raise NotImplementedError(f"Unsupported user message content type: {type(item.content)}") + elif item.role == "assistant": + if isinstance(item.content, list): + conversation_items.append({"type": "message", "role": "assistant", "content": item.content}) + else: + raise NotImplementedError(f"Unsupported assistant message content type: {type(item.content)}") + else: + raise NotImplementedError(f"Unsupported message role: {item.role}") + + # add assistant response message + for output_item in response.output: + if isinstance(output_item, OpenAIResponseMessage) and output_item.role == "assistant": + if hasattr(output_item, "content") and isinstance(output_item.content, list): + conversation_items.append({"type": "message", "role": "assistant", 
"content": output_item.content}) + + if conversation_items: + adapter = TypeAdapter(list[ConversationItem]) + validated_items = adapter.validate_python(conversation_items) + await self.conversations_api.add_items(conversation_id, validated_items) diff --git a/llama_stack/providers/registry/agents.py b/llama_stack/providers/registry/agents.py index bc46b4de2..d7e9bed88 100644 --- a/llama_stack/providers/registry/agents.py +++ b/llama_stack/providers/registry/agents.py @@ -35,6 +35,7 @@ def available_providers() -> list[ProviderSpec]: Api.vector_dbs, Api.tool_runtime, Api.tool_groups, + Api.conversations, ], optional_api_dependencies=[ Api.telemetry, diff --git a/tests/integration/common/recordings/models-d98e7566147f9d534bc0461f2efe61e3f525c18360a07bb3dda397579e25c27b-fb8ebeef.json b/tests/integration/common/recordings/models-d98e7566147f9d534bc0461f2efe61e3f525c18360a07bb3dda397579e25c27b-fb8ebeef.json new file mode 100644 index 000000000..d80893db1 --- /dev/null +++ b/tests/integration/common/recordings/models-d98e7566147f9d534bc0461f2efe61e3f525c18360a07bb3dda397579e25c27b-fb8ebeef.json @@ -0,0 +1,687 @@ +{ + "test_id": null, + "request": { + "method": "POST", + "url": "https://generativelanguage.googleapis.com/v1beta/openai/v1/models", + "headers": {}, + "body": {}, + "endpoint": "/v1/models", + "model": "" + }, + "response": { + "body": [ + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/embedding-gecko-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Embedding Gecko" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-pro-preview-03-25", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Pro Preview 03-25" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-preview-05-20", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": 
"Gemini 2.5 Flash Preview 05-20" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-lite-preview-06-17", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash-Lite Preview 06-17" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-pro-preview-05-06", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Pro Preview 05-06" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-pro-preview-06-05", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Pro Preview" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-pro", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Pro" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-exp", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash Experimental" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash 001" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-exp-image-generation", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": 
"Gemini 2.0 Flash (Image Generation) Experimental" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-lite-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash-Lite 001" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-lite", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash-Lite" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-preview-image-generation", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash Preview Image Generation" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-lite-preview-02-05", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash-Lite Preview 02-05" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-lite-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash-Lite Preview" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-pro-exp", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Pro Experimental" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-pro-exp-02-05", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Pro Experimental 02-05" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-exp-1206", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Experimental 1206" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": 
"models/gemini-2.0-flash-thinking-exp-01-21", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Preview 05-20" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-thinking-exp", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Preview 05-20" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-thinking-exp-1219", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Preview 05-20" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-preview-tts", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Preview TTS" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-pro-preview-tts", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Pro Preview TTS" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/learnlm-2.0-flash-experimental", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "LearnLM 2.0 Flash Experimental" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemma-3-1b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3 1B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemma-3-4b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3 4B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemma-3-12b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3 12B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + 
"id": "models/gemma-3-27b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3 27B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemma-3n-e4b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3n E4B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemma-3n-e2b-it", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemma 3n E2B" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-flash-latest", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Flash Latest" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-flash-lite-latest", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Flash-Lite Latest" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-pro-latest", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Pro Latest" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-lite", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash-Lite" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-image-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Nano Banana" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-image", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Nano Banana" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-preview-09-2025", + "created": null, + "object": "model", + "owned_by": 
"google", + "display_name": "Gemini 2.5 Flash Preview Sep 2025" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-lite-preview-09-2025", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash-Lite Preview Sep 2025" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-robotics-er-1.5-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Robotics-ER 1.5 Preview" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-computer-use-preview-10-2025", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Computer Use Preview 10-2025" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/embedding-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Embedding 001" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/text-embedding-004", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Text Embedding 004" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-embedding-exp-03-07", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Embedding Experimental 03-07" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-embedding-exp", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Embedding Experimental" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-embedding-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Embedding 001" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/aqa", + "created": 
null, + "object": "model", + "owned_by": "google", + "display_name": "Model that performs Attributed Question Answering." + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-3.0-generate-002", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 3.0" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-4.0-generate-preview-06-06", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 4 (Preview)" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-4.0-ultra-generate-preview-06-06", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 4 Ultra (Preview)" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-4.0-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 4" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-4.0-ultra-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 4 Ultra" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/imagen-4.0-fast-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Imagen 4 Fast" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/veo-2.0-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Veo 2" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/veo-3.0-generate-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Veo 3" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/veo-3.0-fast-generate-preview", + "created": null, + "object": "model", + 
"owned_by": "google", + "display_name": "Veo 3 fast" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/veo-3.0-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Veo 3" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/veo-3.0-fast-generate-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Veo 3 fast" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-preview-native-audio-dialog", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Preview Native Audio Dialog" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-exp-native-audio-thinking-dialog", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Exp Native Audio Thinking Dialog" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.0-flash-live-001", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.0 Flash 001" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-live-2.5-flash-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini Live 2.5 Flash Preview" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-live-preview", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Live Preview" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/gemini-2.5-flash-native-audio-latest", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Native Audio Latest" + } + }, + { + "__type__": "openai.types.model.Model", + 
"__data__": { + "id": "models/gemini-2.5-flash-native-audio-preview-09-2025", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Gemini 2.5 Flash Native Audio Preview 09-2025" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "models/lyria-realtime-exp", + "created": null, + "object": "model", + "owned_by": "google", + "display_name": "Lyria Realtime Experimental" + } + } + ], + "is_streaming": false + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/2c7ef062cd359c27bf13edcc081b47d2d30f85486facb326c6566433a1a7937f.json b/tests/integration/responses/recordings/2c7ef062cd359c27bf13edcc081b47d2d30f85486facb326c6566433a1a7937f.json new file mode 100644 index 000000000..28a85be54 --- /dev/null +++ b/tests/integration/responses/recordings/2c7ef062cd359c27bf13edcc081b47d2d30f85486facb326c6566433a1a7937f.json @@ -0,0 +1,638 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_multi_turn_and_streaming[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Say hello" + } + ] + }, + { + "role": "assistant", + "content": [ + { + "type": "text", + "text": "Hello! How can I assist you today?" 
+ } + ] + }, + { + "role": "user", + "content": "Say goodbye" + } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "E61N80qmAKV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": "Good", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "RbYn78k9X" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": "bye", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "IBw4vIzBvt" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": "!", + 
"function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "FXOCYsrHXSx9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " If", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ySP64LCuV9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "NE3YC0ouv" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " need", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "iSJ8YbAe" + } + }, + { + 
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " anything", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "MPAP" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " else", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "PChbHuZA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "WyyepKOKuTvf" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " feel", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + 
"object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "s8nIpvhv" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " free", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "tFwuLbr5" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "678PrJyxxu" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " reach", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "RBmSBjT" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " out", + "function_call": null, + "refusal": 
null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "mjIJATfrE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "GvKJyIHypg7s" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " Have", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "CcDfM81t" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "sGUAPFngoWf" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " great", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "w1jhhm6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": " day", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "4cMsz3kFC" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "VismXuEf9n66" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2c7ef062cd35", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "hbL6bnt" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/2e1939c376f00646141ffc9896f65fc6e439e8a9d9c6529452347eb946d6e227.json b/tests/integration/responses/recordings/2e1939c376f00646141ffc9896f65fc6e439e8a9d9c6529452347eb946d6e227.json new file mode 100644 index 000000000..859d8ae39 --- /dev/null +++ b/tests/integration/responses/recordings/2e1939c376f00646141ffc9896f65fc6e439e8a9d9c6529452347eb946d6e227.json @@ -0,0 +1,323 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_multi_turn_and_streaming[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Say hello" + } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "Wqzxk4QqgFH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": "Hello", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "IiBkvsZH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "HT9J8j2DVPQj" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": " How", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "BLAGedfaA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "TpMOnSngQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + 
{ + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "uooA8IiQn62" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": " assist", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "vCfE8F" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "welDIA5iQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": " today", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + 
"obfuscation": "GvffCjr" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": "?", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "aPLvF5oge2Bo" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-2e1939c376f0", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_eb3c3cb84d", + "usage": null, + "obfuscation": "mIwg4Kr" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/302b9a7e33b5e584ddffd841d097425b4026d50c61ddf894b8e9dfa73a4e533f.json b/tests/integration/responses/recordings/302b9a7e33b5e584ddffd841d097425b4026d50c61ddf894b8e9dfa73a4e533f.json new file mode 100644 index 000000000..eaa6ec4f0 --- /dev/null +++ b/tests/integration/responses/recordings/302b9a7e33b5e584ddffd841d097425b4026d50c61ddf894b8e9dfa73a4e533f.json @@ -0,0 +1,2483 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_basic_workflow[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What are the 5 Ds of dodgeball?" 
+ } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "tdsdAbt5jLO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "3VExvPK9B8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " \"", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "jLkgdbHwWx" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "5", + "function_call": null, + "refusal": null, + "role": null, + 
"tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "QZrrBPIlMbtm" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Ds", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "48iUCcmRJD" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " of", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Z9XeVeclzU" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " dodge", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "rl7i5od" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ball", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "l7O6u72Tg" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\"", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "WD5QlVXjjK8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "64SQMhDh5V" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "FVtAuknLosF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " humorous", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "CBDG" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " concept", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "OFhGR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " popular", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "YMlSI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ized", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "b9lAZlfNG" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " by", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "J8U3uWIqua" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "eHOM02ehd" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " character", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "JX5" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " P", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "W3hmgF2n5fE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "atches", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "yhwvR55" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " O", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "eT3VIBiThsS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "'H", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "6BOHgJaY8hn" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ouli", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "jXOy78aqm" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "han", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Xya8hdIM5j" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "iA3JsGK5YM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "CxZk5gvMT" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "SV8pjR2KY0i8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "200", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "McWWAhiOgM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "4", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "lJ1xIwxvZ34P" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " comedy", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "7QGmWi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " film", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "dYEjUC2D" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " \"", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ZwTRVjmV7u" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "D", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "v7QZ5tyMG8IB" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "odge", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "MRn522ziA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ball", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "wl2HJqFrE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "nL4YQF9sjZEb" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " A", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ZgEZ6syY6LN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " True", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "l0OiJXEy" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Und", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "bxRZ4WXmS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "erd", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ImDwehjPRe" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "og", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "THxADD33ykF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Story", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "jpPzlFx" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".\"", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "QWIp2nG4nD" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " The", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "z3X999fm6" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "LIcehppSOeT1" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "5", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "pcb2tkBr4sdF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Ds", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "8Ik5SSSP3l" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " are", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "FP5CWLFfB" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ":\n\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "mMjvF0R1" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "1", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "4vhFTRGEXCc4" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ePfTlLnEDVvA" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Dodge", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "fH2ppyZ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "nYdTQtMNLm9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "2", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "VHHpLrH4ljQ1" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "zuhAC2w01g8V" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Duck", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "3oLLYr1z" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "hdxlKF1prfZ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "3", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "qigpbe2NbUUO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "YGQghZaMLUrI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Dip", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "SASa0b1zN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "0jLsa9YEFCH" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "4", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "2fHM2mvnBonB" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "CmGGuXGvevyN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Dive", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "FLRn0aSw" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "pCi2O3z98t3" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "5", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "7AKwoXgezTSo" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "WrieMAQl80RR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " Dodge", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Mpn0aZB" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "\n\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "6v95mb5Ls" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "These", + "function_call": null, + "refusal": null, 
+ "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "J1m7poCa" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " terms", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "eiAf3GB" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " are", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "l4U2rdqfY" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " part", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "tgN4h38U" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " of", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "NBmfzCRtOs" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "MU7O8lP8ZSG" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " tongue", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "7Uqiwl" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "-in", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "S0IRrB572S" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "-che", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "I57A0AbhE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ek", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "k5STCelD8bO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " guide", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "s3ZDMPE" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "ELupiQG7eW" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " exc", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "4aUFnthtz" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "elling", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "V6EUz1Y" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "5KQNrR4qVV" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "q2RMf19RS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " game", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "XSwgHYdC" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " of", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "s2q7KWZAhA" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": " dodge", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Tj4tUp8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": "ball", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "o3o7c6Dts" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "yrQ8IX4WgySc" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-302b9a7e33b5", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "CwMAT5I" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/8fc2e03221ae4e60eae7084fffd0978c217fd96c5e18980dab9eef9bb4181763.json 
b/tests/integration/responses/recordings/8fc2e03221ae4e60eae7084fffd0978c217fd96c5e18980dab9eef9bb4181763.json new file mode 100644 index 000000000..1af5289b4 --- /dev/null +++ b/tests/integration/responses/recordings/8fc2e03221ae4e60eae7084fffd0978c217fd96c5e18980dab9eef9bb4181763.json @@ -0,0 +1,323 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_backward_compatibility[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Hello world" + } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Fm3rWsqaQaf" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": "Hello", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "MWjFCtyG" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "Jc3BfOu6j22G" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " How", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "rS4ldJA8W" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "2SvPfbSRC" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": 
"fp_cbf1785567", + "usage": null, + "obfuscation": "k8SvK0VN8kL" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " assist", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "b1znWM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "unBlSWKS3" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": " today", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "5ZRvTGW" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": "?", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "xHpFO0IpgfGY" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-8fc2e03221ae", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "lrtMQji" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/9d4488f127623f1ac2b73fd88ba1fe624fdc346efd40a6b0408be9e159bb741b.json b/tests/integration/responses/recordings/9d4488f127623f1ac2b73fd88ba1fe624fdc346efd40a6b0408be9e159bb741b.json new file mode 100644 index 000000000..4a80410aa --- /dev/null +++ b/tests/integration/responses/recordings/9d4488f127623f1ac2b73fd88ba1fe624fdc346efd40a6b0408be9e159bb741b.json @@ -0,0 +1,485 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_compat_client[openai_client-txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "Tell me a joke" + } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + 
"role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "kW0IPPuVClM" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "Why", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "Q244QQs6d9" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " don't", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "yEmxyk0" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " skeleton", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "DJ24" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "s", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "l0tUXCwdKRYn" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " fight", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "zMdufoe" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " each", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "lqeljyuN" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " other", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": 
"chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "uXfgSFR" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "?", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "6WVp7kBgO8SQ" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " \n\n", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "Wm2vzjtV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "They", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "Els6JQj2K" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " don't", + "function_call": null, + "refusal": null, + 
"role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "FmFagkz" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " have", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "y0ZssBRg" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "Cz7G6Hd7M" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": " guts", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "P6hXo4jF" + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "rMelEefCXLOp" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-9d4488f12762", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_f64f290af2", + "usage": null, + "obfuscation": "gjonOWA" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/a3570859ba5d7ed3933303d95564f189d73428e24ca646aa56d0030a5269cc52.json b/tests/integration/responses/recordings/a3570859ba5d7ed3933303d95564f189d73428e24ca646aa56d0030a5269cc52.json new file mode 100644 index 000000000..fb7a57018 --- /dev/null +++ b/tests/integration/responses/recordings/a3570859ba5d7ed3933303d95564f189d73428e24ca646aa56d0030a5269cc52.json @@ -0,0 +1,323 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_error_handling[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "First response" + } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + 
"model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "R5Xx7TAQA1Z" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": "Hello", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "FnGcOTHd" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "jxK7b2HxkZk7" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": " How", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": 
null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "1Ge9iOM89" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "YnG2hQdK5" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "M6XDB3s0lRi" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": " assist", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "aNjFaO" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + 
"delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "R9MUT2Wb1" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": " today", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "0OjhrLF" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": "?", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "1Fz6wJpIVonI" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-a3570859ba5d", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": 
"Skl7CuX" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff --git a/tests/integration/responses/recordings/f46d73788d572bd21e0fc6f30ec26cdda9768f8cac1c13510d9d5a73de1f33c3.json b/tests/integration/responses/recordings/f46d73788d572bd21e0fc6f30ec26cdda9768f8cac1c13510d9d5a73de1f33c3.json new file mode 100644 index 000000000..ad28e6ff3 --- /dev/null +++ b/tests/integration/responses/recordings/f46d73788d572bd21e0fc6f30ec26cdda9768f8cac1c13510d9d5a73de1f33c3.json @@ -0,0 +1,223 @@ +{ + "test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_context_loading[txt=openai/gpt-4o]", + "request": { + "method": "POST", + "url": "https://api.openai.com/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "My name is Alice" + }, + { + "role": "assistant", + "content": "Hello Alice!" + }, + { + "role": "user", + "content": "What's my name?" 
+ } + ], + "stream": true + }, + "endpoint": "/v1/chat/completions", + "model": "gpt-4o" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "RHNwtOyHje8" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": "Your", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "bUFt1mv3A" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": " name", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "fMpArO6r" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": " is", + "function_call": null, + "refusal": null, + "role": null, + 
"tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "qX3pXE0jVS" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": " Alice", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "JVtx58U" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "1dSAPGfGcbSV" + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "rec-f46d73788d57", + "choices": [ + { + "delta": { + "content": null, + "function_call": null, + "refusal": null, + "role": null, + "tool_calls": null + }, + "finish_reason": "stop", + "index": 0, + "logprobs": null + } + ], + "created": 0, + "model": "gpt-4o-2024-08-06", + "object": "chat.completion.chunk", + "service_tier": "default", + "system_fingerprint": "fp_cbf1785567", + "usage": null, + "obfuscation": "61xyZkZ" + } + } + ], + "is_streaming": true + }, + "id_normalization_mapping": {} +} diff 
--git a/tests/integration/responses/test_conversation_responses.py b/tests/integration/responses/test_conversation_responses.py new file mode 100644 index 000000000..6086ffd9e --- /dev/null +++ b/tests/integration/responses/test_conversation_responses.py @@ -0,0 +1,147 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +import pytest + + +@pytest.mark.integration +class TestConversationResponses: + """Integration tests for the conversation parameter in responses API.""" + + def test_conversation_basic_workflow(self, openai_client, text_model_id): + """Test basic conversation workflow: create conversation, add response, verify sync.""" + conversation = openai_client.conversations.create(metadata={"topic": "test"}) + assert conversation.id.startswith("conv_") + + response = openai_client.responses.create( + model=text_model_id, + input=[{"role": "user", "content": "What are the 5 Ds of dodgeball?"}], + conversation=conversation.id, + ) + + assert response.id.startswith("resp_") + assert len(response.output_text.strip()) > 0 + + # Verify conversation was synced properly + conversation_items = openai_client.conversations.items.list(conversation.id) + assert len(conversation_items.data) >= 2 + + roles = [item.role for item in conversation_items.data if hasattr(item, "role")] + assert "user" in roles and "assistant" in roles + + def test_conversation_multi_turn_and_streaming(self, openai_client, text_model_id): + """Test multi-turn conversations and streaming responses.""" + conversation = openai_client.conversations.create() + + # First turn + response1 = openai_client.responses.create( + model=text_model_id, + input=[{"role": "user", "content": "Say hello"}], + conversation=conversation.id, + ) + + # Second turn with streaming + response_stream = openai_client.responses.create( + model=text_model_id, + 
input=[{"role": "user", "content": "Say goodbye"}], + conversation=conversation.id, + stream=True, + ) + + final_response = None + for chunk in response_stream: + if chunk.type == "response.completed": + final_response = chunk.response + break + + assert response1.id != final_response.id + assert len(response1.output_text.strip()) > 0 + assert len(final_response.output_text.strip()) > 0 + + # Verify all turns are in conversation + conversation_items = openai_client.conversations.items.list(conversation.id) + print(f"DEBUG: Found {len(conversation_items.data)} messages in conversation:") + for i, item in enumerate(conversation_items.data): + if hasattr(item, "role") and hasattr(item, "content"): + content = item.content[0].text if item.content else "No content" + print(f" {i}: {item.role} - {content}") + assert len(conversation_items.data) >= 4 # 2 user + 2 assistant messages + + def test_conversation_context_loading(self, openai_client, text_model_id): + """Test that conversation context is properly loaded for responses.""" + conversation = openai_client.conversations.create( + items=[ + {"type": "message", "role": "user", "content": "My name is Alice"}, + {"type": "message", "role": "assistant", "content": "Hello Alice!"}, + ] + ) + + response = openai_client.responses.create( + model=text_model_id, + input=[{"role": "user", "content": "What's my name?"}], + conversation=conversation.id, + ) + + assert "alice" in response.output_text.lower() + + def test_conversation_error_handling(self, openai_client, text_model_id): + """Test error handling for invalid and nonexistent conversations.""" + # Invalid conversation ID format + with pytest.raises(Exception) as exc_info: + openai_client.responses.create( + model=text_model_id, + input=[{"role": "user", "content": "Hello"}], + conversation="invalid_id", + ) + assert any(word in str(exc_info.value).lower() for word in ["conv", "invalid", "bad"]) + + # Nonexistent conversation ID + with pytest.raises(Exception) as 
exc_info: + openai_client.responses.create( + model=text_model_id, + input=[{"role": "user", "content": "Hello"}], + conversation="conv_nonexistent123", + ) + assert any(word in str(exc_info.value).lower() for word in ["not found", "404"]) + + # + # response = openai_client.responses.create( + # model=text_model_id, input=[{"role": "user", "content": "First response"}] + # ) + # with pytest.raises(Exception) as exc_info: + # openai_client.responses.create( + # model=text_model_id, + # input=[{"role": "user", "content": "Hello"}], + # conversation="conv_test123", + # previous_response_id=response.id, + # ) + # assert "mutually exclusive" in str(exc_info.value).lower() + + def test_conversation_backward_compatibility(self, openai_client, text_model_id): + """Test that responses work without conversation parameter (backward compatibility).""" + response = openai_client.responses.create( + model=text_model_id, input=[{"role": "user", "content": "Hello world"}] + ) + + assert response.id.startswith("resp_") + assert len(response.output_text.strip()) > 0 + + # this is not ready yet + # def test_conversation_compat_client(self, compat_client, text_model_id): + # """Test conversation parameter works with compatibility client.""" + # if not hasattr(compat_client, "conversations"): + # pytest.skip("compat_client does not support conversations API") + # + # conversation = compat_client.conversations.create() + # response = compat_client.responses.create( + # model=text_model_id, input="Tell me a joke", conversation=conversation.id + # ) + # + # assert response is not None + # assert len(response.output_text.strip()) > 0 + # + # conversation_items = compat_client.conversations.items.list(conversation.id) + # assert len(conversation_items.data) >= 2 diff --git a/tests/unit/providers/agent/test_meta_reference_agent.py b/tests/unit/providers/agent/test_meta_reference_agent.py index fdbb2b8e9..cfb3e1327 100644 --- a/tests/unit/providers/agent/test_meta_reference_agent.py +++ 
b/tests/unit/providers/agent/test_meta_reference_agent.py @@ -15,6 +15,7 @@ from llama_stack.apis.agents import ( AgentCreateResponse, ) from llama_stack.apis.common.responses import PaginatedResponse +from llama_stack.apis.conversations import Conversations from llama_stack.apis.inference import Inference from llama_stack.apis.safety import Safety from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolRuntime @@ -33,6 +34,7 @@ def mock_apis(): "safety_api": AsyncMock(spec=Safety), "tool_runtime_api": AsyncMock(spec=ToolRuntime), "tool_groups_api": AsyncMock(spec=ToolGroups), + "conversations_api": AsyncMock(spec=Conversations), } @@ -59,7 +61,8 @@ async def agents_impl(config, mock_apis): mock_apis["safety_api"], mock_apis["tool_runtime_api"], mock_apis["tool_groups_api"], - {}, + mock_apis["conversations_api"], + [], ) await impl.initialize() yield impl diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py index 2ff586a08..2c09ad1d7 100644 --- a/tests/unit/providers/agents/meta_reference/test_openai_responses.py +++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py @@ -83,9 +83,21 @@ def mock_vector_io_api(): return vector_io_api +@pytest.fixture +def mock_conversations_api(): + """Mock conversations API for testing.""" + mock_api = AsyncMock() + return mock_api + + @pytest.fixture def openai_responses_impl( - mock_inference_api, mock_tool_groups_api, mock_tool_runtime_api, mock_responses_store, mock_vector_io_api + mock_inference_api, + mock_tool_groups_api, + mock_tool_runtime_api, + mock_responses_store, + mock_vector_io_api, + mock_conversations_api, ): return OpenAIResponsesImpl( inference_api=mock_inference_api, @@ -93,6 +105,7 @@ def openai_responses_impl( tool_runtime_api=mock_tool_runtime_api, responses_store=mock_responses_store, vector_io_api=mock_vector_io_api, + conversations_api=mock_conversations_api, ) 
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py new file mode 100644 index 000000000..b5c895e97 --- /dev/null +++ b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py @@ -0,0 +1,331 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + + +import pytest + +from llama_stack.apis.agents.openai_responses import ( + OpenAIResponseMessage, + OpenAIResponseObject, + OpenAIResponseObjectStreamResponseCompleted, + OpenAIResponseOutputMessageContentOutputText, +) +from llama_stack.apis.common.errors import ( + ConversationNotFoundError, + InvalidConversationIdError, +) +from llama_stack.apis.conversations.conversations import ( + ConversationItemList, +) + +# Import existing fixtures from the main responses test file +pytest_plugins = ["tests.unit.providers.agents.meta_reference.test_openai_responses"] + +from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import ( + OpenAIResponsesImpl, +) + + +@pytest.fixture +def responses_impl_with_conversations( + mock_inference_api, + mock_tool_groups_api, + mock_tool_runtime_api, + mock_responses_store, + mock_vector_io_api, + mock_conversations_api, +): + """Create OpenAIResponsesImpl instance with conversations API.""" + return OpenAIResponsesImpl( + inference_api=mock_inference_api, + tool_groups_api=mock_tool_groups_api, + tool_runtime_api=mock_tool_runtime_api, + responses_store=mock_responses_store, + vector_io_api=mock_vector_io_api, + conversations_api=mock_conversations_api, + ) + + +class TestConversationValidation: + """Test conversation ID validation logic.""" + + async def test_nonexistent_conversation_raises_error( + self, responses_impl_with_conversations, 
mock_conversations_api + ): + """Test that ConversationNotFoundError is raised for non-existent conversation.""" + conv_id = "conv_nonexistent" + + # Mock conversation not found + mock_conversations_api.list.side_effect = ConversationNotFoundError("conv_nonexistent") + + with pytest.raises(ConversationNotFoundError): + await responses_impl_with_conversations.create_openai_response( + input="Hello", model="test-model", conversation=conv_id, stream=False + ) + + +class TestConversationContextLoading: + """Test conversation context loading functionality.""" + + async def test_load_conversation_context_simple_input( + self, responses_impl_with_conversations, mock_conversations_api + ): + """Test loading conversation context with simple string input.""" + conv_id = "conv_test123" + input_text = "Hello, how are you?" + + # mock items in chronological order (a consequence of order="asc") + mock_conversation_items = ConversationItemList( + data=[ + OpenAIResponseMessage( + id="msg_1", + content=[{"type": "input_text", "text": "Previous user message"}], + role="user", + status="completed", + type="message", + ), + OpenAIResponseMessage( + id="msg_2", + content=[{"type": "output_text", "text": "Previous assistant response"}], + role="assistant", + status="completed", + type="message", + ), + ], + first_id="msg_1", + has_more=False, + last_id="msg_2", + object="list", + ) + + mock_conversations_api.list.return_value = mock_conversation_items + + result = await responses_impl_with_conversations._load_conversation_context(conv_id, input_text) + + # should have conversation history + new input + assert len(result) == 3 + assert isinstance(result[0], OpenAIResponseMessage) + assert result[0].role == "user" + assert isinstance(result[1], OpenAIResponseMessage) + assert result[1].role == "assistant" + assert isinstance(result[2], OpenAIResponseMessage) + assert result[2].role == "user" + assert result[2].content == input_text + + async def 
    async def test_load_conversation_context_api_error(self, responses_impl_with_conversations, mock_conversations_api):
        """Test loading conversation context when API call fails."""
        conv_id = "conv_test123"
        input_text = "Hello"

        # Simulate the conversations API failing while listing items.
        mock_conversations_api.list.side_effect = Exception("API Error")

        # The error should propagate unchanged to the caller.
        with pytest.raises(Exception, match="API Error"):
            await responses_impl_with_conversations._load_conversation_context(conv_id, input_text)

    async def test_load_conversation_context_with_list_input(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Test loading conversation context with list input."""
        conv_id = "conv_test123"
        input_messages = [
            OpenAIResponseMessage(role="user", content="First message"),
            OpenAIResponseMessage(role="user", content="Second message"),
        ]

        # Conversation has no stored items, so only the new input should remain.
        mock_conversations_api.list.return_value = ConversationItemList(
            data=[], first_id=None, has_more=False, last_id=None, object="list"
        )

        result = await responses_impl_with_conversations._load_conversation_context(conv_id, input_messages)

        # With an empty conversation, the context is exactly the provided list.
        assert len(result) == 2
        assert result == input_messages

    async def test_load_conversation_context_empty_conversation(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Test loading context from empty conversation."""
        conv_id = "conv_empty"
        input_text = "Hello"

        mock_conversations_api.list.return_value = ConversationItemList(
            data=[], first_id=None, has_more=False, last_id=None, object="list"
        )

        result = await responses_impl_with_conversations._load_conversation_context(conv_id, input_text)

        # A bare string input is wrapped into a single user message.
        assert len(result) == 1
        assert result[0].role == "user"
        assert result[0].content == input_text


class TestMessageSyncing:
    """Test message syncing to conversations."""

    async def test_sync_response_to_conversation_simple(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Test syncing simple response to conversation."""
        conv_id = "conv_test123"
        input_text = "What are the 5 Ds of dodgeball?"

        # mock response
        mock_response = OpenAIResponseObject(
            id="resp_123",
            created_at=1234567890,
            model="test-model",
            object="response",
            output=[
                OpenAIResponseMessage(
                    id="msg_response",
                    content=[
                        OpenAIResponseOutputMessageContentOutputText(
                            text="The 5 Ds are: Dodge, Duck, Dip, Dive, and Dodge.", type="output_text", annotations=[]
                        )
                    ],
                    role="assistant",
                    status="completed",
                    type="message",
                )
            ],
            status="completed",
        )

        await responses_impl_with_conversations._sync_response_to_conversation(conv_id, input_text, mock_response)

        # should call add_items with user input and assistant response
        mock_conversations_api.add_items.assert_called_once()
        call_args = mock_conversations_api.add_items.call_args

        assert call_args[0][0] == conv_id  # conversation_id
        items = call_args[0][1]  # conversation_items

        assert len(items) == 2
        # User message
        assert items[0].type == "message"
        assert items[0].role == "user"
        assert items[0].content[0].type == "input_text"
        assert items[0].content[0].text == input_text

        # Assistant message
        assert items[1].type == "message"
        assert items[1].role == "assistant"

    async def test_sync_response_to_conversation_api_error(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Errors raised while adding items to the conversation propagate to the caller."""
        mock_conversations_api.add_items.side_effect = Exception("API Error")
        mock_response = OpenAIResponseObject(
            id="resp_123", created_at=1234567890, model="test-model", object="response", output=[], status="completed"
        )

        # matching the behavior of OpenAI here
        with pytest.raises(Exception, match="API Error"):
            await responses_impl_with_conversations._sync_response_to_conversation(
                "conv_test123", "Hello", mock_response
            )

    async def test_sync_unsupported_types(self, responses_impl_with_conversations):
        """Unsupported input item types and message roles raise NotImplementedError."""
        mock_response = OpenAIResponseObject(
            id="resp_123", created_at=1234567890, model="test-model", object="response", output=[], status="completed"
        )

        # A raw dict that is not a message item is rejected.
        with pytest.raises(NotImplementedError, match="Unsupported input item type"):
            await responses_impl_with_conversations._sync_response_to_conversation(
                "conv_123", [{"not": "message"}], mock_response
            )

        # System-role messages are not synced to conversations.
        with pytest.raises(NotImplementedError, match="Unsupported message role: system"):
            await responses_impl_with_conversations._sync_response_to_conversation(
                "conv_123", [OpenAIResponseMessage(role="system", content="test")], mock_response
            )


class TestIntegrationWorkflow:
    """Integration tests for the full conversation workflow."""

    async def test_create_response_with_valid_conversation(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Test creating a response with a valid conversation parameter."""
        mock_conversations_api.list.return_value = ConversationItemList(
            data=[], first_id=None, has_more=False, last_id=None, object="list"
        )

        # Replace the streaming pipeline with a generator that emits one
        # completed-response event, so create_openai_response can finish
        # without a real model call.
        async def mock_streaming_response(*args, **kwargs):
            mock_response = OpenAIResponseObject(
                id="resp_test123",
                created_at=1234567890,
                model="test-model",
                object="response",
                output=[
                    OpenAIResponseMessage(
                        id="msg_response",
                        content=[
                            OpenAIResponseOutputMessageContentOutputText(
                                text="Test response", type="output_text", annotations=[]
                            )
                        ],
                        role="assistant",
                        status="completed",
                        type="message",
                    )
                ],
                status="completed",
            )

            yield OpenAIResponseObjectStreamResponseCompleted(response=mock_response, type="response.completed")

        responses_impl_with_conversations._create_streaming_response = mock_streaming_response

        input_text = "Hello, how are you?"
        conversation_id = "conv_test123"

        response = await responses_impl_with_conversations.create_openai_response(
            input=input_text, model="test-model", conversation=conversation_id, stream=False
        )

        assert response is not None
        assert response.id == "resp_test123"

        # Conversation history is fetched in ascending (chronological) order.
        mock_conversations_api.list.assert_called_once_with(conversation_id, order="asc")

        # Note: conversation sync happens in the streaming response flow,
        # which is complex to mock fully in this unit test

    async def test_create_response_with_invalid_conversation_id(self, responses_impl_with_conversations):
        """Test creating a response with an invalid conversation ID."""
        with pytest.raises(InvalidConversationIdError) as exc_info:
            await responses_impl_with_conversations.create_openai_response(
                input="Hello", model="test-model", conversation="invalid_id", stream=False
            )

        assert "Expected an ID that begins with 'conv_'" in str(exc_info.value)

    async def test_create_response_with_nonexistent_conversation(
        self, responses_impl_with_conversations, mock_conversations_api
    ):
        """Test creating a response with a non-existent conversation."""
        mock_conversations_api.list.side_effect = ConversationNotFoundError("conv_nonexistent")

        with pytest.raises(ConversationNotFoundError) as exc_info:
            await responses_impl_with_conversations.create_openai_response(
                input="Hello", model="test-model", conversation="conv_nonexistent", stream=False
            )

        assert "not found" in str(exc_info.value)

    async def test_conversation_and_previous_response_id(
        self, responses_impl_with_conversations, mock_conversations_api, mock_responses_store
    ):
        """Passing both `conversation` and `previous_response_id` is rejected as mutually exclusive."""
        with pytest.raises(ValueError) as exc_info:
            await responses_impl_with_conversations.create_openai_response(
                input="test", model="test", conversation="conv_123", previous_response_id="resp_123"
            )

        assert "Mutually exclusive parameters" in str(exc_info.value)
        assert "previous_response_id" in str(exc_info.value)
        assert "conversation" in str(exc_info.value)