feat: Add support for Conversations in Responses API (#3743)

# What does this PR do?
This PR adds support for Conversations in Responses.

<!-- If resolving an issue, uncomment and update the line below -->
<!-- Closes #[issue-number] -->

## Test Plan
Unit tests
Integration tests

<details>
<summary>Manual testing with this script: (click to expand)</summary>

```python
from openai import OpenAI

client = OpenAI()
client = OpenAI(base_url="http://localhost:8321/v1/", api_key="none")

def test_conversation_create():
    print("Testing conversation create...")
    conversation = client.conversations.create(
        metadata={"topic": "demo"},
        items=[
            {"type": "message", "role": "user", "content": "Hello!"}
        ]
    )
    print(f"Created: {conversation}")
    return conversation

def test_conversation_retrieve(conv_id):
    print(f"Testing conversation retrieve for {conv_id}...")
    retrieved = client.conversations.retrieve(conv_id)
    print(f"Retrieved: {retrieved}")
    return retrieved

def test_conversation_update(conv_id):
    print(f"Testing conversation update for {conv_id}...")
    updated = client.conversations.update(
        conv_id,
        metadata={"topic": "project-x"}
    )
    print(f"Updated: {updated}")
    return updated

def test_conversation_delete(conv_id):
    print(f"Testing conversation delete for {conv_id}...")
    deleted = client.conversations.delete(conv_id)
    print(f"Deleted: {deleted}")
    return deleted

def test_conversation_items_create(conv_id):
    print(f"Testing conversation items create for {conv_id}...")
    items = client.conversations.items.create(
        conv_id,
        items=[
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Hello!"}]
            },
            {
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "How are you?"}]
            }
        ]
    )
    print(f"Items created: {items}")
    return items

def test_conversation_items_list(conv_id):
    print(f"Testing conversation items list for {conv_id}...")
    items = client.conversations.items.list(conv_id, limit=10)
    print(f"Items list: {items}")
    return items

def test_conversation_item_retrieve(conv_id, item_id):
    print(f"Testing conversation item retrieve for {conv_id}/{item_id}...")
    item = client.conversations.items.retrieve(conversation_id=conv_id, item_id=item_id)
    print(f"Item retrieved: {item}")
    return item

def test_conversation_item_delete(conv_id, item_id):
    print(f"Testing conversation item delete for {conv_id}/{item_id}...")
    deleted = client.conversations.items.delete(conversation_id=conv_id, item_id=item_id)
    print(f"Item deleted: {deleted}")
    return deleted

def test_conversation_responses_create():
    print("\nTesting conversation create for a responses example...")
    conversation = client.conversations.create()
    print(f"Created: {conversation}")

    response = client.responses.create(
      model="gpt-4.1",
      input=[{"role": "user", "content": "What are the 5 Ds of dodgeball?"}],
      conversation=conversation.id,
    )
    print(f"Created response: {response} for conversation {conversation.id}")

    return response, conversation

def test_conversations_responses_create_followup(
        conversation,
        content="Repeat what you just said but add 'this is my second time saying this'",
    ):
    print(f"Using: {conversation.id}")

    response = client.responses.create(
      model="gpt-4.1",
      input=[{"role": "user", "content": content}],
      conversation=conversation.id,
    )
    print(f"Created response: {response} for conversation {conversation.id}")

    conv_items = client.conversations.items.list(conversation.id)
    print(f"\nRetrieving list of items for conversation {conversation.id}:")
    print(conv_items.model_dump_json(indent=2))

def test_response_with_fake_conv_id():
    fake_conv_id = "conv_zzzzzzzzz5dc81908289d62779d2ac510a2b0b602ef00a44"
    print(f"Using {fake_conv_id}")
    try:
        response = client.responses.create(
          model="gpt-4.1",
          input=[{"role": "user", "content": "say hello"}],
          conversation=fake_conv_id,
        )
        print(f"Created response: {response} for conversation {fake_conv_id}")
    except Exception as e:
        print(f"failed to create response for conversation {fake_conv_id} with error {e}")


def main():
    print("Testing OpenAI Conversations API...")

    # Create conversation
    conversation = test_conversation_create()
    conv_id = conversation.id

    # Retrieve conversation
    test_conversation_retrieve(conv_id)

    # Update conversation
    test_conversation_update(conv_id)

    # Create items
    items = test_conversation_items_create(conv_id)

    # List items
    items_list = test_conversation_items_list(conv_id)

    # Retrieve specific item
    if items_list.data:
        item_id = items_list.data[0].id
        test_conversation_item_retrieve(conv_id, item_id)

        # Delete item
        test_conversation_item_delete(conv_id, item_id)

    # Delete conversation
    test_conversation_delete(conv_id)

    response, conversation2 = test_conversation_responses_create()
    print('\ntesting reseponse retrieval')
    test_conversation_retrieve(conversation2.id)

    print('\ntesting responses follow up')
    test_conversations_responses_create_followup(conversation2)

    print('\ntesting responses follow up x2!')

    test_conversations_responses_create_followup(
        conversation2,
        content="Repeat what you just said but add 'this is my third time saying this'",
    )

    test_response_with_fake_conv_id()

    print("All tests completed!")


if __name__ == "__main__":
    main()
```
</details>

---------

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
This commit is contained in:
Francisco Arceo 2025-10-10 14:57:40 -04:00 committed by GitHub
parent 932fea813a
commit e7d21e1ee3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 6221 additions and 19 deletions

View file

@ -0,0 +1,147 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import pytest
@pytest.mark.integration
class TestConversationResponses:
"""Integration tests for the conversation parameter in responses API."""
def test_conversation_basic_workflow(self, openai_client, text_model_id):
"""Test basic conversation workflow: create conversation, add response, verify sync."""
conversation = openai_client.conversations.create(metadata={"topic": "test"})
assert conversation.id.startswith("conv_")
response = openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "What are the 5 Ds of dodgeball?"}],
conversation=conversation.id,
)
assert response.id.startswith("resp_")
assert len(response.output_text.strip()) > 0
# Verify conversation was synced properly
conversation_items = openai_client.conversations.items.list(conversation.id)
assert len(conversation_items.data) >= 2
roles = [item.role for item in conversation_items.data if hasattr(item, "role")]
assert "user" in roles and "assistant" in roles
def test_conversation_multi_turn_and_streaming(self, openai_client, text_model_id):
"""Test multi-turn conversations and streaming responses."""
conversation = openai_client.conversations.create()
# First turn
response1 = openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "Say hello"}],
conversation=conversation.id,
)
# Second turn with streaming
response_stream = openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "Say goodbye"}],
conversation=conversation.id,
stream=True,
)
final_response = None
for chunk in response_stream:
if chunk.type == "response.completed":
final_response = chunk.response
break
assert response1.id != final_response.id
assert len(response1.output_text.strip()) > 0
assert len(final_response.output_text.strip()) > 0
# Verify all turns are in conversation
conversation_items = openai_client.conversations.items.list(conversation.id)
print(f"DEBUG: Found {len(conversation_items.data)} messages in conversation:")
for i, item in enumerate(conversation_items.data):
if hasattr(item, "role") and hasattr(item, "content"):
content = item.content[0].text if item.content else "No content"
print(f" {i}: {item.role} - {content}")
assert len(conversation_items.data) >= 4 # 2 user + 2 assistant messages
def test_conversation_context_loading(self, openai_client, text_model_id):
"""Test that conversation context is properly loaded for responses."""
conversation = openai_client.conversations.create(
items=[
{"type": "message", "role": "user", "content": "My name is Alice"},
{"type": "message", "role": "assistant", "content": "Hello Alice!"},
]
)
response = openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "What's my name?"}],
conversation=conversation.id,
)
assert "alice" in response.output_text.lower()
def test_conversation_error_handling(self, openai_client, text_model_id):
"""Test error handling for invalid and nonexistent conversations."""
# Invalid conversation ID format
with pytest.raises(Exception) as exc_info:
openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "Hello"}],
conversation="invalid_id",
)
assert any(word in str(exc_info.value).lower() for word in ["conv", "invalid", "bad"])
# Nonexistent conversation ID
with pytest.raises(Exception) as exc_info:
openai_client.responses.create(
model=text_model_id,
input=[{"role": "user", "content": "Hello"}],
conversation="conv_nonexistent123",
)
assert any(word in str(exc_info.value).lower() for word in ["not found", "404"])
#
# response = openai_client.responses.create(
# model=text_model_id, input=[{"role": "user", "content": "First response"}]
# )
# with pytest.raises(Exception) as exc_info:
# openai_client.responses.create(
# model=text_model_id,
# input=[{"role": "user", "content": "Hello"}],
# conversation="conv_test123",
# previous_response_id=response.id,
# )
# assert "mutually exclusive" in str(exc_info.value).lower()
def test_conversation_backward_compatibility(self, openai_client, text_model_id):
"""Test that responses work without conversation parameter (backward compatibility)."""
response = openai_client.responses.create(
model=text_model_id, input=[{"role": "user", "content": "Hello world"}]
)
assert response.id.startswith("resp_")
assert len(response.output_text.strip()) > 0
# this is not ready yet
# def test_conversation_compat_client(self, compat_client, text_model_id):
# """Test conversation parameter works with compatibility client."""
# if not hasattr(compat_client, "conversations"):
# pytest.skip("compat_client does not support conversations API")
#
# conversation = compat_client.conversations.create()
# response = compat_client.responses.create(
# model=text_model_id, input="Tell me a joke", conversation=conversation.id
# )
#
# assert response is not None
# assert len(response.output_text.strip()) > 0
#
# conversation_items = compat_client.conversations.items.list(conversation.id)
# assert len(conversation_items.data) >= 2