fix(responses): use conversation items when no stored messages exist (#3819)

Handle the base case where no stored messages exist because no Responses API call has
been made against the conversation yet. In that case, fall back to the conversation's
own items when building the model context.
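
For context, this is the kind of client flow the fix targets, sketched with the OpenAI-compatible client the integration tests use. The `base_url`, `api_key`, and model name are placeholders; only the `conversations.create` / `responses.create` calls mirror the test change below.

```python
from openai import OpenAI

# Placeholder endpoint/key for a locally running Llama Stack server.
client = OpenAI(base_url="http://localhost:8321/v1", api_key="not-needed")

# The conversation is seeded directly with items; no Responses call has been
# made yet, so the server has no stored messages for it.
conversation = client.conversations.create(
    items=[
        {"type": "message", "role": "user", "content": "My name is Alice. I like to eat apples."},
        {"type": "message", "role": "assistant", "content": "Hello Alice!"},
    ]
)

# The first Responses call against such a conversation should see the seeded
# items as context; before this fix they were dropped.
response = client.responses.create(
    model="openai/gpt-4o",
    input=[{"role": "user", "content": "What do I like to eat?"}],
    conversation=conversation.id,
)
print(response.output_text)  # expected to mention apples
```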

## Test Plan

```
./scripts/integration-tests.sh --stack-config server:ci-tests \
  --suite responses --inference-mode record-if-missing \
  --pattern test_conversation_responses
```
Ashwin Bharambe, 2025-10-15 14:43:44 -07:00 (committed by GitHub)
parent 6ba9db3929
commit 8e7e0ddfec
4 changed files with 1894 additions and 10 deletions


@@ -136,9 +136,21 @@ class OpenAIResponsesImpl:
                 # First turn - just convert the new input
                 messages = await convert_response_input_to_chat_messages(input)
             else:
-                # Use stored messages directly and convert only new input
+                if not stored_messages:
+                    all_input = conversation_items.data
+                    if isinstance(input, str):
+                        all_input.append(
+                            OpenAIResponseMessage(
+                                role="user", content=[OpenAIResponseInputMessageContentText(text=input)]
+                            )
+                        )
+                    else:
+                        all_input.extend(input)
+                else:
+                    all_input = input
+
                 messages = stored_messages or []
-                new_messages = await convert_response_input_to_chat_messages(input, previous_messages=messages)
+                new_messages = await convert_response_input_to_chat_messages(all_input, previous_messages=messages)
                 messages.extend(new_messages)
         else:
             all_input = input
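
Restated outside the diff, the new branching amounts to the following. This is a simplified sketch with plain dicts standing in for the `OpenAIResponseMessage` / `OpenAIResponseInputMessageContentText` models; the function name and the dict shapes are illustrative, not part of the codebase.

```python
from typing import Any

Message = dict[str, Any]  # illustrative stand-in for the response message models


def build_all_input(
    conversation_items: list[Message],
    new_input: str | list[Message],
    stored_messages: list[Message] | None,
) -> str | list[Message]:
    """Decide what gets converted into chat messages for this turn."""
    if stored_messages:
        # Prior Responses calls already persisted chat messages, so only the
        # new input needs to be converted downstream.
        return new_input

    # Base case: no Responses call has touched this conversation yet, so the
    # conversation's own items are the only available history.
    all_input = list(conversation_items)
    if isinstance(new_input, str):
        all_input.append({"role": "user", "content": [{"type": "input_text", "text": new_input}]})
    else:
        all_input.extend(new_input)
    return all_input
```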


@@ -0,0 +1,281 @@
{
"test_id": "tests/integration/responses/test_conversation_responses.py::TestConversationResponses::test_conversation_context_loading[txt=openai/gpt-4o]",
"request": {
"method": "POST",
"url": "https://api.openai.com/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": "My name is Alice. I like to eat apples."
},
{
"role": "assistant",
"content": "Hello Alice!"
},
{
"role": "user",
"content": "What do I like to eat?"
}
],
"stream": true,
"stream_options": {
"include_usage": true
}
},
"endpoint": "/v1/chat/completions",
"model": "gpt-4o"
},
"response": {
"body": [
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": "",
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "oX42sfuTveQxTm"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": "You",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "WXjGiE9XpiBq8"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": " like",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "CM6omhIRiFc"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": " to",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "jkuxInIhW27Qi"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": " eat",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "t4nM3Hfho1UC"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": " apples",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "tdOE8fcT4"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": ".",
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "lUFXgT3GzjuJk1B"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [
{
"delta": {
"content": null,
"function_call": null,
"refusal": null,
"role": null,
"tool_calls": null
},
"finish_reason": "stop",
"index": 0,
"logprobs": null
}
],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": null,
"obfuscation": "NA9RsKkHAX"
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "rec-fcc3165422df",
"choices": [],
"created": 0,
"model": "gpt-4o-2024-08-06",
"object": "chat.completion.chunk",
"service_tier": "default",
"system_fingerprint": "fp_cbf1785567",
"usage": {
"completion_tokens": 6,
"prompt_tokens": 36,
"total_tokens": 42,
"completion_tokens_details": {
"accepted_prediction_tokens": 0,
"audio_tokens": 0,
"reasoning_tokens": 0,
"rejected_prediction_tokens": 0
},
"prompt_tokens_details": {
"audio_tokens": 0,
"cached_tokens": 0
}
},
"obfuscation": ""
}
}
],
"is_streaming": true
},
"id_normalization_mapping": {}
}
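
The recording stores each streamed `chat.completion.chunk` verbatim; the assistant's reply is the concatenation of the non-null `delta.content` fields (here, "You like to eat apples."). A minimal sketch of replaying such a file, with the function name and path handling purely illustrative:

```python
import json


def replay_text(recording_path: str) -> str:
    """Rebuild the assistant message from a recorded streaming response."""
    with open(recording_path) as f:
        recording = json.load(f)

    parts: list[str] = []
    for chunk in recording["response"]["body"]:
        for choice in chunk["__data__"]["choices"]:
            content = choice["delta"].get("content")
            if content:
                parts.append(content)
    return "".join(parts)
```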


@@ -63,29 +63,24 @@ class TestConversationResponses:
 
         # Verify all turns are in conversation
         conversation_items = openai_client.conversations.items.list(conversation.id)
-        print(f"DEBUG: Found {len(conversation_items.data)} messages in conversation:")
-        for i, item in enumerate(conversation_items.data):
-            if hasattr(item, "role") and hasattr(item, "content"):
-                content = item.content[0].text if item.content else "No content"
-                print(f" {i}: {item.role} - {content}")
         assert len(conversation_items.data) >= 4  # 2 user + 2 assistant messages
 
     def test_conversation_context_loading(self, openai_client, text_model_id):
         """Test that conversation context is properly loaded for responses."""
         conversation = openai_client.conversations.create(
             items=[
-                {"type": "message", "role": "user", "content": "My name is Alice"},
+                {"type": "message", "role": "user", "content": "My name is Alice. I like to eat apples."},
                 {"type": "message", "role": "assistant", "content": "Hello Alice!"},
             ]
         )
 
         response = openai_client.responses.create(
             model=text_model_id,
-            input=[{"role": "user", "content": "What's my name?"}],
+            input=[{"role": "user", "content": "What do I like to eat?"}],
             conversation=conversation.id,
         )
 
-        assert "alice" in response.output_text.lower()
+        assert "apple" in response.output_text.lower()
 
     def test_conversation_error_handling(self, openai_client, text_model_id):
         """Test error handling for invalid and nonexistent conversations."""