fix: Responses API: handle type=None in streaming tool calls (#2166)

# What does this PR do?

In the Responses API, we convert incoming response requests to chat
completion requests. When streaming the resulting chunks of those chat
completion requests, inference providers that use OpenAI clients will
often return a `type=None` value in the tool call parts of the response.
This causes issues when we try to dump and load that response into our
pydantic model, because type cannot be None in the Responses API model
we're loading these into.

So we strip the "type" field, if present, from those chat completion tool
call results before dumping and loading them as our typed pydantic models;
our default value for that field then applies.
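For illustration, here is a minimal sketch of the failure mode, using a stand-in pydantic model (not the actual llama_stack class) whose `type` field has a default, like the Responses API tool call model:

```python
from typing import Any, Literal

from pydantic import BaseModel, ValidationError


class ToolCall(BaseModel):
    """Stand-in for the Responses API tool call model (illustration only)."""

    id: str
    type: Literal["function"] = "function"  # default applies only when the key is absent


# What OpenAI-client-based providers often stream back for a tool call delta.
streamed_tool_call: dict[str, Any] = {"id": "tc_123", "type": None}

try:
    ToolCall(**streamed_tool_call)
except ValidationError as e:
    print(f"explicit type=None is rejected: {e.errors()[0]['type']}")

# The fix: drop the key if present, so pydantic falls back to the default.
streamed_tool_call.pop("type", None)
print(ToolCall(**streamed_tool_call))  # id='tc_123' type='function'
```

The diff below applies the same `pop("type", None)` before constructing `OpenAIChatCompletionToolCall`.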

## Test Plan

This was found via manual testing of the Responses API with codex, where
I was getting errors in some tool call situations. I added a unit test
that simulates this scenario and verifies the fix, and confirmed the fix
manually with codex as well.

Signed-off-by: Ben Browning <bbrownin@redhat.com>
Author: Ben Browning, 2025-05-14 17:16:33 -04:00 (committed by GitHub)
Commit: b42eb1ccbc (parent: aa5bef8e05)
2 changed files with 76 additions and 2 deletions


```diff
@@ -7,7 +7,7 @@
 import json
 import uuid
 from collections.abc import AsyncIterator
-from typing import cast
+from typing import Any, cast

 from openai.types.chat import ChatCompletionToolParam
 from pydantic import BaseModel
@@ -264,7 +264,11 @@ class OpenAIResponsesImpl:
                     if response_tool_call:
                         response_tool_call.function.arguments += tool_call.function.arguments
                     else:
-                        response_tool_call = OpenAIChatCompletionToolCall(**tool_call.model_dump())
+                        tool_call_dict: dict[str, Any] = tool_call.model_dump()
+                        # Ensure we don't have any empty type field in the tool call dict.
+                        # The OpenAI client used by providers often returns a type=None here.
+                        tool_call_dict.pop("type", None)
+                        response_tool_call = OpenAIChatCompletionToolCall(**tool_call_dict)
                     chat_response_tool_calls[tool_call.index] = response_tool_call

             # Convert the dict of tool calls by index to a list of tool calls to pass back in our response
```


```diff
@@ -7,10 +7,18 @@
 from unittest.mock import AsyncMock, patch

 import pytest
+from openai.types.chat.chat_completion_chunk import (
+    ChatCompletionChunk,
+    Choice,
+    ChoiceDelta,
+    ChoiceDeltaToolCall,
+    ChoiceDeltaToolCallFunction,
+)

 from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseInputItemList,
     OpenAIResponseInputMessageContentText,
+    OpenAIResponseInputToolFunction,
     OpenAIResponseInputToolWebSearch,
     OpenAIResponseMessage,
     OpenAIResponseObject,
@@ -167,6 +175,68 @@ async def test_create_openai_response_with_string_input_with_tools(openai_respon
     assert result.output[1].content[0].text == "Dublin"


+@pytest.mark.asyncio
+async def test_create_openai_response_with_tool_call_type_none(openai_responses_impl, mock_inference_api):
+    """Test creating an OpenAI response with a tool call response that has a type of None."""
+    # Setup
+    input_text = "How hot it is in San Francisco today?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    async def fake_stream():
+        yield ChatCompletionChunk(
+            id="123",
+            choices=[
+                Choice(
+                    index=0,
+                    delta=ChoiceDelta(
+                        tool_calls=[
+                            ChoiceDeltaToolCall(
+                                index=0,
+                                id="tc_123",
+                                function=ChoiceDeltaToolCallFunction(name="get_weather", arguments="{}"),
+                                type=None,
+                            )
+                        ]
+                    ),
+                ),
+            ],
+            created=1,
+            model=model,
+            object="chat.completion.chunk",
+        )
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    # Execute
+    result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        stream=True,
+        temperature=0.1,
+        tools=[
+            OpenAIResponseInputToolFunction(
+                name="get_weather",
+                description="Get current temperature for a given location.",
+                parameters={
+                    "location": "string",
+                },
+            )
+        ],
+    )
+
+    # Verify
+    first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+    assert first_call.kwargs["messages"][0].content == input_text
+    assert first_call.kwargs["tools"] is not None
+    assert first_call.kwargs["temperature"] == 0.1
+
+    # Check that we got the content from our mocked tool execution result
+    chunks = [chunk async for chunk in result]
+    assert len(chunks) > 0
+    assert chunks[0].response.output[0].type == "function_call"
+    assert chunks[0].response.output[0].name == "get_weather"
+
+
 @pytest.mark.asyncio
 async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api):
     """Test creating an OpenAI response with multiple messages."""
```