mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-27 18:50:41 +00:00
fix: Responses API: handle type=None in streaming tool calls (#2166)
# What does this PR do? In the Responses API, we convert incoming response requests to chat completion requests. When streaming the resulting chunks of those chat completion requests, inference providers that use OpenAI clients will often return a `type=None` value in the tool call parts of the response. This causes issues when we try to dump and load that response into our pydantic model, because `type` cannot be `None` in the Responses API model we're loading these into. So, strip the `"type"` field (if present) from those chat completion tool call results before dumping and loading them as our typed pydantic models; this lets pydantic apply our default value for that `type` field. ## Test Plan This was found via manual testing of the Responses API with codex, where I was getting errors in some tool call situations. I added a unit test to simulate this scenario and verify the fix, as well as manual codex testing to verify the fix. Signed-off-by: Ben Browning <bbrownin@redhat.com>
This commit is contained in:
parent
aa5bef8e05
commit
b42eb1ccbc
2 changed files with 76 additions and 2 deletions
|
@ -7,7 +7,7 @@
|
|||
import json
|
||||
import uuid
|
||||
from collections.abc import AsyncIterator
|
||||
from typing import cast
|
||||
from typing import Any, cast
|
||||
|
||||
from openai.types.chat import ChatCompletionToolParam
|
||||
from pydantic import BaseModel
|
||||
|
@ -264,7 +264,11 @@ class OpenAIResponsesImpl:
|
|||
if response_tool_call:
|
||||
response_tool_call.function.arguments += tool_call.function.arguments
|
||||
else:
|
||||
response_tool_call = OpenAIChatCompletionToolCall(**tool_call.model_dump())
|
||||
tool_call_dict: dict[str, Any] = tool_call.model_dump()
|
||||
# Ensure we don't have any empty type field in the tool call dict.
|
||||
# The OpenAI client used by providers often returns a type=None here.
|
||||
tool_call_dict.pop("type", None)
|
||||
response_tool_call = OpenAIChatCompletionToolCall(**tool_call_dict)
|
||||
chat_response_tool_calls[tool_call.index] = response_tool_call
|
||||
|
||||
# Convert the dict of tool calls by index to a list of tool calls to pass back in our response
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue