fix: resolve BuiltinTools to strings for vllm tool_call messages (#2071)

# What does this PR do?

When the result of a ToolCall gets passed back into vLLM for the model
to handle the tool call result (as is often the case in agentic
tool-calling workflows), we were not handling the case where the tool
name is an instance of the BuiltinTool enum rather than a plain string.
This fixes that by resolving those enum members to their string values
before serializing them into an OpenAI chat completion request to vLLM.
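
For context, here is the failure mode in miniature. This is a minimal sketch, not the actual llama_stack code: it uses a stand-in BuiltinTool enum to show why handing the enum member to `json.dumps` fails, and why resolving it to its `.value` first fixes serialization.

```
import json
from enum import Enum


# Stand-in for llama_stack's BuiltinTool enum (illustrative only; the
# real enum lives in llama_stack.models.llama.datatypes).
class BuiltinTool(Enum):
    brave_search = "brave_search"


tool_name = BuiltinTool.brave_search

# Passing the enum member straight to json.dumps raises:
#   TypeError: Object of type BuiltinTool is not JSON serializable
try:
    json.dumps({"name": tool_name})
except TypeError as err:
    print(f"before the fix: {err}")

# Resolving the enum to its underlying string value first succeeds.
if isinstance(tool_name, BuiltinTool):
    tool_name = tool_name.value
print(json.dumps({"name": tool_name}))  # -> {"name": "brave_search"}
```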

PR #1931 fixed a bug where we weren't passing these tool call results
back into vLLM, but as a side effect it introduced this serialization
bug when BuiltinTools are used.

Closes #2070

## Test Plan

I added a new unit test to the openai_compat unit tests covering this
scenario, verified that the new test failed before the fix, and
confirmed that all the existing tests plus the new one pass with the
fix applied.

```
python -m pytest -s -v tests/unit/providers/utils/inference/test_openai_compat.py
```
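
To exercise only the new case, pytest's `-k` filter can select it by name (the expression below is an assumption based on the test name added in this PR):

```
python -m pytest -s -v tests/unit/providers/utils/inference/test_openai_compat.py -k builtin_tool_call
```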

Signed-off-by: Ben Browning <bbrownin@redhat.com>

2 changed files with 33 additions and 2 deletions

llama_stack/providers/utils/inference/openai_compat.py

```
@@ -532,12 +532,17 @@ async def convert_message_to_openai_dict(message: Message, download: bool = False):
     if hasattr(message, "tool_calls") and message.tool_calls:
         result["tool_calls"] = []
         for tc in message.tool_calls:
+            # The tc.tool_name can be a str or a BuiltinTool enum. If
+            # it's the latter, convert it to a string.
+            tool_name = tc.tool_name
+            if isinstance(tool_name, BuiltinTool):
+                tool_name = tool_name.value
             result["tool_calls"].append(
                 {
                     "id": tc.call_id,
                     "type": "function",
                     "function": {
-                        "name": tc.tool_name,
+                        "name": tool_name,
                         "arguments": tc.arguments_json if hasattr(tc, "arguments_json") else json.dumps(tc.arguments),
                     },
                 }
```
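
Note that the conversion happens right at the OpenAI serialization boundary: `tc.tool_name` keeps its typed BuiltinTool value everywhere else in the stack, and plain-string tool names pass through the `isinstance` check untouched.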

tests/unit/providers/utils/inference/test_openai_compat.py

```
@@ -8,7 +8,7 @@ import pytest
 from llama_stack.apis.common.content_types import TextContentItem
 from llama_stack.apis.inference.inference import CompletionMessage, UserMessage
-from llama_stack.models.llama.datatypes import StopReason, ToolCall
+from llama_stack.models.llama.datatypes import BuiltinTool, StopReason, ToolCall
 from llama_stack.providers.utils.inference.openai_compat import convert_message_to_openai_dict
@@ -41,3 +41,29 @@ async def test_convert_message_to_openai_dict_with_tool_call():
             {"id": "123", "type": "function", "function": {"name": "test_tool", "arguments": '{"foo": "bar"}'}}
         ],
     }
+
+
+@pytest.mark.asyncio
+async def test_convert_message_to_openai_dict_with_builtin_tool_call():
+    message = CompletionMessage(
+        content="",
+        tool_calls=[
+            ToolCall(
+                call_id="123",
+                tool_name=BuiltinTool.brave_search,
+                arguments_json='{"foo": "bar"}',
+                arguments={"foo": "bar"},
+            )
+        ],
+        stop_reason=StopReason.end_of_turn,
+    )
+
+    openai_dict = await convert_message_to_openai_dict(message)
+
+    assert openai_dict == {
+        "role": "assistant",
+        "content": [{"type": "text", "text": ""}],
+        "tool_calls": [
+            {"id": "123", "type": "function", "function": {"name": "brave_search", "arguments": '{"foo": "bar"}'}}
+        ],
+    }
```