fix: Including tool call in chat (#1931)

Include the tool call details with the chat when doing RAG with remote
vLLM.

Fixes: #1929

With this PR the tool call is included in the chat returned to vLLM,
and the model (meta-llama/Llama-3.1-8B-Instruct) then returns the
answer as expected.
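
For illustration, the assistant turn carrying the tool call is now
serialized in the OpenAI chat-completions shape, roughly like this
(field values here are hypothetical):

    {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {
                "id": "call_0",
                "type": "function",
                "function": {
                    "name": "knowledge_search",
                    "arguments": "{\"query\": \"...\"}",
                },
            }
        ],
    }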

Signed-off-by: Derek Higgins <derekh@redhat.com>

@@ -524,11 +524,26 @@ async def convert_message_to_openai_dict(message: Message, download: bool = Fals
     else:
         content = [await _convert_content(message.content)]
-    return {
+    result = {
         "role": message.role,
         "content": content,
     }
+    if hasattr(message, "tool_calls") and message.tool_calls:
+        result["tool_calls"] = []
+        for tc in message.tool_calls:
+            result["tool_calls"].append(
+                {
+                    "id": tc.call_id,
+                    "type": "function",
+                    "function": {
+                        "name": tc.tool_name,
+                        "arguments": tc.arguments_json if hasattr(tc, "arguments_json") else json.dumps(tc.arguments),
+                    },
+                }
+            )
+    return result


 class UnparseableToolCall(BaseModel):
     """