fix: Including tool call in chat (#1931)

Include the tool call details with the chat when doing RAG with remote vLLM. Fixes: #1929

With this PR the tool call is included in the chat returned to vLLM, and the model (meta-llama/Llama-3.1-8B-Instruct) then returns the answer as expected.

Signed-off-by: Derek Higgins <derekh@redhat.com>
parent 7ed137e963
commit c8797f1125
3 changed files with 106 additions and 2 deletions
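For context: the OpenAI chat-completions format that vLLM consumes expects an assistant message which issued a tool call to carry a tool_calls field alongside role and content; before this change the conversion dropped that field. A minimal sketch of the target message shape (all field values here are illustrative, not taken from the patch):

    # Illustrative shape of an OpenAI-style assistant message with a tool call.
    assistant_message = {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {
                "id": "call_0",  # hypothetical call id
                "type": "function",
                "function": {
                    "name": "knowledge_search",       # hypothetical tool name
                    "arguments": '{"query": "..."}',  # arguments as a JSON string
                },
            }
        ],
    }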
@@ -524,11 +524,26 @@ async def convert_message_to_openai_dict(message: Message, download: bool = False
     else:
         content = [await _convert_content(message.content)]
 
-    return {
+    result = {
         "role": message.role,
         "content": content,
     }
 
+    if hasattr(message, "tool_calls") and message.tool_calls:
+        result["tool_calls"] = []
+        for tc in message.tool_calls:
+            result["tool_calls"].append(
+                {
+                    "id": tc.call_id,
+                    "type": "function",
+                    "function": {
+                        "name": tc.tool_name,
+                        "arguments": tc.arguments_json if hasattr(tc, "arguments_json") else json.dumps(tc.arguments),
+                    },
+                }
+            )
+    return result
 
 
 class UnparseableToolCall(BaseModel):
     """
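To illustrate the arguments-serialization fallback in the added lines, here is a self-contained sketch. The ToolCall stand-in below is hypothetical and only mimics the fields the patch relies on (call_id, tool_name, arguments, arguments_json); the real class lives in llama-stack and may differ.

    import json
    from dataclasses import dataclass
    from typing import Optional


    # Hypothetical stand-in for llama-stack's ToolCall; only mimics the
    # fields the patch touches. Some tool calls carry a pre-serialized JSON
    # string (arguments_json), which is why the patch guards with hasattr().
    @dataclass
    class ToolCall:
        call_id: str
        tool_name: str
        arguments: dict
        arguments_json: Optional[str] = None


    def tool_call_to_openai_dict(tc: ToolCall) -> dict:
        # Prefer the pre-serialized JSON string when present; otherwise
        # serialize the arguments dict, mirroring the patched code. (The
        # patch uses hasattr(); a dataclass always has the attribute, so
        # this sketch checks for a non-None value instead.)
        arguments = tc.arguments_json if tc.arguments_json is not None else json.dumps(tc.arguments)
        return {
            "id": tc.call_id,
            "type": "function",
            "function": {"name": tc.tool_name, "arguments": arguments},
        }


    tc = ToolCall(call_id="call_0", tool_name="knowledge_search", arguments={"query": "llama stack"})
    print(tool_call_to_openai_dict(tc))
    # {'id': 'call_0', 'type': 'function',
    #  'function': {'name': 'knowledge_search', 'arguments': '{"query": "llama stack"}'}}

Keeping arguments as a JSON string rather than a dict matches what the OpenAI tool-calling format (and therefore vLLM) expects, which is why json.dumps is used as the fallback.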