Fix vLLM bugs

This commit is contained in:
Hardik Shah 2025-03-18 13:57:36 -07:00
parent 549096b264
commit b0425c84b5

View file

@@ -89,16 +89,12 @@ def _convert_to_vllm_tool_calls_in_response(
if not tool_calls:
return []
call_function_arguments = None
for call in tool_calls:
call_function_arguments = json.loads(call.function.arguments)
return [
ToolCall(
call_id=call.id,
tool_name=call.function.name,
arguments=json.loads(call.function.arguments),
arguments_json=call_function_arguments,
arguments_json=call.function.arguments,
)
for call in tool_calls
]
@@ -182,7 +178,8 @@ async def _process_vllm_chat_completion_stream_response(
tool_call=ToolCall(
call_id=tool_call_buf.call_id,
tool_name=tool_call_buf.tool_name,
arguments=args_str,
arguments=args,
arguments_json=args_str,
),
parse_status=ToolCallParseStatus.succeeded,
),