feat: make multi-turn tool call tests work with llama4 (#1886)

Getting full tool calling to work end-to-end with Llama 4 required a few updates:
- Remove `python_start` and `python_end` tags
- Tool Call messages and Tool Response messages should end with
`<|eom|>` (see the sketch after this list)
- System prompt needed updates 
```
You are a helpful assistant who can answer general questions or invoke tools when necessary.
In addition to tool calls, you should also augment your responses by using the tool outputs.
```
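
Concretely, a single tool-call round trip now looks roughly like the sketch below from the client's perspective; server-side, the prompt formatter renders both the tool-call message and the tool-response message terminated with `<|eom|>` and without the `<|python_start|>`/`<|python_end|>` wrapper. The `get_weather` tool, its arguments, and the message field shapes are illustrative, not taken from the fixtures:

```
# Sketch of one tool-call round trip; get_weather is hypothetical.
messages = [
    {"role": "user", "content": "What is the weather in Tokyo?"},
    # assistant turn carrying the tool call -- rendered into the prompt
    # without python tags and terminated with <|eom|>
    {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {"call_id": "c1", "tool_name": "get_weather", "arguments": {"city": "Tokyo"}},
        ],
    },
    # tool response turn -- also terminated with <|eom|>, signaling the
    # model should keep going rather than end its turn
    {"role": "tool", "call_id": "c1", "content": "22C, sunny"},
]
```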

### Test Plan 
- Start server with meta-reference 
```
LLAMA_STACK_DISABLE_VERSION_CHECK=1 LLAMA_MODELS_DEBUG=1 INFERENCE_MODEL=meta-llama/$MODEL  llama stack run meta-reference-gpu 
``` 
- Added **NEW** tests with 5 test cases for multi-turn tool calls 
```
pytest -s -v --stack-config http://localhost:8321 tests/integration/inference/test_text_inference.py --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct
``` 
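To run only the five new cases, a standard pytest `-k` filter on the test name also works:
```
pytest -s -v --stack-config http://localhost:8321 tests/integration/inference/test_text_inference.py -k multi_turn_tool_calling --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct
```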
- Also verified all vision and agent tests pass

tests/integration/inference/test_text_inference.py

@@ -491,3 +491,80 @@ def test_text_chat_completion_tool_calling_tools_not_in_request(
    else:
        for tc in response.completion_message.tool_calls:
            assert tc.tool_name == "get_object_namespace_list"


@pytest.mark.parametrize(
    "test_case",
    [
        # Tests if the model can handle simple messages like "Hi" or
        # a message unrelated to any of the tool calls
        "inference:chat_completion:multi_turn_tool_calling_01",
        # Tests if the model can do a full tool call with responses correctly
        "inference:chat_completion:multi_turn_tool_calling_02",
        # Tests if the model can generate multiple params and
        # read outputs correctly
        "inference:chat_completion:multi_turn_tool_calling_03",
        # Tests if the model can do different tool calls in a sequence
        # and use the information between them appropriately
        "inference:chat_completion:multi_turn_tool_calling_04",
        # Tests if the model can use the current date and run multiple tool calls
        # sequentially and infer using both
        "inference:chat_completion:multi_turn_tool_calling_05",
    ],
)
def test_text_chat_completion_with_multi_turn_tool_calling(client_with_models, text_model_id, test_case):
    """Test the model's tool-calling loop across a variety of multi-turn scenarios."""
    if "llama-4" not in text_model_id.lower():
        pytest.xfail("Not tested for non-llama4 models yet")

    tc = TestCase(test_case)
    messages = []

    # keep going while either
    # 1. there are more message batches to feed into the multi-turn exchange, or
    # 2. no new messages remain but the last message is a tool response,
    #    so the model still owes a reply
    while len(tc["messages"]) > 0 or (len(messages) > 0 and messages[-1]["role"] == "tool"):
        # do not take new messages if the last message is a tool response
        if len(messages) == 0 or messages[-1]["role"] != "tool":
            new_messages = tc["messages"].pop(0)
            messages += new_messages
        # pprint(messages)
        response = client_with_models.inference.chat_completion(
            model_id=text_model_id,
            messages=messages,
            tools=tc["tools"],
            stream=False,
            sampling_params={
                "strategy": {
                    "type": "top_p",
                    "top_p": 0.9,
                    "temperature": 0.6,
                }
            },
        )
        op_msg = response.completion_message
        messages.append(op_msg.model_dump())
        # pprint(op_msg)
        assert op_msg.role == "assistant"

        expected = tc["expected"].pop(0)
        assert len(op_msg.tool_calls) == expected["num_tool_calls"]
        if expected["num_tool_calls"] > 0:
            assert op_msg.tool_calls[0].tool_name == expected["tool_name"]
            assert op_msg.tool_calls[0].arguments == expected["tool_arguments"]

            tool_response = tc["tool_responses"].pop(0)
            messages.append(
                # Tool Response Message
                {
                    "role": "tool",
                    "call_id": op_msg.tool_calls[0].call_id,
                    "content": tool_response["response"],
                }
            )
        else:
            actual_answer = op_msg.content.lower()
            # pprint(actual_answer)
            assert expected["answer"] in actual_answer
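
For orientation, this is roughly the shape a test case takes, inferred from how the test reads `tc["messages"]`, `tc["tools"]`, `tc["expected"]`, and `tc["tool_responses"]` above. The tool definition, arguments, and answers are illustrative placeholders, not the actual fixture contents:

```
# Hypothetical test-case payload; field names mirror the test's accesses,
# but get_boiling_point and all values are made up for illustration.
{
    "messages": [
        # one batch of new messages is popped per turn
        [{"role": "user", "content": "What is the boiling point of polyjuice?"}],
    ],
    "tools": [
        {
            "tool_name": "get_boiling_point",
            "description": "Returns the boiling point of a liquid",
            "parameters": {
                "liquid_name": {"param_type": "string", "required": True},
            },
        }
    ],
    "expected": [
        # turn 1: the model should emit exactly one tool call
        {"num_tool_calls": 1, "tool_name": "get_boiling_point", "tool_arguments": {"liquid_name": "polyjuice"}},
        # turn 2: the model should answer from the tool output, no further calls
        {"num_tool_calls": 0, "answer": "-100"},
    ],
    "tool_responses": [
        {"response": "-100"},
    ],
}
```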