fix: fix agent test recorded responses (#1462)

# What does this PR do?

- Regenerate the recorded inference responses to fix the agents integration tests
- Update `test_custom_tool` to pass the client tool directly via `"tools"`, dropping the separate `"client_tools"` entry

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan
```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/agents/test_agents.py --text-model meta-llama/Llama-3.3-70B-Instruct
```

<img width="1294" alt="image"
src="https://github.com/user-attachments/assets/63521532-b989-4cf2-8fe5-c7f057f1c4dc"
/>


[//]: # (## Documentation)
Commit 1e3be1e4d7 (parent 8234cdf1a5) by Xi Yan, 2025-03-06 19:37:52 -08:00, committed by GitHub.
3 changed files with 13045 additions and 13 deletions


```diff
@@ -276,7 +276,6 @@ def test_custom_tool(llama_stack_client_with_mocked_inference, agent_config):
     agent_config = {
         **agent_config,
         "tools": ["builtin::websearch", client_tool],
-        "client_tools": [client_tool.get_tool_definition()],
     }
     agent = Agent(llama_stack_client_with_mocked_inference, **agent_config)
```
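
For reference, a minimal, self-contained sketch of the config pattern this hunk leaves in place: the client tool callable sits directly in `"tools"` next to builtin tool groups, so the removed `"client_tools"` entry is unnecessary. The stub tool and the base config values below are placeholders rather than the real test fixtures; the model ID is taken from the test plan above.

```python
# Sketch only: a stub stands in for the real client tool callable so the
# snippet runs on its own; in the actual test, `client_tool` comes from the
# test module and the merged dict is passed to Agent(...).
def client_tool(liquid_name: str) -> str:
    """Placeholder for a client-side tool callable."""
    return f"boiling point of {liquid_name} is unknown"


base_agent_config = {
    # Placeholder base config; the real values come from the agent_config fixture.
    "model": "meta-llama/Llama-3.3-70B-Instruct",
    "instructions": "You are a helpful assistant.",
}

agent_config = {
    **base_agent_config,
    # Client tools now go straight into "tools" alongside builtin tool groups;
    # the separate "client_tools": [client_tool.get_tool_definition()] entry
    # removed in this hunk is no longer needed.
    "tools": ["builtin::websearch", client_tool],
}

print(agent_config["tools"])
```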
```diff
@@ -571,7 +570,10 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_config):
     assert expected_kw in response.output_message.content.lower()


-@pytest.mark.parametrize("client_tools", [(get_boiling_point, False), (get_boiling_point_with_metadata, True)])
+@pytest.mark.parametrize(
+    "client_tools",
+    [(get_boiling_point, False), (get_boiling_point_with_metadata, True)],
+)
 def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_config, client_tools):
     client_tool, expectes_metadata = client_tools
     agent_config = {
```
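
And a self-contained sketch of the parametrization pattern from the second hunk: each case is a `(tool, expects_metadata)` tuple that the test body unpacks. The stub functions and test name below are hypothetical stand-ins for `get_boiling_point`, `get_boiling_point_with_metadata`, and `test_create_turn_response`, kept minimal so the snippet runs under pytest on its own.

```python
import pytest


# Hypothetical stand-ins for get_boiling_point / get_boiling_point_with_metadata.
def get_boiling_point_stub(liquid_name: str) -> int:
    return 100


def get_boiling_point_with_metadata_stub(liquid_name: str) -> int:
    return 100


@pytest.mark.parametrize(
    "client_tools",
    [(get_boiling_point_stub, False), (get_boiling_point_with_metadata_stub, True)],
)
def test_tuple_unpacking_shape(client_tools):
    # Mirrors the unpacking in test_create_turn_response:
    #   client_tool, expectes_metadata = client_tools
    client_tool, expects_metadata = client_tools
    assert callable(client_tool)
    assert isinstance(expects_metadata, bool)
```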

Diffs for the other two changed files (the regenerated recorded responses) are suppressed because one or more lines are too long.