Mirror of https://github.com/meta-llama/llama-stack.git
Introduces vLLM provider support to the record/replay testing framework, enabling both recording and replay of vLLM API interactions alongside the existing Ollama support. vLLM tests focus on inference capabilities, while Ollama continues to exercise the full API surface, including vision features.

This is an alternative to #3128: qwen3, rather than llama 3.2 1B, appears to be more capable at structured output and tool calls.

---------

Signed-off-by: Derek Higgins <derekh@redhat.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
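For context on how recordings like the one below are consumed, here is a minimal sketch of a replay lookup keyed on a normalized request. The RecordingStore class, file layout, and hashing scheme are illustrative assumptions, not llama-stack's actual implementation:

    import hashlib
    import json
    from pathlib import Path


    class RecordingStore:
        """Hypothetical replay store mapping a normalized request to a recorded response."""

        def __init__(self, recordings_dir: Path):
            self._by_key: dict[str, dict] = {}
            for path in recordings_dir.glob("*.json"):
                rec = json.loads(path.read_text())
                req = rec["request"]
                key = self._request_key(req["method"], req["endpoint"], req["body"])
                self._by_key[key] = rec["response"]

        @staticmethod
        def _request_key(method: str, endpoint: str, body: dict) -> str:
            # Hash method + endpoint + canonical JSON body so that an identical
            # request issued during replay resolves to the recorded response.
            payload = json.dumps(
                {"method": method, "endpoint": endpoint, "body": body}, sort_keys=True
            )
            return hashlib.sha256(payload.encode()).hexdigest()

        def lookup(self, method: str, endpoint: str, body: dict) -> dict | None:
            # A miss returns None, which a test harness could treat as
            # "record a new interaction" when running in recording mode.
            return self._by_key.get(self._request_key(method, endpoint, body))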
92 lines · 3.1 KiB · JSON · Generated
{
  "test_id": "tests/integration/inference/test_tools_with_schemas.py::TestChatCompletionWithTools::test_simple_tool_call[txt=vllm/Qwen/Qwen3-0.6B]",
  "request": {
    "method": "POST",
    "url": "http://localhost:8000/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "Qwen/Qwen3-0.6B",
      "messages": [
        {
          "role": "user",
          "content": "What's the weather in San Francisco?"
        }
      ],
      "max_tokens": 4096,
      "tools": [
        {
          "type": "function",
          "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {
              "type": "object",
              "properties": {
                "location": {
                  "type": "string",
                  "description": "City name"
                }
              },
              "required": [
                "location"
              ]
            }
          }
        }
      ]
    },
    "endpoint": "/v1/chat/completions",
    "model": "Qwen/Qwen3-0.6B"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "rec-e89112e7735f",
        "choices": [
          {
            "finish_reason": "tool_calls",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "<think>\nOkay, the user is asking for the weather in San Francisco. I need to check if there's a function available for that. Looking at the tools provided, there's a function called get_weather that requires a location parameter. The description says it gets weather for a location, and the parameter is the city name. The user provided \"San Francisco\" as the location, so I should call the get_weather function with \"San Francisco\" as the argument. I don't see any other parameters needed here, so the tool call should be straightforward. Just make sure the city name is correctly formatted in JSON.\n</think>\n\n",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": [
                {
                  "id": "chatcmpl-tool-feead29842dc40b2831c41ed397f555f",
                  "function": {
                    "arguments": "{\"location\": \"San Francisco\"}",
                    "name": "get_weather"
                  },
                  "type": "function"
                }
              ],
              "reasoning_content": null
            },
            "stop_reason": null
          }
        ],
        "created": 0,
        "model": "Qwen/Qwen3-0.6B",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": null,
        "usage": {
          "completion_tokens": 146,
          "prompt_tokens": 161,
          "total_tokens": 307,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        },
        "prompt_logprobs": null,
        "kv_transfer_params": null
      }
    },
    "is_streaming": false
  },
  "id_normalization_mapping": {}
}
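The test_id above points at a pytest integration test. As a rough, hypothetical reconstruction of what that test might look like (the openai_client fixture and the assertions are assumptions; the tool schema, prompt, and model are taken from the recorded request):

    def test_simple_tool_call(openai_client):
        # openai_client is an assumed fixture yielding an OpenAI-compatible client
        # pointed at either a live vLLM server (recording) or the replay layer.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string", "description": "City name"}
                        },
                        "required": ["location"],
                    },
                },
            }
        ]
        response = openai_client.chat.completions.create(
            model="Qwen/Qwen3-0.6B",
            messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
            max_tokens=4096,
            tools=tools,
        )
        # Under replay, the recorded response above is returned verbatim, so these
        # assertions validate the captured tool call rather than live inference.
        call = response.choices[0].message.tool_calls[0]
        assert call.function.name == "get_weather"
        assert "San Francisco" in call.function.arguments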