llama-stack-mirror/tests/integration/inference/recordings/models-da380037dc0fe8ae61b838baf268e616057e46f8424df0a9b52f94e48cef4a7f-fb68f5a6.json

{
  "test_id": "tests/integration/inference/test_openai_completion.py::test_inference_store_tool_calls[openai_client-txt=vllm/Qwen/Qwen3-0.6B-False]",
  "request": {
    "method": "POST",
    "url": "http://localhost:8000/v1/v1/models",
    "headers": {},
    "body": {},
    "endpoint": "/v1/models",
    "model": ""
  },
  "response": {
    "body": [
      {
        "__type__": "openai.types.model.Model",
        "__data__": {
          "id": "Qwen/Qwen3-0.6B",
          "created": 1762375053,
          "object": "model",
          "owned_by": "vllm",
          "root": "/root/.cache/Qwen3-0.6B",
          "parent": null,
          "max_model_len": 8192,
          "permission": [
            {
              "id": "modelperm-2e52800baf7e4d3389892f33feb3f52b",
              "object": "model_permission",
              "created": 1762375053,
              "allow_create_engine": false,
              "allow_sampling": true,
              "allow_logprobs": true,
              "allow_search_indices": false,
              "allow_view": true,
              "allow_fine_tuning": false,
              "organization": "*",
              "group": null,
              "is_blocking": false
            }
          ]
        }
      }
    ],
    "is_streaming": false
  },
  "id_normalization_mapping": {}
}