{
"test_id": "tests/integration/inference/test_openai_embeddings.py::test_openai_embeddings_base64_batch_processing[openai_client-emb=sentence-transformers/nomic-ai/nomic-embed-text-v1.5]",
"request": {
"method": "POST",
"url": "http://localhost:8000/v1/v1/models",
"headers": {},
"body": {},
"endpoint": "/v1/models",
"model": ""
},
"response": {
"body": [
{
"__type__": "openai.types.model.Model",
"__data__": {
"id": "Qwen/Qwen3-0.6B",
"created": 1762374591,
"object": "model",
"owned_by": "vllm",
"root": "/root/.cache/Qwen3-0.6B",
"parent": null,
"max_model_len": 8192,
"permission": [
{
"id": "modelperm-e19031997a1e44d99c8b5ae55725a887",
"object": "model_permission",
"created": 1762374591,
"allow_create_engine": false,
"allow_sampling": true,
"allow_logprobs": true,
"allow_search_indices": false,
"allow_view": true,
"allow_fine_tuning": false,
"organization": "*",
"group": null,
"is_blocking": false
}
]
}
}
],
"is_streaming": false
},
"id_normalization_mapping": {}
}