{
  "test_id": "tests/integration/inference/test_openai_completion.py::test_openai_completion_non_streaming_suffix[txt=ollama/llama3.2:3b-instruct-fp16-inference:completion:suffix]",
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/ps",
    "headers": {},
    "body": {},
    "endpoint": "/api/ps",
    "model": ""
  },
  "response": {
    "body": {
      "__type__": "ollama._types.ProcessResponse",
      "__data__": {
        "models": [
          {
            "model": "llama3.2:3b-instruct-fp16",
            "name": "llama3.2:3b-instruct-fp16",
            "digest": "195a8c01d91ec3cb1e0aad4624a51f2602c51fa7d96110f8ab5a20c84081804d",
            "expires_at": "2025-10-08T14:29:44.502767-07:00",
            "size": 8581748736,
            "size_vram": 8581748736,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "F16"
            },
            "context_length": null
          },
          {
            "model": "llama-guard3:1b",
            "name": "llama-guard3:1b",
            "digest": "494147e06bf99e10dbe67b63a07ac81c162f18ef3341aa3390007ac828571b3b",
            "expires_at": "2025-10-08T14:29:39.978391-07:00",
            "size": 2770397184,
            "size_vram": 2770397184,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "1.5B",
              "quantization_level": "Q8_0"
            },
            "context_length": null
          }
        ]
      }
    },
    "is_streaming": false
  }
}