Mirror of https://github.com/meta-llama/llama-stack.git
Propagate test IDs from client to server via HTTP headers to maintain proper test isolation when running with server-based stack configs. Without this, recorded/replayed inference requests in server mode would leak across tests.

Changes:
- Patch client _prepare_request to inject test ID into provider data header (see the sketch below)
- Sync test context from provider data on server side before storage operations
- Set LLAMA_STACK_TEST_STACK_CONFIG_TYPE env var based on stack config
- Configure console width for cleaner log output in CI
- Add SQLITE_STORE_DIR temp directory for test data isolation
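A minimal sketch of the client-side patch described above, assuming a stainless-style client whose _prepare_request hook receives an httpx.Request; the header name and the __test_id key are illustrative assumptions, not necessarily the exact identifiers used in llama-stack:

import functools
import json

PROVIDER_DATA_HEADER = "X-LlamaStack-Provider-Data"  # assumed header name

def patch_client_for_test(client, test_id: str) -> None:
    # Wrap client._prepare_request so every outgoing HTTP request carries
    # the current test ID, letting the server keep recordings per test.
    original = client._prepare_request

    @functools.wraps(original)
    def _prepare_request(request):
        provider_data = json.loads(request.headers.get(PROVIDER_DATA_HEADER, "{}"))
        provider_data["__test_id"] = test_id  # assumed key name
        request.headers[PROVIDER_DATA_HEADER] = json.dumps(provider_data)
        return original(request)

    client._prepare_request = _prepare_request

On the server side, the counterpart would read this header and restore the test context before any recording storage operation, so requests replayed in server mode resolve to the same test ID as in library-client mode.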
44 lines · 1.3 KiB · JSON
{
  "test_id": "tests/integration/batches/test_batches.py::TestBatchesIntegration::test_batch_e2e_completions[txt=ollama/llama3.2:3b-instruct-fp16]",
  "request": {
    "method": "POST",
    "url": "http://0.0.0.0:11434/v1/v1/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "prompt": "Say completions",
      "max_tokens": 20,
      "extra_body": {}
    },
    "endpoint": "/v1/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": {
      "__type__": "openai.types.completion.Completion",
      "__data__": {
        "id": "rec-cb4673f5ab80",
        "choices": [
          {
            "finish_reason": "length",
            "index": 0,
            "logprobs": null,
            "text": "I'd be happy to provide some completions on a topic of your choice. What would you like"
          }
        ],
        "created": 0,
        "model": "llama3.2:3b-instruct-fp16",
        "object": "text_completion",
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 20,
          "prompt_tokens": 28,
          "total_tokens": 48,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}