mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-25 09:05:37 +00:00
Propagate test IDs from client to server via HTTP headers to maintain proper test isolation when running with server-based stack configs. Without this, recorded/replayed inference requests in server mode would leak across tests.

Changes:
- Patch the client's _prepare_request to inject the test ID into the provider data header
- Sync the test context from provider data on the server side before storage operations
- Set the LLAMA_STACK_TEST_STACK_CONFIG_TYPE env var based on the stack config
- Configure console width for cleaner log output in CI
- Add a SQLITE_STORE_DIR temp directory for test data isolation
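A minimal sketch of the client-side injection step described above, assuming a pytest-managed context variable and an X-LlamaStack-Provider-Data header; the hook signature, helper names, and the __test_id__ key are illustrative assumptions, not the repository's actual implementation:

import json
from contextvars import ContextVar

# Hypothetical context variable holding the ID of the currently running test
# (e.g. set by a pytest fixture before each test body executes).
_current_test_id: ContextVar[str | None] = ContextVar("_current_test_id", default=None)

# Assumed name of the header that carries provider data to the server.
PROVIDER_DATA_HEADER = "X-LlamaStack-Provider-Data"


def _inject_test_id(headers: dict[str, str]) -> dict[str, str]:
    """Merge the active test ID into the provider-data header, if one is set."""
    test_id = _current_test_id.get()
    if test_id is None:
        return headers
    provider_data = json.loads(headers.get(PROVIDER_DATA_HEADER, "{}"))
    provider_data["__test_id__"] = test_id  # key name is illustrative
    headers[PROVIDER_DATA_HEADER] = json.dumps(provider_data)
    return headers


def patch_prepare_request(client) -> None:
    """Wrap the client's _prepare_request so every outgoing request carries the
    active test ID; the wrapped signature is assumed, not taken from the SDK."""
    original = client._prepare_request

    def patched(request, *args, **kwargs):
        # Assumes request.headers is dict-like and reassignable.
        request.headers = _inject_test_id(dict(request.headers))
        return original(request, *args, **kwargs)

    client._prepare_request = patched

On the server side, a matching step would read the test ID back out of the provider-data header and set the test context before any storage operation, which is what keeps recordings like the one below from leaking across tests.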
		
			
				
	
	
		
57 lines | 1.7 KiB | JSON | Generated
{
  "test_id": "tests/integration/inference/test_openai_completion.py::test_openai_completion_stop_sequence[txt=ollama/llama3.2:3b-instruct-fp16-inference:completion:stop_sequence]",
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/ps",
    "headers": {},
    "body": {},
    "endpoint": "/api/ps",
    "model": ""
  },
  "response": {
    "body": {
      "__type__": "ollama._types.ProcessResponse",
      "__data__": {
        "models": [
          {
            "model": "llama3.2:3b",
            "name": "llama3.2:3b",
            "digest": "a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72",
            "expires_at": "2025-10-08T16:14:05.423042-07:00",
            "size": 3367856128,
            "size_vram": 3367856128,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "Q4_K_M"
            }
          },
          {
            "model": "llama3.2:3b-instruct-fp16",
            "name": "llama3.2:3b-instruct-fp16",
            "digest": "195a8c01d91ec3cb1e0aad4624a51f2602c51fa7d96110f8ab5a20c84081804d",
            "expires_at": "2025-10-08T11:35:03.108973-07:00",
            "size": 7919570944,
            "size_vram": 7919570944,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "F16"
            }
          }
        ]
      }
    },
    "is_streaming": false
  }
}