Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-26 09:15:40 +00:00
Propagate test IDs from client to server via HTTP headers to maintain proper test isolation when running with server-based stack configs. Without this, recorded/replayed inference requests in server mode would leak across tests.

Changes:
- Patch the client's _prepare_request to inject the test ID into the provider-data header (sketched below)
- Sync test context from provider data on the server side before storage operations
- Set the LLAMA_STACK_TEST_STACK_CONFIG_TYPE env var based on the stack config
- Configure console width for cleaner log output in CI
- Add a SQLITE_STORE_DIR temp directory for test data isolation
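For illustration, here is a minimal sketch of the client-side patch and the server-side sync described above. The header name, the `__test_id__` key, and the contextvar are assumptions made for the sketch, not the repository's actual identifiers.

```python
import contextvars
import json

# Assumed header key: llama-stack carries per-request provider data in a
# JSON-encoded HTTP header; the exact name here is illustrative.
PROVIDER_DATA_HEADER = "X-LlamaStack-Provider-Data"

# Test ID for the currently running test; a pytest fixture would set this
# on the client side (an assumption for this sketch).
CURRENT_TEST_ID: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "llama_stack_test_id", default=None
)


def patch_client(client) -> None:
    """Client side: wrap _prepare_request so every outgoing request carries
    the active test ID inside the provider-data header."""
    original_prepare = client._prepare_request

    def _prepare_request(request):
        test_id = CURRENT_TEST_ID.get()
        if test_id is not None:
            # Merge the test ID into any provider data already on the request.
            raw = request.headers.get(PROVIDER_DATA_HEADER)
            data = json.loads(raw) if raw else {}
            data["__test_id__"] = test_id
            request.headers[PROVIDER_DATA_HEADER] = json.dumps(data)
        return original_prepare(request)

    client._prepare_request = _prepare_request


def sync_test_context(provider_data: dict) -> None:
    """Server side: restore the test ID from provider data before any
    recording/replay storage operation, so entries stay isolated per test."""
    test_id = provider_data.get("__test_id__")
    if test_id:
        CURRENT_TEST_ID.set(test_id)
```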
		
			
				
	
	
		
75 lines · 2.3 KiB · JSON · Generated
{
  "test_id": "tests/integration/inference/test_openai_embeddings.py::test_openai_embeddings_with_user_parameter[llama_stack_client-emb=ollama/all-minilm:l6-v2]",
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/ps",
    "headers": {},
    "body": {},
    "endpoint": "/api/ps",
    "model": ""
  },
  "response": {
    "body": {
      "__type__": "ollama._types.ProcessResponse",
      "__data__": {
        "models": [
          {
            "model": "llama3.2:3b",
            "name": "llama3.2:3b",
            "digest": "a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72",
            "expires_at": "2025-10-08T16:14:05.423042-07:00",
            "size": 3367856128,
            "size_vram": 3367856128,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "Q4_K_M"
            }
          },
          {
            "model": "all-minilm:l6-v2",
            "name": "all-minilm:l6-v2",
            "digest": "1b226e2802dbb772b5fc32a58f103ca1804ef7501331012de126ab22f67475ef",
            "expires_at": "2025-10-08T11:32:11.101611-07:00",
            "size": 585846784,
            "size_vram": 585846784,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "bert",
              "families": [
                "bert"
              ],
              "parameter_size": "23M",
              "quantization_level": "F16"
            }
          },
          {
            "model": "llama-guard3:1b",
            "name": "llama-guard3:1b",
            "digest": "494147e06bf99e10dbe67b63a07ac81c162f18ef3341aa3390007ac828571b3b",
            "expires_at": "2025-10-08T11:30:00.392919-07:00",
            "size": 2350966784,
            "size_vram": 2350966784,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "1.5B",
              "quantization_level": "Q8_0"
            }
          }
        ]
      }
    },
    "is_streaming": false
  }
}
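The "test_id" field at the top of the recording is what the header propagation preserves: in server mode, the replay store can key each recorded request by the test that produced it, so identical requests from different tests never collide. A hypothetical key scheme, assuming a simple (test ID, request digest) pair rather than the repository's actual storage layout:

```python
import hashlib
import json


def recording_key(test_id: str, request: dict) -> str:
    """Key a recording by the originating test plus a digest of the
    request body (sketch only; the real store may key differently)."""
    digest = hashlib.sha256(
        json.dumps(request, sort_keys=True).encode()
    ).hexdigest()
    return f"{test_id}:{digest}"
```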