mirror of
				https://github.com/meta-llama/llama-stack.git
				synced 2025-10-25 17:11:12 +00:00 
			
		
		
		
	Uses test_id in request hashes and test-scoped subdirectories to prevent cross-test contamination. Model list endpoints exclude test_id to enable merging recordings from different servers. Additionally, this PR adds a `record-if-missing` mode (which we will use instead of `record` which records everything) which is very useful. 🤖 Co-authored with [Claude Code](https://claude.com/claude-code) --------- Co-authored-by: Claude <noreply@anthropic.com>
		
			
				
	
	
		
			40 lines
		
	
	
	
		
			1.2 KiB
		
	
	
	
		
			JSON
		
	
	
	
	
	
			
		
		
	
	
			40 lines
		
	
	
	
		
			1.2 KiB
		
	
	
	
		
			JSON
		
	
	
	
	
	
{
  "test_id": "tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_01]",
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/ps",
    "headers": {},
    "body": {},
    "endpoint": "/api/ps",
    "model": ""
  },
  "response": {
    "body": {
      "__type__": "ollama._types.ProcessResponse",
      "__data__": {
        "models": [
          {
            "model": "llama3.2:3b",
            "name": "llama3.2:3b",
            "digest": "a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72",
            "expires_at": "2025-10-04T12:20:09.202126-07:00",
            "size": 3367856128,
            "size_vram": 3367856128,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "Q4_K_M"
            },
            "context_length": 4096
          }
        ]
      }
    },
    "is_streaming": false
  }
}