Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-26 09:15:40 +00:00)

Propagate test IDs from client to server via HTTP headers to maintain proper test isolation when running with server-based stack configs. Without this, recorded/replayed inference requests in server mode would leak across tests.

Changes:
- Patch client _prepare_request to inject the test ID into the provider data header
- Sync test context from provider data on the server side before storage operations
- Set the LLAMA_STACK_TEST_STACK_CONFIG_TYPE env var based on stack config
- Configure console width for cleaner log output in CI
- Add a SQLITE_STORE_DIR temp directory for test data isolation
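For illustration, here is a minimal sketch of the client-side half of this change: a wrapper around _prepare_request that injects the test ID into the provider data header before each request. The header name (X-LlamaStack-Provider-Data), the __test_id key, and the helper names are assumptions made for this sketch, not the actual implementation, and the request object is assumed to expose a mutable headers mapping.

import json
import os
import tempfile

PROVIDER_DATA_HEADER = "X-LlamaStack-Provider-Data"  # assumed header name


def patch_client_prepare_request(client_cls, test_id):
    """Wrap client_cls._prepare_request so every outgoing request carries the
    current test ID inside the provider data header (illustrative sketch only)."""
    original = client_cls._prepare_request

    def _prepare_request(self, request):
        result = original(self, request)
        provider_data = json.loads(request.headers.get(PROVIDER_DATA_HEADER, "{}"))
        provider_data["__test_id"] = test_id  # hypothetical key; the server reads it back out
        request.headers[PROVIDER_DATA_HEADER] = json.dumps(provider_data)
        return result

    client_cls._prepare_request = _prepare_request
    return original  # a fixture can restore the unpatched method after the test


def configure_test_env(stack_config_type):
    """Environment setup mentioned in the change, e.g. from a test fixture.
    The LLAMA_STACK_TEST_STACK_CONFIG_TYPE value depends on the stack config in use."""
    os.environ["LLAMA_STACK_TEST_STACK_CONFIG_TYPE"] = stack_config_type  # e.g. "server"
    os.environ["SQLITE_STORE_DIR"] = tempfile.mkdtemp()  # per-run temp dir for test data

On the server side, the corresponding hook reads the test ID back out of the provider data and syncs the test context before any recording/replay storage operation; the recorded fixture below is one example of the per-test data this keeps isolated.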
		
			
				
	
	
		
Generated JSON recording (79 lines, 2.1 KiB):
{
  "test_id": "tests/integration/inference/test_tools_with_schemas.py::TestEdgeCases::test_tool_without_schema[txt=ollama/llama3.2:3b-instruct-fp16]",
  "request": {
    "method": "POST",
    "url": "http://0.0.0.0:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "messages": [
        {
          "role": "user",
          "content": "Call the no args tool"
        }
      ],
      "tools": [
        {
          "type": "function",
          "function": {
            "name": "no_args_tool",
            "description": "Tool with no arguments",
            "parameters": {
              "type": "object",
              "properties": {}
            }
          }
        }
      ]
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": {
      "__type__": "openai.types.chat.chat_completion.ChatCompletion",
      "__data__": {
        "id": "rec-a1e4350b0157",
        "choices": [
          {
            "finish_reason": "tool_calls",
            "index": 0,
            "logprobs": null,
            "message": {
              "content": "",
              "refusal": null,
              "role": "assistant",
              "annotations": null,
              "audio": null,
              "function_call": null,
              "tool_calls": [
                {
                  "id": "call_mw3hh7mz",
                  "function": {
                    "arguments": "{}",
                    "name": "no_args_tool"
                  },
                  "type": "function",
                  "index": 0
                }
              ]
            }
          }
        ],
        "created": 0,
        "model": "llama3.2:3b-instruct-fp16",
        "object": "chat.completion",
        "service_tier": null,
        "system_fingerprint": "fp_ollama",
        "usage": {
          "completion_tokens": 14,
          "prompt_tokens": 148,
          "total_tokens": 162,
          "completion_tokens_details": null,
          "prompt_tokens_details": null
        }
      }
    },
    "is_streaming": false
  }
}