Renames `inference_recorder.py` to `api_recorder.py` and extends it to support recording/replaying tool invocations in addition to inference calls. This allows us to record web-search and other tool calls, and then replay those recordings for `tests/integration/responses`.

## Test Plan

```
export OPENAI_API_KEY=...
export TAVILY_SEARCH_API_KEY=...
./scripts/integration-tests.sh --stack-config ci-tests \
  --suite responses --inference-mode record-if-missing
```
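For context, here is a minimal sketch of how a record-if-missing layer could work for both inference and tool calls. It is illustrative only, not the actual `api_recorder.py` implementation; the `RECORDINGS_DIR` path, `_recording_key` helper, and on-disk layout are assumptions loosely modeled on the recording file shown below.

```python
# Illustrative sketch only -- not the real api_recorder.py.
# Assumes recordings are JSON files keyed by a hash of the normalized request.
import hashlib
import json
from pathlib import Path

RECORDINGS_DIR = Path("recordings")  # hypothetical location


def _recording_key(endpoint: str, body: dict) -> str:
    """Derive a stable key from the endpoint and request body."""
    payload = json.dumps({"endpoint": endpoint, "body": body}, sort_keys=True)
    return hashlib.sha256(payload.encode()).hexdigest()[:12]


def record_or_replay(endpoint: str, body: dict, call_live):
    """Replay a stored response if one exists; otherwise call live and record it."""
    path = RECORDINGS_DIR / f"{_recording_key(endpoint, body)}.json"
    if path.exists():
        return json.loads(path.read_text())["response"]
    response = call_live(endpoint, body)  # e.g. real inference or web-search call
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(
        json.dumps({"request": {"endpoint": endpoint, "body": body},
                    "response": response}, indent=2)
    )
    return response
```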
		
			
				
	
	
		
Generated recording (JSON, 219 lines, 7 KiB):
{
  "test_id": "tests/integration/responses/test_tool_responses.py::test_response_sequential_file_search[client_with_models-txt=openai/gpt-4o:emb=openai/text-embedding-3-small:dim=1536]",
  "request": {
    "method": "POST",
    "url": "https://api.openai.com/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "gpt-4o",
      "messages": [
        {
          "role": "user",
          "content": "How many experts does the Llama 4 Maverick model have?"
        },
        {
          "role": "assistant",
          "content": "",
          "tool_calls": [
            {
              "index": 0,
              "id": "call_zS2WxgXWetjnlPt2MzH9Asrc",
              "type": "function",
              "function": {
                "name": "knowledge_search",
                "arguments": "{\"query\":\"Llama 4 Maverick model number of experts\"}"
              }
            }
          ]
        },
        {
          "role": "tool",
          "tool_call_id": "call_zS2WxgXWetjnlPt2MzH9Asrc",
          "content": [
            {
              "type": "text",
              "text": "knowledge_search tool found 1 chunks:\nBEGIN of knowledge_search tool results.\n"
            },
            {
              "type": "text",
              "text": "[1] document_id: file-5217982280, score: 2.57802841833685, attributes: {'filename': 'test_sequential_file_search.txt', 'document_id': 'file-5217982280', 'token_count': 19.0, 'metadata_token_count': 11.0} (cite as <|file-5217982280|>)\nThe Llama 4 Maverick model has 128 experts in its mixture of experts architecture.\n"
            },
            {
              "type": "text",
              "text": "END of knowledge_search tool results.\n"
            },
            {
              "type": "text",
              "text": "The above results were retrieved to help answer the user's query: \"Llama 4 Maverick model number of experts\". Use them as supporting information only in answering this query. Cite sources immediately at the end of sentences before punctuation, using `<|file-id|>` format (e.g., 'This is a fact <|file-Cn3MSNn72ENTiiq11Qda4A|>.'). Do not add extra punctuation. Use only the file IDs provided (do not invent new ones).\n"
            }
          ]
        }
      ],
      "stream": true,
      "tools": [
        {
          "type": "function",
          "function": {
            "name": "knowledge_search",
            "description": "Search for information in a database.",
            "parameters": {
              "type": "object",
              "properties": {
                "query": {
                  "type": "string",
                  "description": "The query to search for. Can be a natural language sentence or keywords."
                }
              },
              "required": [
                "query"
              ]
            }
          }
        }
      ]
    },
    "endpoint": "/v1/chat/completions",
    "model": "gpt-4o"
  },
  "response": {
    "body": [
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-813ac454f8df",
          "choices": [
            {
              "delta": {
                "content": "",
                "function_call": null,
                "refusal": null,
                "role": "assistant",
                "tool_calls": null
              },
              "finish_reason": null,
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "gpt-4o-2024-08-06",
          "object": "chat.completion.chunk",
          "service_tier": "default",
          "system_fingerprint": "fp_f64f290af2",
          "usage": null,
          "obfuscation": "kAlKUdypard"
        }
      },
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-813ac454f8df",
          "choices": [
            {
              "delta": {
                "content": "The Llama ",
                "function_call": null,
                "refusal": null,
                "role": null,
                "tool_calls": null
              },
              "finish_reason": null,
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "gpt-4o-2024-08-06",
          "object": "chat.completion.chunk",
          "service_tier": "default",
          "system_fingerprint": "fp_f64f290af2",
          "usage": null,
          "obfuscation": "U9LsCIESSE"
        }
      },
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-813ac454f8df",
          "choices": [
            {
              "delta": {
                "content": "4 Maverick model has ",
                "function_call": null,
                "refusal": null,
                "role": null,
                "tool_calls": null
              },
              "finish_reason": null,
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "gpt-4o-2024-08-06",
          "object": "chat.completion.chunk",
          "service_tier": "default",
          "system_fingerprint": "fp_f64f290af2",
          "usage": null,
          "obfuscation": "wwn6wHEfw0FY"
        }
      },
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-813ac454f8df",
          "choices": [
            {
              "delta": {
                "content": "128 experts in its mixture of experts architecture <|file-5217982280|>.",
                "function_call": null,
                "refusal": null,
                "role": null,
                "tool_calls": null
              },
              "finish_reason": null,
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "gpt-4o-2024-08-06",
          "object": "chat.completion.chunk",
          "service_tier": "default",
          "system_fingerprint": "fp_f64f290af2",
          "usage": null,
          "obfuscation": "k3KlnqkWxH"
        }
      },
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-813ac454f8df",
          "choices": [
            {
              "delta": {
                "content": null,
                "function_call": null,
                "refusal": null,
                "role": null,
                "tool_calls": null
              },
              "finish_reason": "stop",
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "gpt-4o-2024-08-06",
          "object": "chat.completion.chunk",
          "service_tier": "default",
          "system_fingerprint": "fp_f64f290af2",
          "usage": null,
          "obfuscation": "m4ogAxG"
        }
      }
    ],
    "is_streaming": true
  },
  "id_normalization_mapping": {}
}
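For reference, a hedged sketch of how a replay layer might turn the recorded streaming body above back into OpenAI SDK objects. It assumes the `__type__`/`__data__` convention visible in this file, that the recording is stored at a hypothetical path such as `rec-813ac454f8df.json`, and that extra recorded fields (e.g. `obfuscation`) are tolerated by the SDK's pydantic models; it is not the project's actual replay code.

```python
# Sketch: rebuild recorded streaming chunks into SDK objects for replay.
# Assumes the "__type__"/"__data__" layout shown in the recording above.
import json

from openai.types.chat import ChatCompletionChunk


def iter_recorded_chunks(path: str):
    """Yield ChatCompletionChunk objects from a recorded streaming response."""
    with open(path) as f:
        recording = json.load(f)
    assert recording["response"]["is_streaming"]
    for item in recording["response"]["body"]:
        if item["__type__"].endswith("ChatCompletionChunk"):
            yield ChatCompletionChunk.model_validate(item["__data__"])


# Example: concatenate the assistant text exactly as a streaming client would
# (the filename is hypothetical).
text = "".join(
    chunk.choices[0].delta.content or ""
    for chunk in iter_recorded_chunks("rec-813ac454f8df.json")
)
```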