Continues to build on top of https://github.com/meta-llama/llama-stack/pull/2941.

## Test Plan

Run the server with `LLAMA_STACK_TEST_INFERENCE_MODE=record` and then run the integration tests with `--stack-config=server:starter`. Then restart the server with `LLAMA_STACK_TEST_INFERENCE_MODE=replay` and re-run the tests. Verify that no request hits Ollama at any point.
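A rough sketch of that record/replay loop as shell commands (the `llama stack run starter` invocation and the `tests/integration` path are assumptions here, not taken verbatim from this PR):

```bash
# 1. Record: start the server with recording enabled so live Ollama
#    responses are captured to JSON files like the one below.
#    (server start command is an assumption)
LLAMA_STACK_TEST_INFERENCE_MODE=record llama stack run starter

# 2. In another shell, run the integration tests against that server.
#    (test path and pytest flags are partly assumed)
pytest -sv tests/integration --stack-config=server:starter

# 3. Replay: restart the server so it serves the recorded responses instead.
LLAMA_STACK_TEST_INFERENCE_MODE=replay llama stack run starter

# 4. Re-run the same tests and confirm nothing reaches Ollama at
#    http://localhost:11434 (e.g. stop Ollama before this step).
pytest -sv tests/integration --stack-config=server:starter
```

The JSON below is one such recording: the request sent to Ollama's `/api/generate` endpoint plus the streamed chunks that were captured.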
		
			
				
	
	
		
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n    {\n        \"name\": \"get_boiling_point\",\n        \"description\": \"Returns the boiling point of a liquid in Celcius or Fahrenheit.\",\n        \"parameters\": {\n            \"type\": \"dict\",\n            \"required\": [\"liquid_name\"],\n            \"properties\": {\n                \"liquid_name\": {\n                    \"type\": \"str\",\n                    \"description\": \"The name of the liquid\"\n                },\n                \"celcius\": {\n                    \"type\": \"bool\",\n                    \"description\": \"Whether to return the boiling point in Celcius\",\n                    \"default\": \"True\"\n                }\n            }\n        }\n    }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nCall get_boiling_point tool and answer What is the boiling point of polyjuice?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[get_boiling_point(liquid_name=\"polyjuice\", celcius=True)]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\n-100<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0001,
        "top_p": 0.9
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.329935Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "The",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.37326Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " boiling",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.415761Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " point",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.458843Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " of",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.501468Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " poly",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.543451Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ju",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.586683Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ice",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.629666Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " is",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.672199Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " -",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.71471Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "100",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.757321Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "\u00b0C",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.801345Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": ".",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T20:04:29.844187Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 691818542,
          "load_duration": 102634584,
          "prompt_eval_count": 402,
          "prompt_eval_duration": 72389458,
          "eval_count": 13,
          "eval_duration": 516194167,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}