Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-03 19:57:35 +00:00)
add missing mcp invocation recordings
commit 0e4279707e (parent 0a0e02a0ac)
3 changed files with 699 additions and 0 deletions
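Each recording captures one POST to the local Ollama server's /api/generate endpoint together with the full stream of GenerateResponse chunks it produced, so the MCP tool-invocation integration tests can replay these turns without a live model. Below is a minimal sketch of loading such a file and reassembling its streamed fragments; the helper name and replay approach are illustrative assumptions, not the actual llama-stack test harness.

```python
# Illustrative sketch only: reassemble the streamed "response" fragments of one
# recorded /api/generate call. The helper name and usage are assumptions; the
# real llama-stack harness may replay these recordings differently.
import json
from pathlib import Path


def replay_recording(path: Path) -> str:
    record = json.loads(path.read_text())
    assert record["request"]["endpoint"] == "/api/generate"
    assert record["response"]["is_streaming"] is True
    chunks = [c["__data__"] for c in record["response"]["body"]]
    # Every chunk except the last carries a text fragment; the last has
    # done=True and the aggregate token/latency counters.
    assert chunks[-1]["done"] is True
    return "".join(c["response"] for c in chunks)


if __name__ == "__main__":
    rec = Path("tests/integration/recordings/responses/168daab89068.json")
    print(replay_recording(rec))  # "How can I assist you further?"
```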
tests/integration/recordings/responses/168daab89068.json (new file, 167 lines)
@@ -0,0 +1,167 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\",\n \"default\": \"True\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.935115981Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "How",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.947461014Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " can",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.957159731Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " I",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.966717424Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " assist",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.976404561Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " you",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.986022942Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " further",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.995649652Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "?",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.005198515Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 344765770,
          "load_duration": 219142539,
          "prompt_eval_count": 482,
          "prompt_eval_duration": 45594530,
          "eval_count": 8,
          "eval_duration": 79411415,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
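Only the terminal chunk (done true, done_reason "stop") carries populated counters; Ollama reports the *_duration fields in nanoseconds, so the recording above covers 482 prompt tokens and 8 generated tokens in roughly 0.34 s end to end. A small, illustrative sketch of turning those counters into throughput figures (the helper name is an assumption):

```python
# Illustrative: derive latency/throughput figures from the final chunk of a
# recording. Durations in Ollama's GenerateResponse are nanoseconds.
import json
from pathlib import Path


def decode_stats(path: Path) -> dict:
    final = json.loads(path.read_text())["response"]["body"][-1]["__data__"]
    assert final["done"] and final["done_reason"] == "stop"
    ns = 1e9
    return {
        "prompt_tokens": final["prompt_eval_count"],
        "generated_tokens": final["eval_count"],
        "decode_tok_per_s": final["eval_count"] / (final["eval_duration"] / ns),
        "total_s": final["total_duration"] / ns,
    }


# For 168daab89068.json: 482 prompt tokens, 8 generated tokens,
# ~100 tok/s decode, ~0.34 s total.
```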
tests/integration/recordings/responses/949d3ad16367.json (new file, 347 lines)
@@ -0,0 +1,347 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\",\n \"default\": \"True\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHow can I assist you further?<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the boiling point of polyjuice? Use tools to answer.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.904643956Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "[",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.916859233Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "get",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.926504022Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_bo",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.936030206Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "iling",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.945883797Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_point",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.955119103Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "(",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.964695005Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "liquid",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.974424001Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_name",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.983890847Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "='",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:56.993651852Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "poly",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.003124728Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ju",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.012819745Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ice",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.022286241Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "',",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.032023797Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " c",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.041504452Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "elsius",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.051211229Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "=True",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.060668146Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": ")]",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:57.070269267Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 391924499,
          "load_duration": 194743103,
          "prompt_eval_count": 514,
          "prompt_eval_duration": 23043860,
          "eval_count": 18,
          "eval_duration": 173446083,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
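In this recording the model answers the polyjuice question by emitting a tool call in the bracketed format the system prompt demands, concatenating to [get_boiling_point(liquid_name='polyjuice', celsius=True)]. A rough sketch of parsing that format back into a name and keyword arguments follows; it is illustrative only, not llama-stack's actual tool-call parser.

```python
# Illustrative only (not llama-stack's parser): turn the bracketed tool-call
# string the model streams back into (name, kwargs) pairs by leaning on
# Python's own expression grammar.
import ast


def parse_tool_calls(text: str) -> list[tuple[str, dict]]:
    tree = ast.parse(text.strip(), mode="eval")
    assert isinstance(tree.body, ast.List), "expected [func(...), ...]"
    calls = []
    for node in tree.body.elts:
        assert isinstance(node, ast.Call)
        kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in node.keywords}
        calls.append((node.func.id, kwargs))
    return calls


print(parse_tool_calls("[get_boiling_point(liquid_name='polyjuice', celsius=True)]"))
# [('get_boiling_point', {'liquid_name': 'polyjuice', 'celsius': True})]
```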
tests/integration/recordings/responses/e61266e87842.json (new file, 185 lines)
@@ -0,0 +1,185 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\",\n \"default\": \"True\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.934380579Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "[g",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.94754697Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "reet",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.957261736Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_every",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.966955653Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "one",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.976633061Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "(url",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.986273131Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "=\"",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:54.995937469Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "world",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.00554591Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "\")]",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-10-01T13:50:55.01518389Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 3251109731,
          "load_duration": 3014241988,
          "prompt_eval_count": 459,
          "prompt_eval_duration": 152280811,
          "eval_count": 9,
          "eval_duration": 84036772,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
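Taken together, the three recordings cover the successive Ollama calls of one MCP tool-calling conversation: e61266e87842 yields the initial [greet_everyone(url="world")] call, 168daab89068 yields the follow-up "How can I assist you further?" once the tool output is appended to the prompt, and 949d3ad16367 yields the get_boiling_point call for the final user question. A hedged, pytest-style sketch of asserting that ordering (paths as added in this commit; the reassembly logic repeats the earlier sketch and is not the actual replay code):

```python
# Illustrative pytest-style check that the three recordings added here line up
# with the conversation turns described above.
import json
from pathlib import Path

RESPONSES = Path("tests/integration/recordings/responses")


def _text(name: str) -> str:
    body = json.loads((RESPONSES / name).read_text())["response"]["body"]
    return "".join(chunk["__data__"]["response"] for chunk in body)


def test_mcp_invocation_recordings_cover_the_full_turn_sequence():
    assert _text("e61266e87842.json") == '[greet_everyone(url="world")]'
    assert _text("168daab89068.json") == "How can I assist you further?"
    assert _text("949d3ad16367.json") == "[get_boiling_point(liquid_name='polyjuice', celsius=True)]"
```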