mirror of https://github.com/meta-llama/llama-stack.git

commit 7182952014 (parent 858d3fa647)

    needed to record responses for Python 3.12 for the tool runtime too

6 changed files with 701 additions and 21 deletions
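For context, each recording added below stores one proxied Ollama request together with the response it produced; streamed responses keep one serialized chunk per list element under "response.body". A minimal replay sketch (not the project's actual test harness; the loading logic is only an illustration of the layout visible in these fixtures):

import json
from pathlib import Path


def replay(path: Path):
    """Yield the recorded response payload(s) stored in one fixture file."""
    record = json.loads(path.read_text())
    response = record["response"]
    if response.get("is_streaming"):
        # Streaming recordings: one serialized chunk (e.g. GenerateResponse) per entry.
        for chunk in response["body"]:
            yield chunk["__data__"]
    else:
        # Non-streaming recordings store a single serialized object.
        yield response["body"]["__data__"]


# Example: re-assemble the text the model streamed back for one recording.
path = Path("tests/integration/recordings/responses/9c28ec9ac338.json")
print("".join(chunk["response"] for chunk in replay(path)))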
Binary file not shown.
@@ -14,7 +14,7 @@
         "models": [
           {
             "model": "nomic-embed-text:latest",
-            "modified_at": "2025-07-29T16:26:10.815830-07:00",
+            "modified_at": "2025-07-29T16:46:26.304701-07:00",
             "digest": "0a109f422b47e3a30ba2b10eca18548e944e8a23073ee3f3e947efcf3c45e59f",
             "size": 274302450,
             "details": {
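The hunk above touches a recorded model listing: the fields ("model", "modified_at", "digest", "size", "details") are the ones Ollama's GET /api/tags returns, so only the embedding model's "modified_at" timestamp changed when the recording was regenerated. A hedged sketch of fetching the same data live, assuming a recent ollama Python client and a local server:

import ollama

# Equivalent of `ollama list` / GET /api/tags: one entry per locally pulled model.
for m in ollama.list().models:
    print(m.model, m.digest[:12], m.size, m.modified_at)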
tests/integration/recordings/responses/9c28ec9ac338.json (new file, 347 lines added)
@@ -0,0 +1,347 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHow can I assist you further?<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the boiling point of polyjuice? Use tools to answer.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.316207Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "[",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.358611Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "get",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.401272Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_bo",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.444321Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "iling",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.48795Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_point",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.530158Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "(",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.573318Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "liquid",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.616297Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_name",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.659527Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "='",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.702422Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "poly",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.745894Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ju",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.788811Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "ice",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.831618Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "',",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.874469Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " c",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.917372Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "elsius",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.960558Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "=True",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:37.004223Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": ")]",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:37.046563Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 845522667,
          "load_duration": 47784875,
          "prompt_eval_count": 511,
          "prompt_eval_duration": 66135292,
          "eval_count": 18,
          "eval_duration": 730999291,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
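Joined together, the "response" fragments above spell out the tool call the test expects: [get_boiling_point(liquid_name='polyjuice', celsius=True)]. A toy parser for that bracketed, Python-like call syntax (not llama-stack's real parser, just a sketch of how the pieces fit):

import ast
import re


def parse_tool_call(text: str):
    """Parse "[name(kw1=v1, kw2=v2)]" into (name, {kw: value})."""
    match = re.fullmatch(r"\[(\w+)\((.*)\)\]", text.strip())
    if not match:
        return None
    name, args_src = match.groups()
    # The argument list is Python-like, so Python's own parser can read it.
    call = ast.parse(f"f({args_src})", mode="eval").body
    args = {kw.arg: ast.literal_eval(kw.value) for kw in call.keywords}
    return name, args


print(parse_tool_call("[get_boiling_point(liquid_name='polyjuice', celsius=True)]"))
# ('get_boiling_point', {'liquid_name': 'polyjuice', 'celsius': True})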
tests/integration/recordings/responses/bd356b27a085.json (new file, 167 lines added)
@@ -0,0 +1,167 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.850399Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "How",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.89419Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " can",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.938049Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " I",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.980392Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " assist",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.023004Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " you",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.065467Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": " further",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.108189Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "?",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:36.150902Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 468910417,
          "load_duration": 93969000,
          "prompt_eval_count": 479,
          "prompt_eval_duration": 72596750,
          "eval_count": 8,
          "eval_duration": 301590375,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
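Because the request sets "raw": true, the client renders the Llama 3.2 chat template itself: each turn is wrapped in <|start_header_id|>role<|end_header_id|> ... <|eot_id|>, tool output is fed back under the "ipython" role, and the prompt is left open for the assistant's next turn. A simplified sketch of that rendering (an assumption for illustration, not the project's actual prompt builder):

def render_llama3_prompt(messages: list[dict[str, str]]) -> str:
    """Render chat messages into the raw Llama 3.2 prompt format."""
    out = "<|begin_of_text|>"
    for m in messages:
        out += f"<|start_header_id|>{m['role']}<|end_header_id|>\n\n{m['content']}<|eot_id|>"
    # Leave the prompt open so the model generates the assistant's next turn.
    return out + "<|start_header_id|>assistant<|end_header_id|>\n\n"


# Roughly the turns behind the recording above; the model then streams back
# "How can I assist you further?".
print(render_llama3_prompt([
    {"role": "system", "content": "You are a helpful assistant. ..."},
    {"role": "user", "content": "Say hi to the world. Use tools to do so."},
    {"role": "assistant", "content": '[greet_everyone(url="world")]'},
    {"role": "ipython", "content": "Hello, world!"},
]))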
@@ -11,26 +11,7 @@
       "body": {
         "__type__": "ollama._types.ProcessResponse",
         "__data__": {
-          "models": [
-            {
-              "model": "llama3.2:1b",
-              "name": "llama3.2:1b",
-              "digest": "baf6a787fdffd633537aa2eb51cfd54cb93ff08e28040095462bb63daf552878",
-              "expires_at": "2025-07-29T16:28:09.564177-07:00",
-              "size": 2770397184,
-              "size_vram": 2770397184,
-              "details": {
-                "parent_model": "",
-                "format": "gguf",
-                "family": "llama",
-                "families": [
-                  "llama"
-                ],
-                "parameter_size": "1.2B",
-                "quantization_level": "Q8_0"
-              }
-            }
-          ]
+          "models": []
         }
       },
       "is_streaming": false
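This hunk updates a recording of Ollama's running-models call (ollama._types.ProcessResponse, i.e. `ollama ps` / GET /api/ps): the previously loaded llama3.2:1b entry is gone and the list is now empty. A hedged sketch of the live equivalent, assuming a recent ollama Python client and a local server:

import ollama

# Equivalent of `ollama ps`: models currently loaded into memory.
running = ollama.ps()
if not running.models:
    print("no models currently loaded")
for m in running.models:
    print(m.model, m.size_vram, m.expires_at)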
tests/integration/recordings/responses/ef757a75ed08.json (new file, 185 lines added)
@@ -0,0 +1,185 @@
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/generate",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "raw": true,
      "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
      "options": {
        "temperature": 0.0
      },
      "stream": true
    },
    "endpoint": "/api/generate",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.212563Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "[g",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.254896Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "reet",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.297152Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "_every",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.339477Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "one",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.382245Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "(url",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.423387Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "=\"",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.465286Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "world",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.507249Z",
          "done": false,
          "done_reason": null,
          "total_duration": null,
          "load_duration": null,
          "prompt_eval_count": null,
          "prompt_eval_duration": null,
          "eval_count": null,
          "eval_duration": null,
          "response": "\")]",
          "thinking": null,
          "context": null
        }
      },
      {
        "__type__": "ollama._types.GenerateResponse",
        "__data__": {
          "model": "llama3.2:3b-instruct-fp16",
          "created_at": "2025-07-29T23:46:35.549072Z",
          "done": true,
          "done_reason": "stop",
          "total_duration": 5519843458,
          "load_duration": 4110366375,
          "prompt_eval_count": 456,
          "prompt_eval_duration": 1070783708,
          "eval_count": 9,
          "eval_duration": 337120750,
          "response": "",
          "thinking": null,
          "context": null
        }
      }
    ],
    "is_streaming": true
  }
}
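This last recording captures the first assistant turn: given the tool definitions and the user request, the model streams back [greet_everyone(url="world")] one fragment at a time. A hedged sketch of re-issuing the recorded request with a recent ollama Python client; temperature 0.0 and the pre-rendered raw prompt are what make the output deterministic enough to record:

import ollama

# Abridged stand-in for the full raw prompt string captured in the recording above.
PROMPT = "<|begin_of_text|>...<|start_header_id|>assistant<|end_header_id|>\n\n"

stream = ollama.generate(
    model="llama3.2:3b-instruct-fp16",
    prompt=PROMPT,
    raw=True,
    stream=True,
    options={"temperature": 0.0},
)
print("".join(chunk.response for chunk in stream))
# With the full recorded prompt this concatenates to: [greet_everyone(url="world")]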