# Problem

The current inline provider appends the user-provided instructions to the messages as a system prompt, but the returned response object does not contain the `instructions` field (as specified in the OpenAI Responses spec).

# What does this PR do?

This pull request adds the `instructions` field to the response object definition and updates the inline provider accordingly. It also ensures that instructions from a previous response are not carried over to the next response (as specified in the OpenAI spec).

Closes #[3566](https://github.com/llamastack/llama-stack/issues/3566)

## Test Plan

- Tested manually for changes in the model response w.r.t. the supplied `instructions` field.
- Added a unit test to check that instructions from a previous response are not carried over to the next response.
- Added integration tests to check the `instructions` parameter in the returned response object.
- Added new recordings for the integration tests.

---------

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
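For illustration, here is a minimal sketch of the intended behavior, using the OpenAI Python client pointed at a llama-stack server. This is not code from the PR: the base URL, port, model name, and placeholder API key are assumptions; adjust them for your deployment.

```python
from openai import OpenAI

# Sketch only: the base URL below is an assumption; point it at wherever your
# llama-stack deployment serves the OpenAI-compatible Responses API.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

first = client.responses.create(
    model="llama3.2:3b-instruct-fp16",
    instructions="Answer in exactly one sentence.",
    input="What is the boiling point of water?",
)
# With this change, the returned response object echoes the instructions
# it was created with, matching the OpenAI Responses spec.
assert first.instructions == "Answer in exactly one sentence."

# Chaining via previous_response_id must NOT inherit the earlier
# instructions; the new response starts with an empty instructions field.
second = client.responses.create(
    model="llama3.2:3b-instruct-fp16",
    input="And of ethanol?",
    previous_response_id=first.id,
)
assert second.instructions is None
```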
121 lines · 3.5 KiB · JSON · Generated
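One of the new recordings is shown below: the captured streaming chat-completion exchange between the integration test `test_create_turn_response` and a local Ollama server.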
{
  "test_id": "tests/integration/agents/test_agents.py::test_create_turn_response[ollama/llama3.2:3b-instruct-fp16-client_tools1]",
  "request": {
    "method": "POST",
    "url": "http://0.0.0.0:11434/v1/v1/chat/completions",
    "headers": {},
    "body": {
      "model": "llama3.2:3b-instruct-fp16",
      "messages": [
        {
          "role": "system",
          "content": "You are a helpful assistant"
        },
        {
          "role": "user",
          "content": "Call get_boiling_point_with_metadata tool and answer What is the boiling point of polyjuice?"
        }
      ],
      "max_tokens": 512,
      "stream": true,
      "temperature": 0.0001,
      "tool_choice": "auto",
      "tools": [
        {
          "type": "function",
          "function": {
            "name": "get_boiling_point_with_metadata",
            "description": "Returns the boiling point of a liquid in Celcius or Fahrenheit",
            "parameters": {
              "type": "object",
              "properties": {
                "liquid_name": {
                  "type": "string",
                  "description": "The name of the liquid"
                },
                "celcius": {
                  "type": "boolean",
                  "description": "Whether to return the boiling point in Celcius"
                }
              },
              "required": [
                "liquid_name"
              ]
            }
          }
        }
      ],
      "top_p": 0.9
    },
    "endpoint": "/v1/chat/completions",
    "model": "llama3.2:3b-instruct-fp16"
  },
  "response": {
    "body": [
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-697a25dd7f0f",
          "choices": [
            {
              "delta": {
                "content": "",
                "function_call": null,
                "refusal": null,
                "role": "assistant",
                "tool_calls": [
                  {
                    "index": 0,
                    "id": "call_klhbln13",
                    "function": {
                      "arguments": "{\"celcius\":false,\"liquid_name\":\"polyjuice\"}",
                      "name": "get_boiling_point_with_metadata"
                    },
                    "type": "function"
                  }
                ]
              },
              "finish_reason": null,
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "llama3.2:3b-instruct-fp16",
          "object": "chat.completion.chunk",
          "service_tier": null,
          "system_fingerprint": "fp_ollama",
          "usage": null
        }
      },
      {
        "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
          "id": "rec-697a25dd7f0f",
          "choices": [
            {
              "delta": {
                "content": "",
                "function_call": null,
                "refusal": null,
                "role": "assistant",
                "tool_calls": null
              },
              "finish_reason": "tool_calls",
              "index": 0,
              "logprobs": null
            }
          ],
          "created": 0,
          "model": "llama3.2:3b-instruct-fp16",
          "object": "chat.completion.chunk",
          "service_tier": null,
          "system_fingerprint": "fp_ollama",
          "usage": null
        }
      }
    ],
    "is_streaming": true
  },
  "id_normalization_mapping": {}
}
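For context, the recorded exchange above can be reproduced directly with the OpenAI Python client pointed at Ollama. This is a sketch, not part of the PR: the client setup and the placeholder API key are assumptions, while the request body mirrors the recording.

```python
from openai import OpenAI

# Ollama exposes an OpenAI-compatible endpoint; the API key is a
# placeholder (Ollama ignores it). The tool schema is copied verbatim
# from the recording above.
client = OpenAI(base_url="http://0.0.0.0:11434/v1", api_key="ollama")

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_boiling_point_with_metadata",
            "description": "Returns the boiling point of a liquid in Celcius or Fahrenheit",
            "parameters": {
                "type": "object",
                "properties": {
                    "liquid_name": {"type": "string", "description": "The name of the liquid"},
                    "celcius": {"type": "boolean", "description": "Whether to return the boiling point in Celcius"},
                },
                "required": ["liquid_name"],
            },
        },
    }
]

stream = client.chat.completions.create(
    model="llama3.2:3b-instruct-fp16",
    messages=[
        {"role": "system", "content": "You are a helpful assistant"},
        {
            "role": "user",
            "content": "Call get_boiling_point_with_metadata tool and answer "
            "What is the boiling point of polyjuice?",
        },
    ],
    tools=tools,
    tool_choice="auto",
    temperature=0.0001,
    top_p=0.9,
    max_tokens=512,
    stream=True,
)

# In the recording, the first chunk carries the tool call and the second
# closes the turn with finish_reason="tool_calls".
for chunk in stream:
    choice = chunk.choices[0]
    if choice.delta.tool_calls:
        print(choice.delta.tool_calls[0].function)
    if choice.finish_reason:
        print("finish_reason:", choice.finish_reason)
```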