From 934446ddb4cf8801e011083fd6ae8d4a65724831 Mon Sep 17 00:00:00 2001
From: Ben Browning
Date: Tue, 29 Apr 2025 04:45:28 -0400
Subject: [PATCH] fix: ollama still using tools with `tool_choice="none"` (#2047)

# What does this PR do?

In our OpenAI API verification tests, ollama was still calling tools
even when `tool_choice="none"` was passed in its chat completion
requests. Because ollama does not respect `tool_choice` properly, this
adjusts our provider implementation to remove `tools` from the request
when `tool_choice="none"` is passed, so that ollama does not attempt
to call any of those tools.

## Test Plan

I tested this with a couple of Llama models, using both our OpenAI
completions integration tests and our verification test suites.

### OpenAI Completions / Chat Completions integration tests

These all passed before, and still do.

```
INFERENCE_MODEL="llama3.2:3b-instruct-fp16" \
llama stack build --template ollama --image-type venv --run
```

```
LLAMA_STACK_CONFIG=http://localhost:8321 \
python -m pytest -v \
  tests/integration/inference/test_openai_completion.py \
  --text-model "llama3.2:3b-instruct-fp16"
```

### OpenAI API Verification test suite

The `test_chat_*_tool_choice_none` OpenAI API verification tests now
pass, where they failed before. See
https://github.com/bbrowning/llama-stack-tests/blob/main/openai-api-verification/2025-04-27.md#ollama-llama-stack
for an example of these failures from a recent nightly CI run.

```
INFERENCE_MODEL="llama3.3:70b-instruct-q3_K_M" \
llama stack build --template ollama --image-type venv --run
```

```
cat <<-EOF > tests/verifications/conf/ollama-llama-stack.yaml
base_url: http://localhost:8321/v1/openai/v1
api_key_var: OPENAI_API_KEY
models:
- llama3.3:70b-instruct-q3_K_M
model_display_names:
  llama3.3:70b-instruct-q3_K_M: Llama-3.3-70B-Instruct
test_exclusions:
  llama3.3:70b-instruct-q3_K_M:
  - test_chat_non_streaming_image
  - test_chat_streaming_image
  - test_chat_multi_turn_multiple_images
EOF
```

```
python -m pytest -s -v \
  'tests/verifications/openai_api/test_chat_completion.py' \
  --provider=ollama-llama-stack
```

Signed-off-by: Ben Browning

---
 llama_stack/providers/remote/inference/ollama/ollama.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index cdfe7b568..e915b3098 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -433,6 +433,12 @@ class OllamaInferenceAdapter(
         user: Optional[str] = None,
     ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
         model_obj = await self._get_model(model)
+
+        # ollama still makes tool calls even when tool_choice is "none"
+        # so we need to remove the tools in that case
+        if tool_choice == "none" and tools is not None:
+            tools = None
+
         params = {
             k: v
             for k, v in {
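
For reference, a minimal sketch of how the fixed behavior can be
exercised end-to-end with the OpenAI Python client, assuming a Llama
Stack server at the base URL from the Test Plan above and the
`llama3.2:3b-instruct-fp16` model; the `get_weather` tool definition is
a hypothetical stand-in, not part of the patch:

```python
from openai import OpenAI

# Base URL taken from the verification config above; Llama Stack does
# not validate the API key here, so any placeholder value works.
client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",
    api_key="not-needed",
)

# A hypothetical tool definition, present only to tempt the model.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = client.chat.completions.create(
    model="llama3.2:3b-instruct-fp16",
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    tools=tools,
    tool_choice="none",  # with this fix, tools are stripped from the ollama request
)

# Before the fix, ollama could still return a tool call here; after it,
# the reply should be plain text with no tool_calls populated.
assert response.choices[0].message.tool_calls is None
print(response.choices[0].message.content)
```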