diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index 1c5d26a84..f53ed4e14 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -73,14 +73,6 @@ model_aliases = [
         "llama3.2:3b",
         CoreModelId.llama3_2_3b_instruct.value,
     ),
-    build_model_alias_with_just_provider_model_id(
-        "llama-guard3:8b",
-        CoreModelId.llama_guard_3_8b.value,
-    ),
-    build_model_alias_with_just_provider_model_id(
-        "llama-guard3:1b",
-        CoreModelId.llama_guard_3_1b.value,
-    ),
     build_model_alias(
         "llama3.2-vision:11b-instruct-fp16",
         CoreModelId.llama3_2_11b_vision_instruct.value,
@@ -89,6 +81,16 @@ model_aliases = [
         "llama3.2-vision",
         CoreModelId.llama3_2_11b_vision_instruct.value,
     ),
+    # The Llama Guard models don't have their full fp16 versions
+    # so we are going to alias their default version to the canonical SKU
+    build_model_alias(
+        "llama-guard3:8b",
+        CoreModelId.llama_guard_3_8b.value,
+    ),
+    build_model_alias(
+        "llama-guard3:1b",
+        CoreModelId.llama_guard_3_1b.value,
+    ),
 ]