From d2b7c5aeae956abb29b5006dc041e6d08a938454 Mon Sep 17 00:00:00 2001
From: Kai Wu
Date: Mon, 18 Nov 2024 18:55:23 -0800
Subject: [PATCH] add quantized model ollama support (#471)

# What does this PR do?

Adds more quantized model support for Ollama: the default Ollama tags (e.g. `llama3.2:3b`), which Ollama serves as 4-bit quantized builds, are now registered as aliases for the corresponding core instruct models, alongside the existing `-instruct-fp16` tags.

- [ ] Addresses issue (#issue)

## Test Plan

Tested with the Ollama Docker container running the llama3.2:3b 4-bit model:

```
root@docker-desktop:/# ollama ps
NAME           ID              SIZE      PROCESSOR    UNTIL
llama3.2:3b    a80c4f17acd5    3.5 GB    100% CPU     3 minutes from now
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
---
 .../remote/inference/ollama/ollama.py         | 20 +++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index 27bf0088e..70a091b77 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -44,10 +44,18 @@ model_aliases = [
         "llama3.1:8b-instruct-fp16",
         CoreModelId.llama3_1_8b_instruct.value,
     ),
+    build_model_alias(
+        "llama3.1:8b",
+        CoreModelId.llama3_1_8b_instruct.value,
+    ),
     build_model_alias(
         "llama3.1:70b-instruct-fp16",
         CoreModelId.llama3_1_70b_instruct.value,
     ),
+    build_model_alias(
+        "llama3.1:70b",
+        CoreModelId.llama3_1_70b_instruct.value,
+    ),
     build_model_alias(
         "llama3.2:1b-instruct-fp16",
         CoreModelId.llama3_2_1b_instruct.value,
@@ -56,6 +64,14 @@ model_aliases = [
         "llama3.2:3b-instruct-fp16",
         CoreModelId.llama3_2_3b_instruct.value,
     ),
+    build_model_alias(
+        "llama3.2:1b",
+        CoreModelId.llama3_2_1b_instruct.value,
+    ),
+    build_model_alias(
+        "llama3.2:3b",
+        CoreModelId.llama3_2_3b_instruct.value,
+    ),
     build_model_alias(
         "llama-guard3:8b",
         CoreModelId.llama_guard_3_8b.value,
@@ -68,6 +84,10 @@ model_aliases = [
         "x/llama3.2-vision:11b-instruct-fp16",
         CoreModelId.llama3_2_11b_vision_instruct.value,
     ),
+    build_model_alias(
+        "llama3.2-vision",
+        CoreModelId.llama3_2_11b_vision_instruct.value,
+    ),
 ]
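
For context, here is a minimal, self-contained sketch of what each `build_model_alias` entry in the diff expresses: several Ollama provider tags can map to the same core model. The `ModelAlias` dataclass, the `provider_tag_for` helper, and the literal string `"Llama3.2-3B-Instruct"` are illustrative assumptions; the real `build_model_alias` helper and `CoreModelId` enum live in `llama_stack`/`llama_models` and may differ in detail.

```python
# Illustrative stand-in for the alias table exercised by this patch; not the
# actual llama_stack implementation.
from dataclasses import dataclass


@dataclass(frozen=True)
class ModelAlias:
    provider_model_id: str  # tag Ollama knows, e.g. "llama3.2:3b"
    llama_model: str        # core model identifier it maps to


def build_model_alias(provider_model_id: str, llama_model: str) -> ModelAlias:
    return ModelAlias(provider_model_id, llama_model)


# Mirrors two entries from the diff: after this change, the default
# (quantized) tag and the fp16 tag both resolve to the same core model.
ALIASES = [
    build_model_alias("llama3.2:3b", "Llama3.2-3B-Instruct"),
    build_model_alias("llama3.2:3b-instruct-fp16", "Llama3.2-3B-Instruct"),
]


def provider_tags_for(core_model: str) -> list[str]:
    """Return every Ollama tag registered for a given core model."""
    return [a.provider_model_id for a in ALIASES if a.llama_model == core_model]


if __name__ == "__main__":
    # Both the quantized and fp16 tags are now valid ways to serve the model.
    print(provider_tags_for("Llama3.2-3B-Instruct"))
    # ['llama3.2:3b', 'llama3.2:3b-instruct-fp16']
```

The design point the sketch captures: the provider keeps a flat list of aliases rather than a one-to-one mapping, so adding quantized support is purely additive — new `build_model_alias` entries are appended without touching the existing fp16 entries.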