From c2bd31eb5c5ae95bb2641f9b09238906fd25cfff Mon Sep 17 00:00:00 2001
From: ilya-kolchinsky
Date: Tue, 4 Mar 2025 21:23:10 +0100
Subject: [PATCH] Enabled the default preprocessors in the ollama
 configuration.

---
 llama_stack/templates/ollama/ollama.py            | 13 +++++++++++++
 llama_stack/templates/ollama/run-with-safety.yaml |  6 +++++-
 llama_stack/templates/ollama/run.yaml             |  6 +++++-
 llama_stack/templates/template.py                 |  3 +++
 4 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index bf9bc8e2d..708302eb9 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -7,6 +7,7 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -80,6 +81,16 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
+    default_preprocessors = [
+        PreprocessorInput(
+            preprocessor_id="builtin::basic",
+            provider_id="basic",
+        ),
+        PreprocessorInput(
+            preprocessor_id="builtin::chunking",
+            provider_id="simple_chunking",
+        ),
+    ]
 
     return DistributionTemplate(
         name=name,
@@ -96,6 +107,7 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -130,6 +142,7 @@ def get_distribution_template() -> DistributionTemplate:
                     ),
                 ],
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml
index 32b635a04..e7f72e723 100644
--- a/llama_stack/templates/ollama/run-with-safety.yaml
+++ b/llama_stack/templates/ollama/run-with-safety.yaml
@@ -127,6 +127,10 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors: []
+preprocessors:
+- preprocessor_id: builtin::basic
+  provider_id: basic
+- preprocessor_id: builtin::chunking
+  provider_id: simple_chunking
 server:
   port: 8321
diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml
index 2bb4df691..adb541114 100644
--- a/llama_stack/templates/ollama/run.yaml
+++ b/llama_stack/templates/ollama/run.yaml
@@ -116,6 +116,10 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors: []
+preprocessors:
+- preprocessor_id: builtin::basic
+  provider_id: basic
+- preprocessor_id: builtin::chunking
+  provider_id: simple_chunking
 server:
   port: 8321
diff --git a/llama_stack/templates/template.py b/llama_stack/templates/template.py
index 2afb84a63..4d6d85ade 100644
--- a/llama_stack/templates/template.py
+++ b/llama_stack/templates/template.py
@@ -12,6 +12,7 @@ import yaml
 from pydantic import BaseModel, Field
 
 from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     Api,
     BuildConfig,
@@ -56,6 +57,7 @@ class RunConfigSettings(BaseModel):
     default_models: Optional[List[ModelInput]] = None
     default_shields: Optional[List[ShieldInput]] = None
     default_tool_groups: Optional[List[ToolGroupInput]] = None
+    default_preprocessors: Optional[List[PreprocessorInput]] = None
 
     def run_config(
         self,
@@ -113,6 +115,7 @@ class RunConfigSettings(BaseModel):
             models=self.default_models or [],
             shields=self.default_shields or [],
             tool_groups=self.default_tool_groups or [],
+            preprocessors=self.default_preprocessors or [],
         )
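
For reviewers, a minimal sketch (not part of the patch) of the `PreprocessorInput` shape implied by the constructor calls above. The real model lives in `llama_stack.apis.preprocessing.preprocessors`; this assumes it is a pydantic `BaseModel` like the other `*Input` types, and any fields beyond the two used in this diff are unknown:

```python
# Hypothetical sketch of PreprocessorInput, inferred from its usage in this
# patch; only preprocessor_id and provider_id are confirmed by the diff.
from typing import List

from pydantic import BaseModel


class PreprocessorInput(BaseModel):
    preprocessor_id: str  # e.g. "builtin::basic" or "builtin::chunking"
    provider_id: str      # e.g. "basic" or "simple_chunking"


# Mirrors the defaults this patch registers for the ollama template; the
# run_config() change then serializes them into the "preprocessors:" section
# of run.yaml and run-with-safety.yaml.
default_preprocessors: List[PreprocessorInput] = [
    PreprocessorInput(preprocessor_id="builtin::basic", provider_id="basic"),
    PreprocessorInput(preprocessor_id="builtin::chunking", provider_id="simple_chunking"),
]
```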