Enable the default preprocessors in the ollama distribution template.

The template now registers builtin::basic and builtin::chunking by default, RunConfigSettings gains a default_preprocessors field, and both generated run configs pick up the new entries.

ilya-kolchinsky 2025-03-04 21:23:10 +01:00
parent e16bdf138f
commit c2bd31eb5c
4 changed files with 26 additions and 2 deletions


@@ -7,6 +7,7 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -80,6 +81,16 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
+    default_preprocessors = [
+        PreprocessorInput(
+            preprocessor_id="builtin::basic",
+            provider_id="basic",
+        ),
+        PreprocessorInput(
+            preprocessor_id="builtin::chunking",
+            provider_id="simple_chunking",
+        ),
+    ]
 
     return DistributionTemplate(
         name=name,
@@ -96,6 +107,7 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -130,6 +142,7 @@ def get_distribution_template() -> DistributionTemplate:
                     ),
                 ],
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
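
Taken together, the three template hunks above register two built-in preprocessors and hand them to both run configurations. A minimal sketch of the list they build (the import path and field names come straight from the diff; the print loop mirrors how the generated run configs below serialize the entries):

# Minimal sketch: the two defaults the ollama template now registers.
from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput

default_preprocessors = [
    PreprocessorInput(preprocessor_id="builtin::basic", provider_id="basic"),
    PreprocessorInput(preprocessor_id="builtin::chunking", provider_id="simple_chunking"),
]

# Render the entries the way the generated run configs serialize them:
for p in default_preprocessors:
    print(f"- preprocessor_id: {p.preprocessor_id}")
    print(f"  provider_id: {p.provider_id}")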


@@ -127,6 +127,10 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors: []
+preprocessors:
+- preprocessor_id: builtin::basic
+  provider_id: basic
+- preprocessor_id: builtin::chunking
+  provider_id: simple_chunking
 server:
   port: 8321
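
The second generated run config receives the identical block; per the template diff above, both run.yaml and run-with-safety.yaml are rendered from the same default_preprocessors list: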


@@ -116,6 +116,10 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors: []
+preprocessors:
+- preprocessor_id: builtin::basic
+  provider_id: basic
+- preprocessor_id: builtin::chunking
+  provider_id: simple_chunking
 server:
   port: 8321
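
Before the final diff (the RunConfigSettings plumbing in the template machinery), here is one way to check that a regenerated config picked up the defaults. A sketch only: the file path is hypothetical, and it assumes PyYAML is available.

import yaml

# Path is hypothetical; point it at your distribution's generated run config.
with open("run.yaml") as f:
    cfg = yaml.safe_load(f)

# Expected per the diffs above.
assert cfg["preprocessors"] == [
    {"preprocessor_id": "builtin::basic", "provider_id": "basic"},
    {"preprocessor_id": "builtin::chunking", "provider_id": "simple_chunking"},
]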


@@ -12,6 +12,7 @@ import yaml
 from pydantic import BaseModel, Field
 
 from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     Api,
     BuildConfig,
@@ -56,6 +57,7 @@ class RunConfigSettings(BaseModel):
     default_models: Optional[List[ModelInput]] = None
     default_shields: Optional[List[ShieldInput]] = None
     default_tool_groups: Optional[List[ToolGroupInput]] = None
+    default_preprocessors: Optional[List[PreprocessorInput]] = None
 
     def run_config(
         self,
@@ -113,6 +115,7 @@
             models=self.default_models or [],
             shields=self.default_shields or [],
             tool_groups=self.default_tool_groups or [],
+            preprocessors=self.default_preprocessors or [],
         )
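
The run_config() change reuses the same or-empty-list fallback as the other defaults, so templates that never set default_preprocessors keep emitting preprocessors: []. A simplified, self-contained sketch of the pattern (the real RunConfigSettings carries many more fields and builds a full stack run config, not a dict):

from typing import List, Optional

from pydantic import BaseModel

class PreprocessorInput(BaseModel):
    preprocessor_id: str
    provider_id: str

class RunConfigSettings(BaseModel):
    default_preprocessors: Optional[List[PreprocessorInput]] = None

    def run_config(self) -> dict:
        # As in the diff: fall back to an empty list when no defaults are set.
        return {"preprocessors": [p.model_dump() for p in (self.default_preprocessors or [])]}

print(RunConfigSettings().run_config())  # -> {'preprocessors': []}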