Updated the configuration templates to include the builtin preprocessors.

ilya-kolchinsky 2025-03-07 16:08:14 +01:00
parent e895bb111c
commit 3f15349c9d
72 changed files with 632 additions and 25 deletions

@@ -6,6 +6,7 @@
 from pathlib import Path
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import Provider, ToolGroupInput
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
@@ -29,6 +30,7 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
+        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     inference_provider = Provider(
@@ -54,6 +56,16 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
+    default_preprocessors = [
+        PreprocessorInput(
+            preprocessor_id="builtin::basic",
+            provider_id="basic",
+        ),
+        PreprocessorInput(
+            preprocessor_id="builtin::chunking",
+            provider_id="simple_chunking",
+        ),
+    ]
     default_models = get_model_registry(available_models)
     return DistributionTemplate(
@@ -71,6 +83,7 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models,
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
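
For readers skimming the diff, the sketch below restates the new wiring: each PreprocessorInput binds a builtin preprocessor ID to one of the inline providers registered under the new "preprocessing" key. Only the field names preprocessor_id and provider_id and the ID/provider pairs come from this commit; the stand-in dataclass and the demo loop are assumptions added so the snippet runs without llama_stack installed.

# Illustrative sketch only: PreprocessorInput is stood in by a local
# dataclass so the example runs standalone. Field names mirror the
# constructor arguments used in the diff above.
from dataclasses import dataclass


@dataclass
class PreprocessorInput:
    preprocessor_id: str  # builtin preprocessor identifier, e.g. "builtin::basic"
    provider_id: str      # ID of the provider that serves it


# The commit registers two builtin preprocessors, each backed by one of
# the inline providers listed under the new "preprocessing" key:
#   inline::basic           -> provider_id "basic"
#   inline::simple_chunking -> provider_id "simple_chunking"
default_preprocessors = [
    PreprocessorInput(preprocessor_id="builtin::basic", provider_id="basic"),
    PreprocessorInput(preprocessor_id="builtin::chunking", provider_id="simple_chunking"),
]

for p in default_preprocessors:
    print(f"{p.preprocessor_id} served by provider {p.provider_id}")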