Updated the configuration templates to include the builtin preprocessors.

ilya-kolchinsky 2025-03-07 16:08:14 +01:00
parent e895bb111c
commit 3f15349c9d
72 changed files with 632 additions and 25 deletions

@@ -7,6 +7,7 @@
 from typing import List, Tuple

 from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.preprocessing.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -91,6 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
+        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "dev"

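Aside for readers of this template: the kind::name strings in these provider lists ("inline::rag-runtime", "remote::model-context-protocol", and the new "inline::basic" / "inline::simple_chunking") split on the "::" separator into a provider kind and a provider name. The helper below only illustrates that naming convention; it is not part of llama-stack's API:

def split_provider_type(provider_type: str) -> tuple[str, str]:
    # Splits "inline::simple_chunking" into ("inline", "simple_chunking").
    # Illustrative only; llama-stack resolves these strings internally.
    kind, _, name = provider_type.partition("::")
    return kind, name


assert split_provider_type("inline::simple_chunking") == ("inline", "simple_chunking")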
@@ -119,6 +121,16 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
+    default_preprocessors = [
+        PreprocessorInput(
+            preprocessor_id="builtin::basic",
+            provider_id="basic",
+        ),
+        PreprocessorInput(
+            preprocessor_id="builtin::chunking",
+            provider_id="simple_chunking",
+        ),
+    ]
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
         provider_id=embedding_provider.provider_id,
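The PreprocessorInput class itself lives in llama_stack.apis.preprocessing.preprocessors and is not shown in this diff. A plausible sketch of its shape, inferred only from how it is constructed in the hunk above (llama-stack datatypes are pydantic models; the field set and the optional default are assumptions):

from typing import Optional

from pydantic import BaseModel


class PreprocessorInput(BaseModel):
    # Inferred from usage in this hunk; the real class may carry more fields.
    preprocessor_id: str                # e.g. "builtin::basic"
    provider_id: Optional[str] = None   # e.g. "basic", a "preprocessing" provider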
@@ -145,6 +157,7 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
+                default_preprocessors=default_preprocessors,
                 default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
             ),
         },
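Note how the hunks line up: each provider_id in default_preprocessors ("basic", "simple_chunking") names one of the inline preprocessing providers enabled in the earlier providers hunk ("inline::basic", "inline::simple_chunking"). A hypothetical consistency check over a template, assuming the PreprocessorInput sketch above; this helper does not exist in the repo:

def check_preprocessor_wiring(providers: dict, default_preprocessors: list) -> None:
    # Every default preprocessor must point at an enabled preprocessing provider.
    enabled = {ptype.split("::", 1)[1] for ptype in providers.get("preprocessing", [])}
    for p in default_preprocessors:
        if p.provider_id not in enabled:
            raise ValueError(
                f"{p.preprocessor_id} references unknown provider {p.provider_id}"
            )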