Restrict the changes to the new preprocessing API only.

ilya-kolchinsky 2025-04-03 12:19:08 +02:00
parent 2008cd7921
commit 863f87aa15
90 changed files with 104 additions and 1138 deletions


@@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "fireworks"
@@ -97,16 +95,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
-default_preprocessors = [
-PreprocessorInput(
-preprocessor_id="builtin::basic",
-provider_id="basic",
-),
-PreprocessorInput(
-preprocessor_id="builtin::chunking",
-provider_id="simple_chunking",
-),
-]
return DistributionTemplate(
name=name,
@@ -125,7 +113,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=default_models + [embedding_model],
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
default_tool_groups=default_tool_groups,
-default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@@ -171,7 +158,6 @@ def get_distribution_template() -> DistributionTemplate:
),
],
default_tool_groups=default_tool_groups,
-default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={