Merge branch 'refs/heads/main' into preprocessors

# Conflicts:
#	llama_stack/distribution/routers/routers.py
#	llama_stack/templates/ollama/build.yaml
#	llama_stack/templates/ollama/run-with-safety.yaml
#	llama_stack/templates/ollama/run.yaml
#	llama_stack/templates/remote-vllm/build.yaml
#	llama_stack/templates/remote-vllm/run-with-safety.yaml
#	llama_stack/templates/remote-vllm/run.yaml
#	llama_stack/templates/together/build.yaml
#	llama_stack/templates/together/run-with-safety.yaml
#	llama_stack/templates/together/run.yaml
commit 6b9f673fdb
Author: ilya-kolchinsky
Date:   2025-03-07 16:20:30 +01:00

313 changed files with 181388 additions and 7064 deletions

@@ -38,6 +38,7 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
+            "remote::wolfram-alpha",
         ],
         "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
@@ -57,7 +58,7 @@ def get_distribution_template() -> DistributionTemplate:
     vector_io_provider = Provider(
         provider_id="faiss",
         provider_type="inline::faiss",
-        config=FaissVectorIOConfig.sample_run_config(f"distributions/{name}"),
+        config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
     )

     inference_model = ModelInput(
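The hunk above changes where the generated run.yaml stores the FAISS state: from a path relative to the working directory to a stable per-user location under ~/.llama. A minimal sketch of the effect, assuming sample_run_config simply interpolates the directory it is given into the kvstore path (the helper below and its return shape are illustrative assumptions, not taken from this diff):

import os

def sample_run_config_sketch(distro_dir: str) -> dict:
    # Hypothetical stand-in for FaissVectorIOConfig.sample_run_config:
    # the generated run.yaml points the FAISS sqlite kvstore at this path.
    db_path = os.path.join(os.path.expanduser(distro_dir), "faiss_store.db")
    return {"kvstore": {"type": "sqlite", "db_path": db_path}}

# Before this change: sample_run_config_sketch(f"distributions/{name}")
#   -> a path relative to wherever the server happens to be started.
# After:  sample_run_config_sketch(f"~/.llama/distributions/{name}")
#   -> a stable per-user location under ~/.llama.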
@@ -89,6 +90,10 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::code_interpreter",
             provider_id="code-interpreter",
         ),
+        ToolGroupInput(
+            toolgroup_id="builtin::wolfram_alpha",
+            provider_id="wolfram-alpha",
+        ),
     ]
     default_preprocessors = [
         PreprocessorInput(
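With builtin::wolfram_alpha added as a default tool group, distributions built from this template expose Wolfram Alpha alongside the existing built-ins. A hedged sketch of exercising it from a client, assuming the llama-stack-client Python API, a server on the default port, and a Wolfram Alpha API key configured server-side; the "query" argument is an assumption about the provider's expected parameters:

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# The template registers builtin::wolfram_alpha by default, so no explicit
# toolgroups.register() call should be needed; listing confirms it is there.
for tg in client.toolgroups.list():
    print(tg.identifier)

# Invoke the tool directly through the tool runtime.
result = client.tool_runtime.invoke_tool(
    tool_name="wolfram_alpha",
    kwargs={"query": "solve x^2 - 4 = 0"},
)
print(result)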