From 863f87aa1574f9c7ab3741c7fbe0e2a4f2954ef5 Mon Sep 17 00:00:00 2001
From: ilya-kolchinsky
Date: Thu, 3 Apr 2025 12:19:08 +0200
Subject: [PATCH] Restrict the changes to the new preprocessing API only.

Keep the new preprocessing API itself, but revert everything that was layered
on top of it: the inline::basic and inline::simple_chunking providers, the
preprocessor_chain parameter of RAGToolRuntime.insert, and the preprocessing
dependency of the RAG tool runtime, which goes back to the built-in
content_from_doc/make_overlapped_chunks utilities. The distribution
templates, docs and API spec are updated accordingly.

---
 docs/_static/llama-stack-spec.html | 82 +++++-----
 docs/_static/llama-stack-spec.yaml | 42 +++--
 .../remote_hosted_distro/nvidia.md | 1 -
 .../self_hosted_distro/bedrock.md | 1 -
 .../self_hosted_distro/cerebras.md | 1 -
 .../self_hosted_distro/fireworks.md | 1 -
 .../distributions/self_hosted_distro/groq.md | 1 -
 .../self_hosted_distro/meta-reference-gpu.md | 1 -
 .../meta-reference-quantized-gpu.md | 1 -
 .../self_hosted_distro/ollama.md | 1 -
 .../self_hosted_distro/remote-vllm.md | 1 -
 .../self_hosted_distro/sambanova.md | 1 -
 .../distributions/self_hosted_distro/tgi.md | 1 -
 .../self_hosted_distro/together.md | 1 -
 llama_stack/apis/tools/rag_tool.py | 2 -
 llama_stack/distribution/routers/routers.py | 3 +-
 .../inline/preprocessing/basic/__init__.py | 18 ---
 .../inline/preprocessing/basic/basic.py | 151 ------------------
 .../inline/preprocessing/basic/config.py | 9 --
 .../preprocessing/simple_chunking/__init__.py | 18 ---
 .../preprocessing/simple_chunking/config.py | 11 --
 .../simple_chunking/simple_chunking.py | 116 --------------
 .../inline/tool_runtime/rag/__init__.py | 2 +-
 .../inline/tool_runtime/rag/memory.py | 62 ++-----
 .../providers/registry/preprocessing.py | 21 +--
 .../providers/registry/tool_runtime.py | 2 +-
 llama_stack/templates/bedrock/bedrock.py | 14 --
 llama_stack/templates/bedrock/build.yaml | 3 -
 llama_stack/templates/bedrock/run.yaml | 14 +-
 llama_stack/templates/cerebras/build.yaml | 3 -
 llama_stack/templates/cerebras/cerebras.py | 13 --
 llama_stack/templates/cerebras/run.yaml | 14 +-
 llama_stack/templates/ci-tests/build.yaml | 3 -
 llama_stack/templates/ci-tests/ci_tests.py | 13 --
 llama_stack/templates/ci-tests/run.yaml | 14 +-
 llama_stack/templates/dell/build.yaml | 3 -
 llama_stack/templates/dell/dell.py | 14 --
 .../templates/dell/run-with-safety.yaml | 14 +-
 llama_stack/templates/dell/run.yaml | 14 +-
 llama_stack/templates/dev/build.yaml | 3 -
 llama_stack/templates/dev/dev.py | 13 --
 llama_stack/templates/dev/run.yaml | 14 +-
 llama_stack/templates/fireworks/build.yaml | 3 -
 llama_stack/templates/fireworks/fireworks.py | 14 --
 .../templates/fireworks/run-with-safety.yaml | 14 +-
 llama_stack/templates/fireworks/run.yaml | 14 +-
 llama_stack/templates/groq/build.yaml | 3 -
 llama_stack/templates/groq/groq.py | 13 --
 llama_stack/templates/groq/run.yaml | 14 +-
 llama_stack/templates/hf-endpoint/build.yaml | 3 -
 .../templates/hf-endpoint/hf_endpoint.py | 14 --
 .../hf-endpoint/run-with-safety.yaml | 14 +-
 llama_stack/templates/hf-endpoint/run.yaml | 14 +-
 .../templates/hf-serverless/build.yaml | 3 -
 .../templates/hf-serverless/hf_serverless.py | 14 --
 .../hf-serverless/run-with-safety.yaml | 14 +-
 llama_stack/templates/hf-serverless/run.yaml | 14 +-
 .../templates/meta-reference-gpu/build.yaml | 3 -
 .../meta-reference-gpu/meta_reference.py | 14 --
 .../meta-reference-gpu/run-with-safety.yaml | 14 +-
 .../templates/meta-reference-gpu/run.yaml | 14 +-
 .../meta-reference-quantized-gpu/build.yaml | 3 -
 .../meta_reference.py | 13 --
 .../meta-reference-quantized-gpu/run.yaml | 14 +-
 llama_stack/templates/nvidia/build.yaml | 3 -
 llama_stack/templates/nvidia/nvidia.py | 14 --
 .../templates/nvidia/run-with-safety.yaml | 14 +-
 llama_stack/templates/nvidia/run.yaml | 14 +-
 llama_stack/templates/ollama/build.yaml | 3 -
 llama_stack/templates/ollama/ollama.py | 14 --
 .../templates/ollama/run-with-safety.yaml | 14 +-
 llama_stack/templates/ollama/run.yaml | 14 +-
 llama_stack/templates/remote-vllm/build.yaml | 3 -
 .../remote-vllm/run-with-safety.yaml | 14 +-
 llama_stack/templates/remote-vllm/run.yaml | 14 +-
 llama_stack/templates/remote-vllm/vllm.py | 14 --
 llama_stack/templates/sambanova/build.yaml | 3 -
 llama_stack/templates/sambanova/run.yaml | 14 +-
 llama_stack/templates/sambanova/sambanova.py | 13 --
 llama_stack/templates/tgi/build.yaml | 3 -
 .../templates/tgi/run-with-safety.yaml | 14 +-
 llama_stack/templates/tgi/run.yaml | 14 +-
 llama_stack/templates/tgi/tgi.py | 14 --
 llama_stack/templates/together/build.yaml | 3 -
 .../templates/together/run-with-safety.yaml | 14 +-
 llama_stack/templates/together/run.yaml | 14 +-
 llama_stack/templates/together/together.py | 14 --
 llama_stack/templates/vllm-gpu/build.yaml | 3 -
 llama_stack/templates/vllm-gpu/run.yaml | 14 +-
 llama_stack/templates/vllm-gpu/vllm.py | 13 --
 90 files changed, 104 insertions(+), 1138 deletions(-)
 delete mode 100644 llama_stack/providers/inline/preprocessing/basic/__init__.py
 delete mode 100644 llama_stack/providers/inline/preprocessing/basic/basic.py
 delete mode 100644 llama_stack/providers/inline/preprocessing/basic/config.py
 delete mode 100644 llama_stack/providers/inline/preprocessing/simple_chunking/__init__.py
 delete mode 100644 llama_stack/providers/inline/preprocessing/simple_chunking/config.py
 delete mode 100644 llama_stack/providers/inline/preprocessing/simple_chunking/simple_chunking.py

diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html
index d1e6291f0..5179049a8 100644
--- a/docs/_static/llama-stack-spec.html
+++ b/docs/_static/llama-stack-spec.html
@@ -8052,44 +8052,6 @@
       ],
       "title": "HealthInfo"
     },
-    "PreprocessorChainElement": {
-      "type": "object",
-      "properties": {
-        "preprocessor_id": {
-          "type": "string"
-        },
-        "options": {
-          "type": "object",
-          "additionalProperties": {
-            "oneOf": [
-              {
-                "type": "null"
-              },
-              {
-                "type": "boolean"
-              },
-              {
-                "type": "number"
-              },
-              {
-                "type": "string"
-              },
-              {
-                "type": "array"
-              },
-              {
-                "type": "object"
-              }
-            ]
-          }
-        }
-      },
-      "additionalProperties": false,
-      "required": [
-        "preprocessor_id"
-      ],
-      "title": "PreprocessorChainElement"
-    },
     "RAGDocument": {
       "type": "object",
       "properties": {
@@ -8171,12 +8133,6 @@
         },
         "chunk_size_in_tokens": {
           "type": "integer"
-        },
-        "preprocessor_chain": {
-          "type": "array",
-          "items": {
-            "$ref": "#/components/schemas/PreprocessorChainElement"
-          }
         }
       },
       "additionalProperties": false,
@@ -9382,6 +9338,44 @@
       ],
       "title": "PreprocessingDataElement"
     },
+    "PreprocessorChainElement": {
+      "type": "object",
+      "properties": {
+        "preprocessor_id": {
+          "type": "string"
+        },
+        "options": {
+          "type": "object",
+          "additionalProperties": {
+            "oneOf": [
+              {
+                "type": "null"
+              },
+              {
+                "type": "boolean"
+              },
+              {
+                "type": "number"
+              },
+              {
+                "type": "string"
+              },
+              {
+                "type": "array"
+              },
+              {
+                "type": "object"
+              }
+            ]
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "preprocessor_id"
+      ],
+      "title": "PreprocessorChainElement"
+    },
     "PreprocessRequest": {
       "type": "object",
       "properties": {
diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml
index 7eac70b13..9d04aada4 100644
--- a/docs/_static/llama-stack-spec.yaml
+++ b/docs/_static/llama-stack-spec.yaml
@@ -5572,25 +5572,6 @@ components:
       required:
       - status
       title: HealthInfo
-    PreprocessorChainElement:
-      type: object
-      properties:
-        preprocessor_id:
-          type: string
-        options:
-          type: object
-          additionalProperties:
-            oneOf:
-            - type: 'null'
-            - type: boolean
-            - type: number
-            - type: string
-            - type: array
-            - type: object
-      additionalProperties: false
-      required:
-      - preprocessor_id
-      title: PreprocessorChainElement
     RAGDocument:
       type: object
       properties:
@@ -5639,10 +5620,6 @@ components:
           type: string
         chunk_size_in_tokens:
           type: integer
-        preprocessor_chain:
-          type: array
-          items:
-            $ref: '#/components/schemas/PreprocessorChainElement'
       additionalProperties: false
       required:
       - documents
@@ -6414,6 +6391,25 @@ components:
       required:
       - data_element_id
       title: PreprocessingDataElement
+    PreprocessorChainElement:
+      type: object
+      properties:
+        preprocessor_id:
+          type: string
+        options:
+          type: object
+          additionalProperties:
+            oneOf:
+            - type: 'null'
+            - type: boolean
+            - type: number
+            - type: string
+            - type: array
+            - type: object
+      additionalProperties: false
+      required:
+      - preprocessor_id
+      title: PreprocessorChainElement
     PreprocessRequest:
       type: object
      properties:
diff --git a/docs/source/distributions/remote_hosted_distro/nvidia.md b/docs/source/distributions/remote_hosted_distro/nvidia.md
index a6320709f..58731392d 100644
--- a/docs/source/distributions/remote_hosted_distro/nvidia.md
+++ b/docs/source/distributions/remote_hosted_distro/nvidia.md
@@ -10,7 +10,6 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 | eval | `inline::meta-reference` |
 | inference | `remote::nvidia` |
 | post_training | `remote::nvidia` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `remote::nvidia` |
 | scoring | `inline::basic` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/bedrock.md b/docs/source/distributions/self_hosted_distro/bedrock.md
index 4552381c7..302d6932b 100644
--- a/docs/source/distributions/self_hosted_distro/bedrock.md
+++ b/docs/source/distributions/self_hosted_distro/bedrock.md
@@ -16,7 +16,6 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::bedrock` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `remote::bedrock` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/cerebras.md b/docs/source/distributions/self_hosted_distro/cerebras.md
index 018af4d4a..8f441823a 100644
--- a/docs/source/distributions/self_hosted_distro/cerebras.md
+++ b/docs/source/distributions/self_hosted_distro/cerebras.md
@@ -9,7 +9,6 @@ The `llamastack/distribution-cerebras` distribution consists of the following pr
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::cerebras`, `inline::sentence-transformers` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/fireworks.md b/docs/source/distributions/self_hosted_distro/fireworks.md
index 06ab3c700..ee4bf0b25 100644
--- a/docs/source/distributions/self_hosted_distro/fireworks.md
+++ b/docs/source/distributions/self_hosted_distro/fireworks.md
@@ -19,7 +19,6 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::fireworks`, `inline::sentence-transformers` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/groq.md b/docs/source/distributions/self_hosted_distro/groq.md
index f775c02e5..fe922f23d 100644
--- a/docs/source/distributions/self_hosted_distro/groq.md
+++ b/docs/source/distributions/self_hosted_distro/groq.md
@@ -19,7 +19,6 @@ The `llamastack/distribution-groq` distribution consists of the following provid
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::groq` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
index 65cdeba6f..b90f75347 100644
--- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
+++ b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
@@ -19,7 +19,6 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `inline::meta-reference` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
index 56d9382a4..c3e2b4f2c 100644
--- a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
+++ b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md
@@ -19,7 +19,6 @@ The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `inline::meta-reference-quantized` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md
index 68bbc4a19..2358a52a7 100644
--- a/docs/source/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/distributions/self_hosted_distro/ollama.md
@@ -19,7 +19,6 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::ollama` |
-| preprocessing | `inline::basic`, `inline::simple_chunking` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
diff --git a/docs/source/distributions/self_hosted_distro/remote-vllm.md b/docs/source/distributions/self_hosted_distro/remote-vllm.md
index
eea897985..b6e8a8ad4 100644 --- a/docs/source/distributions/self_hosted_distro/remote-vllm.md +++ b/docs/source/distributions/self_hosted_distro/remote-vllm.md @@ -18,7 +18,6 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::vllm`, `inline::sentence-transformers` | -| preprocessing | `inline::basic`, `inline::simple_chunking` | | safety | `inline::llama-guard` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | diff --git a/docs/source/distributions/self_hosted_distro/sambanova.md b/docs/source/distributions/self_hosted_distro/sambanova.md index 3bd63d427..1d2e0d9df 100644 --- a/docs/source/distributions/self_hosted_distro/sambanova.md +++ b/docs/source/distributions/self_hosted_distro/sambanova.md @@ -17,7 +17,6 @@ The `llamastack/distribution-sambanova` distribution consists of the following p |-----|-------------| | agents | `inline::meta-reference` | | inference | `remote::sambanova` | -| preprocessing | `inline::basic`, `inline::simple_chunking` | | safety | `inline::llama-guard` | | telemetry | `inline::meta-reference` | | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` | diff --git a/docs/source/distributions/self_hosted_distro/tgi.md b/docs/source/distributions/self_hosted_distro/tgi.md index 6100a412c..f6b14b064 100644 --- a/docs/source/distributions/self_hosted_distro/tgi.md +++ b/docs/source/distributions/self_hosted_distro/tgi.md @@ -20,7 +20,6 @@ The `llamastack/distribution-tgi` distribution consists of the following provide | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::tgi`, `inline::sentence-transformers` | -| preprocessing | `inline::basic`, `inline::simple_chunking` | | safety | `inline::llama-guard` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | diff --git a/docs/source/distributions/self_hosted_distro/together.md b/docs/source/distributions/self_hosted_distro/together.md index d40c87e9e..b07e85a1c 100644 --- a/docs/source/distributions/self_hosted_distro/together.md +++ b/docs/source/distributions/self_hosted_distro/together.md @@ -19,7 +19,6 @@ The `llamastack/distribution-together` distribution consists of the following pr | datasetio | `remote::huggingface`, `inline::localfs` | | eval | `inline::meta-reference` | | inference | `remote::together`, `inline::sentence-transformers` | -| preprocessing | `inline::basic`, `inline::simple_chunking` | | safety | `inline::llama-guard` | | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | | telemetry | `inline::meta-reference` | diff --git a/llama_stack/apis/tools/rag_tool.py b/llama_stack/apis/tools/rag_tool.py index 6ac49f22d..73b36e050 100644 --- a/llama_stack/apis/tools/rag_tool.py +++ b/llama_stack/apis/tools/rag_tool.py @@ -11,7 +11,6 @@ from pydantic import BaseModel, Field from typing_extensions import Annotated, Protocol, runtime_checkable from llama_stack.apis.common.content_types import URL, InterleavedContent -from llama_stack.apis.preprocessing import PreprocessorChain from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol from llama_stack.schema_utils import json_schema_type, register_schema, webmethod @@ -87,7 +86,6 @@ class 
RAGToolRuntime(Protocol): documents: List[RAGDocument], vector_db_id: str, chunk_size_in_tokens: int = 512, - preprocessor_chain: Optional[PreprocessorChain] = None, ) -> None: """Index documents so they can be used by the RAG system""" ... diff --git a/llama_stack/distribution/routers/routers.py b/llama_stack/distribution/routers/routers.py index 4bdd9d762..b37219e8f 100644 --- a/llama_stack/distribution/routers/routers.py +++ b/llama_stack/distribution/routers/routers.py @@ -677,13 +677,12 @@ class ToolRuntimeRouter(ToolRuntime): documents: List[RAGDocument], vector_db_id: str, chunk_size_in_tokens: int = 512, - preprocessor_chain: Optional[PreprocessorChain] = None, ) -> None: logger.debug( f"ToolRuntimeRouter.RagToolImpl.insert: {vector_db_id}, {len(documents)} documents, chunk_size={chunk_size_in_tokens}" ) return await self.routing_table.get_provider_impl("insert_into_memory").insert( - documents, vector_db_id, chunk_size_in_tokens, preprocessor_chain + documents, vector_db_id, chunk_size_in_tokens ) def __init__( diff --git a/llama_stack/providers/inline/preprocessing/basic/__init__.py b/llama_stack/providers/inline/preprocessing/basic/__init__.py deleted file mode 100644 index 5c156090e..000000000 --- a/llama_stack/providers/inline/preprocessing/basic/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -from .config import InlineBasicPreprocessorConfig - - -async def get_provider_impl( - config: InlineBasicPreprocessorConfig, - _deps, -): - from .basic import InclineBasicPreprocessorImpl - - impl = InclineBasicPreprocessorImpl(config) - await impl.initialize() - return impl diff --git a/llama_stack/providers/inline/preprocessing/basic/basic.py b/llama_stack/providers/inline/preprocessing/basic/basic.py deleted file mode 100644 index 0bac3e93a..000000000 --- a/llama_stack/providers/inline/preprocessing/basic/basic.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
-import logging -import re -from typing import Any, List, Optional - -import httpx - -from llama_stack.apis.common.content_types import URL -from llama_stack.apis.preprocessing import ( - Preprocessing, - PreprocessingDataElement, - PreprocessingDataFormat, - PreprocessingDataType, - Preprocessor, - PreprocessorChain, - PreprocessorOptions, - PreprocessorResponse, -) -from llama_stack.providers.datatypes import PreprocessorsProtocolPrivate -from llama_stack.providers.inline.preprocessing.basic.config import InlineBasicPreprocessorConfig -from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str -from llama_stack.providers.utils.memory.vector_store import content_from_data, parse_pdf - -log = logging.getLogger(__name__) - - -class InclineBasicPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate): - # this preprocessor can either receive documents (text or binary) or document URIs - input_types = [ - PreprocessingDataType.binary_document, - PreprocessingDataType.raw_text_document, - PreprocessingDataType.document_uri, - ] - - # this preprocessor optionally retrieves the documents and converts them into plain text - output_types = [PreprocessingDataType.raw_text_document] - - preprocessor_store = None - - URL_VALIDATION_PATTERN = re.compile("^(https?://|file://|data:)") - - def __init__(self, config: InlineBasicPreprocessorConfig) -> None: - self.config = config - - async def initialize(self) -> None: - pass - - async def shutdown(self) -> None: - pass - - async def register_preprocessor(self, preprocessor: Preprocessor) -> None: - pass - - async def unregister_preprocessor(self, preprocessor_id: str) -> None: - pass - - async def do_preprocess( - self, - preprocessor_id: str, - preprocessor_inputs: List[PreprocessingDataElement], - options: Optional[PreprocessorOptions] = None, - ) -> PreprocessorResponse: - results = [] - - for inp in preprocessor_inputs: - input_type = self._resolve_input_type(inp) - - if input_type == PreprocessingDataType.document_uri: - document = await self._fetch_document(inp) - if document is None: - continue - elif input_type == PreprocessingDataType.binary_document: - document = inp.data_element_path_or_content - if inp.data_element_format is None: - log.error(f"Binary document format is not provided for {inp.data_element_id}, skipping it") - continue - if inp.data_element_format != PreprocessingDataFormat.pdf: - log.error( - f"Unsupported binary document type {inp.data_element_format} for {inp.data_element_id}, skipping it" - ) - continue - elif input_type == PreprocessingDataType.raw_text_document: - document = interleaved_content_as_str(inp.data_element_path_or_content) # type: ignore - else: - log.error(f"Unexpected preprocessor input type: {input_type}") - continue - - if inp.data_element_format == PreprocessingDataFormat.pdf: - document = parse_pdf(document) - - new_result = PreprocessingDataElement( - data_element_id=inp.data_element_id, - data_element_type=PreprocessingDataType.raw_text_document, - data_element_format=PreprocessingDataFormat.txt, - data_element_path_or_content=document, - ) - results.append(new_result) - - return PreprocessorResponse( - success=True, output_data_type=PreprocessingDataType.raw_text_document, results=results - ) - - async def preprocess( - self, - preprocessors: PreprocessorChain, - preprocessor_inputs: List[PreprocessingDataElement], - ) -> PreprocessorResponse: - return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs) - - @staticmethod - def 
_resolve_input_type(preprocessor_input: PreprocessingDataElement) -> PreprocessingDataType: - if preprocessor_input.data_element_type is not None: - return preprocessor_input.data_element_type - - if isinstance(preprocessor_input.data_element_path_or_content, URL): - return PreprocessingDataType.document_uri - if InclineBasicPreprocessorImpl.URL_VALIDATION_PATTERN.match( - str(preprocessor_input.data_element_path_or_content) - ): - return PreprocessingDataType.document_uri - if preprocessor_input.data_element_format == PreprocessingDataFormat.pdf: - return PreprocessingDataType.binary_document - - return PreprocessingDataType.raw_text_document - - @staticmethod - async def _fetch_document(preprocessor_input: PreprocessingDataElement) -> Any: - if isinstance(preprocessor_input.data_element_path_or_content, str): - url = preprocessor_input.data_element_path_or_content - if not InclineBasicPreprocessorImpl.URL_VALIDATION_PATTERN.match(url): - log.error(f"Unexpected URL: {url}") - return None - elif isinstance(preprocessor_input.data_element_path_or_content, URL): - url = preprocessor_input.data_element_path_or_content.uri - else: - log.error( - f"Unexpected type {type(preprocessor_input.data_element_path_or_content)} for input {preprocessor_input.data_element_path_or_content}, skipping this input." - ) - return None - - if url.startswith("data:"): - return content_from_data(url) - - async with httpx.AsyncClient() as client: - r = await client.get(url) - - return r.content if preprocessor_input.data_element_format == PreprocessingDataFormat.pdf else r.text diff --git a/llama_stack/providers/inline/preprocessing/basic/config.py b/llama_stack/providers/inline/preprocessing/basic/config.py deleted file mode 100644 index 6c290cd9d..000000000 --- a/llama_stack/providers/inline/preprocessing/basic/config.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. -from pydantic import BaseModel - - -class InlineBasicPreprocessorConfig(BaseModel): ... diff --git a/llama_stack/providers/inline/preprocessing/simple_chunking/__init__.py b/llama_stack/providers/inline/preprocessing/simple_chunking/__init__.py deleted file mode 100644 index 651c5bd3b..000000000 --- a/llama_stack/providers/inline/preprocessing/simple_chunking/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -from .config import InclineSimpleChunkingConfig - - -async def get_provider_impl( - config: InclineSimpleChunkingConfig, - _deps, -): - from .simple_chunking import InclineSimpleChunkingImpl - - impl = InclineSimpleChunkingImpl(config) - await impl.initialize() - return impl diff --git a/llama_stack/providers/inline/preprocessing/simple_chunking/config.py b/llama_stack/providers/inline/preprocessing/simple_chunking/config.py deleted file mode 100644 index 7853019e3..000000000 --- a/llama_stack/providers/inline/preprocessing/simple_chunking/config.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
-from pydantic import BaseModel - - -class InclineSimpleChunkingConfig(BaseModel): - chunk_size_in_tokens: int = 512 - chunk_overlap_ratio: int = 4 diff --git a/llama_stack/providers/inline/preprocessing/simple_chunking/simple_chunking.py b/llama_stack/providers/inline/preprocessing/simple_chunking/simple_chunking.py deleted file mode 100644 index 73b4a8c9c..000000000 --- a/llama_stack/providers/inline/preprocessing/simple_chunking/simple_chunking.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. -import logging -from enum import Enum -from typing import List, Optional, Tuple - -from llama_models.llama3.api import Tokenizer - -from llama_stack.apis.preprocessing import ( - Preprocessing, - PreprocessingDataElement, - PreprocessingDataFormat, - PreprocessingDataType, - Preprocessor, - PreprocessorChain, - PreprocessorOptions, - PreprocessorResponse, -) -from llama_stack.apis.vector_io import Chunk -from llama_stack.providers.datatypes import PreprocessorsProtocolPrivate -from llama_stack.providers.inline.preprocessing.simple_chunking import InclineSimpleChunkingConfig - -log = logging.getLogger(__name__) - - -class SimpleChunkingOptions(Enum): - chunk_size_in_tokens = "chunk_size_in_tokens" - chunk_overlap_ratio = "chunk_overlap_ratio" - - -class InclineSimpleChunkingImpl(Preprocessing, PreprocessorsProtocolPrivate): - # this preprocessor receives plain text and returns chunks - input_types = [PreprocessingDataType.raw_text_document] - output_types = [PreprocessingDataType.chunks] - - preprocessor_store = None - - def __init__(self, config: InclineSimpleChunkingConfig) -> None: - self.config = config - - async def initialize(self) -> None: ... - - async def shutdown(self) -> None: ... - - async def register_preprocessor(self, preprocessor: Preprocessor) -> None: ... - - async def unregister_preprocessor(self, preprocessor_id: str) -> None: ... 
- - async def do_preprocess( - self, - preprocessor_id: str, - preprocessor_inputs: List[PreprocessingDataElement], - options: Optional[PreprocessorOptions] = None, - ) -> PreprocessorResponse: - chunks = [] - - window_len, overlap_len = self._resolve_chunk_size_params(options) - - for inp in preprocessor_inputs: - new_chunks = self.make_overlapped_chunks( - inp.data_element_id, str(inp.data_element_path_or_content), window_len, overlap_len - ) - for i, chunk in enumerate(new_chunks): - new_chunk_data_element = PreprocessingDataElement( - data_element_id=f"{inp.data_element_id}_chunk_{i}", - data_element_type=PreprocessingDataType.chunks, - data_element_format=PreprocessingDataFormat.txt, - data_element_path_or_content=chunk, - ) - chunks.append(new_chunk_data_element) - - return PreprocessorResponse(success=True, output_data_type=PreprocessingDataType.chunks, results=chunks) - - async def preprocess( - self, - preprocessors: PreprocessorChain, - preprocessor_inputs: List[PreprocessingDataElement], - ) -> PreprocessorResponse: - return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs) - - def _resolve_chunk_size_params(self, options: PreprocessorOptions | None) -> Tuple[int, int]: - window_len = (options or {}).get( - str(SimpleChunkingOptions.chunk_size_in_tokens), self.config.chunk_size_in_tokens - ) - - chunk_overlap_ratio = (options or {}).get( - str(SimpleChunkingOptions.chunk_overlap_ratio), self.config.chunk_overlap_ratio - ) - overlap_len = window_len // chunk_overlap_ratio - - return window_len, overlap_len - - @staticmethod - def make_overlapped_chunks(document_id: str, text: str, window_len: int, overlap_len: int) -> List[Chunk]: - tokenizer = Tokenizer.get_instance() - tokens = tokenizer.encode(text, bos=False, eos=False) - - chunks = [] - for i in range(0, len(tokens), window_len - overlap_len): - toks = tokens[i : i + window_len] - chunk = tokenizer.decode(toks) - # chunk is a string - chunks.append( - Chunk( - content=chunk, - metadata={ - "token_count": len(toks), - "document_id": document_id, - }, - ) - ) - - return chunks diff --git a/llama_stack/providers/inline/tool_runtime/rag/__init__.py b/llama_stack/providers/inline/tool_runtime/rag/__init__.py index 2ed4d5934..0ef3c35e9 100644 --- a/llama_stack/providers/inline/tool_runtime/rag/__init__.py +++ b/llama_stack/providers/inline/tool_runtime/rag/__init__.py @@ -14,6 +14,6 @@ from .config import RagToolRuntimeConfig async def get_provider_impl(config: RagToolRuntimeConfig, deps: Dict[Api, Any]): from .memory import MemoryToolRuntimeImpl - impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference], deps[Api.preprocessing]) + impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference]) await impl.initialize() return impl diff --git a/llama_stack/providers/inline/tool_runtime/rag/memory.py b/llama_stack/providers/inline/tool_runtime/rag/memory.py index 7de8ece48..97c53d454 100644 --- a/llama_stack/providers/inline/tool_runtime/rag/memory.py +++ b/llama_stack/providers/inline/tool_runtime/rag/memory.py @@ -19,14 +19,6 @@ from llama_stack.apis.common.content_types import ( TextContentItem, ) from llama_stack.apis.inference import Inference -from llama_stack.apis.preprocessing import ( - Preprocessing, - PreprocessingDataElement, - PreprocessingDataFormat, - PreprocessingDataType, - PreprocessorChain, - PreprocessorChainElement, -) from llama_stack.apis.tools import ( ListToolDefsResponse, RAGDocument, @@ -41,6 +33,10 @@ from llama_stack.apis.tools import 
( ) from llama_stack.apis.vector_io import QueryChunksResponse, VectorIO from llama_stack.providers.datatypes import ToolsProtocolPrivate +from llama_stack.providers.utils.memory.vector_store import ( + content_from_doc, + make_overlapped_chunks, +) from .config import RagToolRuntimeConfig from .context_retriever import generate_rag_query @@ -53,22 +49,15 @@ def make_random_string(length: int = 8): class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime): - DEFAULT_PREPROCESSING_CHAIN = [ - PreprocessorChainElement(preprocessor_id="builtin::basic"), - PreprocessorChainElement(preprocessor_id="builtin::chunking"), - ] - def __init__( self, config: RagToolRuntimeConfig, vector_io_api: VectorIO, inference_api: Inference, - preprocessing_api: Preprocessing, ): self.config = config self.vector_io_api = vector_io_api self.inference_api = inference_api - self.preprocessing_api = preprocessing_api async def initialize(self): pass @@ -87,32 +76,24 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime): documents: List[RAGDocument], vector_db_id: str, chunk_size_in_tokens: int = 512, - preprocessor_chain: Optional[PreprocessorChain] = None, ) -> None: - preprocessor_inputs = [self._rag_document_to_preprocessor_input(d) for d in documents] - preprocessor_response = await self.preprocessing_api.preprocess( - preprocessors=preprocessor_chain or self.DEFAULT_PREPROCESSING_CHAIN, - preprocessor_inputs=preprocessor_inputs, - ) - - if not preprocessor_response.success: - log.error("Preprocessor chain returned an error") - return - - if preprocessor_response.output_data_type != PreprocessingDataType.chunks: - log.error( - f"Preprocessor chain returned {preprocessor_response.output_data_type} instead of {PreprocessingDataType.chunks}" + chunks = [] + for doc in documents: + content = await content_from_doc(doc) + chunks.extend( + make_overlapped_chunks( + doc.document_id, + content, + chunk_size_in_tokens, + chunk_size_in_tokens // 4, + ) ) - return - chunks = preprocessor_response.results if not chunks: - log.error("No chunks returned by the preprocessor chain") return - actual_chunks = [chunk.data_element_path_or_content for chunk in chunks] await self.vector_io_api.insert_chunks( - chunks=actual_chunks, # type: ignore + chunks=chunks, vector_db_id=vector_db_id, ) @@ -226,16 +207,3 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime): content=result.content, metadata=result.metadata, ) - - @staticmethod - def _rag_document_to_preprocessor_input(document: RAGDocument) -> PreprocessingDataElement: - if document.mime_type == "application/pdf": - data_element_format = PreprocessingDataFormat.pdf - else: - data_element_format = None - - return PreprocessingDataElement( - data_element_id=document.document_id, - data_element_format=data_element_format, - data_element_path_or_content=document.content, - ) diff --git a/llama_stack/providers/registry/preprocessing.py b/llama_stack/providers/registry/preprocessing.py index ef306e732..c07b88a10 100644 --- a/llama_stack/providers/registry/preprocessing.py +++ b/llama_stack/providers/registry/preprocessing.py @@ -7,28 +7,9 @@ from typing import List from llama_stack.providers.datatypes import ( - Api, - InlineProviderSpec, ProviderSpec, ) def available_providers() -> List[ProviderSpec]: - return [ - InlineProviderSpec( - api=Api.preprocessing, - provider_type="inline::basic", - pip_packages=["httpx", "pypdf"], - module="llama_stack.providers.inline.preprocessing.basic", - 
config_class="llama_stack.providers.inline.preprocessing.basic.InlineBasicPreprocessorConfig", - api_dependencies=[], - ), - InlineProviderSpec( - api=Api.preprocessing, - provider_type="inline::simple_chunking", - pip_packages=[], - module="llama_stack.providers.inline.preprocessing.simple_chunking", - config_class="llama_stack.providers.inline.preprocessing.simple_chunking.InclineSimpleChunkingConfig", - api_dependencies=[], - ), - ] + return [] diff --git a/llama_stack/providers/registry/tool_runtime.py b/llama_stack/providers/registry/tool_runtime.py index a08df4c90..95ea2dcf9 100644 --- a/llama_stack/providers/registry/tool_runtime.py +++ b/llama_stack/providers/registry/tool_runtime.py @@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]: ], module="llama_stack.providers.inline.tool_runtime.rag", config_class="llama_stack.providers.inline.tool_runtime.rag.config.RagToolRuntimeConfig", - api_dependencies=[Api.vector_io, Api.inference, Api.preprocessing], + api_dependencies=[Api.vector_io, Api.inference], ), InlineProviderSpec( api=Api.tool_runtime, diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py index 2afd329cc..f82defb4b 100644 --- a/llama_stack/templates/bedrock/bedrock.py +++ b/llama_stack/templates/bedrock/bedrock.py @@ -6,7 +6,6 @@ from pathlib import Path -from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput from llama_stack.distribution.datatypes import Provider, ToolGroupInput from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig from llama_stack.providers.remote.inference.bedrock.models import MODEL_ENTRIES @@ -34,7 +33,6 @@ def get_distribution_template() -> DistributionTemplate: "inline::rag-runtime", "remote::model-context-protocol", ], - "preprocessing": ["inline::basic", "inline::simple_chunking"], } name = "bedrock" vector_io_provider = Provider( @@ -63,17 +61,6 @@ def get_distribution_template() -> DistributionTemplate: ), ] - default_preprocessors = [ - PreprocessorInput( - preprocessor_id="builtin::basic", - provider_id="basic", - ), - PreprocessorInput( - preprocessor_id="builtin::chunking", - provider_id="simple_chunking", - ), - ] - return DistributionTemplate( name=name, distro_type="self_hosted", @@ -89,7 +76,6 @@ def get_distribution_template() -> DistributionTemplate: }, default_models=default_models, default_tool_groups=default_tool_groups, - default_preprocessors=default_preprocessors, ), }, run_config_env_vars={ diff --git a/llama_stack/templates/bedrock/build.yaml b/llama_stack/templates/bedrock/build.yaml index 477503321..6c07b0478 100644 --- a/llama_stack/templates/bedrock/build.yaml +++ b/llama_stack/templates/bedrock/build.yaml @@ -29,7 +29,4 @@ distribution_spec: - inline::code-interpreter - inline::rag-runtime - remote::model-context-protocol - preprocessing: - - inline::basic - - inline::simple_chunking image_type: conda diff --git a/llama_stack/templates/bedrock/run.yaml b/llama_stack/templates/bedrock/run.yaml index f60d93b32..c422cb2f9 100644 --- a/llama_stack/templates/bedrock/run.yaml +++ b/llama_stack/templates/bedrock/run.yaml @@ -5,7 +5,6 @@ apis: - datasetio - eval - inference -- preprocessing - safety - scoring - telemetry @@ -97,13 +96,6 @@ providers: - provider_id: model-context-protocol provider_type: remote::model-context-protocol config: {} - preprocessing: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: simple_chunking - provider_type: inline::simple_chunking - config: {} 
metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/registry.db @@ -150,10 +142,6 @@ tool_groups: provider_id: rag-runtime - toolgroup_id: builtin::code_interpreter provider_id: code-interpreter -preprocessors: -- preprocessor_id: builtin::basic - provider_id: basic -- preprocessor_id: builtin::chunking - provider_id: simple_chunking +preprocessors: [] server: port: 8321 diff --git a/llama_stack/templates/cerebras/build.yaml b/llama_stack/templates/cerebras/build.yaml index 180357e2a..ef6c43212 100644 --- a/llama_stack/templates/cerebras/build.yaml +++ b/llama_stack/templates/cerebras/build.yaml @@ -29,7 +29,4 @@ distribution_spec: - remote::tavily-search - inline::code-interpreter - inline::rag-runtime - preprocessing: - - inline::basic - - inline::simple_chunking image_type: conda diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py index ed8a127c8..c370fb7d0 100644 --- a/llama_stack/templates/cerebras/cerebras.py +++ b/llama_stack/templates/cerebras/cerebras.py @@ -7,7 +7,6 @@ from pathlib import Path from llama_stack.apis.models.models import ModelType -from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput from llama_stack.providers.inline.inference.sentence_transformers import ( SentenceTransformersInferenceConfig, @@ -38,7 +37,6 @@ def get_distribution_template() -> DistributionTemplate: "inline::code-interpreter", "inline::rag-runtime", ], - "preprocessing": ["inline::basic", "inline::simple_chunking"], } name = "cerebras" @@ -84,16 +82,6 @@ def get_distribution_template() -> DistributionTemplate: provider_id="code-interpreter", ), ] - default_preprocessors = [ - PreprocessorInput( - preprocessor_id="builtin::basic", - provider_id="basic", - ), - PreprocessorInput( - preprocessor_id="builtin::chunking", - provider_id="simple_chunking", - ), - ] return DistributionTemplate( name="cerebras", @@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate: default_models=default_models + [embedding_model], default_shields=[], default_tool_groups=default_tool_groups, - default_preprocessors=default_preprocessors, ), }, run_config_env_vars={ diff --git a/llama_stack/templates/cerebras/run.yaml b/llama_stack/templates/cerebras/run.yaml index e6f8adeeb..b2e155402 100644 --- a/llama_stack/templates/cerebras/run.yaml +++ b/llama_stack/templates/cerebras/run.yaml @@ -5,7 +5,6 @@ apis: - datasetio - eval - inference -- preprocessing - safety - scoring - telemetry @@ -100,13 +99,6 @@ providers: - provider_id: rag-runtime provider_type: inline::rag-runtime config: {} - preprocessing: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: simple_chunking - provider_type: inline::simple_chunking - config: {} metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/registry.db @@ -148,10 +140,6 @@ tool_groups: provider_id: rag-runtime - toolgroup_id: builtin::code_interpreter provider_id: code-interpreter -preprocessors: -- preprocessor_id: builtin::basic - provider_id: basic -- preprocessor_id: builtin::chunking - provider_id: simple_chunking +preprocessors: [] server: port: 8321 diff --git a/llama_stack/templates/ci-tests/build.yaml b/llama_stack/templates/ci-tests/build.yaml index d31c14beb..a5c615f2f 100644 --- a/llama_stack/templates/ci-tests/build.yaml +++ b/llama_stack/templates/ci-tests/build.yaml @@ -30,7 +30,4 @@ 
distribution_spec: - inline::code-interpreter - inline::rag-runtime - remote::model-context-protocol - preprocessing: - - inline::basic - - inline::simple_chunking image_type: conda diff --git a/llama_stack/templates/ci-tests/ci_tests.py b/llama_stack/templates/ci-tests/ci_tests.py index 8f1ad2771..f6e836918 100644 --- a/llama_stack/templates/ci-tests/ci_tests.py +++ b/llama_stack/templates/ci-tests/ci_tests.py @@ -6,7 +6,6 @@ from llama_stack.apis.models.models import ModelType -from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput from llama_stack.distribution.datatypes import ( ModelInput, Provider, @@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate: "inline::rag-runtime", "remote::model-context-protocol", ], - "preprocessing": ["inline::basic", "inline::simple_chunking"], } name = "ci-tests" inference_provider = Provider( @@ -78,16 +76,6 @@ def get_distribution_template() -> DistributionTemplate: provider_id="code-interpreter", ), ] - default_preprocessors = [ - PreprocessorInput( - preprocessor_id="builtin::basic", - provider_id="basic", - ), - PreprocessorInput( - preprocessor_id="builtin::chunking", - provider_id="simple_chunking", - ), - ] available_models = { "fireworks": MODEL_ENTRIES, } @@ -117,7 +105,6 @@ def get_distribution_template() -> DistributionTemplate: }, default_models=default_models + [embedding_model], default_tool_groups=default_tool_groups, - default_preprocessors=default_preprocessors, default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")], ), }, diff --git a/llama_stack/templates/ci-tests/run.yaml b/llama_stack/templates/ci-tests/run.yaml index 12520c11a..7f94ce90f 100644 --- a/llama_stack/templates/ci-tests/run.yaml +++ b/llama_stack/templates/ci-tests/run.yaml @@ -5,7 +5,6 @@ apis: - datasetio - eval - inference -- preprocessing - safety - scoring - telemetry @@ -100,13 +99,6 @@ providers: - provider_id: model-context-protocol provider_type: remote::model-context-protocol config: {} - preprocessing: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: simple_chunking - provider_type: inline::simple_chunking - config: {} metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/registry.db @@ -226,10 +218,6 @@ tool_groups: provider_id: rag-runtime - toolgroup_id: builtin::code_interpreter provider_id: code-interpreter -preprocessors: -- preprocessor_id: builtin::basic - provider_id: basic -- preprocessor_id: builtin::chunking - provider_id: simple_chunking +preprocessors: [] server: port: 8321 diff --git a/llama_stack/templates/dell/build.yaml b/llama_stack/templates/dell/build.yaml index baabf294a..05b98d56f 100644 --- a/llama_stack/templates/dell/build.yaml +++ b/llama_stack/templates/dell/build.yaml @@ -30,7 +30,4 @@ distribution_spec: - remote::tavily-search - inline::code-interpreter - inline::rag-runtime - preprocessing: - - inline::basic - - inline::simple_chunking image_type: conda diff --git a/llama_stack/templates/dell/dell.py b/llama_stack/templates/dell/dell.py index cde0ddbbc..52c5a5476 100644 --- a/llama_stack/templates/dell/dell.py +++ b/llama_stack/templates/dell/dell.py @@ -5,7 +5,6 @@ # the root directory of this source tree. 
from llama_stack.apis.models.models import ModelType -from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput from llama_stack.distribution.datatypes import ( ModelInput, Provider, @@ -34,7 +33,6 @@ def get_distribution_template() -> DistributionTemplate: "inline::code-interpreter", "inline::rag-runtime", ], - "preprocessing": ["inline::basic", "inline::simple_chunking"], } name = "dell" inference_provider = Provider( @@ -94,16 +92,6 @@ def get_distribution_template() -> DistributionTemplate: provider_id="code-interpreter", ), ] - default_preprocessors = [ - PreprocessorInput( - preprocessor_id="builtin::basic", - provider_id="basic", - ), - PreprocessorInput( - preprocessor_id="builtin::chunking", - provider_id="simple_chunking", - ), - ] return DistributionTemplate( name=name, @@ -119,7 +107,6 @@ def get_distribution_template() -> DistributionTemplate: }, default_models=[inference_model, embedding_model], default_tool_groups=default_tool_groups, - default_preprocessors=default_preprocessors, ), "run-with-safety.yaml": RunConfigSettings( provider_overrides={ @@ -133,7 +120,6 @@ def get_distribution_template() -> DistributionTemplate: default_models=[inference_model, safety_model, embedding_model], default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")], default_tool_groups=default_tool_groups, - default_preprocessors=default_preprocessors, ), }, run_config_env_vars={ diff --git a/llama_stack/templates/dell/run-with-safety.yaml b/llama_stack/templates/dell/run-with-safety.yaml index 0658a4985..fc06ac45e 100644 --- a/llama_stack/templates/dell/run-with-safety.yaml +++ b/llama_stack/templates/dell/run-with-safety.yaml @@ -5,7 +5,6 @@ apis: - datasetio - eval - inference -- preprocessing - safety - scoring - telemetry @@ -100,13 +99,6 @@ providers: - provider_id: rag-runtime provider_type: inline::rag-runtime config: {} - preprocessing: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: simple_chunking - provider_type: inline::simple_chunking - config: {} metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db @@ -137,10 +129,6 @@ tool_groups: provider_id: rag-runtime - toolgroup_id: builtin::code_interpreter provider_id: code-interpreter -preprocessors: -- preprocessor_id: builtin::basic - provider_id: basic -- preprocessor_id: builtin::chunking - provider_id: simple_chunking +preprocessors: [] server: port: 8321 diff --git a/llama_stack/templates/dell/run.yaml b/llama_stack/templates/dell/run.yaml index cf84fd0ab..3bbc9ba56 100644 --- a/llama_stack/templates/dell/run.yaml +++ b/llama_stack/templates/dell/run.yaml @@ -5,7 +5,6 @@ apis: - datasetio - eval - inference -- preprocessing - safety - scoring - telemetry @@ -96,13 +95,6 @@ providers: - provider_id: rag-runtime provider_type: inline::rag-runtime config: {} - preprocessing: - - provider_id: basic - provider_type: inline::basic - config: {} - - provider_id: simple_chunking - provider_type: inline::simple_chunking - config: {} metadata_store: type: sqlite db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db @@ -128,10 +120,6 @@ tool_groups: provider_id: rag-runtime - toolgroup_id: builtin::code_interpreter provider_id: code-interpreter -preprocessors: -- preprocessor_id: builtin::basic - provider_id: basic -- preprocessor_id: builtin::chunking - provider_id: simple_chunking +preprocessors: [] server: port: 8321 diff --git a/llama_stack/templates/dev/build.yaml b/llama_stack/templates/dev/build.yaml index 
72323901d..726ebccca 100644
--- a/llama_stack/templates/dev/build.yaml
+++ b/llama_stack/templates/dev/build.yaml
@@ -34,7 +34,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/dev/dev.py b/llama_stack/templates/dev/dev.py
index 06f788391..69924acbe 100644
--- a/llama_stack/templates/dev/dev.py
+++ b/llama_stack/templates/dev/dev.py
@@ -7,7 +7,6 @@
 from typing import List, Tuple
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -112,7 +111,6 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "dev"
 
@@ -157,16 +155,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
         provider_id=embedding_provider.provider_id,
@@ -193,7 +181,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
                 default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
             ),
         },
diff --git a/llama_stack/templates/dev/run.yaml b/llama_stack/templates/dev/run.yaml
index ee5bf8eea..dfa467083 100644
--- a/llama_stack/templates/dev/run.yaml
+++ b/llama_stack/templates/dev/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -129,13 +128,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/registry.db
@@ -382,10 +374,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/fireworks/build.yaml b/llama_stack/templates/fireworks/build.yaml
index f6146f53b..3907eba78 100644
--- a/llama_stack/templates/fireworks/build.yaml
+++ b/llama_stack/templates/fireworks/build.yaml
@@ -31,7 +31,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py
index 2da30cb98..449f18bf7 100644
--- a/llama_stack/templates/fireworks/fireworks.py
+++ b/llama_stack/templates/fireworks/fireworks.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "fireworks"
 
@@ -97,16 +95,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -125,7 +113,6 @@ def get_distribution_template() -> DistributionTemplate:
                 default_models=default_models + [embedding_model],
                 default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -171,7 +158,6 @@ def get_distribution_template() -> DistributionTemplate:
                     ),
                 ],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/fireworks/run-with-safety.yaml b/llama_stack/templates/fireworks/run-with-safety.yaml
index 295b3a8a2..8f891fc31 100644
--- a/llama_stack/templates/fireworks/run-with-safety.yaml
+++ b/llama_stack/templates/fireworks/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -112,13 +111,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
@@ -245,10 +237,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml
index 435767cbd..5e7b2a01b 100644
--- a/llama_stack/templates/fireworks/run.yaml
+++ b/llama_stack/templates/fireworks/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -107,13 +106,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
@@ -235,10 +227,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/groq/build.yaml b/llama_stack/templates/groq/build.yaml
index ddc922aa7..3263ce83b 100644
--- a/llama_stack/templates/groq/build.yaml
+++ b/llama_stack/templates/groq/build.yaml
@@ -26,7 +26,4 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::rag-runtime
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/groq/groq.py b/llama_stack/templates/groq/groq.py
index 8db15f37d..7999f95cb 100644
--- a/llama_stack/templates/groq/groq.py
+++ b/llama_stack/templates/groq/groq.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
@@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
             "inline::code-interpreter",
             "inline::rag-runtime",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "groq"
 
@@ -79,16 +77,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -105,7 +93,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/groq/run.yaml b/llama_stack/templates/groq/run.yaml
index 2dd8a1cd9..abafcec5d 100644
--- a/llama_stack/templates/groq/run.yaml
+++ b/llama_stack/templates/groq/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -100,13 +99,6 @@ providers:
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/registry.db
@@ -173,10 +165,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-endpoint/build.yaml b/llama_stack/templates/hf-endpoint/build.yaml
index 9a8cb0768..c2eaaa05b 100644
--- a/llama_stack/templates/hf-endpoint/build.yaml
+++ b/llama_stack/templates/hf-endpoint/build.yaml
@@ -29,7 +29,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/hf-endpoint/hf_endpoint.py b/llama_stack/templates/hf-endpoint/hf_endpoint.py
index f59ce3b0e..53dc9d38f 100644
--- a/llama_stack/templates/hf-endpoint/hf_endpoint.py
+++ b/llama_stack/templates/hf-endpoint/hf_endpoint.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "hf-endpoint"
     inference_provider = Provider(
@@ -86,16 +84,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -136,7 +123,6 @@ def get_distribution_template() -> DistributionTemplate:
                 ],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/hf-endpoint/run-with-safety.yaml b/llama_stack/templates/hf-endpoint/run-with-safety.yaml
index 44820155e..7a2a3c441 100644
--- a/llama_stack/templates/hf-endpoint/run-with-safety.yaml
+++ b/llama_stack/templates/hf-endpoint/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -108,13 +107,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
@@ -145,10 +137,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-endpoint/run.yaml b/llama_stack/templates/hf-endpoint/run.yaml
index 32710c062..bd8c3d38f 100644
--- a/llama_stack/templates/hf-endpoint/run.yaml
+++ b/llama_stack/templates/hf-endpoint/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -103,13 +102,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
@@ -135,10 +127,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-serverless/build.yaml b/llama_stack/templates/hf-serverless/build.yaml
index 614275115..c0cc1e2c2 100644
--- a/llama_stack/templates/hf-serverless/build.yaml
+++ b/llama_stack/templates/hf-serverless/build.yaml
@@ -30,7 +30,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/hf-serverless/hf_serverless.py b/llama_stack/templates/hf-serverless/hf_serverless.py
index 2f4e909e1..ad8a72012 100644
--- a/llama_stack/templates/hf-serverless/hf_serverless.py
+++ b/llama_stack/templates/hf-serverless/hf_serverless.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "hf-serverless"
 
@@ -87,16 +85,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -113,7 +101,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -137,7 +124,6 @@ def get_distribution_template() -> DistributionTemplate:
                 ],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/hf-serverless/run-with-safety.yaml b/llama_stack/templates/hf-serverless/run-with-safety.yaml
index c6b90c4e3..f4cab40df 100644
--- a/llama_stack/templates/hf-serverless/run-with-safety.yaml
+++ b/llama_stack/templates/hf-serverless/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -108,13 +107,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
@@ -145,10 +137,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-serverless/run.yaml b/llama_stack/templates/hf-serverless/run.yaml
index 8d1491749..70adf8e94 100644
--- a/llama_stack/templates/hf-serverless/run.yaml
+++ b/llama_stack/templates/hf-serverless/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -103,13 +102,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
@@ -135,10 +127,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/meta-reference-gpu/build.yaml b/llama_stack/templates/meta-reference-gpu/build.yaml
index 42868c4bc..b9130fc7d 100644
--- a/llama_stack/templates/meta-reference-gpu/build.yaml
+++ b/llama_stack/templates/meta-reference-gpu/build.yaml
@@ -29,7 +29,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py
index 5e6d3dac1..8ba9fadca 100644
--- a/llama_stack/templates/meta-reference-gpu/meta_reference.py
+++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -41,7 +40,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "meta-reference-gpu"
     inference_provider = Provider(
@@ -93,16 +91,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -118,7 +106,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -143,7 +130,6 @@ def get_distribution_template() -> DistributionTemplate:
                 ],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
index 410df6c12..04ae22cbf 100644
--- a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
+++ b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -110,13 +109,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
@@ -147,10 +139,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/meta-reference-gpu/run.yaml b/llama_stack/templates/meta-reference-gpu/run.yaml
index dae173b3e..6a4e34fbb 100644
--- a/llama_stack/templates/meta-reference-gpu/run.yaml
+++ b/llama_stack/templates/meta-reference-gpu/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -104,13 +103,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
@@ -136,10 +128,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml
index bf85a8040..7bbcfe5f2 100644
--- a/llama_stack/templates/meta-reference-quantized-gpu/build.yaml
+++ b/llama_stack/templates/meta-reference-quantized-gpu/build.yaml
@@ -29,7 +29,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py
index 9e47a4c2d..c46ea8bc6 100644
--- a/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py
+++ b/llama_stack/templates/meta-reference-quantized-gpu/meta_reference.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.providers.inline.inference.meta_reference import (
     MetaReferenceQuantizedInferenceConfig,
@@ -36,7 +35,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     default_tool_groups = [
         ToolGroupInput(
@@ -52,16 +50,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
     name = "meta-reference-quantized-gpu"
     inference_provider = Provider(
         provider_id="meta-reference-inference",
@@ -108,7 +96,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml
index e9bfbcb21..e7e8ac42b 100644
--- a/llama_stack/templates/meta-reference-quantized-gpu/run.yaml
+++ b/llama_stack/templates/meta-reference-quantized-gpu/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -106,13 +105,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-quantized-gpu}/registry.db
@@ -138,10 +130,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/nvidia/build.yaml b/llama_stack/templates/nvidia/build.yaml
index 13e9a55fc..f99ff6c81 100644
--- a/llama_stack/templates/nvidia/build.yaml
+++ b/llama_stack/templates/nvidia/build.yaml
@@ -22,7 +22,4 @@ distribution_spec:
     - inline::basic
     tool_runtime:
     - inline::rag-runtime
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py
index 13f34373d..3b0cbe1e5 100644
--- a/llama_stack/templates/nvidia/nvidia.py
+++ b/llama_stack/templates/nvidia/nvidia.py
@@ -6,7 +6,6 @@
 
 from pathlib import Path
 
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput, ToolGroupInput
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
@@ -26,7 +25,6 @@ def get_distribution_template() -> DistributionTemplate:
         "datasetio": ["inline::localfs"],
         "scoring": ["inline::basic"],
         "tool_runtime": ["inline::rag-runtime"],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
 
     inference_provider = Provider(
@@ -57,16 +55,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="rag-runtime",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
     default_models = get_model_registry(available_models)
 
     return DistributionTemplate(
@@ -84,7 +72,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models,
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -96,7 +83,6 @@ def get_distribution_template() -> DistributionTemplate:
                 default_models=[inference_model, safety_model],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}", provider_id="nvidia")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/nvidia/run-with-safety.yaml b/llama_stack/templates/nvidia/run-with-safety.yaml
index 2f739d62c..219a03078 100644
--- a/llama_stack/templates/nvidia/run-with-safety.yaml
+++ b/llama_stack/templates/nvidia/run-with-safety.yaml
@@ -6,7 +6,6 @@ apis:
 - eval
 - inference
 - post_training
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -85,13 +84,6 @@ providers:
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
@@ -114,10 +106,6 @@ benchmarks: []
 tool_groups:
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/nvidia/run.yaml b/llama_stack/templates/nvidia/run.yaml
index 6ec2d695b..0e0548533 100644
--- a/llama_stack/templates/nvidia/run.yaml
+++ b/llama_stack/templates/nvidia/run.yaml
@@ -6,7 +6,6 @@ apis:
 - eval
 - inference
 - post_training
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -80,13 +79,6 @@ providers:
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
@@ -217,10 +209,6 @@ benchmarks: []
 tool_groups:
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml
index 3c7712865..37b72fc1f 100644
--- a/llama_stack/templates/ollama/build.yaml
+++ b/llama_stack/templates/ollama/build.yaml
@@ -30,7 +30,4 @@ distribution_spec:
     - inline::rag-runtime
     - remote::model-context-protocol
    - remote::wolfram-alpha
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index ce7fa38c4..d9f0960a2 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
         "remote::model-context-protocol",
         "remote::wolfram-alpha",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "ollama"
     inference_provider = Provider(
@@ -86,16 +84,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="wolfram-alpha",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -147,7 +134,6 @@ def get_distribution_template() -> DistributionTemplate:
                     ),
                 ],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml
index 875c5c02b..6140c1078 100644
--- a/llama_stack/templates/ollama/run-with-safety.yaml
+++ b/llama_stack/templates/ollama/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -105,13 +104,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
@@ -148,10 +140,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml
index 62dd1e3b9..8780c2738 100644
--- a/llama_stack/templates/ollama/run.yaml
+++ b/llama_stack/templates/ollama/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -103,13 +102,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
@@ -138,10 +130,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml
index cfeea5d57..b2bbf853a 100644
--- a/llama_stack/templates/remote-vllm/build.yaml
+++ b/llama_stack/templates/remote-vllm/build.yaml
@@ -31,7 +31,4 @@ distribution_spec:
     - inline::rag-runtime
     - remote::model-context-protocol
     - remote::wolfram-alpha
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/remote-vllm/run-with-safety.yaml b/llama_stack/templates/remote-vllm/run-with-safety.yaml
index 107cb4acc..a487edf1e 100644
--- a/llama_stack/templates/remote-vllm/run-with-safety.yaml
+++ b/llama_stack/templates/remote-vllm/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -116,13 +115,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -155,10 +147,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/remote-vllm/run.yaml b/llama_stack/templates/remote-vllm/run.yaml
index eee335c21..ea86925e1 100644
--- a/llama_stack/templates/remote-vllm/run.yaml
+++ b/llama_stack/templates/remote-vllm/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -109,13 +108,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -143,10 +135,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py
index 0f5070ad3..0f6c7659e 100644
--- a/llama_stack/templates/remote-vllm/vllm.py
+++ b/llama_stack/templates/remote-vllm/vllm.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -40,7 +39,6 @@ def get_distribution_template() -> DistributionTemplate:
         "remote::model-context-protocol",
         "remote::wolfram-alpha",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "remote-vllm"
     inference_provider = Provider(
@@ -95,16 +93,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="wolfram-alpha",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -120,7 +108,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -144,7 +131,6 @@ def get_distribution_template() -> DistributionTemplate:
                 ],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/sambanova/build.yaml b/llama_stack/templates/sambanova/build.yaml
index a518d0c51..ca5ffe618 100644
--- a/llama_stack/templates/sambanova/build.yaml
+++ b/llama_stack/templates/sambanova/build.yaml
@@ -19,7 +19,4 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::rag-runtime
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/sambanova/run.yaml b/llama_stack/templates/sambanova/run.yaml
index 5068cccc0..45a139ea4 100644
--- a/llama_stack/templates/sambanova/run.yaml
+++ b/llama_stack/templates/sambanova/run.yaml
@@ -3,7 +3,6 @@ image_name: sambanova
 apis:
 - agents
 - inference
-- preprocessing
 - safety
 - telemetry
 - tool_runtime
@@ -72,13 +71,6 @@ providers:
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/registry.db
@@ -186,10 +178,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/sambanova/sambanova.py b/llama_stack/templates/sambanova/sambanova.py
index f2456bc5b..8b91f8712 100644
--- a/llama_stack/templates/sambanova/sambanova.py
+++ b/llama_stack/templates/sambanova/sambanova.py
@@ -6,7 +6,6 @@
 
 from pathlib import Path
 
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import Provider, ShieldInput, ToolGroupInput
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
@@ -35,7 +34,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::code-interpreter",
         "inline::rag-runtime",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "sambanova"
 
@@ -87,16 +85,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -115,7 +103,6 @@ def get_distribution_template() -> DistributionTemplate:
                 default_models=default_models,
                 default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml
index a13b8cd7c..9fe79647c 100644
--- a/llama_stack/templates/tgi/build.yaml
+++ b/llama_stack/templates/tgi/build.yaml
@@ -30,7 +30,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/tgi/run-with-safety.yaml b/llama_stack/templates/tgi/run-with-safety.yaml
index 4b79b03de..9d4f7b5b7 100644
--- a/llama_stack/templates/tgi/run-with-safety.yaml
+++ b/llama_stack/templates/tgi/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -103,13 +102,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
@@ -135,10 +127,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/tgi/run.yaml b/llama_stack/templates/tgi/run.yaml
index c6ef98aaf..0830e3317 100644
--- a/llama_stack/templates/tgi/run.yaml
+++ b/llama_stack/templates/tgi/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -102,13 +101,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
@@ -134,10 +126,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py
index 9f805c61e..22dcc3995 100644
--- a/llama_stack/templates/tgi/tgi.py
+++ b/llama_stack/templates/tgi/tgi.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -39,7 +38,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "tgi"
     inference_provider = Provider(
@@ -90,16 +88,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -116,7 +104,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
@@ -138,7 +125,6 @@ def get_distribution_template() -> DistributionTemplate:
                 ],
                 default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/together/build.yaml b/llama_stack/templates/together/build.yaml
index 630407cad..834a3ecaf 100644
--- a/llama_stack/templates/together/build.yaml
+++ b/llama_stack/templates/together/build.yaml
@@ -31,7 +31,4 @@ distribution_spec:
     - inline::rag-runtime
     - remote::model-context-protocol
     - remote::wolfram-alpha
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/together/run-with-safety.yaml b/llama_stack/templates/together/run-with-safety.yaml
index 470e32bae..2c5f1e580 100644
--- a/llama_stack/templates/together/run-with-safety.yaml
+++ b/llama_stack/templates/together/run-with-safety.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -112,13 +111,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
@@ -252,10 +244,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml
index 85c6d6f3a..1c1e554fa 100644
--- a/llama_stack/templates/together/run.yaml
+++ b/llama_stack/templates/together/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -107,13 +106,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
@@ -242,10 +234,6 @@ tool_groups:
   provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py
index 2a87501e2..a2bd87c97 100644
--- a/llama_stack/templates/together/together.py
+++ b/llama_stack/templates/together/together.py
@@ -7,7 +7,6 @@
 from pathlib import Path
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,
@@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
         "remote::model-context-protocol",
         "remote::wolfram-alpha",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "together"
     inference_provider = Provider(
@@ -85,16 +83,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="wolfram-alpha",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
         provider_id="sentence-transformers",
@@ -120,7 +108,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
                 default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
             ),
             "run-with-safety.yaml": RunConfigSettings(
@@ -167,7 +154,6 @@ def get_distribution_template() -> DistributionTemplate:
                     ),
                 ],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={
diff --git a/llama_stack/templates/vllm-gpu/build.yaml b/llama_stack/templates/vllm-gpu/build.yaml
index d91d32c40..8eb44dc1b 100644
--- a/llama_stack/templates/vllm-gpu/build.yaml
+++ b/llama_stack/templates/vllm-gpu/build.yaml
@@ -30,7 +30,4 @@ distribution_spec:
     - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
-    preprocessing:
-    - inline::basic
-    - inline::simple_chunking
 image_type: conda
diff --git a/llama_stack/templates/vllm-gpu/run.yaml b/llama_stack/templates/vllm-gpu/run.yaml
index 33b07ebd3..0c2a0e6d3 100644
--- a/llama_stack/templates/vllm-gpu/run.yaml
+++ b/llama_stack/templates/vllm-gpu/run.yaml
@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- preprocessing
 - safety
 - scoring
 - telemetry
@@ -107,13 +106,6 @@ providers:
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
     config: {}
-  preprocessing:
-  - provider_id: basic
-    provider_type: inline::basic
-    config: {}
-  - provider_id: simple_chunking
-    provider_type: inline::simple_chunking
-    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/registry.db
@@ -139,10 +131,6 @@ tool_groups:
   provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter
-preprocessors:
-- preprocessor_id: builtin::basic
-  provider_id: basic
-- preprocessor_id: builtin::chunking
-  provider_id: simple_chunking
+preprocessors: []
 server:
   port: 8321
diff --git a/llama_stack/templates/vllm-gpu/vllm.py b/llama_stack/templates/vllm-gpu/vllm.py
index 1c2038c04..9bfeadc8d 100644
--- a/llama_stack/templates/vllm-gpu/vllm.py
+++ b/llama_stack/templates/vllm-gpu/vllm.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from llama_stack.apis.models.models import ModelType
-from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
 from llama_stack.distribution.datatypes import ModelInput, Provider
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
@@ -36,7 +35,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inline::rag-runtime",
         "remote::model-context-protocol",
         ],
-        "preprocessing": ["inline::basic", "inline::simple_chunking"],
     }
     name = "vllm-gpu"
 
@@ -82,16 +80,6 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="code-interpreter",
         ),
     ]
-    default_preprocessors = [
-        PreprocessorInput(
-            preprocessor_id="builtin::basic",
-            provider_id="basic",
-        ),
-        PreprocessorInput(
-            preprocessor_id="builtin::chunking",
-            provider_id="simple_chunking",
-        ),
-    ]
 
     return DistributionTemplate(
         name=name,
@@ -108,7 +96,6 @@ def get_distribution_template() -> DistributionTemplate:
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
-                default_preprocessors=default_preprocessors,
             ),
         },
         run_config_env_vars={