Restrict the changes to the new preprocessing API only.

ilya-kolchinsky 2025-04-03 12:19:08 +02:00
parent 2008cd7921
commit 863f87aa15
90 changed files with 104 additions and 1138 deletions

View file

@ -8052,44 +8052,6 @@
],
"title": "HealthInfo"
},
"PreprocessorChainElement": {
"type": "object",
"properties": {
"preprocessor_id": {
"type": "string"
},
"options": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
}
},
"additionalProperties": false,
"required": [
"preprocessor_id"
],
"title": "PreprocessorChainElement"
},
"RAGDocument": {
"type": "object",
"properties": {
@ -8171,12 +8133,6 @@
},
"chunk_size_in_tokens": {
"type": "integer"
},
"preprocessor_chain": {
"type": "array",
"items": {
"$ref": "#/components/schemas/PreprocessorChainElement"
}
}
},
"additionalProperties": false,
@ -9382,6 +9338,44 @@
],
"title": "PreprocessingDataElement"
},
"PreprocessorChainElement": {
"type": "object",
"properties": {
"preprocessor_id": {
"type": "string"
},
"options": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
}
},
"additionalProperties": false,
"required": [
"preprocessor_id"
],
"title": "PreprocessorChainElement"
},
"PreprocessRequest": {
"type": "object",
"properties": {

View file

@ -5572,25 +5572,6 @@ components:
required:
- status
title: HealthInfo
PreprocessorChainElement:
type: object
properties:
preprocessor_id:
type: string
options:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
additionalProperties: false
required:
- preprocessor_id
title: PreprocessorChainElement
RAGDocument:
type: object
properties:
@ -5639,10 +5620,6 @@ components:
type: string
chunk_size_in_tokens:
type: integer
preprocessor_chain:
type: array
items:
$ref: '#/components/schemas/PreprocessorChainElement'
additionalProperties: false
required:
- documents
@ -6414,6 +6391,25 @@ components:
required:
- data_element_id
title: PreprocessingDataElement
PreprocessorChainElement:
type: object
properties:
preprocessor_id:
type: string
options:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
additionalProperties: false
required:
- preprocessor_id
title: PreprocessorChainElement
PreprocessRequest:
type: object
properties:
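
For orientation, a payload conforming to the relocated `PreprocessorChainElement` schema (shown in both the JSON and YAML specs above) might look like the following, sketched as a Python dict. The option keys here are illustrative; the schema permits arbitrary keys whose values may be null, boolean, number, string, array, or object.

```python
# Illustrative only: one element of a preprocessor chain, per the schema above.
chain_element = {
    "preprocessor_id": "builtin::chunking",  # required
    "options": {                             # optional, free-form per the schema
        "chunk_size_in_tokens": 512,
        "chunk_overlap_ratio": 4,
    },
}
```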

View file

@ -10,7 +10,6 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
| eval | `inline::meta-reference` |
| inference | `remote::nvidia` |
| post_training | `remote::nvidia` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `remote::nvidia` |
| scoring | `inline::basic` |
| telemetry | `inline::meta-reference` |

View file

@ -16,7 +16,6 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::bedrock` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `remote::bedrock` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -9,7 +9,6 @@ The `llamastack/distribution-cerebras` distribution consists of the following pr
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::cerebras`, `inline::sentence-transformers` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::fireworks`, `inline::sentence-transformers` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-groq` distribution consists of the following provid
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::groq` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `inline::meta-reference` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `inline::meta-reference-quantized` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::ollama` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -18,7 +18,6 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::vllm`, `inline::sentence-transformers` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -17,7 +17,6 @@ The `llamastack/distribution-sambanova` distribution consists of the following p
|-----|-------------|
| agents | `inline::meta-reference` |
| inference | `remote::sambanova` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |

View file

@ -20,7 +20,6 @@ The `llamastack/distribution-tgi` distribution consists of the following provide
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::tgi`, `inline::sentence-transformers` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -19,7 +19,6 @@ The `llamastack/distribution-together` distribution consists of the following pr
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::together`, `inline::sentence-transformers` |
| preprocessing | `inline::basic`, `inline::simple_chunking` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |

View file

@ -11,7 +11,6 @@ from pydantic import BaseModel, Field
from typing_extensions import Annotated, Protocol, runtime_checkable
from llama_stack.apis.common.content_types import URL, InterleavedContent
from llama_stack.apis.preprocessing import PreprocessorChain
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
@ -87,7 +86,6 @@ class RAGToolRuntime(Protocol):
documents: List[RAGDocument],
vector_db_id: str,
chunk_size_in_tokens: int = 512,
preprocessor_chain: Optional[PreprocessorChain] = None,
) -> None:
"""Index documents so they can be used by the RAG system"""
...
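
With `preprocessor_chain` dropped from the protocol, a call against the trimmed signature reduces to the following sketch (the client object and document values are hypothetical):

```python
# Hypothetical call site for the trimmed RAGToolRuntime.insert signature above.
await rag_tool.insert(
    documents=[RAGDocument(document_id="doc-1", content="hello world", mime_type="text/plain")],
    vector_db_id="my-vector-db",
    chunk_size_in_tokens=512,  # the only chunking knob left on this API
)
```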

View file

@ -677,13 +677,12 @@ class ToolRuntimeRouter(ToolRuntime):
documents: List[RAGDocument],
vector_db_id: str,
chunk_size_in_tokens: int = 512,
preprocessor_chain: Optional[PreprocessorChain] = None,
) -> None:
logger.debug(
f"ToolRuntimeRouter.RagToolImpl.insert: {vector_db_id}, {len(documents)} documents, chunk_size={chunk_size_in_tokens}"
)
return await self.routing_table.get_provider_impl("insert_into_memory").insert(
documents, vector_db_id, chunk_size_in_tokens, preprocessor_chain
documents, vector_db_id, chunk_size_in_tokens
)
def __init__(

View file

@ -1,18 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .config import InlineBasicPreprocessorConfig
async def get_provider_impl(
config: InlineBasicPreprocessorConfig,
_deps,
):
from .basic import InclineBasicPreprocessorImpl
impl = InclineBasicPreprocessorImpl(config)
await impl.initialize()
return impl

View file

@ -1,151 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import logging
import re
from typing import Any, List, Optional
import httpx
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.preprocessing import (
Preprocessing,
PreprocessingDataElement,
PreprocessingDataFormat,
PreprocessingDataType,
Preprocessor,
PreprocessorChain,
PreprocessorOptions,
PreprocessorResponse,
)
from llama_stack.providers.datatypes import PreprocessorsProtocolPrivate
from llama_stack.providers.inline.preprocessing.basic.config import InlineBasicPreprocessorConfig
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
from llama_stack.providers.utils.memory.vector_store import content_from_data, parse_pdf
log = logging.getLogger(__name__)
class InclineBasicPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):
# this preprocessor can either receive documents (text or binary) or document URIs
input_types = [
PreprocessingDataType.binary_document,
PreprocessingDataType.raw_text_document,
PreprocessingDataType.document_uri,
]
# this preprocessor optionally retrieves the documents and converts them into plain text
output_types = [PreprocessingDataType.raw_text_document]
preprocessor_store = None
URL_VALIDATION_PATTERN = re.compile("^(https?://|file://|data:)")
def __init__(self, config: InlineBasicPreprocessorConfig) -> None:
self.config = config
async def initialize(self) -> None:
pass
async def shutdown(self) -> None:
pass
async def register_preprocessor(self, preprocessor: Preprocessor) -> None:
pass
async def unregister_preprocessor(self, preprocessor_id: str) -> None:
pass
async def do_preprocess(
self,
preprocessor_id: str,
preprocessor_inputs: List[PreprocessingDataElement],
options: Optional[PreprocessorOptions] = None,
) -> PreprocessorResponse:
results = []
for inp in preprocessor_inputs:
input_type = self._resolve_input_type(inp)
if input_type == PreprocessingDataType.document_uri:
document = await self._fetch_document(inp)
if document is None:
continue
elif input_type == PreprocessingDataType.binary_document:
document = inp.data_element_path_or_content
if inp.data_element_format is None:
log.error(f"Binary document format is not provided for {inp.data_element_id}, skipping it")
continue
if inp.data_element_format != PreprocessingDataFormat.pdf:
log.error(
f"Unsupported binary document type {inp.data_element_format} for {inp.data_element_id}, skipping it"
)
continue
elif input_type == PreprocessingDataType.raw_text_document:
document = interleaved_content_as_str(inp.data_element_path_or_content) # type: ignore
else:
log.error(f"Unexpected preprocessor input type: {input_type}")
continue
if inp.data_element_format == PreprocessingDataFormat.pdf:
document = parse_pdf(document)
new_result = PreprocessingDataElement(
data_element_id=inp.data_element_id,
data_element_type=PreprocessingDataType.raw_text_document,
data_element_format=PreprocessingDataFormat.txt,
data_element_path_or_content=document,
)
results.append(new_result)
return PreprocessorResponse(
success=True, output_data_type=PreprocessingDataType.raw_text_document, results=results
)
async def preprocess(
self,
preprocessors: PreprocessorChain,
preprocessor_inputs: List[PreprocessingDataElement],
) -> PreprocessorResponse:
return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)
@staticmethod
def _resolve_input_type(preprocessor_input: PreprocessingDataElement) -> PreprocessingDataType:
if preprocessor_input.data_element_type is not None:
return preprocessor_input.data_element_type
if isinstance(preprocessor_input.data_element_path_or_content, URL):
return PreprocessingDataType.document_uri
if InclineBasicPreprocessorImpl.URL_VALIDATION_PATTERN.match(
str(preprocessor_input.data_element_path_or_content)
):
return PreprocessingDataType.document_uri
if preprocessor_input.data_element_format == PreprocessingDataFormat.pdf:
return PreprocessingDataType.binary_document
return PreprocessingDataType.raw_text_document
@staticmethod
async def _fetch_document(preprocessor_input: PreprocessingDataElement) -> Any:
if isinstance(preprocessor_input.data_element_path_or_content, str):
url = preprocessor_input.data_element_path_or_content
if not InclineBasicPreprocessorImpl.URL_VALIDATION_PATTERN.match(url):
log.error(f"Unexpected URL: {url}")
return None
elif isinstance(preprocessor_input.data_element_path_or_content, URL):
url = preprocessor_input.data_element_path_or_content.uri
else:
log.error(
f"Unexpected type {type(preprocessor_input.data_element_path_or_content)} for input {preprocessor_input.data_element_path_or_content}, skipping this input."
)
return None
if url.startswith("data:"):
return content_from_data(url)
async with httpx.AsyncClient() as client:
r = await client.get(url)
return r.content if preprocessor_input.data_element_format == PreprocessingDataFormat.pdf else r.text
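
For context on what this deletion removes, here is a sketch of driving the basic preprocessor directly. The setup and URL are assumed; the identifiers are as in the listing above, and the call would only work while the deleted module was still importable.

```python
# Sketch, assuming the deleted module were still importable.
impl = InclineBasicPreprocessorImpl(InlineBasicPreprocessorConfig())
await impl.initialize()

element = PreprocessingDataElement(
    data_element_id="doc-1",
    data_element_path_or_content=URL(uri="https://example.com/report.pdf"),
    data_element_format=PreprocessingDataFormat.pdf,  # downloaded bytes then go through parse_pdf()
)
response = await impl.do_preprocess(preprocessor_id="builtin::basic", preprocessor_inputs=[element])
assert response.success and response.output_data_type == PreprocessingDataType.raw_text_document
```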

View file

@ -1,9 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from pydantic import BaseModel
class InlineBasicPreprocessorConfig(BaseModel): ...

View file

@ -1,18 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .config import InclineSimpleChunkingConfig
async def get_provider_impl(
config: InclineSimpleChunkingConfig,
_deps,
):
from .simple_chunking import InclineSimpleChunkingImpl
impl = InclineSimpleChunkingImpl(config)
await impl.initialize()
return impl

View file

@ -1,11 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from pydantic import BaseModel
class InclineSimpleChunkingConfig(BaseModel):
chunk_size_in_tokens: int = 512
chunk_overlap_ratio: int = 4

View file

@ -1,116 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import logging
from enum import Enum
from typing import List, Optional, Tuple
from llama_models.llama3.api import Tokenizer
from llama_stack.apis.preprocessing import (
Preprocessing,
PreprocessingDataElement,
PreprocessingDataFormat,
PreprocessingDataType,
Preprocessor,
PreprocessorChain,
PreprocessorOptions,
PreprocessorResponse,
)
from llama_stack.apis.vector_io import Chunk
from llama_stack.providers.datatypes import PreprocessorsProtocolPrivate
from llama_stack.providers.inline.preprocessing.simple_chunking import InclineSimpleChunkingConfig
log = logging.getLogger(__name__)
class SimpleChunkingOptions(Enum):
chunk_size_in_tokens = "chunk_size_in_tokens"
chunk_overlap_ratio = "chunk_overlap_ratio"
class InclineSimpleChunkingImpl(Preprocessing, PreprocessorsProtocolPrivate):
# this preprocessor receives plain text and returns chunks
input_types = [PreprocessingDataType.raw_text_document]
output_types = [PreprocessingDataType.chunks]
preprocessor_store = None
def __init__(self, config: InclineSimpleChunkingConfig) -> None:
self.config = config
async def initialize(self) -> None: ...
async def shutdown(self) -> None: ...
async def register_preprocessor(self, preprocessor: Preprocessor) -> None: ...
async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...
async def do_preprocess(
self,
preprocessor_id: str,
preprocessor_inputs: List[PreprocessingDataElement],
options: Optional[PreprocessorOptions] = None,
) -> PreprocessorResponse:
chunks = []
window_len, overlap_len = self._resolve_chunk_size_params(options)
for inp in preprocessor_inputs:
new_chunks = self.make_overlapped_chunks(
inp.data_element_id, str(inp.data_element_path_or_content), window_len, overlap_len
)
for i, chunk in enumerate(new_chunks):
new_chunk_data_element = PreprocessingDataElement(
data_element_id=f"{inp.data_element_id}_chunk_{i}",
data_element_type=PreprocessingDataType.chunks,
data_element_format=PreprocessingDataFormat.txt,
data_element_path_or_content=chunk,
)
chunks.append(new_chunk_data_element)
return PreprocessorResponse(success=True, output_data_type=PreprocessingDataType.chunks, results=chunks)
async def preprocess(
self,
preprocessors: PreprocessorChain,
preprocessor_inputs: List[PreprocessingDataElement],
) -> PreprocessorResponse:
return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)
def _resolve_chunk_size_params(self, options: PreprocessorOptions | None) -> Tuple[int, int]:
window_len = (options or {}).get(
str(SimpleChunkingOptions.chunk_size_in_tokens), self.config.chunk_size_in_tokens
)
chunk_overlap_ratio = (options or {}).get(
str(SimpleChunkingOptions.chunk_overlap_ratio), self.config.chunk_overlap_ratio
)
overlap_len = window_len // chunk_overlap_ratio
return window_len, overlap_len
@staticmethod
def make_overlapped_chunks(document_id: str, text: str, window_len: int, overlap_len: int) -> List[Chunk]:
tokenizer = Tokenizer.get_instance()
tokens = tokenizer.encode(text, bos=False, eos=False)
chunks = []
for i in range(0, len(tokens), window_len - overlap_len):
toks = tokens[i : i + window_len]
chunk = tokenizer.decode(toks)
# chunk is a string
chunks.append(
Chunk(
content=chunk,
metadata={
"token_count": len(toks),
"document_id": document_id,
},
)
)
return chunks
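
The overlap arithmetic above is easy to misread: with the defaults from `InclineSimpleChunkingConfig` (window of 512 tokens, overlap ratio 4), consecutive chunks share 128 tokens, so the loop advances 384 tokens per chunk. A toy illustration:

```python
# Toy illustration of the stride make_overlapped_chunks uses (defaults from the config above).
window_len = 512
overlap_len = window_len // 4           # 128 tokens shared between neighboring chunks
stride = window_len - overlap_len       # 384: the step of the range() loop
print(list(range(0, 1200, stride)))     # chunk start offsets for a 1200-token doc: [0, 384, 768, 1152]
```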

View file

@ -14,6 +14,6 @@ from .config import RagToolRuntimeConfig
async def get_provider_impl(config: RagToolRuntimeConfig, deps: Dict[Api, Any]):
from .memory import MemoryToolRuntimeImpl
impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference], deps[Api.preprocessing])
impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference])
await impl.initialize()
return impl

View file

@ -19,14 +19,6 @@ from llama_stack.apis.common.content_types import (
TextContentItem,
)
from llama_stack.apis.inference import Inference
from llama_stack.apis.preprocessing import (
Preprocessing,
PreprocessingDataElement,
PreprocessingDataFormat,
PreprocessingDataType,
PreprocessorChain,
PreprocessorChainElement,
)
from llama_stack.apis.tools import (
ListToolDefsResponse,
RAGDocument,
@ -41,6 +33,10 @@ from llama_stack.apis.tools import (
)
from llama_stack.apis.vector_io import QueryChunksResponse, VectorIO
from llama_stack.providers.datatypes import ToolsProtocolPrivate
from llama_stack.providers.utils.memory.vector_store import (
content_from_doc,
make_overlapped_chunks,
)
from .config import RagToolRuntimeConfig
from .context_retriever import generate_rag_query
@ -53,22 +49,15 @@ def make_random_string(length: int = 8):
class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
DEFAULT_PREPROCESSING_CHAIN = [
PreprocessorChainElement(preprocessor_id="builtin::basic"),
PreprocessorChainElement(preprocessor_id="builtin::chunking"),
]
def __init__(
self,
config: RagToolRuntimeConfig,
vector_io_api: VectorIO,
inference_api: Inference,
preprocessing_api: Preprocessing,
):
self.config = config
self.vector_io_api = vector_io_api
self.inference_api = inference_api
self.preprocessing_api = preprocessing_api
async def initialize(self):
pass
@ -87,32 +76,24 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
documents: List[RAGDocument],
vector_db_id: str,
chunk_size_in_tokens: int = 512,
preprocessor_chain: Optional[PreprocessorChain] = None,
) -> None:
preprocessor_inputs = [self._rag_document_to_preprocessor_input(d) for d in documents]
preprocessor_response = await self.preprocessing_api.preprocess(
preprocessors=preprocessor_chain or self.DEFAULT_PREPROCESSING_CHAIN,
preprocessor_inputs=preprocessor_inputs,
)
if not preprocessor_response.success:
log.error("Preprocessor chain returned an error")
return
if preprocessor_response.output_data_type != PreprocessingDataType.chunks:
log.error(
f"Preprocessor chain returned {preprocessor_response.output_data_type} instead of {PreprocessingDataType.chunks}"
chunks = []
for doc in documents:
content = await content_from_doc(doc)
chunks.extend(
make_overlapped_chunks(
doc.document_id,
content,
chunk_size_in_tokens,
chunk_size_in_tokens // 4,
)
)
return
chunks = preprocessor_response.results
if not chunks:
log.error("No chunks returned by the preprocessor chain")
return
actual_chunks = [chunk.data_element_path_or_content for chunk in chunks]
await self.vector_io_api.insert_chunks(
chunks=actual_chunks, # type: ignore
chunks=chunks,
vector_db_id=vector_db_id,
)
@ -226,16 +207,3 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
content=result.content,
metadata=result.metadata,
)
@staticmethod
def _rag_document_to_preprocessor_input(document: RAGDocument) -> PreprocessingDataElement:
if document.mime_type == "application/pdf":
data_element_format = PreprocessingDataFormat.pdf
else:
data_element_format = None
return PreprocessingDataElement(
data_element_id=document.document_id,
data_element_format=data_element_format,
data_element_path_or_content=document.content,
)
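
Net effect of this hunk: `insert` goes back to the fixed fetch-then-chunk path, with the overlap hardwired to a quarter of the chunk size instead of being delegated to a preprocessor chain. A condensed restatement of the restored body, since the interleaved hunk above is hard to read (names as in the hunk):

```python
# Condensed restatement of the restored insert() body shown above (runs inside the method).
chunks = []
for doc in documents:
    content = await content_from_doc(doc)       # resolve URL/data-URI/inline content to text
    chunks.extend(
        make_overlapped_chunks(
            doc.document_id,
            content,
            chunk_size_in_tokens,                # default 512
            chunk_size_in_tokens // 4,           # fixed 25% overlap, no longer configurable
        )
    )
await self.vector_io_api.insert_chunks(chunks=chunks, vector_db_id=vector_db_id)
```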

View file

@ -7,28 +7,9 @@
from typing import List
from llama_stack.providers.datatypes import (
Api,
InlineProviderSpec,
ProviderSpec,
)
def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.preprocessing,
provider_type="inline::basic",
pip_packages=["httpx", "pypdf"],
module="llama_stack.providers.inline.preprocessing.basic",
config_class="llama_stack.providers.inline.preprocessing.basic.InlineBasicPreprocessorConfig",
api_dependencies=[],
),
InlineProviderSpec(
api=Api.preprocessing,
provider_type="inline::simple_chunking",
pip_packages=[],
module="llama_stack.providers.inline.preprocessing.simple_chunking",
config_class="llama_stack.providers.inline.preprocessing.simple_chunking.InclineSimpleChunkingConfig",
api_dependencies=[],
),
]
return []

View file

@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]:
],
module="llama_stack.providers.inline.tool_runtime.rag",
config_class="llama_stack.providers.inline.tool_runtime.rag.config.RagToolRuntimeConfig",
api_dependencies=[Api.vector_io, Api.inference, Api.preprocessing],
api_dependencies=[Api.vector_io, Api.inference],
),
InlineProviderSpec(
api=Api.tool_runtime,

View file

@ -6,7 +6,6 @@
from pathlib import Path
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import Provider, ToolGroupInput
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.remote.inference.bedrock.models import MODEL_ENTRIES
@ -34,7 +33,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "bedrock"
vector_io_provider = Provider(
@ -63,17 +61,6 @@ def get_distribution_template() -> DistributionTemplate:
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
distro_type="self_hosted",
@ -89,7 +76,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models,
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -29,7 +29,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -97,13 +96,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/registry.db
@ -150,10 +142,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -29,7 +29,4 @@ distribution_spec:
- remote::tavily-search
- inline::code-interpreter
- inline::rag-runtime
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
from llama_stack.providers.inline.inference.sentence_transformers import (
SentenceTransformersInferenceConfig,
@ -38,7 +37,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::code-interpreter",
"inline::rag-runtime",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "cerebras"
@ -84,16 +82,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name="cerebras",
@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=default_models + [embedding_model],
default_shields=[],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -100,13 +99,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/registry.db
@ -148,10 +140,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -30,7 +30,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -6,7 +6,6 @@
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "ci-tests"
inference_provider = Provider(
@ -78,16 +76,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
available_models = {
"fireworks": MODEL_ENTRIES,
}
@ -117,7 +105,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models + [embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
),
},

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -100,13 +99,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/registry.db
@ -226,10 +218,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -30,7 +30,4 @@ distribution_spec:
- remote::tavily-search
- inline::code-interpreter
- inline::rag-runtime
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -5,7 +5,6 @@
# the root directory of this source tree.
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -34,7 +33,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::code-interpreter",
"inline::rag-runtime",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "dell"
inference_provider = Provider(
@ -94,16 +92,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -119,7 +107,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -133,7 +120,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=[inference_model, safety_model, embedding_model],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -100,13 +99,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db
@ -137,10 +129,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -96,13 +95,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db
@ -128,10 +120,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -34,7 +34,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from typing import List, Tuple
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -112,7 +111,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "dev"
@ -157,16 +155,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
embedding_model = ModelInput(
model_id="all-MiniLM-L6-v2",
provider_id=embedding_provider.provider_id,
@ -193,7 +181,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models + [embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
),
},

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -129,13 +128,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/registry.db
@ -382,10 +374,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -31,7 +31,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "fireworks"
@ -97,16 +95,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -125,7 +113,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=default_models + [embedding_model],
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -171,7 +158,6 @@ def get_distribution_template() -> DistributionTemplate:
),
],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -112,13 +111,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
@ -245,10 +237,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -107,13 +106,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
@ -235,10 +227,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -26,7 +26,4 @@ distribution_spec:
- remote::tavily-search
- inline::code-interpreter
- inline::rag-runtime
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
from llama_stack.providers.inline.inference.sentence_transformers import (
SentenceTransformersInferenceConfig,
@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::code-interpreter",
"inline::rag-runtime",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "groq"
@ -79,16 +77,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -105,7 +93,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models + [embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -100,13 +99,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/registry.db
@ -173,10 +165,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -29,7 +29,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -5,7 +5,6 @@
# the root directory of this source tree.
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "hf-endpoint"
inference_provider = Provider(
@ -86,16 +84,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -136,7 +123,6 @@ def get_distribution_template() -> DistributionTemplate:
],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -108,13 +107,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
@ -145,10 +137,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -103,13 +102,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
@ -135,10 +127,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -30,7 +30,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -5,7 +5,6 @@
# the root directory of this source tree.
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "hf-serverless"
@ -87,16 +85,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -113,7 +101,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -137,7 +124,6 @@ def get_distribution_template() -> DistributionTemplate:
],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -108,13 +107,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
@ -145,10 +137,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -103,13 +102,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
@ -135,10 +127,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -29,7 +29,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@ -41,7 +40,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "meta-reference-gpu"
inference_provider = Provider(
@ -93,16 +91,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@ -118,7 +106,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -143,7 +130,6 @@ def get_distribution_template() -> DistributionTemplate:
],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -110,13 +109,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
@ -147,10 +139,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -104,13 +103,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
@ -136,10 +128,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -29,7 +29,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
from llama_stack.providers.inline.inference.meta_reference import (
MetaReferenceQuantizedInferenceConfig,
@ -36,7 +35,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
default_tool_groups = [
ToolGroupInput(
@ -52,16 +50,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
name = "meta-reference-quantized-gpu"
inference_provider = Provider(
provider_id="meta-reference-inference",
@ -108,7 +96,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@ -106,13 +105,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-quantized-gpu}/registry.db
@ -138,10 +130,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -22,7 +22,4 @@ distribution_spec:
- inline::basic
tool_runtime:
- inline::rag-runtime
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@ -6,7 +6,6 @@
from pathlib import Path
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput, ToolGroupInput
from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
@ -26,7 +25,6 @@ def get_distribution_template() -> DistributionTemplate:
"datasetio": ["inline::localfs"],
"scoring": ["inline::basic"],
"tool_runtime": ["inline::rag-runtime"],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
inference_provider = Provider(
@ -57,16 +55,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="rag-runtime",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
default_models = get_model_registry(available_models)
return DistributionTemplate(
@ -84,7 +72,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models,
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@ -96,7 +83,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=[inference_model, safety_model],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}", provider_id="nvidia")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@ -6,7 +6,6 @@ apis:
- eval
- inference
- post_training
- preprocessing
- safety
- scoring
- telemetry
@ -85,13 +84,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
@ -114,10 +106,6 @@ benchmarks: []
tool_groups:
- toolgroup_id: builtin::rag
provider_id: rag-runtime
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@ -6,7 +6,6 @@ apis:
- eval
- inference
- post_training
- preprocessing
- safety
- scoring
- telemetry
@ -80,13 +79,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
@ -217,10 +209,6 @@ benchmarks: []
tool_groups:
- toolgroup_id: builtin::rag
provider_id: rag-runtime
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -30,7 +30,4 @@ distribution_spec:
- inline::rag-runtime
- remote::model-context-protocol
- remote::wolfram-alpha
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@@ -37,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
"remote::model-context-protocol",
"remote::wolfram-alpha",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "ollama"
inference_provider = Provider(
@@ -86,16 +84,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="wolfram-alpha",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@@ -112,7 +100,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@@ -147,7 +134,6 @@ def get_distribution_template() -> DistributionTemplate:
),
],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -105,13 +104,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
@@ -148,10 +140,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -103,13 +102,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
@@ -138,10 +130,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -31,7 +31,4 @@ distribution_spec:
- inline::rag-runtime
- remote::model-context-protocol
- remote::wolfram-alpha
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -116,13 +115,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -155,10 +147,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -109,13 +108,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
@@ -143,10 +135,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@@ -40,7 +39,6 @@ def get_distribution_template() -> DistributionTemplate:
"remote::model-context-protocol",
"remote::wolfram-alpha",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "remote-vllm"
inference_provider = Provider(
@@ -95,16 +93,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="wolfram-alpha",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@@ -120,7 +108,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@@ -144,7 +131,6 @@ def get_distribution_template() -> DistributionTemplate:
],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@@ -19,7 +19,4 @@ distribution_spec:
- remote::tavily-search
- inline::code-interpreter
- inline::rag-runtime
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -3,7 +3,6 @@ image_name: sambanova
apis:
- agents
- inference
- preprocessing
- safety
- telemetry
- tool_runtime
@@ -72,13 +71,6 @@ providers:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/registry.db
@@ -186,10 +178,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -6,7 +6,6 @@
from pathlib import Path
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import Provider, ShieldInput, ToolGroupInput
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
@@ -35,7 +34,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::code-interpreter",
"inline::rag-runtime",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "sambanova"
@@ -87,16 +85,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@@ -115,7 +103,6 @@ def get_distribution_template() -> DistributionTemplate:
default_models=default_models,
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@@ -30,7 +30,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -103,13 +102,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
@@ -135,10 +127,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -102,13 +101,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
@@ -134,10 +126,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@@ -39,7 +38,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "tgi"
inference_provider = Provider(
@@ -90,16 +88,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@@ -116,7 +104,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
"run-with-safety.yaml": RunConfigSettings(
provider_overrides={
@@ -138,7 +125,6 @@ def get_distribution_template() -> DistributionTemplate:
],
default_shields=[ShieldInput(shield_id="${env.SAFETY_MODEL}")],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@@ -31,7 +31,4 @@ distribution_spec:
- inline::rag-runtime
- remote::model-context-protocol
- remote::wolfram-alpha
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -112,13 +111,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
@@ -252,10 +244,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -107,13 +106,6 @@ providers:
provider_type: remote::wolfram-alpha
config:
api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
@@ -242,10 +234,6 @@ tool_groups:
provider_id: code-interpreter
- toolgroup_id: builtin::wolfram_alpha
provider_id: wolfram-alpha
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -7,7 +7,6 @@
from pathlib import Path
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import (
ModelInput,
Provider,
@@ -45,7 +44,6 @@ def get_distribution_template() -> DistributionTemplate:
"remote::model-context-protocol",
"remote::wolfram-alpha",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "together"
inference_provider = Provider(
@@ -85,16 +83,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="wolfram-alpha",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
embedding_model = ModelInput(
model_id="all-MiniLM-L6-v2",
provider_id="sentence-transformers",
@@ -120,7 +108,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=default_models + [embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
),
"run-with-safety.yaml": RunConfigSettings(
@@ -167,7 +154,6 @@ def get_distribution_template() -> DistributionTemplate:
),
],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={

View file

@@ -30,7 +30,4 @@ distribution_spec:
- inline::code-interpreter
- inline::rag-runtime
- remote::model-context-protocol
preprocessing:
- inline::basic
- inline::simple_chunking
image_type: conda

View file

@@ -5,7 +5,6 @@ apis:
- datasetio
- eval
- inference
- preprocessing
- safety
- scoring
- telemetry
@@ -107,13 +106,6 @@ providers:
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
config: {}
preprocessing:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: simple_chunking
provider_type: inline::simple_chunking
config: {}
metadata_store:
type: sqlite
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/registry.db
@@ -139,10 +131,6 @@ tool_groups:
provider_id: rag-runtime
- toolgroup_id: builtin::code_interpreter
provider_id: code-interpreter
preprocessors:
- preprocessor_id: builtin::basic
provider_id: basic
- preprocessor_id: builtin::chunking
provider_id: simple_chunking
preprocessors: []
server:
port: 8321

View file

@@ -5,7 +5,6 @@
# the root directory of this source tree.
from llama_stack.apis.models.models import ModelType
from llama_stack.apis.preprocessors.preprocessors import PreprocessorInput
from llama_stack.distribution.datatypes import ModelInput, Provider
from llama_stack.providers.inline.inference.sentence_transformers import (
SentenceTransformersInferenceConfig,
@@ -36,7 +35,6 @@ def get_distribution_template() -> DistributionTemplate:
"inline::rag-runtime",
"remote::model-context-protocol",
],
"preprocessing": ["inline::basic", "inline::simple_chunking"],
}
name = "vllm-gpu"
@@ -82,16 +80,6 @@ def get_distribution_template() -> DistributionTemplate:
provider_id="code-interpreter",
),
]
default_preprocessors = [
PreprocessorInput(
preprocessor_id="builtin::basic",
provider_id="basic",
),
PreprocessorInput(
preprocessor_id="builtin::chunking",
provider_id="simple_chunking",
),
]
return DistributionTemplate(
name=name,
@@ -108,7 +96,6 @@ def get_distribution_template() -> DistributionTemplate:
},
default_models=[inference_model, embedding_model],
default_tool_groups=default_tool_groups,
default_preprocessors=default_preprocessors,
),
},
run_config_env_vars={