Mirror of https://github.com/meta-llama/llama-stack.git (synced 2026-01-01 19:10:00 +00:00)
Simplified the preprocessing interface.
commit ad4cf97604 (parent 1eeba2cc8a)
8 changed files with 31 additions and 43 deletions
@@ -13,7 +13,13 @@ from llama_stack.apis.benchmarks import Benchmark
 from llama_stack.apis.datasets import Dataset
 from llama_stack.apis.datatypes import Api
 from llama_stack.apis.models import Model
-from llama_stack.apis.preprocessing import Preprocessor
+from llama_stack.apis.preprocessing import (
+    PreprocessingDataElement,
+    PreprocessingDataType,
+    Preprocessor,
+    PreprocessorOptions,
+    PreprocessorResponse,
+)
 from llama_stack.apis.scoring_functions import ScoringFn
 from llama_stack.apis.shields import Shield
 from llama_stack.apis.tools import Tool

@@ -60,10 +66,20 @@ class ToolsProtocolPrivate(Protocol):


 class PreprocessorsProtocolPrivate(Protocol):
+    input_types: List[PreprocessingDataType]
+    output_types: List[PreprocessingDataType]
+
     async def register_preprocessor(self, preprocessor: Preprocessor) -> None: ...

     async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...

+    async def do_preprocess(
+        self,
+        preprocessor_id: str,
+        preprocessor_inputs: List[PreprocessingDataElement],
+        options: Optional[PreprocessorOptions] = None,
+    ) -> PreprocessorResponse: ...
+

 @json_schema_type
 class ProviderSpec(BaseModel):

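For orientation, here is a minimal sketch of what an inline provider could look like against the revised protocol: it advertises its input and output data types and answers do_preprocess. Only the PreprocessorsProtocolPrivate signatures come from this commit; the class name ExamplePassthroughPreprocessor and its passthrough behaviour are illustrative assumptions.

from typing import List, Optional

from llama_stack.apis.preprocessing import (
    PreprocessingDataElement,
    PreprocessingDataType,
    Preprocessor,
    PreprocessorOptions,
    PreprocessorResponse,
)


class ExamplePassthroughPreprocessor:
    # Hypothetical provider shaped like PreprocessorsProtocolPrivate from the hunk above.
    input_types: List[PreprocessingDataType] = [PreprocessingDataType.raw_text_document]
    output_types: List[PreprocessingDataType] = [PreprocessingDataType.raw_text_document]

    async def register_preprocessor(self, preprocessor: Preprocessor) -> None:
        # A real provider would record the registered preprocessor here.
        ...

    async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...

    async def do_preprocess(
        self,
        preprocessor_id: str,
        preprocessor_inputs: List[PreprocessingDataElement],
        options: Optional[PreprocessorOptions] = None,
    ) -> PreprocessorResponse:
        # Passthrough: return the inputs unchanged, tagged with the raw-text data type.
        return PreprocessorResponse(
            success=True,
            output_data_type=PreprocessingDataType.raw_text_document,
            results=preprocessor_inputs,
        )
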
@@ -52,7 +52,7 @@ class InclineBasicPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):

     async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...

-    async def preprocess(
+    async def do_preprocess(
         self,
         preprocessor_id: str,
         preprocessor_inputs: List[PreprocessingDataElement],

@@ -98,12 +98,12 @@ class InclineBasicPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):
             success=True, output_data_type=PreprocessingDataType.raw_text_document, results=results
         )

-    async def chain_preprocess(
+    async def preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessingDataElement],
     ) -> PreprocessorResponse:
-        return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)
+        return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

     @staticmethod
     def _resolve_input_type(preprocessor_input: PreprocessingDataElement) -> PreprocessingDataType:

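Taken together, these two hunks show the renaming scheme applied across all of the inline preprocessor providers: the per-preprocessor entry point formerly named preprocess becomes do_preprocess, while the chain-level chain_preprocess takes over the shorter preprocess name. A rough sketch of the resulting call paths, assuming an impl object shaped like InclineBasicPreprocessorImpl (the helper function, the preprocessor id, and the chain value are illustrative, not part of the commit):

from typing import List

from llama_stack.apis.preprocessing import PreprocessingDataElement


async def run_both_paths(impl, chain, docs: List[PreprocessingDataElement]):
    # Single-step path: address one registered preprocessor directly by id.
    single = await impl.do_preprocess(preprocessor_id="example-preprocessor", preprocessor_inputs=docs)

    # Chain path: the method formerly named chain_preprocess now answers to preprocess.
    chained = await impl.preprocess(preprocessors=chain, preprocessor_inputs=docs)
    return single, chained
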
@@ -47,7 +47,7 @@ class InclineDoclingPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):

     async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...

-    async def preprocess(
+    async def do_preprocess(
         self,
         preprocessor_id: str,
         preprocessor_inputs: List[PreprocessingDataElement],

@@ -106,9 +106,9 @@ class InclineDoclingPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):
         )
         return PreprocessorResponse(success=True, output_data_type=output_data_type, results=results)

-    async def chain_preprocess(
+    async def preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessingDataElement],
     ) -> PreprocessorResponse:
-        return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)
+        return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

@@ -47,7 +47,7 @@ class InclineSimpleChunkingImpl(Preprocessing, PreprocessorsProtocolPrivate):

     async def unregister_preprocessor(self, preprocessor_id: str) -> None: ...

-    async def preprocess(
+    async def do_preprocess(
         self,
         preprocessor_id: str,
         preprocessor_inputs: List[PreprocessingDataElement],

@@ -72,12 +72,12 @@ class InclineSimpleChunkingImpl(Preprocessing, PreprocessorsProtocolPrivate):

         return PreprocessorResponse(success=True, output_data_type=PreprocessingDataType.chunks, results=chunks)

-    async def chain_preprocess(
+    async def preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessingDataElement],
     ) -> PreprocessorResponse:
-        return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)
+        return await self.do_preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

     def _resolve_chunk_size_params(self, options: PreprocessorOptions) -> Tuple[int, int]:
         window_len = (options or {}).get(

@@ -81,7 +81,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         preprocessor_chain: Optional[PreprocessorChain] = None,
     ) -> None:
         preprocessor_inputs = [self._rag_document_to_preprocessor_input(d) for d in documents]
-        preprocessor_response = await self.preprocessing_api.chain_preprocess(
+        preprocessor_response = await self.preprocessing_api.preprocess(
             preprocessors=preprocessor_chain or self.DEFAULT_PREPROCESSING_CHAIN,
             preprocessor_inputs=preprocessor_inputs,
         )

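This is the only consumer-side change: the RAG tool runtime keeps passing a PreprocessorChain, it just calls the renamed method. A hedged sketch of an equivalent standalone call, assuming a preprocessing_api client, a fallback chain, and inputs already converted to PreprocessingDataElement (the ingest helper and its error handling are illustrative, not part of the commit):

from typing import List

from llama_stack.apis.preprocessing import PreprocessingDataElement


async def ingest(preprocessing_api, preprocessor_chain, fallback_chain, inputs: List[PreprocessingDataElement]):
    # Mirrors the updated call site: chain_preprocess is gone, preprocess takes the chain directly.
    response = await preprocessing_api.preprocess(
        preprocessors=preprocessor_chain or fallback_chain,
        preprocessor_inputs=inputs,
    )
    if not response.success:
        # PreprocessorResponse carries a success flag (see the hunks above); bail out on failure.
        return []
    return response.results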