Mirror of https://github.com/meta-llama/llama-stack.git (synced 2026-01-02 20:20:01 +00:00)
Added output type to PreprocessorResponse.
commit 4c81a72214
parent b981181b25
7 changed files with 33 additions and 25 deletions
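
This commit replaces the boolean status field on PreprocessorResponse with success and adds a preprocessor_output_type field, so callers can check what kind of data a preprocessor (or a preprocessor chain) produced. As a rough orientation, the response model implied by the hunks below might look like the following sketch; the Pydantic base class, the enum values, and the element type of results are assumptions for illustration, not the actual llama-stack definitions:

    from enum import Enum
    from typing import Any, List, Optional

    from pydantic import BaseModel


    class PreprocessingDataType(str, Enum):
        # Member values are assumed; only the member names appear in this diff.
        raw_text_document = "raw_text_document"
        chunks = "chunks"


    class PreprocessorResponse(BaseModel):
        success: bool  # replaces the old boolean "status" field
        preprocessor_output_type: PreprocessingDataType  # new field added by this commit
        results: Optional[List[Any]] = None  # raw text documents or chunks, depending on the output type
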
@@ -88,13 +88,14 @@ class InclineBasicPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate):
             results.append(document)
 
-        return PreprocessorResponse(status=True, results=results)
+        return PreprocessorResponse(
+            success=True, preprocessor_output_type=PreprocessingDataType.raw_text_document, results=results
+        )
 
     async def chain_preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessorInput],
         is_rag_chain: Optional[bool] = False,
     ) -> PreprocessorResponse:
         return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

@@ -75,12 +75,14 @@ class InclineDoclingPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate
             result = converted_document.export_to_markdown()
             results.append(result)
 
-        return PreprocessorResponse(status=True, results=results)
+        preprocessor_output_type = (
+            PreprocessingDataType.chunks if self.config.chunk else PreprocessingDataType.raw_text_document
+        )
+        return PreprocessorResponse(success=True, preprocessor_output_type=preprocessor_output_type, results=results)
 
     async def chain_preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessorInput],
         is_rag_chain: Optional[bool] = False,
     ) -> PreprocessorResponse:
         return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

@@ -62,13 +62,12 @@ class InclineSimpleChunkingImpl(Preprocessing, PreprocessorsProtocolPrivate):
             )
             chunks.extend(new_chunks)
 
-        return PreprocessorResponse(status=True, results=chunks)
+        return PreprocessorResponse(success=True, preprocessor_output_type=PreprocessingDataType.chunks, results=chunks)
 
     async def chain_preprocess(
         self,
         preprocessors: PreprocessorChain,
         preprocessor_inputs: List[PreprocessorInput],
         is_rag_chain: Optional[bool] = False,
     ) -> PreprocessorResponse:
         return await self.preprocess(preprocessor_id="", preprocessor_inputs=preprocessor_inputs)

@@ -21,6 +21,7 @@ from llama_stack.apis.inference import Inference
 from llama_stack.apis.preprocessing import (
     Preprocessing,
     PreprocessingDataFormat,
+    PreprocessingDataType,
     PreprocessorChainElement,
     PreprocessorInput,
 )

@@ -81,9 +82,19 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
             preprocessors=preprocessor_chain, preprocessor_inputs=preprocessor_inputs
         )
 
-        chunks = preprocessor_response.results
+        if not preprocessor_response.success:
+            log.error("Preprocessor chain returned an error")
+            return
+
+        if preprocessor_response.preprocessor_output_type != PreprocessingDataType.chunks:
+            log.error(
+                f"Preprocessor chain returned {preprocessor_response.preprocessor_output_type} instead of {PreprocessingDataType.chunks}"
+            )
+            return
+
+        chunks = preprocessor_response.results
         if not chunks:
             log.error("No chunks returned by the preprocessor chain")
             return
 
         await self.vector_io_api.insert_chunks(
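
Taken together, the hunks above give consumers of a preprocessor chain a response they can validate before using the results. The snippet below is an illustrative usage sketch only; the preprocessing_api handle and the chain/input variables are placeholders, not code from this commit:

    # Hypothetical caller, mirroring the checks added to MemoryToolRuntimeImpl above.
    response = await preprocessing_api.chain_preprocess(
        preprocessors=preprocessor_chain, preprocessor_inputs=preprocessor_inputs
    )
    if not response.success:
        raise RuntimeError("preprocessor chain failed")
    if response.preprocessor_output_type != PreprocessingDataType.chunks:
        raise RuntimeError(f"expected chunks, got {response.preprocessor_output_type}")
    chunks = response.results or []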