Switched to using the existing Chunk type.

This commit is contained in:
ilya-kolchinsky 2025-03-03 20:31:06 +01:00
parent c3515530bb
commit 224d2d2891
2 changed files with 5 additions and 10 deletions

View file

@@ -13,10 +13,10 @@ from llama_stack.apis.preprocessing import (
Preprocessing,
PreprocessingInput,
PreprocessingResponse,
PreprocessingResult,
Preprocessor,
PreprocessorOptions,
)
from llama_stack.apis.vector_io import Chunk
from llama_stack.providers.datatypes import PreprocessorsProtocolPrivate
from llama_stack.providers.inline.preprocessing.docling import InlineDoclingConfig
@@ -56,7 +56,7 @@ class InclineDoclingPreprocessorImpl(Preprocessing, PreprocessorsProtocolPrivate
converted_document = self.converter.convert(url).document
if self.config.chunk:
result = self.chunker.chunk(converted_document)
results.extend([PreprocessingResult(data=chunk.text, metadata=chunk.meta) for chunk in result])
results.extend([Chunk(content=chunk.text, metadata=chunk.meta) for chunk in result])
else:
result = converted_document.export_to_markdown()
results.append(result)