Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
This adds a `builtin::document_conversion` tool, backed by meta-llama/synthetic-data-kit, for converting documents when used with `file_search`. I also have another local implementation that uses Docling, but I need to debug some segfault issues I'm hitting locally with that, so I'm pushing this first as a simpler reference implementation. Long-term I think we'll want a remote implementation here as well, perhaps docling-serve or unstructured.io, but I need to look into that more. This passes the existing `tests/verifications/openai_api/test_responses.py` but doesn't yet add any new tests for file types besides text and PDF.

Signed-off-by: Ben Browning <bbrownin@redhat.com>
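For context, new providers like the document-conversion tool described above are declared through the same registry pattern as the file below. The following is only an illustrative sketch: the provider type, module path, config class, and pip package names for a `builtin::document_conversion` tool runtime provider are assumptions, not code from this change.

# Hypothetical sketch of registering a document-conversion tool_runtime provider,
# mirroring the InlineProviderSpec pattern used in the registry below.
# The provider_type, pip package, module path, and config class are assumed.
from llama_stack.providers.datatypes import Api, InlineProviderSpec

document_conversion_spec = InlineProviderSpec(
    api=Api.tool_runtime,
    provider_type="inline::document-conversion",  # assumed name
    pip_packages=["synthetic-data-kit"],  # assumed package
    module="llama_stack.providers.inline.tool_runtime.document_conversion",  # assumed path
    config_class="llama_stack.providers.inline.tool_runtime.document_conversion.DocumentConversionConfig",  # assumed class
    api_dependencies=[Api.files],
)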
134 lines · 5.7 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


from llama_stack.providers.datatypes import (
    AdapterSpec,
    Api,
    InlineProviderSpec,
    ProviderSpec,
    remote_provider_spec,
)


def available_providers() -> list[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::meta-reference",
            pip_packages=["faiss-cpu"],
            module="llama_stack.providers.inline.vector_io.faiss",
            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
            deprecation_warning="Please use the `inline::faiss` provider instead.",
            api_dependencies=[Api.inference],
            optional_api_dependencies=[Api.files, Api.tool_runtime],
        ),
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::faiss",
            pip_packages=["faiss-cpu"],
            module="llama_stack.providers.inline.vector_io.faiss",
            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
            api_dependencies=[Api.inference],
            optional_api_dependencies=[Api.files, Api.tool_runtime],
        ),
        # NOTE: sqlite-vec cannot be bundled into the container image because it does not have a
        # source distribution and the wheels are not available for all platforms.
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::sqlite-vec",
            pip_packages=["sqlite-vec"],
            module="llama_stack.providers.inline.vector_io.sqlite_vec",
            config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
            api_dependencies=[Api.inference],
            optional_api_dependencies=[Api.files, Api.tool_runtime],
        ),
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::sqlite_vec",
            pip_packages=["sqlite-vec"],
            module="llama_stack.providers.inline.vector_io.sqlite_vec",
            config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
            deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.",
            api_dependencies=[Api.inference],
            optional_api_dependencies=[Api.files, Api.tool_runtime],
        ),
        remote_provider_spec(
            Api.vector_io,
            AdapterSpec(
                adapter_type="chromadb",
                pip_packages=["chromadb-client"],
                module="llama_stack.providers.remote.vector_io.chroma",
                config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig",
            ),
            api_dependencies=[Api.inference],
        ),
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::chromadb",
            pip_packages=["chromadb"],
            module="llama_stack.providers.inline.vector_io.chroma",
            config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig",
            api_dependencies=[Api.inference],
        ),
        remote_provider_spec(
            Api.vector_io,
            AdapterSpec(
                adapter_type="pgvector",
                pip_packages=["psycopg2-binary"],
                module="llama_stack.providers.remote.vector_io.pgvector",
                config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig",
            ),
            api_dependencies=[Api.inference],
        ),
        remote_provider_spec(
            Api.vector_io,
            AdapterSpec(
                adapter_type="weaviate",
                pip_packages=["weaviate-client"],
                module="llama_stack.providers.remote.vector_io.weaviate",
                config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig",
                provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData",
            ),
            api_dependencies=[Api.inference],
        ),
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::qdrant",
            pip_packages=["qdrant-client"],
            module="llama_stack.providers.inline.vector_io.qdrant",
            config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig",
            api_dependencies=[Api.inference],
        ),
        remote_provider_spec(
            Api.vector_io,
            AdapterSpec(
                adapter_type="qdrant",
                pip_packages=["qdrant-client"],
                module="llama_stack.providers.remote.vector_io.qdrant",
                config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig",
            ),
            api_dependencies=[Api.inference],
        ),
        remote_provider_spec(
            Api.vector_io,
            AdapterSpec(
                adapter_type="milvus",
                pip_packages=["pymilvus"],
                module="llama_stack.providers.remote.vector_io.milvus",
                config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig",
            ),
            api_dependencies=[Api.inference],
        ),
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::milvus",
            pip_packages=["pymilvus"],
            module="llama_stack.providers.inline.vector_io.milvus",
            config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig",
            api_dependencies=[Api.inference],
        ),
    ]
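As a usage note, the registry above is what the stack consults when resolving a `vector_io` provider by its `provider_type`. The snippet below is a minimal sketch of inspecting it directly; `find_vector_io_spec` is an illustrative helper (not part of llama_stack), and the import path assumes this file lives at `llama_stack/providers/registry/vector_io.py`.

# Minimal sketch: look up a provider spec from the registry above by provider_type.
# The import path is assumed from the file's location in the repository;
# find_vector_io_spec is an illustrative helper, not a llama_stack API.
from llama_stack.providers.registry.vector_io import available_providers


def find_vector_io_spec(provider_type: str):
    for spec in available_providers():
        if spec.provider_type == provider_type:
            return spec
    return None


spec = find_vector_io_spec("inline::faiss")
if spec is not None:
    print(spec.module, spec.pip_packages, spec.api_dependencies)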