diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py
index 5e15dd8e1..508ef66ac 100644
--- a/llama_stack/providers/datatypes.py
+++ b/llama_stack/providers/datatypes.py
@@ -132,7 +132,6 @@ class ProviderSpec(BaseModel):
     )
 
     is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.")
 
-    # used internally by the resolver; this is a hack for now
     deps__: list[str] = Field(default_factory=list)
 
@@ -182,7 +181,7 @@ A description of the provider. This is used to display in the documentation.
 class InlineProviderSpec(ProviderSpec):
     pip_packages: list[str] = Field(
         default_factory=list,
-        description="The pip dependencies needed for this implementation",
+        description="The pip dependencies needed for this implementation (deprecated - use package_name instead)",
     )
     container_image: str | None = Field(
         default=None,
diff --git a/llama_stack/providers/inline/agents/meta_reference/pyproject.toml b/llama_stack/providers/inline/agents/meta_reference/pyproject.toml
new file mode 100644
index 000000000..35cd5fcde
--- /dev/null
+++ b/llama_stack/providers/inline/agents/meta_reference/pyproject.toml
@@ -0,0 +1,24 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-agents-meta-reference"
+version = "0.1.0"
+description = "Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "matplotlib",
+    "pillow",
+    "pandas",
+    "scikit-learn",
+    "mcp>=1.8.1",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/datasetio/localfs/pyproject.toml b/llama_stack/providers/inline/datasetio/localfs/pyproject.toml
new file mode 100644
index 000000000..2e0c33eac
--- /dev/null
+++ b/llama_stack/providers/inline/datasetio/localfs/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-datasetio-localfs"
+version = "0.1.0"
+description = "Local filesystem-based dataset I/O provider for reading and writing datasets to local storage"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "pandas",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/eval/meta_reference/pyproject.toml b/llama_stack/providers/inline/eval/meta_reference/pyproject.toml
new file mode 100644
index 000000000..d0a0a5e38
--- /dev/null
+++ b/llama_stack/providers/inline/eval/meta_reference/pyproject.toml
@@ -0,0 +1,24 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-eval-meta-reference"
+version = "0.1.0"
+description = "Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "tree_sitter",
+    "pythainlp",
+    "langdetect",
+    "emoji",
+    "nltk",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/files/localfs/pyproject.toml b/llama_stack/providers/inline/files/localfs/pyproject.toml
new file mode 100644
index 000000000..814d2e71d
--- /dev/null
+++ b/llama_stack/providers/inline/files/localfs/pyproject.toml
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-files-localfs"
+version = "0.1.0"
+description = "Local filesystem-based file storage provider for managing files and documents locally"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = []
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/inference/meta_reference/pyproject.toml b/llama_stack/providers/inline/inference/meta_reference/pyproject.toml
new file mode 100644
index 000000000..9618e8f0b
--- /dev/null
+++ b/llama_stack/providers/inline/inference/meta_reference/pyproject.toml
@@ -0,0 +1,29 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-meta-reference"
+version = "0.1.0"
+description = "Meta's reference implementation of inference with support for various model formats and optimization techniques"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "accelerate",
+    "fairscale",
+    "torch",
+    "torchvision",
+    "transformers",
+    "zmq",
+    "lm-format-enforcer",
+    "sentence-transformers",
+    "torchao==0.8.0",
+    "fbgemm-gpu-genai==1.1.2",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml b/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml
new file mode 100644
index 000000000..2221cb6cb
--- /dev/null
+++ b/llama_stack/providers/inline/inference/sentence_transformers/pyproject.toml
@@ -0,0 +1,22 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-sentence-transformers"
+version = "0.1.0"
+description = "Sentence Transformers inference provider for text embeddings and similarity search"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "torch",
+    "torchvision",
+    "sentence-transformers",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/post_training/huggingface/pyproject.toml b/llama_stack/providers/inline/post_training/huggingface/pyproject.toml
new file mode 100644
index 000000000..5188b9189
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/huggingface/pyproject.toml
@@ -0,0 +1,24 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-post-training-huggingface"
+version = "0.1.0"
+description = "HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "torch",
+    "trl",
+    "transformers",
+    "peft",
+    "datasets",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/post_training/torchtune/pyproject.toml b/llama_stack/providers/inline/post_training/torchtune/pyproject.toml
new file mode 100644
index 000000000..a3a7a887c
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/torchtune/pyproject.toml
@@ -0,0 +1,23 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-post-training-torchtune"
+version = "0.1.0"
+description = "TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "torch",
+    "torchtune==0.5.0",
+    "torchao==0.8.0",
+    "numpy",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/safety/code_scanner/pyproject.toml b/llama_stack/providers/inline/safety/code_scanner/pyproject.toml
new file mode 100644
index 000000000..0cd40a88b
--- /dev/null
+++ b/llama_stack/providers/inline/safety/code_scanner/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-code-scanner"
+version = "0.1.0"
+description = "Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "codeshield",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/safety/llama_guard/pyproject.toml b/llama_stack/providers/inline/safety/llama_guard/pyproject.toml
new file mode 100644
index 000000000..71df3238c
--- /dev/null
+++ b/llama_stack/providers/inline/safety/llama_guard/pyproject.toml
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-llama-guard"
+version = "0.1.0"
+description = "Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = []
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml b/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml
new file mode 100644
index 000000000..cee17b302
--- /dev/null
+++ b/llama_stack/providers/inline/safety/prompt_guard/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-prompt-guard"
+version = "0.1.0"
+description = "Prompt Guard safety provider for detecting and filtering unsafe prompts and content"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "transformers[accelerate]",
+    "torch",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/scoring/basic/pyproject.toml b/llama_stack/providers/inline/scoring/basic/pyproject.toml
new file mode 100644
index 000000000..1fc86c36c
--- /dev/null
+++ b/llama_stack/providers/inline/scoring/basic/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-scoring-basic"
+version = "0.1.0"
+description = "Basic scoring provider for simple evaluation metrics and scoring functions"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/scoring/braintrust/pyproject.toml b/llama_stack/providers/inline/scoring/braintrust/pyproject.toml
new file mode 100644
index 000000000..c5d0780c8
--- /dev/null
+++ b/llama_stack/providers/inline/scoring/braintrust/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-scoring-braintrust"
+version = "0.1.0"
+description = "Braintrust scoring provider for evaluation and scoring using the Braintrust platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "autoevals",
+    "openai",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml b/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml
new file mode 100644
index 000000000..46cdc96ea
--- /dev/null
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/pyproject.toml
@@ -0,0 +1,18 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-scoring-llm-as-judge"
+version = "0.1.0"
+description = "LLM-as-judge scoring provider that uses language models to evaluate and score responses"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = []
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml b/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml
new file mode 100644
index 000000000..23d41f7ba
--- /dev/null
+++ b/llama_stack/providers/inline/telemetry/meta_reference/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-telemetry-meta-reference"
+version = "0.1.0"
+description = "Meta's reference implementation of telemetry and observability using OpenTelemetry"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "opentelemetry-sdk",
+    "opentelemetry-exporter-otlp-proto-http",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml b/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml
new file mode 100644
index 000000000..53510b2d4
--- /dev/null
+++ b/llama_stack/providers/inline/tool_runtime/rag/pyproject.toml
@@ -0,0 +1,28 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-rag"
+version = "0.1.0"
+description = "RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "chardet",
+    "pypdf",
+    "tqdm",
+    "numpy",
+    "scikit-learn",
+    "scipy",
+    "nltk",
+    "sentencepiece",
+    "transformers",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/vector_io/chroma/pyproject.toml b/llama_stack/providers/inline/vector_io/chroma/pyproject.toml
new file mode 100644
index 000000000..dd33ed593
--- /dev/null
+++ b/llama_stack/providers/inline/vector_io/chroma/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-chroma"
+version = "0.1.0"
+description = "Chroma inline vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "chromadb",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/vector_io/faiss/pyproject.toml b/llama_stack/providers/inline/vector_io/faiss/pyproject.toml
new file mode 100644
index 000000000..712d70ba2
--- /dev/null
+++ b/llama_stack/providers/inline/vector_io/faiss/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-faiss"
+version = "0.1.0"
+description = "Faiss inline vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "faiss-cpu",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/vector_io/milvus/pyproject.toml b/llama_stack/providers/inline/vector_io/milvus/pyproject.toml
new file mode 100644
index 000000000..3d0ea94c9
--- /dev/null
+++ b/llama_stack/providers/inline/vector_io/milvus/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-milvus"
+version = "0.1.0"
+description = "Milvus inline vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "pymilvus>=2.4.10",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml b/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml
new file mode 100644
index 000000000..c2b7ee82d
--- /dev/null
+++ b/llama_stack/providers/inline/vector_io/qdrant/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-vector-io-qdrant"
+version = "0.1.0"
+description = "Qdrant inline vector database provider for Llama Stack"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "qdrant-client",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
["llama_stack*"] diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml b/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml new file mode 100644 index 000000000..37cb0bf67 --- /dev/null +++ b/llama_stack/providers/inline/vector_io/sqlite_vec/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-sqlite-vec" +version = "0.1.0" +description = "SQLite-Vec inline vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "sqlite-vec", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/registry/datasetio.py b/llama_stack/providers/registry/datasetio.py index 43cde83fb..797185588 100644 --- a/llama_stack/providers/registry/datasetio.py +++ b/llama_stack/providers/registry/datasetio.py @@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.datasetio, provider_type="inline::localfs", - pip_packages=["pandas"], module="llama_stack.providers.inline.datasetio.localfs", config_class="llama_stack.providers.inline.datasetio.localfs.LocalFSDatasetIOConfig", api_dependencies=[], diff --git a/llama_stack/providers/registry/eval.py b/llama_stack/providers/registry/eval.py index 9f0d17916..a9f008098 100644 --- a/llama_stack/providers/registry/eval.py +++ b/llama_stack/providers/registry/eval.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.eval, provider_type="inline::meta-reference", - pip_packages=["tree_sitter", "pythainlp", "langdetect", "emoji", "nltk"], module="llama_stack.providers.inline.eval.meta_reference", config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig", api_dependencies=[ diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py index 1801cdcad..3d6f01da0 100644 --- a/llama_stack/providers/registry/inference.py +++ b/llama_stack/providers/registry/inference.py @@ -32,7 +32,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.inference, provider_type="inline::meta-reference", - pip_packages=META_REFERENCE_DEPS, module="llama_stack.providers.inline.inference.meta_reference", config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig", description="Meta's reference implementation of inference with support for various model formats and optimization techniques.", @@ -40,10 +39,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.inference, provider_type="inline::sentence-transformers", - pip_packages=[ - "torch torchvision --index-url https://download.pytorch.org/whl/cpu", - "sentence-transformers --no-deps", - ], module="llama_stack.providers.inline.inference.sentence_transformers", config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig", description="Sentence Transformers inference provider for text embeddings and similarity search.", @@ -52,9 +47,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="cerebras", - pip_packages=[ - "cerebras_cloud_sdk", - ], module="llama_stack.providers.remote.inference.cerebras", 
config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig", description="Cerebras inference provider for running models on Cerebras Cloud platform.", @@ -64,7 +56,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="ollama", - pip_packages=["ollama", "aiohttp", "h11>=0.16.0"], config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig", module="llama_stack.providers.remote.inference.ollama", description="Ollama inference provider for running local models through the Ollama runtime.", @@ -74,7 +65,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="vllm", - pip_packages=["openai"], module="llama_stack.providers.remote.inference.vllm", config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig", description="Remote vLLM inference provider for connecting to vLLM servers.", @@ -84,7 +74,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="tgi", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig", description="Text Generation Inference (TGI) provider for HuggingFace model serving.", @@ -94,7 +83,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="hf::serverless", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig", description="HuggingFace Inference API serverless provider for on-demand model inference.", @@ -104,7 +92,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="hf::endpoint", - pip_packages=["huggingface_hub", "aiohttp"], module="llama_stack.providers.remote.inference.tgi", config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig", description="HuggingFace Inference Endpoints provider for dedicated model serving.", @@ -114,9 +101,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="fireworks", - pip_packages=[ - "fireworks-ai", - ], module="llama_stack.providers.remote.inference.fireworks", config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig", provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator", @@ -127,9 +111,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="together", - pip_packages=[ - "together", - ], module="llama_stack.providers.remote.inference.together", config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig", provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator", @@ -140,7 +121,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="bedrock", - pip_packages=["boto3"], module="llama_stack.providers.remote.inference.bedrock", config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig", description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.", @@ -150,9 +130,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="databricks", - 
pip_packages=[ - "openai", - ], module="llama_stack.providers.remote.inference.databricks", config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig", description="Databricks inference provider for running models on Databricks' unified analytics platform.", @@ -162,9 +139,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=[ - "openai", - ], module="llama_stack.providers.remote.inference.nvidia", config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig", description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.", @@ -174,7 +148,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="runpod", - pip_packages=["openai"], module="llama_stack.providers.remote.inference.runpod", config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig", description="RunPod inference provider for running models on RunPod's cloud GPU platform.", @@ -184,7 +157,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="openai", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.openai", config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig", provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator", @@ -195,7 +167,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="anthropic", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.anthropic", config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig", provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator", @@ -206,7 +177,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="gemini", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.gemini", config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig", provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator", @@ -217,7 +187,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.inference, adapter=AdapterSpec( adapter_type="vertexai", - pip_packages=["litellm", "google-cloud-aiplatform"], module="llama_stack.providers.remote.inference.vertexai", config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig", provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator", @@ -247,7 +216,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="groq", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.groq", config_class="llama_stack.providers.remote.inference.groq.GroqConfig", provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator", @@ -258,7 +226,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="llama-openai-compat", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.llama_openai_compat", config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig", provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator", @@ -269,7 +236,6 @@ Available Models: 
api=Api.inference, adapter=AdapterSpec( adapter_type="sambanova", - pip_packages=["litellm"], module="llama_stack.providers.remote.inference.sambanova", config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator", @@ -280,7 +246,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="passthrough", - pip_packages=[], module="llama_stack.providers.remote.inference.passthrough", config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", @@ -291,7 +256,6 @@ Available Models: api=Api.inference, adapter=AdapterSpec( adapter_type="watsonx", - pip_packages=["ibm_watson_machine_learning"], module="llama_stack.providers.remote.inference.watsonx", config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig", provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator", diff --git a/llama_stack/providers/registry/post_training.py b/llama_stack/providers/registry/post_training.py index ffd64ef7c..3df45ded0 100644 --- a/llama_stack/providers/registry/post_training.py +++ b/llama_stack/providers/registry/post_training.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.post_training, provider_type="inline::torchtune", - pip_packages=["torch", "torchtune==0.5.0", "torchao==0.8.0", "numpy"], module="llama_stack.providers.inline.post_training.torchtune", config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig", api_dependencies=[ @@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.post_training, provider_type="inline::huggingface", - pip_packages=["torch", "trl", "transformers", "peft", "datasets"], module="llama_stack.providers.inline.post_training.huggingface", config_class="llama_stack.providers.inline.post_training.huggingface.HuggingFacePostTrainingConfig", api_dependencies=[ @@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.post_training, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=["requests", "aiohttp"], module="llama_stack.providers.remote.post_training.nvidia", config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig", description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.", diff --git a/llama_stack/providers/registry/safety.py b/llama_stack/providers/registry/safety.py index 9dd791bd8..f39f277a7 100644 --- a/llama_stack/providers/registry/safety.py +++ b/llama_stack/providers/registry/safety.py @@ -30,7 +30,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.safety, provider_type="inline::llama-guard", - pip_packages=[], module="llama_stack.providers.inline.safety.llama_guard", config_class="llama_stack.providers.inline.safety.llama_guard.LlamaGuardConfig", api_dependencies=[ @@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="bedrock", - pip_packages=["boto3"], module="llama_stack.providers.remote.safety.bedrock", config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig", description="AWS Bedrock safety provider for content moderation using AWS's safety services.", @@ -62,7 +60,6 @@ def 
available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="nvidia", - pip_packages=["requests"], module="llama_stack.providers.remote.safety.nvidia", config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig", description="NVIDIA's safety provider for content moderation and safety filtering.", @@ -72,7 +69,6 @@ def available_providers() -> list[ProviderSpec]: api=Api.safety, adapter=AdapterSpec( adapter_type="sambanova", - pip_packages=["litellm", "requests"], module="llama_stack.providers.remote.safety.sambanova", config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig", provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator", diff --git a/llama_stack/providers/registry/scoring.py b/llama_stack/providers/registry/scoring.py index 79293d888..1c8367a0d 100644 --- a/llama_stack/providers/registry/scoring.py +++ b/llama_stack/providers/registry/scoring.py @@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::basic", - pip_packages=["requests"], module="llama_stack.providers.inline.scoring.basic", config_class="llama_stack.providers.inline.scoring.basic.BasicScoringConfig", api_dependencies=[ @@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::llm-as-judge", - pip_packages=[], module="llama_stack.providers.inline.scoring.llm_as_judge", config_class="llama_stack.providers.inline.scoring.llm_as_judge.LlmAsJudgeScoringConfig", api_dependencies=[ @@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.scoring, provider_type="inline::braintrust", - pip_packages=["autoevals", "openai"], module="llama_stack.providers.inline.scoring.braintrust", config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig", api_dependencies=[ diff --git a/llama_stack/providers/registry/tool_runtime.py b/llama_stack/providers/registry/tool_runtime.py index 661851443..cc57bb7b6 100644 --- a/llama_stack/providers/registry/tool_runtime.py +++ b/llama_stack/providers/registry/tool_runtime.py @@ -41,7 +41,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="brave-search", module="llama_stack.providers.remote.tool_runtime.brave_search", config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator", description="Brave Search tool for web search capabilities with privacy-focused results.", ), @@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="bing-search", module="llama_stack.providers.remote.tool_runtime.bing_search", config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator", description="Bing Search tool for web search capabilities using Microsoft's search engine.", ), @@ -63,7 +61,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="tavily-search", module="llama_stack.providers.remote.tool_runtime.tavily_search", config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig", - pip_packages=["requests"], 
provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator", description="Tavily Search tool for AI-optimized web search with structured results.", ), @@ -74,7 +71,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="wolfram-alpha", module="llama_stack.providers.remote.tool_runtime.wolfram_alpha", config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig", - pip_packages=["requests"], provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator", description="Wolfram Alpha tool for computational knowledge and mathematical calculations.", ), @@ -85,7 +81,6 @@ def available_providers() -> list[ProviderSpec]: adapter_type="model-context-protocol", module="llama_stack.providers.remote.tool_runtime.model_context_protocol", config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig", - pip_packages=["mcp>=1.8.1"], provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator", description="Model Context Protocol (MCP) tool for standardized tool calling and context management.", ), diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py index 70148eb15..99db30892 100644 --- a/llama_stack/providers/registry/vector_io.py +++ b/llama_stack/providers/registry/vector_io.py @@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.vector_io, provider_type="inline::meta-reference", - pip_packages=["faiss-cpu"], module="llama_stack.providers.inline.vector_io.faiss", config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", deprecation_warning="Please use the `inline::faiss` provider instead.", @@ -30,7 +29,6 @@ def available_providers() -> list[ProviderSpec]: InlineProviderSpec( api=Api.vector_io, provider_type="inline::faiss", - pip_packages=["faiss-cpu"], module="llama_stack.providers.inline.vector_io.faiss", config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig", api_dependencies=[Api.inference], @@ -83,7 +81,6 @@ more details about Faiss in general. InlineProviderSpec( api=Api.vector_io, provider_type="inline::sqlite-vec", - pip_packages=["sqlite-vec"], module="llama_stack.providers.inline.vector_io.sqlite_vec", config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", api_dependencies=[Api.inference], @@ -290,7 +287,6 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f InlineProviderSpec( api=Api.vector_io, provider_type="inline::sqlite_vec", - pip_packages=["sqlite-vec"], module="llama_stack.providers.inline.vector_io.sqlite_vec", config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig", deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.", @@ -304,7 +300,6 @@ Please refer to the sqlite-vec provider documentation. 
Api.vector_io, AdapterSpec( adapter_type="chromadb", - pip_packages=["chromadb-client"], module="llama_stack.providers.remote.vector_io.chroma", config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig", description=""" @@ -347,7 +342,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti InlineProviderSpec( api=Api.vector_io, provider_type="inline::chromadb", - pip_packages=["chromadb"], module="llama_stack.providers.inline.vector_io.chroma", config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig", api_dependencies=[Api.inference], @@ -391,7 +385,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti Api.vector_io, AdapterSpec( adapter_type="pgvector", - pip_packages=["psycopg2-binary"], module="llama_stack.providers.remote.vector_io.pgvector", config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig", description=""" @@ -430,7 +423,6 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de Api.vector_io, AdapterSpec( adapter_type="weaviate", - pip_packages=["weaviate-client"], module="llama_stack.providers.remote.vector_io.weaviate", config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig", provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData", @@ -471,7 +463,6 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more InlineProviderSpec( api=Api.vector_io, provider_type="inline::qdrant", - pip_packages=["qdrant-client"], module="llama_stack.providers.inline.vector_io.qdrant", config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig", api_dependencies=[Api.inference], @@ -524,7 +515,6 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta Api.vector_io, AdapterSpec( adapter_type="qdrant", - pip_packages=["qdrant-client"], module="llama_stack.providers.remote.vector_io.qdrant", config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig", description=""" @@ -538,7 +528,6 @@ Please refer to the inline provider documentation. 
             Api.vector_io,
             AdapterSpec(
                 adapter_type="milvus",
-                pip_packages=["pymilvus>=2.4.10"],
                 module="llama_stack.providers.remote.vector_io.milvus",
                 config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig",
                 description="""
@@ -739,7 +728,6 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi
         InlineProviderSpec(
             api=Api.vector_io,
             provider_type="inline::milvus",
-            pip_packages=["pymilvus>=2.4.10"],
             module="llama_stack.providers.inline.vector_io.milvus",
             config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig",
             api_dependencies=[Api.inference],
diff --git a/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml b/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml
new file mode 100644
index 000000000..b525f3b97
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/huggingface/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-datasetio-huggingface"
+version = "0.1.0"
+description = "HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "datasets",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml b/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml
new file mode 100644
index 000000000..6dc98aec4
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/nvidia/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-datasetio-nvidia"
+version = "0.1.0"
+description = "NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "datasets",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/eval/nvidia/pyproject.toml b/llama_stack/providers/remote/eval/nvidia/pyproject.toml
new file mode 100644
index 000000000..969c5930e
--- /dev/null
+++ b/llama_stack/providers/remote/eval/nvidia/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-eval-nvidia"
+version = "0.1.0"
+description = "NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/anthropic/pyproject.toml b/llama_stack/providers/remote/inference/anthropic/pyproject.toml
new file mode 100644
index 000000000..dbceed308
--- /dev/null
+++ b/llama_stack/providers/remote/inference/anthropic/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-anthropic"
+version = "0.1.0"
+description = "Anthropic inference provider for accessing Claude models and Anthropic's AI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/bedrock/pyproject.toml b/llama_stack/providers/remote/inference/bedrock/pyproject.toml
new file mode 100644
index 000000000..341c4ffc7
--- /dev/null
+++ b/llama_stack/providers/remote/inference/bedrock/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-bedrock"
+version = "0.1.0"
+description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "boto3",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/cerebras/pyproject.toml b/llama_stack/providers/remote/inference/cerebras/pyproject.toml
new file mode 100644
index 000000000..3ad34dbe0
--- /dev/null
+++ b/llama_stack/providers/remote/inference/cerebras/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-cerebras"
+version = "0.1.0"
+description = "Cerebras inference provider for running models on Cerebras Cloud platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "cerebras_cloud_sdk",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/databricks/pyproject.toml b/llama_stack/providers/remote/inference/databricks/pyproject.toml
new file mode 100644
index 000000000..1bb551d65
--- /dev/null
+++ b/llama_stack/providers/remote/inference/databricks/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-databricks"
+version = "0.1.0"
+description = "Databricks inference provider for running models on Databricks' unified analytics platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "openai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/fireworks/pyproject.toml b/llama_stack/providers/remote/inference/fireworks/pyproject.toml
new file mode 100644
index 000000000..6a75e17cc
--- /dev/null
+++ b/llama_stack/providers/remote/inference/fireworks/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-fireworks"
+version = "0.1.0"
+description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "fireworks-ai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/gemini/pyproject.toml b/llama_stack/providers/remote/inference/gemini/pyproject.toml
new file mode 100644
index 000000000..37938260b
--- /dev/null
+++ b/llama_stack/providers/remote/inference/gemini/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-gemini"
+version = "0.1.0"
+description = "Google Gemini inference provider for accessing Gemini models and Google's AI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/groq/pyproject.toml b/llama_stack/providers/remote/inference/groq/pyproject.toml
new file mode 100644
index 000000000..590b33e38
--- /dev/null
+++ b/llama_stack/providers/remote/inference/groq/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-groq"
+version = "0.1.0"
+description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml b/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml
new file mode 100644
index 000000000..b8f45e7db
--- /dev/null
+++ b/llama_stack/providers/remote/inference/llama_openai_compat/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-llama-openai-compat"
+version = "0.1.0"
+description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/nvidia/pyproject.toml b/llama_stack/providers/remote/inference/nvidia/pyproject.toml
new file mode 100644
index 000000000..6123a32b5
--- /dev/null
+++ b/llama_stack/providers/remote/inference/nvidia/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-nvidia"
+version = "0.1.0"
+description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "openai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/ollama/pyproject.toml b/llama_stack/providers/remote/inference/ollama/pyproject.toml
new file mode 100644
index 000000000..89390a612
--- /dev/null
+++ b/llama_stack/providers/remote/inference/ollama/pyproject.toml
@@ -0,0 +1,23 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-ollama"
+version = "0.1.0"
+description = "Ollama inference provider for running local models through the Ollama runtime"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "ollama",
+    "aiohttp",
+    "h11>=0.16.0",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/openai/pyproject.toml b/llama_stack/providers/remote/inference/openai/pyproject.toml
new file mode 100644
index 000000000..471d02571
--- /dev/null
+++ b/llama_stack/providers/remote/inference/openai/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-openai"
+version = "0.1.0"
+description = "OpenAI inference provider for accessing GPT models and other OpenAI services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/passthrough/pyproject.toml b/llama_stack/providers/remote/inference/passthrough/pyproject.toml
new file mode 100644
index 000000000..57765d81e
--- /dev/null
+++ b/llama_stack/providers/remote/inference/passthrough/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-passthrough"
+version = "0.1.0"
+description = "Passthrough inference provider for connecting to any external inference service not directly supported"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/runpod/pyproject.toml b/llama_stack/providers/remote/inference/runpod/pyproject.toml
new file mode 100644
index 000000000..433991028
--- /dev/null
+++ b/llama_stack/providers/remote/inference/runpod/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-runpod"
+version = "0.1.0"
+description = "RunPod inference provider for running models on RunPod's cloud GPU platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "openai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/sambanova/pyproject.toml b/llama_stack/providers/remote/inference/sambanova/pyproject.toml
new file mode 100644
index 000000000..0d8318e7c
--- /dev/null
+++ b/llama_stack/providers/remote/inference/sambanova/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-sambanova"
+version = "0.1.0"
+description = "SambaNova inference provider for running models on SambaNova's dataflow architecture"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/tgi/pyproject.toml b/llama_stack/providers/remote/inference/tgi/pyproject.toml
new file mode 100644
index 000000000..4d63b3518
--- /dev/null
+++ b/llama_stack/providers/remote/inference/tgi/pyproject.toml
@@ -0,0 +1,22 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-tgi"
+version = "0.1.0"
+description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "huggingface_hub",
+    "aiohttp",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/together/pyproject.toml b/llama_stack/providers/remote/inference/together/pyproject.toml
new file mode 100644
index 000000000..9cb290789
--- /dev/null
+++ b/llama_stack/providers/remote/inference/together/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-together"
+version = "0.1.0"
+description = "Together AI inference provider for open-source models and collaborative AI development"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "together",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/vertexai/pyproject.toml b/llama_stack/providers/remote/inference/vertexai/pyproject.toml
new file mode 100644
index 000000000..1b92abb97
--- /dev/null
+++ b/llama_stack/providers/remote/inference/vertexai/pyproject.toml
@@ -0,0 +1,19 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-vertexai"
+version = "0.1.0"
+description = "Google VertexAI Remote Inference Provider"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+    "google-cloud-aiplatform"
+]
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/vllm/pyproject.toml b/llama_stack/providers/remote/inference/vllm/pyproject.toml
new file mode 100644
index 000000000..7a74daa9f
--- /dev/null
+++ b/llama_stack/providers/remote/inference/vllm/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-vllm"
+version = "0.1.0"
+description = "Remote vLLM inference provider for connecting to vLLM servers"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "openai",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/inference/watsonx/pyproject.toml b/llama_stack/providers/remote/inference/watsonx/pyproject.toml
new file mode 100644
index 000000000..6928566ac
--- /dev/null
+++ b/llama_stack/providers/remote/inference/watsonx/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-inference-watsonx"
+version = "0.1.0"
+description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "ibm_watson_machine_learning",
+
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/post_training/nvidia/pyproject.toml b/llama_stack/providers/remote/post_training/nvidia/pyproject.toml
new file mode 100644
index 000000000..e94aeb707
--- /dev/null
+++ b/llama_stack/providers/remote/post_training/nvidia/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-post-training-nvidia"
+version = "0.1.0"
+description = "NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+    "aiohttp",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/safety/bedrock/pyproject.toml b/llama_stack/providers/remote/safety/bedrock/pyproject.toml
new file mode 100644
index 000000000..c998cc3ee
--- /dev/null
+++ b/llama_stack/providers/remote/safety/bedrock/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-bedrock"
+version = "0.1.0"
+description = "AWS Bedrock safety provider for content moderation using AWS's safety services"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "boto3",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/safety/nvidia/pyproject.toml b/llama_stack/providers/remote/safety/nvidia/pyproject.toml
new file mode 100644
index 000000000..668dfc641
--- /dev/null
+++ b/llama_stack/providers/remote/safety/nvidia/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-nvidia"
+version = "0.1.0"
+description = "NVIDIA's safety provider for content moderation and safety filtering"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/safety/sambanova/pyproject.toml b/llama_stack/providers/remote/safety/sambanova/pyproject.toml
new file mode 100644
index 000000000..a1c147093
--- /dev/null
+++ b/llama_stack/providers/remote/safety/sambanova/pyproject.toml
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-safety-sambanova"
+version = "0.1.0"
+description = "SambaNova's safety provider for content moderation and safety filtering"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "litellm",
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml
new file mode 100644
index 000000000..b2995778a
--- /dev/null
+++ b/llama_stack/providers/remote/tool_runtime/bing_search/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-bing-search"
+version = "0.1.0"
+description = "Bing Search tool for web search capabilities using Microsoft's search engine"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml
new file mode 100644
index 000000000..ef00f8777
--- /dev/null
+++ b/llama_stack/providers/remote/tool_runtime/brave_search/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-brave-search"
+version = "0.1.0"
+description = "Brave Search tool for web search capabilities with privacy-focused results"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml b/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml
new file mode 100644
index 000000000..a930942d1
--- /dev/null
+++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-model-context-protocol"
+version = "0.1.0"
+description = "Model Context Protocol (MCP) tool for standardized tool calling and context management"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "mcp>=1.8.1",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git a/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml b/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml
new file mode 100644
index 000000000..55e169614
--- /dev/null
+++ b/llama_stack/providers/remote/tool_runtime/tavily_search/pyproject.toml
@@ -0,0 +1,20 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "llama-stack-provider-tool-runtime-tavily-search"
+version = "0.1.0"
+description = "Tavily Search tool for AI-optimized web search with structured results"
+authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
+requires-python = ">=3.12"
+license = { "text" = "MIT" }
+dependencies = [
+    "requests",
+]
+
+
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["llama_stack*"]
diff --git
a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml new file mode 100644 index 000000000..866d77f1b --- /dev/null +++ b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-tool-runtime-wolfram-alpha" +version = "0.1.0" +description = "Wolfram Alpha tool for computational knowledge and mathematical calculations" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "requests", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/chroma/pyproject.toml b/llama_stack/providers/remote/vector_io/chroma/pyproject.toml new file mode 100644 index 000000000..ba728ecb5 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/chroma/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-chroma-remote" +version = "0.1.0" +description = "Chroma remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "chromadb-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/milvus/pyproject.toml b/llama_stack/providers/remote/vector_io/milvus/pyproject.toml new file mode 100644 index 000000000..66a208604 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/milvus/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-milvus-remote" +version = "0.1.0" +description = "Milvus remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "pymilvus>=2.4.10", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml b/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml new file mode 100644 index 000000000..9d761dc8d --- /dev/null +++ b/llama_stack/providers/remote/vector_io/pgvector/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-pgvector" +version = "0.1.0" +description = "PGVector remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "psycopg2-binary", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml b/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml new file mode 100644 index 000000000..f52b91471 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/qdrant/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] 
+name = "llama-stack-provider-vector-io-qdrant-remote" +version = "0.1.0" +description = "Qdrant remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "qdrant-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"] diff --git a/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml b/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml new file mode 100644 index 000000000..811e99c97 --- /dev/null +++ b/llama_stack/providers/remote/vector_io/weaviate/pyproject.toml @@ -0,0 +1,20 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "llama-stack-provider-vector-io-weaviate" +version = "0.1.0" +description = "Weaviate remote vector database provider for Llama Stack" +authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }] +requires-python = ">=3.12" +license = { "text" = "MIT" } +dependencies = [ + "weaviate-client", +] + + + +[tool.setuptools.packages.find] +where = ["."] +include = ["llama_stack*"]