refactor: convert providers to be installed via package

Currently, each provider declares its dependencies in a `pip_packages` list. Rather than build our own form of Python dependency management, we should give each provider a `pyproject.toml` file declaring its dependencies in a more trackable manner.
Each provider can then be installed via the already-in-place `module` field in the `ProviderSpec`, which points to the directory the provider lives in.
We can then simply `uv pip install` that directory instead of installing the dependencies one by one.
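
For illustration, a minimal sketch of what the install step amounts to, assuming a hypothetical helper that maps a spec's `module` to the provider's source directory (the actual resolution logic lives in the stack's build code, and these names are illustrative):

```python
import subprocess
from pathlib import Path


def provider_dir_from_module(module: str, repo_root: Path = Path(".")) -> Path:
    # Assumption: a provider's directory mirrors its dotted module path, e.g.
    # "llama_stack.providers.inline.agents.meta_reference" ->
    # llama_stack/providers/inline/agents/meta_reference/
    return repo_root.joinpath(*module.split("."))


def install_provider(module: str) -> None:
    # One `uv pip install <dir>` replaces installing pip_packages one by one;
    # the provider's pyproject.toml supplies the dependency list.
    target = provider_dir_from_module(module)
    subprocess.run(["uv", "pip", "install", str(target)], check=True)
```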

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern 2025-07-29 15:18:54 -04:00
parent a1301911e4
commit 41431d8bdd
76 changed files with 1294 additions and 134 deletions

@@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-agents-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "matplotlib",
    "pillow",
    "pandas",
    "scikit-learn",
    "mcp>=1.8.1",
    "aiosqlite",
    "psycopg2-binary",
    "redis",
    "pymongo"
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
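
Every `pyproject.toml` in this commit follows the same template: a setuptools build backend, a distribution named `llama-stack-provider-<api>-<provider-id>`, and a `packages.find` stanza that picks up the provider's `llama_stack.*` subpackage from its own directory. That uniformity makes installing a whole set of providers mechanical; a hypothetical sweep (the root path here is an assumption, not something this commit defines):

```python
import subprocess
from pathlib import Path

# Hypothetical providers root; the real layout is whatever the repo uses.
for pyproject in Path("llama_stack/providers/inline").rglob("pyproject.toml"):
    subprocess.run(["uv", "pip", "install", str(pyproject.parent)], check=True)
```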

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-batches-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of batches API with KVStore persistence."
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "openai"
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-datasetio-localfs"
version = "0.1.0"
description = "Local filesystem-based dataset I/O provider for reading and writing datasets to local storage"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "pandas",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-eval-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "tree_sitter",
    "pythainlp",
    "langdetect",
    "emoji",
    "nltk",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-files-localfs"
version = "0.1.0"
description = "Local filesystem-based file storage provider for managing files and documents locally"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,29 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of inference with support for various model formats and optimization techniques"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "accelerate",
    "fairscale",
    "torch",
    "torchvision",
    "transformers",
    "zmq",
    "lm-format-enforcer",
    "sentence-transformers",
    "torchao==0.8.0",
    "fbgemm-gpu-genai==1.1.2",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-sentence-transformers"
version = "0.1.0"
description = "Sentence Transformers inference provider for text embeddings and similarity search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "torch",
    "torchvision",
    "sentence-transformers",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-post-training-huggingface"
version = "0.1.0"
description = "HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "trl",
    "transformers",
    "peft",
    "datasets>=4.0.0",
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,30 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-post-training-torchtune"
version = "0.1.0"
description = "TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "torchtune>=0.5.0",
    "torchao>=0.12.0",
    "numpy",
]

[project.optional-dependencies]
cpu = [
    "torch --extra-index-url https://download.pytorch.org/whl/cpu",
]
gpu = [
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
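
This is the one provider that splits torch into optional `cpu`/`gpu` extras so the installer can choose a wheel at build time. A hypothetical selection (the directory path is illustrative, and whether the `--extra-index-url` suffix embedded in the `cpu` extra is honored depends on the installer):

```python
import subprocess

# Illustrative path; the real location depends on the repo layout.
TORCHTUNE_DIR = "llama_stack/providers/inline/post_training/torchtune"

# CPU-only hosts: the [cpu] extra targets the CPU torch wheel index.
subprocess.run(["uv", "pip", "install", f"{TORCHTUNE_DIR}[cpu]"], check=True)
```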

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-code-scanner"
version = "0.1.0"
description = "Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "codeshield",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-llama-guard"
version = "0.1.0"
description = "Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-prompt-guard"
version = "0.1.0"
description = "Prompt Guard safety provider for detecting and filtering unsafe prompts and content"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "transformers[accelerate]",
    "torch",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-basic"
version = "0.1.0"
description = "Basic scoring provider for simple evaluation metrics and scoring functions"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "requests",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-braintrust"
version = "0.1.0"
description = "Braintrust scoring provider for evaluation and scoring using the Braintrust platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "autoevals",
    "openai",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-scoring-llm-as-judge"
version = "0.1.0"
description = "LLM-as-judge scoring provider that uses language models to evaluate and score responses"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-telemetry-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of telemetry and observability using OpenTelemetry"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "opentelemetry-sdk",
    "opentelemetry-exporter-otlp-proto-http",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-tool-runtime-rag"
version = "0.1.0"
description = "RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "chardet",
    "pypdf",
    "tqdm",
    "numpy",
    "scikit-learn",
    "scipy",
    "nltk",
    "sentencepiece",
    "transformers",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-chroma"
version = "0.1.0"
description = "Chroma inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "chromadb",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-faiss"
version = "0.1.0"
description = "Faiss inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "faiss-cpu",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-milvus"
version = "0.1.0"
description = "Milvus inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "pymilvus[milvus-lite]>=2.4.10",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-qdrant"
version = "0.1.0"
description = "Qdrant inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "qdrant-client",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

@@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-vector-io-sqlite-vec"
version = "0.1.0"
description = "SQLite-Vec inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "sqlite-vec",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]