Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 04:04:14 +00:00.
Currently, providers have a `pip_package` list. Rather than maintaining our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring its dependencies in a more trackable manner. Each provider can then be installed using the already-in-place `module` field in the ProviderSpec, which points to the directory the provider lives in; we can then simply `uv pip install` that directory instead of installing the dependencies one by one. Signed-off-by: Charlie Doern <cdoern@redhat.com>
20 lines · 478 B · TOML
# pyproject.toml for the NVIDIA safety provider of llama-stack.
# Built with setuptools via PEP 517/518; installable standalone with
# `uv pip install <this directory>`.

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-safety-nvidia"
version = "0.1.0"
description = "NVIDIA's safety provider for content moderation and safety filtering"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "requests",
]

# Package discovery: pick up only the provider's `llama_stack*` packages
# rooted at this directory.
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]