llama-stack-mirror/llama_stack/providers/remote/inference/tgi/pyproject.toml
Charlie Doern 41431d8bdd refactor: convert providers to be installed via package
Currently, providers declare their dependencies in a `pip_packages` list. Rather than maintain our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring the dependencies in a more trackable manner.
Each provider can then be installed using the `module` field already present in the ProviderSpec, which points to the directory the provider lives in (see the registry sketch below).
We can then simply `uv pip install` that directory instead of installing the dependencies one by one.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
2025-09-22 09:23:50 -04:00
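
For context, a rough sketch of what the pre-change registry entry for the remote TGI provider looks like. Names such as remote_provider_spec, AdapterSpec, and config_class follow the llama-stack registry; treat the exact signature as an approximation, not a verbatim copy:

# Sketch of the registry entry before this change: dependencies live in
# pip_packages, while module already identifies the provider's package,
# which is what the new pyproject.toml-based install can reuse.
from llama_stack.providers.datatypes import AdapterSpec, Api, remote_provider_spec

remote_provider_spec(
    api=Api.inference,
    adapter=AdapterSpec(
        adapter_type="tgi",
        pip_packages=["huggingface_hub", "aiohttp"],  # moves into pyproject.toml below
        module="llama_stack.providers.remote.inference.tgi",  # doubles as the install target
        config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig",
    ),
)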


[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-tgi"
version = "0.1.0"
description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { text = "MIT" }
dependencies = [
    "huggingface_hub",
    "aiohttp",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
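
With this file in place, installing the provider amounts to resolving the ProviderSpec's `module` to its source directory and pointing `uv pip install` at it, which pulls in the [project] dependencies via the setuptools build backend. A minimal sketch of that flow; the install_provider helper below is hypothetical, not llama-stack's actual build code:

# Hypothetical helper: resolve a provider module to its directory and
# install that directory (and its pyproject.toml dependencies) with uv.
import importlib.util
import subprocess
from pathlib import Path

def install_provider(module: str) -> None:
    spec = importlib.util.find_spec(module)
    if spec is None or spec.origin is None:
        raise ModuleNotFoundError(module)
    # spec.origin is the package's __init__.py; its parent directory is
    # where the provider's pyproject.toml lives.
    provider_dir = Path(spec.origin).parent
    subprocess.run(["uv", "pip", "install", str(provider_dir)], check=True)

install_provider("llama_stack.providers.remote.inference.tgi")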