Currently, providers carry a `pip_packages` list. Rather than maintain our own form of Python dependency management, we should use a `pyproject.toml` file in each provider, declaring its dependencies in a more trackable manner. Each provider can then be installed via the `module` field already present in the ProviderSpec, which points to the directory the provider lives in: we can simply `uv pip install` that directory instead of installing the dependencies one by one.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
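A minimal sketch of the proposed flow, assuming the Groq provider lives under `llama_stack/providers/remote/inference/groq` (the path and the subprocess wrapper are illustrative, not part of this change):

```python
# Hypothetical sketch: install a provider from its own directory instead of
# iterating over a pip_packages list. The directory path is an assumption.
import subprocess

provider_dir = "llama_stack/providers/remote/inference/groq"

# Equivalent to running `uv pip install <provider_dir>` by hand; uv resolves
# the dependencies declared in the provider's pyproject.toml (e.g. litellm).
subprocess.run(["uv", "pip", "install", provider_dir], check=True)
```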
The provider's `pyproject.toml`:
```toml
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama-stack-provider-inference-groq"
version = "0.1.0"
description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
    "litellm",
]

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]
```
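For context, a hedged sketch of how a provider spec might lean on this file; the import path and constructor arguments follow the `ProviderSpec` and `module` fields described above, but they are assumptions for illustration, not the confirmed llama-stack API:

```python
# Hypothetical sketch: a spec whose module field points at the provider
# directory that now ships its own pyproject.toml. Field names and the
# import path are assumptions.
from llama_stack.providers.datatypes import Api, ProviderSpec

groq_spec = ProviderSpec(
    api=Api.inference,
    provider_type="remote::groq",
    module="llama_stack.providers.remote.inference.groq",
    pip_packages=[],  # dependencies now live in the provider's pyproject.toml
)
```

Note that the `[tool.setuptools.packages.find]` stanza restricts packaging to the `llama_stack*` namespace, so the dotted path in the existing `module` field should continue to resolve after installing the directory.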