Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-15 01:26:10 +00:00
# What does this PR do?

This PR allows the unit test code coverage percentage to be reported in PR builds. Today's output only tells the reader which tests passed and which failed:

<img width="744" alt="Screenshot 2025-03-10 at 9 44 28 AM" src="https://github.com/user-attachments/assets/40b1a578-951f-4b74-8a37-a39c039b1d7e" />

If a contributor creates a new module within Llama Stack and starts writing unit tests for it, it can be difficult for Llama Stack maintainers to quickly determine the code coverage percentage of that new module. To enable code coverage reporting in CI, we only need to install `pytest-cov` so the `--cov` flag can be used with the existing `pytest` command; a sketch of what this can look like is included at the end of this description. Ideally a bot would report code coverage, but this PR can serve as a temporary solution.

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan

I ran these changes locally:

<img width="1455" alt="Screenshot 2025-03-10 at 10 01 53 AM" src="https://github.com/user-attachments/assets/dfd765c6-5979-42a3-b899-7713a3f202e6" />

PR build to confirm the expected behavior:

<img width="1326" alt="Screenshot 2025-03-10 at 12 47 36 PM" src="https://github.com/user-attachments/assets/fe94f1e6-fbb5-4e57-9902-197502c50621" />

[//]: # (## Documentation)

Signed-off-by: Courtney Pacheco <6019922+courtneypacheco@users.noreply.github.com>
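As a minimal sketch (not the exact change in this PR): `pytest-cov` already sits in the `dev` extra of `pyproject.toml` below, and coverage is then requested with the `--cov` flag, either directly on the `pytest` command line in the workflow or through pytest's `pyproject.toml` configuration. The `[tool.pytest.ini_options]` block here is a hypothetical illustration, not part of this repository's config.

```toml
# Sketch only: pytest-cov is part of the dev extras so CI can install it.
[project.optional-dependencies]
dev = [
    "pytest",
    "pytest-asyncio",
    "pytest-cov",
]

# Hypothetical alternative to passing --cov on the CI command line:
# pytest picks these options up automatically when run from the repo root.
[tool.pytest.ini_options]
addopts = "--cov=llama_stack --cov-report=term-missing"
```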
168 lines · 4.1 KiB · TOML
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[project]
name = "llama_stack"
version = "0.1.6"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
description = "Llama Stack"
readme = "README.md"
requires-python = ">=3.10"
license = { "text" = "MIT" }
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
    "Intended Audience :: Developers",
    "Intended Audience :: Information Technology",
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Scientific/Engineering :: Information Analysis",
]
dependencies = [
    "blobfile",
    "fire",
    "httpx",
    "huggingface-hub",
    "jsonschema",
    "llama-stack-client>=0.1.6",
    "prompt-toolkit",
    "python-dotenv",
    "pydantic>=2",
    "requests",
    "rich",
    "setuptools",
    "termcolor",
    "tiktoken",
    "pillow",
]

[project.optional-dependencies]
dev = [
    "pytest",
    "pytest-asyncio",
    "pytest-cov",
    "pytest-html",
    "nbval", # For notebook testing
    "black",
    "ruff",
    "types-requests",
    "types-setuptools",
    "pre-commit",
    "uvicorn",
    "fastapi",
    "ruamel.yaml", # needed for openapi generator
]
test = [
    "openai",
    "aiosqlite",
    "sqlite-vec",
    "ollama",
    "torch>=2.6.0",
    "fairscale>=0.4.13",
    "torchvision>=0.21.0",
    "lm-format-enforcer>=0.10.9",
    "groq",
    "opentelemetry-sdk",
    "opentelemetry-exporter-otlp-proto-http",
    "chardet",
    "pypdf",
]
docs = [
    "sphinx-autobuild",
    "myst-parser",
    "sphinx-rtd-theme",
    "sphinx-copybutton",
    "sphinx-tabs",
    "sphinx-design",
    "sphinxcontrib.redoc",
    "sphinxcontrib.video",
    "sphinxcontrib.mermaid",
    "tomli",
]
codegen = ["rich", "pydantic", "jinja2>=3.1.6"]

[project.urls]
Homepage = "https://github.com/meta-llama/llama-stack"

[project.scripts]
llama = "llama_stack.cli.llama:main"
install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned"

[tool.setuptools]
packages = { find = {} }
license-files = []

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.uv.sources]
torch = [{ index = "pytorch-cpu" }]
torchvision = [{ index = "pytorch-cpu" }]

[tool.ruff]
line-length = 120
exclude = [
    "./.git",
    "./docs/*",
    "./build",
    "./scripts",
    "./venv",
    "*.pyi",
    ".pre-commit-config.yaml",
    "*.md",
    ".flake8",
]

[tool.ruff.lint]
select = [
    "B",  # flake8-bugbear
    "B9", # flake8-bugbear subset
    "C",  # comprehensions
    "E",  # pycodestyle
    "F",  # Pyflakes
    "N",  # Naming
    "W",  # Warnings
    "I",  # isort
]
ignore = [
    # The following ignores are desired by the project maintainers.
    "E402", # Module level import not at top of file
    "E501", # Line too long
    "F405", # Maybe undefined or defined from star import
    "C408", # Ignored because we like the dict keyword argument syntax
    "N812", # Ignored because import torch.nn.functional as F is PyTorch convention

    # These are the additional ones we started ignoring after moving to ruff. We should look into each one of them later.
    "C901", # Complexity of the function is too high
]

[tool.mypy]
mypy_path = ["llama_stack"]
packages = ["llama_stack"]
disable_error_code = []
warn_return_any = true
# honor excludes by not following them through imports
follow_imports = "silent"
exclude = [
    # As we fix more and more of these, we should remove them from the list
    "llama_stack/providers",
    "llama_stack/distribution",
    "llama_stack/apis",
    "llama_stack/cli",
    "llama_stack/models",
    "llama_stack/strong_typing",
    "llama_stack/templates",
]

[[tool.mypy.overrides]]
# packages that lack typing annotations, do not have stubs, or are unavailable.
module = ["yaml", "fire"]
ignore_missing_imports = true

[[tool.mypy.overrides]]
module = ["llama_stack.distribution.resolver", "llama_stack.log"]
follow_imports = "normal" # This will force type checking on this module