llama-stack-mirror/src/llama_stack_api/pyproject.toml
Sébastien Han 00e7ea6c3b
fix: adopt FastAPI directly in llama-stack-api
This commit migrates the Batches API to use FastAPI routers directly in the
API package, removing the need for custom decorator systems and manual router
registration. The API package now defines FastAPI routers using standard
FastAPI route decorators, making it self-sufficient and eliminating dependencies
on the server package.

The router implementation has been moved from llama_stack/core/server/routers/batches.py
to llama_stack_api/batches/routes.py, where it belongs alongside the protocol
and models.

Standard error responses (standard_responses) have been moved from the server
package to llama_stack_api/router_utils.py, ensuring the API package can
define complete routers without server dependencies. FastAPI has been added
as an explicit dependency to the llama-stack-api package, making it an
intentional dependency rather than an implicit one.

Router discovery is now fully automatic. The server discovers routers by
checking for routes modules in each API package and looking for a create_router
function. This eliminates the need for manual registration and makes the system
scalable - new APIs with router modules are automatically discovered and used.

The router registry has been simplified to use automatic discovery instead of
maintaining a manual registry. The build_router function (renamed from
create_router to better reflect its purpose) discovers and combines router
factories with implementations to create the final router instances.

Exposing Routers from the API is nice for the Bring Your Own API use
case too.

Signed-off-by: Sébastien Han <seb@redhat.com>
2025-11-20 15:10:33 +01:00

83 lines
2.8 KiB
TOML

[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"

[tool.uv]
required-version = ">=0.7.0"

[project]
name = "llama-stack-api"
version = "0.4.0.dev0"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
description = "API and Provider specifications for Llama Stack - lightweight package with protocol definitions and provider specs"
readme = "README.md"
requires-python = ">=3.12"
license = { text = "MIT" }
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Operating System :: OS Independent",
    "Intended Audience :: Developers",
    "Intended Audience :: Information Technology",
    "Intended Audience :: Science/Research",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Scientific/Engineering :: Information Analysis",
]
# Sorted alphabetically (PyPA convention). FastAPI is a deliberate, explicit
# dependency: this package defines its own FastAPI routers (see commit message).
dependencies = [
    "fastapi>=0.115.0,<1.0",
    "jsonschema",
    "opentelemetry-exporter-otlp-proto-http>=1.30.0",
    "opentelemetry-sdk>=1.30.0",
    "pydantic>=2.11.9",
]

[project.urls]
Homepage = "https://github.com/llamastack/llama-stack"

[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack_api", "llama_stack_api.*"]

[tool.setuptools.package-data]
llama_stack_api = ["py.typed"]

[tool.ruff]
line-length = 120

[tool.ruff.lint]
select = [
    "UP",      # pyupgrade
    "B",       # flake8-bugbear
    "B9",      # flake8-bugbear subset
    "C",       # comprehensions
    "E",       # pycodestyle
    "F",       # Pyflakes
    "N",       # Naming
    "W",       # Warnings
    "DTZ",     # datetime rules
    "I",       # isort (imports order)
    "RUF001",  # Checks for ambiguous Unicode characters in strings
    "RUF002",  # Checks for ambiguous Unicode characters in docstrings
    "RUF003",  # Checks for ambiguous Unicode characters in comments
    "PLC2401", # Checks for the use of non-ASCII characters in variable names
]
ignore = [
    # The following ignores are desired by the project maintainers.
    "E402", # Module level import not at top of file
    "E501", # Line too long
    "F405", # Maybe undefined or defined from star import
    "C408", # Ignored because we like the dict keyword argument syntax
    "N812", # Ignored because import torch.nn.functional as F is PyTorch convention
    # These are the additional ones we started ignoring after moving to ruff. We should look into each one of them later.
    "C901", # Complexity of the function is too high
]
# Do not fix this automatically since ruff will replace the zero-width space
# with \u200b - let's do it manually.
unfixable = [
    "PLE2515",
]

[tool.ruff.lint.per-file-ignores]
# Re-export packages intentionally use star imports.
"llama_stack_api/apis/**/__init__.py" = ["F403"]

[tool.ruff.lint.pep8-naming]
classmethod-decorators = ["classmethod", "pydantic.field_validator"]