chore(package): migrate to src/ layout

Moved package code from llama_stack/ to src/llama_stack/ following Python
packaging best practices. Updated pyproject.toml, MANIFEST.in, and tool
configurations accordingly.

Public API and import paths remain unchanged. Developers will need to
reinstall in editable mode after pulling this change.

Also updated paths in pre-commit config, scripts, and GitHub workflows.
This commit is contained in:
Ashwin Bharambe 2025-10-27 11:27:58 -07:00
parent 98a5047f9d
commit 8e5ed739ec
790 changed files with 2947 additions and 447 deletions

View file

@@ -150,7 +150,7 @@ llama = "llama_stack.cli.llama:main"
install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned"
[tool.setuptools.packages.find]
where = ["."]
where = ["src"]
include = ["llama_stack", "llama_stack.*"]
[[tool.uv.index]]
@@ -217,17 +217,17 @@ unfixable = [
# Ignore the following errors for the following files
[tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = ["DTZ"] # Ignore datetime rules for tests
"llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py" = ["RUF001"]
"llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py" = [
"src/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py" = ["RUF001"]
"src/llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py" = [
"RUF001",
"PLE2515",
]
"llama_stack/apis/**/__init__.py" = [
"src/llama_stack/apis/**/__init__.py" = [
"F403",
] # Using import * is acceptable (or at least tolerated) in an __init__.py of a package API
[tool.mypy]
mypy_path = ["llama_stack"]
mypy_path = ["src"]
packages = ["llama_stack"]
plugins = ['pydantic.mypy']
disable_error_code = []
@@ -239,77 +239,77 @@ follow_imports = "silent"
# to exclude the entire directory.
exclude = [
# As we fix more and more of these, we should remove them from the list
"^llama_stack.core/build\\.py$",
"^llama_stack.core/client\\.py$",
"^llama_stack.core/request_headers\\.py$",
"^llama_stack.core/routers/",
"^llama_stack.core/routing_tables/",
"^llama_stack.core/server/endpoints\\.py$",
"^llama_stack.core/server/server\\.py$",
"^llama_stack.core/stack\\.py$",
"^llama_stack.core/store/registry\\.py$",
"^llama_stack.core/utils/exec\\.py$",
"^llama_stack.core/utils/prompt_for_config\\.py$",
"^llama_stack/models/llama/llama3/interface\\.py$",
"^llama_stack/models/llama/llama3/tokenizer\\.py$",
"^llama_stack/models/llama/llama3/tool_utils\\.py$",
"^llama_stack/providers/inline/agents/meta_reference/",
"^llama_stack/providers/inline/datasetio/localfs/",
"^llama_stack/providers/inline/eval/meta_reference/eval\\.py$",
"^llama_stack/providers/inline/inference/meta_reference/inference\\.py$",
"^llama_stack/models/llama/llama3/generation\\.py$",
"^llama_stack/models/llama/llama3/multimodal/model\\.py$",
"^llama_stack/models/llama/llama4/",
"^llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers\\.py$",
"^llama_stack/providers/inline/post_training/common/validator\\.py$",
"^llama_stack/providers/inline/safety/code_scanner/",
"^llama_stack/providers/inline/safety/llama_guard/",
"^llama_stack/providers/inline/scoring/basic/",
"^llama_stack/providers/inline/scoring/braintrust/",
"^llama_stack/providers/inline/scoring/llm_as_judge/",
"^llama_stack/providers/remote/agents/sample/",
"^llama_stack/providers/remote/datasetio/huggingface/",
"^llama_stack/providers/remote/datasetio/nvidia/",
"^llama_stack/providers/remote/inference/bedrock/",
"^llama_stack/providers/remote/inference/nvidia/",
"^llama_stack/providers/remote/inference/passthrough/",
"^llama_stack/providers/remote/inference/runpod/",
"^llama_stack/providers/remote/inference/tgi/",
"^llama_stack/providers/remote/inference/watsonx/",
"^llama_stack/providers/remote/safety/bedrock/",
"^llama_stack/providers/remote/safety/nvidia/",
"^llama_stack/providers/remote/safety/sambanova/",
"^llama_stack/providers/remote/safety/sample/",
"^llama_stack/providers/remote/tool_runtime/bing_search/",
"^llama_stack/providers/remote/tool_runtime/brave_search/",
"^llama_stack/providers/remote/tool_runtime/model_context_protocol/",
"^llama_stack/providers/remote/tool_runtime/tavily_search/",
"^llama_stack/providers/remote/tool_runtime/wolfram_alpha/",
"^llama_stack/providers/remote/post_training/nvidia/",
"^llama_stack/providers/remote/vector_io/chroma/",
"^llama_stack/providers/remote/vector_io/milvus/",
"^llama_stack/providers/remote/vector_io/pgvector/",
"^llama_stack/providers/remote/vector_io/qdrant/",
"^llama_stack/providers/remote/vector_io/sample/",
"^llama_stack/providers/remote/vector_io/weaviate/",
"^llama_stack/providers/utils/bedrock/client\\.py$",
"^llama_stack/providers/utils/bedrock/refreshable_boto_session\\.py$",
"^llama_stack/providers/utils/inference/embedding_mixin\\.py$",
"^llama_stack/providers/utils/inference/litellm_openai_mixin\\.py$",
"^llama_stack/providers/utils/inference/model_registry\\.py$",
"^llama_stack/providers/utils/inference/openai_compat\\.py$",
"^llama_stack/providers/utils/inference/prompt_adapter\\.py$",
"^llama_stack/providers/utils/kvstore/kvstore\\.py$",
"^llama_stack/providers/utils/kvstore/postgres/postgres\\.py$",
"^llama_stack/providers/utils/kvstore/redis/redis\\.py$",
"^llama_stack/providers/utils/memory/vector_store\\.py$",
"^llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
"^llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
"^llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
"^llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
"^llama_stack/providers/utils/telemetry/tracing\\.py$",
"^llama_stack/strong_typing/auxiliary\\.py$",
"^llama_stack/distributions/template\\.py$",
"^src/llama_stack/core/build\\.py$",
"^src/llama_stack/core/client\\.py$",
"^src/llama_stack/core/request_headers\\.py$",
"^src/llama_stack/core/routers/",
"^src/llama_stack/core/routing_tables/",
"^src/llama_stack/core/server/endpoints\\.py$",
"^src/llama_stack/core/server/server\\.py$",
"^src/llama_stack/core/stack\\.py$",
"^src/llama_stack/core/store/registry\\.py$",
"^src/llama_stack/core/utils/exec\\.py$",
"^src/llama_stack/core/utils/prompt_for_config\\.py$",
"^src/llama_stack/models/llama/llama3/interface\\.py$",
"^src/llama_stack/models/llama/llama3/tokenizer\\.py$",
"^src/llama_stack/models/llama/llama3/tool_utils\\.py$",
"^src/llama_stack/providers/inline/agents/meta_reference/",
"^src/llama_stack/providers/inline/datasetio/localfs/",
"^src/llama_stack/providers/inline/eval/meta_reference/eval\\.py$",
"^src/llama_stack/providers/inline/inference/meta_reference/inference\\.py$",
"^src/llama_stack/models/llama/llama3/generation\\.py$",
"^src/llama_stack/models/llama/llama3/multimodal/model\\.py$",
"^src/llama_stack/models/llama/llama4/",
"^src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers\\.py$",
"^src/llama_stack/providers/inline/post_training/common/validator\\.py$",
"^src/llama_stack/providers/inline/safety/code_scanner/",
"^src/llama_stack/providers/inline/safety/llama_guard/",
"^src/llama_stack/providers/inline/scoring/basic/",
"^src/llama_stack/providers/inline/scoring/braintrust/",
"^src/llama_stack/providers/inline/scoring/llm_as_judge/",
"^src/llama_stack/providers/remote/agents/sample/",
"^src/llama_stack/providers/remote/datasetio/huggingface/",
"^src/llama_stack/providers/remote/datasetio/nvidia/",
"^src/llama_stack/providers/remote/inference/bedrock/",
"^src/llama_stack/providers/remote/inference/nvidia/",
"^src/llama_stack/providers/remote/inference/passthrough/",
"^src/llama_stack/providers/remote/inference/runpod/",
"^src/llama_stack/providers/remote/inference/tgi/",
"^src/llama_stack/providers/remote/inference/watsonx/",
"^src/llama_stack/providers/remote/safety/bedrock/",
"^src/llama_stack/providers/remote/safety/nvidia/",
"^src/llama_stack/providers/remote/safety/sambanova/",
"^src/llama_stack/providers/remote/safety/sample/",
"^src/llama_stack/providers/remote/tool_runtime/bing_search/",
"^src/llama_stack/providers/remote/tool_runtime/brave_search/",
"^src/llama_stack/providers/remote/tool_runtime/model_context_protocol/",
"^src/llama_stack/providers/remote/tool_runtime/tavily_search/",
"^src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/",
"^src/llama_stack/providers/remote/post_training/nvidia/",
"^src/llama_stack/providers/remote/vector_io/chroma/",
"^src/llama_stack/providers/remote/vector_io/milvus/",
"^src/llama_stack/providers/remote/vector_io/pgvector/",
"^src/llama_stack/providers/remote/vector_io/qdrant/",
"^src/llama_stack/providers/remote/vector_io/sample/",
"^src/llama_stack/providers/remote/vector_io/weaviate/",
"^src/llama_stack/providers/utils/bedrock/client\\.py$",
"^src/llama_stack/providers/utils/bedrock/refreshable_boto_session\\.py$",
"^src/llama_stack/providers/utils/inference/embedding_mixin\\.py$",
"^src/llama_stack/providers/utils/inference/litellm_openai_mixin\\.py$",
"^src/llama_stack/providers/utils/inference/model_registry\\.py$",
"^src/llama_stack/providers/utils/inference/openai_compat\\.py$",
"^src/llama_stack/providers/utils/inference/prompt_adapter\\.py$",
"^src/llama_stack/providers/utils/kvstore/kvstore\\.py$",
"^src/llama_stack/providers/utils/kvstore/postgres/postgres\\.py$",
"^src/llama_stack/providers/utils/kvstore/redis/redis\\.py$",
"^src/llama_stack/providers/utils/memory/vector_store\\.py$",
"^src/llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
"^src/llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
"^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
"^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
"^src/llama_stack/providers/utils/telemetry/tracing\\.py$",
"^src/llama_stack/strong_typing/auxiliary\\.py$",
"^src/llama_stack/distributions/template\\.py$",
]
[[tool.mypy.overrides]]