chore(package): migrate to src/ layout (#3920)

Migrates package structure to src/ layout following Python packaging
best practices.

All code moved from `llama_stack/` to `src/llama_stack/`. Public API
unchanged - imports remain `import llama_stack.*`.

Updated build configs, pre-commit hooks, scripts, and GitHub workflows
accordingly. All hooks pass, package builds cleanly.

**Developer note**: Reinstall after pulling: `pip install -e .`
This commit is contained in:
Ashwin Bharambe 2025-10-27 12:02:21 -07:00 committed by GitHub
parent 98a5047f9d
commit 471b1b248b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
791 changed files with 2983 additions and 456 deletions

View file

@@ -9,8 +9,8 @@ on:
branches: [ main ] branches: [ main ]
paths: paths:
- 'distributions/**' - 'distributions/**'
- 'llama_stack/**' - 'src/llama_stack/**'
- '!llama_stack/ui/**' - '!src/llama_stack/ui/**'
- 'tests/integration/**' - 'tests/integration/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -8,7 +8,7 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/providers/utils/sqlstore/**' - 'src/llama_stack/providers/utils/sqlstore/**'
- 'tests/integration/sqlstore/**' - 'tests/integration/sqlstore/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -9,8 +9,8 @@ on:
branches: [ main ] branches: [ main ]
types: [opened, synchronize, reopened] types: [opened, synchronize, reopened]
paths: paths:
- 'llama_stack/**' - 'src/llama_stack/**'
- '!llama_stack/ui/**' - '!src/llama_stack/ui/**'
- 'tests/**' - 'tests/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -8,8 +8,8 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/**' - 'src/llama_stack/**'
- '!llama_stack/ui/**' - '!src/llama_stack/ui/**'
- 'tests/integration/vector_io/**' - 'tests/integration/vector_io/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -41,11 +41,11 @@ jobs:
with: with:
node-version: '20' node-version: '20'
cache: 'npm' cache: 'npm'
cache-dependency-path: 'llama_stack/ui/' cache-dependency-path: 'src/llama_stack/ui/'
- name: Install npm dependencies - name: Install npm dependencies
run: npm ci run: npm ci
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
- name: Run pre-commit - name: Run pre-commit
id: precommit id: precommit

View file

@@ -145,12 +145,12 @@ jobs:
with: with:
node-version: '20' node-version: '20'
cache: 'npm' cache: 'npm'
cache-dependency-path: 'llama_stack/ui/' cache-dependency-path: 'src/llama_stack/ui/'
- name: Install npm dependencies - name: Install npm dependencies
if: steps.check_author.outputs.authorized == 'true' if: steps.check_author.outputs.authorized == 'true'
run: npm ci run: npm ci
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
- name: Run pre-commit - name: Run pre-commit
if: steps.check_author.outputs.authorized == 'true' if: steps.check_author.outputs.authorized == 'true'

View file

@@ -7,24 +7,24 @@ on:
branches: branches:
- main - main
paths: paths:
- 'llama_stack/cli/stack/build.py' - 'src/llama_stack/cli/stack/build.py'
- 'llama_stack/cli/stack/_build.py' - 'src/llama_stack/cli/stack/_build.py'
- 'llama_stack/core/build.*' - 'src/llama_stack/core/build.*'
- 'llama_stack/core/*.sh' - 'src/llama_stack/core/*.sh'
- '.github/workflows/providers-build.yml' - '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**' - 'src/llama_stack/distributions/**'
- 'pyproject.toml' - 'pyproject.toml'
- 'containers/Containerfile' - 'containers/Containerfile'
- '.dockerignore' - '.dockerignore'
pull_request: pull_request:
paths: paths:
- 'llama_stack/cli/stack/build.py' - 'src/llama_stack/cli/stack/build.py'
- 'llama_stack/cli/stack/_build.py' - 'src/llama_stack/cli/stack/_build.py'
- 'llama_stack/core/build.*' - 'src/llama_stack/core/build.*'
- 'llama_stack/core/*.sh' - 'src/llama_stack/core/*.sh'
- '.github/workflows/providers-build.yml' - '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**' - 'src/llama_stack/distributions/**'
- 'pyproject.toml' - 'pyproject.toml'
- 'containers/Containerfile' - 'containers/Containerfile'
- '.dockerignore' - '.dockerignore'
@@ -45,7 +45,7 @@ jobs:
- name: Generate Distribution List - name: Generate Distribution List
id: set-matrix id: set-matrix
run: | run: |
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]') distros=$(ls src/llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
echo "distros=$distros" >> "$GITHUB_OUTPUT" echo "distros=$distros" >> "$GITHUB_OUTPUT"
build: build:
@@ -107,13 +107,13 @@ jobs:
- name: Build container image - name: Build container image
run: | run: |
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' llama_stack/distributions/ci-tests/build.yaml) BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' src/llama_stack/distributions/ci-tests/build.yaml)
docker build . \ docker build . \
-f containers/Containerfile \ -f containers/Containerfile \
--build-arg INSTALL_MODE=editable \ --build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=ci-tests \ --build-arg DISTRO_NAME=ci-tests \
--build-arg BASE_IMAGE="$BASE_IMAGE" \ --build-arg BASE_IMAGE="$BASE_IMAGE" \
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \ --build-arg RUN_CONFIG_PATH=/workspace/src/llama_stack/distributions/ci-tests/run.yaml \
-t llama-stack:ci-tests -t llama-stack:ci-tests
- name: Inspect the container image entrypoint - name: Inspect the container image entrypoint
@@ -143,17 +143,17 @@ jobs:
run: | run: |
yq -i ' yq -i '
.distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest" .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
' llama_stack/distributions/ci-tests/build.yaml ' src/llama_stack/distributions/ci-tests/build.yaml
- name: Build UBI9 container image - name: Build UBI9 container image
run: | run: |
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' llama_stack/distributions/ci-tests/build.yaml) BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' src/llama_stack/distributions/ci-tests/build.yaml)
docker build . \ docker build . \
-f containers/Containerfile \ -f containers/Containerfile \
--build-arg INSTALL_MODE=editable \ --build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=ci-tests \ --build-arg DISTRO_NAME=ci-tests \
--build-arg BASE_IMAGE="$BASE_IMAGE" \ --build-arg BASE_IMAGE="$BASE_IMAGE" \
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \ --build-arg RUN_CONFIG_PATH=/workspace/src/llama_stack/distributions/ci-tests/run.yaml \
-t llama-stack:ci-tests-ubi9 -t llama-stack:ci-tests-ubi9
- name: Inspect UBI9 image - name: Inspect UBI9 image

View file

@@ -7,22 +7,22 @@ on:
branches: branches:
- main - main
paths: paths:
- 'llama_stack/cli/stack/list_deps.py' - 'src/llama_stack/cli/stack/list_deps.py'
- 'llama_stack/cli/stack/_list_deps.py' - 'src/llama_stack/cli/stack/_list_deps.py'
- 'llama_stack/core/build.*' - 'src/llama_stack/core/build.*'
- 'llama_stack/core/*.sh' - 'src/llama_stack/core/*.sh'
- '.github/workflows/providers-list-deps.yml' - '.github/workflows/providers-list-deps.yml'
- 'llama_stack/templates/**' - 'src/llama_stack/templates/**'
- 'pyproject.toml' - 'pyproject.toml'
pull_request: pull_request:
paths: paths:
- 'llama_stack/cli/stack/list_deps.py' - 'src/llama_stack/cli/stack/list_deps.py'
- 'llama_stack/cli/stack/_list_deps.py' - 'src/llama_stack/cli/stack/_list_deps.py'
- 'llama_stack/core/build.*' - 'src/llama_stack/core/build.*'
- 'llama_stack/core/*.sh' - 'src/llama_stack/core/*.sh'
- '.github/workflows/providers-list-deps.yml' - '.github/workflows/providers-list-deps.yml'
- 'llama_stack/templates/**' - 'src/llama_stack/templates/**'
- 'pyproject.toml' - 'pyproject.toml'
concurrency: concurrency:
@@ -41,7 +41,7 @@ jobs:
- name: Generate Distribution List - name: Generate Distribution List
id: set-matrix id: set-matrix
run: | run: |
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]') distros=$(ls src/llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
echo "distros=$distros" >> "$GITHUB_OUTPUT" echo "distros=$distros" >> "$GITHUB_OUTPUT"
list-deps: list-deps:
@@ -102,4 +102,4 @@ jobs:
USE_COPY_NOT_MOUNT: "true" USE_COPY_NOT_MOUNT: "true"
LLAMA_STACK_DIR: "." LLAMA_STACK_DIR: "."
run: | run: |
uv run llama stack list-deps llama_stack/distributions/ci-tests/build.yaml uv run llama stack list-deps src/llama_stack/distributions/ci-tests/build.yaml

View file

@@ -10,7 +10,7 @@ on:
branches: branches:
- main - main
paths-ignore: paths-ignore:
- 'llama_stack/ui/**' - 'src/llama_stack/ui/**'
jobs: jobs:
build: build:

View file

@@ -8,7 +8,7 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/**' - 'src/llama_stack/**'
- 'tests/integration/**' - 'tests/integration/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -8,8 +8,8 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/**' - 'src/llama_stack/**'
- '!llama_stack/ui/**' - '!src/llama_stack/ui/**'
- 'tests/integration/**' - 'tests/integration/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -8,7 +8,7 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/ui/**' - 'src/llama_stack/ui/**'
- '.github/workflows/ui-unit-tests.yml' # This workflow - '.github/workflows/ui-unit-tests.yml' # This workflow
workflow_dispatch: workflow_dispatch:
@@ -33,22 +33,22 @@ jobs:
with: with:
node-version: ${{ matrix.node-version }} node-version: ${{ matrix.node-version }}
cache: 'npm' cache: 'npm'
cache-dependency-path: 'llama_stack/ui/package-lock.json' cache-dependency-path: 'src/llama_stack/ui/package-lock.json'
- name: Install dependencies - name: Install dependencies
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
run: npm ci run: npm ci
- name: Run linting - name: Run linting
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
run: npm run lint run: npm run lint
- name: Run format check - name: Run format check
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
run: npm run format:check run: npm run format:check
- name: Run unit tests - name: Run unit tests
working-directory: llama_stack/ui working-directory: src/llama_stack/ui
env: env:
CI: true CI: true

View file

@@ -8,8 +8,8 @@ on:
pull_request: pull_request:
branches: [ main ] branches: [ main ]
paths: paths:
- 'llama_stack/**' - 'src/llama_stack/**'
- '!llama_stack/ui/**' - '!src/llama_stack/ui/**'
- 'tests/unit/**' - 'tests/unit/**'
- 'uv.lock' - 'uv.lock'
- 'pyproject.toml' - 'pyproject.toml'

View file

@@ -42,7 +42,7 @@ repos:
hooks: hooks:
- id: ruff - id: ruff
args: [ --fix ] args: [ --fix ]
exclude: ^llama_stack/strong_typing/.*$ exclude: ^src/llama_stack/strong_typing/.*$
- id: ruff-format - id: ruff-format
- repo: https://github.com/adamchainz/blacken-docs - repo: https://github.com/adamchainz/blacken-docs
@@ -86,7 +86,7 @@ repos:
language: python language: python
pass_filenames: false pass_filenames: false
require_serial: true require_serial: true
files: ^llama_stack/distributions/.*$|^llama_stack/providers/.*/inference/.*/models\.py$ files: ^src/llama_stack/distributions/.*$|^src/llama_stack/providers/.*/inference/.*/models\.py$
- id: provider-codegen - id: provider-codegen
name: Provider Codegen name: Provider Codegen
additional_dependencies: additional_dependencies:
@@ -95,7 +95,7 @@ repos:
language: python language: python
pass_filenames: false pass_filenames: false
require_serial: true require_serial: true
files: ^llama_stack/providers/.*$ files: ^src/llama_stack/providers/.*$
- id: openapi-codegen - id: openapi-codegen
name: API Spec Codegen name: API Spec Codegen
additional_dependencies: additional_dependencies:
@@ -104,7 +104,7 @@ repos:
language: python language: python
pass_filenames: false pass_filenames: false
require_serial: true require_serial: true
files: ^llama_stack/apis/|^docs/openapi_generator/ files: ^src/llama_stack/apis/|^docs/openapi_generator/
- id: check-workflows-use-hashes - id: check-workflows-use-hashes
name: Check GitHub Actions use SHA-pinned actions name: Check GitHub Actions use SHA-pinned actions
entry: ./scripts/check-workflows-use-hashes.sh entry: ./scripts/check-workflows-use-hashes.sh
@@ -120,7 +120,7 @@ repos:
pass_filenames: false pass_filenames: false
require_serial: true require_serial: true
always_run: true always_run: true
files: ^llama_stack/.*$ files: ^src/llama_stack/.*$
- id: forbid-pytest-asyncio - id: forbid-pytest-asyncio
name: Block @pytest.mark.asyncio and @pytest_asyncio.fixture name: Block @pytest.mark.asyncio and @pytest_asyncio.fixture
entry: bash entry: bash
@@ -150,7 +150,7 @@ repos:
name: Format & Lint UI name: Format & Lint UI
entry: bash ./scripts/run-ui-linter.sh entry: bash ./scripts/run-ui-linter.sh
language: system language: system
files: ^llama_stack/ui/.*\.(ts|tsx)$ files: ^src/llama_stack/ui/.*\.(ts|tsx)$
pass_filenames: false pass_filenames: false
require_serial: true require_serial: true

View file

@@ -1,11 +1,11 @@
include pyproject.toml include pyproject.toml
include llama_stack/models/llama/llama3/tokenizer.model include src/llama_stack/models/llama/llama3/tokenizer.model
include llama_stack/models/llama/llama4/tokenizer.model include src/llama_stack/models/llama/llama4/tokenizer.model
include llama_stack/core/*.sh include src/llama_stack/core/*.sh
include llama_stack/cli/scripts/*.sh include src/llama_stack/cli/scripts/*.sh
include llama_stack/distributions/*/*.yaml include src/llama_stack/distributions/*/*.yaml
exclude llama_stack/distributions/ci-tests exclude src/llama_stack/distributions/ci-tests
include tests/integration/test_cases/inference/*.json include tests/integration/test_cases/inference/*.json
include llama_stack/models/llama/*/*.md include src/llama_stack/models/llama/*/*.md
include llama_stack/tests/integration/*.jpg include src/llama_stack/tests/integration/*.jpg
prune llama_stack/distributions/ci-tests prune src/llama_stack/distributions/ci-tests

View file

@@ -150,7 +150,7 @@ llama = "llama_stack.cli.llama:main"
install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned" install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_presigned"
[tool.setuptools.packages.find] [tool.setuptools.packages.find]
where = ["."] where = ["src"]
include = ["llama_stack", "llama_stack.*"] include = ["llama_stack", "llama_stack.*"]
[[tool.uv.index]] [[tool.uv.index]]
@@ -217,17 +217,17 @@ unfixable = [
# Ignore the following errors for the following files # Ignore the following errors for the following files
[tool.ruff.lint.per-file-ignores] [tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = ["DTZ"] # Ignore datetime rules for tests "tests/**/*.py" = ["DTZ"] # Ignore datetime rules for tests
"llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py" = ["RUF001"] "src/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py" = ["RUF001"]
"llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py" = [ "src/llama_stack/providers/inline/scoring/basic/scoring_fn/fn_defs/regex_parser_multiple_choice_answer.py" = [
"RUF001", "RUF001",
"PLE2515", "PLE2515",
] ]
"llama_stack/apis/**/__init__.py" = [ "src/llama_stack/apis/**/__init__.py" = [
"F403", "F403",
] # Using import * is acceptable (or at least tolerated) in an __init__.py of a package API ] # Using import * is acceptable (or at least tolerated) in an __init__.py of a package API
[tool.mypy] [tool.mypy]
mypy_path = ["llama_stack"] mypy_path = ["src"]
packages = ["llama_stack"] packages = ["llama_stack"]
plugins = ['pydantic.mypy'] plugins = ['pydantic.mypy']
disable_error_code = [] disable_error_code = []
@@ -239,77 +239,77 @@ follow_imports = "silent"
# to exclude the entire directory. # to exclude the entire directory.
exclude = [ exclude = [
# As we fix more and more of these, we should remove them from the list # As we fix more and more of these, we should remove them from the list
"^llama_stack.core/build\\.py$", "^src/llama_stack/core/build\\.py$",
"^llama_stack.core/client\\.py$", "^src/llama_stack/core/client\\.py$",
"^llama_stack.core/request_headers\\.py$", "^src/llama_stack/core/request_headers\\.py$",
"^llama_stack.core/routers/", "^src/llama_stack/core/routers/",
"^llama_stack.core/routing_tables/", "^src/llama_stack/core/routing_tables/",
"^llama_stack.core/server/endpoints\\.py$", "^src/llama_stack/core/server/endpoints\\.py$",
"^llama_stack.core/server/server\\.py$", "^src/llama_stack/core/server/server\\.py$",
"^llama_stack.core/stack\\.py$", "^src/llama_stack/core/stack\\.py$",
"^llama_stack.core/store/registry\\.py$", "^src/llama_stack/core/store/registry\\.py$",
"^llama_stack.core/utils/exec\\.py$", "^src/llama_stack/core/utils/exec\\.py$",
"^llama_stack.core/utils/prompt_for_config\\.py$", "^src/llama_stack/core/utils/prompt_for_config\\.py$",
"^llama_stack/models/llama/llama3/interface\\.py$", "^src/llama_stack/models/llama/llama3/interface\\.py$",
"^llama_stack/models/llama/llama3/tokenizer\\.py$", "^src/llama_stack/models/llama/llama3/tokenizer\\.py$",
"^llama_stack/models/llama/llama3/tool_utils\\.py$", "^src/llama_stack/models/llama/llama3/tool_utils\\.py$",
"^llama_stack/providers/inline/agents/meta_reference/", "^src/llama_stack/providers/inline/agents/meta_reference/",
"^llama_stack/providers/inline/datasetio/localfs/", "^src/llama_stack/providers/inline/datasetio/localfs/",
"^llama_stack/providers/inline/eval/meta_reference/eval\\.py$", "^src/llama_stack/providers/inline/eval/meta_reference/eval\\.py$",
"^llama_stack/providers/inline/inference/meta_reference/inference\\.py$", "^src/llama_stack/providers/inline/inference/meta_reference/inference\\.py$",
"^llama_stack/models/llama/llama3/generation\\.py$", "^src/llama_stack/models/llama/llama3/generation\\.py$",
"^llama_stack/models/llama/llama3/multimodal/model\\.py$", "^src/llama_stack/models/llama/llama3/multimodal/model\\.py$",
"^llama_stack/models/llama/llama4/", "^src/llama_stack/models/llama/llama4/",
"^llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers\\.py$", "^src/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers\\.py$",
"^llama_stack/providers/inline/post_training/common/validator\\.py$", "^src/llama_stack/providers/inline/post_training/common/validator\\.py$",
"^llama_stack/providers/inline/safety/code_scanner/", "^src/llama_stack/providers/inline/safety/code_scanner/",
"^llama_stack/providers/inline/safety/llama_guard/", "^src/llama_stack/providers/inline/safety/llama_guard/",
"^llama_stack/providers/inline/scoring/basic/", "^src/llama_stack/providers/inline/scoring/basic/",
"^llama_stack/providers/inline/scoring/braintrust/", "^src/llama_stack/providers/inline/scoring/braintrust/",
"^llama_stack/providers/inline/scoring/llm_as_judge/", "^src/llama_stack/providers/inline/scoring/llm_as_judge/",
"^llama_stack/providers/remote/agents/sample/", "^src/llama_stack/providers/remote/agents/sample/",
"^llama_stack/providers/remote/datasetio/huggingface/", "^src/llama_stack/providers/remote/datasetio/huggingface/",
"^llama_stack/providers/remote/datasetio/nvidia/", "^src/llama_stack/providers/remote/datasetio/nvidia/",
"^llama_stack/providers/remote/inference/bedrock/", "^src/llama_stack/providers/remote/inference/bedrock/",
"^llama_stack/providers/remote/inference/nvidia/", "^src/llama_stack/providers/remote/inference/nvidia/",
"^llama_stack/providers/remote/inference/passthrough/", "^src/llama_stack/providers/remote/inference/passthrough/",
"^llama_stack/providers/remote/inference/runpod/", "^src/llama_stack/providers/remote/inference/runpod/",
"^llama_stack/providers/remote/inference/tgi/", "^src/llama_stack/providers/remote/inference/tgi/",
"^llama_stack/providers/remote/inference/watsonx/", "^src/llama_stack/providers/remote/inference/watsonx/",
"^llama_stack/providers/remote/safety/bedrock/", "^src/llama_stack/providers/remote/safety/bedrock/",
"^llama_stack/providers/remote/safety/nvidia/", "^src/llama_stack/providers/remote/safety/nvidia/",
"^llama_stack/providers/remote/safety/sambanova/", "^src/llama_stack/providers/remote/safety/sambanova/",
"^llama_stack/providers/remote/safety/sample/", "^src/llama_stack/providers/remote/safety/sample/",
"^llama_stack/providers/remote/tool_runtime/bing_search/", "^src/llama_stack/providers/remote/tool_runtime/bing_search/",
"^llama_stack/providers/remote/tool_runtime/brave_search/", "^src/llama_stack/providers/remote/tool_runtime/brave_search/",
"^llama_stack/providers/remote/tool_runtime/model_context_protocol/", "^src/llama_stack/providers/remote/tool_runtime/model_context_protocol/",
"^llama_stack/providers/remote/tool_runtime/tavily_search/", "^src/llama_stack/providers/remote/tool_runtime/tavily_search/",
"^llama_stack/providers/remote/tool_runtime/wolfram_alpha/", "^src/llama_stack/providers/remote/tool_runtime/wolfram_alpha/",
"^llama_stack/providers/remote/post_training/nvidia/", "^src/llama_stack/providers/remote/post_training/nvidia/",
"^llama_stack/providers/remote/vector_io/chroma/", "^src/llama_stack/providers/remote/vector_io/chroma/",
"^llama_stack/providers/remote/vector_io/milvus/", "^src/llama_stack/providers/remote/vector_io/milvus/",
"^llama_stack/providers/remote/vector_io/pgvector/", "^src/llama_stack/providers/remote/vector_io/pgvector/",
"^llama_stack/providers/remote/vector_io/qdrant/", "^src/llama_stack/providers/remote/vector_io/qdrant/",
"^llama_stack/providers/remote/vector_io/sample/", "^src/llama_stack/providers/remote/vector_io/sample/",
"^llama_stack/providers/remote/vector_io/weaviate/", "^src/llama_stack/providers/remote/vector_io/weaviate/",
"^llama_stack/providers/utils/bedrock/client\\.py$", "^src/llama_stack/providers/utils/bedrock/client\\.py$",
"^llama_stack/providers/utils/bedrock/refreshable_boto_session\\.py$", "^src/llama_stack/providers/utils/bedrock/refreshable_boto_session\\.py$",
"^llama_stack/providers/utils/inference/embedding_mixin\\.py$", "^src/llama_stack/providers/utils/inference/embedding_mixin\\.py$",
"^llama_stack/providers/utils/inference/litellm_openai_mixin\\.py$", "^src/llama_stack/providers/utils/inference/litellm_openai_mixin\\.py$",
"^llama_stack/providers/utils/inference/model_registry\\.py$", "^src/llama_stack/providers/utils/inference/model_registry\\.py$",
"^llama_stack/providers/utils/inference/openai_compat\\.py$", "^src/llama_stack/providers/utils/inference/openai_compat\\.py$",
"^llama_stack/providers/utils/inference/prompt_adapter\\.py$", "^src/llama_stack/providers/utils/inference/prompt_adapter\\.py$",
"^llama_stack/providers/utils/kvstore/kvstore\\.py$", "^src/llama_stack/providers/utils/kvstore/kvstore\\.py$",
"^llama_stack/providers/utils/kvstore/postgres/postgres\\.py$", "^src/llama_stack/providers/utils/kvstore/postgres/postgres\\.py$",
"^llama_stack/providers/utils/kvstore/redis/redis\\.py$", "^src/llama_stack/providers/utils/kvstore/redis/redis\\.py$",
"^llama_stack/providers/utils/memory/vector_store\\.py$", "^src/llama_stack/providers/utils/memory/vector_store\\.py$",
"^llama_stack/providers/utils/scoring/aggregation_utils\\.py$", "^src/llama_stack/providers/utils/scoring/aggregation_utils\\.py$",
"^llama_stack/providers/utils/scoring/base_scoring_fn\\.py$", "^src/llama_stack/providers/utils/scoring/base_scoring_fn\\.py$",
"^llama_stack/providers/utils/telemetry/dataset_mixin\\.py$", "^src/llama_stack/providers/utils/telemetry/dataset_mixin\\.py$",
"^llama_stack/providers/utils/telemetry/trace_protocol\\.py$", "^src/llama_stack/providers/utils/telemetry/trace_protocol\\.py$",
"^llama_stack/providers/utils/telemetry/tracing\\.py$", "^src/llama_stack/providers/utils/telemetry/tracing\\.py$",
"^llama_stack/strong_typing/auxiliary\\.py$", "^src/llama_stack/strong_typing/auxiliary\\.py$",
"^llama_stack/distributions/template\\.py$", "^src/llama_stack/distributions/template\\.py$",
] ]
[[tool.mypy.overrides]] [[tool.mypy.overrides]]

View file

@@ -16,7 +16,7 @@ if (( BASH_VERSINFO[0] < 4 )); then
exit 1 exit 1
fi fi
PACKAGE_DIR="${1:-llama_stack}" PACKAGE_DIR="${1:-src/llama_stack}"
if [ ! -d "$PACKAGE_DIR" ]; then if [ ! -d "$PACKAGE_DIR" ]; then
echo "ERROR: Package directory '$PACKAGE_DIR' does not exist" echo "ERROR: Package directory '$PACKAGE_DIR' does not exist"

View file

@@ -93,7 +93,7 @@ def pre_import_distros(distro_dirs: list[Path]) -> None:
def main(): def main():
distros_dir = REPO_ROOT / "llama_stack" / "distributions" distros_dir = REPO_ROOT / "src" / "llama_stack" / "distributions"
change_tracker = ChangedPathTracker() change_tracker = ChangedPathTracker()
with Progress( with Progress(

View file

@@ -6,7 +6,7 @@
# the root directory of this source tree. # the root directory of this source tree.
set -e set -e
cd llama_stack/ui cd src/llama_stack/ui
if [ ! -d node_modules ] || [ ! -x node_modules/.bin/prettier ] || [ ! -x node_modules/.bin/eslint ]; then if [ ! -d node_modules ] || [ ! -x node_modules/.bin/prettier ] || [ ! -x node_modules/.bin/eslint ]; then
echo "UI dependencies not installed, skipping prettier/linter check" echo "UI dependencies not installed, skipping prettier/linter check"

View file

@@ -27,4 +27,4 @@ fi
# Run unit tests with coverage # Run unit tests with coverage
uv run --python "$PYTHON_VERSION" --with-editable . --group unit \ uv run --python "$PYTHON_VERSION" --with-editable . --group unit \
coverage run --source=llama_stack -m pytest -s -v tests/unit/ "$@" coverage run --source=src/llama_stack -m pytest -s -v tests/unit/ "$@"

Some files were not shown because too many files have changed in this diff Show more