diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index dd1a5c6cd..faa2eda31 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -11,10 +11,10 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
+        uses: actions/setup-python@v5
         with:
           python-version: '3.11'
           cache: pip
@@ -22,4 +22,4 @@ jobs:
             **/requirements*.txt
             .pre-commit-config.yaml

-      - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
+      - uses: pre-commit/action@v3.0.1

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 89064b692..cf36150cc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,10 +5,8 @@ default_language_version:

 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: 6306a48f7dae5861702d573c9c247e4e9498e867
+  rev: v5.0.0  # Latest stable version
   hooks:
-  - id: trailing-whitespace
-  - id: check-ast
   - id: check-merge-conflict
   - id: check-added-large-files
     args: ['--maxkb=1000']
@@ -28,23 +26,32 @@ repos:
     - --license-filepath
     - docs/license_header.txt

-- repo: https://github.com/pycqa/flake8
-  rev: 34cbf8ef3950f43d09b85e2e45c15ae5717dc37b
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.9.4
   hooks:
-  - id: flake8
-    additional_dependencies:
-    - flake8-bugbear == 22.4.25
-    - pep8-naming == 0.12.1
-    - torchfix
-    args: ['--config=.flake8']
+  - id: ruff
+    args: [
+      --fix,
+      --exit-non-zero-on-fix
+    ]
+  - id: ruff-format

-- repo: https://github.com/omnilib/ufmt
-  rev: v2.7.0
+- repo: https://github.com/adamchainz/blacken-docs
+  rev: 1.19.0
   hooks:
-  - id: ufmt
+  - id: blacken-docs
     additional_dependencies:
-    - black == 24.4.2
-    - usort == 1.0.8
+    - black==24.3.0
+
+# - repo: https://github.com/pre-commit/mirrors-mypy
+#   rev: v1.14.0
+#   hooks:
+#   - id: mypy
+#     additional_dependencies:
+#       - types-requests
+#       - types-setuptools
+#       - pydantic
+#     args: [--ignore-missing-imports]

 # - repo: https://github.com/jsh9/pydoclint
 #   rev: d88180a8632bb1602a4d81344085cf320f288c5a
@@ -71,3 +78,7 @@ repos:
 #       require_serial: true
 #       files: ^llama_stack/templates/.*$
 #       stages: [manual]
+
+ci:
+  autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
+  autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate

diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py
index 9d2976dd2..a7e2afd48 100644
--- a/llama_stack/cli/stack/_build.py
+++ b/llama_stack/cli/stack/_build.py
@@ -183,9 +183,9 @@ def run_stack_build_command(
         build_config.distribution_spec.providers
     )
     normal_deps += SERVER_DEPENDENCIES

-    print(f"pip install {' '.join(normal_deps)}")
+    print(f"uv pip install {' '.join(normal_deps)}")
     for special_dep in special_deps:
-        print(f"pip install {special_dep}")
+        print(f"uv pip install {special_dep}")

     return _run_stack_build_command_from_build_config(
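Note: the ruff hooks above consolidate what flake8 (with flake8-bugbear and pep8-naming) and ufmt (black + usort) previously did. A rough local equivalent of what pre-commit now runs on each commit — a sketch assuming ruff v0.9.x is on PATH, not an exact reproduction of the hook environment — is:

```bash
# Lint with autofixes; fail the hook run if anything was rewritten,
# mirroring `args: [--fix, --exit-non-zero-on-fix]` above.
ruff check --fix --exit-non-zero-on-fix .

# Apply the ruff formatter (the black/usort replacement),
# mirroring the `ruff-format` hook.
ruff format .
```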
diff --git a/llama_stack/distribution/build.py b/llama_stack/distribution/build.py
index a29c8d5d1..a207143f3 100644
--- a/llama_stack/distribution/build.py
+++ b/llama_stack/distribution/build.py
@@ -95,11 +95,11 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]):
     normal_deps, special_deps = get_provider_dependencies(providers)

     cprint(
-        f"Please install needed dependencies using the following commands:\n\npip install {' '.join(normal_deps)}",
+        f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
         "yellow",
     )
     for special_dep in special_deps:
-        cprint(f"pip install {special_dep}", "yellow")
+        cprint(f"uv pip install {special_dep}", "yellow")
     print()

diff --git a/llama_stack/distribution/build_conda_env.sh b/llama_stack/distribution/build_conda_env.sh
index 606fbf19d..3fea09ad5 100755
--- a/llama_stack/distribution/build_conda_env.sh
+++ b/llama_stack/distribution/build_conda_env.sh
@@ -78,10 +78,12 @@ ensure_conda_env_python310() {
     eval "$(conda shell.bash hook)"
     conda deactivate && conda activate "${env_name}"

+    $CONDA_PREFIX/bin/pip install uv
+
     if [ -n "$TEST_PYPI_VERSION" ]; then
       # these packages are damaged in test-pypi, so install them first
-      $CONDA_PREFIX/bin/pip install fastapi libcst
-      $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ \
+      uv pip install fastapi libcst
+      uv pip install --extra-index-url https://test.pypi.org/simple/ \
         llama-models==$TEST_PYPI_VERSION \
         llama-stack-client==$TEST_PYPI_VERSION \
         llama-stack==$TEST_PYPI_VERSION \
@@ -90,7 +92,7 @@ ensure_conda_env_python310() {
         IFS='#' read -ra parts <<<"$special_pip_deps"
         for part in "${parts[@]}"; do
           echo "$part"
-          $CONDA_PREFIX/bin/pip install $part
+          uv pip install $part
         done
       fi
     else
@@ -102,7 +104,7 @@ ensure_conda_env_python310() {
       fi

       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
       PYPI_VERSION="${PYPI_VERSION:-}"
       if [ -n "$PYPI_VERSION" ]; then
@@ -110,7 +112,7 @@ ensure_conda_env_python310() {
       else
         SPEC_VERSION="llama-stack"
       fi
-      $CONDA_PREFIX/bin/pip install --no-cache-dir $SPEC_VERSION
+      uv pip install --no-cache-dir $SPEC_VERSION
     fi

     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -120,18 +122,18 @@ ensure_conda_env_python310() {
       fi

       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      $CONDA_PREFIX/bin/pip uninstall -y llama-models
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      uv pip uninstall -y llama-models
+      uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi

     # Install pip dependencies
     printf "Installing pip dependencies\n"
-    $CONDA_PREFIX/bin/pip install $pip_dependencies
+    uv pip install $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        $CONDA_PREFIX/bin/pip install $part
+        uv pip install $part
       done
     fi
   fi
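The conda build script now bootstraps uv once with the environment's own pip, then delegates every install to `uv pip`, which resolves into the currently active environment. A minimal sketch of the pattern, assuming an activated conda env (the env name `mystack` is illustrative):

```bash
# One-time bootstrap: install uv itself with the env's own pip.
conda activate mystack
$CONDA_PREFIX/bin/pip install uv

# From here on, uv installs into the active environment;
# the flags mirror the script above.
uv pip install --no-cache-dir llama-stack
```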
if [ -n "$pip_dependencies" ]; then add_to_container << EOF -RUN pip install --no-cache $pip_dependencies +RUN uv pip install --no-cache $pip_dependencies EOF fi @@ -91,7 +93,7 @@ if [ -n "$special_pip_deps" ]; then IFS='#' read -ra parts <<<"$special_pip_deps" for part in "${parts[@]}"; do add_to_container < Type: return_type = None else: return_type = extract_non_async_iterator_type(sig.return_annotation) - assert ( - return_type - ), f"Could not extract return type for {sig.return_annotation}" + assert return_type, ( + f"Could not extract return type for {sig.return_annotation}" + ) async with httpx.AsyncClient() as client: params = self.httpx_request_params(method_name, *args, **kwargs) @@ -87,9 +87,9 @@ def create_api_client_class(protocol) -> Type: webmethod, sig = self.routes[method_name] return_type = extract_async_iterator_type(sig.return_annotation) - assert ( - return_type - ), f"Could not extract return type for {sig.return_annotation}" + assert return_type, ( + f"Could not extract return type for {sig.return_annotation}" + ) async with httpx.AsyncClient() as client: params = self.httpx_request_params(method_name, *args, **kwargs) diff --git a/llama_stack/scripts/install_packages.sh b/llama_stack/scripts/install_packages.sh index 151b7b9db..20182c767 100755 --- a/llama_stack/scripts/install_packages.sh +++ b/llama_stack/scripts/install_packages.sh @@ -11,5 +11,5 @@ VERSION="$1" set -euo pipefail set -x -pip install -U --extra-index-url https://test.pypi.org/simple \ +uv pip install -U --extra-index-url https://test.pypi.org/simple \ llama-stack==$VERSION llama-models==$VERSION llama-stack-client==$VERSION diff --git a/llama_stack/templates/meta-reference-gpu/doc_template.md b/llama_stack/templates/meta-reference-gpu/doc_template.md index 421812dbc..60556a6f3 100644 --- a/llama_stack/templates/meta-reference-gpu/doc_template.md +++ b/llama_stack/templates/meta-reference-gpu/doc_template.md @@ -71,7 +71,7 @@ docker run \ ### Via Conda -Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. +Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available. ```bash llama stack build --template {{ name }} --image-type conda diff --git a/llama_stack/templates/meta-reference-quantized-gpu/doc_template.md b/llama_stack/templates/meta-reference-quantized-gpu/doc_template.md index daa380d20..2b117120c 100644 --- a/llama_stack/templates/meta-reference-quantized-gpu/doc_template.md +++ b/llama_stack/templates/meta-reference-quantized-gpu/doc_template.md @@ -73,7 +73,7 @@ docker run \ ### Via Conda -Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. +Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available. ```bash llama stack build --template {{ name }} --image-type conda diff --git a/llama_stack/templates/ollama/doc_template.md b/llama_stack/templates/ollama/doc_template.md index 7c260e2c1..eb4aadd29 100644 --- a/llama_stack/templates/ollama/doc_template.md +++ b/llama_stack/templates/ollama/doc_template.md @@ -93,7 +93,7 @@ docker run \ ### Via Conda -Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. +Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available. 
diff --git a/llama_stack/templates/remote-vllm/doc_template.md b/llama_stack/templates/remote-vllm/doc_template.md
index c855f6e62..0ca7279a7 100644
--- a/llama_stack/templates/remote-vllm/doc_template.md
+++ b/llama_stack/templates/remote-vllm/doc_template.md
@@ -119,7 +119,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 export INFERENCE_PORT=8000

diff --git a/llama_stack/templates/template.py b/llama_stack/templates/template.py
index d87830bca..ef8248cca 100644
--- a/llama_stack/templates/template.py
+++ b/llama_stack/templates/template.py
@@ -58,9 +58,9 @@ class RunConfigSettings(BaseModel):
             )

             config_class = provider_registry[api][provider_type].config_class
-            assert (
-                config_class is not None
-            ), f"No config class for provider type: {provider_type} for API: {api_str}"
+            assert config_class is not None, (
+                f"No config class for provider type: {provider_type} for API: {api_str}"
+            )

             config_class = instantiate_class_type(config_class)
             if hasattr(config_class, "sample_run_config"):

diff --git a/llama_stack/templates/tgi/doc_template.md b/llama_stack/templates/tgi/doc_template.md
index 18b7d6b86..32988cf57 100644
--- a/llama_stack/templates/tgi/doc_template.md
+++ b/llama_stack/templates/tgi/doc_template.md
@@ -111,7 +111,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda

diff --git a/pyproject.toml b/pyproject.toml
index 1437bcf34..d0813d2b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,3 +59,8 @@ install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_p

 [tool.setuptools]
 packages = ["llama_stack"]
+
+[[tool.uv.index]]
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
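Because the new index sets `explicit = true`, uv consults it only for packages explicitly pinned to it rather than adding it to the general resolution path. The declarative pin would live under `[tool.uv.sources]` in pyproject.toml (the `torch` pin below is an illustrative assumption, not part of this patch); an ad-hoc command-line analogue follows:

```bash
# Hypothetical pin in pyproject.toml that would route torch through the
# named index defined above:
#   [tool.uv.sources]
#   torch = { index = "pytorch-cpu" }

# Rough one-off equivalent: install torch from the CPU-only PyTorch index.
uv pip install torch --index-url https://download.pytorch.org/whl/cpu
```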