Use `uv pip install` instead of `pip install` (#921)

## What does this PR do? 

See issue: #747 -- `uv` is just plain better. This PR does the bare
minimum: replace `pip install` with `uv pip install` and ensure `uv`
exists in the environment.
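
The recurring pattern in the diffs below: bootstrap `uv` once with stock pip, then route every subsequent install through `uv pip`. A minimal sketch of the idea (the `ensure_uv` helper is my naming, not something this PR adds; the scripts inline the equivalent logic):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical helper: install uv with plain pip only if it is not already present.
ensure_uv() {
  if ! command -v uv &>/dev/null; then
    pip install uv
  fi
}

ensure_uv
# From here on, installs go through uv's much faster resolver/installer.
uv pip install -e .
```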

## Test Plan 

1. Create a fresh conda environment and `uv pip install -e .` on `llama-stack` -- all good.
2. Run `llama stack build --template together` followed by `llama stack run together` -- all good.
3. Run `llama stack build --template together --image-name yoyo` followed by `llama stack run together --image-name yoyo` -- all good.
4. In another fresh conda environment, `uv pip install -e .`, then `llama stack build --template together --image-type venv` -- all good (spelled out below).
5. Docker: `llama stack build --template together --image-type container` works!
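
Step 4 spelled out as commands, for anyone reproducing it. Assumptions: the env name `stack-test` is a placeholder of mine, and Python 3.10 is taken from the conda build script's target, not from the PR text:

```bash
# Fresh conda env, bootstrap uv, editable install, then the venv-flavored build.
conda create -y -n stack-test python=3.10
conda activate stack-test
pip install uv
uv pip install -e .
llama stack build --template together --image-type venv
```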
Commit 5b1e69e58e (parent c6d9ff2054), authored by Ashwin Bharambe on 2025-01-31 22:29:41 -08:00 and committed via GitHub.
16 changed files with 85 additions and 64 deletions


````diff
@@ -11,10 +11,10 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
+        uses: actions/setup-python@v5
         with:
           python-version: '3.11'
           cache: pip
@@ -22,4 +22,4 @@ jobs:
             **/requirements*.txt
             .pre-commit-config.yaml

-      - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd #v3.0.1
+      - uses: pre-commit/action@v3.0.1
````


````diff
@@ -5,10 +5,8 @@ default_language_version:

 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: 6306a48f7dae5861702d573c9c247e4e9498e867
+  rev: v5.0.0  # Latest stable version
   hooks:
   - id: trailing-whitespace
   - id: check-ast
   - id: check-merge-conflict
   - id: check-added-large-files
     args: ['--maxkb=1000']
@@ -28,23 +26,32 @@ repos:
     - --license-filepath
     - docs/license_header.txt

-- repo: https://github.com/pycqa/flake8
-  rev: 34cbf8ef3950f43d09b85e2e45c15ae5717dc37b
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.9.4
   hooks:
-  - id: flake8
-    additional_dependencies:
-    - flake8-bugbear == 22.4.25
-    - pep8-naming == 0.12.1
-    - torchfix
-    args: ['--config=.flake8']
+  - id: ruff
+    args: [
+      --fix,
+      --exit-non-zero-on-fix
+    ]
+  - id: ruff-format

-- repo: https://github.com/omnilib/ufmt
-  rev: v2.7.0
+- repo: https://github.com/adamchainz/blacken-docs
+  rev: 1.19.0
   hooks:
-  - id: ufmt
+  - id: blacken-docs
     additional_dependencies:
-    - black == 24.4.2
-    - usort == 1.0.8
+    - black==24.3.0

+# - repo: https://github.com/pre-commit/mirrors-mypy
+#   rev: v1.14.0
+#   hooks:
+#   - id: mypy
+#     additional_dependencies:
+#       - types-requests
+#       - types-setuptools
+#       - pydantic
+#     args: [--ignore-missing-imports]
 # - repo: https://github.com/jsh9/pydoclint
 #   rev: d88180a8632bb1602a4d81344085cf320f288c5a
@@ -71,3 +78,7 @@ repos:
 #       require_serial: true
 #       files: ^llama_stack/templates/.*$
 #       stages: [manual]
+
+ci:
+  autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
+  autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate
````
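
To exercise the new hooks locally before pushing, the standard pre-commit and ruff CLIs should suffice (not part of this diff):

```bash
# Install the hook runner into the active environment and run every hook once.
uv pip install pre-commit
pre-commit run --all-files

# Or invoke the two ruff hooks directly:
ruff check --fix .
ruff format .
```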


````diff
@@ -183,9 +183,9 @@ def run_stack_build_command(
             build_config.distribution_spec.providers
         )
         normal_deps += SERVER_DEPENDENCIES

-        print(f"pip install {' '.join(normal_deps)}")
+        print(f"uv pip install {' '.join(normal_deps)}")
         for special_dep in special_deps:
-            print(f"pip install {special_dep}")
+            print(f"uv pip install {special_dep}")
         return

     _run_stack_build_command_from_build_config(
````


````diff
@@ -95,11 +95,11 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]):
     normal_deps, special_deps = get_provider_dependencies(providers)

     cprint(
-        f"Please install needed dependencies using the following commands:\n\npip install {' '.join(normal_deps)}",
+        f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
         "yellow",
     )
     for special_dep in special_deps:
-        cprint(f"pip install {special_dep}", "yellow")
+        cprint(f"uv pip install {special_dep}", "yellow")
     print()
````


````diff
@@ -78,10 +78,12 @@ ensure_conda_env_python310() {
   eval "$(conda shell.bash hook)"
   conda deactivate && conda activate "${env_name}"

+  $CONDA_PREFIX/bin/pip install uv
+
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    $CONDA_PREFIX/bin/pip install fastapi libcst
-    $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ \
+    uv pip install fastapi libcst
+    uv pip install --extra-index-url https://test.pypi.org/simple/ \
       llama-models==$TEST_PYPI_VERSION \
       llama-stack-client==$TEST_PYPI_VERSION \
       llama-stack==$TEST_PYPI_VERSION \
@@ -90,7 +92,7 @@ ensure_conda_env_python310() {
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        $CONDA_PREFIX/bin/pip install $part
+        uv pip install $part
       done
     fi
   else
@@ -102,7 +104,7 @@ ensure_conda_env_python310() {
       fi
       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
       PYPI_VERSION="${PYPI_VERSION:-}"
       if [ -n "$PYPI_VERSION" ]; then
@@ -110,7 +112,7 @@ ensure_conda_env_python310() {
       else
         SPEC_VERSION="llama-stack"
       fi
-      $CONDA_PREFIX/bin/pip install --no-cache-dir $SPEC_VERSION
+      uv pip install --no-cache-dir $SPEC_VERSION
     fi

     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -120,18 +122,18 @@ ensure_conda_env_python310() {
       fi
       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      $CONDA_PREFIX/bin/pip uninstall -y llama-models
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      uv pip uninstall -y llama-models
+      uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi

     # Install pip dependencies
     printf "Installing pip dependencies\n"
-    uv pip install $pip_dependencies
+    uv pip install $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        $CONDA_PREFIX/bin/pip install $part
+        uv pip install $part
       done
     fi
   fi
````
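
One subtlety: the old commands pinned the interpreter via `$CONDA_PREFIX/bin/pip`, while bare `uv pip install` relies on uv detecting the active environment (as I understand it, from `VIRTUAL_ENV` or `CONDA_PREFIX`), which is why the script activates the conda env before any uv call. A sketch of that assumption:

```bash
# Activation sets CONDA_PREFIX, so uv installs into the env, not the system Python.
conda activate my-env        # my-env is a placeholder
uv pip install llama-stack   # lands under $CONDA_PREFIX
```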


````diff
@@ -76,6 +76,8 @@ RUN apt-get update && apt-get install -y \
        bubblewrap \
        && rm -rf /var/lib/apt/lists/*

+ENV UV_SYSTEM_PYTHON=1
+RUN pip install uv
 EOF
 fi
@@ -83,7 +85,7 @@ fi
 # so we can reuse layers.
 if [ -n "$pip_dependencies" ]; then
   add_to_container << EOF
-RUN pip install --no-cache $pip_dependencies
+RUN uv pip install --no-cache $pip_dependencies
 EOF
 fi
@@ -91,7 +93,7 @@ if [ -n "$special_pip_deps" ]; then
   IFS='#' read -ra parts <<<"$special_pip_deps"
   for part in "${parts[@]}"; do
     add_to_container <<EOF
-RUN pip install --no-cache $part
+RUN uv pip install --no-cache $part
 EOF
   done
 fi
@@ -109,16 +111,16 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
   # so that changes will be reflected in the container without having to do a
   # rebuild. This is just for development convenience.
   add_to_container << EOF
-RUN pip install --no-cache -e $stack_mount
+RUN uv pip install --no-cache -e $stack_mount
 EOF
 else
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
     add_to_container << EOF
-RUN pip install fastapi libcst
+RUN uv pip install fastapi libcst
 EOF
     add_to_container << EOF
-RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
+RUN uv pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
   llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION
 EOF
@@ -129,7 +131,7 @@ EOF
       SPEC_VERSION="llama-stack"
     fi
     add_to_container << EOF
-RUN pip install --no-cache $SPEC_VERSION
+RUN uv pip install --no-cache $SPEC_VERSION
 EOF
   fi
 fi
@@ -141,8 +143,8 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
   fi
   add_to_container << EOF
-RUN pip uninstall -y llama-models
-RUN pip install --no-cache $models_mount
+RUN uv pip uninstall -y llama-models
+RUN uv pip install --no-cache $models_mount
 EOF
 fi
````
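
The container path has no virtualenv, hence `ENV UV_SYSTEM_PYTHON=1`: by default `uv pip install` refuses to install into a system interpreter unless you pass `--system` or set that variable (uv behavior as I understand it, not something the diff spells out). Roughly:

```bash
# Outside any venv, the bare form errors out; either opt-in works:
uv pip install --system requests
UV_SYSTEM_PYTHON=1 uv pip install requests
```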


````diff
@@ -51,17 +51,18 @@ run() {
   local pip_dependencies="$2"
   local special_pip_deps="$3"

+  pip install uv
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    pip install fastapi libcst
-    pip install --extra-index-url https://test.pypi.org/simple/ \
+    uv pip install fastapi libcst
+    uv pip install --extra-index-url https://test.pypi.org/simple/ \
       llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION \
       $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        pip install $part
+        uv pip install $part
       done
     fi
   else
@@ -73,9 +74,9 @@ run() {
       fi
       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
-      pip install --no-cache-dir llama-stack
+      uv pip install --no-cache-dir llama-stack
     fi

     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -85,18 +86,18 @@ run() {
       fi
       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      pip uninstall -y llama-models
-      pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      uv pip uninstall -y llama-models
+      uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi

     # Install pip dependencies
     printf "Installing pip dependencies\n"
-    pip install $pip_dependencies
+    uv pip install $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        pip install $part
+        uv pip install $part
       done
     fi
   fi
````


````diff
@@ -68,9 +68,9 @@ def create_api_client_class(protocol) -> Type:
                 return_type = None
             else:
                 return_type = extract_non_async_iterator_type(sig.return_annotation)
-                assert (
-                    return_type
-                ), f"Could not extract return type for {sig.return_annotation}"
+                assert return_type, (
+                    f"Could not extract return type for {sig.return_annotation}"
+                )

             async with httpx.AsyncClient() as client:
                 params = self.httpx_request_params(method_name, *args, **kwargs)
@@ -87,9 +87,9 @@ def create_api_client_class(protocol) -> Type:
             webmethod, sig = self.routes[method_name]

             return_type = extract_async_iterator_type(sig.return_annotation)
-            assert (
-                return_type
-            ), f"Could not extract return type for {sig.return_annotation}"
+            assert return_type, (
+                f"Could not extract return type for {sig.return_annotation}"
+            )

             async with httpx.AsyncClient() as client:
                 params = self.httpx_request_params(method_name, *args, **kwargs)
````
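
A side note on this file and the `RunConfigSettings` hunk further down: the assert reflow (message pulled inside the parentheses) matches the style `ruff format` produces, so these look like mechanical fallout of the new `ruff-format` hook rather than hand edits; behavior is unchanged.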


````diff
@@ -11,5 +11,5 @@ VERSION="$1"
 set -euo pipefail
 set -x

-pip install -U --extra-index-url https://test.pypi.org/simple \
+uv pip install -U --extra-index-url https://test.pypi.org/simple \
   llama-stack==$VERSION llama-models==$VERSION llama-stack-client==$VERSION
````


````diff
@@ -71,7 +71,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
````


````diff
@@ -73,7 +73,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
````


````diff
@@ -93,7 +93,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 export LLAMA_STACK_PORT=5001
````


````diff
@@ -119,7 +119,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 export INFERENCE_PORT=8000
````


````diff
@@ -58,9 +58,9 @@ class RunConfigSettings(BaseModel):
             )

             config_class = provider_registry[api][provider_type].config_class
-            assert (
-                config_class is not None
-            ), f"No config class for provider type: {provider_type} for API: {api_str}"
+            assert config_class is not None, (
+                f"No config class for provider type: {provider_type} for API: {api_str}"
+            )

             config_class = instantiate_class_type(config_class)
             if hasattr(config_class, "sample_run_config"):
````


````diff
@@ -111,7 +111,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
````


````diff
@@ -59,3 +59,8 @@ install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_p

 [tool.setuptools]
 packages = ["llama_stack"]
+
+[[tool.uv.index]]
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
````
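
The new `[[tool.uv.index]]` entry names an index for CPU-only PyTorch wheels; with `explicit = true`, uv should consult it only for packages explicitly pinned to it via `tool.uv.sources`, so it does not shadow PyPI for everything else (my reading of uv's index semantics). The rough one-off command-line equivalent:

```bash
# Pull torch from the CPU-only index instead of PyPI for this invocation only.
uv pip install torch --index-url https://download.pytorch.org/whl/cpu
```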