Use uv pip install instead of pip install (#921)

## What does this PR do? 

See issue #747 -- `uv` is just plain better (and much faster). This PR does the bare
minimum: replace `pip install` with `uv pip install` and ensure `uv` exists in the
environment.
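
The change is mechanical everywhere: make sure `uv` is available (bootstrapping it with plain `pip` when it is not), then route every install through `uv pip install`. A minimal sketch of that pattern, assuming a POSIX shell in which `pip` already points at the target environment:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Bootstrap uv with plain pip if it is not already on PATH.
if ! command -v uv >/dev/null 2>&1; then
  pip install uv
fi

# From here on, every install goes through uv's pip-compatible interface.
uv pip install -e .            # editable install of the current checkout
uv pip install fastapi httpx   # any extra dependencies
```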

## Test Plan 

1. Create a fresh conda environment and `uv pip install -e .` on `llama-stack` -- all good.
2. Run `llama stack build --template together` followed by `llama stack run together` -- all good.
3. Run `llama stack build --template together --image-name yoyo` followed by `llama stack run together --image-name yoyo` -- all good.
4. Fresh conda again: `uv pip install -e .`, then `llama stack build --template together --image-type venv` -- all good.
5. Docker: `llama stack build --template together --image-type container` works!
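
The same sequence as a rough, copy-pasteable sketch; the environment name and Python version are illustrative and not part of the PR:

```bash
# Fresh conda environment with an editable install via uv
# (environment name is arbitrary; run `pip install uv` first if uv is not on PATH).
conda create -n stack-test python=3.10 -y
conda activate stack-test
uv pip install -e .

# Conda-based build and run with the together template.
llama stack build --template together
llama stack run together

# Same, with an explicit image name.
llama stack build --template together --image-name yoyo
llama stack run together --image-name yoyo

# venv and container image types.
llama stack build --template together --image-type venv
llama stack build --template together --image-type container
```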

Committed by Ashwin Bharambe on 2025-01-31 (commit `5b1e69e58e`, parent `c6d9ff2054`). 16 changed files with 85 additions and 64 deletions.

@@ -95,11 +95,11 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]):
 normal_deps, special_deps = get_provider_dependencies(providers)
 cprint(
-f"Please install needed dependencies using the following commands:\n\npip install {' '.join(normal_deps)}",
+f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
 "yellow",
 )
 for special_dep in special_deps:
-cprint(f"pip install {special_dep}", "yellow")
+cprint(f"uv pip install {special_dep}", "yellow")
 print()

@@ -78,10 +78,12 @@ ensure_conda_env_python310() {
 eval "$(conda shell.bash hook)"
 conda deactivate && conda activate "${env_name}"
+$CONDA_PREFIX/bin/pip install uv
 if [ -n "$TEST_PYPI_VERSION" ]; then
 # these packages are damaged in test-pypi, so install them first
-$CONDA_PREFIX/bin/pip install fastapi libcst
-$CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ \
+uv pip install fastapi libcst
+uv pip install --extra-index-url https://test.pypi.org/simple/ \
 llama-models==$TEST_PYPI_VERSION \
 llama-stack-client==$TEST_PYPI_VERSION \
 llama-stack==$TEST_PYPI_VERSION \
@@ -90,7 +92,7 @@ ensure_conda_env_python310() {
 IFS='#' read -ra parts <<<"$special_pip_deps"
 for part in "${parts[@]}"; do
 echo "$part"
-$CONDA_PREFIX/bin/pip install $part
+uv pip install $part
 done
 fi
 else
@@ -102,7 +104,7 @@ ensure_conda_env_python310() {
 fi
 printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-$CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
 else
 PYPI_VERSION="${PYPI_VERSION:-}"
 if [ -n "$PYPI_VERSION" ]; then
@@ -110,7 +112,7 @@ ensure_conda_env_python310() {
 else
 SPEC_VERSION="llama-stack"
 fi
-$CONDA_PREFIX/bin/pip install --no-cache-dir $SPEC_VERSION
+uv pip install --no-cache-dir $SPEC_VERSION
 fi
 if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -120,18 +122,18 @@ ensure_conda_env_python310() {
 fi
 printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-$CONDA_PREFIX/bin/pip uninstall -y llama-models
-$CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+uv pip uninstall -y llama-models
+uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
 fi
 # Install pip dependencies
 printf "Installing pip dependencies\n"
-$CONDA_PREFIX/bin/pip install $pip_dependencies
+uv pip install $pip_dependencies
 if [ -n "$special_pip_deps" ]; then
 IFS='#' read -ra parts <<<"$special_pip_deps"
 for part in "${parts[@]}"; do
 echo "$part"
-$CONDA_PREFIX/bin/pip install $part
+uv pip install $part
 done
 fi
 fi
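
One note on the conda path above: `uv` is bootstrapped with the environment's own `pip`, and the later `uv pip install` calls rely on `uv` detecting the active conda environment. If you want to be explicit about the target interpreter, `uv pip install` accepts a `--python` flag; a small sketch with an illustrative environment name:

```bash
# Activate the target conda environment and bootstrap uv inside it.
eval "$(conda shell.bash hook)"
conda activate my-stack-env
"$CONDA_PREFIX/bin/pip" install uv

# uv picks up the active environment; --python pins the interpreter explicitly.
uv pip install --python "$CONDA_PREFIX/bin/python" llama-stack
```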


@@ -76,6 +76,8 @@ RUN apt-get update && apt-get install -y \
 bubblewrap \
 && rm -rf /var/lib/apt/lists/*
+ENV UV_SYSTEM_PYTHON=1
+RUN pip install uv
 EOF
 fi
@@ -83,7 +85,7 @@ fi
 # so we can reuse layers.
 if [ -n "$pip_dependencies" ]; then
 add_to_container << EOF
-RUN pip install --no-cache $pip_dependencies
+RUN uv pip install --no-cache $pip_dependencies
 EOF
 fi
@@ -91,7 +93,7 @@ if [ -n "$special_pip_deps" ]; then
 IFS='#' read -ra parts <<<"$special_pip_deps"
 for part in "${parts[@]}"; do
 add_to_container <<EOF
-RUN pip install --no-cache $part
+RUN uv pip install --no-cache $part
 EOF
 done
 fi
@@ -109,16 +111,16 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
 # so that changes will be reflected in the container without having to do a
 # rebuild. This is just for development convenience.
 add_to_container << EOF
-RUN pip install --no-cache -e $stack_mount
+RUN uv pip install --no-cache -e $stack_mount
 EOF
 else
 if [ -n "$TEST_PYPI_VERSION" ]; then
 # these packages are damaged in test-pypi, so install them first
 add_to_container << EOF
-RUN pip install fastapi libcst
+RUN uv pip install fastapi libcst
 EOF
 add_to_container << EOF
-RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
+RUN uv pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
 llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION
 EOF
@@ -129,7 +131,7 @@ EOF
 SPEC_VERSION="llama-stack"
 fi
 add_to_container << EOF
-RUN pip install --no-cache $SPEC_VERSION
+RUN uv pip install --no-cache $SPEC_VERSION
 EOF
 fi
 fi
@@ -141,8 +143,8 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
 fi
 add_to_container << EOF
-RUN pip uninstall -y llama-models
-RUN pip install --no-cache $models_mount
+RUN uv pip uninstall -y llama-models
+RUN uv pip install --no-cache $models_mount
 EOF
 fi
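
In the container path, `ENV UV_SYSTEM_PYTHON=1` tells `uv` to install into the image's system Python instead of requiring a virtual environment, so the generated `RUN uv pip install ...` lines remain drop-in replacements for the old `RUN pip install ...` ones. A toy sketch of how those Dockerfile lines get assembled; `add_to_container` here is a stand-in for the build script's helper of the same name, and the dependency is illustrative:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Stand-in for the build script's add_to_container helper: append to a Dockerfile.
BUILD_DIR="$(mktemp -d)"
add_to_container() {
  cat >> "$BUILD_DIR/Dockerfile"
}

# UV_SYSTEM_PYTHON=1 lets uv install straight into the image's system Python.
add_to_container << 'EOF'
ENV UV_SYSTEM_PYTHON=1
RUN pip install uv
RUN uv pip install --no-cache fastapi
EOF

cat "$BUILD_DIR/Dockerfile"
```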


@@ -51,17 +51,18 @@ run() {
 local pip_dependencies="$2"
 local special_pip_deps="$3"
+pip install uv
 if [ -n "$TEST_PYPI_VERSION" ]; then
 # these packages are damaged in test-pypi, so install them first
-pip install fastapi libcst
-pip install --extra-index-url https://test.pypi.org/simple/ \
+uv pip install fastapi libcst
+uv pip install --extra-index-url https://test.pypi.org/simple/ \
 llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION \
 $pip_dependencies
 if [ -n "$special_pip_deps" ]; then
 IFS='#' read -ra parts <<<"$special_pip_deps"
 for part in "${parts[@]}"; do
 echo "$part"
-pip install $part
+uv pip install $part
 done
 fi
 else
@@ -73,9 +74,9 @@ run() {
 fi
 printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
 else
-pip install --no-cache-dir llama-stack
+uv pip install --no-cache-dir llama-stack
 fi
 if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -85,18 +86,18 @@ run() {
 fi
 printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-pip uninstall -y llama-models
-pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+uv pip uninstall -y llama-models
+uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
 fi
 # Install pip dependencies
 printf "Installing pip dependencies\n"
-pip install $pip_dependencies
+uv pip install $pip_dependencies
 if [ -n "$special_pip_deps" ]; then
 IFS='#' read -ra parts <<<"$special_pip_deps"
 for part in "${parts[@]}"; do
 echo "$part"
-pip install $part
+uv pip install $part
 done
 fi
 fi
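
For the venv path, the `pip install uv` bootstrap runs inside the already-activated virtual environment, so every later `uv pip install` targets that venv. A minimal sketch of the flow with an illustrative environment path (how the build script actually creates its venv is not shown in this hunk):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Create and activate a virtual environment (path is illustrative).
python3 -m venv .venv-llama
source .venv-llama/bin/activate

# Bootstrap uv inside the venv, then let it handle the real installs.
pip install uv
uv pip install --no-cache-dir llama-stack
```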


@@ -68,9 +68,9 @@ def create_api_client_class(protocol) -> Type:
 return_type = None
 else:
 return_type = extract_non_async_iterator_type(sig.return_annotation)
-assert (
-return_type
-), f"Could not extract return type for {sig.return_annotation}"
+assert return_type, (
+f"Could not extract return type for {sig.return_annotation}"
+)
 async with httpx.AsyncClient() as client:
 params = self.httpx_request_params(method_name, *args, **kwargs)
@@ -87,9 +87,9 @@ def create_api_client_class(protocol) -> Type:
 webmethod, sig = self.routes[method_name]
 return_type = extract_async_iterator_type(sig.return_annotation)
-assert (
-return_type
-), f"Could not extract return type for {sig.return_annotation}"
+assert return_type, (
+f"Could not extract return type for {sig.return_annotation}"
+)
 async with httpx.AsyncClient() as client:
 params = self.httpx_request_params(method_name, *args, **kwargs)