Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
Fix uv pip install timeout issue for PyTorch (#929)
This fixes the following timeout issue when installing PyTorch via uv. Also see the references: https://github.com/astral-sh/uv/pull/1694 and https://github.com/astral-sh/uv/issues/1549

```
Installing pip dependencies
Using Python 3.10.16 environment at: /home/yutang/.conda/envs/distribution-myenv
× Failed to download and build `antlr4-python3-runtime==4.9.3`
├─▶ Failed to extract archive
├─▶ failed to unpack
│   `/home/yutang/.cache/uv/sdists-v7/.tmpDWX4iK/antlr4-python3-runtime-4.9.3/src/antlr4/ListTokenSource.py`
├─▶ failed to unpack
│   `antlr4-python3-runtime-4.9.3/src/antlr4/ListTokenSource.py` into
│   `/home/yutang/.cache/uv/sdists-v7/.tmpDWX4iK/antlr4-python3-runtime-4.9.3/src/antlr4/ListTokenSource.py`
├─▶ error decoding response body
├─▶ request or response body error
╰─▶ operation timed out
help: `antlr4-python3-runtime` (v4.9.3) was included because `torchtune` (v0.5.0) depends on `omegaconf` (v2.3.0) which depends on `antlr4-python3-runtime>=4.9.dev0, <4.10.dev0`
Failed to build target distribution-myenv with return code 1
```

---------

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
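For readers hitting the same error outside these build scripts, a minimal sketch of the workaround is to raise uv's HTTP timeout before installing; the package list here is illustrative, not taken from the repo:

```sh
# Raise uv's per-request HTTP timeout (in seconds) so large wheels such as
# PyTorch can finish downloading; 500 matches the default the scripts below use.
export UV_HTTP_TIMEOUT=500
uv pip install torch torchtune omegaconf
```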
parent e370a77752
commit 7558678b8c
3 changed files with 9 additions and 0 deletions
```diff
@@ -9,6 +9,9 @@
 LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
 LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
+# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out
+# Reference: https://github.com/astral-sh/uv/pull/1694
+UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
 
 if [ -n "$LLAMA_STACK_DIR" ]; then
   echo "Using llama-stack-dir=$LLAMA_STACK_DIR"
```
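The `UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}` line uses the shell's `${VAR:-default}` expansion, so an explicitly set value from the caller wins and 500 is only the fallback; a small standalone sketch of that behavior (the script name is hypothetical):

```sh
# Fallback expansion: keep the caller's value if set, otherwise use 500.
UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
echo "uv HTTP timeout: ${UV_HTTP_TIMEOUT}s"

# On a very slow connection a caller could override the default, e.g.:
#   UV_HTTP_TIMEOUT=1200 ./build_env.sh   # illustrative script name
```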
```diff
@@ -11,6 +11,9 @@ LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
 PYPI_VERSION=${PYPI_VERSION:-}
 BUILD_PLATFORM=${BUILD_PLATFORM:-}
+# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out
+# Reference: https://github.com/astral-sh/uv/pull/1694
+UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
 
 # mounting is not supported by docker buildx, so we use COPY instead
 USE_COPY_NOT_MOUNT=${USE_COPY_NOT_MOUNT:-}
```
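This second script drives a container image build, so the timeout presumably has to reach the `uv` invocation that runs during the build; one hypothetical way to forward it is a build argument (the `--build-arg` flag is standard docker buildx, but this wiring is an assumption and is not shown in the diff):

```sh
# Hypothetical: forward the host-side timeout into the image build so that
# `uv pip install` inside the Dockerfile can pick it up via ARG/ENV.
# The image tag reuses the target name from the log above for illustration only.
docker buildx build \
  --build-arg UV_HTTP_TIMEOUT="${UV_HTTP_TIMEOUT:-500}" \
  -t distribution-myenv .
```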
```diff
@@ -12,6 +12,9 @@
 LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
 LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
+# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out
+# Reference: https://github.com/astral-sh/uv/pull/1694
+UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
 
 if [ -n "$LLAMA_STACK_DIR" ]; then
   echo "Using llama-stack-dir=$LLAMA_STACK_DIR"
```