Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
# Use `uv pip install` instead of `pip install` (#921)
## What does this PR do?

See issue: #747 -- `uv` is just plain better. This PR does the bare minimum of replacing `pip install` with `uv pip install` and ensuring `uv` exists in the environment.

## Test Plan

- First: create a new conda env, `uv pip install -e .` on `llama-stack` -- all is good.
- Next: run `llama stack build --template together` followed by `llama stack run together` -- all good.
- Next: run `llama stack build --template together --image-name yoyo` followed by `llama stack run together --image-name yoyo` -- all good.
- Next: fresh conda env, `uv pip install -e .` and `llama stack build --template together --image-type venv` -- all good.
- Docker: `llama stack build --template together --image-type container` works!
Parent: c6d9ff2054
Commit: 5b1e69e58e
16 changed files with 85 additions and 64 deletions
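A minimal sketch of the pattern this PR adopts throughout the build scripts (the environment setup and package names here are illustrative, not taken from the diff):

```bash
# Bootstrap uv once with the environment's existing pip, then route every
# subsequent install through uv. Assumes an already-activated environment.
pip install uv

# Drop-in replacement: `uv pip install` takes the same arguments as
# `pip install`, with a much faster resolver and installer.
uv pip install -e .               # editable install of the current project
uv pip install fastapi httpx      # package names are illustrative
```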
### .github/workflows/pre-commit.yml (vendored, 6 changes)
```diff
@@ -11,10 +11,10 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0
+        uses: actions/setup-python@v5
         with:
           python-version: '3.11'
           cache: pip
@@ -22,4 +22,4 @@ jobs:
             **/requirements*.txt
             .pre-commit-config.yaml

-      - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd #v3.0.1
+      - uses: pre-commit/action@v3.0.1
```
### .pre-commit-config.yaml

flake8 and ufmt are replaced by ruff, ruff-format, and blacken-docs, and hook pins move from commit SHAs to tagged releases:

```diff
@@ -5,10 +5,8 @@ default_language_version:

 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: 6306a48f7dae5861702d573c9c247e4e9498e867
+  rev: v5.0.0  # Latest stable version
   hooks:
-  - id: trailing-whitespace
-  - id: check-ast
   - id: check-merge-conflict
   - id: check-added-large-files
     args: ['--maxkb=1000']
@@ -28,23 +26,32 @@ repos:
     - --license-filepath
     - docs/license_header.txt

-- repo: https://github.com/pycqa/flake8
-  rev: 34cbf8ef3950f43d09b85e2e45c15ae5717dc37b
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.9.4
   hooks:
-  - id: flake8
-    additional_dependencies:
-    - flake8-bugbear == 22.4.25
-    - pep8-naming == 0.12.1
-    - torchfix
-    args: ['--config=.flake8']
+  - id: ruff
+    args: [
+      --fix,
+      --exit-non-zero-on-fix
+    ]
+  - id: ruff-format

-- repo: https://github.com/omnilib/ufmt
-  rev: v2.7.0
+- repo: https://github.com/adamchainz/blacken-docs
+  rev: 1.19.0
   hooks:
-  - id: ufmt
+  - id: blacken-docs
     additional_dependencies:
-    - black == 24.4.2
-    - usort == 1.0.8
+    - black==24.3.0
+
+# - repo: https://github.com/pre-commit/mirrors-mypy
+#   rev: v1.14.0
+#   hooks:
+#     - id: mypy
+#       additional_dependencies:
+#         - types-requests
+#         - types-setuptools
+#         - pydantic
+#       args: [--ignore-missing-imports]

 # - repo: https://github.com/jsh9/pydoclint
 #   rev: d88180a8632bb1602a4d81344085cf320f288c5a
@@ -71,3 +78,7 @@ repos:
 #       require_serial: true
 #       files: ^llama_stack/templates/.*$
 #       stages: [manual]
+
+ci:
+    autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
+    autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate
```
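For reference, a sketch of exercising the new ruff-based hooks locally, mirroring what CI runs through `pre-commit/action@v3.0.1` (assumes an environment where uv is already available):

```bash
# Install the hook runner, then run every configured hook against the
# whole tree, exactly as pre-commit.ci and the GitHub workflow do.
uv pip install pre-commit
pre-commit run --all-files
```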
The CLI's printed install hints in the stack build command switch over as well:

```diff
@@ -183,9 +183,9 @@ def run_stack_build_command(
             build_config.distribution_spec.providers
         )
         normal_deps += SERVER_DEPENDENCIES
-        print(f"pip install {' '.join(normal_deps)}")
+        print(f"uv pip install {' '.join(normal_deps)}")
         for special_dep in special_deps:
-            print(f"pip install {special_dep}")
+            print(f"uv pip install {special_dep}")
         return

    _run_stack_build_command_from_build_config(
```
Likewise for the dependency help printer:

```diff
@@ -95,11 +95,11 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]):
     normal_deps, special_deps = get_provider_dependencies(providers)

     cprint(
-        f"Please install needed dependencies using the following commands:\n\npip install {' '.join(normal_deps)}",
+        f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
         "yellow",
     )
     for special_dep in special_deps:
-        cprint(f"pip install {special_dep}", "yellow")
+        cprint(f"uv pip install {special_dep}", "yellow")
     print()
```
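Both paths only print commands for the user to copy. A hypothetical rendering of that output (the dependency names below are placeholders, not from the PR):

```bash
# One line for the normal deps, then one line per special dep:
uv pip install fastapi uvicorn httpx
uv pip install torch
```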
The conda build script bootstraps uv with the environment's own pip, then routes every install through `uv pip`:

```diff
@@ -78,10 +78,12 @@ ensure_conda_env_python310() {
   eval "$(conda shell.bash hook)"
   conda deactivate && conda activate "${env_name}"

+  $CONDA_PREFIX/bin/pip install uv
+
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    $CONDA_PREFIX/bin/pip install fastapi libcst
-    $CONDA_PREFIX/bin/pip install --extra-index-url https://test.pypi.org/simple/ \
+    uv pip install fastapi libcst
+    uv pip install --extra-index-url https://test.pypi.org/simple/ \
       llama-models==$TEST_PYPI_VERSION \
       llama-stack-client==$TEST_PYPI_VERSION \
       llama-stack==$TEST_PYPI_VERSION \
@@ -90,7 +92,7 @@ ensure_conda_env_python310() {
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        $CONDA_PREFIX/bin/pip install $part
+        uv pip install $part
       done
     fi
   else
@@ -102,7 +104,7 @@ ensure_conda_env_python310() {
       fi

       printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+      uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
     else
       PYPI_VERSION="${PYPI_VERSION:-}"
       if [ -n "$PYPI_VERSION" ]; then
@@ -110,7 +112,7 @@ ensure_conda_env_python310() {
       else
         SPEC_VERSION="llama-stack"
       fi
-      $CONDA_PREFIX/bin/pip install --no-cache-dir $SPEC_VERSION
+      uv pip install --no-cache-dir $SPEC_VERSION
     fi

     if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -120,18 +122,18 @@ ensure_conda_env_python310() {
       fi

       printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-      $CONDA_PREFIX/bin/pip uninstall -y llama-models
-      $CONDA_PREFIX/bin/pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+      uv pip uninstall -y llama-models
+      uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
     fi

     # Install pip dependencies
     printf "Installing pip dependencies\n"
-    $CONDA_PREFIX/bin/pip install $pip_dependencies
+    uv pip install $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        $CONDA_PREFIX/bin/pip install $part
+        uv pip install $part
       done
     fi
   fi
```
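The bootstrap is pinned to `$CONDA_PREFIX/bin/pip` so uv is guaranteed to land inside the environment, after which a bare `uv pip install` targets the active environment automatically. A minimal standalone sketch of the same pattern, assuming a fresh env (the env name is illustrative):

```bash
# Create and activate a conda env, bootstrap uv inside it, then install
# through uv; uv resolves the active environment on its own.
conda create -y -n mystack python=3.10
conda activate mystack
$CONDA_PREFIX/bin/pip install uv   # env-local pip puts uv in the env
uv pip install fastapi libcst      # same packages as the test-pypi branch above
```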
The container build script installs uv into the image and sets `UV_SYSTEM_PYTHON=1`, then rewrites every generated `RUN pip install` layer:

```diff
@@ -76,6 +76,8 @@ RUN apt-get update && apt-get install -y \
        bubblewrap \
        && rm -rf /var/lib/apt/lists/*

+ENV UV_SYSTEM_PYTHON=1
+RUN pip install uv
 EOF
 fi

@@ -83,7 +85,7 @@ fi
 # so we can reuse layers.
 if [ -n "$pip_dependencies" ]; then
   add_to_container << EOF
-RUN pip install --no-cache $pip_dependencies
+RUN uv pip install --no-cache $pip_dependencies
 EOF
 fi

@@ -91,7 +93,7 @@ if [ -n "$special_pip_deps" ]; then
   IFS='#' read -ra parts <<<"$special_pip_deps"
   for part in "${parts[@]}"; do
     add_to_container <<EOF
-RUN pip install --no-cache $part
+RUN uv pip install --no-cache $part
 EOF
   done
 fi
@@ -109,16 +111,16 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
   # so that changes will be reflected in the container without having to do a
   # rebuild. This is just for development convenience.
   add_to_container << EOF
-RUN pip install --no-cache -e $stack_mount
+RUN uv pip install --no-cache -e $stack_mount
 EOF
 else
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
     add_to_container << EOF
-RUN pip install fastapi libcst
+RUN uv pip install fastapi libcst
 EOF
     add_to_container << EOF
-RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
+RUN uv pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
   llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION

 EOF
@@ -129,7 +131,7 @@ EOF
       SPEC_VERSION="llama-stack"
     fi
     add_to_container << EOF
-RUN pip install --no-cache $SPEC_VERSION
+RUN uv pip install --no-cache $SPEC_VERSION
 EOF
   fi
 fi
@@ -141,8 +143,8 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
   fi

   add_to_container << EOF
-RUN pip uninstall -y llama-models
-RUN pip install --no-cache $models_mount
+RUN uv pip uninstall -y llama-models
+RUN uv pip install --no-cache $models_mount

 EOF
 fi
```
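`ENV UV_SYSTEM_PYTHON=1` tells uv to install into the image's system interpreter rather than requiring a virtual environment, which is what lets the later `RUN uv pip install` layers work as-is. A sketch of the Dockerfile fragment this produces, using the script's own `add_to_container` helper (the trailing package name is illustrative):

```bash
# Emit the uv bootstrap into the Dockerfile under construction, as the
# script above does via heredocs.
add_to_container << EOF
ENV UV_SYSTEM_PYTHON=1
RUN pip install uv
RUN uv pip install --no-cache fastapi
EOF
```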
The venv run script gets the same treatment:

```diff
@@ -51,17 +51,18 @@ run() {
   local pip_dependencies="$2"
   local special_pip_deps="$3"

+  pip install uv
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    pip install fastapi libcst
-    pip install --extra-index-url https://test.pypi.org/simple/ \
+    uv pip install fastapi libcst
+    uv pip install --extra-index-url https://test.pypi.org/simple/ \
       llama-models==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION \
       $pip_dependencies
     if [ -n "$special_pip_deps" ]; then
       IFS='#' read -ra parts <<<"$special_pip_deps"
       for part in "${parts[@]}"; do
         echo "$part"
-        pip install $part
+        uv pip install $part
       done
     fi
   else
@@ -73,9 +74,9 @@ run() {
     fi

     printf "Installing from LLAMA_STACK_DIR: $LLAMA_STACK_DIR\n"
-    pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+    uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
   else
-    pip install --no-cache-dir llama-stack
+    uv pip install --no-cache-dir llama-stack
   fi

   if [ -n "$LLAMA_MODELS_DIR" ]; then
@@ -85,18 +86,18 @@ run() {
     fi

     printf "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR\n"
-    pip uninstall -y llama-models
-    pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+    uv pip uninstall -y llama-models
+    uv pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
   fi

   # Install pip dependencies
   printf "Installing pip dependencies\n"
-  pip install $pip_dependencies
+  uv pip install $pip_dependencies
   if [ -n "$special_pip_deps" ]; then
     IFS='#' read -ra parts <<<"$special_pip_deps"
     for part in "${parts[@]}"; do
       echo "$part"
-      pip install $part
+      uv pip install $part
     done
   fi
 fi
```
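Here the bootstrap needs no `$CONDA_PREFIX`: a plain `pip install uv` runs inside the already-activated venv, and uv then installs into that same venv. A standalone sketch of the preamble (the venv path is illustrative):

```bash
# Reproduce the run() preamble by hand in a throwaway venv.
python3 -m venv /tmp/llamastack-venv
source /tmp/llamastack-venv/bin/activate
pip install uv                              # the venv's pip bootstraps uv
uv pip install --no-cache-dir llama-stack   # mirrors the non-editable branch
```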
A couple of long asserts in the client generator are reflowed:

```diff
@@ -68,9 +68,9 @@ def create_api_client_class(protocol) -> Type:
                 return_type = None
             else:
                 return_type = extract_non_async_iterator_type(sig.return_annotation)
-                assert (
-                    return_type
-                ), f"Could not extract return type for {sig.return_annotation}"
+                assert return_type, (
+                    f"Could not extract return type for {sig.return_annotation}"
+                )

             async with httpx.AsyncClient() as client:
                 params = self.httpx_request_params(method_name, *args, **kwargs)
@@ -87,9 +87,9 @@ def create_api_client_class(protocol) -> Type:
             webmethod, sig = self.routes[method_name]

             return_type = extract_async_iterator_type(sig.return_annotation)
-            assert (
-                return_type
-            ), f"Could not extract return type for {sig.return_annotation}"
+            assert return_type, (
+                f"Could not extract return type for {sig.return_annotation}"
+            )

             async with httpx.AsyncClient() as client:
                 params = self.httpx_request_params(method_name, *args, **kwargs)
```
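These reflows appear to match the style the newly added ruff-format hook settles on for long asserts (the message moves into its own parenthesized block) rather than being hand edits. A sketch of reproducing that locally, assuming the pinned ruff version; the file path is a placeholder:

```bash
# Format one file with the same ruff version the new pre-commit config pins.
uv pip install ruff==0.9.4
ruff format path/to/changed_file.py
```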
The test-pypi install script:

```diff
@@ -11,5 +11,5 @@ VERSION="$1"
 set -euo pipefail
 set -x

-pip install -U --extra-index-url https://test.pypi.org/simple \
+uv pip install -U --extra-index-url https://test.pypi.org/simple \
   llama-stack==$VERSION llama-models==$VERSION llama-stack-client==$VERSION
```
The same one-line update repeats across several distribution docs templates:

````diff
@@ -71,7 +71,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
@@ -73,7 +73,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
@@ -93,7 +93,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 export LLAMA_STACK_PORT=5001
@@ -119,7 +119,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 export INFERENCE_PORT=8000
````
Another assert reflow, this time in the template generator:

```diff
@@ -58,9 +58,9 @@ class RunConfigSettings(BaseModel):
             )

             config_class = provider_registry[api][provider_type].config_class
-            assert (
-                config_class is not None
-            ), f"No config class for provider type: {provider_type} for API: {api_str}"
+            assert config_class is not None, (
+                f"No config class for provider type: {provider_type} for API: {api_str}"
+            )

             config_class = instantiate_class_type(config_class)
             if hasattr(config_class, "sample_run_config"):
```
And one more docs template gets the same line:

````diff
@@ -111,7 +111,7 @@ docker run \

 ### Via Conda

-Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
+Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.

 ```bash
 llama stack build --template {{ name }} --image-type conda
````
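Taken together, the conda flow the updated docs describe looks like this end to end (using `together` as the template, as in the test plan above):

```bash
# Install the CLI via uv, build a distribution from a template, and run it.
uv pip install llama-stack
llama stack build --template together --image-type conda
llama stack run together
```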
### pyproject.toml

Finally, pyproject.toml declares an explicit PyTorch CPU index for uv:

```diff
@@ -59,3 +59,8 @@ install-wheel-from-presigned = "llama_stack.cli.scripts.run:install_wheel_from_p

 [tool.setuptools]
 packages = ["llama_stack"]
+
+[[tool.uv.index]]
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
```
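With `explicit = true`, uv consults the `pytorch-cpu` index only for packages explicitly pinned to it, never as a general fallback for other dependencies. When driving installs through `uv pip` directly, the equivalent is passing the URL by hand; a sketch (torch as the target package is an assumption, the PR only declares the index):

```bash
# Pull a CPU-only wheel from the PyTorch index declared in pyproject.toml.
uv pip install --index-url https://download.pytorch.org/whl/cpu torch
```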