docker: Install in editable mode for dev purposes (#160)
While rebuilding a stack using the `docker` image type and having `LLAMA_STACK_DIR` set so it installs `llama_stack` from my local source, I noticed that once built, it just used the image build cache and didn't pull in changes to my source.

1. Install in editable mode (`pip install -e`) for dev purposes.
2. Mount the source into the container for `configure` and `run` so that the editable install works.

Signed-off-by: Russell Bryant <rbryant@redhat.com>
Commit: 8db49de961 (parent: cb36be320f)

3 changed files with 27 additions and 19 deletions
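For orientation, the developer loop this change targets looks roughly like the sketch below. The exact `llama stack` subcommands and arguments are assumptions here (only `configure` and `run` are named in the commit message); the point is that `LLAMA_STACK_DIR` drives both the image build and the container mounts.

    # Use a local checkout of llama_stack instead of the released package
    # (path is an example).
    export LLAMA_STACK_DIR=~/src/llama-stack

    # Rebuild the docker-type stack, then configure/run it. With the variable
    # set, the generated Dockerfile installs the package with `pip install -e`,
    # and the configure/run wrapper scripts bind-mount the checkout into the
    # container, so source edits take effect without another image rebuild.
    llama stack build ...       # flags omitted; hypothetical invocation
    llama stack configure ...
    llama stack run ...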
Build script (Dockerfile generation):

@@ -65,7 +65,11 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
     echo "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: $LLAMA_STACK_DIR${NC}" >&2
     exit 1
   fi
-  add_to_docker "RUN pip install $stack_mount"
+
+  # Install in editable format. We will mount the source code into the container
+  # so that changes will be reflected in the container without having to do a
+  # rebuild. This is just for development convenience.
+  add_to_docker "RUN pip install -e $stack_mount"
 else
   add_to_docker "RUN pip install llama-stack"
 fi
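The `-e` install only works at runtime if the source it points at is actually present in the container, which is why the companion scripts below add the bind mount. A quick way to sanity-check the wiring (illustrative only; the image name and host path are assumptions):

    # An editable install resolves the package back to the mounted tree.
    docker run --rm -it \
      -v "$HOME/src/llama-stack:/app/llama-stack-source" \
      my-stack-image \
      python -c 'import llama_stack; print(llama_stack.__file__)'
    # Expect a path under /app/llama-stack-source; host-side edits are then
    # visible in the container without rebuilding the image.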
Configure script (runs `llama stack configure` inside the container):

@@ -8,6 +8,7 @@
 
 DOCKER_BINARY=${DOCKER_BINARY:-docker}
 DOCKER_OPTS=${DOCKER_OPTS:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 
 set -euo pipefail
 
@@ -30,8 +31,14 @@ container_build_dir="/app/builds"
 # Disable SELinux labels
 DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
 
+mounts=""
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  mounts="$mounts -v $(readlink -f $LLAMA_STACK_DIR):/app/llama-stack-source"
+fi
+
 set -x
 $DOCKER_BINARY run $DOCKER_OPTS -it \
   -v $host_build_dir:$container_build_dir \
+  $mounts \
   $docker_image \
   llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
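On the host side, the new `mounts` variable expands like this (example path only):

    LLAMA_STACK_DIR=~/src/llama-stack     # example value

    mounts=""
    if [ -n "$LLAMA_STACK_DIR" ]; then
      # readlink -f canonicalizes the path (symlinks, relative components) so
      # docker gets an absolute host path for the bind mount.
      mounts="$mounts -v $(readlink -f $LLAMA_STACK_DIR):/app/llama-stack-source"
    fi

    echo "docker run ...$mounts ..."
    # -> docker run ... -v /home/<you>/src/llama-stack:/app/llama-stack-source ...

Leaving `$mounts` unquoted in the `docker run` line is deliberate: when empty it expands to nothing, and when set it word-splits into the separate `-v <src>:<dst>` arguments docker expects (paths containing whitespace would break, which these scripts appear to accept as a limitation).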
Run script (starts the distribution server inside the container):

@@ -9,6 +9,7 @@
 DOCKER_BINARY=${DOCKER_BINARY:-docker}
 DOCKER_OPTS=${DOCKER_OPTS:-}
 LLAMA_CHECKPOINT_DIR=${LLAMA_CHECKPOINT_DIR:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 
 set -euo pipefail
 
@@ -42,24 +43,20 @@ set -x
 # Disable SELinux labels
 DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
 
+mounts=""
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  mounts="$mounts -v $(readlink -f $LLAMA_STACK_DIR):/app/llama-stack-source"
+fi
 if [ -n "$LLAMA_CHECKPOINT_DIR" ]; then
-  $DOCKER_BINARY run $DOCKER_OPTS -it \
-    -p $port:$port \
-    -v "$yaml_config:/app/config.yaml" \
-    -v "$LLAMA_CHECKPOINT_DIR:/root/.llama" \
-    --gpus=all \
-    $docker_image \
-    python -m llama_stack.distribution.server.server \
-    --yaml_config /app/config.yaml \
-    --port $port "$@"
+  mounts="$mounts -v $LLAMA_CHECKPOINT_DIR:/root/.llama"
+  DOCKER_OPTS="$DOCKER_OPTS --gpus=all"
 fi
 
-if [ -z "$LLAMA_CHECKPOINT_DIR" ]; then
-  $DOCKER_BINARY run $DOCKER_OPTS -it \
-    -p $port:$port \
-    -v "$yaml_config:/app/config.yaml" \
-    $docker_image \
-    python -m llama_stack.distribution.server.server \
-    --yaml_config /app/config.yaml \
-    --port $port "$@"
-fi
+$DOCKER_BINARY run $DOCKER_OPTS -it \
+  -p $port:$port \
+  -v "$yaml_config:/app/config.yaml" \
+  $mounts \
+  $docker_image \
+  python -m llama_stack.distribution.server.server \
+  --yaml_config /app/config.yaml \
+  --port $port "$@"
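Putting the run-script change together: instead of two near-duplicate `docker run` branches, the GPU flag and the checkpoint mount are folded into `DOCKER_OPTS` and `mounts`, and a single invocation is used. With both variables set, it effectively expands to something like this (port, paths, and image name are illustrative assumptions):

    docker run --security-opt label=disable --gpus=all -it \
      -p 5000:5000 \
      -v "/path/to/run-config.yaml:/app/config.yaml" \
      -v /home/you/src/llama-stack:/app/llama-stack-source \
      -v /home/you/.llama:/root/.llama \
      my-stack-image \
      python -m llama_stack.distribution.server.server \
      --yaml_config /app/config.yaml \
      --port 5000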