From 8db49de9619ccc1b5af56713241833c2d0694b5f Mon Sep 17 00:00:00 2001
From: Russell Bryant
Date: Mon, 30 Sep 2024 14:56:31 -0400
Subject: [PATCH] docker: Install in editable mode for dev purposes (#160)

While rebuilding a stack using the `docker` image type and having
`LLAMA_STACK_DIR` set so it installs `llama_stack` from my local source, I
noticed that once built, it just used the image build cache and didn't pull
in changes to my source.

1. Install in editable mode (`pip install -e`) for dev purposes.

2. Mount the source into the container for `configure` and `run` so that
   the editable install works.

Signed-off-by: Russell Bryant
---
 llama_stack/distribution/build_container.sh  |  6 +++-
 .../distribution/configure_container.sh      |  7 ++++
 llama_stack/distribution/start_container.sh  | 33 +++++++++----------
 3 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/llama_stack/distribution/build_container.sh b/llama_stack/distribution/build_container.sh
index 970da804e..705fd9505 100755
--- a/llama_stack/distribution/build_container.sh
+++ b/llama_stack/distribution/build_container.sh
@@ -65,7 +65,11 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
     echo "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: $LLAMA_STACK_DIR${NC}" >&2
     exit 1
   fi
-  add_to_docker "RUN pip install $stack_mount"
+
+  # Install in editable format. We will mount the source code into the container
+  # so that changes will be reflected in the container without having to do a
+  # rebuild. This is just for development convenience.
+  add_to_docker "RUN pip install -e $stack_mount"
 else
   add_to_docker "RUN pip install llama-stack"
 fi
diff --git a/llama_stack/distribution/configure_container.sh b/llama_stack/distribution/configure_container.sh
index c7ff74793..1f830a10e 100755
--- a/llama_stack/distribution/configure_container.sh
+++ b/llama_stack/distribution/configure_container.sh
@@ -8,6 +8,7 @@
 
 DOCKER_BINARY=${DOCKER_BINARY:-docker}
 DOCKER_OPTS=${DOCKER_OPTS:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 
 set -euo pipefail
 
@@ -30,8 +31,14 @@ container_build_dir="/app/builds"
 # Disable SELinux labels
 DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
 
+mounts=""
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  mounts="$mounts -v $(readlink -f $LLAMA_STACK_DIR):/app/llama-stack-source"
+fi
+
 set -x
 $DOCKER_BINARY run $DOCKER_OPTS -it \
   -v $host_build_dir:$container_build_dir \
+  $mounts \
   $docker_image \
   llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
diff --git a/llama_stack/distribution/start_container.sh b/llama_stack/distribution/start_container.sh
index 4618eb9c3..39b019588 100755
--- a/llama_stack/distribution/start_container.sh
+++ b/llama_stack/distribution/start_container.sh
@@ -9,6 +9,7 @@
 
 DOCKER_BINARY=${DOCKER_BINARY:-docker}
 DOCKER_OPTS=${DOCKER_OPTS:-}
 LLAMA_CHECKPOINT_DIR=${LLAMA_CHECKPOINT_DIR:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 
 set -euo pipefail
 
@@ -42,24 +43,20 @@ set -x
 # Disable SELinux labels
 DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
 
+mounts=""
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  mounts="$mounts -v $(readlink -f $LLAMA_STACK_DIR):/app/llama-stack-source"
+fi
 if [ -n "$LLAMA_CHECKPOINT_DIR" ]; then
-  $DOCKER_BINARY run $DOCKER_OPTS -it \
-    -p $port:$port \
-    -v "$yaml_config:/app/config.yaml" \
-    -v "$LLAMA_CHECKPOINT_DIR:/root/.llama" \
-    --gpus=all \
-    $docker_image \
-    python -m llama_stack.distribution.server.server \
-    --yaml_config /app/config.yaml \
-    --port $port "$@"
+  mounts="$mounts -v $LLAMA_CHECKPOINT_DIR:/root/.llama"
+  DOCKER_OPTS="$DOCKER_OPTS --gpus=all"
 fi
 
-if [ -z "$LLAMA_CHECKPOINT_DIR" ]; then
-  $DOCKER_BINARY run $DOCKER_OPTS -it \
-    -p $port:$port \
-    -v "$yaml_config:/app/config.yaml" \
-    $docker_image \
-    python -m llama_stack.distribution.server.server \
-    --yaml_config /app/config.yaml \
-    --port $port "$@"
-fi
+$DOCKER_BINARY run $DOCKER_OPTS -it \
+  -p $port:$port \
+  -v "$yaml_config:/app/config.yaml" \
+  $mounts \
+  $docker_image \
+  python -m llama_stack.distribution.server.server \
+    --yaml_config /app/config.yaml \
+    --port $port "$@"
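
For reference, this is roughly the `docker run` invocation that the rewritten
`start_container.sh` assembles when both `LLAMA_STACK_DIR` and
`LLAMA_CHECKPOINT_DIR` are set. The image name, config path, source checkout,
and port below are placeholders, not values taken from the patch:

    # Sketch only: paths, image name, and port are hypothetical examples.
    # The bind mount at /app/llama-stack-source matches the `pip install -e`
    # target baked into the image, so host-side source edits are picked up on
    # the next container start without rebuilding the image.
    docker run --security-opt label=disable --gpus=all -it \
      -p 5000:5000 \
      -v "$HOME/.llama/builds/my-run-config.yaml:/app/config.yaml" \
      -v "$(readlink -f "$HOME/src/llama-stack"):/app/llama-stack-source" \
      -v "$HOME/.llama:/root/.llama" \
      my-llamastack-image \
      python -m llama_stack.distribution.server.server \
        --yaml_config /app/config.yaml \
        --port 5000

The same `$mounts` string is appended to the `docker run` in
`configure_container.sh`, so `llama stack configure` operates against the same
live source tree.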