[CLI] simplify docker run (#159)

* bake run.yaml inside docker, simplify run

* add docker template examples

* delete generated Dockerfile

* unique deps

* clean up debug

* default entrypoint

* address comments, update output msg

* update msg

* build output msg

* configure msg

* unique special_deps

* remove quotes in configure
Xi Yan, 2024-09-30 15:04:04 -07:00, committed by GitHub
parent 8db49de961
commit d28c3dfe0f
9 changed files with 172 additions and 13 deletions
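
Taken together, these changes bake the generated run.yaml into the Docker image and set a default ENTRYPOINT, so launching a distribution collapses to a single docker run. A rough sketch of the resulting flow, assuming a build named local-cpu as in the templates added below (exact build invocation may differ):

    # Build a docker-type distribution; the build script now generates the
    # run.yaml and ADDs it into the image automatically.
    llama stack build

    # Start the server: no config path or entrypoint arguments needed.
    docker run -p 5000:5000 llamastack-local-cpu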

@@ -112,12 +112,6 @@ class StackConfigure(Subcommand):
        )
        return

-        build_name = docker_image.removeprefix("llamastack-")
-        saved_file = str(builds_dir / f"{build_name}-run.yaml")
-        cprint(
-            f"YAML configuration has been written to {saved_file}. You can now run `llama stack run {saved_file}`",
-            color="green",
-        )
        return

    def _configure_llama_distribution(

@@ -73,6 +73,8 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
            special_deps.append(package)
        else:
            deps.append(package)
+    deps = list(set(deps))
+    special_deps = list(set(special_deps))

    if build_config.image_type == ImageType.docker.value:
        script = pkg_resources.resource_filename(

@@ -29,9 +29,12 @@ SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
REPO_DIR=$(dirname $(dirname "$SCRIPT_DIR"))
DOCKER_BINARY=${DOCKER_BINARY:-docker}
DOCKER_OPTS=${DOCKER_OPTS:-}
+REPO_CONFIGS_DIR="$REPO_DIR/tmp/configs"

TEMP_DIR=$(mktemp -d)
+llama stack configure $build_file_path --output-dir $REPO_CONFIGS_DIR

add_to_docker() {
  local input
  output_file="$TEMP_DIR/Dockerfile"
@@ -103,11 +106,12 @@ add_to_docker <<EOF
# This would be good in production but for debugging flexibility lets not add it right now
# We need a more solid production ready entrypoint.sh anyway
#
-# ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server"]
+ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server"]
EOF

+add_to_docker "ADD tmp/configs/$(basename "$build_file_path") ./llamastack-build.yaml"
+add_to_docker "ADD tmp/configs/$build_name-run.yaml ./llamastack-run.yaml"

printf "Dockerfile created successfully in $TEMP_DIR/Dockerfile"
cat $TEMP_DIR/Dockerfile
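
The ADD source paths are relative to the Docker build context ($REPO_DIR), which is why the run config is generated into $REPO_DIR/tmp/configs earlier in the script. With these two add_to_docker calls, the generated Dockerfile ends roughly like this (a sketch assuming the build file is named local-cpu-build.yaml; the surrounding generated lines are elided):

    ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server"]
    ADD tmp/configs/local-cpu-build.yaml ./llamastack-build.yaml
    ADD tmp/configs/local-cpu-run.yaml ./llamastack-run.yaml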
@@ -128,7 +132,4 @@ set -x
$DOCKER_BINARY build $DOCKER_OPTS -t $image_name -f "$TEMP_DIR/Dockerfile" "$REPO_DIR" $mounts
set +x

-echo "You can run it with: podman run -p 8000:8000 $image_name"
-echo "Checking image builds..."
-$DOCKER_BINARY run $DOCKER_OPTS -it $image_name cat llamastack-build.yaml
+echo "Success! You can run it with: $DOCKER_BINARY $DOCKER_OPTS run -p 5000:5000 $image_name"

@@ -38,7 +38,8 @@ fi
set -x
$DOCKER_BINARY run $DOCKER_OPTS -it \
  --entrypoint "/usr/local/bin/llama" \
  -v $host_build_dir:$container_build_dir \
+  $mounts \
  $docker_image \
-  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
+  stack configure ./llamastack-build.yaml --output-dir $container_build_dir
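
The trailing argument drops its leading llama because of the --entrypoint override: Docker appends everything after the image name to the entrypoint binary, so the container effectively executes

    /usr/local/bin/llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir

Keeping the old prefix would have produced llama llama stack configure and failed.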

@@ -408,7 +408,11 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, Any]:
    return impls, specs


-def main(yaml_config: str, port: int = 5000, disable_ipv6: bool = False):
+def main(
+    yaml_config: str = "llamastack-run.yaml",
+    port: int = 5000,
+    disable_ipv6: bool = False,
+):
    with open(yaml_config, "r") as fp:
        config = StackRunConfig(**yaml.safe_load(fp))
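
Defaulting yaml_config to llamastack-run.yaml is what lets the new ENTRYPOINT start with zero arguments: the Dockerfile ADDs the run config at exactly that relative path. A different config can still be supplied, since arguments after the image name are appended to the ENTRYPOINT; the flag spelling below assumes the server CLI exposes main()'s keyword arguments (e.g. via Fire), and the container paths are illustrative:

    # No arguments: main() falls back to ./llamastack-run.yaml baked into the image.
    docker run -p 5000:5000 llamastack-local-cpu

    # Bind-mount and point at another run config (flag name and paths assumed).
    docker run -v $(pwd)/my-run.yaml:/app/my-run.yaml -p 5000:5000 \
        llamastack-local-cpu --yaml_config /app/my-run.yaml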

@@ -0,0 +1,15 @@
name: local-cpu
distribution_spec:
  description: remote inference + local safety/agents/memory
  docker_image: null
  providers:
    inference:
    - remote::ollama
    - remote::tgi
    - remote::together
    - remote::fireworks
    safety: meta-reference
    agents: meta-reference
    memory: meta-reference
    telemetry: meta-reference
image_type: docker
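
This build template lists several candidate remote inference providers; the concrete one is chosen at configure time and recorded in the generated run.yaml below (here, remote::ollama). Presumably it is consumed by llama stack build along these lines (flag shape assumed):

    llama stack build --config ./build.yaml   # path to this template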

@@ -0,0 +1,64 @@
built_at: '2024-09-30T09:04:30.533391'
image_name: local-cpu
docker_image: local-cpu
conda_env: null
apis_to_serve:
- agents
- inference
- models
- memory
- safety
- shields
- memory_banks
api_providers:
  inference:
    providers:
    - remote::ollama
  safety:
    providers:
    - meta-reference
  agents:
    provider_id: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: /home/xiyan/.llama/runtime/kvstore.db
  memory:
    providers:
    - meta-reference
  telemetry:
    provider_id: meta-reference
    config: {}
routing_table:
  inference:
  - provider_id: remote::ollama
    config:
      host: localhost
      port: 6000
    routing_key: Meta-Llama3.1-8B-Instruct
  safety:
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: llama_guard
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: code_scanner_guard
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: injection_shield
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: jailbreak_shield
  memory:
  - provider_id: meta-reference
    config: {}
    routing_key: vector
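
Note that several baked-in values are host-specific: remote::ollama points at localhost:6000, and db_path embeds the author's home directory, so this generated file is an example rather than a portable default. Inside a container, localhost resolves to the container itself; reaching an Ollama server on the Docker host typically means editing host/port before building (Ollama's own default port is 11434), or, on Linux, sharing the host network:

    # Share the host network namespace so localhost reaches the host's Ollama.
    docker run --network host llamastack-local-cpu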

@@ -0,0 +1,11 @@
name: local-gpu
distribution_spec:
  description: local meta reference
  docker_image: null
  providers:
    inference: meta-reference
    safety: meta-reference
    agents: meta-reference
    memory: meta-reference
    telemetry: meta-reference
image_type: docker

@@ -0,0 +1,67 @@
built_at: '2024-09-30T09:00:56.693751'
image_name: local-gpu
docker_image: local-gpu
conda_env: null
apis_to_serve:
- memory
- inference
- agents
- shields
- safety
- models
- memory_banks
api_providers:
  inference:
    providers:
    - meta-reference
  safety:
    providers:
    - meta-reference
  agents:
    provider_id: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: /home/xiyan/.llama/runtime/kvstore.db
  memory:
    providers:
    - meta-reference
  telemetry:
    provider_id: meta-reference
    config: {}
routing_table:
  inference:
  - provider_id: meta-reference
    config:
      model: Llama3.1-8B-Instruct
      quantization: null
      torch_seed: null
      max_seq_len: 4096
      max_batch_size: 1
    routing_key: Llama3.1-8B-Instruct
  safety:
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: llama_guard
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: code_scanner_guard
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: injection_shield
  - provider_id: meta-reference
    config:
      llama_guard_shield: null
      prompt_guard_shield: null
    routing_key: jailbreak_shield
  memory:
  - provider_id: meta-reference
    config: {}
    routing_key: vector
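
Unlike local-cpu, this image runs inference in-process through the meta-reference provider, so the container needs GPU access and the Llama3.1-8B-Instruct checkpoints. A plausible invocation with the NVIDIA container toolkit installed (mount path assumed; the host-side checkpoints live under ~/.llama):

    docker run --gpus all \
        -v ~/.llama:/root/.llama \
        -p 5000:5000 \
        llamastack-local-gpu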