From e19840379bbfbbfb88cc93dcb793438ca3cd6470 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Fri, 17 Jan 2025 22:48:49 -0800
Subject: [PATCH] tmp workflow

---
 .github/workflows/publish-to-docker.yml |  9 +++++++++
 distributions/fireworks/compose.yaml    | 10 ++++------
 distributions/together/compose.yaml     | 10 ++++------
 llama_stack/cli/stack/run.py            |  9 +++++++--
 4 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/publish-to-docker.yml b/.github/workflows/publish-to-docker.yml
index f63f52cbd..5fa57a8f6 100644
--- a/.github/workflows/publish-to-docker.yml
+++ b/.github/workflows/publish-to-docker.yml
@@ -85,6 +85,15 @@ jobs:
         run: |
           docker images
 
+      - name: Start up built docker image
+        run: |
+          cd distributions/together
+          docker compose up -d
+
+      - name: Verify built docker image
+        run: |
+          llama-stack-client models list
+
       - name: Push to dockerhub
         run: |
           TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")
diff --git a/distributions/fireworks/compose.yaml b/distributions/fireworks/compose.yaml
index 4b53fcf00..5cf598ee2 100644
--- a/distributions/fireworks/compose.yaml
+++ b/distributions/fireworks/compose.yaml
@@ -1,13 +1,11 @@
 services:
   llamastack:
-    image: llamastack/distribution-fireworks
-    network_mode: "host"
-    volumes:
-      - ~/.llama:/root/.llama
-      - ./run.yaml:/root/llamastack-run-fireworks.yaml
+    image: llamastack/fireworks
     ports:
       - "8321:8321"
-    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-fireworks.yaml"
+    environment:
+      - FIREWORKS_API_KEY=${FIREWORKS_API_KEY}
+    entrypoint: bash -c "llama stack run fireworks"
     deploy:
       restart_policy:
         condition: on-failure
diff --git a/distributions/together/compose.yaml b/distributions/together/compose.yaml
index c7251d0a7..bc41e61f9 100644
--- a/distributions/together/compose.yaml
+++ b/distributions/together/compose.yaml
@@ -1,13 +1,11 @@
 services:
   llamastack:
-    image: llamastack/distribution-together
-    network_mode: "host"
-    volumes:
-      - ~/.llama:/root/.llama
-      - ./run.yaml:/root/llamastack-run-together.yaml
+    image: llamastack/together
     ports:
       - "8321:8321"
-    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
+    environment:
+      - TOGETHER_API_KEY=${TOGETHER_API_KEY}
+    entrypoint: bash -c "llama stack run together"
     deploy:
       restart_policy:
         condition: on-failure
diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py
index e1e02d10c..966989175 100644
--- a/llama_stack/cli/stack/run.py
+++ b/llama_stack/cli/stack/run.py
@@ -6,6 +6,7 @@
 
 import argparse
 import os
+import sys
 from pathlib import Path
 
 from llama_stack.cli.subcommand import Subcommand
@@ -70,7 +71,7 @@ class StackRun(Subcommand):
             BUILDS_BASE_DIR,
             DISTRIBS_BASE_DIR,
         )
-        from llama_stack.distribution.utils.exec import run_with_pty
+        from llama_stack.distribution.utils.exec import run_command, run_with_pty
 
         if not args.config:
             self.parser.error("Must specify a config file to run")
@@ -190,4 +191,8 @@ class StackRun(Subcommand):
                     return
                 run_args.extend(["--env", f"{key}={value}"])
 
-        run_with_pty(run_args)
+        is_terminal = os.isatty(sys.stdout.fileno())
+        if is_terminal:
+            run_with_pty(run_args)
+        else:
+            run_command(run_args)
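
Note on the run.py hunk above: its effect is to allocate a pseudo-terminal only when stdout is attached to a real terminal, so that "llama stack run" keeps working inside the compose containers and the CI step, where no TTY is available. Below is a minimal standalone sketch of that branching; the stdlib pty.spawn and subprocess.run calls are stand-ins for the repo's run_with_pty / run_command helpers, whose implementations are not shown in this patch.

import os
import pty
import subprocess
import sys


def run(cmd: list[str]) -> None:
    """Run cmd, allocating a pseudo-terminal only when stdout is a terminal."""
    if os.isatty(sys.stdout.fileno()):
        # Interactive shell: a PTY keeps the child's colored, line-buffered
        # output behaving as it would when run directly.
        pty.spawn(cmd)
    else:
        # Non-interactive context (docker compose entrypoint, CI logs):
        # a plain subprocess avoids PTY allocation/termios errors.
        subprocess.run(cmd, check=False)


if __name__ == "__main__":
    # Example invocation mirroring the new compose entrypoint.
    run(["llama", "stack", "run", "together"])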