tmp workflow

Xi Yan 2025-01-17 22:48:49 -08:00
parent 8c342e1876
commit e19840379b
4 changed files with 24 additions and 14 deletions

@@ -85,6 +85,15 @@ jobs:
         run: |
           docker images
+      - name: Start up built docker image
+        run: |
+          cd distributions/together
+          docker compose up
+      - name: Start up built docker image
+        run: |
+          llama-stack-client models list
       - name: Push to dockerhub
         run: |
           TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")

@@ -1,13 +1,11 @@
 services:
   llamastack:
-    image: llamastack/distribution-fireworks
-    network_mode: "host"
-    volumes:
-      - ~/.llama:/root/.llama
-      - ./run.yaml:/root/llamastack-run-fireworks.yaml
+    image: llamastack/fireworks
     ports:
       - "8321:8321"
-    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-fireworks.yaml"
+    environment:
+      - FIREWORKS_API_KEY=${FIREWORKS_API_KEY}
+    entrypoint: bash -c "llama stack run fireworks"
     deploy:
       restart_policy:
         condition: on-failure
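
With the run.yaml volume mount and host networking gone, the container only needs the API key, which compose substitutes from the calling shell into the FIREWORKS_API_KEY entry above. A minimal usage sketch, assuming the file lives under distributions/fireworks (by analogy with distributions/together in the workflow):

# The key is read from the calling shell and forwarded into the container by compose.
export FIREWORKS_API_KEY="your-fireworks-key"
cd distributions/fireworks
docker compose up

Since host networking was dropped, the published 8321:8321 mapping is now the path into the server, so clients on the host keep using http://localhost:8321.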

@@ -1,13 +1,11 @@
 services:
   llamastack:
-    image: llamastack/distribution-together
-    network_mode: "host"
-    volumes:
-      - ~/.llama:/root/.llama
-      - ./run.yaml:/root/llamastack-run-together.yaml
+    image: llamastack/together
     ports:
       - "8321:8321"
-    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
+    environment:
+      - TOGETHER_API_KEY=${TOGETHER_API_KEY}
+    entrypoint: bash -c "llama stack run together"
     deploy:
       restart_policy:
         condition: on-failure
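
The Together compose file gets the identical treatment. One way to sanity-check the new environment passthrough, assuming the stack is already running and the image ships printenv (llamastack is the service name from the file above):

cd distributions/together
docker compose exec llamastack printenv TOGETHER_API_KEY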

@@ -6,6 +6,7 @@
 
 import argparse
 import os
+import sys
 from pathlib import Path
 
 from llama_stack.cli.subcommand import Subcommand
@@ -70,7 +71,7 @@ class StackRun(Subcommand):
             BUILDS_BASE_DIR,
             DISTRIBS_BASE_DIR,
         )
-        from llama_stack.distribution.utils.exec import run_with_pty
+        from llama_stack.distribution.utils.exec import run_command, run_with_pty
 
         if not args.config:
             self.parser.error("Must specify a config file to run")
@@ -190,4 +191,8 @@ class StackRun(Subcommand):
                 return
             run_args.extend(["--env", f"{key}={value}"])
 
-        run_with_pty(run_args)
+        is_terminal = os.isatty(sys.stdout.fileno())
+        if is_terminal:
+            run_with_pty(run_args)
+        else:
+            run_command(run_args)
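
The CLI change makes llama stack run fall back to run_command when stdout is not a terminal, presumably so the entrypoint behaves inside the compose and CI steps above where no TTY is attached; interactive sessions keep the pseudo-terminal path. The condition it branches on is easy to observe from a shell:

# isatty() is True in an interactive terminal but False as soon as stdout is a
# pipe, as it is under CI or docker compose log collection.
python -c 'import os, sys; print(os.isatty(sys.stdout.fileno()))'
python -c 'import os, sys; print(os.isatty(sys.stdout.fileno()))' | cat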