forked from phoenix-oss/llama-stack-mirror
[CICD] Github workflow for publishing Docker images (#764)
# What does this PR do?
- Add Github workflow for publishing docker images.
- Manual inputs
- We can build from either (1) a TestPyPI version or (2) a released PyPI
version
**Notes**
- Keep this workflow manually triggered as we don't want to publish
nightly docker images
**Additional Changes**
- Resolve issue with running llama stack build on a non-terminal device
```
File "/home/runner/.local/lib/python3.12/site-packages/llama_stack/distribution/utils/exec.py", line 25, in run_with_pty
old_settings = termios.tcgetattr(sys.stdin)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
termios.error: (25, 'Inappropriate ioctl for device')
```
- Modified build_container.sh to work in non-terminal environment
## Test Plan
- Triggered workflow:
3562217878
<img width="1076" alt="image"
src="https://github.com/user-attachments/assets/f1b5cef6-05ab-49c7-b405-53abc9264734"
/>
- Tested published docker image
<img width="702" alt="image"
src="https://github.com/user-attachments/assets/e7135189-65c8-45d8-86f9-9f3be70e380b"
/>
- /tools API endpoints are served, confirming that the Docker image is
correctly built from the TestPyPI package
<img width="296" alt="image"
src="https://github.com/user-attachments/assets/bbcaa7fe-c0a4-4d22-b600-90e3c254bbfd"
/>
- Published tagged images:
https://hub.docker.com/repositories/llamastack
<img width="947" alt="image"
src="https://github.com/user-attachments/assets/2a0a0494-4d45-4643-bc29-72154ecc54a5"
/>
## Sources
Please link relevant resources if necessary.
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the
other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor
guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md),
Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
This commit is contained in:
parent
b78e6675ea
commit
32d3abe964
5 changed files with 145 additions and 21 deletions
99
.github/workflows/publish-to-docker.yml
vendored
Normal file
99
.github/workflows/publish-to-docker.yml
vendored
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
name: Docker Build and Publish
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
version:
|
||||||
|
description: 'TestPyPI or PyPI version to build (e.g., 0.0.63.dev20250114)'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build-and-push:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Log in to the Container registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Set version
|
||||||
|
id: version
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "push" ]; then
|
||||||
|
echo "VERSION=0.0.63.dev20250114" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "VERSION=${{ inputs.version }}" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Check package version availability
|
||||||
|
run: |
|
||||||
|
# Function to check if version exists in a repository
|
||||||
|
check_version() {
|
||||||
|
local repo=$1
|
||||||
|
local status_code=$(curl -s -o /dev/null -w "%{http_code}" "https://$repo.org/project/llama-stack/${{ steps.version.outputs.version }}")
|
||||||
|
return $([ "$status_code" -eq 200 ])
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check TestPyPI first, then PyPI
|
||||||
|
if check_version "test.pypi"; then
|
||||||
|
echo "Version ${{ steps.version.outputs.version }} found in TestPyPI"
|
||||||
|
echo "PYPI_SOURCE=testpypi" >> $GITHUB_ENV
|
||||||
|
elif check_version "pypi"; then
|
||||||
|
echo "Version ${{ steps.version.outputs.version }} found in PyPI"
|
||||||
|
echo "PYPI_SOURCE=pypi" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "Error: Version ${{ steps.version.outputs.version }} not found in either TestPyPI or PyPI"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Install llama-stack
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "push" ]; then
|
||||||
|
pip install -e .
|
||||||
|
else
|
||||||
|
if [ "$PYPI_SOURCE" = "testpypi" ]; then
|
||||||
|
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple llama-stack==${{ steps.version.outputs.version }}
|
||||||
|
else
|
||||||
|
pip install llama-stack==${{ steps.version.outputs.version }}
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Build docker image
|
||||||
|
run: |
|
||||||
|
TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")
|
||||||
|
for template in "${TEMPLATES[@]}"; do
|
||||||
|
if [ "$PYPI_SOURCE" = "testpypi" ]; then
|
||||||
|
TEST_PYPI_VERSION=${{ steps.version.outputs.version }} llama stack build --template $template --image-type docker
|
||||||
|
else
|
||||||
|
PYPI_VERSION=${{ steps.version.outputs.version }} llama stack build --template $template --image-type docker
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
- name: List docker images
|
||||||
|
run: |
|
||||||
|
docker images
|
||||||
|
|
||||||
|
- name: Push to dockerhub
|
||||||
|
run: |
|
||||||
|
TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")
|
||||||
|
for template in "${TEMPLATES[@]}"; do
|
||||||
|
if [ "$PYPI_SOURCE" = "testpypi" ]; then
|
||||||
|
docker tag distribution-$template:test-${{ steps.version.outputs.version }} llamastack/distribution-$template:test-${{ steps.version.outputs.version }}
|
||||||
|
docker push llamastack/distribution-$template:test-${{ steps.version.outputs.version }}
|
||||||
|
else
|
||||||
|
docker tag distribution-$template:${{ steps.version.outputs.version }} llamastack/distribution-$template:${{ steps.version.outputs.version }}
|
||||||
|
docker push llamastack/distribution-$template:${{ steps.version.outputs.version }}
|
||||||
|
fi
|
||||||
|
done
|
|
@ -107,7 +107,8 @@ class StackBuild(Subcommand):
|
||||||
f"Please specify a image-type (docker | conda | venv) for {args.template}"
|
f"Please specify a image-type (docker | conda | venv) for {args.template}"
|
||||||
)
|
)
|
||||||
self._run_stack_build_command_from_build_config(
|
self._run_stack_build_command_from_build_config(
|
||||||
build_config, template_name=args.template
|
build_config,
|
||||||
|
template_name=args.template,
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -261,7 +262,9 @@ class StackBuild(Subcommand):
|
||||||
)
|
)
|
||||||
|
|
||||||
def _run_stack_build_command_from_build_config(
|
def _run_stack_build_command_from_build_config(
|
||||||
self, build_config: BuildConfig, template_name: Optional[str] = None
|
self,
|
||||||
|
build_config: BuildConfig,
|
||||||
|
template_name: Optional[str] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
|
|
|
@ -6,6 +6,7 @@
|
||||||
|
|
||||||
import importlib.resources
|
import importlib.resources
|
||||||
import logging
|
import logging
|
||||||
|
import sys
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
@ -20,7 +21,7 @@ from llama_stack.distribution.distribution import get_provider_registry
|
||||||
|
|
||||||
from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
|
from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
|
||||||
|
|
||||||
from llama_stack.distribution.utils.exec import run_with_pty
|
from llama_stack.distribution.utils.exec import run_command, run_with_pty
|
||||||
from llama_stack.providers.datatypes import Api
|
from llama_stack.providers.datatypes import Api
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
@ -102,7 +103,10 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]):
|
||||||
print()
|
print()
|
||||||
|
|
||||||
|
|
||||||
def build_image(build_config: BuildConfig, build_file_path: Path):
|
def build_image(
|
||||||
|
build_config: BuildConfig,
|
||||||
|
build_file_path: Path,
|
||||||
|
):
|
||||||
docker_image = build_config.distribution_spec.docker_image or "python:3.10-slim"
|
docker_image = build_config.distribution_spec.docker_image or "python:3.10-slim"
|
||||||
|
|
||||||
normal_deps, special_deps = get_provider_dependencies(
|
normal_deps, special_deps = get_provider_dependencies(
|
||||||
|
@ -144,7 +148,12 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
|
||||||
if special_deps:
|
if special_deps:
|
||||||
args.append("#".join(special_deps))
|
args.append("#".join(special_deps))
|
||||||
|
|
||||||
|
is_terminal = sys.stdin.isatty()
|
||||||
|
if is_terminal:
|
||||||
return_code = run_with_pty(args)
|
return_code = run_with_pty(args)
|
||||||
|
else:
|
||||||
|
return_code = run_command(args)
|
||||||
|
|
||||||
if return_code != 0:
|
if return_code != 0:
|
||||||
log.error(
|
log.error(
|
||||||
f"Failed to build target {build_config.name} with return code {return_code}",
|
f"Failed to build target {build_config.name} with return code {return_code}",
|
||||||
|
|
|
@ -82,13 +82,17 @@ fi
|
||||||
# Add pip dependencies first since llama-stack is what will change most often
|
# Add pip dependencies first since llama-stack is what will change most often
|
||||||
# so we can reuse layers.
|
# so we can reuse layers.
|
||||||
if [ -n "$pip_dependencies" ]; then
|
if [ -n "$pip_dependencies" ]; then
|
||||||
add_to_docker "RUN pip install --no-cache $pip_dependencies"
|
add_to_docker << EOF
|
||||||
|
RUN pip install --no-cache $pip_dependencies
|
||||||
|
EOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -n "$special_pip_deps" ]; then
|
if [ -n "$special_pip_deps" ]; then
|
||||||
IFS='#' read -ra parts <<<"$special_pip_deps"
|
IFS='#' read -ra parts <<<"$special_pip_deps"
|
||||||
for part in "${parts[@]}"; do
|
for part in "${parts[@]}"; do
|
||||||
add_to_docker "RUN pip install --no-cache $part"
|
add_to_docker <<EOF
|
||||||
|
RUN pip install --no-cache $part
|
||||||
|
EOF
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -104,14 +108,19 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
|
||||||
# Install in editable format. We will mount the source code into the container
|
# Install in editable format. We will mount the source code into the container
|
||||||
# so that changes will be reflected in the container without having to do a
|
# so that changes will be reflected in the container without having to do a
|
||||||
# rebuild. This is just for development convenience.
|
# rebuild. This is just for development convenience.
|
||||||
add_to_docker "RUN pip install --no-cache -e $stack_mount"
|
add_to_docker << EOF
|
||||||
|
RUN pip install --no-cache -e $stack_mount
|
||||||
|
EOF
|
||||||
else
|
else
|
||||||
if [ -n "$TEST_PYPI_VERSION" ]; then
|
if [ -n "$TEST_PYPI_VERSION" ]; then
|
||||||
# these packages are damaged in test-pypi, so install them first
|
# these packages are damaged in test-pypi, so install them first
|
||||||
add_to_docker "RUN pip install fastapi libcst"
|
add_to_docker << EOF
|
||||||
|
RUN pip install fastapi libcst
|
||||||
|
EOF
|
||||||
add_to_docker << EOF
|
add_to_docker << EOF
|
||||||
RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
|
RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
|
||||||
llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION
|
llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION
|
||||||
|
|
||||||
EOF
|
EOF
|
||||||
else
|
else
|
||||||
if [ -n "$PYPI_VERSION" ]; then
|
if [ -n "$PYPI_VERSION" ]; then
|
||||||
|
@ -119,7 +128,9 @@ EOF
|
||||||
else
|
else
|
||||||
SPEC_VERSION="llama-stack"
|
SPEC_VERSION="llama-stack"
|
||||||
fi
|
fi
|
||||||
add_to_docker "RUN pip install --no-cache $SPEC_VERSION"
|
add_to_docker << EOF
|
||||||
|
RUN pip install --no-cache $SPEC_VERSION
|
||||||
|
EOF
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
|
@ -98,9 +98,11 @@ def run_with_pty(command):
|
||||||
|
|
||||||
|
|
||||||
def run_command(command):
|
def run_command(command):
|
||||||
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
try:
|
||||||
output, error = process.communicate()
|
result = subprocess.run(command, capture_output=True, text=True, check=True)
|
||||||
if process.returncode != 0:
|
print("Script Output\n", result.stdout)
|
||||||
log.error(f"Error: {error.decode('utf-8')}")
|
return result.returncode
|
||||||
sys.exit(1)
|
except subprocess.CalledProcessError as e:
|
||||||
return output.decode("utf-8")
|
print("Error running script:", e)
|
||||||
|
print("Error output:", e.stderr)
|
||||||
|
return e.returncode
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue