forked from phoenix-oss/llama-stack-mirror
More generic image type for OCI-compliant container technologies (#802)
"Container" is a more generic term, applicable to alternatives to Docker such as Podman and other OCI-compliant technologies.

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
parent 9d005154d7
commit 6da3053c0e
30 changed files with 102 additions and 100 deletions
@@ -1,6 +1,6 @@
 version: '2'
 image_name: local
-docker_image: null
+container_image: null
 conda_env: local
 apis:
 - shields

@@ -1,6 +1,6 @@
 version: '2'
 image_name: local
-docker_image: null
+container_image: null
 conda_env: local
 apis:
 - shields

@@ -1,6 +1,6 @@
 version: '2'
 image_name: local
-docker_image: null
+container_image: null
 conda_env: local
 apis:
 - shields

@@ -481,7 +481,7 @@
 "- telemetry\n",
 "conda_env: together\n",
 "datasets: <span style=\"font-weight: bold\">[]</span>\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: <span style=\"font-weight: bold\">[]</span>\n",
 "image_name: together\n",
 "memory_banks: <span style=\"font-weight: bold\">[]</span>\n",

@@ -600,7 +600,7 @@
 "- telemetry\n",
 "conda_env: together\n",
 "datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
 "image_name: together\n",
 "memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",

@@ -369,7 +369,7 @@
 "- telemetry\n",
 "- tool_runtime\n",
 "datasets: <span style=\"font-weight: bold\">[]</span>\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: <span style=\"font-weight: bold\">[]</span>\n",
 "image_name: together\n",
 "memory_banks: <span style=\"font-weight: bold\">[]</span>\n",

@@ -550,7 +550,7 @@
 "- telemetry\n",
 "- tool_runtime\n",
 "datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
 "image_name: together\n",
 "memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",

@@ -760,7 +760,7 @@
 "- tool_runtime\n",
 "conda_env: together\n",
 "datasets: <span style=\"font-weight: bold\">[]</span>\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: <span style=\"font-weight: bold\">[]</span>\n",
 "image_name: together\n",
 "memory_banks: <span style=\"font-weight: bold\">[]</span>\n",

@@ -942,7 +942,7 @@
 "- tool_runtime\n",
 "conda_env: together\n",
 "datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
-"docker_image: null\n",
+"container_image: null\n",
 "eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
 "image_name: together\n",
 "memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",

@@ -17,13 +17,13 @@ pip install -e .
 llama stack build -h
 ```

-We will start build our distribution (in the form of a Conda environment, or Docker image). In this step, we will specify:
+We will start build our distribution (in the form of a Conda environment, or Container image). In this step, we will specify:
 - `name`: the name for our distribution (e.g. `my-stack`)
-- `image_type`: our build image type (`conda | docker`)
+- `image_type`: our build image type (`conda | container`)
 - `distribution_spec`: our distribution specs for specifying API providers
 - `description`: a short description of the configurations for the distribution
 - `providers`: specifies the underlying implementation for serving each API endpoint
-- `image_type`: `conda` | `docker` to specify whether to build the distribution in the form of Docker image or Conda environment.
+- `image_type`: `conda` | `container` to specify whether to build the distribution in the form of Container image or Conda environment.

 After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.

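For context on the renamed field, a build config written against the new schema might look like the sketch below. The field names mirror the `DistributionSpec` and `BuildConfig` models later in this diff; the import path and the concrete values are assumptions for illustration.

```python
# Sketch only: field names come from this diff's models; import path assumed.
from llama_stack.distribution.datatypes import BuildConfig, DistributionSpec

build_config = BuildConfig(
    name="my-stack",  # hypothetical distribution name
    distribution_spec=DistributionSpec(
        description="Example stack",  # hypothetical
        container_image=None,  # renamed from docker_image in this commit
        providers={"inference": "remote::ollama"},  # hypothetical provider map
    ),
    image_type="container",  # accepted alongside "conda" and "venv" after this change
)
```
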
@@ -35,7 +35,7 @@ After this step is complete, a file named `<name>-build.yaml` and template file
 llama stack build

 > Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
-> Enter the image type you want your Llama Stack to be built as (docker or conda): conda
+> Enter the image type you want your Llama Stack to be built as (container or conda): conda

 Llama Stack is composed of several APIs working together. Let's select
 the provider types (implementations) you want to use for these APIs.

@@ -348,26 +348,26 @@ llama stack build --config llama_stack/templates/ollama/build.yaml
 ```
 :::

-:::{tab-item} Building Docker
+:::{tab-item} Building Container
 > [!TIP]
-> Podman is supported as an alternative to Docker. Set `DOCKER_BINARY` to `podman` in your environment to use Podman.
+> Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.

-To build a docker image, you may start off from a template and use the `--image-type docker` flag to specify `docker` as the build image type.
+To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.

 ```
-llama stack build --template ollama --image-type docker
+llama stack build --template ollama --image-type container
 ```

 ```
-$ llama stack build --template ollama --image-type docker
+$ llama stack build --template ollama --image-type container
 ...
-Dockerfile created successfully in /tmp/tmp.viA3a3Rdsg/Dockerfile
+Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/Containerfile
 FROM python:3.10-slim
 ...

 You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
 ```

-After this step is successful, you should be able to find the built docker image and test it with `llama stack run <path/to/run.yaml>`.
+After this step is successful, you should be able to find the built container image and test it with `llama stack run <path/to/run.yaml>`.
 :::

 ::::

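As the tip above notes, the only switch needed for Podman is the `CONTAINER_BINARY` variable. A minimal sketch of driving the documented command from Python, assuming the `llama` CLI is on PATH:

```python
# Sketch: run the documented build command with Podman substituted via the
# CONTAINER_BINARY variable introduced in this change.
import os
import subprocess

env = {**os.environ, "CONTAINER_BINARY": "podman"}
subprocess.run(
    ["llama", "stack", "build", "--template", "ollama", "--image-type", "container"],
    env=env,
    check=True,
)
```
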
@@ -182,8 +182,8 @@ def _generate_run_config(
     """
     apis = list(build_config.distribution_spec.providers.keys())
     run_config = StackRunConfig(
-        docker_image=(
-            image_name if build_config.image_type == ImageType.docker.value else None
+        container_image=(
+            image_name if build_config.image_type == ImageType.container.value else None
         ),
         image_name=image_name,
         apis=apis,

@@ -238,7 +238,7 @@ def _run_stack_build_command_from_build_config(
     image_name: Optional[str] = None,
     template_name: Optional[str] = None,
 ) -> None:
-    if build_config.image_type == ImageType.docker.value:
+    if build_config.image_type == ImageType.container.value:
         if template_name:
             image_name = f"distribution-{template_name}"
         else:

@@ -47,8 +47,8 @@ class StackBuild(Subcommand):
         self.parser.add_argument(
             "--image-type",
             type=str,
-            help="Image Type to use for the build. This can be either conda or docker. If not specified, will use the image type from the template config.",
-            choices=["conda", "docker", "venv"],
+            help="Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config.",
+            choices=["conda", "container", "venv"],
             default="conda",
         )

@@ -27,7 +27,7 @@ class StackConfigure(Subcommand):
         self.parser.add_argument(
             "config",
             type=str,
-            help="Path to the build config file (e.g. ~/.llama/builds/<image_type>/<name>-build.yaml). For docker, this could also be the name of the docker image. ",
+            help="Path to the build config file (e.g. ~/.llama/builds/<image_type>/<name>-build.yaml). For container, this could also be the name of the container image. ",
         )

         self.parser.add_argument(

@@ -92,9 +92,9 @@ class StackRun(Subcommand):
         )

         if not config_file.exists() and not has_yaml_suffix:
-            # check if it's a build config saved to docker dir
+            # check if it's a build config saved to container dir
             config_file = Path(
-                BUILDS_BASE_DIR / ImageType.docker.value / f"{args.config}-run.yaml"
+                BUILDS_BASE_DIR / ImageType.container.value / f"{args.config}-run.yaml"
             )

         if not config_file.exists() and not has_yaml_suffix:

@@ -115,12 +115,12 @@ class StackRun(Subcommand):
         config_dict = yaml.safe_load(config_file.read_text())
         config = parse_and_maybe_upgrade_config(config_dict)

-        if config.docker_image:
+        if config.container_image:
             script = (
                 importlib.resources.files("llama_stack")
                 / "distribution/start_container.sh"
             )
-            run_args = [script, config.docker_image]
+            run_args = [script, config.container_image]
         else:
             current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
             image_name = args.image_name or current_conda_env

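The `parse_and_maybe_upgrade_config` call above is what gives older run configs a path forward: files written before this commit still carry a `docker_image` key. The upstream upgrade logic is not shown in this diff; below is only a sketch of the kind of key mapping it would need.

```python
# Hypothetical shim, not the actual parse_and_maybe_upgrade_config: map the
# pre-rename key forward so old run.yaml files keep loading.
def upgrade_legacy_keys(config_dict: dict) -> dict:
    if "docker_image" in config_dict and "container_image" not in config_dict:
        config_dict["container_image"] = config_dict.pop("docker_image")
    return config_dict

assert upgrade_legacy_keys({"docker_image": "foo"}) == {"container_image": "foo"}
```
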
@@ -38,7 +38,7 @@ SERVER_DEPENDENCIES = [


 class ImageType(Enum):
-    docker = "docker"
+    container = "container"
     conda = "conda"
     venv = "venv"

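Note that the CLI code in this diff compares plain strings against the enum's value (`ImageType.container.value`) rather than enum members, since `image_type` is stored as a string in the build config. A minimal sketch of that pattern:

```python
# Sketch of the ".value" comparison pattern used throughout this diff.
from enum import Enum

class ImageType(Enum):
    container = "container"
    conda = "conda"
    venv = "venv"

def is_container_build(image_type: str) -> bool:
    return image_type == ImageType.container.value

assert is_container_build("container")
assert not is_container_build("docker")  # the old value no longer matches
```
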
@@ -77,8 +77,8 @@ def get_provider_dependencies(

         provider_spec = providers_for_api[provider_type]
         deps.extend(provider_spec.pip_packages)
-        if provider_spec.docker_image:
-            raise ValueError("A stack's dependencies cannot have a docker image")
+        if provider_spec.container_image:
+            raise ValueError("A stack's dependencies cannot have a container image")

     normal_deps = []
     special_deps = []

@@ -109,23 +109,25 @@ def build_image(
     image_name: str,
     template_name: Optional[str] = None,
 ):
-    docker_image = build_config.distribution_spec.docker_image or "python:3.10-slim"
+    container_image = (
+        build_config.distribution_spec.container_image or "python:3.10-slim"
+    )

     normal_deps, special_deps = get_provider_dependencies(
         build_config.distribution_spec.providers
     )
     normal_deps += SERVER_DEPENDENCIES

-    if build_config.image_type == ImageType.docker.value:
+    if build_config.image_type == ImageType.container.value:
         script = str(
             importlib.resources.files("llama_stack") / "distribution/build_container.sh"
         )
         args = [
             script,
             image_name,
-            docker_image,
+            container_image,
             str(build_file_path),
-            str(BUILDS_BASE_DIR / ImageType.docker.value),
+            str(BUILDS_BASE_DIR / ImageType.container.value),
             " ".join(normal_deps),
         ]
     elif build_config.image_type == ImageType.conda.value:

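For reference, the argument list assembled in `build_image` lines up with the positionals read by the build script in the next hunk: `image_name` is `$1` (the build name), `container_image` is `$2` (the base image), then the build file path (`$3`), the host build directory (`$4`, here `BUILDS_BASE_DIR/container`), and the space-joined pip dependencies (`$5`).
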
@@ -13,7 +13,7 @@ PYPI_VERSION=${PYPI_VERSION:-}
 BUILD_PLATFORM=${BUILD_PLATFORM:-}

 if [ "$#" -lt 4 ]; then
-  echo "Usage: $0 <build_name> <docker_base> <pip_dependencies> [<special_pip_deps>]" >&2
+  echo "Usage: $0 <build_name> <container_base> <pip_dependencies> [<special_pip_deps>]" >&2
   echo "Example: $0 my-fastapi-app python:3.9-slim 'fastapi uvicorn' " >&2
   exit 1
 fi

@@ -24,7 +24,7 @@ set -euo pipefail

 build_name="$1"
 image_name="distribution-$build_name"
-docker_base=$2
+container_base=$2
 build_file_path=$3
 host_build_dir=$4
 pip_dependencies=$5

@@ -36,14 +36,14 @@ NC='\033[0m' # No Color

 SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
 REPO_DIR=$(dirname $(dirname "$SCRIPT_DIR"))
-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}

 TEMP_DIR=$(mktemp -d)

-add_to_docker() {
+add_to_container() {
   local input
-  output_file="$TEMP_DIR/Dockerfile"
+  output_file="$TEMP_DIR/Containerfile"
   if [ -t 0 ]; then
     printf '%s\n' "$1" >>"$output_file"
   else

@@ -53,9 +53,9 @@ add_to_docker() {
 }

 # Update and install UBI9 components if UBI9 base image is used
-if [[ $docker_base == *"registry.access.redhat.com/ubi9"* ]]; then
-  add_to_docker << EOF
-FROM $docker_base
+if [[ $container_base == *"registry.access.redhat.com/ubi9"* ]]; then
+  add_to_container << EOF
+FROM $container_base
 WORKDIR /app

 RUN microdnf -y update && microdnf install -y iputils net-tools wget \

@@ -64,8 +64,8 @@ RUN microdnf -y update && microdnf install -y iputils net-tools wget \

 EOF
 else
-  add_to_docker << EOF
-FROM $docker_base
+  add_to_container << EOF
+FROM $container_base
 WORKDIR /app

 RUN apt-get update && apt-get install -y \

@@ -82,7 +82,7 @@ fi
 # Add pip dependencies first since llama-stack is what will change most often
 # so we can reuse layers.
 if [ -n "$pip_dependencies" ]; then
-  add_to_docker << EOF
+  add_to_container << EOF
 RUN pip install --no-cache $pip_dependencies
 EOF
 fi

@@ -90,7 +90,7 @@ fi
 if [ -n "$special_pip_deps" ]; then
   IFS='#' read -ra parts <<<"$special_pip_deps"
   for part in "${parts[@]}"; do
-    add_to_docker <<EOF
+    add_to_container <<EOF
 RUN pip install --no-cache $part
 EOF
   done

@@ -108,16 +108,16 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
   # Install in editable format. We will mount the source code into the container
   # so that changes will be reflected in the container without having to do a
   # rebuild. This is just for development convenience.
-  add_to_docker << EOF
+  add_to_container << EOF
 RUN pip install --no-cache -e $stack_mount
 EOF
 else
   if [ -n "$TEST_PYPI_VERSION" ]; then
     # these packages are damaged in test-pypi, so install them first
-    add_to_docker << EOF
+    add_to_container << EOF
 RUN pip install fastapi libcst
 EOF
-    add_to_docker << EOF
+    add_to_container << EOF
 RUN pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \
   llama-models==$TEST_PYPI_VERSION llama-stack-client==$TEST_PYPI_VERSION llama-stack==$TEST_PYPI_VERSION

@@ -128,7 +128,7 @@ EOF
   else
     SPEC_VERSION="llama-stack"
   fi
-  add_to_docker << EOF
+  add_to_container << EOF
 RUN pip install --no-cache $SPEC_VERSION
 EOF
 fi

@@ -140,14 +140,14 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
     exit 1
   fi

-  add_to_docker << EOF
+  add_to_container << EOF
 RUN pip uninstall -y llama-models
 RUN pip install --no-cache $models_mount

 EOF
 fi

-add_to_docker << EOF
+add_to_container << EOF

 # This would be good in production but for debugging flexibility lets not add it right now
 # We need a more solid production ready entrypoint.sh anyway

@@ -156,8 +156,8 @@ ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--templat

 EOF

-printf "Dockerfile created successfully in $TEMP_DIR/Dockerfile\n\n"
-cat $TEMP_DIR/Dockerfile
+printf "Containerfile created successfully in $TEMP_DIR/Containerfile\n\n"
+cat $TEMP_DIR/Containerfile
 printf "\n"

 mounts=""

@@ -170,7 +170,7 @@ fi

 if command -v selinuxenabled &>/dev/null && selinuxenabled; then
   # Disable SELinux labels -- we don't want to relabel the llama-stack source dir
-  DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+  CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
 fi

 # Set version tag based on PyPI version

@@ -200,7 +200,7 @@ else
 fi

 set -x
-$DOCKER_BINARY build $DOCKER_OPTS $PLATFORM -t $image_tag -f "$TEMP_DIR/Dockerfile" "$REPO_DIR" $mounts
+$CONTAINER_BINARY build $CONTAINER_OPTS $PLATFORM -t $image_tag -f "$TEMP_DIR/Containerfile" "$REPO_DIR" $mounts

 # clean up tmp/configs
 set +x

@@ -6,8 +6,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}
 LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}

 set -euo pipefail

@@ -24,13 +24,13 @@ if [ $# -lt 2 ]; then
   exit 1
 fi

-docker_image="$1"
+container_image="$1"
 host_build_dir="$2"
 container_build_dir="/app/builds"

 if command -v selinuxenabled &> /dev/null && selinuxenabled; then
   # Disable SELinux labels
-  DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+  CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
 fi

 mounts=""

@@ -39,9 +39,9 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
 fi

 set -x
-$DOCKER_BINARY run $DOCKER_OPTS -it \
+$CONTAINER_BINARY run $CONTAINER_OPTS -it \
   --entrypoint "/usr/local/bin/llama" \
   -v $host_build_dir:$container_build_dir \
   $mounts \
-  $docker_image \
+  $container_image \
   stack configure ./llamastack-build.yaml --output-dir $container_build_dir

@@ -73,7 +73,7 @@ class AutoRoutedProviderSpec(ProviderSpec):
     provider_type: str = "router"
     config_class: str = ""

-    docker_image: Optional[str] = None
+    container_image: Optional[str] = None
     routing_table_api: Api
     module: str
     provider_data_validator: Optional[str] = Field(

@@ -89,7 +89,7 @@ class AutoRoutedProviderSpec(ProviderSpec):
 class RoutingTableProviderSpec(ProviderSpec):
     provider_type: str = "routing_table"
     config_class: str = ""
-    docker_image: Optional[str] = None
+    container_image: Optional[str] = None

     router_api: Api
     module: str

@@ -101,7 +101,7 @@ class DistributionSpec(BaseModel):
         default="",
         description="Description of the distribution",
     )
-    docker_image: Optional[str] = None
+    container_image: Optional[str] = None
     providers: Dict[str, Union[str, List[str]]] = Field(
         default_factory=dict,
         description="""

@@ -127,9 +127,9 @@ Reference to the distribution this package refers to. For unregistered (adhoc) p
 this could be just a hash
 """,
     )
-    docker_image: Optional[str] = Field(
+    container_image: Optional[str] = Field(
         default=None,
-        description="Reference to the docker image if this package refers to a container",
+        description="Reference to the container image if this package refers to a container",
     )
     apis: List[str] = Field(
         default_factory=list,

@@ -168,5 +168,5 @@ class BuildConfig(BaseModel):
     )
     image_type: str = Field(
         default="conda",
-        description="Type of package to build (conda | docker | venv)",
+        description="Type of package to build (conda | container | venv)",
     )

@@ -6,8 +6,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}
 LLAMA_CHECKPOINT_DIR=${LLAMA_CHECKPOINT_DIR:-}
 LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}

@@ -31,7 +31,7 @@ if [ $# -lt 3 ]; then
 fi

 build_name="$1"
-docker_image="localhost/distribution-$build_name"
+container_image="localhost/distribution-$build_name"
 shift

 yaml_config="$1"

@@ -64,7 +64,7 @@ set -x

 if command -v selinuxenabled &> /dev/null && selinuxenabled; then
   # Disable SELinux labels
-  DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+  CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
 fi

 mounts=""

@@ -73,7 +73,7 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
 fi
 if [ -n "$LLAMA_CHECKPOINT_DIR" ]; then
   mounts="$mounts -v $LLAMA_CHECKPOINT_DIR:/root/.llama"
-  DOCKER_OPTS="$DOCKER_OPTS --gpus=all"
+  CONTAINER_OPTS="$CONTAINER_OPTS --gpus=all"
 fi

 version_tag="latest"

@@ -85,11 +85,11 @@ elif [ -n "$TEST_PYPI_VERSION" ]; then
   version_tag="test-$TEST_PYPI_VERSION"
 fi

-$DOCKER_BINARY run $DOCKER_OPTS -it \
+$CONTAINER_BINARY run $CONTAINER_OPTS -it \
   -p $port:$port \
   $env_vars \
   -v "$yaml_config:/app/config.yaml" \
   $mounts \
   --env LLAMA_STACK_PORT=$port \
   --entrypoint='["python", "-m", "llama_stack.distribution.server.server", "--yaml-config", "/app/config.yaml"]' \
-  $docker_image:$version_tag
+  $container_image:$version_tag

@@ -150,11 +150,11 @@ class InlineProviderSpec(ProviderSpec):
         default_factory=list,
         description="The pip dependencies needed for this implementation",
     )
-    docker_image: Optional[str] = Field(
+    container_image: Optional[str] = Field(
         default=None,
         description="""
-The docker image to use for this implementation. If one is provided, pip_packages will be ignored.
-If a provider depends on other providers, the dependencies MUST NOT specify a docker image.
+The container image to use for this implementation. If one is provided, pip_packages will be ignored.
+If a provider depends on other providers, the dependencies MUST NOT specify a container image.
 """,
     )
     module: str = Field(

@@ -197,7 +197,7 @@ API responses, specify the adapter here.
     )

     @property
-    def docker_image(self) -> Optional[str]:
+    def container_image(self) -> Optional[str]:
         return None

     @property

@@ -70,7 +70,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use AWS Bedrock for running LLM inference and safety",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,

@@ -92,7 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
         name="cerebras",
         distro_type="self_hosted",
         description="Use Cerebras for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,

@@ -2,7 +2,7 @@ version: '2'
 name: experimental-post-training
 distribution_spec:
   description: Experimental template for post training
-  docker_image: null
+  container_image: null
   providers:
     inference:
     - inline::meta-reference

@@ -1,6 +1,6 @@
 version: '2'
 image_name: experimental-post-training
-docker_image: null
+container_image: null
 conda_env: experimental-post-training
 apis:
 - agents

@@ -98,7 +98,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use Fireworks.AI for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,

@@ -88,7 +88,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model, safety_model],

@@ -89,7 +89,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model, safety_model],

@@ -68,7 +68,7 @@ def get_distribution_template() -> DistributionTemplate:
         name="nvidia",
         distro_type="remote_hosted",
         description="Use NVIDIA NIM for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,

@@ -90,7 +90,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Ollama server for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=[inference_model, safety_model],

@@ -37,7 +37,7 @@ class RunConfigSettings(BaseModel):
         self,
         name: str,
         providers: Dict[str, List[str]],
-        docker_image: Optional[str] = None,
+        container_image: Optional[str] = None,
     ) -> StackRunConfig:
         provider_registry = get_provider_registry()

@@ -83,7 +83,7 @@ class RunConfigSettings(BaseModel):

         return StackRunConfig(
             image_name=name,
-            docker_image=docker_image,
+            container_image=container_image,
             conda_env=name,
             apis=apis,
             providers=provider_configs,

@@ -113,7 +113,7 @@ class DistributionTemplate(BaseModel):

     # Optional configuration
     run_config_env_vars: Optional[Dict[str, Tuple[str, str]]] = None
-    docker_image: Optional[str] = None
+    container_image: Optional[str] = None

     default_models: Optional[List[ModelInput]] = None

@@ -122,7 +122,7 @@ class DistributionTemplate(BaseModel):
             name=self.name,
             distribution_spec=DistributionSpec(
                 description=self.description,
-                docker_image=self.docker_image,
+                container_image=self.container_image,
                 providers=self.providers,
             ),
             image_type="conda",  # default to conda, can be overridden

@@ -170,7 +170,7 @@ class DistributionTemplate(BaseModel):

         for yaml_pth, settings in self.run_configs.items():
             run_config = settings.run_config(
-                self.name, self.providers, self.docker_image
+                self.name, self.providers, self.container_image
             )
             with open(yaml_output_dir / yaml_pth, "w") as f:
                 yaml.safe_dump(

@@ -92,7 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) TGI server for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=[inference_model, safety_model],

@@ -96,7 +96,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use Together.AI for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,

@@ -84,7 +84,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use a built-in vLLM engine for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model],