CLI Update: build -> configure -> run (#69)

* remove configure from build

* remove config from build

* configure to regenerate file

* update memory providers

* remove comments

* update build script

* add readme

* update doc

* rename getting started

* update build cli

* update docker build script

* configure update

* clean up configure

* [tmp fix] temporary hardware requirement fix

* clean up build

* fix configure

* add example build files for conda & docker

* remove resolve_distribution_spec

* remove available_distribution_specs

* example build files

* update example build files

* more clean up on build

* add name arg to override name

* move distribution to yaml files

* generate distribution specs

* getting started guide

* getting started

* add build yaml to Dockerfile

* cleanup distribution_dependencies

* configure from docker image name

* build relative paths

* minor comment

* getting started

* Update getting_started.md

* Update getting_started.md

* address comments, configure within docker file

* remove distribution types!

* update getting started

* update documentation

* remove listing distribution

* minor heading

* address nits, remove docker_image=null

* gitignore
Xi Yan, 2024-09-16 11:02:26 -07:00 (committed by GitHub)
parent 73b71d9689
commit d9147f3184
27 changed files with 759 additions and 512 deletions
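
Taken together, the commit splits the old all-in-one `build` (which ran `configure` implicitly) into three explicit steps. A rough sketch of the resulting flow; only the `configure` line appears verbatim in this diff (in the new container-configure script below), while the `build` and `run` invocations are assumptions based on the commit title and the old script's run hint:

# 1. Build an environment or image from a build file (exact flags assumed;
#    this diff only shows the underlying build scripts)
llama stack build

# 2. Configure the build; this invocation is verbatim from the new script below
llama stack configure ./llamastack-build.yaml --output-dir /app/builds

# 3. Run the result; for docker, the old script hinted at:
podman run -p 8000:8000 llamastack-mybuild   # image name illustrative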


@@ -19,17 +19,15 @@ fi

 set -euo pipefail

-if [ "$#" -ne 4 ]; then
+if [ "$#" -ne 2 ]; then
   echo "Usage: $0 <distribution_type> <build_name> <pip_dependencies>" >&2
   echo "Example: $0 <distribution_type> mybuild 'numpy pandas scipy'" >&2
   exit 1
 fi

-distribution_type="$1"
-build_name="$2"
+build_name="$1"
 env_name="llamastack-$build_name"
-config_file="$3"
-pip_dependencies="$4"
+pip_dependencies="$2"

 # Define color codes
 RED='\033[0;31m'

@@ -115,7 +113,3 @@ ensure_conda_env_python310() {
 }

 ensure_conda_env_python310 "$env_name" "$pip_dependencies"
-
-printf "${GREEN}Successfully setup conda environment. Configuring build...${NC}\n"
-$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $config_file
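
The conda build script is now down to two positional arguments and no longer chains into `configure`. A hypothetical invocation, assuming the script keeps a `build_conda_env.sh` name (only `core/build_container.sh` is named explicitly elsewhere in this diff):

# args: <build_name> <pip_dependencies>; creates the env "llamastack-mybuild"
./build_conda_env.sh mybuild 'numpy pandas scipy'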


@@ -4,18 +4,17 @@ LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}

 LLAMA_TOOLCHAIN_DIR=${LLAMA_TOOLCHAIN_DIR:-}
 TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}

-if [ "$#" -ne 5 ]; then
-  echo "Usage: $0 <distribution_type> <build_name> <docker_base> <pip_dependencies>
-  echo "Example: $0 distribution_type my-fastapi-app python:3.9-slim 'fastapi uvicorn'
+if [ "$#" -ne 4 ]; then
+  echo "Usage: $0 <build_name> <docker_base> <pip_dependencies>
+  echo "Example: $0 my-fastapi-app python:3.9-slim 'fastapi uvicorn'
   exit 1
 fi

-distribution_type=$1
-build_name="$2"
+build_name="$1"
 image_name="llamastack-$build_name"
-docker_base=$3
-config_file=$4
-pip_dependencies=$5
+docker_base=$2
+build_file_path=$3
+pip_dependencies=$4

 # Define color codes
 RED='\033[0;31m'

@@ -26,6 +25,8 @@ set -euo pipefail

 SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
 REPO_DIR=$(dirname $(dirname "$SCRIPT_DIR"))

+DOCKER_BINARY=${DOCKER_BINARY:-docker}
+DOCKER_OPTS=${DOCKER_OPTS:-}
+
 TEMP_DIR=$(mktemp -d)

@@ -93,6 +94,8 @@ add_to_docker <<EOF

 EOF

+add_to_docker "ADD $build_file_path ./llamastack-build.yaml"
+
 printf "Dockerfile created successfully in $TEMP_DIR/Dockerfile"
 cat $TEMP_DIR/Dockerfile
 printf "\n"

@@ -105,10 +108,10 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
   mounts="$mounts -v $(readlink -f $LLAMA_MODELS_DIR):$models_mount"
 fi

 set -x
-podman build -t $image_name -f "$TEMP_DIR/Dockerfile" "$REPO_DIR" $mounts
+$DOCKER_BINARY build $DOCKER_OPTS -t $image_name -f "$TEMP_DIR/Dockerfile" "$REPO_DIR" $mounts
 set +x

-printf "${GREEN}Succesfully setup Podman image. Configuring build...${NC}"
-echo "You can run it with: podman run -p 8000:8000 $image_name"
-$CONDA_PREFIX/bin/python3 -m llama_toolchain.cli.llama stack configure $config_file
+echo "Checking image builds..."
+podman run -it $image_name cat llamastack-build.yaml
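
The container script similarly drops `distribution_type` and the config file, takes the build file path instead, and makes the container runtime pluggable through `DOCKER_BINARY`/`DOCKER_OPTS`. A sketch with illustrative values:

# args: <build_name> <docker_base> <build_file_path> <pip_dependencies>
DOCKER_BINARY=podman ./build_container.sh my-fastapi-app python:3.9-slim \
  ./llamastack-build.yaml 'fastapi uvicorn'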


@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+set -euo pipefail
+
+error_handler() {
+  echo "Error occurred in script at line: ${1}" >&2
+  exit 1
+}
+
+trap 'error_handler ${LINENO}' ERR
+
+if [ $# -lt 2 ]; then
+  echo "Usage: $0 <container name> <build file path>"
+  exit 1
+fi
+
+docker_image="$1"
+host_build_dir="$2"
+container_build_dir="/app/builds"
+
+set -x
+podman run -it \
+  -v $host_build_dir:$container_build_dir \
+  $docker_image \
+  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
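
This new script re-runs `llama stack configure` inside an already-built image and writes the generated config back out through the volume mount. A hypothetical invocation (the script's file name is not visible in this view, and the host path is illustrative):

# mounts ~/.llama/builds at /app/builds and configures the baked-in
# llamastack-build.yaml into it
./configure_container.sh llamastack-mybuild ~/.llama/builds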


@@ -151,11 +151,12 @@ def remote_provider_spec(

 @json_schema_type
 class DistributionSpec(BaseModel):
     distribution_type: str
-    description: str
+    description: Optional[str] = Field(
+        default="",
+        description="Description of the distribution",
+    )
     docker_image: Optional[str] = None
-    providers: Dict[Api, str] = Field(
+    providers: Dict[str, str] = Field(
         default_factory=dict,
         description="Provider Types for each of the APIs provided by this distribution",
     )

@@ -172,8 +173,6 @@ Reference to the distribution this package refers to. For unregistered (adhoc) p
 this could be just a hash
 """,
     )
-    distribution_type: Optional[str] = None
-
     docker_image: Optional[str] = Field(
         default=None,
         description="Reference to the docker image if this package refers to a container",

@@ -194,12 +193,8 @@ the dependencies of these providers as well.

 @json_schema_type
 class BuildConfig(BaseModel):
     name: str
-    distribution: str = Field(
-        default="local", description="Type of distribution to build (adhoc | {})"
-    )
-    api_providers: Optional[str] = Field(
-        default_factory=list,
-        description="List of API provider names to build",
+    distribution_spec: DistributionSpec = Field(
+        description="The distribution spec to build including API providers. "
     )
     image_type: str = Field(
         default="conda",

@@ -31,16 +31,6 @@ SERVER_DEPENDENCIES = [
 ]

-def distribution_dependencies(distribution: DistributionSpec) -> List[str]:
-    # only consider InlineProviderSpecs when calculating dependencies
-    return [
-        dep
-        for provider_spec in distribution.provider_specs.values()
-        if isinstance(provider_spec, InlineProviderSpec)
-        for dep in provider_spec.pip_packages
-    ] + SERVER_DEPENDENCIES
-

 def stack_apis() -> List[Api]:
     return [v for v in Api]


@@ -5,84 +5,19 @@
 # the root directory of this source tree.

 from functools import lru_cache
 from pathlib import Path
 from typing import List, Optional

 from .datatypes import *  # noqa: F403
+import yaml

 @lru_cache()
 def available_distribution_specs() -> List[DistributionSpec]:
-    return [
-        DistributionSpec(
-            distribution_type="local",
-            description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
-            providers={
-                Api.inference: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="remote",
-            description="Point to remote services for all llama stack APIs",
-            providers={
-                **{x: "remote" for x in Api},
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="local-ollama",
-            description="Like local, but use ollama for running LLM inference",
-            providers={
-                Api.inference: remote_provider_type("ollama"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="local-plus-fireworks-inference",
-            description="Use Fireworks.ai for running LLM inference",
-            providers={
-                Api.inference: remote_provider_type("fireworks"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="local-plus-together-inference",
-            description="Use Together.ai for running LLM inference",
-            providers={
-                Api.inference: remote_provider_type("together"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-                Api.telemetry: "console",
-            },
-        ),
-        DistributionSpec(
-            distribution_type="local-plus-tgi-inference",
-            description="Use TGI for running LLM inference",
-            providers={
-                Api.inference: remote_provider_type("tgi"),
-                Api.safety: "meta-reference",
-                Api.agentic_system: "meta-reference",
-                Api.memory: "meta-reference-faiss",
-            },
-        ),
-    ]
+    distribution_specs = []
+    for p in Path("llama_toolchain/configs/distributions/distribution_registry").rglob(
+        "*.yaml"
+    ):
+        with open(p, "r") as f:
+            distribution_specs.append(DistributionSpec(**yaml.safe_load(f)))
-
-@lru_cache()
-def resolve_distribution_spec(
-    distribution_type: str,
-) -> Optional[DistributionSpec]:
-    for spec in available_distribution_specs():
-        if spec.distribution_type == distribution_type:
-            return spec
-    return None
+    return distribution_specs
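
`available_distribution_specs()` now simply deserializes every YAML file under the registry directory into a `DistributionSpec`, so each registry entry mirrors one of the deleted in-code specs. A hypothetical entry, reconstructed from the removed `local` spec (the file name is an assumption; the loader globs `*.yaml`):

# hypothetical registry entry; contents mirror the removed "local" spec
mkdir -p llama_toolchain/configs/distributions/distribution_registry
cat > llama_toolchain/configs/distributions/distribution_registry/local.yaml <<'EOF'
distribution_type: local
description: Use code from `llama_toolchain` itself to serve all llama stack APIs
providers:
  inference: meta-reference
  memory: meta-reference-faiss
  safety: meta-reference
  agentic_system: meta-reference
  telemetry: console
EOF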


@@ -21,6 +21,8 @@ from pydantic import BaseModel
 from termcolor import cprint

 from llama_toolchain.core.datatypes import *  # noqa: F403
+from pathlib import Path
+
 from llama_toolchain.core.distribution import api_providers, SERVER_DEPENDENCIES

@@ -39,87 +41,35 @@ class ApiInput(BaseModel):
     provider: str

-def build_package(
-    api_inputs: List[ApiInput],
-    image_type: ImageType,
-    name: str,
-    distribution_type: Optional[str] = None,
-    docker_image: Optional[str] = None,
-):
-    if not distribution_type:
-        distribution_type = "adhoc"
-
-    build_dir = BUILDS_BASE_DIR / distribution_type / image_type.value
-    os.makedirs(build_dir, exist_ok=True)
-
-    package_name = name.replace("::", "-")
-    package_file = build_dir / f"{package_name}.yaml"
-
-    all_providers = api_providers()
+def build_package(build_config: BuildConfig, build_file_path: Path):
     package_deps = Dependencies(
-        docker_image=docker_image or "python:3.10-slim",
+        docker_image=build_config.distribution_spec.docker_image or "python:3.10-slim",
         pip_packages=SERVER_DEPENDENCIES,
     )

-    stub_config = {}
-    for api_input in api_inputs:
-        api = api_input.api
-        providers_for_api = all_providers[api]
-        if api_input.provider not in providers_for_api:
+    # extend package dependencies based on providers spec
+    all_providers = api_providers()
+    for api_str, provider in build_config.distribution_spec.providers.items():
+        providers_for_api = all_providers[Api(api_str)]
+        if provider not in providers_for_api:
             raise ValueError(
-                f"Provider `{api_input.provider}` is not available for API `{api}`"
+                f"Provider `{provider}` is not available for API `{api_str}`"
             )

-        provider = providers_for_api[api_input.provider]
-        package_deps.pip_packages.extend(provider.pip_packages)
-        if provider.docker_image:
+        provider_spec = providers_for_api[provider]
+        package_deps.pip_packages.extend(provider_spec.pip_packages)
+        if provider_spec.docker_image:
             raise ValueError("A stack's dependencies cannot have a docker image")

-        stub_config[api.value] = {"provider_type": api_input.provider}
-
-    if package_file.exists():
-        cprint(
-            f"Build `{package_name}` exists; will reconfigure",
-            color="yellow",
-        )
-        c = PackageConfig(**yaml.safe_load(package_file.read_text()))
-        for api_str, new_config in stub_config.items():
-            if api_str not in c.providers:
-                c.providers[api_str] = new_config
-            else:
-                existing_config = c.providers[api_str]
-                if existing_config["provider_type"] != new_config["provider_type"]:
-                    cprint(
-                        f"Provider `{api_str}` has changed from `{existing_config}` to `{new_config}`",
-                        color="yellow",
-                    )
-                    c.providers[api_str] = new_config
-    else:
-        c = PackageConfig(
-            built_at=datetime.now(),
-            package_name=package_name,
-            providers=stub_config,
-        )
-
-    c.distribution_type = distribution_type
-    c.docker_image = package_name if image_type == ImageType.docker else None
-    c.conda_env = package_name if image_type == ImageType.conda else None
-
-    with open(package_file, "w") as f:
-        to_write = json.loads(json.dumps(c.dict(), cls=EnumEncoder))
-        f.write(yaml.dump(to_write, sort_keys=False))
-
-    if image_type == ImageType.docker:
+    if build_config.image_type == ImageType.docker.value:
         script = pkg_resources.resource_filename(
             "llama_toolchain", "core/build_container.sh"
         )
         args = [
             script,
-            distribution_type,
-            package_name,
+            build_config.name,
             package_deps.docker_image,
-            str(package_file),
+            str(build_file_path),
             " ".join(package_deps.pip_packages),
         ]
     else:

@@ -128,21 +78,14 @@ def build_package(
         )
         args = [
             script,
-            distribution_type,
-            package_name,
-            str(package_file),
+            build_config.name,
             " ".join(package_deps.pip_packages),
         ]

     return_code = run_with_pty(args)
     if return_code != 0:
         cprint(
-            f"Failed to build target {package_name} with return code {return_code}",
+            f"Failed to build target {build_config.name} with return code {return_code}",
             color="red",
         )
         return
-
-    cprint(
-        f"Target `{package_name}` built with configuration at {str(package_file)}",
-        color="green",
-    )
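
For the docker branch, the rebuilt argument list lines up with the rewritten four-argument `build_container.sh` above; what `run_with_pty` ends up executing looks roughly like this (paths and package list illustrative):

# <script> <build_name> <docker_base> <build_file_path> <pip_dependencies>
core/build_container.sh mybuild python:3.10-slim \
  /path/to/llamastack-build.yaml 'fastapi uvicorn ...'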