Commit 90e0580ccc by Charlie Doern, 2025-08-14 20:35:22 +02:00 (committed by GitHub)
81 changed files with 1736 additions and 858 deletions


@ -42,7 +42,7 @@ runs:
 - name: Build Llama Stack
 shell: bash
 run: |
-uv run llama stack build --template ci-tests --image-type venv
+uv run llama stack show --template ci-tests | sh
 - name: Configure git for commits
 shell: bash


@ -11,7 +11,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
 | Integration Tests (Replay) | [integration-tests.yml](integration-tests.yml) | Run the integration test suite from tests/integration in replay mode |
 | Vector IO Integration Tests | [integration-vector-io-tests.yml](integration-vector-io-tests.yml) | Run the integration test suite with various VectorIO providers |
 | Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
-| Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
+| Test Llama Stack Show | [providers-show.yml](providers-show.yml) | Test llama stack Show |
 | Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project |
 | Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration |
 | Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec |


@ -141,7 +141,7 @@ jobs:
 - name: Build Llama Stack
 run: |
-uv run llama stack build --template ci-tests --image-type venv
+uv run llama stack sync --template ci-tests | sh
 - name: Check Storage and Memory Available Before Tests
 if: ${{ always() }}


@ -1,154 +0,0 @@
name: Test Llama Stack Build
run-name: Test llama stack build
on:
push:
branches:
- main
paths:
- 'llama_stack/cli/stack/build.py'
- 'llama_stack/cli/stack/_build.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**'
- 'pyproject.toml'
pull_request:
paths:
- 'llama_stack/cli/stack/build.py'
- 'llama_stack/cli/stack/_build.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**'
- 'pyproject.toml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
distros: ${{ steps.set-matrix.outputs.distros }}
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Generate Distribution List
id: set-matrix
run: |
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
echo "distros=$distros" >> "$GITHUB_OUTPUT"
build:
needs: generate-matrix
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }}
image-type: [venv, container]
fail-fast: false # We want to run all jobs even if some fail
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Print build dependencies
run: |
uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only
- name: Run Llama Stack Build
run: |
# USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
# LLAMA_STACK_DIR is set to the current directory so we are building from the source
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test
- name: Print dependencies in the image
if: matrix.image-type == 'venv'
run: |
uv pip list
build-single-provider:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Build a single provider
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama
build-custom-container-distribution:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Build a single provider
run: |
yq -i '.image_type = "container"' llama_stack/distributions/ci-tests/build.yaml
yq -i '.image_name = "test"' llama_stack/distributions/ci-tests/build.yaml
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
- name: Inspect the container image entrypoint
run: |
IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
echo "Entrypoint: $entrypoint"
if [ "$entrypoint" != "[python -m llama_stack.core.server.server /app/run.yaml]" ]; then
echo "Entrypoint is not correct"
exit 1
fi
build-ubi9-container-distribution:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Pin distribution to UBI9 base
run: |
yq -i '
.image_type = "container" |
.image_name = "ubi9-test" |
.distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
' llama_stack/distributions/ci-tests/build.yaml
- name: Build dev container (UBI9)
env:
USE_COPY_NOT_MOUNT: "true"
LLAMA_STACK_DIR: "."
run: |
uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
- name: Inspect UBI9 image
run: |
IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
echo "Entrypoint: $entrypoint"
if [ "$entrypoint" != "[python -m llama_stack.core.server.server /app/run.yaml]" ]; then
echo "Entrypoint is not correct"
exit 1
fi
echo "Checking /etc/os-release in $IMAGE_ID"
docker run --rm --entrypoint sh "$IMAGE_ID" -c \
'source /etc/os-release && echo "$ID"' \
| grep -qE '^(rhel|ubi)$' \
|| { echo "Base image is not UBI 9!"; exit 1; }

.github/workflows/providers-show.yml (new file, 105 lines)

@ -0,0 +1,105 @@
name: Test Llama Stack Show
run-name: Test llama stack Show
on:
push:
branches:
- main
paths:
- 'llama_stack/cli/stack/show.py'
- 'llama_stack/cli/stack/_show.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-show.yml'
- 'llama_stack/templates/**'
- 'pyproject.toml'
pull_request:
paths:
- 'llama_stack/cli/stack/show.py'
- 'llama_stack/cli/stack/_show.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-show.yml'
- 'llama_stack/templates/**'
- 'pyproject.toml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
distros: ${{ steps.set-matrix.outputs.distros }}
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Generate Distribution List
id: set-matrix
run: |
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
echo "distros=$distros" >> "$GITHUB_OUTPUT"
show:
needs: generate-matrix
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }}
image-type: [venv, container]
fail-fast: false # We want to run all jobs even if some fail
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Print dependencies
run: |
uv run llama stack show --distro ${{ matrix.distro }}
- name: Install Distro using Llama Stack Show
run: |
# USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
# LLAMA_STACK_DIR is set to the current directory so we are building from the source
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack show --distro ${{ matrix.distro }} | sh
- name: Print dependencies in the image
if: matrix.image-type == 'venv'
run: |
uv pip list
show-single-provider:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Show a single provider
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack show --env-name test --providers inference=remote::ollama
show-from-config:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Show from Config
env:
USE_COPY_NOT_MOUNT: "true"
LLAMA_STACK_DIR: "."
run: |
uv run llama stack show --config llama_stack/templates/ci-tests/build.yaml


@ -1,471 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import importlib.resources
import json
import os
import shutil
import sys
import textwrap
from functools import lru_cache
from importlib.abc import Traversable
from pathlib import Path
import yaml
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.validation import Validator
from termcolor import colored, cprint
from llama_stack.cli.stack.utils import ImageType
from llama_stack.cli.table import print_table
from llama_stack.core.build import (
SERVER_DEPENDENCIES,
build_image,
get_provider_dependencies,
)
from llama_stack.core.configure import parse_and_maybe_upgrade_config
from llama_stack.core.datatypes import (
BuildConfig,
BuildProvider,
DistributionSpec,
Provider,
StackRunConfig,
)
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.external import load_external_apis
from llama_stack.core.resolver import InvalidProviderError
from llama_stack.core.stack import replace_env_vars
from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.exec import formulate_run_args, run_command
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.datatypes import Api
DISTRIBS_PATH = Path(__file__).parent.parent.parent / "distributions"
@lru_cache
def available_distros_specs() -> dict[str, BuildConfig]:
import yaml
distro_specs = {}
for p in DISTRIBS_PATH.rglob("*build.yaml"):
distro_name = p.parent.name
with open(p) as f:
build_config = BuildConfig(**yaml.safe_load(f))
distro_specs[distro_name] = build_config
return distro_specs
def run_stack_build_command(args: argparse.Namespace) -> None:
if args.list_distros:
return _run_distro_list_cmd()
if args.image_type == ImageType.VENV.value:
current_venv = os.environ.get("VIRTUAL_ENV")
image_name = args.image_name or current_venv
else:
image_name = args.image_name
if args.template:
cprint(
"The --template argument is deprecated. Please use --distro instead.",
color="red",
file=sys.stderr,
)
distro_name = args.template
else:
distro_name = args.distribution
if distro_name:
available_distros = available_distros_specs()
if distro_name not in available_distros:
cprint(
f"Could not find distribution {distro_name}. Please run `llama stack build --list-distros` to check out the available distributions",
color="red",
file=sys.stderr,
)
sys.exit(1)
build_config = available_distros[distro_name]
if args.image_type:
build_config.image_type = args.image_type
else:
cprint(
f"Please specify a image-type ({' | '.join(e.value for e in ImageType)}) for {distro_name}",
color="red",
file=sys.stderr,
)
sys.exit(1)
elif args.providers:
provider_list: dict[str, list[BuildProvider]] = dict()
for api_provider in args.providers.split(","):
if "=" not in api_provider:
cprint(
"Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2",
color="red",
file=sys.stderr,
)
sys.exit(1)
api, provider_type = api_provider.split("=")
providers_for_api = get_provider_registry().get(Api(api), None)
if providers_for_api is None:
cprint(
f"{api} is not a valid API.",
color="red",
file=sys.stderr,
)
sys.exit(1)
if provider_type in providers_for_api:
provider = BuildProvider(
provider_type=provider_type,
module=None,
)
provider_list.setdefault(api, []).append(provider)
else:
cprint(
f"{provider} is not a valid provider for the {api} API.",
color="red",
file=sys.stderr,
)
sys.exit(1)
distribution_spec = DistributionSpec(
providers=provider_list,
description=",".join(args.providers),
)
if not args.image_type:
cprint(
f"Please specify a image-type (container | venv) for {args.template}",
color="red",
file=sys.stderr,
)
sys.exit(1)
build_config = BuildConfig(image_type=args.image_type, distribution_spec=distribution_spec)
elif not args.config and not distro_name:
name = prompt(
"> Enter a name for your Llama Stack (e.g. my-local-stack): ",
validator=Validator.from_callable(
lambda x: len(x) > 0,
error_message="Name cannot be empty, please enter a name",
),
)
image_type = prompt(
"> Enter the image type you want your Llama Stack to be built as (use <TAB> to see options): ",
completer=WordCompleter([e.value for e in ImageType]),
complete_while_typing=True,
validator=Validator.from_callable(
lambda x: x in [e.value for e in ImageType],
error_message="Invalid image type. Use <TAB> to see options",
),
)
image_name = f"llamastack-{name}"
cprint(
textwrap.dedent(
"""
Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.
""",
),
color="green",
file=sys.stderr,
)
cprint("Tip: use <TAB> to see options for the providers.\n", color="green", file=sys.stderr)
providers: dict[str, list[BuildProvider]] = dict()
for api, providers_for_api in get_provider_registry().items():
available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")]
if not available_providers:
continue
api_provider = prompt(
f"> Enter provider for API {api.value}: ",
completer=WordCompleter(available_providers),
complete_while_typing=True,
validator=Validator.from_callable(
lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847
error_message="Invalid provider, use <TAB> to see options",
),
)
string_providers = api_provider.split(" ")
for provider in string_providers:
providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider))
description = prompt(
"\n > (Optional) Enter a short description for your Llama Stack: ",
default="",
)
distribution_spec = DistributionSpec(
providers=providers,
description=description,
)
build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec)
else:
with open(args.config) as f:
try:
contents = yaml.safe_load(f)
contents = replace_env_vars(contents)
build_config = BuildConfig(**contents)
if args.image_type:
build_config.image_type = args.image_type
except Exception as e:
cprint(
f"Could not parse config file {args.config}: {e}",
color="red",
file=sys.stderr,
)
sys.exit(1)
if args.print_deps_only:
print(f"# Dependencies for {distro_name or args.config or image_name}")
normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config)
normal_deps += SERVER_DEPENDENCIES
print(f"uv pip install {' '.join(normal_deps)}")
for special_dep in special_deps:
print(f"uv pip install {special_dep}")
for external_dep in external_provider_dependencies:
print(f"uv pip install {external_dep}")
return
try:
run_config = _run_stack_build_command_from_build_config(
build_config,
image_name=image_name,
config_path=args.config,
distro_name=distro_name,
)
except (Exception, RuntimeError) as exc:
import traceback
cprint(
f"Error building stack: {exc}",
color="red",
file=sys.stderr,
)
cprint("Stack trace:", color="red", file=sys.stderr)
traceback.print_exc()
sys.exit(1)
if run_config is None:
cprint(
"Run config path is empty",
color="red",
file=sys.stderr,
)
sys.exit(1)
if args.run:
config_dict = yaml.safe_load(run_config.read_text())
config = parse_and_maybe_upgrade_config(config_dict)
if config.external_providers_dir and not config.external_providers_dir.exists():
config.external_providers_dir.mkdir(exist_ok=True)
run_args = formulate_run_args(args.image_type, image_name or config.image_name)
run_args.extend([str(os.getenv("LLAMA_STACK_PORT", 8321)), "--config", str(run_config)])
run_command(run_args)
def _generate_run_config(
build_config: BuildConfig,
build_dir: Path,
image_name: str,
) -> Path:
"""
Generate a run.yaml template file for user to edit from a build.yaml file
"""
apis = list(build_config.distribution_spec.providers.keys())
run_config = StackRunConfig(
container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None),
image_name=image_name,
apis=apis,
providers={},
external_providers_dir=build_config.external_providers_dir
if build_config.external_providers_dir
else EXTERNAL_PROVIDERS_DIR,
)
# build providers dict
provider_registry = get_provider_registry(build_config)
for api in apis:
run_config.providers[api] = []
providers = build_config.distribution_spec.providers[api]
for provider in providers:
pid = provider.provider_type.split("::")[-1]
p = provider_registry[Api(api)][provider.provider_type]
if p.deprecation_error:
raise InvalidProviderError(p.deprecation_error)
try:
config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class)
except (ModuleNotFoundError, ValueError) as exc:
# HACK ALERT:
# This code executes after building is done, the import cannot work since the
# package is either available in the venv or container - not available on the host.
# TODO: use a "is_external" flag in ProviderSpec to check if the provider is
# external
cprint(
f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}",
color="yellow",
file=sys.stderr,
)
# Set config_type to None to avoid UnboundLocalError
config_type = None
if config_type is not None and hasattr(config_type, "sample_run_config"):
config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}")
else:
config = {}
p_spec = Provider(
provider_id=pid,
provider_type=provider.provider_type,
config=config,
module=provider.module,
)
run_config.providers[api].append(p_spec)
run_config_file = build_dir / f"{image_name}-run.yaml"
with open(run_config_file, "w") as f:
to_write = json.loads(run_config.model_dump_json())
f.write(yaml.dump(to_write, sort_keys=False))
# Only print this message for non-container builds since it will be displayed before the
# container is built
# For non-container builds, the run.yaml is generated at the very end of the build process so it
# makes sense to display this message
if build_config.image_type != LlamaStackImageType.CONTAINER.value:
cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr)
return run_config_file
def _run_stack_build_command_from_build_config(
build_config: BuildConfig,
image_name: str | None = None,
distro_name: str | None = None,
config_path: str | None = None,
) -> Path | Traversable:
image_name = image_name or build_config.image_name
if build_config.image_type == LlamaStackImageType.CONTAINER.value:
if distro_name:
image_name = f"distribution-{distro_name}"
else:
if not image_name:
raise ValueError("Please specify an image name when building a container image without a template")
else:
if not image_name and os.environ.get("UV_SYSTEM_PYTHON"):
image_name = "__system__"
if not image_name:
raise ValueError("Please specify an image name when building a venv image")
# At this point, image_name should be guaranteed to be a string
if image_name is None:
raise ValueError("image_name should not be None after validation")
if distro_name:
build_dir = DISTRIBS_BASE_DIR / distro_name
build_file_path = build_dir / f"{distro_name}-build.yaml"
else:
if image_name is None:
raise ValueError("image_name cannot be None")
build_dir = DISTRIBS_BASE_DIR / image_name
build_file_path = build_dir / f"{image_name}-build.yaml"
os.makedirs(build_dir, exist_ok=True)
run_config_file = None
# Generate the run.yaml so it can be included in the container image with the proper entrypoint
# Only do this if we're building a container image and we're not using a template
if build_config.image_type == LlamaStackImageType.CONTAINER.value and not distro_name and config_path:
cprint("Generating run.yaml file", color="yellow", file=sys.stderr)
run_config_file = _generate_run_config(build_config, build_dir, image_name)
with open(build_file_path, "w") as f:
to_write = json.loads(build_config.model_dump_json(exclude_none=True))
f.write(yaml.dump(to_write, sort_keys=False))
# We first install the external APIs so that the build process can use them and discover the
# providers dependencies
if build_config.external_apis_dir:
cprint("Installing external APIs", color="yellow", file=sys.stderr)
external_apis = load_external_apis(build_config)
if external_apis:
# install the external APIs
packages = []
for _, api_spec in external_apis.items():
if api_spec.pip_packages:
packages.extend(api_spec.pip_packages)
cprint(
f"Installing {api_spec.name} with pip packages {api_spec.pip_packages}",
color="yellow",
file=sys.stderr,
)
return_code = run_command(["uv", "pip", "install", *packages])
if return_code != 0:
packages_str = ", ".join(packages)
raise RuntimeError(
f"Failed to install external APIs packages: {packages_str} (return code: {return_code})"
)
return_code = build_image(
build_config,
image_name,
distro_or_config=distro_name or config_path or str(build_file_path),
run_config=run_config_file.as_posix() if run_config_file else None,
)
if return_code != 0:
raise RuntimeError(f"Failed to build image {image_name}")
if distro_name:
# copy run.yaml from distribution to build_dir instead of generating it again
distro_path = importlib.resources.files("llama_stack") / f"distributions/{distro_name}/run.yaml"
run_config_file = build_dir / f"{distro_name}-run.yaml"
with importlib.resources.as_file(distro_path) as path:
shutil.copy(path, run_config_file)
cprint("Build Successful!", color="green", file=sys.stderr)
cprint(f"You can find the newly-built distribution here: {run_config_file}", color="blue", file=sys.stderr)
cprint(
"You can run the new Llama Stack distro via: "
+ colored(f"llama stack run {run_config_file} --image-type {build_config.image_type}", "blue"),
color="green",
file=sys.stderr,
)
return distro_path
else:
return _generate_run_config(build_config, build_dir, image_name)
def _run_distro_list_cmd() -> None:
headers = [
"Distribution Name",
# "Providers",
"Description",
]
rows = []
for distro_name, spec in available_distros_specs().items():
rows.append(
[
distro_name,
# json.dumps(spec.distribution_spec.providers, indent=2),
spec.distribution_spec.description,
]
)
print_table(
rows,
headers,
separate_rows=True,
)


@ -0,0 +1,200 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import os
import sys
import textwrap
from pathlib import Path
import yaml
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.validation import Validator
from termcolor import cprint
from llama_stack.cli.stack.utils import ImageType, available_templates_specs
from llama_stack.core.build import get_provider_dependencies
from llama_stack.core.datatypes import (
BuildConfig,
BuildProvider,
DistributionSpec,
)
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.stack import replace_env_vars
from llama_stack.log import get_logger
from llama_stack.providers.datatypes import Api
TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
logger = get_logger(name=__name__, category="cli")
# These are the dependencies needed by the distribution server.
# `llama-stack` is automatically installed by the installation script.
SERVER_DEPENDENCIES = [
"aiosqlite",
"fastapi",
"fire",
"httpx",
"uvicorn",
"opentelemetry-sdk",
"opentelemetry-exporter-otlp-proto-http",
]
def run_stack_show_command(args: argparse.Namespace) -> None:
current_venv = os.environ.get("VIRTUAL_ENV")
env_name = args.env_name or current_venv
if args.distro:
available_templates = available_templates_specs()
if args.distro not in available_templates:
cprint(
f"Could not find template {args.distro}. Please run `llama stack show --list-distros` to check out the available templates",
color="red",
file=sys.stderr,
)
sys.exit(1)
build_config = available_templates[args.distro]
# always venv, conda is gone and container is separate.
build_config.image_type = ImageType.VENV.value
elif args.providers:
provider_list: dict[str, list[BuildProvider]] = dict()
for api_provider in args.providers.split(","):
if "=" not in api_provider:
cprint(
"Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2",
color="red",
file=sys.stderr,
)
sys.exit(1)
api, provider_type = api_provider.split("=")
providers_for_api = get_provider_registry().get(Api(api), None)
if providers_for_api is None:
cprint(
f"{api} is not a valid API.",
color="red",
file=sys.stderr,
)
sys.exit(1)
if provider_type in providers_for_api:
provider = BuildProvider(
provider_type=provider_type,
module=None,
)
provider_list.setdefault(api, []).append(provider)
else:
cprint(
f"{provider} is not a valid provider for the {api} API.",
color="red",
file=sys.stderr,
)
sys.exit(1)
distribution_spec = DistributionSpec(
providers=provider_list,
description=",".join(args.providers),
)
build_config = BuildConfig(image_type=ImageType.VENV.value, distribution_spec=distribution_spec)
elif not args.config and not args.distro:
name = prompt(
"> Enter a name for your Llama Stack (e.g. my-local-stack): ",
validator=Validator.from_callable(
lambda x: len(x) > 0,
error_message="Name cannot be empty, please enter a name",
),
)
image_type = prompt(
"> Enter the image type you want your Llama Stack to be built as (use <TAB> to see options): ",
completer=WordCompleter([e.value for e in ImageType]),
complete_while_typing=True,
validator=Validator.from_callable(
lambda x: x in [e.value for e in ImageType],
error_message="Invalid image type. Use <TAB> to see options",
),
)
env_name = f"llamastack-{name}"
cprint(
textwrap.dedent(
"""
Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.
""",
),
color="green",
file=sys.stderr,
)
cprint("Tip: use <TAB> to see options for the providers.\n", color="green", file=sys.stderr)
providers: dict[str, list[BuildProvider]] = dict()
for api, providers_for_api in get_provider_registry().items():
available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")]
if not available_providers:
continue
api_provider = prompt(
f"> Enter provider for API {api.value}: ",
completer=WordCompleter(available_providers),
complete_while_typing=True,
validator=Validator.from_callable(
lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847
error_message="Invalid provider, use <TAB> to see options",
),
)
string_providers = api_provider.split(" ")
for provider in string_providers:
providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider))
description = prompt(
"\n > (Optional) Enter a short description for your Llama Stack: ",
default="",
)
distribution_spec = DistributionSpec(
providers=providers,
description=description,
)
build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec)
else:
with open(args.config) as f:
try:
contents = yaml.safe_load(f)
contents = replace_env_vars(contents)
build_config = BuildConfig(**contents)
build_config.image_type = "venv"
except Exception as e:
cprint(
f"Could not parse config file {args.config}: {e}",
color="red",
file=sys.stderr,
)
sys.exit(1)
print(f"# Dependencies for {args.distro or args.config or env_name}")
normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config)
normal_deps += SERVER_DEPENDENCIES
# Quote deps with commas
quoted_normal_deps = [quote_if_needed(dep) for dep in normal_deps]
print(f"uv pip install {' '.join(quoted_normal_deps)}")
for special_dep in special_deps:
print(f"uv pip install {quote_if_needed(special_dep)}")
for external_dep in external_provider_dependencies:
print(f"uv pip install {quote_if_needed(external_dep)}")
def quote_if_needed(dep):
# Add quotes if the dependency contains a comma (likely version specifier)
return f"'{dep}'" if "," in dep else dep
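The printed `uv pip install` lines are what the CI steps above pipe into `sh`. Below is a minimal sketch of the quoting behavior; `quote_if_needed` mirrors the helper above, while the dependency lists are hypothetical stand-ins for the values returned by `get_provider_dependencies`:

```python
def quote_if_needed(dep: str) -> str:
    # Quote deps that contain a comma (e.g. multi-constraint version specifiers)
    # so the shell passes them to `uv pip install` as a single argument.
    return f"'{dep}'" if "," in dep else dep

# Hypothetical dependency lists, for illustration only.
normal_deps = ["fastapi", "uvicorn", "numpy>=1.26,<2.0"]
special_deps = ["torch --index-url https://download.pytorch.org/whl/cpu"]

print("uv pip install " + " ".join(quote_if_needed(d) for d in normal_deps))
for dep in special_deps:
    print(f"uv pip install {quote_if_needed(dep)}")
# Prints:
#   uv pip install fastapi uvicorn 'numpy>=1.26,<2.0'
#   uv pip install torch --index-url https://download.pytorch.org/whl/cpu
```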


@ -1,100 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import textwrap
from llama_stack.cli.stack.utils import ImageType
from llama_stack.cli.subcommand import Subcommand
class StackBuild(Subcommand):
def __init__(self, subparsers: argparse._SubParsersAction):
super().__init__()
self.parser = subparsers.add_parser(
"build",
prog="llama stack build",
description="Build a Llama stack container",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
self._add_arguments()
self.parser.set_defaults(func=self._run_stack_build_command)
def _add_arguments(self):
self.parser.add_argument(
"--config",
type=str,
default=None,
help="Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively",
)
self.parser.add_argument(
"--template",
type=str,
default=None,
help="""(deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions""",
)
self.parser.add_argument(
"--distro",
"--distribution",
dest="distribution",
type=str,
default=None,
help="""Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions""",
)
self.parser.add_argument(
"--list-distros",
"--list-distributions",
action="store_true",
dest="list_distros",
default=False,
help="Show the available distributions for building a Llama Stack distribution",
)
self.parser.add_argument(
"--image-type",
type=str,
help="Image Type to use for the build. If not specified, will use the image type from the template config.",
choices=[e.value for e in ImageType],
default=None, # no default so we can detect if a user specified --image-type and override image_type in the config
)
self.parser.add_argument(
"--image-name",
type=str,
help=textwrap.dedent(
f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the virtual environment to use for
the build. If not specified, currently active environment will be used if found.
"""
),
default=None,
)
self.parser.add_argument(
"--print-deps-only",
default=False,
action="store_true",
help="Print the dependencies for the stack only, without building the stack",
)
self.parser.add_argument(
"--run",
action="store_true",
default=False,
help="Run the stack after building using the same image type, name, and other applicable arguments",
)
self.parser.add_argument(
"--providers",
type=str,
default=None,
help="Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per API.",
)
def _run_stack_build_command(self, args: argparse.Namespace) -> None:
# always keep implementation completely silo-ed away from CLI so CLI
# can be fast to load and reduces dependencies
from ._build import run_stack_build_command
return run_stack_build_command(args)


@ -0,0 +1,75 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import textwrap
from llama_stack.cli.stack.utils import ImageType
from llama_stack.cli.subcommand import Subcommand
class StackShow(Subcommand):
def __init__(self, subparsers: argparse._SubParsersAction):
super().__init__()
self.parser = subparsers.add_parser(
"show",
prog="llama stack show",
description="show the dependencies for a llama stack distribution",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
self._add_arguments()
self.parser.set_defaults(func=self._run_stack_show_command)
def _add_arguments(self):
self.parser.add_argument(
"--config",
type=str,
default=None,
help="Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively",
)
self.parser.add_argument(
"--distro",
type=str,
default=None,
help="Name of the distro config to use for show. You may use `llama stack show --list-distros` to check out the available distros",
)
self.parser.add_argument(
"--list-distros",
action="store_true",
default=False,
help="Show the available templates for building a Llama Stack distribution",
)
self.parser.add_argument(
"--env-name",
type=str,
help=textwrap.dedent(
f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the conda or virtual environment to use for
the build. If not specified, currently active environment will be used if found.
"""
),
default=None,
)
self.parser.add_argument(
"--print-deps-only",
default=False,
action="store_true",
help="Print the dependencies for the stack only, without building the stack",
)
self.parser.add_argument(
"--providers",
type=str,
default=None,
help="sync dependencies for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per API.",
)
def _run_stack_show_command(self, args: argparse.Namespace) -> None:
# always keep implementation completely silo-ed away from CLI so CLI
# can be fast to load and reduces dependencies
from ._show import run_stack_show_command
return run_stack_show_command(args)


@ -11,11 +11,11 @@ from llama_stack.cli.stack.list_stacks import StackListBuilds
 from llama_stack.cli.stack.utils import print_subcommand_description
 from llama_stack.cli.subcommand import Subcommand
-from .build import StackBuild
 from .list_apis import StackListApis
 from .list_providers import StackListProviders
 from .remove import StackRemove
 from .run import StackRun
+from .show import StackShow
 class StackParser(Subcommand):
@ -39,7 +39,7 @@ class StackParser(Subcommand):
 subparsers = self.parser.add_subparsers(title="stack_subcommands")
 # Add sub-commands
-StackBuild.create(subparsers)
+StackShow.create(subparsers)
 StackListApis.create(subparsers)
 StackListProviders.create(subparsers)
 StackRun.create(subparsers)


@ -4,7 +4,28 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
import sys
from enum import Enum
from functools import lru_cache
from pathlib import Path
import yaml
from termcolor import cprint
from llama_stack.core.datatypes import (
BuildConfig,
Provider,
StackRunConfig,
)
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.resolver import InvalidProviderError
from llama_stack.core.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.datatypes import Api
TEMPLATES_PATH = Path(__file__).parent.parent.parent / "distributions"
class ImageType(Enum):
@ -19,3 +40,91 @@ def print_subcommand_description(parser, subparsers):
description = subcommand.description
description_text += f" {name:<21} {description}\n"
parser.epilog = description_text
def generate_run_config(
build_config: BuildConfig,
build_dir: Path,
image_name: str,
) -> Path:
"""
Generate a run.yaml template file for user to edit from a build.yaml file
"""
apis = list(build_config.distribution_spec.providers.keys())
run_config = StackRunConfig(
container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None),
image_name=image_name,
apis=apis,
providers={},
external_providers_dir=build_config.external_providers_dir
if build_config.external_providers_dir
else EXTERNAL_PROVIDERS_DIR,
)
# build providers dict
provider_registry = get_provider_registry(build_config)
for api in apis:
run_config.providers[api] = []
providers = build_config.distribution_spec.providers[api]
for provider in providers:
pid = provider.provider_type.split("::")[-1]
p = provider_registry[Api(api)][provider.provider_type]
if p.deprecation_error:
raise InvalidProviderError(p.deprecation_error)
try:
config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class)
except (ModuleNotFoundError, ValueError) as exc:
# HACK ALERT:
# This code executes after building is done, the import cannot work since the
# package is either available in the venv or container - not available on the host.
# TODO: use a "is_external" flag in ProviderSpec to check if the provider is
# external
cprint(
f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}",
color="yellow",
file=sys.stderr,
)
# Set config_type to None to avoid UnboundLocalError
config_type = None
if config_type is not None and hasattr(config_type, "sample_run_config"):
config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}")
else:
config = {}
p_spec = Provider(
provider_id=pid,
provider_type=provider.provider_type,
config=config,
module=provider.module,
)
run_config.providers[api].append(p_spec)
run_config_file = build_dir / f"{image_name}-run.yaml"
with open(run_config_file, "w") as f:
to_write = json.loads(run_config.model_dump_json())
f.write(yaml.dump(to_write, sort_keys=False))
# Only print this message for non-container builds since it will be displayed before the
# container is built
# For non-container builds, the run.yaml is generated at the very end of the build process so it
# makes sense to display this message
if build_config.image_type != LlamaStackImageType.CONTAINER.value:
cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr)
return run_config_file
@lru_cache
def available_templates_specs() -> dict[str, BuildConfig]:
import yaml
template_specs = {}
for p in TEMPLATES_PATH.rglob("*build.yaml"):
template_name = p.parent.name
with open(p) as f:
build_config = BuildConfig(**yaml.safe_load(f))
template_specs[template_name] = build_config
return template_specs


@ -7,6 +7,8 @@
 import importlib.resources
 import logging
 import sys
+import tomllib
+from pathlib import Path
 from pydantic import BaseModel
 from termcolor import cprint
@ -72,8 +74,13 @@ def get_provider_dependencies(
 external_provider_deps.append(provider_spec.module)
 else:
 external_provider_deps.extend(provider_spec.module)
-if hasattr(provider_spec, "pip_packages"):
-deps.extend(provider_spec.pip_packages)
+pyproject = Path(provider_spec.module.replace(".", "/")) / "pyproject.toml"
+with open(pyproject, "rb") as f:
+data = tomllib.load(f)
+dependencies = data.get("project", {}).get("dependencies", [])
+deps.extend(dependencies)
 if hasattr(provider_spec, "container_image") and provider_spec.container_image:
 raise ValueError("A stack's dependencies cannot have a container image")
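With per-provider `pyproject.toml` files (added below), `get_provider_dependencies` now reads each provider's declared dependencies instead of relying on hard-coded `pip_packages` lists. A minimal sketch of that lookup, assuming Python 3.11+ for `tomllib` and using an illustrative module path derived the same way as in the diff (`provider_spec.module.replace(".", "/")`):

```python
import tomllib
from pathlib import Path

# Illustrative provider module; the diff derives this from provider_spec.module.
module = "llama_stack.providers.inline.datasetio.localfs"
pyproject = Path(module.replace(".", "/")) / "pyproject.toml"

with open(pyproject, "rb") as f:  # tomllib only accepts binary file objects
    data = tomllib.load(f)

# [project].dependencies is a plain list of requirement strings, e.g. ["pandas"]
dependencies = data.get("project", {}).get("dependencies", [])
print(dependencies)
```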


@ -132,7 +132,6 @@ class ProviderSpec(BaseModel):
)
is_external: bool = Field(default=False, description="Notes whether this provider is an external provider.")
# used internally by the resolver; this is a hack for now
deps__: list[str] = Field(default_factory=list)
@ -182,7 +181,7 @@ A description of the provider. This is used to display in the documentation.
 class InlineProviderSpec(ProviderSpec):
 pip_packages: list[str] = Field(
 default_factory=list,
-description="The pip dependencies needed for this implementation",
+description="The pip dependencies needed for this implementation (deprecated - use package_name instead)",
 )
 container_image: str | None = Field(
 default=None,


@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-agents-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"matplotlib",
"pillow",
"pandas",
"scikit-learn",
"mcp>=1.8.1",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-datasetio-localfs"
version = "0.1.0"
description = "Local filesystem-based dataset I/O provider for reading and writing datasets to local storage"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"pandas",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-eval-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"tree_sitter",
"pythainlp",
"langdetect",
"emoji",
"nltk",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-files-localfs"
version = "0.1.0"
description = "Local filesystem-based file storage provider for managing files and documents locally"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,29 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of inference with support for various model formats and optimization techniques"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"accelerate",
"fairscale",
"torch",
"torchvision",
"transformers",
"zmq",
"lm-format-enforcer",
"sentence-transformers",
"torchao==0.8.0",
"fbgemm-gpu-genai==1.1.2",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-sentence-transformers"
version = "0.1.0"
description = "Sentence Transformers inference provider for text embeddings and similarity search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"torch",
"torchvision",
"sentence-transformers",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,24 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-post-training-huggingface"
version = "0.1.0"
description = "HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"torch",
"trl",
"transformers",
"peft",
"datasets",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-post-training-torchtune"
version = "0.1.0"
description = "TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"torch",
"torchtune==0.5.0",
"torchao==0.8.0",
"numpy",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-code-scanner"
version = "0.1.0"
description = "Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"codeshield",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-llama-guard"
version = "0.1.0"
description = "Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-prompt-guard"
version = "0.1.0"
description = "Prompt Guard safety provider for detecting and filtering unsafe prompts and content"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"transformers[accelerate]",
"torch",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-scoring-basic"
version = "0.1.0"
description = "Basic scoring provider for simple evaluation metrics and scoring functions"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-scoring-braintrust"
version = "0.1.0"
description = "Braintrust scoring provider for evaluation and scoring using the Braintrust platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"autoevals",
"openai",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,18 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-scoring-llm-as-judge"
version = "0.1.0"
description = "LLM-as-judge scoring provider that uses language models to evaluate and score responses"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = []
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-telemetry-meta-reference"
version = "0.1.0"
description = "Meta's reference implementation of telemetry and observability using OpenTelemetry"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"opentelemetry-sdk",
"opentelemetry-exporter-otlp-proto-http",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,28 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-rag"
version = "0.1.0"
description = "RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"chardet",
"pypdf",
"tqdm",
"numpy",
"scikit-learn",
"scipy",
"nltk",
"sentencepiece",
"transformers",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-chroma"
version = "0.1.0"
description = "Chroma inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"chromadb",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-faiss"
version = "0.1.0"
description = "Faiss inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"faiss-cpu",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-milvus"
version = "0.1.0"
description = "Milvus inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"pymilvus>=2.4.10",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-qdrant"
version = "0.1.0"
description = "Qdrant inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"qdrant-client",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-sqlite-vec"
version = "0.1.0"
description = "SQLite-Vec inline vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"sqlite-vec",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]


@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]:
 InlineProviderSpec(
 api=Api.datasetio,
 provider_type="inline::localfs",
-pip_packages=["pandas"],
 module="llama_stack.providers.inline.datasetio.localfs",
 config_class="llama_stack.providers.inline.datasetio.localfs.LocalFSDatasetIOConfig",
 api_dependencies=[],


@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
 InlineProviderSpec(
 api=Api.eval,
 provider_type="inline::meta-reference",
-pip_packages=["tree_sitter", "pythainlp", "langdetect", "emoji", "nltk"],
 module="llama_stack.providers.inline.eval.meta_reference",
 config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",
 api_dependencies=[


@ -32,7 +32,6 @@ def available_providers() -> list[ProviderSpec]:
 InlineProviderSpec(
 api=Api.inference,
 provider_type="inline::meta-reference",
-pip_packages=META_REFERENCE_DEPS,
 module="llama_stack.providers.inline.inference.meta_reference",
 config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig",
 description="Meta's reference implementation of inference with support for various model formats and optimization techniques.",
@ -40,10 +39,6 @@ def available_providers() -> list[ProviderSpec]:
InlineProviderSpec( InlineProviderSpec(
api=Api.inference, api=Api.inference,
provider_type="inline::sentence-transformers", provider_type="inline::sentence-transformers",
pip_packages=[
"torch torchvision --index-url https://download.pytorch.org/whl/cpu",
"sentence-transformers --no-deps",
],
module="llama_stack.providers.inline.inference.sentence_transformers", module="llama_stack.providers.inline.inference.sentence_transformers",
config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig", config_class="llama_stack.providers.inline.inference.sentence_transformers.config.SentenceTransformersInferenceConfig",
description="Sentence Transformers inference provider for text embeddings and similarity search.", description="Sentence Transformers inference provider for text embeddings and similarity search.",
@ -52,9 +47,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="cerebras", adapter_type="cerebras",
pip_packages=[
"cerebras_cloud_sdk",
],
module="llama_stack.providers.remote.inference.cerebras", module="llama_stack.providers.remote.inference.cerebras",
config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig", config_class="llama_stack.providers.remote.inference.cerebras.CerebrasImplConfig",
description="Cerebras inference provider for running models on Cerebras Cloud platform.", description="Cerebras inference provider for running models on Cerebras Cloud platform.",
@ -64,7 +56,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="ollama", adapter_type="ollama",
pip_packages=["ollama", "aiohttp", "h11>=0.16.0"],
config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig", config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig",
module="llama_stack.providers.remote.inference.ollama", module="llama_stack.providers.remote.inference.ollama",
description="Ollama inference provider for running local models through the Ollama runtime.", description="Ollama inference provider for running local models through the Ollama runtime.",
@ -74,7 +65,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="vllm", adapter_type="vllm",
pip_packages=["openai"],
module="llama_stack.providers.remote.inference.vllm", module="llama_stack.providers.remote.inference.vllm",
config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig", config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
description="Remote vLLM inference provider for connecting to vLLM servers.", description="Remote vLLM inference provider for connecting to vLLM servers.",
@ -84,7 +74,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="tgi", adapter_type="tgi",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.remote.inference.tgi", module="llama_stack.providers.remote.inference.tgi",
config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig", config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig",
description="Text Generation Inference (TGI) provider for HuggingFace model serving.", description="Text Generation Inference (TGI) provider for HuggingFace model serving.",
@ -94,7 +83,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="hf::serverless", adapter_type="hf::serverless",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.remote.inference.tgi", module="llama_stack.providers.remote.inference.tgi",
config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig", config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig",
description="HuggingFace Inference API serverless provider for on-demand model inference.", description="HuggingFace Inference API serverless provider for on-demand model inference.",
@ -104,7 +92,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="hf::endpoint", adapter_type="hf::endpoint",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.remote.inference.tgi", module="llama_stack.providers.remote.inference.tgi",
config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig", config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig",
description="HuggingFace Inference Endpoints provider for dedicated model serving.", description="HuggingFace Inference Endpoints provider for dedicated model serving.",
@ -114,9 +101,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="fireworks", adapter_type="fireworks",
pip_packages=[
"fireworks-ai",
],
module="llama_stack.providers.remote.inference.fireworks", module="llama_stack.providers.remote.inference.fireworks",
config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig", config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig",
provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.fireworks.FireworksProviderDataValidator",
@ -127,9 +111,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="together", adapter_type="together",
pip_packages=[
"together",
],
module="llama_stack.providers.remote.inference.together", module="llama_stack.providers.remote.inference.together",
config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig", config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig",
provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.together.TogetherProviderDataValidator",
@ -140,7 +121,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="bedrock", adapter_type="bedrock",
pip_packages=["boto3"],
module="llama_stack.providers.remote.inference.bedrock", module="llama_stack.providers.remote.inference.bedrock",
config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig", config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig",
description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.", description="AWS Bedrock inference provider for accessing various AI models through AWS's managed service.",
@ -150,9 +130,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="databricks", adapter_type="databricks",
pip_packages=[
"openai",
],
module="llama_stack.providers.remote.inference.databricks", module="llama_stack.providers.remote.inference.databricks",
config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig", config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig",
description="Databricks inference provider for running models on Databricks' unified analytics platform.", description="Databricks inference provider for running models on Databricks' unified analytics platform.",
@ -162,9 +139,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="nvidia", adapter_type="nvidia",
pip_packages=[
"openai",
],
module="llama_stack.providers.remote.inference.nvidia", module="llama_stack.providers.remote.inference.nvidia",
config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig", config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig",
description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.", description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.",
@ -174,7 +148,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="runpod", adapter_type="runpod",
pip_packages=["openai"],
module="llama_stack.providers.remote.inference.runpod", module="llama_stack.providers.remote.inference.runpod",
config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig", config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig",
description="RunPod inference provider for running models on RunPod's cloud GPU platform.", description="RunPod inference provider for running models on RunPod's cloud GPU platform.",
@ -184,7 +157,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="openai", adapter_type="openai",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.openai", module="llama_stack.providers.remote.inference.openai",
config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig", config_class="llama_stack.providers.remote.inference.openai.OpenAIConfig",
provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator",
@ -195,7 +167,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="anthropic", adapter_type="anthropic",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.anthropic", module="llama_stack.providers.remote.inference.anthropic",
config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig", config_class="llama_stack.providers.remote.inference.anthropic.AnthropicConfig",
provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.anthropic.config.AnthropicProviderDataValidator",
@ -206,7 +177,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="gemini", adapter_type="gemini",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.gemini", module="llama_stack.providers.remote.inference.gemini",
config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig", config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig",
provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator",
@ -217,7 +187,6 @@ def available_providers() -> list[ProviderSpec]:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="vertexai", adapter_type="vertexai",
pip_packages=["litellm", "google-cloud-aiplatform"],
module="llama_stack.providers.remote.inference.vertexai", module="llama_stack.providers.remote.inference.vertexai",
config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig", config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig",
provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator",
@ -247,7 +216,6 @@ Available Models:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="groq", adapter_type="groq",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.groq", module="llama_stack.providers.remote.inference.groq",
config_class="llama_stack.providers.remote.inference.groq.GroqConfig", config_class="llama_stack.providers.remote.inference.groq.GroqConfig",
provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator",
@ -258,7 +226,6 @@ Available Models:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="llama-openai-compat", adapter_type="llama-openai-compat",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.llama_openai_compat", module="llama_stack.providers.remote.inference.llama_openai_compat",
config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig", config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig",
provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator",
@ -269,7 +236,6 @@ Available Models:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="sambanova", adapter_type="sambanova",
pip_packages=["litellm"],
module="llama_stack.providers.remote.inference.sambanova", module="llama_stack.providers.remote.inference.sambanova",
config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig", config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator",
@ -280,7 +246,6 @@ Available Models:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="passthrough", adapter_type="passthrough",
pip_packages=[],
module="llama_stack.providers.remote.inference.passthrough", module="llama_stack.providers.remote.inference.passthrough",
config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig", config_class="llama_stack.providers.remote.inference.passthrough.PassthroughImplConfig",
provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.passthrough.PassthroughProviderDataValidator",
@ -291,7 +256,6 @@ Available Models:
api=Api.inference, api=Api.inference,
adapter=AdapterSpec( adapter=AdapterSpec(
adapter_type="watsonx", adapter_type="watsonx",
pip_packages=["ibm_watson_machine_learning"],
module="llama_stack.providers.remote.inference.watsonx", module="llama_stack.providers.remote.inference.watsonx",
config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig", config_class="llama_stack.providers.remote.inference.watsonx.WatsonXConfig",
provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator", provider_data_validator="llama_stack.providers.remote.inference.watsonx.WatsonXProviderDataValidator",

View file

@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.post_training,
            provider_type="inline::torchtune",
-           pip_packages=["torch", "torchtune==0.5.0", "torchao==0.8.0", "numpy"],
            module="llama_stack.providers.inline.post_training.torchtune",
            config_class="llama_stack.providers.inline.post_training.torchtune.TorchtunePostTrainingConfig",
            api_dependencies=[
@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.post_training,
            provider_type="inline::huggingface",
-           pip_packages=["torch", "trl", "transformers", "peft", "datasets"],
            module="llama_stack.providers.inline.post_training.huggingface",
            config_class="llama_stack.providers.inline.post_training.huggingface.HuggingFacePostTrainingConfig",
            api_dependencies=[
@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]:
            api=Api.post_training,
            adapter=AdapterSpec(
                adapter_type="nvidia",
-               pip_packages=["requests", "aiohttp"],
                module="llama_stack.providers.remote.post_training.nvidia",
                config_class="llama_stack.providers.remote.post_training.nvidia.NvidiaPostTrainingConfig",
                description="NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform.",

View file

@ -30,7 +30,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.safety,
            provider_type="inline::llama-guard",
-           pip_packages=[],
            module="llama_stack.providers.inline.safety.llama_guard",
            config_class="llama_stack.providers.inline.safety.llama_guard.LlamaGuardConfig",
            api_dependencies=[
@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]:
            api=Api.safety,
            adapter=AdapterSpec(
                adapter_type="bedrock",
-               pip_packages=["boto3"],
                module="llama_stack.providers.remote.safety.bedrock",
                config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig",
                description="AWS Bedrock safety provider for content moderation using AWS's safety services.",
@ -62,7 +60,6 @@ def available_providers() -> list[ProviderSpec]:
            api=Api.safety,
            adapter=AdapterSpec(
                adapter_type="nvidia",
-               pip_packages=["requests"],
                module="llama_stack.providers.remote.safety.nvidia",
                config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig",
                description="NVIDIA's safety provider for content moderation and safety filtering.",
@ -72,7 +69,6 @@ def available_providers() -> list[ProviderSpec]:
            api=Api.safety,
            adapter=AdapterSpec(
                adapter_type="sambanova",
-               pip_packages=["litellm", "requests"],
                module="llama_stack.providers.remote.safety.sambanova",
                config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig",
                provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator",

View file

@ -13,7 +13,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.scoring,
            provider_type="inline::basic",
-           pip_packages=["requests"],
            module="llama_stack.providers.inline.scoring.basic",
            config_class="llama_stack.providers.inline.scoring.basic.BasicScoringConfig",
            api_dependencies=[
@ -25,7 +24,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.scoring,
            provider_type="inline::llm-as-judge",
-           pip_packages=[],
            module="llama_stack.providers.inline.scoring.llm_as_judge",
            config_class="llama_stack.providers.inline.scoring.llm_as_judge.LlmAsJudgeScoringConfig",
            api_dependencies=[
@ -38,7 +36,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.scoring,
            provider_type="inline::braintrust",
-           pip_packages=["autoevals", "openai"],
            module="llama_stack.providers.inline.scoring.braintrust",
            config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig",
            api_dependencies=[

View file

@ -41,7 +41,6 @@ def available_providers() -> list[ProviderSpec]:
                adapter_type="brave-search",
                module="llama_stack.providers.remote.tool_runtime.brave_search",
                config_class="llama_stack.providers.remote.tool_runtime.brave_search.config.BraveSearchToolConfig",
-               pip_packages=["requests"],
                provider_data_validator="llama_stack.providers.remote.tool_runtime.brave_search.BraveSearchToolProviderDataValidator",
                description="Brave Search tool for web search capabilities with privacy-focused results.",
            ),
@ -52,7 +51,6 @@ def available_providers() -> list[ProviderSpec]:
                adapter_type="bing-search",
                module="llama_stack.providers.remote.tool_runtime.bing_search",
                config_class="llama_stack.providers.remote.tool_runtime.bing_search.config.BingSearchToolConfig",
-               pip_packages=["requests"],
                provider_data_validator="llama_stack.providers.remote.tool_runtime.bing_search.BingSearchToolProviderDataValidator",
                description="Bing Search tool for web search capabilities using Microsoft's search engine.",
            ),
@ -63,7 +61,6 @@ def available_providers() -> list[ProviderSpec]:
                adapter_type="tavily-search",
                module="llama_stack.providers.remote.tool_runtime.tavily_search",
                config_class="llama_stack.providers.remote.tool_runtime.tavily_search.config.TavilySearchToolConfig",
-               pip_packages=["requests"],
                provider_data_validator="llama_stack.providers.remote.tool_runtime.tavily_search.TavilySearchToolProviderDataValidator",
                description="Tavily Search tool for AI-optimized web search with structured results.",
            ),
@ -74,7 +71,6 @@ def available_providers() -> list[ProviderSpec]:
                adapter_type="wolfram-alpha",
                module="llama_stack.providers.remote.tool_runtime.wolfram_alpha",
                config_class="llama_stack.providers.remote.tool_runtime.wolfram_alpha.config.WolframAlphaToolConfig",
-               pip_packages=["requests"],
                provider_data_validator="llama_stack.providers.remote.tool_runtime.wolfram_alpha.WolframAlphaToolProviderDataValidator",
                description="Wolfram Alpha tool for computational knowledge and mathematical calculations.",
            ),
@ -85,7 +81,6 @@ def available_providers() -> list[ProviderSpec]:
                adapter_type="model-context-protocol",
                module="llama_stack.providers.remote.tool_runtime.model_context_protocol",
                config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig",
-               pip_packages=["mcp>=1.8.1"],
                provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator",
                description="Model Context Protocol (MCP) tool for standardized tool calling and context management.",
            ),

View file

@ -19,7 +19,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::meta-reference",
-           pip_packages=["faiss-cpu"],
            module="llama_stack.providers.inline.vector_io.faiss",
            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
            deprecation_warning="Please use the `inline::faiss` provider instead.",
@ -30,7 +29,6 @@ def available_providers() -> list[ProviderSpec]:
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::faiss",
-           pip_packages=["faiss-cpu"],
            module="llama_stack.providers.inline.vector_io.faiss",
            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
            api_dependencies=[Api.inference],
@ -83,7 +81,6 @@ more details about Faiss in general.
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::sqlite-vec",
-           pip_packages=["sqlite-vec"],
            module="llama_stack.providers.inline.vector_io.sqlite_vec",
            config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
            api_dependencies=[Api.inference],
@ -290,7 +287,6 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::sqlite_vec",
-           pip_packages=["sqlite-vec"],
            module="llama_stack.providers.inline.vector_io.sqlite_vec",
            config_class="llama_stack.providers.inline.vector_io.sqlite_vec.SQLiteVectorIOConfig",
            deprecation_warning="Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead.",
@ -304,7 +300,6 @@ Please refer to the sqlite-vec provider documentation.
            Api.vector_io,
            AdapterSpec(
                adapter_type="chromadb",
-               pip_packages=["chromadb-client"],
                module="llama_stack.providers.remote.vector_io.chroma",
                config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig",
                description="""
@ -347,7 +342,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::chromadb",
-           pip_packages=["chromadb"],
            module="llama_stack.providers.inline.vector_io.chroma",
            config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig",
            api_dependencies=[Api.inference],
@ -391,7 +385,6 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
            Api.vector_io,
            AdapterSpec(
                adapter_type="pgvector",
-               pip_packages=["psycopg2-binary"],
                module="llama_stack.providers.remote.vector_io.pgvector",
                config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig",
                description="""
@ -430,7 +423,6 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de
            Api.vector_io,
            AdapterSpec(
                adapter_type="weaviate",
-               pip_packages=["weaviate-client"],
                module="llama_stack.providers.remote.vector_io.weaviate",
                config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig",
                provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData",
@ -471,7 +463,6 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::qdrant",
-           pip_packages=["qdrant-client"],
            module="llama_stack.providers.inline.vector_io.qdrant",
            config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig",
            api_dependencies=[Api.inference],
@ -524,7 +515,6 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta
            Api.vector_io,
            AdapterSpec(
                adapter_type="qdrant",
-               pip_packages=["qdrant-client"],
                module="llama_stack.providers.remote.vector_io.qdrant",
                config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig",
                description="""
@ -538,7 +528,6 @@ Please refer to the inline provider documentation.
            Api.vector_io,
            AdapterSpec(
                adapter_type="milvus",
-               pip_packages=["pymilvus>=2.4.10"],
                module="llama_stack.providers.remote.vector_io.milvus",
                config_class="llama_stack.providers.remote.vector_io.milvus.MilvusVectorIOConfig",
                description="""
@ -739,7 +728,6 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi
        InlineProviderSpec(
            api=Api.vector_io,
            provider_type="inline::milvus",
-           pip_packages=["pymilvus>=2.4.10"],
            module="llama_stack.providers.inline.vector_io.milvus",
            config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig",
            api_dependencies=[Api.inference],

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-datasetio-huggingface"
version = "0.1.0"
description = "HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"datasets",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-datasetio-nvidia"
version = "0.1.0"
description = "NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"datasets",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-eval-nvidia"
version = "0.1.0"
description = "NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-anthropic"
version = "0.1.0"
description = "Anthropic inference provider for accessing Claude models and Anthropic's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-bedrock"
version = "0.1.0"
description = "AWS Bedrock inference provider for accessing various AI models through AWS's managed service"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"boto3",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-cerebras"
version = "0.1.0"
description = "Cerebras inference provider for running models on Cerebras Cloud platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"cerebras_cloud_sdk",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-databricks"
version = "0.1.0"
description = "Databricks inference provider for running models on Databricks' unified analytics platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"openai",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-fireworks"
version = "0.1.0"
description = "Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"fireworks-ai",
"grpcio>=1.67.1,<1.71.0", # Pin grpcio version for compatibility
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-gemini"
version = "0.1.0"
description = "Google Gemini inference provider for accessing Gemini models and Google's AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-groq"
version = "0.1.0"
description = "Groq inference provider for ultra-fast inference using Groq's LPU technology"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-llama-openai-compat"
version = "0.1.0"
description = "Llama OpenAI-compatible provider for using Llama models with OpenAI API format"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-nvidia"
version = "0.1.0"
description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"openai",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,23 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-ollama"
version = "0.1.0"
description = "Ollama inference provider for running local models through the Ollama runtime"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"ollama",
"aiohttp",
"h11>=0.16.0",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-openai"
version = "0.1.0"
description = "OpenAI inference provider for accessing GPT models and other OpenAI services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-passthrough"
version = "0.1.0"
description = "Passthrough inference provider for connecting to any external inference service not directly supported"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-runpod"
version = "0.1.0"
description = "RunPod inference provider for running models on RunPod's cloud GPU platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"openai",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-sambanova"
version = "0.1.0"
description = "SambaNova inference provider for running models on SambaNova's dataflow architecture"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,22 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-tgi"
version = "0.1.0"
description = "Text Generation Inference (TGI) provider for HuggingFace model serving"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"huggingface_hub",
"aiohttp",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-together"
version = "0.1.0"
description = "Together AI inference provider for open-source models and collaborative AI development"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"together",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,19 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-vertexai"
version = "0.1.0"
description = "Google VertexAI Remote Inference Provider"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
"google-cloud-aiplatform"
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-vllm"
version = "0.1.0"
description = "Remote vLLM inference provider for connecting to vLLM servers"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"openai",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-inference-watsonx"
version = "0.1.0"
description = "IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"ibm_watson_machine_learning",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-post-training-nvidia"
version = "0.1.0"
description = "NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
"aiohttp",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-bedrock"
version = "0.1.0"
description = "AWS Bedrock safety provider for content moderation using AWS's safety services"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"boto3",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-nvidia"
version = "0.1.0"
description = "NVIDIA's safety provider for content moderation and safety filtering"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-safety-sambanova"
version = "0.1.0"
description = "SambaNova's safety provider for content moderation and safety filtering"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"litellm",
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-bing-search"
version = "0.1.0"
description = "Bing Search tool for web search capabilities using Microsoft's search engine"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-brave-search"
version = "0.1.0"
description = "Brave Search tool for web search capabilities with privacy-focused results"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-model-context-protocol"
version = "0.1.0"
description = "Model Context Protocol (MCP) tool for standardized tool calling and context management"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"mcp>=1.8.1",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-tavily-search"
version = "0.1.0"
description = "Tavily Search tool for AI-optimized web search with structured results"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-tool-runtime-wolfram-alpha"
version = "0.1.0"
description = "Wolfram Alpha tool for computational knowledge and mathematical calculations"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"requests",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-chroma-remote"
version = "0.1.0"
description = "Chroma remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"chromadb-client",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-milvus-remote"
version = "0.1.0"
description = "Milvus remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"pymilvus>=2.4.10",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-pgvector"
version = "0.1.0"
description = "PGVector remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"psycopg2-binary",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-qdrant-remote"
version = "0.1.0"
description = "Qdrant remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"qdrant-client",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -0,0 +1,20 @@
[build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "llama-stack-provider-vector-io-weaviate"
version = "0.1.0"
description = "Weaviate remote vector database provider for Llama Stack"
authors = [{ name = "Meta Llama", email = "llama-oss@meta.com" }]
requires-python = ">=3.12"
license = { "text" = "MIT" }
dependencies = [
"weaviate-client",
]
[tool.setuptools.packages.find]
where = ["."]
include = ["llama_stack*"]

View file

@ -1,40 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from pathlib import Path
from llama_stack.cli.stack._build import (
_run_stack_build_command_from_build_config,
)
from llama_stack.core.datatypes import BuildConfig, DistributionSpec
from llama_stack.core.utils.image_types import LlamaStackImageType
def test_container_build_passes_path(monkeypatch, tmp_path):
called_with = {}
def spy_build_image(build_config, image_name, distro_or_config, run_config=None):
called_with["path"] = distro_or_config
called_with["run_config"] = run_config
return 0
monkeypatch.setattr(
"llama_stack.cli.stack._build.build_image",
spy_build_image,
raising=True,
)
cfg = BuildConfig(
image_type=LlamaStackImageType.CONTAINER.value,
distribution_spec=DistributionSpec(providers={}, description=""),
)
_run_stack_build_command_from_build_config(cfg, image_name="dummy")
assert "path" in called_with
assert isinstance(called_with["path"], str)
assert Path(called_with["path"]).exists()
assert called_with["run_config"] is None

View file

@ -0,0 +1,21 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack.cli.stack._show import (
run_stack_show_command,
)
from llama_stack.core.datatypes import BuildConfig, DistributionSpec
from llama_stack.core.utils.image_types import LlamaStackImageType
def test_stack_show_basic():
cfg = BuildConfig(
image_type=LlamaStackImageType.CONTAINER.value,
distribution_spec=DistributionSpec(providers={}, description=""),
)
run_stack_show_command(cfg)

uv.lock generated (38 changes)
View file

@ -3946,27 +3946,27 @@ wheels = [
[[package]]
name = "ruff"
-version = "0.12.5"
+version = "0.9.10"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/30/cd/01015eb5034605fd98d829c5839ec2c6b4582b479707f7c1c2af861e8258/ruff-0.12.5.tar.gz", hash = "sha256:b209db6102b66f13625940b7f8c7d0f18e20039bb7f6101fbdac935c9612057e", size = 5170722, upload-time = "2025-07-24T13:26:37.456Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/20/8e/fafaa6f15c332e73425d9c44ada85360501045d5ab0b81400076aff27cf6/ruff-0.9.10.tar.gz", hash = "sha256:9bacb735d7bada9cfb0f2c227d3658fc443d90a727b47f206fb33f52f3c0eac7", size = 3759776, upload-time = "2025-03-07T15:27:44.363Z" }
wheels = [
    [per-platform ruff wheel entries updated from 0.12.5 to 0.9.10; URLs, hashes, sizes, and upload times changed accordingly]
{ url = "https://files.pythonhosted.org/packages/24/ff/96058f6506aac0fbc0d0fc0d60b0d0bd746240a0594657a2d94ad28033ba/ruff-0.12.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c47dea6ae39421851685141ba9734767f960113d51e83fd7bb9958d5be8763a", size = 12937465, upload-time = "2025-07-24T13:26:17.808Z" }, { url = "https://files.pythonhosted.org/packages/5e/a6/cc472161cd04d30a09d5c90698696b70c169eeba2c41030344194242db45/ruff-0.9.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60dec7201c0b10d6d11be00e8f2dbb6f40ef1828ee75ed739923799513db24c", size = 11302673, upload-time = "2025-03-07T15:27:21.655Z" },
{ url = "https://files.pythonhosted.org/packages/eb/d3/68bc5e7ab96c94b3589d1789f2dd6dd4b27b263310019529ac9be1e8f31b/ruff-0.12.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5076aa0e61e30f848846f0265c873c249d4b558105b221be1828f9f79903dc5", size = 11771136, upload-time = "2025-07-24T13:26:20.422Z" }, { url = "https://files.pythonhosted.org/packages/6c/db/d31c361c4025b1b9102b4d032c70a69adb9ee6fde093f6c3bf29f831c85c/ruff-0.9.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d838b60007da7a39c046fcdd317293d10b845001f38bcb55ba766c3875b01e43", size = 10235358, upload-time = "2025-03-07T15:27:24.72Z" },
{ url = "https://files.pythonhosted.org/packages/52/75/7356af30a14584981cabfefcf6106dea98cec9a7af4acb5daaf4b114845f/ruff-0.12.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a5a4c7830dadd3d8c39b1cc85386e2c1e62344f20766be6f173c22fb5f72f293", size = 11601644, upload-time = "2025-07-24T13:26:22.928Z" }, { url = "https://files.pythonhosted.org/packages/d1/86/d6374e24a14d4d93ebe120f45edd82ad7dcf3ef999ffc92b197d81cdc2a5/ruff-0.9.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ccaf903108b899beb8e09a63ffae5869057ab649c1e9231c05ae354ebc62066c", size = 9886177, upload-time = "2025-03-07T15:27:27.282Z" },
{ url = "https://files.pythonhosted.org/packages/c2/67/91c71d27205871737cae11025ee2b098f512104e26ffd8656fd93d0ada0a/ruff-0.12.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:46699f73c2b5b137b9dc0fc1a190b43e35b008b398c6066ea1350cce6326adcb", size = 12478068, upload-time = "2025-07-24T13:26:26.134Z" }, { url = "https://files.pythonhosted.org/packages/00/62/a61691f6eaaac1e945a1f3f59f1eea9a218513139d5b6c2b8f88b43b5b8f/ruff-0.9.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f9567d135265d46e59d62dc60c0bfad10e9a6822e231f5b24032dba5a55be6b5", size = 10864747, upload-time = "2025-03-07T15:27:30.637Z" },
{ url = "https://files.pythonhosted.org/packages/34/04/b6b00383cf2f48e8e78e14eb258942fdf2a9bf0287fbf5cdd398b749193a/ruff-0.12.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5a655a0a0d396f0f072faafc18ebd59adde8ca85fb848dc1b0d9f024b9c4d3bb", size = 12991537, upload-time = "2025-07-24T13:26:28.533Z" }, { url = "https://files.pythonhosted.org/packages/ee/94/2c7065e1d92a8a8a46d46d9c3cf07b0aa7e0a1e0153d74baa5e6620b4102/ruff-0.9.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5f202f0d93738c28a89f8ed9eaba01b7be339e5d8d642c994347eaa81c6d75b8", size = 11360441, upload-time = "2025-03-07T15:27:33.356Z" },
{ url = "https://files.pythonhosted.org/packages/3e/b9/053d6445dc7544fb6594785056d8ece61daae7214859ada4a152ad56b6e0/ruff-0.12.5-py3-none-win32.whl", hash = "sha256:dfeb2627c459b0b78ca2bbdc38dd11cc9a0a88bf91db982058b26ce41714ffa9", size = 11751575, upload-time = "2025-07-24T13:26:30.835Z" }, { url = "https://files.pythonhosted.org/packages/a7/8f/1f545ea6f9fcd7bf4368551fb91d2064d8f0577b3079bb3f0ae5779fb773/ruff-0.9.10-py3-none-win32.whl", hash = "sha256:bfb834e87c916521ce46b1788fbb8484966e5113c02df216680102e9eb960029", size = 10247401, upload-time = "2025-03-07T15:27:35.994Z" },
{ url = "https://files.pythonhosted.org/packages/bc/0f/ab16e8259493137598b9149734fec2e06fdeda9837e6f634f5c4e35916da/ruff-0.12.5-py3-none-win_amd64.whl", hash = "sha256:ae0d90cf5f49466c954991b9d8b953bd093c32c27608e409ae3564c63c5306a5", size = 12882273, upload-time = "2025-07-24T13:26:32.929Z" }, { url = "https://files.pythonhosted.org/packages/4f/18/fb703603ab108e5c165f52f5b86ee2aa9be43bb781703ec87c66a5f5d604/ruff-0.9.10-py3-none-win_amd64.whl", hash = "sha256:f2160eeef3031bf4b17df74e307d4c5fb689a6f3a26a2de3f7ef4044e3c484f1", size = 11366360, upload-time = "2025-03-07T15:27:38.66Z" },
{ url = "https://files.pythonhosted.org/packages/00/db/c376b0661c24cf770cb8815268190668ec1330eba8374a126ceef8c72d55/ruff-0.12.5-py3-none-win_arm64.whl", hash = "sha256:48cdbfc633de2c5c37d9f090ba3b352d1576b0015bfc3bc98eaf230275b7e805", size = 11951564, upload-time = "2025-07-24T13:26:34.994Z" }, { url = "https://files.pythonhosted.org/packages/35/85/338e603dc68e7d9994d5d84f24adbf69bae760ba5efd3e20f5ff2cec18da/ruff-0.9.10-py3-none-win_arm64.whl", hash = "sha256:5fd804c0327a5e5ea26615550e706942f348b197d5475ff34c19733aee4b2e69", size = 10436892, upload-time = "2025-03-07T15:27:41.687Z" },
] ]
[[package]] [[package]]