# Mirror of https://github.com/meta-llama/llama-stack.git
# Synced 2025-10-04 12:07:34 +00:00
#
# NOTE: The `trl` dependency brings in `accelerate`, which brings in nvidia
# dependencies for torch. We cannot have that in the starter distro. As such,
# no CPU-only post-training for the huggingface provider.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.distributions.template import BuildProvider, DistributionTemplate
from ..starter.starter import get_distribution_template as get_starter_distribution_template


def get_distribution_template() -> DistributionTemplate:
    """Build the ``starter-gpu`` distribution template.

    Starts from the CPU ``starter`` distribution template and specializes it
    for GPU-enabled environments: renames it, updates the description, and
    replaces the post-training provider with the GPU-enabled huggingface
    variant (per the module note, the CPU variant is excluded because `trl`
    pulls in nvidia torch dependencies via `accelerate`).

    :return: the modified ``DistributionTemplate`` for ``starter-gpu``.
    """
    distro = get_starter_distribution_template()
    distro.name = "starter-gpu"
    distro.description = (
        "Quick start template for running Llama Stack with several popular providers. "
        "This distribution is intended for GPU-enabled environments."
    )
    # Swap in the GPU-only post-training provider.
    distro.providers["post_training"] = [
        BuildProvider(provider_type="inline::huggingface-gpu"),
    ]
    return distro