mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 04:04:14 +00:00
The starter distribution now comes with all the required packages to support persistent stores—like the agent store, metadata, and inference—using PostgreSQL. We’ve added a new run YAML file, run-with-postgres-store.yaml, to make this setup easy. The file is included in the container image, so users can simply override the entrypoint to use the Postgres-specific config. The documentation has been updated with instructions on how to do that. Closes: #2619 Signed-off-by: Sébastien Han <seb@redhat.com>
23 lines
937 B
Python
23 lines
937 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack.distributions.template import BuildProvider, DistributionTemplate
|
|
|
|
from ..starter.starter import get_distribution_template as get_starter_distribution_template
|
|
|
|
|
|
def get_distribution_template() -> DistributionTemplate:
    """Build the ``starter-gpu`` distribution template.

    Starts from the base ``starter`` template and tailors it for
    GPU-enabled environments: the Postgres-specific run config is dropped
    and the HuggingFace GPU post-training provider is installed.
    """
    distro = get_starter_distribution_template(name="starter-gpu")
    distro.description = (
        "Quick start template for running Llama Stack with several popular providers. "
        "This distribution is intended for GPU-enabled environments."
    )

    # The Postgres-backed run config only ships with the plain starter
    # distribution; this GPU variant does not carry it.
    distro.run_configs.pop("run-with-postgres-store.yaml", None)

    # GPU environments use the HuggingFace GPU post-training provider.
    distro.providers["post_training"] = [
        BuildProvider(provider_type="inline::huggingface-gpu"),
    ]

    return distro