Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-26 10:22:00 +00:00.
We are now testing the safety capability with the starter image. This includes a few changes: * Enable the safety integration test * Relax the shield model requirements from llama-guard so it works with llama-guard3:8b coming from Ollama * Expose a shield for each inference provider in the starter distro. The shield will only be registered if the provider is enabled. Shields will be added if the provider claims to support a safety model * Missing provider models have been added too * Pointers to official documentation pages for provider model support have been added Closes: https://github.com/meta-llama/llama-stack/issues/2528 Signed-off-by: Sébastien Han <seb@redhat.com>
29 lines
901 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack.providers.utils.inference.model_registry import (
    build_hf_repo_model_entry,
)

SAFETY_MODELS_ENTRIES = []
|
|
|
|
|
|
# https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html
|
|
MODEL_ENTRIES = [
|
|
build_hf_repo_model_entry(
|
|
"meta.llama3-1-8b-instruct-v1:0",
|
|
CoreModelId.llama3_1_8b_instruct.value,
|
|
),
|
|
build_hf_repo_model_entry(
|
|
"meta.llama3-1-70b-instruct-v1:0",
|
|
CoreModelId.llama3_1_70b_instruct.value,
|
|
),
|
|
build_hf_repo_model_entry(
|
|
"meta.llama3-1-405b-instruct-v1:0",
|
|
CoreModelId.llama3_1_405b_instruct.value,
|
|
),
|
|
] + SAFETY_MODELS_ENTRIES
|