Make each inference provider into its own subdirectory

Ashwin Bharambe 2024-08-05 15:13:52 -07:00
parent f64668319c
commit 0de5a807c7
42 changed files with 123 additions and 103 deletions
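
In effect, the safety provider implementation moves from files directly under llama_toolchain/safety/ into a meta_reference package, as the import changes below suggest. A rough before/after sketch of the layout, reconstructed from the diff rather than taken verbatim from the tree (exact file lists are assumptions):

# Before:                         After:
#   llama_toolchain/safety/         llama_toolchain/safety/
#     config.py                       api/...
#     safety.py                       meta_reference/
#     shields/...                       __init__.py   (new, see below)
#                                       config.py
#                                       safety.py
#                                       shields/...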

@@ -0,0 +1,8 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .config import SafetyConfig  # noqa
+from .safety import get_provider_impl  # noqa
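
The new package-level __init__.py re-exports the provider's two entry points, so the distribution layer can treat the `module` string from a ProviderSpec as an importable package. A minimal sketch of how such a registry might resolve it, assuming a hypothetical load_provider helper (not an actual llama_toolchain API; the real get_provider_impl signature may differ, e.g. it may be async or take dependencies):

import importlib

# Hypothetical helper, not part of llama_toolchain; shown only to illustrate
# why the package must re-export get_provider_impl from its __init__.py.
def load_provider(module_name: str, config):
    # e.g. module_name == "llama_toolchain.safety.meta_reference"
    module = importlib.import_module(module_name)
    return module.get_provider_impl(config)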

@@ -11,7 +11,7 @@ from typing import Dict
 from llama_toolchain.distribution.datatypes import Api, ProviderSpec
 
 from .config import SafetyConfig
-from .api.endpoints import *  # noqa
+from llama_toolchain.safety.api import *  # noqa
 from .shields import (
     CodeScannerShield,
     InjectionShield,

@@ -4,14 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-import sys
 from typing import List
 
 from llama_models.llama3_1.api.datatypes import Message
 
-parent_dir = "../.."
-sys.path.append(parent_dir)
-from llama_toolchain.safety.shields.base import (
+from llama_toolchain.safety.meta_reference.shields.base import (
     OnViolationAction,
     ShieldBase,
     ShieldResponse,

@@ -19,7 +19,7 @@ def available_safety_providers() -> List[ProviderSpec]:
                 "torch",
                 "transformers",
             ],
-            module="llama_toolchain.safety.safety",
-            config_class="llama_toolchain.safety.config.SafetyConfig",
+            module="llama_toolchain.safety.meta_reference",
+            config_class="llama_toolchain.safety.meta_reference.SafetyConfig",
         ),
 ]
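
Put together, the registration entry now points at the package rather than a single safety.py module. A sketch of what the full entry might look like after this commit; the surrounding fields (api, provider_id, the spec class name, and any pip_packages beyond those shown) are assumptions reconstructed from the visible fragment, not verbatim source:

from typing import List

from llama_toolchain.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec


def available_safety_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(  # assumed spec class; only module/config_class are verbatim
            api=Api.safety,
            provider_id="meta-reference",  # assumed id
            pip_packages=[
                "torch",
                "transformers",
            ],
            # The module now names the provider package, whose __init__.py
            # exposes get_provider_impl and SafetyConfig.
            module="llama_toolchain.safety.meta_reference",
            config_class="llama_toolchain.safety.meta_reference.SafetyConfig",
        ),
    ]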