mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
Splits the meta-reference safety implementation into three distinct providers:

- inline::llama-guard
- inline::prompt-guard
- inline::code-scanner

Note that this PR is a backward-incompatible change to the llama stack server. I have added a deprecation_error field to ProviderSpec -- the server reads it and immediately barfs. This is used to direct the user with a specific message on what action to perform. An automagical "config upgrade" is a bit too much work to implement right now :/

(Note that we will be gradually prefixing all inline providers with inline:: -- I am only doing this for this set of new providers because otherwise existing configuration files would break even more badly.)
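For context, here is a minimal sketch of how a deprecation_error on a ProviderSpec might be checked at server startup. ProviderSpec and deprecation_error are named in the PR text above; the simplified spec shape, the ensure_not_deprecated helper, and the error message are hypothetical illustrations, not the actual llama-stack implementation.

from typing import Optional

from pydantic import BaseModel


class ProviderSpec(BaseModel):
    provider_type: str
    deprecation_error: Optional[str] = None


def ensure_not_deprecated(spec: ProviderSpec) -> None:
    # Fail fast with the provider's own message so the user knows what to change.
    if spec.deprecation_error:
        raise ValueError(f"Provider `{spec.provider_type}`: {spec.deprecation_error}")


# Example: point users of the old safety provider at the new inline:: providers.
old_spec = ProviderSpec(
    provider_type="meta-reference",
    deprecation_error=(
        "the meta-reference safety provider has been split; use "
        "inline::llama-guard, inline::prompt-guard, or inline::code-scanner"
    ),
)
# ensure_not_deprecated(old_spec)  # raises ValueError with the message above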
25 lines
678 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum

from pydantic import BaseModel, field_validator


class PromptGuardType(Enum):
    injection = "injection"
    jailbreak = "jailbreak"


class PromptGuardConfig(BaseModel):
    # Which Prompt Guard check to run; defaults to prompt-injection detection.
    guard_type: str = PromptGuardType.injection.value

    # Note: @field_validator must sit above @classmethod so that pydantic v2
    # registers the validator correctly.
    @field_validator("guard_type")
    @classmethod
    def validate_guard_type(cls, v):
        if v not in [t.value for t in PromptGuardType]:
            raise ValueError(f"Unknown prompt guard type: {v}")
        return v
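For illustration, a minimal usage sketch of PromptGuardConfig (assuming pydantic v2 is installed); the values shown are examples, not part of the file above.

from pydantic import ValidationError

# The two known guard types pass validation.
config = PromptGuardConfig(guard_type="jailbreak")
print(config.guard_type)  # -> "jailbreak"

# Anything else is rejected by validate_guard_type.
try:
    PromptGuardConfig(guard_type="toxicity")
except ValidationError as exc:
    print(exc)  # includes "Unknown prompt guard type: toxicity"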