forked from phoenix-oss/llama-stack-mirror
Splits the meta-reference safety implementation into three distinct providers: - inline::llama-guard - inline::prompt-guard - inline::code-scanner Note that this PR is a backward incompatible change to the llama stack server. I have added deprecation_error field to ProviderSpec -- the server reads it and immediately barfs. This is used to direct the user with a specific message on what action to perform. An automagical "config upgrade" is a bit too much work to implement right now :/ (Note that we will be gradually prefixing all inline providers with inline:: -- I am only doing this for this set of new providers because otherwise existing configuration files will break even more badly.)
120 lines
4.1 KiB
Python
120 lines
4.1 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
from typing import Any, Dict, List
|
|
|
|
import torch
|
|
from termcolor import cprint
|
|
|
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
|
|
from llama_stack.distribution.utils.model_utils import model_local_dir
|
|
from llama_stack.apis.inference import * # noqa: F403
|
|
from llama_stack.apis.safety import * # noqa: F403
|
|
from llama_models.llama3.api.datatypes import * # noqa: F403
|
|
|
|
from llama_stack.providers.datatypes import ShieldsProtocolPrivate
|
|
|
|
from .config import PromptGuardConfig, PromptGuardType
|
|
|
|
|
|
PROMPT_GUARD_MODEL = "Prompt-Guard-86M"
|
|
|
|
|
|
class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
|
|
def __init__(self, config: PromptGuardConfig, _deps) -> None:
|
|
self.config = config
|
|
|
|
async def initialize(self) -> None:
|
|
model_dir = model_local_dir(PROMPT_GUARD_MODEL)
|
|
self.shield = PromptGuardShield(model_dir, self.config)
|
|
|
|
async def shutdown(self) -> None:
|
|
pass
|
|
|
|
async def register_shield(self, shield: Shield) -> None:
|
|
if shield.shield_type != ShieldType.prompt_guard:
|
|
raise ValueError(f"Unsupported shield type: {shield.shield_type}")
|
|
|
|
async def run_shield(
|
|
self,
|
|
shield_id: str,
|
|
messages: List[Message],
|
|
params: Dict[str, Any] = None,
|
|
) -> RunShieldResponse:
|
|
shield = await self.shield_store.get_shield(shield_id)
|
|
if not shield:
|
|
raise ValueError(f"Unknown shield {shield_id}")
|
|
|
|
return await self.shield.run(messages)
|
|
|
|
|
|
class PromptGuardShield:
|
|
def __init__(
|
|
self,
|
|
model_dir: str,
|
|
config: PromptGuardConfig,
|
|
threshold: float = 0.9,
|
|
temperature: float = 1.0,
|
|
):
|
|
assert (
|
|
model_dir is not None
|
|
), "Must provide a model directory for prompt injection shield"
|
|
if temperature <= 0:
|
|
raise ValueError("Temperature must be greater than 0")
|
|
|
|
self.config = config
|
|
self.temperature = temperature
|
|
self.threshold = threshold
|
|
|
|
self.device = "cuda"
|
|
|
|
# load model and tokenizer
|
|
self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
|
self.model = AutoModelForSequenceClassification.from_pretrained(
|
|
model_dir, device_map=self.device
|
|
)
|
|
|
|
async def run(self, messages: List[Message]) -> RunShieldResponse:
|
|
message = messages[-1]
|
|
text = interleaved_text_media_as_str(message.content)
|
|
|
|
# run model on messages and return response
|
|
inputs = self.tokenizer(text, return_tensors="pt")
|
|
inputs = {name: tensor.to(self.model.device) for name, tensor in inputs.items()}
|
|
with torch.no_grad():
|
|
outputs = self.model(**inputs)
|
|
logits = outputs[0]
|
|
probabilities = torch.softmax(logits / self.temperature, dim=-1)
|
|
score_embedded = probabilities[0, 1].item()
|
|
score_malicious = probabilities[0, 2].item()
|
|
cprint(
|
|
f"Ran PromptGuardShield and got Scores: Embedded: {score_embedded}, Malicious: {score_malicious}",
|
|
color="magenta",
|
|
)
|
|
|
|
violation = None
|
|
if self.config.guard_type == PromptGuardType.injection.value and (
|
|
score_embedded + score_malicious > self.threshold
|
|
):
|
|
violation = SafetyViolation(
|
|
violation_level=ViolationLevel.ERROR,
|
|
user_message="Sorry, I cannot do this.",
|
|
metadata={
|
|
"violation_type": f"prompt_injection:embedded={score_embedded},malicious={score_malicious}",
|
|
},
|
|
)
|
|
elif (
|
|
self.config.guard_type == PromptGuardType.jailbreak.value
|
|
and score_malicious > self.threshold
|
|
):
|
|
violation = SafetyViolation(
|
|
violation_level=ViolationLevel.ERROR,
|
|
violation_type=f"prompt_injection:malicious={score_malicious}",
|
|
violation_return_message="Sorry, I cannot do this.",
|
|
)
|
|
|
|
return RunShieldResponse(violation=violation)
|