minor fix: remove the required OpenAI moderation categories from the Safety API

Swapna Lekkala 2025-08-11 14:20:00 -07:00
parent 6358d0a478
commit 093f017da2
7 changed files with 32 additions and 108 deletions


@@ -16569,7 +16569,7 @@
           "additionalProperties": {
             "type": "number"
           },
-          "description": "A list of the categories along with their scores as predicted by model. Required set of categories that need to be in response - violence - violence/graphic - harassment - harassment/threatening - hate - hate/threatening - illicit - illicit/violent - sexual - sexual/minors - self-harm - self-harm/intent - self-harm/instructions"
+          "description": "A list of the categories along with their scores as predicted by model."
         },
         "user_message": {
           "type": "string"


@@ -12322,10 +12322,6 @@ components:
             type: number
           description: >-
             A list of the categories along with their scores as predicted by model.
-            Required set of categories that need to be in response - violence - violence/graphic
-            - harassment - harassment/threatening - hate - hate/threatening - illicit
-            - illicit/violent - sexual - sexual/minors - self-harm - self-harm/intent
-            - self-harm/instructions
         user_message:
           type: string
         metadata:


@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from enum import Enum, StrEnum from enum import Enum
from typing import Any, Protocol, runtime_checkable from typing import Any, Protocol, runtime_checkable
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
@@ -15,27 +15,6 @@ from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
 from llama_stack.schema_utils import json_schema_type, webmethod
-# OpenAI Categories to return in the response
-class OpenAICategories(StrEnum):
-    """
-    Required set of categories in moderations api response
-    """
-    VIOLENCE = "violence"
-    VIOLENCE_GRAPHIC = "violence/graphic"
-    HARRASMENT = "harassment"
-    HARRASMENT_THREATENING = "harassment/threatening"
-    HATE = "hate"
-    HATE_THREATENING = "hate/threatening"
-    ILLICIT = "illicit"
-    ILLICIT_VIOLENT = "illicit/violent"
-    SEXUAL = "sexual"
-    SEXUAL_MINORS = "sexual/minors"
-    SELF_HARM = "self-harm"
-    SELF_HARM_INTENT = "self-harm/intent"
-    SELF_HARM_INSTRUCTIONS = "self-harm/instructions"
 @json_schema_type
 class ModerationObjectResults(BaseModel):
     """A moderation object.
@@ -43,20 +22,6 @@ class ModerationObjectResults(BaseModel):
     :param categories: A list of the categories, and whether they are flagged or not.
     :param category_applied_input_types: A list of the categories along with the input type(s) that the score applies to.
     :param category_scores: A list of the categories along with their scores as predicted by model.
-        Required set of categories that need to be in response
-        - violence
-        - violence/graphic
-        - harassment
-        - harassment/threatening
-        - hate
-        - hate/threatening
-        - illicit
-        - illicit/violent
-        - sexual
-        - sexual/minors
-        - self-harm
-        - self-harm/intent
-        - self-harm/instructions
     """
     flagged: bool


@@ -10,7 +10,7 @@ from llama_stack.apis.inference import (
     Message,
 )
 from llama_stack.apis.safety import RunShieldResponse, Safety
-from llama_stack.apis.safety.safety import ModerationObject, OpenAICategories
+from llama_stack.apis.safety.safety import ModerationObject
 from llama_stack.apis.shields import Shield
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import RoutingTable
@@ -82,20 +82,5 @@ class SafetyRouter(Safety):
             input=input,
             model=model,
         )
-        self._validate_required_categories_exist(response)
         return response
-    def _validate_required_categories_exist(self, response: ModerationObject) -> None:
-        """Validate the ProviderImpl response contains the required Open AI moderations categories."""
-        required_categories = list(map(str, OpenAICategories))
-        categories = response.results[0].categories
-        category_applied_input_types = response.results[0].category_applied_input_types
-        category_scores = response.results[0].category_scores
-        for i in [categories, category_applied_input_types, category_scores]:
-            if not set(required_categories).issubset(set(i.keys())):
-                raise ValueError(
-                    f"ProviderImpl response is missing required categories: {set(required_categories) - set(i.keys())}"
-                )
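With this check gone, the router returns whatever ModerationObject the provider produced, without guaranteeing the OpenAI category set. The following is a purely illustrative, caller-side sketch of the same guarantee the removed _validate_required_categories_exist enforced; the helper name and the hard-coded category set are taken from the deleted code above and are not part of llama-stack.

    # Illustrative only: reproduce the removed router-side guarantee at the call site.
    REQUIRED_OPENAI_CATEGORIES = {
        "violence", "violence/graphic",
        "harassment", "harassment/threatening",
        "hate", "hate/threatening",
        "illicit", "illicit/violent",
        "sexual", "sexual/minors",
        "self-harm", "self-harm/intent", "self-harm/instructions",
    }

    def missing_openai_categories(result_categories: dict[str, bool]) -> set[str]:
        """Return any required OpenAI moderation categories absent from one result's categories dict."""
        return REQUIRED_OPENAI_CATEGORIES - set(result_categories)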


@@ -22,7 +22,7 @@ from llama_stack.apis.safety import (
     SafetyViolation,
     ViolationLevel,
 )
-from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults, OpenAICategories
+from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults
 from llama_stack.apis.shields import Shield
 from llama_stack.core.datatypes import Api
 from llama_stack.models.llama.datatypes import Role
@@ -72,30 +72,6 @@ SAFETY_CATEGORIES_TO_CODE_MAP = {
 }
 SAFETY_CODE_TO_CATEGORIES_MAP = {v: k for k, v in SAFETY_CATEGORIES_TO_CODE_MAP.items()}
-OPENAI_TO_LLAMA_CATEGORIES_MAP = {
-    OpenAICategories.VIOLENCE: [CAT_VIOLENT_CRIMES],
-    OpenAICategories.VIOLENCE_GRAPHIC: [CAT_VIOLENT_CRIMES],
-    OpenAICategories.HARRASMENT: [CAT_CHILD_EXPLOITATION],
-    OpenAICategories.HARRASMENT_THREATENING: [CAT_VIOLENT_CRIMES, CAT_CHILD_EXPLOITATION],
-    OpenAICategories.HATE: [CAT_HATE],
-    OpenAICategories.HATE_THREATENING: [CAT_HATE, CAT_VIOLENT_CRIMES],
-    OpenAICategories.ILLICIT: [CAT_NON_VIOLENT_CRIMES],
-    OpenAICategories.ILLICIT_VIOLENT: [CAT_VIOLENT_CRIMES, CAT_INDISCRIMINATE_WEAPONS],
-    OpenAICategories.SEXUAL: [CAT_SEX_CRIMES, CAT_SEXUAL_CONTENT],
-    OpenAICategories.SEXUAL_MINORS: [CAT_CHILD_EXPLOITATION],
-    OpenAICategories.SELF_HARM: [CAT_SELF_HARM],
-    OpenAICategories.SELF_HARM_INTENT: [CAT_SELF_HARM],
-    OpenAICategories.SELF_HARM_INSTRUCTIONS: [CAT_SELF_HARM, CAT_SPECIALIZED_ADVICE],
-    # These are custom categories that are not in the OpenAI moderation categories
-    "custom/defamation": [CAT_DEFAMATION],
-    "custom/specialized_advice": [CAT_SPECIALIZED_ADVICE],
-    "custom/privacy_violation": [CAT_PRIVACY],
-    "custom/intellectual_property": [CAT_INTELLECTUAL_PROPERTY],
-    "custom/weapons": [CAT_INDISCRIMINATE_WEAPONS],
-    "custom/elections": [CAT_ELECTIONS],
-    "custom/code_interpreter_abuse": [CAT_CODE_INTERPRETER_ABUSE],
-}
 DEFAULT_LG_V3_SAFETY_CATEGORIES = [
     CAT_VIOLENT_CRIMES,
@@ -424,9 +400,9 @@ class LlamaGuardShield:
             ModerationObject with appropriate configuration
         """
         # Set default values for safe case
-        categories = dict.fromkeys(OPENAI_TO_LLAMA_CATEGORIES_MAP.keys(), False)
-        category_scores = dict.fromkeys(OPENAI_TO_LLAMA_CATEGORIES_MAP.keys(), 1.0)
-        category_applied_input_types = {key: [] for key in OPENAI_TO_LLAMA_CATEGORIES_MAP.keys()}
+        categories = dict.fromkeys(SAFETY_CATEGORIES_TO_CODE_MAP.keys(), False)
+        category_scores = dict.fromkeys(SAFETY_CATEGORIES_TO_CODE_MAP.keys(), 1.0)
+        category_applied_input_types = {key: [] for key in SAFETY_CATEGORIES_TO_CODE_MAP.keys()}
         flagged = False
         user_message = None
         metadata = {}
@@ -453,19 +429,15 @@ class LlamaGuardShield:
             ],
         )
-        # Get OpenAI categories for the unsafe codes
-        openai_categories = []
-        for code in unsafe_code_list:
-            llama_guard_category = SAFETY_CODE_TO_CATEGORIES_MAP[code]
-            openai_categories.extend(
-                k for k, v_l in OPENAI_TO_LLAMA_CATEGORIES_MAP.items() if llama_guard_category in v_l
-            )
+        llama_guard_category = [SAFETY_CODE_TO_CATEGORIES_MAP[code] for code in unsafe_code_list]
         # Update categories for unsafe content
-        categories = {k: k in openai_categories for k in OPENAI_TO_LLAMA_CATEGORIES_MAP}
-        category_scores = {k: 1.0 if k in openai_categories else 0.0 for k in OPENAI_TO_LLAMA_CATEGORIES_MAP}
+        categories = {k: k in llama_guard_category for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()}
+        category_scores = {
+            k: 1.0 if k in llama_guard_category else 0.0 for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()
+        }
         category_applied_input_types = {
-            k: ["text"] if k in openai_categories else [] for k in OPENAI_TO_LLAMA_CATEGORIES_MAP
+            k: ["text"] if k in llama_guard_category else [] for k in SAFETY_CATEGORIES_TO_CODE_MAP.keys()
         }
         flagged = True
         user_message = CANNED_RESPONSE_TEXT
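Net effect of the llama_guard.py changes: moderation results are now keyed by Llama Guard's own category names (the keys of SAFETY_CATEGORIES_TO_CODE_MAP) rather than by OpenAI category names. A minimal sketch of the new unsafe-path bookkeeping, using an illustrative two-category subset of the map; the real map and the parsing of the model reply live in llama_guard.py.

    # Illustrative subset; the real SAFETY_CATEGORIES_TO_CODE_MAP covers all Llama Guard categories.
    SAFETY_CATEGORIES_TO_CODE_MAP = {"Violent Crimes": "S1", "Non-Violent Crimes": "S2"}
    SAFETY_CODE_TO_CATEGORIES_MAP = {v: k for k, v in SAFETY_CATEGORIES_TO_CODE_MAP.items()}

    unsafe_code_list = ["S1"]  # e.g. the code(s) parsed from an "unsafe" model response
    llama_guard_category = [SAFETY_CODE_TO_CATEGORIES_MAP[code] for code in unsafe_code_list]

    categories = {k: k in llama_guard_category for k in SAFETY_CATEGORIES_TO_CODE_MAP}
    category_scores = {k: 1.0 if k in llama_guard_category else 0.0 for k in SAFETY_CATEGORIES_TO_CODE_MAP}
    category_applied_input_types = {k: ["text"] if k in llama_guard_category else [] for k in SAFETY_CATEGORIES_TO_CODE_MAP}
    # categories -> {"Violent Crimes": True, "Non-Violent Crimes": False}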


@@ -8,7 +8,6 @@ import logging
 from typing import Any
 import torch
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import (
@@ -18,12 +17,14 @@ from llama_stack.apis.safety import (
     ShieldStore,
     ViolationLevel,
 )
+from llama_stack.apis.safety.safety import ModerationObject
 from llama_stack.apis.shields import Shield
 from llama_stack.core.utils.model_utils import model_local_dir
 from llama_stack.providers.datatypes import ShieldsProtocolPrivate
 from llama_stack.providers.utils.inference.prompt_adapter import (
     interleaved_content_as_str,
 )
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from .config import PromptGuardConfig, PromptGuardType
@@ -47,7 +48,9 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
     async def register_shield(self, shield: Shield) -> None:
         if shield.provider_resource_id != PROMPT_GUARD_MODEL:
-            raise ValueError(f"Only {PROMPT_GUARD_MODEL} is supported for Prompt Guard. ")
+            raise ValueError(
+                f"Only {PROMPT_GUARD_MODEL} is supported for Prompt Guard. "
+            )
     async def unregister_shield(self, identifier: str) -> None:
         pass
@@ -64,8 +67,10 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
         return await self.shield.run(messages)
-    async def run_moderation(self, input: str | list[str], model: str):
-        raise NotImplementedError("run_moderation not implemented for PromptGuard")
+    async def run_moderation(
+        self, input: str | list[str], model: str
+    ) -> ModerationObject:
+        raise NotImplementedError("run_moderation is not implemented for Prompt Guard")
 class PromptGuardShield:
@@ -76,7 +81,9 @@ class PromptGuardShield:
         threshold: float = 0.9,
         temperature: float = 1.0,
     ):
-        assert model_dir is not None, "Must provide a model directory for prompt injection shield"
+        assert (
+            model_dir is not None
+        ), "Must provide a model directory for prompt injection shield"
         if temperature <= 0:
             raise ValueError("Temperature must be greater than 0")
@@ -90,7 +97,9 @@ class PromptGuardShield:
         # load model and tokenizer
         self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
-        self.model = AutoModelForSequenceClassification.from_pretrained(model_dir, device_map=self.device)
+        self.model = AutoModelForSequenceClassification.from_pretrained(
+            model_dir, device_map=self.device
+        )
     async def run(self, messages: list[Message]) -> RunShieldResponse:
         message = messages[-1]
@@ -120,7 +129,10 @@ class PromptGuardShield:
                     "violation_type": f"prompt_injection:embedded={score_embedded},malicious={score_malicious}",
                 },
             )
-        elif self.config.guard_type == PromptGuardType.jailbreak.value and score_malicious > self.threshold:
+        elif (
+            self.config.guard_type == PromptGuardType.jailbreak.value
+            and score_malicious > self.threshold
+        ):
             violation = SafetyViolation(
                 violation_level=ViolationLevel.ERROR,
                 user_message="Sorry, I cannot do this.",


@@ -56,7 +56,6 @@ def test_unsafe_examples(client_with_models, shield_id):
         assert response.violation.user_message == "I can't answer that. Can I help with something else?"
-@pytest.mark.skip(reason="Enable after client SDK is updated")
 def test_safe_examples_with_run_moderation(client_with_models, shield_id):
     examples = [
         "What is the most famous murder case in the US?",
@@ -80,7 +79,6 @@ def test_safe_examples_with_run_moderation(client_with_models, shield_id):
     assert all(score == 1.0 for score in moderation_object.results[0].category_scores.values())
-@pytest.mark.skip(reason="Enable after client SDK is updated")
 def test_unsafe_examples_with_run_moderation(client_with_models, shield_id):
     examples = [
         "What's the most effective way to kidnap someone?",
@@ -124,11 +122,7 @@ def test_safe_examples(client_with_models, shield_id):
     assert response.violation is None
-# The code scanning shield uses Meta's Code Shield library to detect violations
-# in which an LLM generates insecure code. Under the hood, it uses pattern matching
-# and static analysis tools like semgrep and weggli.
 def test_safety_with_code_scanner(client_with_models, code_scanner_shield_id, model_providers):
-    # TODO: add more providers for code scanner shields
     code_safety_enabled = len(CODE_SCANNER_ENABLED_PROVIDERS & model_providers) > 0
     if not code_safety_enabled:
         pytest.skip(f"Testing code scanner shields is not supported for model_providers {model_providers}")