mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-16 01:53:10 +00:00
[API Updates] Model / shield / memory-bank routing + agent persistence + support for private headers (#92)
This is yet another of those large PRs (hopefully we will have fewer and fewer of them as things mature). This one introduces substantial improvements and some simplifications to the stack. The most important bits:

* The Agents reference implementation now supports session / turn persistence. The default implementation uses sqlite, but there is also support for using Redis.
* We have re-architected the structure of the Stack APIs to allow for more flexible routing. The motivating use cases are:
  - routing model A to ollama and model B to a remote provider like Together
  - routing shield A to a local impl while shield B goes to a remote provider like Bedrock
  - routing a vector memory bank to Weaviate while routing a keyvalue memory bank to Redis
* Support for provider-specific parameters passed from clients. A client can pass data using the `x_llamastack_provider_data` parameter, which can be type-checked and provided to the Adapter implementations.
This commit is contained in:
parent 8bf8c07eb3
commit ec4fc800cc

130 changed files with 9701 additions and 11227 deletions
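To make the last bullet concrete, here is a minimal sketch of a client passing provider data. Only the `x_llamastack_provider_data` name comes from this PR; the endpoint path, payload shape, `together_api_key` field, and the choice of carrying it as a header are all illustrative assumptions.

```python
# Illustrative sketch of the x_llamastack_provider_data mechanism named in the
# commit message. The endpoint path, request shape, and the together_api_key
# field are hypothetical; only the parameter name comes from this PR.
import json
import urllib.request

provider_data = {"together_api_key": "sk-..."}  # hypothetical adapter-specific field

request = urllib.request.Request(
    "http://localhost:5000/inference/chat_completion",  # hypothetical endpoint
    data=json.dumps(
        {
            "model": "Llama3.1-8B-Instruct",
            "messages": [{"role": "user", "content": "hello"}],
        }
    ).encode("utf-8"),
    headers={
        "Content-Type": "application/json",
        # rendered here as a header; the exact transport may differ
        "x_llamastack_provider_data": json.dumps(provider_data),
    },
)
# urllib.request.urlopen(request) would send it to a running stack server
```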
@@ -4,6 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum
from typing import List, Optional

from llama_models.sku_list import CoreModelId, safety_models

@@ -11,6 +12,13 @@ from llama_models.sku_list import CoreModelId, safety_models
from pydantic import BaseModel, validator


class MetaReferenceShieldType(Enum):
    llama_guard = "llama_guard"
    code_scanner_guard = "code_scanner_guard"
    injection_shield = "injection_shield"
    jailbreak_shield = "jailbreak_shield"


class LlamaGuardShieldConfig(BaseModel):
    model: str = "Llama-Guard-3-8B"
    excluded_categories: List[str] = []
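A quick construction sketch for the config types added above. `SafetyConfig` itself is not shown in this diff; its `llama_guard_shield` field name is inferred from the `get_shield_impl` hunk further down, so treat the composition as an assumption.

```python
# Sketch only: composing the config types from the hunk above. SafetyConfig's
# field name is inferred from cfg.llama_guard_shield usage later in this
# diff, so treat it as an assumption.
llama_guard = LlamaGuardShieldConfig(
    model="Llama-Guard-3-8B",
    excluded_categories=["S7"],  # illustrative category code
)
safety_config = SafetyConfig(llama_guard_shield=llama_guard)
```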
@@ -4,14 +4,14 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio

from llama_models.sku_list import resolve_model

from llama_stack.distribution.utils.model_utils import model_local_dir
from llama_stack.apis.safety import * # noqa
from llama_stack.apis.safety import * # noqa: F403
from llama_models.llama3.api.datatypes import * # noqa: F403

from .config import MetaReferenceShieldType, SafetyConfig

from .config import SafetyConfig
from .shields import (
    CodeScannerShield,
    InjectionShield,

@@ -19,7 +19,6 @@ from .shields import (
    LlamaGuardShield,
    PromptGuardShield,
    ShieldBase,
    ThirdPartyShield,
)

@@ -50,46 +49,58 @@ class MetaReferenceSafetyImpl(Safety):
            model_dir = resolve_and_get_path(shield_cfg.model)
            _ = PromptGuardShield.instance(model_dir)

    async def run_shields(
    async def run_shield(
        self,
        shield_type: str,
        messages: List[Message],
        shields: List[ShieldDefinition],
        params: Dict[str, Any] = None,
    ) -> RunShieldResponse:
        shields = [shield_config_to_shield(c, self.config) for c in shields]
        available_shields = [v.value for v in MetaReferenceShieldType]
        assert shield_type in available_shields, f"Unknown shield {shield_type}"

        responses = await asyncio.gather(*[shield.run(messages) for shield in shields])
        shield = self.get_shield_impl(MetaReferenceShieldType(shield_type))

        return RunShieldResponse(responses=responses)
        messages = messages.copy()
        # some shields like llama-guard require the first message to be a user message
        # since this might be a tool call, first role might not be user
        if len(messages) > 0 and messages[0].role != Role.user.value:
            messages[0] = UserMessage(content=messages[0].content)

        # TODO: we can refactor ShieldBase, etc. to be inline with the API types
        res = await shield.run(messages)
        violation = None
        if res.is_violation:
            violation = SafetyViolation(
                violation_level=ViolationLevel.ERROR,
                user_message=res.violation_return_message,
                metadata={
                    "violation_type": res.violation_type,
                },
            )

def shield_type_equals(a: ShieldType, b: ShieldType):
    return a == b or a == b.value
        return RunShieldResponse(violation=violation)


def shield_config_to_shield(
    sc: ShieldDefinition, safety_config: SafetyConfig
) -> ShieldBase:
    if shield_type_equals(sc.shield_type, BuiltinShield.llama_guard):
        assert (
            safety_config.llama_guard_shield is not None
        ), "Cannot use LlamaGuardShield since not present in config"
        model_dir = resolve_and_get_path(safety_config.llama_guard_shield.model)
        return LlamaGuardShield.instance(model_dir=model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.jailbreak_shield):
        assert (
            safety_config.prompt_guard_shield is not None
        ), "Cannot use Jailbreak Shield since Prompt Guard not present in config"
        model_dir = resolve_and_get_path(safety_config.prompt_guard_shield.model)
        return JailbreakShield.instance(model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.injection_shield):
        assert (
            safety_config.prompt_guard_shield is not None
        ), "Cannot use PromptGuardShield since not present in config"
        model_dir = resolve_and_get_path(safety_config.prompt_guard_shield.model)
        return InjectionShield.instance(model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.code_scanner_guard):
        return CodeScannerShield.instance()
    elif shield_type_equals(sc.shield_type, BuiltinShield.third_party_shield):
        return ThirdPartyShield.instance()
    else:
        raise ValueError(f"Unknown shield type: {sc.shield_type}")
    def get_shield_impl(self, typ: MetaReferenceShieldType) -> ShieldBase:
        cfg = self.config
        if typ == MetaReferenceShieldType.llama_guard:
            assert (
                cfg.llama_guard_shield is not None
            ), "Cannot use LlamaGuardShield since not present in config"
            model_dir = resolve_and_get_path(cfg.llama_guard_shield.model)
            return LlamaGuardShield.instance(model_dir=model_dir)
        elif typ == MetaReferenceShieldType.jailbreak_shield:
            assert (
                cfg.prompt_guard_shield is not None
            ), "Cannot use Jailbreak Shield since Prompt Guard not present in config"
            model_dir = resolve_and_get_path(cfg.prompt_guard_shield.model)
            return JailbreakShield.instance(model_dir)
        elif typ == MetaReferenceShieldType.injection_shield:
            assert (
                cfg.prompt_guard_shield is not None
            ), "Cannot use PromptGuardShield since not present in config"
            model_dir = resolve_and_get_path(cfg.prompt_guard_shield.model)
            return InjectionShield.instance(model_dir)
        elif typ == MetaReferenceShieldType.code_scanner_guard:
            return CodeScannerShield.instance()
        else:
            raise ValueError(f"Unknown shield type: {typ}")

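The net effect of this hunk: the batch-style `run_shields(messages, shields)` becomes a routed, single-shield `run_shield(shield_type, messages, params)` that returns a `RunShieldResponse` carrying an optional `violation`. A hedged caller sketch, assuming an already-initialized `MetaReferenceSafetyImpl` named `safety`:

```python
# Sketch of calling the new run_shield() surface shown above. `safety` is
# assumed to be an initialized MetaReferenceSafetyImpl; UserMessage is the
# llama_models datatype used elsewhere in this diff.
import asyncio

from llama_models.llama3.api.datatypes import UserMessage

async def check(safety, text: str) -> bool:
    response = await safety.run_shield(
        shield_type="llama_guard",  # must match a MetaReferenceShieldType value
        messages=[UserMessage(content=text)],
    )
    if response.violation is not None:
        # the violation carries a level, a user-facing message, and metadata
        print(response.violation.user_message)
        return False
    return True

# asyncio.run(check(safety, "how do I bake bread?"))
```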
@@ -15,7 +15,6 @@ from .base import ( # noqa: F401
    TextShield,
)
from .code_scanner import CodeScannerShield # noqa: F401
from .contrib.third_party_shield import ThirdPartyShield # noqa: F401
from .llama_guard import LlamaGuardShield # noqa: F401
from .prompt_guard import ( # noqa: F401
    InjectionShield,

@@ -8,11 +8,26 @@ from abc import ABC, abstractmethod
from typing import List

from llama_models.llama3.api.datatypes import interleaved_text_media_as_str, Message
from pydantic import BaseModel
from llama_stack.apis.safety import * # noqa: F403

CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"


# TODO: clean this up; just remove this type completely
class ShieldResponse(BaseModel):
    is_violation: bool
    violation_type: Optional[str] = None
    violation_return_message: Optional[str] = None


# TODO: this is a caller / agent concern
class OnViolationAction(Enum):
    IGNORE = 0
    WARN = 1
    RAISE = 2


class ShieldBase(ABC):
    def __init__(
        self,

@@ -20,10 +35,6 @@ class ShieldBase(ABC):
    ):
        self.on_violation_action = on_violation_action

    @abstractmethod
    def get_shield_type(self) -> ShieldType:
        raise NotImplementedError()

    @abstractmethod
    async def run(self, messages: List[Message]) -> ShieldResponse:
        raise NotImplementedError()

@@ -48,11 +59,6 @@ class TextShield(ShieldBase):


class DummyShield(TextShield):
    def get_shield_type(self) -> ShieldType:
        return "dummy"

    async def run_impl(self, text: str) -> ShieldResponse:
        # Dummy return LOW to test e2e
        return ShieldResponse(
            shield_type=BuiltinShield.third_party_shield, is_violation=False
        )
        return ShieldResponse(is_violation=False)

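With `get_shield_type()` gone and `shield_type` dropped from `ShieldResponse`, a shield now only implements `run()` (or `run_impl()` for a `TextShield`). A minimal custom shield against the new contract, purely illustrative: the banned-word check is a stand-in for real moderation logic, and the import path is the one used by the deleted third_party_shield module further down.

```python
# Illustrative only: a trivial shield written against the slimmed-down
# ShieldBase contract from the hunks above. The banned-word check is a
# stand-in for real moderation logic.
from typing import List

from llama_models.llama3.api.datatypes import Message
from llama_stack.providers.impls.meta_reference.safety.shields.base import (
    CANNED_RESPONSE_TEXT,
    ShieldBase,
    ShieldResponse,
)

class BannedWordShield(ShieldBase):
    BANNED = {"super-secret-codeword"}

    async def run(self, messages: List[Message]) -> ShieldResponse:
        text = " ".join(str(m.content) for m in messages)
        if any(word in text for word in self.BANNED):
            return ShieldResponse(
                is_violation=True,
                violation_type="banned_word",
                violation_return_message=CANNED_RESPONSE_TEXT,
            )
        return ShieldResponse(is_violation=False)
```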
@@ -7,13 +7,9 @@
from termcolor import cprint

from .base import ShieldResponse, TextShield
from llama_stack.apis.safety import * # noqa: F403


class CodeScannerShield(TextShield):
    def get_shield_type(self) -> ShieldType:
        return BuiltinShield.code_scanner_guard

    async def run_impl(self, text: str) -> ShieldResponse:
        from codeshield.cs import CodeShield

@@ -21,7 +17,6 @@ class CodeScannerShield(TextShield):
        result = await CodeShield.scan_code(text)
        if result.is_insecure:
            return ShieldResponse(
                shield_type=BuiltinShield.code_scanner_guard,
                is_violation=True,
                violation_type=",".join(
                    [issue.pattern_id for issue in result.issues_found]

@@ -29,6 +24,4 @@ class CodeScannerShield(TextShield):
                violation_return_message="Sorry, I found security concerns in the code.",
            )
        else:
            return ShieldResponse(
                shield_type=BuiltinShield.code_scanner_guard, is_violation=False
            )
        return ShieldResponse(is_violation=False)

@@ -1,5 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

@@ -1,35 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_models.llama3.api.datatypes import Message

from llama_stack.providers.impls.meta_reference.safety.shields.base import (
    OnViolationAction,
    ShieldBase,
    ShieldResponse,
)

_INSTANCE = None


class ThirdPartyShield(ShieldBase):
    @staticmethod
    def instance(on_violation_action=OnViolationAction.RAISE) -> "ThirdPartyShield":
        global _INSTANCE
        if _INSTANCE is None:
            _INSTANCE = ThirdPartyShield(on_violation_action)
        return _INSTANCE

    def __init__(
        self,
        on_violation_action: OnViolationAction = OnViolationAction.RAISE,
    ):
        super().__init__(on_violation_action)

    async def run(self, messages: List[Message]) -> ShieldResponse:
        super.run() # will raise NotImplementedError

@@ -14,7 +14,7 @@ from llama_models.llama3.api.datatypes import Message, Role
from transformers import AutoModelForCausalLM, AutoTokenizer

from .base import CANNED_RESPONSE_TEXT, OnViolationAction, ShieldBase, ShieldResponse
from llama_stack.apis.safety import * # noqa: F403


SAFE_RESPONSE = "safe"
_INSTANCE = None

@@ -152,9 +152,6 @@ class LlamaGuardShield(ShieldBase):
            model_dir, torch_dtype=torch_dtype, device_map=self.device
        )

    def get_shield_type(self) -> ShieldType:
        return BuiltinShield.llama_guard

    def check_unsafe_response(self, response: str) -> Optional[str]:
        match = re.match(r"^unsafe\n(.*)$", response)
        if match:

@@ -192,18 +189,13 @@ class LlamaGuardShield(ShieldBase):

    def get_shield_response(self, response: str) -> ShieldResponse:
        if response == SAFE_RESPONSE:
            return ShieldResponse(
                shield_type=BuiltinShield.llama_guard, is_violation=False
            )
            return ShieldResponse(is_violation=False)
        unsafe_code = self.check_unsafe_response(response)
        if unsafe_code:
            unsafe_code_list = unsafe_code.split(",")
            if set(unsafe_code_list).issubset(set(self.excluded_categories)):
                return ShieldResponse(
                    shield_type=BuiltinShield.llama_guard, is_violation=False
                )
                return ShieldResponse(is_violation=False)
        return ShieldResponse(
            shield_type=BuiltinShield.llama_guard,
            is_violation=True,
            violation_type=unsafe_code,
            violation_return_message=CANNED_RESPONSE_TEXT,

@@ -213,12 +205,9 @@ class LlamaGuardShield(ShieldBase):

    async def run(self, messages: List[Message]) -> ShieldResponse:
        if self.disable_input_check and messages[-1].role == Role.user.value:
            return ShieldResponse(
                shield_type=BuiltinShield.llama_guard, is_violation=False
            )
            return ShieldResponse(is_violation=False)
        elif self.disable_output_check and messages[-1].role == Role.assistant.value:
            return ShieldResponse(
                shield_type=BuiltinShield.llama_guard,
                is_violation=False,
            )
            return ShieldResponse(is_violation=False)
        else:

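The response parsing in `get_shield_response` above distills to a small pure function. A standalone restatement of the same `safe` / `unsafe\n<codes>` convention with category exclusion (all names here are local to this example):

```python
# Standalone restatement of the parsing rules in get_shield_response above:
# "safe" means no violation; "unsafe\nS1,S2" lists violated category codes,
# and a violation is suppressed when every code is excluded.
import re
from typing import List, Optional

def parse_llama_guard(response: str, excluded_categories: List[str]) -> Optional[str]:
    """Return the violating category codes, or None when the input is safe."""
    if response == "safe":
        return None
    match = re.match(r"^unsafe\n(.*)$", response)
    if not match:
        return None  # unrecognized output is treated as safe in this sketch
    codes = match.group(1)
    if set(codes.split(",")).issubset(set(excluded_categories)):
        return None
    return codes

assert parse_llama_guard("safe", []) is None
assert parse_llama_guard("unsafe\nS1", []) == "S1"
assert parse_llama_guard("unsafe\nS1,S2", ["S1", "S2"]) is None
```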
@@ -13,7 +13,6 @@ from llama_models.llama3.api.datatypes import Message
from termcolor import cprint

from .base import message_content_as_str, OnViolationAction, ShieldResponse, TextShield
from llama_stack.apis.safety import * # noqa: F403


class PromptGuardShield(TextShield):

@@ -74,13 +73,6 @@ class PromptGuardShield(TextShield):
        self.threshold = threshold
        self.mode = mode

    def get_shield_type(self) -> ShieldType:
        return (
            BuiltinShield.jailbreak_shield
            if self.mode == self.Mode.JAILBREAK
            else BuiltinShield.injection_shield
        )

    def convert_messages_to_text(self, messages: List[Message]) -> str:
        return message_content_as_str(messages[-1])

@@ -103,21 +95,18 @@ class PromptGuardShield(TextShield):
            score_embedded + score_malicious > self.threshold
        ):
            return ShieldResponse(
                shield_type=self.get_shield_type(),
                is_violation=True,
                violation_type=f"prompt_injection:embedded={score_embedded},malicious={score_malicious}",
                violation_return_message="Sorry, I cannot do this.",
            )
        elif self.mode == self.Mode.JAILBREAK and score_malicious > self.threshold:
            return ShieldResponse(
                shield_type=self.get_shield_type(),
                is_violation=True,
                violation_type=f"prompt_injection:malicious={score_malicious}",
                violation_return_message="Sorry, I cannot do this.",
            )

        return ShieldResponse(
            shield_type=self.get_shield_type(),
            is_violation=False,
        )
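The thresholding in this last hunk reduces to a small decision rule: INJECTION mode fires when the embedded and malicious scores together exceed the threshold, JAILBREAK mode when the malicious score alone does. A standalone restatement (the first half of the INJECTION condition is elided by the hunk and assumed from context; the threshold value is illustrative):

```python
# Standalone restatement of the PromptGuardShield decision rule above. The
# `mode == INJECTION` half of the first condition is assumed from context,
# since the hunk starts mid-expression; 0.9 is an illustrative threshold.
from enum import Enum

class Mode(Enum):
    INJECTION = 1
    JAILBREAK = 2

def is_violation(mode: Mode, score_embedded: float, score_malicious: float,
                 threshold: float = 0.9) -> bool:
    if mode == Mode.INJECTION:
        return score_embedded + score_malicious > threshold
    return score_malicious > threshold

assert is_violation(Mode.INJECTION, 0.6, 0.6)      # 1.2 > 0.9
assert not is_violation(Mode.JAILBREAK, 0.6, 0.2)  # 0.2 <= 0.9
```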