forked from phoenix-oss/llama-stack-mirror
# What does this PR do? - as title, cleaning up `import *`'s - upgrade tests to make them more robust to bad model outputs - remove import *'s in llama_stack/apis/* (skip __init__ modules) <img width="465" alt="image" src="https://github.com/user-attachments/assets/d8339c13-3b40-4ba5-9c53-0d2329726ee2" /> - run `sh run_openapi_generator.sh`, no types gets affected ## Test Plan ### Providers Tests **agents** ``` pytest -v -s llama_stack/providers/tests/agents/test_agents.py -m "together" --safety-shield meta-llama/Llama-Guard-3-8B --inference-model meta-llama/Llama-3.1-405B-Instruct-FP8 ``` **inference** ```bash # meta-reference torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py # together pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py pytest ./llama_stack/providers/tests/inference/test_prompt_adapter.py ``` **safety** ``` pytest -v -s llama_stack/providers/tests/safety/test_safety.py -m together --safety-shield meta-llama/Llama-Guard-3-8B ``` **memory** ``` pytest -v -s llama_stack/providers/tests/memory/test_memory.py -m "sentence_transformers" --env EMBEDDING_DIMENSION=384 ``` **scoring** ``` pytest -v -s -m llm_as_judge_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py --judge-model meta-llama/Llama-3.2-3B-Instruct pytest -v -s -m basic_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py pytest -v -s -m braintrust_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py ``` **datasetio** ``` pytest -v -s -m localfs llama_stack/providers/tests/datasetio/test_datasetio.py pytest -v -s -m huggingface llama_stack/providers/tests/datasetio/test_datasetio.py ``` **eval** ``` pytest -v -s -m meta_reference_eval_together_inference llama_stack/providers/tests/eval/test_eval.py pytest -v -s -m meta_reference_eval_together_inference_huggingface_datasetio llama_stack/providers/tests/eval/test_eval.py ``` ### Client-SDK Tests ``` LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk ``` ### llama-stack-apps ``` PORT=5000 LOCALHOST=localhost python -m examples.agents.hello $LOCALHOST $PORT python -m examples.agents.inflation $LOCALHOST $PORT python -m examples.agents.podcast_transcript $LOCALHOST $PORT python -m examples.agents.rag_as_attachments $LOCALHOST $PORT python -m examples.agents.rag_with_memory_bank $LOCALHOST $PORT python -m examples.safety.llama_guard_demo_mm $LOCALHOST $PORT python -m examples.agents.e2e_loop_with_custom_tools $LOCALHOST $PORT # Vision model python -m examples.interior_design_assistant.app python -m examples.agent_store.app $LOCALHOST $PORT ``` ### CLI ``` which llama llama model prompt-format -m Llama3.2-11B-Vision-Instruct llama model list llama stack list-apis llama stack list-providers inference llama stack build --template ollama --image-type conda ``` ### Distributions Tests **ollama** ``` llama stack build --template ollama --image-type conda ollama run llama3.2:1b-instruct-fp16 llama stack run ./llama_stack/templates/ollama/run.yaml --env INFERENCE_MODEL=meta-llama/Llama-3.2-1B-Instruct ``` **fireworks** ``` llama stack build --template fireworks --image-type conda llama stack run ./llama_stack/templates/fireworks/run.yaml ``` **together** ``` llama stack build --template together --image-type conda llama stack run ./llama_stack/templates/together/run.yaml ``` **tgi** ``` llama stack run ./llama_stack/templates/tgi/run.yaml --env TGI_URL=http://0.0.0.0:5009 --env INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct ``` ## Sources Please link relevant resources if necessary. ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests.
130 lines
4.3 KiB
Python
130 lines
4.3 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
import logging
|
|
from typing import Any, Dict, List
|
|
|
|
import torch
|
|
|
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
|
|
from llama_stack.apis.inference import Message
|
|
from llama_stack.apis.safety import (
|
|
RunShieldResponse,
|
|
Safety,
|
|
SafetyViolation,
|
|
ViolationLevel,
|
|
)
|
|
from llama_stack.apis.shields import Shield
|
|
|
|
from llama_stack.distribution.utils.model_utils import model_local_dir
|
|
from llama_stack.providers.datatypes import ShieldsProtocolPrivate
|
|
from llama_stack.providers.utils.inference.prompt_adapter import (
|
|
interleaved_content_as_str,
|
|
)
|
|
|
|
from .config import PromptGuardConfig, PromptGuardType
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
PROMPT_GUARD_MODEL = "Prompt-Guard-86M"
|
|
|
|
|
|
class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
|
|
def __init__(self, config: PromptGuardConfig, _deps) -> None:
|
|
self.config = config
|
|
|
|
async def initialize(self) -> None:
|
|
model_dir = model_local_dir(PROMPT_GUARD_MODEL)
|
|
self.shield = PromptGuardShield(model_dir, self.config)
|
|
|
|
async def shutdown(self) -> None:
|
|
pass
|
|
|
|
async def register_shield(self, shield: Shield) -> None:
|
|
if shield.provider_resource_id != PROMPT_GUARD_MODEL:
|
|
raise ValueError(
|
|
f"Only {PROMPT_GUARD_MODEL} is supported for Prompt Guard. "
|
|
)
|
|
|
|
async def run_shield(
|
|
self,
|
|
shield_id: str,
|
|
messages: List[Message],
|
|
params: Dict[str, Any] = None,
|
|
) -> RunShieldResponse:
|
|
shield = await self.shield_store.get_shield(shield_id)
|
|
if not shield:
|
|
raise ValueError(f"Unknown shield {shield_id}")
|
|
|
|
return await self.shield.run(messages)
|
|
|
|
|
|
class PromptGuardShield:
|
|
def __init__(
|
|
self,
|
|
model_dir: str,
|
|
config: PromptGuardConfig,
|
|
threshold: float = 0.9,
|
|
temperature: float = 1.0,
|
|
):
|
|
assert (
|
|
model_dir is not None
|
|
), "Must provide a model directory for prompt injection shield"
|
|
if temperature <= 0:
|
|
raise ValueError("Temperature must be greater than 0")
|
|
|
|
self.config = config
|
|
self.temperature = temperature
|
|
self.threshold = threshold
|
|
|
|
self.device = "cuda"
|
|
|
|
# load model and tokenizer
|
|
self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
|
|
self.model = AutoModelForSequenceClassification.from_pretrained(
|
|
model_dir, device_map=self.device
|
|
)
|
|
|
|
async def run(self, messages: List[Message]) -> RunShieldResponse:
|
|
message = messages[-1]
|
|
text = interleaved_content_as_str(message.content)
|
|
|
|
# run model on messages and return response
|
|
inputs = self.tokenizer(text, return_tensors="pt")
|
|
inputs = {name: tensor.to(self.model.device) for name, tensor in inputs.items()}
|
|
with torch.no_grad():
|
|
outputs = self.model(**inputs)
|
|
logits = outputs[0]
|
|
probabilities = torch.softmax(logits / self.temperature, dim=-1)
|
|
score_embedded = probabilities[0, 1].item()
|
|
score_malicious = probabilities[0, 2].item()
|
|
log.info(
|
|
f"Ran PromptGuardShield and got Scores: Embedded: {score_embedded}, Malicious: {score_malicious}",
|
|
)
|
|
|
|
violation = None
|
|
if self.config.guard_type == PromptGuardType.injection.value and (
|
|
score_embedded + score_malicious > self.threshold
|
|
):
|
|
violation = SafetyViolation(
|
|
violation_level=ViolationLevel.ERROR,
|
|
user_message="Sorry, I cannot do this.",
|
|
metadata={
|
|
"violation_type": f"prompt_injection:embedded={score_embedded},malicious={score_malicious}",
|
|
},
|
|
)
|
|
elif (
|
|
self.config.guard_type == PromptGuardType.jailbreak.value
|
|
and score_malicious > self.threshold
|
|
):
|
|
violation = SafetyViolation(
|
|
violation_level=ViolationLevel.ERROR,
|
|
violation_type=f"prompt_injection:malicious={score_malicious}",
|
|
violation_return_message="Sorry, I cannot do this.",
|
|
)
|
|
|
|
return RunShieldResponse(violation=violation)
|