Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 01:48:05 +00:00)

Merge ef8df659f1 into 4237eb4aaa

Commit ddb49172d4
2 changed files with 41 additions and 54 deletions
@@ -125,43 +125,57 @@ class NeMoGuardrails:
     async def run(self, messages: list[OpenAIMessageParam]) -> RunShieldResponse:
         """
-        Queries the /v1/guardrails/checks endpoint of the NeMo guardrails deployed API.
+        Queries the /v1/chat/completions endpoint of the NeMo guardrails deployed API.
 
         Args:
             messages (List[Message]): A list of Message objects to be checked for safety violations.
 
         Returns:
-            RunShieldResponse: If the response indicates a violation ("blocked" status), returns a
-            RunShieldResponse with a SafetyViolation; otherwise, returns a RunShieldResponse with violation set to None.
+            RunShieldResponse: Response with SafetyViolation if content is blocked, None otherwise.
 
         Raises:
             requests.HTTPError: If the POST request fails.
         """
         request_data = {
-            "model": self.model,
-            "messages": [{"role": message.role, "content": message.content} for message in messages],
-            "temperature": self.temperature,
-            "top_p": 1,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-            "max_tokens": 160,
-            "stream": False,
-            "guardrails": {
-                "config_id": self.config_id,
-            },
+            "config_id": self.config_id,
+            "messages": [{"role": message.role, "content": message.content} for message in messages],
         }
-        response = await self._guardrails_post(path="/v1/guardrail/checks", data=request_data)
+        response = await self._guardrails_post(path="/v1/chat/completions", data=request_data)
 
-        if response["status"] == "blocked":
-            user_message = "Sorry I cannot do this."
-            metadata = response["rails_status"]
+        # Support legacy format with explicit status field
+        if "status" in response and response["status"] == "blocked":
+            return RunShieldResponse(
+                violation=SafetyViolation(
+                    user_message="Sorry I cannot do this.",
+                    violation_level=ViolationLevel.ERROR,
+                    metadata=response.get("rails_status", {}),
+                )
+            )
 
+        # NOTE: The implementation targets the actual behavior of the NeMo Guardrails server
+        # as defined in 'nemoguardrails/server/api.py'. The 'RequestBody' class accepts
+        # 'config_id' at the top level, and 'ResponseBody' returns a 'messages' array,
+        # distinct from the OpenAI 'choices' format often referenced in documentation.
+        response_messages = response.get("messages", [])
+        if response_messages:
+            content = response_messages[0].get("content", "").strip()
+        else:
+            choices = response.get("choices", [])
+            if choices:
+                content = choices[0].get("message", {}).get("content", "").strip()
+            else:
+                content = ""
+
+        refusal_phrases = ["sorry i cannot do this", "i cannot help with that", "i can't assist with that"]
+        is_blocked = not content or any(phrase in content.lower() for phrase in refusal_phrases)
 
         return RunShieldResponse(
             violation=SafetyViolation(
-                user_message=user_message,
+                user_message="Sorry I cannot do this.",
                 violation_level=ViolationLevel.ERROR,
-                metadata=metadata,
+                metadata={"reason": "Content violates safety guidelines", "response": content or "(empty)"},
             )
+            if is_blocked
+            else None
         )
-
-        return RunShieldResponse(violation=None)
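For readers of the hunk above, the following is a minimal, standalone sketch (not part of the commit) of how the new fallback parsing and refusal heuristic behave. The sample payloads are assumptions based on the NOTE comment in the diff, not captured server responses.

# Standalone sketch of the content-extraction and refusal heuristic added above.
# The sample payload shapes below are assumptions, not recorded server output.

def extract_content(response: dict) -> str:
    # Prefer the nemoguardrails server 'messages' array, fall back to OpenAI-style 'choices'.
    response_messages = response.get("messages", [])
    if response_messages:
        return response_messages[0].get("content", "").strip()
    choices = response.get("choices", [])
    if choices:
        return choices[0].get("message", {}).get("content", "").strip()
    return ""

def looks_blocked(response: dict) -> bool:
    # Empty content or a known refusal phrase is treated as a block.
    content = extract_content(response)
    refusal_phrases = ["sorry i cannot do this", "i cannot help with that", "i can't assist with that"]
    return not content or any(phrase in content.lower() for phrase in refusal_phrases)

blocked = {"messages": [{"role": "assistant", "content": "Sorry I cannot do this."}]}          # 'messages' shape
allowed = {"choices": [{"message": {"role": "assistant", "content": "Paris is in France."}}]}  # 'choices' fallback

print(looks_blocked(blocked))  # True  -> run() would attach a SafetyViolation
print(looks_blocked(allowed))  # False -> run() would return violation=None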
@@ -152,22 +152,13 @@ async def test_run_shield_allowed(nvidia_adapter, mock_guardrails_post):
 
     # Verify the Guardrails API was called correctly
     mock_guardrails_post.assert_called_once_with(
-        path="/v1/guardrail/checks",
+        path="/v1/chat/completions",
         data={
-            "model": shield_id,
+            "config_id": "self-check",
             "messages": [
                 {"role": "user", "content": "Hello, how are you?"},
                 {"role": "assistant", "content": "I'm doing well, thank you for asking!"},
             ],
-            "temperature": 1.0,
-            "top_p": 1,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-            "max_tokens": 160,
-            "stream": False,
-            "guardrails": {
-                "config_id": "self-check",
-            },
         },
     )
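As a sanity check on the updated expectation, here is a small standalone sketch (not from the diff) that builds the payload the way the new run() does and compares it with the dict the test asserts on; the message objects are simulated with a namedtuple rather than real OpenAIMessageParam instances.

# Sketch only: reproduce the request construction from the new run() and compare it
# to the expected dict used by the test above.
from collections import namedtuple

Msg = namedtuple("Msg", ["role", "content"])  # stand-in for the real message type
conversation = [
    Msg("user", "Hello, how are you?"),
    Msg("assistant", "I'm doing well, thank you for asking!"),
]

config_id = "self-check"  # matches the config the test expects
request_data = {
    "config_id": config_id,
    "messages": [{"role": m.role, "content": m.content} for m in conversation],
}

expected = {
    "config_id": "self-check",
    "messages": [
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you for asking!"},
    ],
}
assert request_data == expected  # the mock assertion in the test checks exactly this shape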
@@ -206,22 +197,13 @@ async def test_run_shield_blocked(nvidia_adapter, mock_guardrails_post):
 
     # Verify the Guardrails API was called correctly
     mock_guardrails_post.assert_called_once_with(
-        path="/v1/guardrail/checks",
+        path="/v1/chat/completions",
         data={
-            "model": shield_id,
+            "config_id": "self-check",
             "messages": [
                 {"role": "user", "content": "Hello, how are you?"},
                 {"role": "assistant", "content": "I'm doing well, thank you for asking!"},
             ],
-            "temperature": 1.0,
-            "top_p": 1,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-            "max_tokens": 160,
-            "stream": False,
-            "guardrails": {
-                "config_id": "self-check",
-            },
         },
     )
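The blocked test presumably drives the mock to return a refusal. A rough sketch of that setup follows; it assumes pytest-asyncio and the fixture names from the hunk header, and the mock return value and run_shield call are illustrative guesses rather than the test's actual body.

# Illustrative only: exercise the blocked path of run() with an assumed mock setup.
import pytest

@pytest.mark.asyncio
async def test_run_shield_blocked_sketch(nvidia_adapter, mock_guardrails_post):
    # A refusal in the 'messages' array should trip the new heuristic in run().
    mock_guardrails_post.return_value = {
        "messages": [{"role": "assistant", "content": "Sorry I cannot do this."}]
    }
    response = await nvidia_adapter.run_shield(shield_id="self-check", messages=[])  # hypothetical arguments
    assert response.violation is not None
    assert response.violation.user_message == "Sorry I cannot do this."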
@@ -286,22 +268,13 @@ async def test_run_shield_http_error(nvidia_adapter, mock_guardrails_post):
 
     # Verify the Guardrails API was called correctly
     mock_guardrails_post.assert_called_once_with(
-        path="/v1/guardrail/checks",
+        path="/v1/chat/completions",
         data={
-            "model": shield_id,
+            "config_id": "self-check",
            "messages": [
                 {"role": "user", "content": "Hello, how are you?"},
                 {"role": "assistant", "content": "I'm doing well, thank you for asking!"},
             ],
-            "temperature": 1.0,
-            "top_p": 1,
-            "frequency_penalty": 0,
-            "presence_penalty": 0,
-            "max_tokens": 160,
-            "stream": False,
-            "guardrails": {
-                "config_id": "self-check",
-            },
         },
     )
     # Verify the exception message
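The error-path test presumably makes the mocked POST raise, matching the docstring's Raises section. A rough sketch under the same assumptions as above; the exception text and call arguments are illustrative, not taken from the repository's test code.

# Illustrative only: simulate an HTTP failure from the guardrails endpoint.
import pytest
import requests

@pytest.mark.asyncio
async def test_run_shield_http_error_sketch(nvidia_adapter, mock_guardrails_post):
    mock_guardrails_post.side_effect = requests.HTTPError("500 Server Error")
    with pytest.raises(requests.HTTPError) as excinfo:
        await nvidia_adapter.run_shield(shield_id="self-check", messages=[])  # hypothetical arguments
    # Verify the exception message
    assert "500 Server Error" in str(excinfo.value)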