Swapna Lekkala 2025-10-10 09:16:15 -07:00
parent e09401805f
commit b5c951fa4b
10 changed files with 40 additions and 178 deletions

View file

@@ -8827,10 +8827,12 @@
"type": {
"type": "string",
"const": "refusal",
"default": "refusal"
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string"
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
@@ -8838,7 +8840,8 @@
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal"
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseError": {
"type": "object",
@@ -10323,28 +10326,6 @@
"title": "OpenAIResponseContentPartReasoningText",
"description": "Reasoning text emitted as part of a streamed response."
},
"OpenAIResponseContentPartRefusal": {
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "refusal",
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
"required": [
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseObjectStream": {
"oneOf": [
{
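
The regenerated OpenAIResponseContentPartRefusal entry above is small enough to sanity-check on its own. A minimal sketch, assuming the third-party jsonschema package (the schema fragment is copied from the hunk above; the instance value is illustrative only):

# Sketch: validate an example refusal content part against the fragment above.
# Assumes the "jsonschema" package; the instance value is illustrative only.
import jsonschema

refusal_part_schema = {
    "type": "object",
    "properties": {
        "type": {
            "type": "string",
            "const": "refusal",
            "default": "refusal",
            "description": 'Content part type identifier, always "refusal"',
        },
        "refusal": {
            "type": "string",
            "description": "Refusal text supplied by the model",
        },
    },
    "additionalProperties": False,
    "required": ["type", "refusal"],
}

# Passes: both required fields present, no extra properties.
jsonschema.validate(
    instance={"type": "refusal", "refusal": "I can't help with that."},
    schema=refusal_part_schema,
)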

View file

@@ -6558,13 +6558,18 @@ components:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseError:
type: object
properties:
@@ -7680,25 +7685,6 @@ components:
title: OpenAIResponseContentPartReasoningText
description: >-
Reasoning text emitted as part of a streamed response.
OpenAIResponseContentPartRefusal:
type: object
properties:
type:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseObjectStream:
oneOf:
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'

View file

@@ -5864,10 +5864,12 @@
"type": {
"type": "string",
"const": "refusal",
"default": "refusal"
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string"
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
@@ -5875,7 +5877,8 @@
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal"
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseInputMessageContent": {
"oneOf": [
@@ -8418,28 +8421,6 @@
"title": "OpenAIResponseContentPartReasoningText",
"description": "Reasoning text emitted as part of a streamed response."
},
"OpenAIResponseContentPartRefusal": {
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "refusal",
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
"required": [
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseObjectStream": {
"oneOf": [
{

View file

@@ -4423,13 +4423,18 @@ components:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseInputMessageContent:
oneOf:
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
@@ -6376,25 +6381,6 @@ components:
title: OpenAIResponseContentPartReasoningText
description: >-
Reasoning text emitted as part of a streamed response.
OpenAIResponseContentPartRefusal:
type: object
properties:
type:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseObjectStream:
oneOf:
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'

View file

@@ -7873,10 +7873,12 @@
"type": {
"type": "string",
"const": "refusal",
"default": "refusal"
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string"
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
@@ -7884,7 +7886,8 @@
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal"
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseInputMessageContent": {
"oneOf": [
@@ -10427,28 +10430,6 @@
"title": "OpenAIResponseContentPartReasoningText",
"description": "Reasoning text emitted as part of a streamed response."
},
"OpenAIResponseContentPartRefusal": {
"type": "object",
"properties": {
"type": {
"type": "string",
"const": "refusal",
"default": "refusal",
"description": "Content part type identifier, always \"refusal\""
},
"refusal": {
"type": "string",
"description": "Refusal text supplied by the model"
}
},
"additionalProperties": false,
"required": [
"type",
"refusal"
],
"title": "OpenAIResponseContentPartRefusal",
"description": "Refusal content within a streamed response part."
},
"OpenAIResponseObjectStream": {
"oneOf": [
{

View file

@@ -5868,13 +5868,18 @@ components:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseInputMessageContent:
oneOf:
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
@@ -7821,25 +7826,6 @@ components:
title: OpenAIResponseContentPartReasoningText
description: >-
Reasoning text emitted as part of a streamed response.
OpenAIResponseContentPartRefusal:
type: object
properties:
type:
type: string
const: refusal
default: refusal
description: >-
Content part type identifier, always "refusal"
refusal:
type: string
description: Refusal text supplied by the model
additionalProperties: false
required:
- type
- refusal
title: OpenAIResponseContentPartRefusal
description: >-
Refusal content within a streamed response part.
OpenAIResponseObjectStream:
oneOf:
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
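
With the duplicate block removed, each regenerated spec carries a single described copy of the component. A quick spot check after regeneration might look like the sketch below (PyYAML assumed; the spec file path is an assumption for illustration):

# Sketch: spot-check that a regenerated YAML spec carries the added descriptions.
# The file path below is assumed; point it at the spec actually being checked.
import yaml

with open("docs/static/llama-stack-spec.yaml") as f:
    spec = yaml.safe_load(f)

refusal = spec["components"]["schemas"]["OpenAIResponseContentPartRefusal"]
assert refusal["description"] == "Refusal content within a streamed response part."
assert refusal["properties"]["refusal"]["description"] == "Refusal text supplied by the model"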

View file

@@ -133,6 +133,12 @@ class OpenAIResponseOutputMessageContentOutputText(BaseModel):
@json_schema_type
class OpenAIResponseContentPartRefusal(BaseModel):
"""Refusal content within a streamed response part.
:param type: Content part type identifier, always "refusal"
:param refusal: Refusal text supplied by the model
"""
type: Literal["refusal"] = "refusal"
refusal: str
@@ -884,18 +890,6 @@ class OpenAIResponseContentPartOutputText(BaseModel):
logprobs: list[dict[str, Any]] | None = None
@json_schema_type
class OpenAIResponseContentPartRefusal(BaseModel):
"""Refusal content within a streamed response part.
:param type: Content part type identifier, always "refusal"
:param refusal: Refusal text supplied by the model
"""
type: Literal["refusal"] = "refusal"
refusal: str
@json_schema_type
class OpenAIResponseContentPartReasoningText(BaseModel):
"""Reasoning text emitted as part of a streamed response.

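The six spec diffs above all trace back to this single Pydantic model, and its docstring params match the description fields added to the specs. A minimal usage sketch (the import path is an assumption; the class itself is shown in the hunk above):

# Sketch: build a refusal content part and inspect its serialized form.
# Import path assumed; OpenAIResponseContentPartRefusal is defined above.
from llama_stack.apis.agents.openai_responses import OpenAIResponseContentPartRefusal

part = OpenAIResponseContentPartRefusal(refusal="I can't help with that.")
assert part.type == "refusal"      # Literal["refusal"] default, no need to pass it
print(part.model_dump_json())      # {"type":"refusal","refusal":"I can't help with that."}
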
View file

@@ -52,14 +52,6 @@ from llama_stack.apis.inference import (
UserMessage,
)
from llama_stack.apis.safety import Safety
from llama_stack.log import get_logger
logger = get_logger(name=__name__, category="openai_responses_utils")
# ============================================================================
# Message and Content Conversion Functions
# ============================================================================
async def convert_chat_choice_to_response_message(
@@ -325,11 +317,6 @@ def is_function_tool_call(
return False
# ============================================================================
# Safety and Shield Validation Functions
# ============================================================================
async def run_multiple_shields(safety_api: Safety, messages: list[Message], shield_ids: list[str]) -> None:
"""Run multiple shields against messages and raise SafetyException for violations."""
if not shield_ids or not messages:
@@ -359,7 +346,7 @@ def extract_shield_ids(shields: list | None) -> list[str]:
elif isinstance(shield, ResponseShieldSpec):
shield_ids.append(shield.type)
else:
logger.warning(f"Unknown shield format: {shield}")
raise ValueError(f"Unknown shield format: {shield}, expected str or ResponseShieldSpec")
return shield_ids
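
Putting the visible hunks together, extract_shield_ids now fails fast on unrecognized entries instead of logging and continuing. A condensed sketch of how the function reads after this change (the elif/else branches appear in the hunk; the loop, the str branch, and the ResponseShieldSpec import path are reconstructed assumptions):

# Approximate shape of extract_shield_ids after this change. Only the
# elif/else branches are verbatim from the hunk; the rest is reconstructed.
from llama_stack.apis.agents.openai_responses import ResponseShieldSpec  # import path assumed


def extract_shield_ids(shields: list | None) -> list[str]:
    shield_ids: list[str] = []
    for shield in shields or []:
        if isinstance(shield, str):
            shield_ids.append(shield)
        elif isinstance(shield, ResponseShieldSpec):
            shield_ids.append(shield.type)
        else:
            # Previously a logger.warning; now an explicit error.
            raise ValueError(f"Unknown shield format: {shield}, expected str or ResponseShieldSpec")
    return shield_ids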

View file

@@ -1084,11 +1084,6 @@ async def test_create_openai_response_with_invalid_text_format(openai_responses_
)
# ============================================================================
# Shield Validation Tests
# ============================================================================
async def test_check_input_safety_no_violation(openai_responses_impl):
"""Test input shield validation with no violations."""
messages = [UserMessage(content="Hello world")]

View file

@@ -44,11 +44,6 @@ def responses_impl(mock_apis):
return OpenAIResponsesImpl(**mock_apis)
# ============================================================================
# Shield ID Extraction Tests
# ============================================================================
def test_extract_shield_ids_from_strings(responses_impl):
"""Test extraction from simple string shield IDs."""
shields = ["llama-guard", "content-filter", "nsfw-detector"]
@@ -99,11 +94,6 @@ def test_extract_shield_ids_unknown_format(responses_impl, caplog):
assert "Unknown shield format" in caplog.text
# ============================================================================
# Text Content Extraction Tests
# ============================================================================
def test_extract_text_content_string(responses_impl):
"""Test extraction from simple string content."""
content = "Hello world"
@@ -177,11 +167,6 @@ def test_extract_text_content_none_input(responses_impl):
assert result is None
# ============================================================================
# Message Conversion Tests
# ============================================================================
def test_convert_user_message(responses_impl):
"""Test conversion of user message."""
openai_msg = MagicMock(role="user", content="Hello world")
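
Given the switch from a logged warning to a raised ValueError in extract_shield_ids, the unknown-format case can assert the exception directly; a minimal pytest sketch (the import path is assumed for illustration):

# Sketch: exercise the new failure mode of extract_shield_ids.
# The import path is an assumption; the helper itself appears in the diff above.
import pytest

from llama_stack.providers.inline.agents.meta_reference.responses.utils import extract_shield_ids


def test_extract_shield_ids_unknown_format_raises():
    # A bare dict is neither a str nor a ResponseShieldSpec.
    with pytest.raises(ValueError, match="Unknown shield format"):
        extract_shield_ids([{"unexpected": "dict"}])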