Mirror of https://github.com/meta-llama/llama-stack.git

Commit b5c951fa4b (parent e09401805f): clean up

10 changed files with 40 additions and 178 deletions
docs/static/deprecated-llama-stack-spec.html (vendored, 31 lines changed)

@@ -8827,10 +8827,12 @@
         "type": {
           "type": "string",
           "const": "refusal",
-          "default": "refusal"
+          "default": "refusal",
+          "description": "Content part type identifier, always \"refusal\""
         },
         "refusal": {
-          "type": "string"
+          "type": "string",
+          "description": "Refusal text supplied by the model"
         }
       },
       "additionalProperties": false,
@@ -8838,7 +8840,8 @@
         "type",
         "refusal"
       ],
-      "title": "OpenAIResponseContentPartRefusal"
+      "title": "OpenAIResponseContentPartRefusal",
+      "description": "Refusal content within a streamed response part."
     },
     "OpenAIResponseError": {
       "type": "object",
@@ -10323,28 +10326,6 @@
       "title": "OpenAIResponseContentPartReasoningText",
       "description": "Reasoning text emitted as part of a streamed response."
     },
-    "OpenAIResponseContentPartRefusal": {
-      "type": "object",
-      "properties": {
-        "type": {
-          "type": "string",
-          "const": "refusal",
-          "default": "refusal",
-          "description": "Content part type identifier, always \"refusal\""
-        },
-        "refusal": {
-          "type": "string",
-          "description": "Refusal text supplied by the model"
-        }
-      },
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "refusal"
-      ],
-      "title": "OpenAIResponseContentPartRefusal",
-      "description": "Refusal content within a streamed response part."
-    },
     "OpenAIResponseObjectStream": {
       "oneOf": [
         {
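The same description additions and duplicate-schema removal are applied to all six vendored spec files in this commit. As a sanity check on the consolidated schema, a refusal content part should validate against it as in the following sketch; the schema dict is transcribed from the diff above, while the sample instance and the use of the third-party jsonschema package are illustrative assumptions, not part of the commit.

# Illustrative only: validates a sample instance against the schema shown in the diff above.
# The jsonschema dependency is an assumption for this sketch; it is not implied by the commit.
from jsonschema import validate

refusal_schema = {
    "type": "object",
    "properties": {
        "type": {
            "type": "string",
            "const": "refusal",
            "default": "refusal",
            "description": 'Content part type identifier, always "refusal"',
        },
        "refusal": {
            "type": "string",
            "description": "Refusal text supplied by the model",
        },
    },
    "additionalProperties": False,
    "required": ["type", "refusal"],
    "title": "OpenAIResponseContentPartRefusal",
    "description": "Refusal content within a streamed response part.",
}

# A conforming content part (the refusal text is a made-up example).
validate(
    instance={"type": "refusal", "refusal": "I can't help with that request."},
    schema=refusal_schema,
)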
docs/static/deprecated-llama-stack-spec.yaml (vendored, 24 lines changed)

@@ -6558,13 +6558,18 @@ components:
           type: string
           const: refusal
           default: refusal
+          description: >-
+            Content part type identifier, always "refusal"
         refusal:
           type: string
+          description: Refusal text supplied by the model
       additionalProperties: false
       required:
         - type
         - refusal
       title: OpenAIResponseContentPartRefusal
+      description: >-
+        Refusal content within a streamed response part.
     OpenAIResponseError:
       type: object
       properties:
@@ -7680,25 +7685,6 @@ components:
       title: OpenAIResponseContentPartReasoningText
       description: >-
         Reasoning text emitted as part of a streamed response.
-    OpenAIResponseContentPartRefusal:
-      type: object
-      properties:
-        type:
-          type: string
-          const: refusal
-          default: refusal
-          description: >-
-            Content part type identifier, always "refusal"
-        refusal:
-          type: string
-          description: Refusal text supplied by the model
-      additionalProperties: false
-      required:
-        - type
-        - refusal
-      title: OpenAIResponseContentPartRefusal
-      description: >-
-        Refusal content within a streamed response part.
     OpenAIResponseObjectStream:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
docs/static/llama-stack-spec.html (vendored, 31 lines changed)

@@ -5864,10 +5864,12 @@
         "type": {
           "type": "string",
           "const": "refusal",
-          "default": "refusal"
+          "default": "refusal",
+          "description": "Content part type identifier, always \"refusal\""
         },
         "refusal": {
-          "type": "string"
+          "type": "string",
+          "description": "Refusal text supplied by the model"
         }
       },
       "additionalProperties": false,
@@ -5875,7 +5877,8 @@
         "type",
         "refusal"
       ],
-      "title": "OpenAIResponseContentPartRefusal"
+      "title": "OpenAIResponseContentPartRefusal",
+      "description": "Refusal content within a streamed response part."
     },
     "OpenAIResponseInputMessageContent": {
       "oneOf": [
@@ -8418,28 +8421,6 @@
       "title": "OpenAIResponseContentPartReasoningText",
       "description": "Reasoning text emitted as part of a streamed response."
     },
-    "OpenAIResponseContentPartRefusal": {
-      "type": "object",
-      "properties": {
-        "type": {
-          "type": "string",
-          "const": "refusal",
-          "default": "refusal",
-          "description": "Content part type identifier, always \"refusal\""
-        },
-        "refusal": {
-          "type": "string",
-          "description": "Refusal text supplied by the model"
-        }
-      },
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "refusal"
-      ],
-      "title": "OpenAIResponseContentPartRefusal",
-      "description": "Refusal content within a streamed response part."
-    },
     "OpenAIResponseObjectStream": {
       "oneOf": [
         {
docs/static/llama-stack-spec.yaml (vendored, 24 lines changed)

@@ -4423,13 +4423,18 @@ components:
           type: string
           const: refusal
           default: refusal
+          description: >-
+            Content part type identifier, always "refusal"
         refusal:
           type: string
+          description: Refusal text supplied by the model
       additionalProperties: false
       required:
         - type
         - refusal
       title: OpenAIResponseContentPartRefusal
+      description: >-
+        Refusal content within a streamed response part.
     OpenAIResponseInputMessageContent:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
@@ -6376,25 +6381,6 @@ components:
       title: OpenAIResponseContentPartReasoningText
       description: >-
        Reasoning text emitted as part of a streamed response.
-    OpenAIResponseContentPartRefusal:
-      type: object
-      properties:
-        type:
-          type: string
-          const: refusal
-          default: refusal
-          description: >-
-            Content part type identifier, always "refusal"
-        refusal:
-          type: string
-          description: Refusal text supplied by the model
-      additionalProperties: false
-      required:
-        - type
-        - refusal
-      title: OpenAIResponseContentPartRefusal
-      description: >-
-        Refusal content within a streamed response part.
     OpenAIResponseObjectStream:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
docs/static/stainless-llama-stack-spec.html (vendored, 31 lines changed)

@@ -7873,10 +7873,12 @@
         "type": {
           "type": "string",
           "const": "refusal",
-          "default": "refusal"
+          "default": "refusal",
+          "description": "Content part type identifier, always \"refusal\""
         },
         "refusal": {
-          "type": "string"
+          "type": "string",
+          "description": "Refusal text supplied by the model"
         }
       },
       "additionalProperties": false,
@@ -7884,7 +7886,8 @@
         "type",
         "refusal"
       ],
-      "title": "OpenAIResponseContentPartRefusal"
+      "title": "OpenAIResponseContentPartRefusal",
+      "description": "Refusal content within a streamed response part."
     },
     "OpenAIResponseInputMessageContent": {
       "oneOf": [
@@ -10427,28 +10430,6 @@
       "title": "OpenAIResponseContentPartReasoningText",
       "description": "Reasoning text emitted as part of a streamed response."
     },
-    "OpenAIResponseContentPartRefusal": {
-      "type": "object",
-      "properties": {
-        "type": {
-          "type": "string",
-          "const": "refusal",
-          "default": "refusal",
-          "description": "Content part type identifier, always \"refusal\""
-        },
-        "refusal": {
-          "type": "string",
-          "description": "Refusal text supplied by the model"
-        }
-      },
-      "additionalProperties": false,
-      "required": [
-        "type",
-        "refusal"
-      ],
-      "title": "OpenAIResponseContentPartRefusal",
-      "description": "Refusal content within a streamed response part."
-    },
     "OpenAIResponseObjectStream": {
       "oneOf": [
         {
docs/static/stainless-llama-stack-spec.yaml (vendored, 24 lines changed)

@@ -5868,13 +5868,18 @@ components:
           type: string
           const: refusal
           default: refusal
+          description: >-
+            Content part type identifier, always "refusal"
         refusal:
           type: string
+          description: Refusal text supplied by the model
       additionalProperties: false
       required:
         - type
         - refusal
       title: OpenAIResponseContentPartRefusal
+      description: >-
+        Refusal content within a streamed response part.
     OpenAIResponseInputMessageContent:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
@@ -7821,25 +7826,6 @@ components:
       title: OpenAIResponseContentPartReasoningText
       description: >-
        Reasoning text emitted as part of a streamed response.
-    OpenAIResponseContentPartRefusal:
-      type: object
-      properties:
-        type:
-          type: string
-          const: refusal
-          default: refusal
-          description: >-
-            Content part type identifier, always "refusal"
-        refusal:
-          type: string
-          description: Refusal text supplied by the model
-      additionalProperties: false
-      required:
-        - type
-        - refusal
-      title: OpenAIResponseContentPartRefusal
-      description: >-
-        Refusal content within a streamed response part.
     OpenAIResponseObjectStream:
       oneOf:
         - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
(Python file; path not shown in this view)

@@ -133,6 +133,12 @@ class OpenAIResponseOutputMessageContentOutputText(BaseModel):

 @json_schema_type
 class OpenAIResponseContentPartRefusal(BaseModel):
+    """Refusal content within a streamed response part.
+
+    :param type: Content part type identifier, always "refusal"
+    :param refusal: Refusal text supplied by the model
+    """
+
     type: Literal["refusal"] = "refusal"
     refusal: str

@@ -884,18 +890,6 @@ class OpenAIResponseContentPartOutputText(BaseModel):
     logprobs: list[dict[str, Any]] | None = None


-@json_schema_type
-class OpenAIResponseContentPartRefusal(BaseModel):
-    """Refusal content within a streamed response part.
-
-    :param type: Content part type identifier, always "refusal"
-    :param refusal: Refusal text supplied by the model
-    """
-
-    type: Literal["refusal"] = "refusal"
-    refusal: str
-
-
 @json_schema_type
 class OpenAIResponseContentPartReasoningText(BaseModel):
     """Reasoning text emitted as part of a streamed response.
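For orientation, the consolidated Pydantic model can be used roughly as sketched below; the import path is an assumption (the file path is not captured in this view) and the refusal text is a made-up example.

# Illustrative sketch; the module path below is assumed, not shown in the diff.
from llama_stack.apis.agents.openai_responses import OpenAIResponseContentPartRefusal

part = OpenAIResponseContentPartRefusal(refusal="I can't help with that request.")
assert part.type == "refusal"  # defaulted by the Literal["refusal"] field
print(part.model_dump())       # {'type': 'refusal', 'refusal': "I can't help with that request."}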
(Python file; path not shown in this view)

@@ -52,14 +52,6 @@ from llama_stack.apis.inference import (
     UserMessage,
 )
 from llama_stack.apis.safety import Safety
-from llama_stack.log import get_logger
-
-logger = get_logger(name=__name__, category="openai_responses_utils")
-
-
-# ============================================================================
-# Message and Content Conversion Functions
-# ============================================================================


 async def convert_chat_choice_to_response_message(
@@ -325,11 +317,6 @@ def is_function_tool_call(
     return False


-# ============================================================================
-# Safety and Shield Validation Functions
-# ============================================================================
-
-
 async def run_multiple_shields(safety_api: Safety, messages: list[Message], shield_ids: list[str]) -> None:
     """Run multiple shields against messages and raise SafetyException for violations."""
     if not shield_ids or not messages:
@@ -359,7 +346,7 @@ def extract_shield_ids(shields: list | None) -> list[str]:
         elif isinstance(shield, ResponseShieldSpec):
             shield_ids.append(shield.type)
         else:
-            logger.warning(f"Unknown shield format: {shield}")
+            raise ValueError(f"Unknown shield format: {shield}, expected str or ResponseShieldSpec")

     return shield_ids
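The last hunk above replaces a logged warning with a hard error for unrecognized shield entries. A minimal sketch of the resulting behavior is shown below; the str branch, the stand-in ResponseShieldSpec type, and the surrounding function body are reconstructed assumptions, since only the elif/else branches are visible in the diff.

from dataclasses import dataclass


@dataclass
class ResponseShieldSpec:  # stand-in for the real llama_stack type, for illustration only
    type: str


def extract_shield_ids(shields: list | None) -> list[str]:
    """Sketch of the post-change behavior: unknown entries raise instead of logging a warning."""
    shield_ids: list[str] = []
    for shield in shields or []:
        if isinstance(shield, str):  # assumed branch; not visible in the hunk
            shield_ids.append(shield)
        elif isinstance(shield, ResponseShieldSpec):
            shield_ids.append(shield.type)
        else:
            raise ValueError(f"Unknown shield format: {shield}, expected str or ResponseShieldSpec")
    return shield_ids


print(extract_shield_ids(["llama-guard", ResponseShieldSpec(type="content-filter")]))
# ['llama-guard', 'content-filter']

try:
    extract_shield_ids([42])
except ValueError as err:
    print(err)  # Unknown shield format: 42, expected str or ResponseShieldSpec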
(test file; path not shown in this view)

@@ -1084,11 +1084,6 @@ async def test_create_openai_response_with_invalid_text_format(openai_responses_
     )


-# ============================================================================
-# Shield Validation Tests
-# ============================================================================
-
-
 async def test_check_input_safety_no_violation(openai_responses_impl):
     """Test input shield validation with no violations."""
     messages = [UserMessage(content="Hello world")]
(test file; path not shown in this view)

@@ -44,11 +44,6 @@ def responses_impl(mock_apis):
     return OpenAIResponsesImpl(**mock_apis)


-# ============================================================================
-# Shield ID Extraction Tests
-# ============================================================================
-
-
 def test_extract_shield_ids_from_strings(responses_impl):
     """Test extraction from simple string shield IDs."""
     shields = ["llama-guard", "content-filter", "nsfw-detector"]
@@ -99,11 +94,6 @@ def test_extract_shield_ids_unknown_format(responses_impl, caplog):
     assert "Unknown shield format" in caplog.text


-# ============================================================================
-# Text Content Extraction Tests
-# ============================================================================
-
-
 def test_extract_text_content_string(responses_impl):
     """Test extraction from simple string content."""
     content = "Hello world"
@@ -177,11 +167,6 @@ def test_extract_text_content_none_input(responses_impl):
     assert result is None


-# ============================================================================
-# Message Conversion Tests
-# ============================================================================
-
-
 def test_convert_user_message(responses_impl):
     """Test conversion of user message."""
     openai_msg = MagicMock(role="user", content="Hello world")
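With the warning-to-exception change in the utility code, a test for the unknown-shield path would assert on the raised ValueError rather than on log output. The sketch below is purely illustrative; the import path and test wiring are assumptions and the snippet is not taken from this repository's test suite.

# Illustrative sketch only; the import path below is assumed, not repository code.
import pytest

from llama_stack.providers.utils.responses import extract_shield_ids  # assumed module path


def test_extract_shield_ids_unknown_format_raises():
    """With the change above, an unrecognized shield entry raises rather than logging a warning."""
    with pytest.raises(ValueError, match="Unknown shield format"):
        extract_shield_ids([{"not": "a shield"}])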