mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 12:07:34 +00:00
test(responses): responses API Response object unit tests
This commit is contained in:
parent
708b2c1b05
commit
66806c480f
3 changed files with 149 additions and 0 deletions
99
tests/unit/apis/responses/test_tools.py
Normal file
99
tests/unit/apis/responses/test_tools.py
Normal file
|
@ -0,0 +1,99 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
import pytest
|
||||
from pydantic import TypeAdapter, ValidationError
|
||||
|
||||
from llama_stack.apis.agents.openai_responses import OpenAIResponsesToolChoice
|
||||
from llama_stack.apis.tools.openai_tool_choice import (
|
||||
ToolChoiceAllowed,
|
||||
ToolChoiceCustom,
|
||||
ToolChoiceFunction,
|
||||
ToolChoiceMcp,
|
||||
ToolChoiceOptions,
|
||||
ToolChoiceTypes,
|
||||
)
|
||||
|
||||
|
||||
def test_tool_choice_discriminated_options():
    """Each discriminated tool-choice payload parses to its variant class and round-trips."""
    adapter = TypeAdapter(OpenAIResponsesToolChoice)

    # (raw payload, expected variant class, expected discriminator value)
    cases = [
        ({"type": "function", "name": "search"}, ToolChoiceFunction, "function"),
        ({"type": "mcp", "server_label": "deepwiki"}, ToolChoiceMcp, "mcp"),
        ({"type": "custom", "name": "my_tool"}, ToolChoiceCustom, "custom"),
        (
            {
                "type": "allowed_tools",
                "mode": "auto",
                "tools": [{"type": "function", "name": "foo"}],
            },
            ToolChoiceAllowed,
            "allowed_tools",
        ),
    ]

    for raw, variant_cls, discriminator in cases:
        parsed = adapter.validate_python(raw)
        assert isinstance(parsed, variant_cls)
        assert parsed.type == discriminator

        # Round-trip: dump -> re-validate -> dump must be stable.
        serialized = parsed.model_dump()
        round_tripped = adapter.validate_python(serialized)
        assert isinstance(round_tripped, variant_cls)
        assert round_tripped.model_dump() == serialized
|
||||
|
||||
|
||||
def test_tool_choice_literal_options():
    """The bare string options validate via both the literal adapter and the top-level union."""
    union_adapter = TypeAdapter(OpenAIResponsesToolChoice)
    literal_adapter = TypeAdapter(ToolChoiceOptions)

    for option in ("none", "auto", "required"):
        # The dedicated literal type accepts it unchanged...
        assert literal_adapter.validate_python(option) == option
        # ...and so does the full tool-choice union.
        assert union_adapter.validate_python(option) == option
|
||||
|
||||
|
||||
def test_tool_choice_rejects_invalid_value():
    """Unknown string options and unknown discriminator objects both fail validation."""
    adapter = TypeAdapter(OpenAIResponsesToolChoice)

    for bad_payload in ("invalid", {"type": "unknown_variant"}):
        with pytest.raises(ValidationError):
            adapter.validate_python(bad_payload)
|
||||
|
||||
|
||||
def test_tool_choice_types_accepts_each_variant_value():
    """Every hosted-tool type literal parses to ToolChoiceTypes and dumps back unchanged."""
    adapter = TypeAdapter(OpenAIResponsesToolChoice)

    for tool_type in (
        "file_search",
        "web_search_preview",
        "computer_use_preview",
        "web_search_preview_2025_03_11",
        "image_generation",
        "code_interpreter",
    ):
        parsed = adapter.validate_python({"type": tool_type})
        assert isinstance(parsed, ToolChoiceTypes)
        assert parsed.type == tool_type
        # Serialization carries no extra fields for these variants.
        assert parsed.model_dump() == {"type": tool_type}
|
||||
|
||||
|
||||
def test_tool_choice_rejects_invalid_discriminator_value():
    """A payload whose discriminator matches no known variant fails validation."""
    adapter = TypeAdapter(OpenAIResponsesToolChoice)
    bogus = {"type": "unknown_variant"}
    with pytest.raises(ValidationError):
        adapter.validate_python(bogus)
|
||||
|
||||
|
||||
def test_tool_choice_rejects_missing_required_fields():
    """A function tool choice without its required "name" field fails validation."""
    adapter = TypeAdapter(OpenAIResponsesToolChoice)
    incomplete = {"type": "function"}  # the function variant also requires "name"
    with pytest.raises(ValidationError):
        adapter.validate_python(incomplete)
|
33
tests/unit/openapi_typing/test_schema_literals.py
Normal file
33
tests/unit/openapi_typing/test_schema_literals.py
Normal file
|
@ -0,0 +1,33 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
from typing import Literal
|
||||
|
||||
import pytest
|
||||
|
||||
from llama_stack.strong_typing.schema import JsonSchemaGenerator
|
||||
|
||||
|
||||
def test_single_literal_generates_const_schema():
    """A single-value Literal becomes a typed `const` JSON schema."""
    generator = JsonSchemaGenerator()
    schema = generator.type_to_schema(Literal["hello"])  # type: ignore[valid-type]

    assert schema["type"] == "string"
    assert schema["const"] == "hello"
|
||||
|
||||
|
||||
def test_multi_literal_generates_enum_schema():
    """A multi-value Literal of one type becomes a typed `enum` JSON schema."""
    generator = JsonSchemaGenerator()
    schema = generator.type_to_schema(Literal["a", "b", "c"])  # type: ignore[valid-type]

    assert schema["type"] == "string"
    assert schema["enum"] == ["a", "b", "c"]
|
||||
|
||||
|
||||
def test_mixed_type_literal_raises():
    """A Literal mixing value types (str and int) is rejected by the generator."""
    generator = JsonSchemaGenerator()
    with pytest.raises((ValueError, TypeError)):
        generator.type_to_schema(Literal["x", 1])  # type: ignore[valid-type]
|
|
@ -868,3 +868,20 @@ async def test_create_openai_response_with_invalid_text_format(openai_responses_
|
|||
model=model,
|
||||
text=OpenAIResponseText(format={"type": "invalid"}),
|
||||
)
|
||||
|
||||
|
||||
def test_openai_response_text_default_format_unique_instance():
    """Default `format` is per-instance, defaults to text, and is never shared."""
    first = OpenAIResponseText()
    second = OpenAIResponseText()

    assert first.format is not None
    assert second.format is not None

    # Both independently default to the plain text format.
    assert first.format.get("type") == "text"
    assert second.format.get("type") == "text"

    # Distinct objects: mutating one must not leak into the other
    # (guards against a shared mutable default).
    assert first.format is not second.format
    first.format["name"] = "custom-name"
    assert "name" not in second.format
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue