From 66806c480f1e428634906ab05a6451665390c9f5 Mon Sep 17 00:00:00 2001
From: Emilio Garcia
Date: Tue, 26 Aug 2025 14:12:59 -0400
Subject: [PATCH] test(responses): responses API Response object unit tests

---
 tests/unit/apis/responses/test_tools.py      | 99 +++++++++++++++++++
 .../openapi_typing/test_schema_literals.py   | 33 +++++++
 .../meta_reference/test_openai_responses.py  | 17 ++++
 3 files changed, 149 insertions(+)
 create mode 100644 tests/unit/apis/responses/test_tools.py
 create mode 100644 tests/unit/openapi_typing/test_schema_literals.py

diff --git a/tests/unit/apis/responses/test_tools.py b/tests/unit/apis/responses/test_tools.py
new file mode 100644
index 000000000..5c758b3a8
--- /dev/null
+++ b/tests/unit/apis/responses/test_tools.py
@@ -0,0 +1,99 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import pytest
+from pydantic import TypeAdapter, ValidationError
+
+from llama_stack.apis.agents.openai_responses import OpenAIResponsesToolChoice
+from llama_stack.apis.tools.openai_tool_choice import (
+    ToolChoiceAllowed,
+    ToolChoiceCustom,
+    ToolChoiceFunction,
+    ToolChoiceMcp,
+    ToolChoiceOptions,
+    ToolChoiceTypes,
+)
+
+
+def test_tool_choice_discriminated_options():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+
+    cases = [
+        ({"type": "function", "name": "search"}, ToolChoiceFunction, "function"),
+        ({"type": "mcp", "server_label": "deepwiki"}, ToolChoiceMcp, "mcp"),
+        ({"type": "custom", "name": "my_tool"}, ToolChoiceCustom, "custom"),
+        (
+            {
+                "type": "allowed_tools",
+                "mode": "auto",
+                "tools": [{"type": "function", "name": "foo"}],
+            },
+            ToolChoiceAllowed,
+            "allowed_tools",
+        ),
+    ]
+
+    for payload, expected_cls, expected_type in cases:
+        obj = adapter.validate_python(payload)
+        assert isinstance(obj, expected_cls)
+        assert obj.type == expected_type
+
+        dumped = obj.model_dump()
+        reparsed = adapter.validate_python(dumped)
+        assert isinstance(reparsed, expected_cls)
+        assert reparsed.model_dump() == dumped
+
+
+def test_tool_choice_literal_options():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+    options_adapter = TypeAdapter(ToolChoiceOptions)
+
+    for v in ("none", "auto", "required"):
+        # Validate via the specific literal adapter
+        assert options_adapter.validate_python(v) == v
+        # And via the top-level union adapter
+        assert adapter.validate_python(v) == v
+
+
+def test_tool_choice_rejects_invalid_value():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+
+    with pytest.raises(ValidationError):
+        adapter.validate_python("invalid")
+    with pytest.raises(ValidationError):
+        adapter.validate_python({"type": "unknown_variant"})
+
+
+def test_tool_choice_types_accepts_each_variant_value():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+
+    allowed_values = [
+        "file_search",
+        "web_search_preview",
+        "computer_use_preview",
+        "web_search_preview_2025_03_11",
+        "image_generation",
+        "code_interpreter",
+    ]
+
+    for v in allowed_values:
+        obj = adapter.validate_python({"type": v})
+        assert isinstance(obj, ToolChoiceTypes)
+        assert obj.type == v
+        assert obj.model_dump() == {"type": v}
+
+
+def test_tool_choice_rejects_invalid_discriminator_value():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+    with pytest.raises(ValidationError):
+        adapter.validate_python({"type": "unknown_variant"})
+
+
+def test_tool_choice_rejects_missing_required_fields():
+    adapter = TypeAdapter(OpenAIResponsesToolChoice)
+    # Missing "name" for function
+    with pytest.raises(ValidationError):
+        adapter.validate_python({"type": "function"})
diff --git a/tests/unit/openapi_typing/test_schema_literals.py b/tests/unit/openapi_typing/test_schema_literals.py
new file mode 100644
index 000000000..fe592af95
--- /dev/null
+++ b/tests/unit/openapi_typing/test_schema_literals.py
@@ -0,0 +1,33 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Literal
+
+import pytest
+
+from llama_stack.strong_typing.schema import JsonSchemaGenerator
+
+
+def test_single_literal_generates_const_schema():
+    gen = JsonSchemaGenerator()
+    schema = gen.type_to_schema(Literal["hello"])  # type: ignore[valid-type]
+
+    assert schema["const"] == "hello"
+    assert schema["type"] == "string"
+
+
+def test_multi_literal_generates_enum_schema():
+    gen = JsonSchemaGenerator()
+    schema = gen.type_to_schema(Literal["a", "b", "c"])  # type: ignore[valid-type]
+
+    assert schema["enum"] == ["a", "b", "c"]
+    assert schema["type"] == "string"
+
+
+def test_mixed_type_literal_raises():
+    gen = JsonSchemaGenerator()
+    with pytest.raises((ValueError, TypeError)):
+        _ = gen.type_to_schema(Literal["x", 1])  # type: ignore[valid-type]
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
index a964bc219..3fa83031b 100644
--- a/tests/unit/providers/agents/meta_reference/test_openai_responses.py
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
@@ -868,3 +868,20 @@ async def test_create_openai_response_with_invalid_text_format(openai_responses_
             model=model,
             text=OpenAIResponseText(format={"type": "invalid"}),
         )
+
+
+def test_openai_response_text_default_format_unique_instance():
+    a = OpenAIResponseText()
+    b = OpenAIResponseText()
+
+    assert a.format is not None
+    assert b.format is not None
+
+    # Defaults to text format
+    assert a.format.get("type") == "text"
+    assert b.format.get("type") == "text"
+
+    # Unique instances (no shared mutable default)
+    assert a.format is not b.format
+    a.format["name"] = "custom-name"
+    assert "name" not in b.format