diff --git a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py index 3c05e8747..f4f1bac43 100644 --- a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py +++ b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py @@ -899,5 +899,8 @@ class OpenAIResponsesImpl: else: raise ValueError(f"Unknown result content type: {type(result.content)}") input_message = OpenAIToolMessageParam(content=content, tool_call_id=tool_call_id) + else: + text = str(error_exc) + input_message = OpenAIToolMessageParam(content=text, tool_call_id=tool_call_id) return message, input_message diff --git a/tests/common/mcp.py b/tests/common/mcp.py index 927ff561a..775e38295 100644 --- a/tests/common/mcp.py +++ b/tests/common/mcp.py @@ -24,16 +24,16 @@ def default_tools(): ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]: return [types.TextContent(type="text", text="Hello, world!")] - async def get_boiling_point(liquid_name: str, celcius: bool = True) -> int: + async def get_boiling_point(liquid_name: str, celsius: bool = True) -> int: """ - Returns the boiling point of a liquid in Celcius or Fahrenheit. + Returns the boiling point of a liquid in Celsius or Fahrenheit. 
:param liquid_name: The name of the liquid - :param celcius: Whether to return the boiling point in Celcius + :param celsius: Whether to return the boiling point in Celsius - :return: The boiling point of the liquid in Celcius or Fahrenheit + :return: The boiling point of the liquid in Celsius or Fahrenheit """ - if liquid_name.lower() == "polyjuice": - if celcius: + if liquid_name.lower() == "myawesomeliquid": + if celsius: return -100 else: return -212 diff --git a/tests/verifications/openai_api/fixtures/test_cases/responses.yaml b/tests/verifications/openai_api/fixtures/test_cases/responses.yaml index 68da7f6e2..4d6c19b59 100644 --- a/tests/verifications/openai_api/fixtures/test_cases/responses.yaml +++ b/tests/verifications/openai_api/fixtures/test_cases/responses.yaml @@ -36,7 +36,7 @@ test_response_mcp_tool: test_params: case: - case_id: "boiling_point_tool" - input: "What is the boiling point of polyjuice?" + input: "What is the boiling point of myawesomeliquid in Celsius?" tools: - type: mcp server_label: "localmcp" @@ -100,7 +100,7 @@ test_response_multi_turn_tool_execution: test_params: case: - case_id: "user_file_access_check" - input: "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Tell me the final result." + input: "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
tools: - type: mcp server_label: "localmcp" diff --git a/tests/verifications/openai_api/test_responses.py b/tests/verifications/openai_api/test_responses.py index 24ddab5d9..c9b190e62 100644 --- a/tests/verifications/openai_api/test_responses.py +++ b/tests/verifications/openai_api/test_responses.py @@ -291,14 +291,10 @@ def test_response_non_streaming_mcp_tool(request, openai_client, model, provider call = response.output[1] assert call.type == "mcp_call" assert call.name == "get_boiling_point" - assert json.loads(call.arguments) == {"liquid_name": "polyjuice", "celcius": True} + assert json.loads(call.arguments) == {"liquid_name": "myawesomeliquid", "celsius": True} assert call.error is None assert "-100" in call.output - from rich.pretty import pprint - - pprint(response) - # sometimes the model will call the tool again, so we need to get the last message message = response.output[-1] text_content = message.content[0].text