Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-14 17:16:09 +00:00)
# What does this PR do?

Previously, developers who ran `./scripts/unit-tests.sh` would get `asyncio-mode=auto`, which made `@pytest.mark.asyncio` and `@pytest_asyncio.fixture` redundant. Developers who ran `pytest` directly would get pytest's default (strict mode) and run into errors, leading them to add `@pytest.mark.asyncio` / `@pytest_asyncio.fixture` to their code.

With this change:

- `asyncio_mode=auto` is included in `pyproject.toml`, making behavior consistent for all invocations of pytest (see the sketch below)
- all redundant `@pytest_asyncio.fixture` and `@pytest.mark.asyncio` decorators are removed
- for good measure, `pytest>=8.4` and `pytest-asyncio>=1.0` are now required

## Test Plan

- `./scripts/unit-tests.sh`
- `uv run pytest tests/unit`
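For context, a minimal sketch of what that `pyproject.toml` stanza would look like, assuming the standard `[tool.pytest.ini_options]` table that pytest reads from `pyproject.toml`; the repo's actual file may organize its tables and version pins differently:

```toml
# Hypothetical sketch, not the literal diff from this PR.
# pytest-asyncio's auto mode collects every `async def test_*` as an
# asyncio test without an explicit @pytest.mark.asyncio decorator.
[tool.pytest.ini_options]
asyncio_mode = "auto"
```

With this in place, a bare `pytest` invocation and `./scripts/unit-tests.sh` resolve to the same asyncio mode, so plain `async def` tests like the ones in this file behave identically under both entry points.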
110 lines · 4 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.common.content_types import TextContentItem
from llama_stack.apis.inference import (
    CompletionMessage,
    OpenAIAssistantMessageParam,
    OpenAIChatCompletionContentPartTextParam,
    OpenAISystemMessageParam,
    OpenAIUserMessageParam,
    SystemMessage,
    UserMessage,
)
from llama_stack.models.llama.datatypes import BuiltinTool, StopReason, ToolCall
from llama_stack.providers.utils.inference.openai_compat import (
    convert_message_to_openai_dict,
    openai_messages_to_messages,
)


async def test_convert_message_to_openai_dict():
    message = UserMessage(content=[TextContentItem(text="Hello, world!")], role="user")
    assert await convert_message_to_openai_dict(message) == {
        "role": "user",
        "content": [{"type": "text", "text": "Hello, world!"}],
    }


# Test convert_message_to_openai_dict with a tool call
async def test_convert_message_to_openai_dict_with_tool_call():
    message = CompletionMessage(
        content="",
        tool_calls=[
            ToolCall(call_id="123", tool_name="test_tool", arguments_json='{"foo": "bar"}', arguments={"foo": "bar"})
        ],
        stop_reason=StopReason.end_of_turn,
    )

    openai_dict = await convert_message_to_openai_dict(message)

    assert openai_dict == {
        "role": "assistant",
        "content": [{"type": "text", "text": ""}],
        "tool_calls": [
            {"id": "123", "type": "function", "function": {"name": "test_tool", "arguments": '{"foo": "bar"}'}}
        ],
    }


async def test_convert_message_to_openai_dict_with_builtin_tool_call():
    message = CompletionMessage(
        content="",
        tool_calls=[
            ToolCall(
                call_id="123",
                tool_name=BuiltinTool.brave_search,
                arguments_json='{"foo": "bar"}',
                arguments={"foo": "bar"},
            )
        ],
        stop_reason=StopReason.end_of_turn,
    )

    openai_dict = await convert_message_to_openai_dict(message)

    assert openai_dict == {
        "role": "assistant",
        "content": [{"type": "text", "text": ""}],
        "tool_calls": [
            {"id": "123", "type": "function", "function": {"name": "brave_search", "arguments": '{"foo": "bar"}'}}
        ],
    }


async def test_openai_messages_to_messages_with_content_str():
    openai_messages = [
        OpenAISystemMessageParam(content="system message"),
        OpenAIUserMessageParam(content="user message"),
        OpenAIAssistantMessageParam(content="assistant message"),
    ]

    llama_messages = openai_messages_to_messages(openai_messages)
    assert len(llama_messages) == 3
    assert isinstance(llama_messages[0], SystemMessage)
    assert isinstance(llama_messages[1], UserMessage)
    assert isinstance(llama_messages[2], CompletionMessage)
    assert llama_messages[0].content == "system message"
    assert llama_messages[1].content == "user message"
    assert llama_messages[2].content == "assistant message"


async def test_openai_messages_to_messages_with_content_list():
    openai_messages = [
        OpenAISystemMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="system message")]),
        OpenAIUserMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="user message")]),
        OpenAIAssistantMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="assistant message")]),
    ]

    llama_messages = openai_messages_to_messages(openai_messages)
    assert len(llama_messages) == 3
    assert isinstance(llama_messages[0], SystemMessage)
    assert isinstance(llama_messages[1], UserMessage)
    assert isinstance(llama_messages[2], CompletionMessage)
    assert llama_messages[0].content[0].text == "system message"
    assert llama_messages[1].content[0].text == "user message"
    assert llama_messages[2].content[0].text == "assistant message"