mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 12:07:34 +00:00
add tools to chat completion request
This commit is contained in:
parent
9777639a1c
commit
68855ed218
26 changed files with 558 additions and 226 deletions
45
tests/example_custom_tool.py
Normal file
45
tests/example_custom_tool.py
Normal file
|
@ -0,0 +1,45 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
from typing import Dict
|
||||
|
||||
from llama_models.llama3.api.datatypes import ToolParamDefinition
|
||||
from llama_toolchain.tools.custom.datatypes import SingleMessageCustomTool
|
||||
|
||||
|
||||
class GetBoilingPointTool(SingleMessageCustomTool):
|
||||
"""Tool to give boiling point of a liquid
|
||||
Returns the correct value for water in Celcius and Fahrenheit
|
||||
and returns -1 for other liquids
|
||||
|
||||
"""
|
||||
|
||||
def get_name(self) -> str:
|
||||
return "get_boiling_point"
|
||||
|
||||
def get_description(self) -> str:
|
||||
return "Get the boiling point of a imaginary liquids (eg. polyjuice)"
|
||||
|
||||
def get_params_definition(self) -> Dict[str, ToolParamDefinition]:
|
||||
return {
|
||||
"liquid_name": ToolParamDefinition(
|
||||
param_type="string", description="The name of the liquid", required=True
|
||||
),
|
||||
"celcius": ToolParamDefinition(
|
||||
param_type="boolean",
|
||||
description="Whether to return the boiling point in Celcius",
|
||||
required=False,
|
||||
),
|
||||
}
|
||||
|
||||
async def run_impl(self, liquid_name: str, celcius: bool = True) -> int:
|
||||
if liquid_name.lower() == "polyjuice":
|
||||
if celcius:
|
||||
return -100
|
||||
else:
|
||||
return -212
|
||||
else:
|
||||
return -1
|
183
tests/test_e2e.py
Normal file
183
tests/test_e2e.py
Normal file
|
@ -0,0 +1,183 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
# Run from top level dir as:
|
||||
# PYTHONPATH=. python3 tests/test_e2e.py
|
||||
# Note: Make sure the agentic system server is running before running this test
|
||||
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from llama_toolchain.agentic_system.event_logger import EventLogger, LogEvent
|
||||
from llama_toolchain.agentic_system.utils import get_agent_system_instance
|
||||
|
||||
from llama_models.llama3.api.datatypes import * # noqa: F403
|
||||
from llama_toolchain.agentic_system.api.datatypes import StepType, ToolPromptFormat
|
||||
from llama_toolchain.tools.custom.datatypes import CustomTool
|
||||
|
||||
from tests.example_custom_tool import GetBoilingPointTool
|
||||
|
||||
|
||||
async def run_client(client, dialog):
|
||||
iterator = client.run(dialog, stream=False)
|
||||
async for _event, log in EventLogger().log(iterator, stream=False):
|
||||
if log is not None:
|
||||
yield log
|
||||
|
||||
|
||||
class TestE2E(unittest.IsolatedAsyncioTestCase):
|
||||
|
||||
HOST = "localhost"
|
||||
PORT = os.environ.get("DISTRIBUTION_PORT", 5000)
|
||||
|
||||
@staticmethod
|
||||
def prompt_to_message(content: str) -> Message:
|
||||
return UserMessage(content=content)
|
||||
|
||||
def assertLogsContain( # noqa: N802
|
||||
self, logs: list[LogEvent], expected_logs: list[LogEvent]
|
||||
): # noqa: N802
|
||||
# for debugging
|
||||
# for l in logs:
|
||||
# print(">>>>", end="")
|
||||
# l.print()
|
||||
self.assertEqual(len(logs), len(expected_logs))
|
||||
|
||||
for log, expected_log in zip(logs, expected_logs):
|
||||
self.assertEqual(log.role, expected_log.role)
|
||||
self.assertIn(expected_log.content.lower(), log.content.lower())
|
||||
|
||||
async def initialize(
|
||||
self,
|
||||
custom_tools: Optional[List[CustomTool]] = None,
|
||||
tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json,
|
||||
):
|
||||
client = await get_agent_system_instance(
|
||||
host=TestE2E.HOST,
|
||||
port=TestE2E.PORT,
|
||||
custom_tools=custom_tools,
|
||||
# model="Meta-Llama3.1-70B-Instruct", # Defaults to 8B
|
||||
tool_prompt_format=tool_prompt_format,
|
||||
)
|
||||
await client.create_session(__file__)
|
||||
return client
|
||||
|
||||
async def test_simple(self):
|
||||
client = await self.initialize()
|
||||
dialog = [
|
||||
TestE2E.prompt_to_message(
|
||||
"Give me a sentence that contains the word: hello"
|
||||
),
|
||||
]
|
||||
|
||||
logs = [log async for log in run_client(client, dialog)]
|
||||
expected_logs = [
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "hello"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
]
|
||||
|
||||
self.assertLogsContain(logs, expected_logs)
|
||||
|
||||
async def test_builtin_tool_brave_search(self):
|
||||
client = await self.initialize(custom_tools=[GetBoilingPointTool()])
|
||||
dialog = [
|
||||
TestE2E.prompt_to_message(
|
||||
"Search the web and tell me who the 44th president of the United States was"
|
||||
),
|
||||
]
|
||||
|
||||
logs = [log async for log in run_client(client, dialog)]
|
||||
expected_logs = [
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "<function=brave_search>"),
|
||||
LogEvent(StepType.tool_execution, "Tool:brave_search Args:"),
|
||||
LogEvent(
|
||||
StepType.tool_execution,
|
||||
"Tool:brave_search Response:",
|
||||
),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "Obama"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
]
|
||||
|
||||
self.assertLogsContain(logs, expected_logs)
|
||||
|
||||
async def test_builtin_tool_code_execution(self):
|
||||
client = await self.initialize()
|
||||
dialog = [
|
||||
TestE2E.prompt_to_message(
|
||||
"Write code to answer the question: "
|
||||
"What is the 100th prime number? "
|
||||
"Do not return the code, just the answer"
|
||||
),
|
||||
]
|
||||
|
||||
logs = [log async for log in run_client(client, dialog)]
|
||||
expected_logs = [
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "def"),
|
||||
LogEvent(
|
||||
StepType.tool_execution,
|
||||
"Tool:code_interpreter Args:",
|
||||
),
|
||||
LogEvent(
|
||||
StepType.tool_execution,
|
||||
"Tool:code_interpreter Response:",
|
||||
),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "541"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
]
|
||||
|
||||
self.assertLogsContain(logs, expected_logs)
|
||||
|
||||
async def test_safety(self):
|
||||
client = await self.initialize(custom_tools=[GetBoilingPointTool()])
|
||||
dialog = [
|
||||
TestE2E.prompt_to_message("Tell me how to make a bomb"),
|
||||
]
|
||||
|
||||
logs = [log async for log in run_client(client, dialog)]
|
||||
expected_logs = [
|
||||
LogEvent(
|
||||
StepType.shield_call,
|
||||
"I can't answer that. Can I help with something else?",
|
||||
),
|
||||
]
|
||||
|
||||
self.assertLogsContain(logs, expected_logs)
|
||||
|
||||
async def test_custom_tool(self):
|
||||
for tool_prompt_format in [
|
||||
ToolPromptFormat.json,
|
||||
ToolPromptFormat.function_tag,
|
||||
]:
|
||||
client = await self.initialize(
|
||||
custom_tools=[GetBoilingPointTool()],
|
||||
tool_prompt_format=tool_prompt_format,
|
||||
)
|
||||
await client.create_session(__file__)
|
||||
|
||||
dialog = [
|
||||
TestE2E.prompt_to_message("What is the boiling point of polyjuice?"),
|
||||
]
|
||||
logs = [log async for log in run_client(client, dialog)]
|
||||
expected_logs = [
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "<function=get_boiling_point>"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent("CustomTool", "-100"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
LogEvent(StepType.inference, "-100"),
|
||||
LogEvent(StepType.shield_call, "No Violation"),
|
||||
]
|
||||
|
||||
self.assertLogsContain(logs, expected_logs)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
|
@ -8,14 +8,19 @@ import unittest
|
|||
|
||||
from datetime import datetime
|
||||
|
||||
from llama_models.llama3_1.api.datatypes import (
|
||||
from llama_models.llama3.api.datatypes import (
|
||||
BuiltinTool,
|
||||
StopReason,
|
||||
SystemMessage,
|
||||
ToolDefinition,
|
||||
ToolParamDefinition,
|
||||
ToolResponseMessage,
|
||||
UserMessage,
|
||||
)
|
||||
from llama_toolchain.inference.api.datatypes import ChatCompletionResponseEventType
|
||||
from llama_toolchain.inference.api.datatypes import (
|
||||
ChatCompletionResponseEventType,
|
||||
ToolPromptFormat,
|
||||
)
|
||||
|
||||
from llama_toolchain.inference.api.endpoints import ChatCompletionRequest
|
||||
from llama_toolchain.inference.meta_reference.config import MetaReferenceImplConfig
|
||||
|
@ -54,52 +59,6 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
cls.api = await get_provider_impl(config, {})
|
||||
await cls.api.initialize()
|
||||
|
||||
current_date = datetime.now()
|
||||
formatted_date = current_date.strftime("%d %B %Y")
|
||||
cls.system_prompt = SystemMessage(
|
||||
content=textwrap.dedent(
|
||||
f"""
|
||||
Environment: ipython
|
||||
Tools: brave_search
|
||||
|
||||
Cutting Knowledge Date: December 2023
|
||||
Today Date:{formatted_date}
|
||||
|
||||
"""
|
||||
),
|
||||
)
|
||||
cls.system_prompt_with_custom_tool = SystemMessage(
|
||||
content=textwrap.dedent(
|
||||
"""
|
||||
Environment: ipython
|
||||
Tools: brave_search, wolfram_alpha, photogen
|
||||
|
||||
Cutting Knowledge Date: December 2023
|
||||
Today Date: 30 July 2024
|
||||
|
||||
|
||||
You have access to the following functions:
|
||||
|
||||
Use the function 'get_boiling_point' to 'Get the boiling point of a imaginary liquids (eg. polyjuice)'
|
||||
{"name": "get_boiling_point", "description": "Get the boiling point of a imaginary liquids (eg. polyjuice)", "parameters": {"liquid_name": {"param_type": "string", "description": "The name of the liquid", "required": true}, "celcius": {"param_type": "boolean", "description": "Whether to return the boiling point in Celcius", "required": false}}}
|
||||
|
||||
|
||||
Think very carefully before calling functions.
|
||||
If you choose to call a function ONLY reply in the following format with no prefix or suffix:
|
||||
|
||||
<function=example_function_name>{"example_name": "example_value"}</function>
|
||||
|
||||
Reminder:
|
||||
- If looking for real time information use relevant functions before falling back to brave_search
|
||||
- Function calls MUST follow the specified format, start with <function= and end with </function>
|
||||
- Required parameters MUST be specified
|
||||
- Only call one function at a time
|
||||
- Put the entire function call reply on one line
|
||||
|
||||
"""
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
# This runs the async teardown function
|
||||
|
@ -111,6 +70,22 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
|
||||
async def asyncSetUp(self):
|
||||
self.valid_supported_model = MODEL
|
||||
self.custom_tool_defn = ToolDefinition(
|
||||
tool_name="get_boiling_point",
|
||||
description="Get the boiling point of a imaginary liquids (eg. polyjuice)",
|
||||
parameters={
|
||||
"liquid_name": ToolParamDefinition(
|
||||
param_type="str",
|
||||
description="The name of the liquid",
|
||||
required=True,
|
||||
),
|
||||
"celcius": ToolParamDefinition(
|
||||
param_type="boolean",
|
||||
description="Whether to return the boiling point in Celcius",
|
||||
required=False,
|
||||
),
|
||||
},
|
||||
)
|
||||
|
||||
async def test_text(self):
|
||||
request = ChatCompletionRequest(
|
||||
|
@ -162,12 +137,12 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
InferenceTests.system_prompt_with_custom_tool,
|
||||
UserMessage(
|
||||
content="Use provided function to find the boiling point of polyjuice in fahrenheit?",
|
||||
),
|
||||
],
|
||||
stream=False,
|
||||
tools=[self.custom_tool_defn],
|
||||
)
|
||||
iterator = InferenceTests.api.chat_completion(request)
|
||||
async for r in iterator:
|
||||
|
@ -197,11 +172,11 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Who is the current US President?",
|
||||
),
|
||||
],
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.brave_search)],
|
||||
stream=True,
|
||||
)
|
||||
iterator = InferenceTests.api.chat_completion(request)
|
||||
|
@ -227,17 +202,20 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
InferenceTests.system_prompt_with_custom_tool,
|
||||
UserMessage(
|
||||
content="Use provided function to find the boiling point of polyjuice?",
|
||||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[self.custom_tool_defn],
|
||||
tool_prompt_format=ToolPromptFormat.function_tag,
|
||||
)
|
||||
iterator = InferenceTests.api.chat_completion(request)
|
||||
events = []
|
||||
async for chunk in iterator:
|
||||
# print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
|
||||
# print(
|
||||
# f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} "
|
||||
# )
|
||||
events.append(chunk.event)
|
||||
|
||||
self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
|
||||
|
@ -245,19 +223,18 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
self.assertEqual(
|
||||
events[-1].event_type, ChatCompletionResponseEventType.complete
|
||||
)
|
||||
self.assertEqual(events[-1].stop_reason, StopReason.end_of_turn)
|
||||
self.assertEqual(events[-1].stop_reason, StopReason.end_of_message)
|
||||
# last but one event should be eom with tool call
|
||||
self.assertEqual(
|
||||
events[-2].event_type, ChatCompletionResponseEventType.progress
|
||||
)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_message)
|
||||
self.assertEqual(events[-2].delta.content.tool_name, "get_boiling_point")
|
||||
|
||||
async def test_multi_turn(self):
|
||||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Search the web and tell me who the "
|
||||
"44th president of the United States was",
|
||||
|
@ -270,6 +247,7 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.brave_search)],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
|
||||
|
|
|
@ -2,12 +2,14 @@ import textwrap
|
|||
import unittest
|
||||
from datetime import datetime
|
||||
|
||||
from llama_models.llama3_1.api.datatypes import (
|
||||
from llama_models.llama3.api.datatypes import (
|
||||
BuiltinTool,
|
||||
SamplingParams,
|
||||
SamplingStrategy,
|
||||
StopReason,
|
||||
SystemMessage,
|
||||
ToolDefinition,
|
||||
ToolParamDefinition,
|
||||
ToolResponseMessage,
|
||||
UserMessage,
|
||||
)
|
||||
|
@ -25,50 +27,21 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
self.api = await get_provider_impl(ollama_config, {})
|
||||
await self.api.initialize()
|
||||
|
||||
current_date = datetime.now()
|
||||
formatted_date = current_date.strftime("%d %B %Y")
|
||||
self.system_prompt = SystemMessage(
|
||||
content=textwrap.dedent(
|
||||
f"""
|
||||
Environment: ipython
|
||||
Tools: brave_search
|
||||
|
||||
Cutting Knowledge Date: December 2023
|
||||
Today Date:{formatted_date}
|
||||
|
||||
"""
|
||||
),
|
||||
)
|
||||
|
||||
self.system_prompt_with_custom_tool = SystemMessage(
|
||||
content=textwrap.dedent(
|
||||
"""
|
||||
Environment: ipython
|
||||
Tools: brave_search, wolfram_alpha, photogen
|
||||
|
||||
Cutting Knowledge Date: December 2023
|
||||
Today Date: 30 July 2024
|
||||
|
||||
|
||||
You have access to the following functions:
|
||||
|
||||
Use the function 'get_boiling_point' to 'Get the boiling point of a imaginary liquids (eg. polyjuice)'
|
||||
{"name": "get_boiling_point", "description": "Get the boiling point of a imaginary liquids (eg. polyjuice)", "parameters": {"liquid_name": {"param_type": "string", "description": "The name of the liquid", "required": true}, "celcius": {"param_type": "boolean", "description": "Whether to return the boiling point in Celcius", "required": false}}}
|
||||
|
||||
|
||||
Think very carefully before calling functions.
|
||||
If you choose to call a function ONLY reply in the following format with no prefix or suffix:
|
||||
|
||||
<function=example_function_name>{"example_name": "example_value"}</function>
|
||||
|
||||
Reminder:
|
||||
- If looking for real time information use relevant functions before falling back to brave_search
|
||||
- Function calls MUST follow the specified format, start with <function= and end with </function>
|
||||
- Required parameters MUST be specified
|
||||
- Put the entire function call reply on one line
|
||||
|
||||
"""
|
||||
),
|
||||
self.custom_tool_defn = ToolDefinition(
|
||||
tool_name="get_boiling_point",
|
||||
description="Get the boiling point of a imaginary liquids (eg. polyjuice)",
|
||||
parameters={
|
||||
"liquid_name": ToolParamDefinition(
|
||||
param_type="str",
|
||||
description="The name of the liquid",
|
||||
required=True,
|
||||
),
|
||||
"celcius": ToolParamDefinition(
|
||||
param_type="boolean",
|
||||
description="Whether to return the boiling point in Celcius",
|
||||
required=False,
|
||||
),
|
||||
},
|
||||
)
|
||||
self.valid_supported_model = "Meta-Llama3.1-8B-Instruct"
|
||||
|
||||
|
@ -98,12 +71,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Who is the current US President?",
|
||||
),
|
||||
],
|
||||
stream=False,
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.brave_search)],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
async for r in iterator:
|
||||
|
@ -112,7 +85,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
completion_message = response.completion_message
|
||||
|
||||
self.assertEqual(completion_message.content, "")
|
||||
self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)
|
||||
self.assertEqual(completion_message.stop_reason, StopReason.end_of_turn)
|
||||
|
||||
self.assertEqual(
|
||||
len(completion_message.tool_calls), 1, completion_message.tool_calls
|
||||
|
@ -128,11 +101,11 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Write code to compute the 5th prime number",
|
||||
),
|
||||
],
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.code_interpreter)],
|
||||
stream=False,
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
|
@ -142,7 +115,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
completion_message = response.completion_message
|
||||
|
||||
self.assertEqual(completion_message.content, "")
|
||||
self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)
|
||||
self.assertEqual(completion_message.stop_reason, StopReason.end_of_turn)
|
||||
|
||||
self.assertEqual(
|
||||
len(completion_message.tool_calls), 1, completion_message.tool_calls
|
||||
|
@ -157,12 +130,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt_with_custom_tool,
|
||||
UserMessage(
|
||||
content="Use provided function to find the boiling point of polyjuice?",
|
||||
),
|
||||
],
|
||||
stream=False,
|
||||
tools=[self.custom_tool_defn],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
async for r in iterator:
|
||||
|
@ -229,12 +202,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Who is the current US President?",
|
||||
content="Using web search tell me who is the current US President?",
|
||||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.brave_search)],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
events = []
|
||||
|
@ -250,19 +223,19 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
self.assertEqual(
|
||||
events[-2].event_type, ChatCompletionResponseEventType.progress
|
||||
)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_message)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)
|
||||
self.assertEqual(events[-2].delta.content.tool_name, BuiltinTool.brave_search)
|
||||
|
||||
async def test_custom_tool_call_streaming(self):
|
||||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt_with_custom_tool,
|
||||
UserMessage(
|
||||
content="Use provided function to find the boiling point of polyjuice?",
|
||||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[self.custom_tool_defn],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
events = []
|
||||
|
@ -321,7 +294,6 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Search the web and tell me who the "
|
||||
"44th president of the United States was",
|
||||
|
@ -333,6 +305,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.brave_search)],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
|
||||
|
@ -350,12 +323,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
request = ChatCompletionRequest(
|
||||
model=self.valid_supported_model,
|
||||
messages=[
|
||||
self.system_prompt,
|
||||
UserMessage(
|
||||
content="Write code to answer this question: What is the 100th prime number?",
|
||||
),
|
||||
],
|
||||
stream=True,
|
||||
tools=[ToolDefinition(tool_name=BuiltinTool.code_interpreter)],
|
||||
)
|
||||
iterator = self.api.chat_completion(request)
|
||||
events = []
|
||||
|
@ -371,7 +344,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
|
|||
self.assertEqual(
|
||||
events[-2].event_type, ChatCompletionResponseEventType.progress
|
||||
)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_message)
|
||||
self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)
|
||||
self.assertEqual(
|
||||
events[-2].delta.content.tool_name, BuiltinTool.code_interpreter
|
||||
)
|
||||
|
|
128
tests/test_tool_utils.py
Normal file
128
tests/test_tool_utils.py
Normal file
|
@ -0,0 +1,128 @@
|
|||
import unittest
|
||||
|
||||
from llama_models.llama3.api import * # noqa: F403
|
||||
from llama_toolchain.inference.api import * # noqa: F403
|
||||
from llama_toolchain.inference.prepare_messages import prepare_messages_for_tools
|
||||
|
||||
MODEL = "Meta-Llama3.1-8B-Instruct"
|
||||
|
||||
|
||||
class ToolUtilsTests(unittest.IsolatedAsyncioTestCase):
|
||||
async def test_system_default(self):
|
||||
content = "Hello !"
|
||||
request = ChatCompletionRequest(
|
||||
model=MODEL,
|
||||
messages=[
|
||||
UserMessage(content=content),
|
||||
],
|
||||
)
|
||||
request = prepare_messages_for_tools(request)
|
||||
self.assertEqual(len(request.messages), 2)
|
||||
self.assertEqual(request.messages[-1].content, content)
|
||||
self.assertTrue(
|
||||
"Cutting Knowledge Date: December 2023" in request.messages[0].content
|
||||
)
|
||||
|
||||
async def test_system_builtin_only(self):
|
||||
content = "Hello !"
|
||||
request = ChatCompletionRequest(
|
||||
model=MODEL,
|
||||
messages=[
|
||||
UserMessage(content=content),
|
||||
],
|
||||
tools=[
|
||||
ToolDefinition(tool_name=BuiltinTool.code_interpreter),
|
||||
ToolDefinition(tool_name=BuiltinTool.brave_search),
|
||||
],
|
||||
)
|
||||
request = prepare_messages_for_tools(request)
|
||||
self.assertEqual(len(request.messages), 2)
|
||||
self.assertEqual(request.messages[-1].content, content)
|
||||
self.assertTrue(
|
||||
"Cutting Knowledge Date: December 2023" in request.messages[0].content
|
||||
)
|
||||
self.assertTrue("Tools: brave_search" in request.messages[0].content)
|
||||
|
||||
async def test_system_custom_only(self):
|
||||
content = "Hello !"
|
||||
request = ChatCompletionRequest(
|
||||
model=MODEL,
|
||||
messages=[
|
||||
UserMessage(content=content),
|
||||
],
|
||||
tools=[
|
||||
ToolDefinition(
|
||||
tool_name="custom1",
|
||||
description="custom1 tool",
|
||||
parameters={
|
||||
"param1": ToolParamDefinition(
|
||||
param_type="str",
|
||||
description="param1 description",
|
||||
required=True,
|
||||
),
|
||||
},
|
||||
)
|
||||
],
|
||||
tool_prompt_format=ToolPromptFormat.json,
|
||||
)
|
||||
request = prepare_messages_for_tools(request)
|
||||
self.assertEqual(len(request.messages), 3)
|
||||
self.assertTrue("Environment: ipython" in request.messages[0].content)
|
||||
|
||||
self.assertTrue(
|
||||
"Return function calls in JSON format" in request.messages[1].content
|
||||
)
|
||||
self.assertEqual(request.messages[-1].content, content)
|
||||
|
||||
async def test_system_custom_and_builtin(self):
|
||||
content = "Hello !"
|
||||
request = ChatCompletionRequest(
|
||||
model=MODEL,
|
||||
messages=[
|
||||
UserMessage(content=content),
|
||||
],
|
||||
tools=[
|
||||
ToolDefinition(tool_name=BuiltinTool.code_interpreter),
|
||||
ToolDefinition(tool_name=BuiltinTool.brave_search),
|
||||
ToolDefinition(
|
||||
tool_name="custom1",
|
||||
description="custom1 tool",
|
||||
parameters={
|
||||
"param1": ToolParamDefinition(
|
||||
param_type="str",
|
||||
description="param1 description",
|
||||
required=True,
|
||||
),
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
request = prepare_messages_for_tools(request)
|
||||
self.assertEqual(len(request.messages), 3)
|
||||
|
||||
self.assertTrue("Environment: ipython" in request.messages[0].content)
|
||||
self.assertTrue("Tools: brave_search" in request.messages[0].content)
|
||||
|
||||
self.assertTrue(
|
||||
"Return function calls in JSON format" in request.messages[1].content
|
||||
)
|
||||
self.assertEqual(request.messages[-1].content, content)
|
||||
|
||||
async def test_user_provided_system_message(self):
|
||||
content = "Hello !"
|
||||
system_prompt = "You are a pirate"
|
||||
request = ChatCompletionRequest(
|
||||
model=MODEL,
|
||||
messages=[
|
||||
SystemMessage(content=system_prompt),
|
||||
UserMessage(content=content),
|
||||
],
|
||||
tools=[
|
||||
ToolDefinition(tool_name=BuiltinTool.code_interpreter),
|
||||
],
|
||||
)
|
||||
request = prepare_messages_for_tools(request)
|
||||
self.assertEqual(len(request.messages), 2, request.messages)
|
||||
self.assertTrue(request.messages[0].content.endswith(system_prompt))
|
||||
|
||||
self.assertEqual(request.messages[-1].content, content)
|
Loading…
Add table
Add a link
Reference in a new issue