update tests and formatting
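
Point the Ollama test config at the llama3.1:8b-instruct-fp16 model tag,
reflow assertions and textwrap.dedent() calls to black-style line lengths,
and drop the "Only call one function at a time" line from the custom-tool
system prompt.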

Hardik Shah 2024-08-05 12:34:16 -07:00
parent d7a4cdd70d
commit 7664d5701d
2 changed files with 86 additions and 129 deletions


@@ -16,23 +16,16 @@ from llama_toolchain.inference.api_instance import (
 from llama_toolchain.inference.api.datatypes import (
     ChatCompletionResponseEventType,
 )
-from llama_toolchain.inference.api.endpoints import (
-    ChatCompletionRequest
-)
-from llama_toolchain.inference.api.config import (
-    InferenceConfig,
-    OllamaImplConfig
-)
-from llama_toolchain.inference.ollama import (
-    OllamaInference
-)
+from llama_toolchain.inference.api.endpoints import ChatCompletionRequest
+from llama_toolchain.inference.api.config import InferenceConfig, OllamaImplConfig

 class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
     async def asyncSetUp(self):
         self.valid_supported_model = "Meta-Llama3.1-8B-Instruct"
         ollama_config = OllamaImplConfig(
-            model="llama3.1",
+            model="llama3.1:8b-instruct-fp16",
             url="http://localhost:11434",
         )
@@ -45,18 +38,21 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         current_date = datetime.now()
         formatted_date = current_date.strftime("%d %B %Y")
         self.system_prompt = SystemMessage(
-            content=textwrap.dedent(f"""
+            content=textwrap.dedent(
+                f"""
                 Environment: ipython
                 Tools: brave_search
                 Cutting Knowledge Date: December 2023
                 Today Date:{formatted_date}
-            """),
+            """
+            ),
         )
         self.system_prompt_with_custom_tool = SystemMessage(
-            content=textwrap.dedent("""
+            content=textwrap.dedent(
+                """
                 Environment: ipython
                 Tools: brave_search, wolfram_alpha, photogen
@@ -79,7 +75,6 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
                 - If looking for real time information use relevant functions before falling back to brave_search
                 - Function calls MUST follow the specified format, start with <function= and end with </function>
                 - Required parameters MUST be specified
-                - Only call one function at a time
                 - Put the entire function call reply on one line
                 """
@@ -105,7 +100,9 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             response = r
         self.assertTrue("Paris" in response.completion_message.content)
-        self.assertEqual(response.completion_message.stop_reason, StopReason.end_of_turn)
+        self.assertEqual(
+            response.completion_message.stop_reason, StopReason.end_of_turn
+        )

     async def test_tool_call(self):
         request = ChatCompletionRequest(
@@ -127,8 +124,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual(completion_message.content, "")
         self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)
-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, BuiltinTool.brave_search)
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, BuiltinTool.brave_search
+        )
         self.assertTrue(
             "president" in completion_message.tool_calls[0].arguments["query"].lower()
         )
@@ -153,8 +154,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual(completion_message.content, "")
         self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)
-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, BuiltinTool.code_interpreter)
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, BuiltinTool.code_interpreter
+        )
         code = completion_message.tool_calls[0].arguments["code"]
         self.assertTrue("def " in code.lower(), code)
@@ -177,20 +182,24 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual(completion_message.content, "")
         self.assertTrue(
-            completion_message.stop_reason in {
+            completion_message.stop_reason
+            in {
                 StopReason.end_of_turn,
                 StopReason.end_of_message,
             }
         )
-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, "get_boiling_point")
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, "get_boiling_point"
+        )

         args = completion_message.tool_calls[0].arguments
         self.assertTrue(isinstance(args, dict))
         self.assertTrue(args["liquid_name"], "polyjuice")

     async def test_text_streaming(self):
         request = ChatCompletionRequest(
             model=self.valid_supported_model,
@@ -211,19 +220,14 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         for e in events[1:-1]:
             response += e.delta
-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
         # last but 1 event should be of type "progress"
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
         self.assertEqual(
             events[-2].stop_reason,
@@ -248,14 +252,10 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)
-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )

     async def test_custom_tool_call_streaming(self):
@@ -275,32 +275,18 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)
-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
-        )
-        self.assertEqual(
-            events[-1].stop_reason,
-            StopReason.end_of_turn
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
+        self.assertEqual(events[-1].stop_reason, StopReason.end_of_turn)
         # last but one event should be eom with tool call
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
-        )
-        self.assertEqual(
-            events[-2].delta.content.tool_name,
-            "get_boiling_point"
-        )
-        self.assertEqual(
-            events[-2].stop_reason,
-            StopReason.end_of_turn
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
+        self.assertEqual(events[-2].delta.content.tool_name, "get_boiling_point")
+        self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)

     def test_resolve_ollama_model(self):
         ollama_model = self.api.resolve_ollama_model(self.valid_supported_model)
@@ -325,7 +311,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
                 sampling_strategy=SamplingStrategy.top_p,
                 top_p=0.99,
                 temperature=1.0,
-            )
+            ),
         )
         options = self.api.get_ollama_chat_options(request)
         self.assertEqual(
@@ -333,5 +319,5 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             {
                 "temperature": 1.0,
                 "top_p": 0.99,
-            }
+            },
         )