forked from phoenix-oss/llama-stack-mirror
update tests and formatting
parent d7a4cdd70d
commit 7664d5701d
2 changed files with 86 additions and 129 deletions

@@ -10,14 +10,12 @@ from datetime import datetime

 from llama_models.llama3_1.api.datatypes import (
     BuiltinTool,
-    InstructModel,
     UserMessage,
     StopReason,
     SystemMessage,
 )

 from llama_toolchain.inference.api.config import (
-    ImplType,
     InferenceConfig,
     InlineImplConfig,
     RemoteImplConfig,
@@ -31,11 +29,7 @@ from llama_toolchain.inference.api_instance import (
 from llama_toolchain.inference.api.datatypes import (
     ChatCompletionResponseEventType,
 )
-from llama_toolchain.inference.api.endpoints import (
-    ChatCompletionRequest
-)
-from llama_toolchain.inference.inference import InferenceImpl
-from llama_toolchain.inference.event_logger import EventLogger
+from llama_toolchain.inference.api.endpoints import ChatCompletionRequest


 HELPER_MSG = """
@@ -56,7 +50,9 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
     @classmethod
     async def asyncSetUpClass(cls):
         # assert model exists on local
-        model_dir = os.path.expanduser("~/.llama/checkpoints/Meta-Llama-3.1-8B-Instruct/original/")
+        model_dir = os.path.expanduser(
+            "~/.llama/checkpoints/Meta-Llama-3.1-8B-Instruct/original/"
+        )
         assert os.path.isdir(model_dir), HELPER_MSG

         tokenizer_path = os.path.join(model_dir, "tokenizer.model")
@@ -73,17 +69,11 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
             ),
             max_seq_len=2048,
         )
-        inference_config = InferenceConfig(
-            impl_config=inline_config
-        )
+        inference_config = InferenceConfig(impl_config=inline_config)

         # -- For faster testing iteration --
-        # remote_config = RemoteImplConfig(
-        #     url="http://localhost:5000"
-        # )
-        # inference_config = InferenceConfig(
-        #     impl_config=remote_config
-        # )
+        # remote_config = RemoteImplConfig(url="http://localhost:5000")
+        # inference_config = InferenceConfig(impl_config=remote_config)

         cls.api = await get_inference_api_instance(inference_config)
         await cls.api.initialize()
@@ -91,17 +81,20 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
         current_date = datetime.now()
         formatted_date = current_date.strftime("%d %B %Y")
         cls.system_prompt = SystemMessage(
-            content=textwrap.dedent(f"""
+            content=textwrap.dedent(
+                f"""
                 Environment: ipython
                 Tools: brave_search

                 Cutting Knowledge Date: December 2023
                 Today Date:{formatted_date}

-            """),
+                """
+            ),
         )
         cls.system_prompt_with_custom_tool = SystemMessage(
-            content=textwrap.dedent("""
+            content=textwrap.dedent(
+                """
                 Environment: ipython
                 Tools: brave_search, wolfram_alpha, photogen

@@ -140,9 +133,12 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
     async def asyncTearDownClass(cls):
         await cls.api.shutdown()

+    async def asyncSetUp(self):
+        self.valid_supported_model = "Meta-Llama3.1-8B-Instruct"
+
     async def test_text(self):
         request = ChatCompletionRequest(
-            model=InstructModel.llama3_8b_chat,
+            model=self.valid_supported_model,
             messages=[
                 UserMessage(
                     content="What is the capital of France?",
@@ -160,7 +156,7 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):

     async def test_text_streaming(self):
         request = ChatCompletionRequest(
-            model=InstructModel.llama3_8b_chat,
+            model=self.valid_supported_model,
             messages=[
                 UserMessage(
                     content="What is the capital of France?",
@@ -175,13 +171,9 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
             events.append(chunk.event)
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
-        self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
+        self.assertEqual(
+            events[-1].event_type, ChatCompletionResponseEventType.complete
+        )

         response = ""
@@ -192,7 +184,7 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):

     async def test_custom_tool_call(self):
         request = ChatCompletionRequest(
-            model=InstructModel.llama3_8b_chat,
+            model=self.valid_supported_model,
             messages=[
                 InferenceTests.system_prompt_with_custom_tool,
                 UserMessage(
@@ -214,8 +206,12 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
         # instead of end_of_turn
         # self.assertEqual(completion_message.stop_reason, StopReason.end_of_turn)

-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, "get_boiling_point")
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, "get_boiling_point"
+        )

         args = completion_message.tool_calls[0].arguments
         self.assertTrue(isinstance(args, dict))
@@ -223,7 +219,7 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):

     async def test_tool_call_streaming(self):
         request = ChatCompletionRequest(
-            model=InstructModel.llama3_8b_chat,
+            model=self.valid_supported_model,
             messages=[
                 self.system_prompt,
                 UserMessage(
@@ -239,32 +235,21 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
         # last but one event should be eom with tool call
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
-        )
-        self.assertEqual(
-            events[-2].stop_reason,
-            StopReason.end_of_message
-        )
-        self.assertEqual(
-            events[-2].delta.content.tool_name,
-            BuiltinTool.brave_search
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
+        self.assertEqual(events[-2].stop_reason, StopReason.end_of_message)
+        self.assertEqual(events[-2].delta.content.tool_name, BuiltinTool.brave_search)

     async def test_custom_tool_call_streaming(self):
         request = ChatCompletionRequest(
-            model=InstructModel.llama3_8b_chat,
+            model=self.valid_supported_model,
             messages=[
                 InferenceTests.system_prompt_with_custom_tool,
                 UserMessage(
@@ -279,29 +264,15 @@ class InferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
-        )
-        self.assertEqual(
-            events[-1].stop_reason,
-            StopReason.end_of_turn
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
+        self.assertEqual(events[-1].stop_reason, StopReason.end_of_turn)
         # last but one event should be eom with tool call
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
-        )
-        self.assertEqual(
-            events[-2].stop_reason,
-            StopReason.end_of_turn
-        )
-        self.assertEqual(
-            events[-2].delta.content.tool_name,
-            "get_boiling_point"
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
+        self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)
+        self.assertEqual(events[-2].delta.content.tool_name, "get_boiling_point")

@@ -16,23 +16,16 @@ from llama_toolchain.inference.api_instance import (
 from llama_toolchain.inference.api.datatypes import (
     ChatCompletionResponseEventType,
 )
-from llama_toolchain.inference.api.endpoints import (
-    ChatCompletionRequest
-)
-from llama_toolchain.inference.api.config import (
-    InferenceConfig,
-    OllamaImplConfig
-)
-from llama_toolchain.inference.ollama import (
-    OllamaInference
-)
+from llama_toolchain.inference.api.endpoints import ChatCompletionRequest
+from llama_toolchain.inference.api.config import InferenceConfig, OllamaImplConfig


 class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):

     async def asyncSetUp(self):
+        self.valid_supported_model = "Meta-Llama3.1-8B-Instruct"
         ollama_config = OllamaImplConfig(
-            model="llama3.1",
+            model="llama3.1:8b-instruct-fp16",
             url="http://localhost:11434",
         )

@@ -45,18 +38,21 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         current_date = datetime.now()
         formatted_date = current_date.strftime("%d %B %Y")
         self.system_prompt = SystemMessage(
-            content=textwrap.dedent(f"""
+            content=textwrap.dedent(
+                f"""
                 Environment: ipython
                 Tools: brave_search

                 Cutting Knowledge Date: December 2023
                 Today Date:{formatted_date}

-            """),
+                """
+            ),
         )

         self.system_prompt_with_custom_tool = SystemMessage(
-            content=textwrap.dedent("""
+            content=textwrap.dedent(
+                """
                 Environment: ipython
                 Tools: brave_search, wolfram_alpha, photogen

@@ -79,7 +75,6 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
                 - If looking for real time information use relevant functions before falling back to brave_search
                 - Function calls MUST follow the specified format, start with <function= and end with </function>
                 - Required parameters MUST be specified
-                - Only call one function at a time
                 - Put the entire function call reply on one line

                 """
@@ -105,7 +100,9 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
                 response = r

         self.assertTrue("Paris" in response.completion_message.content)
-        self.assertEqual(response.completion_message.stop_reason, StopReason.end_of_turn)
+        self.assertEqual(
+            response.completion_message.stop_reason, StopReason.end_of_turn
+        )

     async def test_tool_call(self):
         request = ChatCompletionRequest(
@@ -127,8 +124,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual(completion_message.content, "")
         self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)

-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, BuiltinTool.brave_search)
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, BuiltinTool.brave_search
+        )
         self.assertTrue(
             "president" in completion_message.tool_calls[0].arguments["query"].lower()
         )
@@ -153,8 +154,12 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         self.assertEqual(completion_message.content, "")
         self.assertEqual(completion_message.stop_reason, StopReason.end_of_message)

-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, BuiltinTool.code_interpreter)
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, BuiltinTool.code_interpreter
+        )
         code = completion_message.tool_calls[0].arguments["code"]
         self.assertTrue("def " in code.lower(), code)

@@ -177,20 +182,24 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):

         self.assertEqual(completion_message.content, "")
         self.assertTrue(
-            completion_message.stop_reason in {
+            completion_message.stop_reason
+            in {
                 StopReason.end_of_turn,
                 StopReason.end_of_message,
             }
         )

-        self.assertEqual(len(completion_message.tool_calls), 1, completion_message.tool_calls)
-        self.assertEqual(completion_message.tool_calls[0].tool_name, "get_boiling_point")
+        self.assertEqual(
+            len(completion_message.tool_calls), 1, completion_message.tool_calls
+        )
+        self.assertEqual(
+            completion_message.tool_calls[0].tool_name, "get_boiling_point"
+        )

         args = completion_message.tool_calls[0].arguments
         self.assertTrue(isinstance(args, dict))
         self.assertTrue(args["liquid_name"], "polyjuice")

-
     async def test_text_streaming(self):
         request = ChatCompletionRequest(
             model=self.valid_supported_model,
@@ -211,19 +220,14 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
         for e in events[1:-1]:
             response += e.delta

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
         # last but 1 event should be of type "progress"
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
         self.assertEqual(
             events[-2].stop_reason,
@@ -248,14 +252,10 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )

     async def test_custom_tool_call_streaming(self):
@@ -275,32 +275,18 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             # print(f"{chunk.event.event_type:<40} | {str(chunk.event.stop_reason):<26} | {chunk.event.delta} ")
             events.append(chunk.event)

-        self.assertEqual(
-            events[0].event_type,
-            ChatCompletionResponseEventType.start
-        )
+        self.assertEqual(events[0].event_type, ChatCompletionResponseEventType.start)
         # last event is of type "complete"
         self.assertEqual(
-            events[-1].event_type,
-            ChatCompletionResponseEventType.complete
-        )
-        self.assertEqual(
-            events[-1].stop_reason,
-            StopReason.end_of_turn
+            events[-1].event_type, ChatCompletionResponseEventType.complete
         )
+        self.assertEqual(events[-1].stop_reason, StopReason.end_of_turn)
         # last but one event should be eom with tool call
         self.assertEqual(
-            events[-2].event_type,
-            ChatCompletionResponseEventType.progress
-        )
-        self.assertEqual(
-            events[-2].delta.content.tool_name,
-            "get_boiling_point"
-        )
-        self.assertEqual(
-            events[-2].stop_reason,
-            StopReason.end_of_turn
+            events[-2].event_type, ChatCompletionResponseEventType.progress
         )
+        self.assertEqual(events[-2].delta.content.tool_name, "get_boiling_point")
+        self.assertEqual(events[-2].stop_reason, StopReason.end_of_turn)

     def test_resolve_ollama_model(self):
         ollama_model = self.api.resolve_ollama_model(self.valid_supported_model)
@@ -325,7 +311,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
                 sampling_strategy=SamplingStrategy.top_p,
                 top_p=0.99,
                 temperature=1.0,
-            )
+            ),
         )
         options = self.api.get_ollama_chat_options(request)
         self.assertEqual(
@@ -333,5 +319,5 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             {
                 "temperature": 1.0,
                 "top_p": 0.99,
-            }
+            },
         )