use templates for generating system prompts

This commit is contained in:
Hardik Shah 2024-08-23 14:21:12 -07:00
parent 68855ed218
commit ab8193c88c
8 changed files with 410 additions and 218 deletions

View file

@@ -13,7 +13,10 @@ from llama_models.llama3.api.datatypes import (
ToolResponseMessage,
UserMessage,
)
from llama_toolchain.inference.api.datatypes import ChatCompletionResponseEventType
from llama_toolchain.inference.api.datatypes import (
ChatCompletionResponseEventType,
ToolPromptFormat,
)
from llama_toolchain.inference.api.endpoints import ChatCompletionRequest
from llama_toolchain.inference.ollama.config import OllamaImplConfig
from llama_toolchain.inference.ollama.ollama import get_provider_impl
@@ -236,6 +239,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
],
stream=True,
tools=[self.custom_tool_defn],
tool_prompt_format=ToolPromptFormat.function_tag,
)
iterator = self.api.chat_completion(request)
events = []