Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
use templates for generating system prompts

commit ab8193c88c
parent 68855ed218

8 changed files with 410 additions and 218 deletions
@@ -13,7 +13,10 @@ from llama_models.llama3.api.datatypes import (
     ToolResponseMessage,
     UserMessage,
 )
-from llama_toolchain.inference.api.datatypes import ChatCompletionResponseEventType
+from llama_toolchain.inference.api.datatypes import (
+    ChatCompletionResponseEventType,
+    ToolPromptFormat,
+)
 from llama_toolchain.inference.api.endpoints import ChatCompletionRequest
 from llama_toolchain.inference.ollama.config import OllamaImplConfig
 from llama_toolchain.inference.ollama.ollama import get_provider_impl
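The consolidated import above pulls in ToolPromptFormat alongside the existing event type. For orientation, here is a minimal sketch of what such an enum could look like; only the function_tag member is attested by this diff, so the json member and the string values are assumptions:

from enum import Enum

class ToolPromptFormat(Enum):
    # How tool definitions are rendered into the system prompt.
    # function_tag is the only member this commit exercises; json and
    # the string values are illustrative assumptions.
    json = "json"
    function_tag = "function_tag"

Keeping the format as an explicit request field lets the provider select a matching system-prompt template, which is presumably what the commit title refers to.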
@@ -236,6 +239,7 @@ class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
             ],
             stream=True,
             tools=[self.custom_tool_defn],
+            tool_prompt_format=ToolPromptFormat.function_tag,
         )
         iterator = self.api.chat_completion(request)
         events = []
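Pieced together, the updated test body around the second hunk plausibly reads like the sketch below. The test method name, model name, message content, and the closing assertion are assumptions filled in for illustration; only stream, tools, tool_prompt_format, and the iterator/events lines come from the diff, and self.api plus self.custom_tool_defn are assumed to be initialized in the test's setup, as the surrounding class implies:

import unittest

from llama_models.llama3.api.datatypes import UserMessage
from llama_toolchain.inference.api.datatypes import (
    ChatCompletionResponseEventType,
    ToolPromptFormat,
)
from llama_toolchain.inference.api.endpoints import ChatCompletionRequest


class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
    async def test_custom_tool_function_tag(self):  # hypothetical test name
        # Streaming chat completion that asks the provider to render the
        # custom tool definition using the function-tag prompt format.
        request = ChatCompletionRequest(
            model="Meta-Llama3.1-8B-Instruct",  # placeholder, not from the diff
            messages=[
                UserMessage(content="Use the custom tool to answer."),  # placeholder
            ],
            stream=True,
            tools=[self.custom_tool_defn],
            tool_prompt_format=ToolPromptFormat.function_tag,
        )
        iterator = self.api.chat_completion(request)
        events = []
        async for chunk in iterator:
            events.append(chunk.event)

        # A streaming response conventionally opens with a start event;
        # this assertion is an assumption, not shown in the diff.
        self.assertEqual(
            events[0].event_type, ChatCompletionResponseEventType.start
        )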