mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 12:07:34 +00:00)

add tools to chat completion request

parent 9777639a1c
commit 68855ed218
26 changed files with 558 additions and 226 deletions
```diff
@@ -15,6 +15,41 @@ from typing_extensions import Annotated

 from llama_models.llama3.api.datatypes import *  # noqa: F403


+@json_schema_type
+class ToolChoice(Enum):
+    auto = "auto"
+    required = "required"
+
+
+@json_schema_type
+class ToolPromptFormat(Enum):
+    """This Enum refers to the prompt format for calling zero-shot tools.
+
+    `json` --
+        Refers to the JSON format for calling tools.
+        A call in the json format takes the form:
+        {
+            "type": "function",
+            "function": {
+                "name": "function_name",
+                "description": "function_description",
+                "parameters": {...}
+            }
+        }
+
+    `function_tag` --
+        An example of how you could define your own user-defined
+        format for making tool calls. A call in the function_tag
+        format looks like this:
+        <function=function_name>(parameters)</function>
+
+    The detailed prompts for each of these formats are defined in `system_prompt.py`.
+    """
+
+    json = "json"
+    function_tag = "function_tag"
+
+
 class LogProbConfig(BaseModel):
     top_k: Optional[int] = 0
```
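As a rough illustration of the difference between the two formats (not taken from this commit; the tool name and arguments below are hypothetical, and the authoritative templates live in `system_prompt.py`), the same call could be rendered as:

```python
import json

# Hypothetical tool call used only for illustration.
name = "get_weather"
arguments = {"city": "Menlo Park"}

# ToolPromptFormat.json: a JSON object describing the call.
json_style = json.dumps(
    {
        "type": "function",
        "function": {"name": name, "parameters": arguments},
    }
)

# ToolPromptFormat.function_tag: the name and parameters wrapped in a tag.
function_tag_style = f"<function={name}>({json.dumps(arguments)})</function>"

print(json_style)
print(function_tag_style)
```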
```diff
@@ -7,6 +7,8 @@

 from .datatypes import *  # noqa: F403
 from typing import Optional, Protocol

+from llama_models.llama3.api.datatypes import ToolDefinition
+
 # this dependency is annoying and we need a forked up version anyway
 from llama_models.schema_utils import webmethod
```
```diff
@@ -56,7 +58,11 @@ class ChatCompletionRequest(BaseModel):
     sampling_params: Optional[SamplingParams] = SamplingParams()

     # zero-shot tool definitions as input to the model
-    available_tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tool_choice: Optional[ToolChoice] = Field(default=ToolChoice.auto)
+    tool_prompt_format: Optional[ToolPromptFormat] = Field(
+        default=ToolPromptFormat.json
+    )

     stream: Optional[bool] = False
     logprobs: Optional[LogProbConfig] = None
```
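A minimal sketch of how a caller might populate the renamed and added fields; the model name, the `messages` field, and the `UserMessage`/`ToolDefinition` constructor arguments are assumptions, not shown in this diff:

```python
# Hypothetical request construction; only tools, tool_choice, and
# tool_prompt_format come from this diff, the rest is assumed.
request = ChatCompletionRequest(
    model="llama3-8b-instruct",
    messages=[UserMessage(content="What's the weather in Menlo Park?")],
    tools=[ToolDefinition(tool_name="get_weather")],
    tool_choice=ToolChoice.auto,
    tool_prompt_format=ToolPromptFormat.function_tag,
)
```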
```diff
@@ -82,8 +88,11 @@ class BatchChatCompletionRequest(BaseModel):
     sampling_params: Optional[SamplingParams] = SamplingParams()

     # zero-shot tool definitions as input to the model
-    available_tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
+    tool_choice: Optional[ToolChoice] = Field(default=ToolChoice.auto)
+    tool_prompt_format: Optional[ToolPromptFormat] = Field(
+        default=ToolPromptFormat.json
+    )

     logprobs: Optional[LogProbConfig] = None
```
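Because every new field has a default, batch callers that don't use tools should need no changes. A sketch under the assumption that `BatchChatCompletionRequest` also carries a model name and a `messages_batch` field (neither is shown in this diff):

```python
# Hypothetical batch request without tools; the new fields fall back
# to their defaults from the diff above.
batch = BatchChatCompletionRequest(
    model="llama3-8b-instruct",
    messages_batch=[
        [UserMessage(content="Hello")],
        [UserMessage(content="Summarize this diff")],
    ],
)
assert batch.tools == []
assert batch.tool_choice is ToolChoice.auto
assert batch.tool_prompt_format is ToolPromptFormat.json
```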