diff --git a/llama_toolchain/observability/api/endpoints.py b/llama_toolchain/observability/api/endpoints.py
index 95870d11c..3f993ac2d 100644
--- a/llama_toolchain/observability/api/endpoints.py
+++ b/llama_toolchain/observability/api/endpoints.py
@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional, Protocol
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel
-from llama_models.llama3_1.api.datatypes import * # noqa: F403
+from llama_models.llama3.api.datatypes import * # noqa: F403
from .datatypes import * # noqa: F403
diff --git a/llama_toolchain/stack.py b/llama_toolchain/stack.py
index dc0cc3c5d..88a54976c 100644
--- a/llama_toolchain/stack.py
+++ b/llama_toolchain/stack.py
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from llama_models.llama3_1.api.datatypes import * # noqa: F403
+from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_toolchain.agentic_system.api import * # noqa: F403
from llama_toolchain.dataset.api import * # noqa: F403
from llama_toolchain.evaluations.api import * # noqa: F403
diff --git a/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.html b/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.html
index 4e0133108..f59653edc 100644
--- a/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.html
+++ b/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.html
@@ -21,7 +21,7 @@
"info": {
"title": "[DRAFT] Llama Stack Specification",
"version": "0.0.1",
- "description": "This is the specification of the llama stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models. The specification is still in draft and subject to change.\n Generated at 2024-08-15 17:30:18.232105"
+ "description": "This is the specification of the llama stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models. The specification is still in draft and subject to change.\n Generated at 2024-08-20 19:00:39.110138"
},
"servers": [
{
@@ -2580,6 +2580,9 @@
}
]
}
+ },
+ "tool_prompt_format": {
+ "$ref": "#/components/schemas/ToolPromptFormat"
}
},
"additionalProperties": false,
@@ -2726,6 +2729,15 @@
"on_violation_action"
]
},
+ "ToolPromptFormat": {
+ "type": "string",
+ "enum": [
+ "json",
+ "function_tag"
+ ],
+ "title": "This Enum refers to the prompt format for calling zero shot tools",
+        "description": "`json` --\n    Refers to the json format for calling tools.\n    The json format takes the form like\n    {\n        \"type\": \"function\",\n        \"function\" : {\n            \"name\": \"function_name\",\n            \"description\": \"function_description\",\n            \"parameters\": {...}\n        }\n    }\n\n`function_tag` --\n    This is an example of how you could define\n    your own user defined format for making tool calls.\n    The function_tag format looks like this,\n    <function=function_name>(parameters)</function>\n\nThe detailed prompts for each of these formats are defined in `system_prompt.py`"
+ },
"AgenticSystemCreateResponse": {
"type": "object",
"properties": {
@@ -4768,31 +4780,31 @@
],
"tags": [
{
- "name": "MemoryBanks"
- },
- {
- "name": "Observability"
- },
- {
- "name": "Evaluations"
- },
- {
- "name": "Inference"
+ "name": "RewardScoring"
},
{
"name": "AgenticSystem"
},
+ {
+ "name": "SyntheticDataGeneration"
+ },
+ {
+ "name": "Inference"
+ },
{
"name": "Datasets"
},
+ {
+ "name": "Observability"
+ },
{
"name": "PostTraining"
},
{
- "name": "SyntheticDataGeneration"
+ "name": "MemoryBanks"
},
{
- "name": "RewardScoring"
+ "name": "Evaluations"
},
{
"name": "Attachment",
@@ -4938,6 +4950,10 @@
"name": "ShieldDefinition",
"description": ""
},
+ {
+ "name": "ToolPromptFormat",
+            "description": "This Enum refers to the prompt format for calling zero shot tools\n\n`json` --\n    Refers to the json format for calling tools.\n    The json format takes the form like\n    {\n        \"type\": \"function\",\n        \"function\" : {\n            \"name\": \"function_name\",\n            \"description\": \"function_description\",\n            \"parameters\": {...}\n        }\n    }\n\n`function_tag` --\n    This is an example of how you could define\n    your own user defined format for making tool calls.\n    The function_tag format looks like this,\n    <function=function_name>(parameters)</function>\n\nThe detailed prompts for each of these formats are defined in `system_prompt.py`\n\n"
+ },
{
"name": "AgenticSystemCreateResponse",
"description": ""
@@ -5302,6 +5318,7 @@
"ToolDefinition",
"ToolExecutionStep",
"ToolParamDefinition",
+ "ToolPromptFormat",
"ToolResponse",
"ToolResponseMessage",
"TrainEvalDataset",
diff --git a/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.yaml b/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.yaml
index dd3ee3fa8..837036811 100644
--- a/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.yaml
+++ b/rfcs/RFC-0001-llama-stack-assets/llama-stack-spec.yaml
@@ -51,6 +51,8 @@ components:
- $ref: '#/components/schemas/Fp8QuantizationConfig'
sampling_params:
$ref: '#/components/schemas/SamplingParams'
+ tool_prompt_format:
+ $ref: '#/components/schemas/ToolPromptFormat'
required:
- instructions
type: object
@@ -1607,6 +1609,20 @@ components:
required:
- param_type
type: object
+ ToolPromptFormat:
+ description: "`json` --\n Refers to the json format for calling tools.\n\
+ \ The json format takes the form like\n {\n \"type\": \"function\"\
+ ,\n \"function\" : {\n \"name\": \"function_name\",\n \
+ \ \"description\": \"function_description\",\n \"parameters\"\
+ : {...}\n }\n }\n\n`function_tag` --\n This is an example of\
+ \ how you could define\n your own user defined format for making tool calls.\n\
+      \    The function_tag format looks like this,\n    <function=function_name>(parameters)</function>\n\
+ \nThe detailed prompts for each of these formats are defined in `system_prompt.py`"
+ enum:
+ - json
+ - function_tag
+ title: This Enum refers to the prompt format for calling zero shot tools
+ type: string
ToolResponse:
additionalProperties: false
properties:
@@ -1851,7 +1867,7 @@ info:
description: "This is the specification of the llama stack that provides\n \
\ a set of endpoints and their corresponding interfaces that are tailored\
\ to\n best leverage Llama Models. The specification is still in\
- \ draft and subject to change.\n Generated at 2024-08-15 17:30:18.232105"
+ \ draft and subject to change.\n Generated at 2024-08-20 19:00:39.110138"
title: '[DRAFT] Llama Stack Specification'
version: 0.0.1
jsonSchemaDialect: https://json-schema.org/draft/2020-12/schema
@@ -2854,15 +2870,15 @@ security:
servers:
- url: http://any-hosted-llama-stack.com
tags:
-- name: MemoryBanks
-- name: Observability
-- name: Evaluations
-- name: Inference
-- name: AgenticSystem
-- name: Datasets
-- name: PostTraining
-- name: SyntheticDataGeneration
- name: RewardScoring
+- name: AgenticSystem
+- name: SyntheticDataGeneration
+- name: Inference
+- name: Datasets
+- name: Observability
+- name: PostTraining
+- name: MemoryBanks
+- name: Evaluations
- description:
name: Attachment
- description:
name: ShieldDefinition
+- description: "This Enum refers to the prompt format for calling zero shot tools\n\
+ \n`json` --\n Refers to the json format for calling tools.\n The json format\
+ \ takes the form like\n {\n \"type\": \"function\",\n \"function\"\
+ \ : {\n \"name\": \"function_name\",\n \"description\":\
+ \ \"function_description\",\n \"parameters\": {...}\n }\n \
+ \ }\n\n`function_tag` --\n This is an example of how you could define\n \
+ \ your own user defined format for making tool calls.\n The function_tag format\
+  \ looks like this,\n    <function=function_name>(parameters)</function>\n\nThe\
+ \ detailed prompts for each of these formats are defined in `system_prompt.py`\n\
+ \n"
+ name: ToolPromptFormat
- description:
name: AgenticSystemCreateResponse
@@ -3298,6 +3325,7 @@ x-tagGroups:
- ToolDefinition
- ToolExecutionStep
- ToolParamDefinition
+ - ToolPromptFormat
- ToolResponse
- ToolResponseMessage
- TrainEvalDataset
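On the request side, the diff also threads the enum into the agentic-system configuration as an optional `tool_prompt_format` property next to `instructions` and `sampling_params`. A hedged sketch of a payload fragment carrying it follows; only those three keys come from the schema shown above, and the concrete values are placeholders rather than stack defaults.

```python
# Illustrative payload fragment for the schema extended in this diff.
# `instructions`, `sampling_params`, and `tool_prompt_format` are the keys shown
# in the spec; the values below are placeholders.
agent_config = {
    "instructions": "You are a helpful shopping assistant.",
    "sampling_params": {"temperature": 0.0, "top_p": 0.95},
    "tool_prompt_format": "json",  # or "function_tag"
}
```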