add tools api with a stub provider impl

Dinesh Yeduguru 2024-12-13 12:09:12 -08:00
parent 3b4b2ea30c
commit 72dab3e4bf
14 changed files with 310 additions and 1 deletion

View file

@@ -18,6 +18,7 @@ class ResourceType(Enum):
    dataset = "dataset"
    scoring_function = "scoring_function"
    eval_task = "eval_task"
    tool = "tool"


class Resource(BaseModel):

View file

@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .tools import * # noqa: F401 F403

View file

@@ -0,0 +1,117 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Annotated, Any, Dict, List, Literal, Optional, Union

from llama_models.llama3.api.datatypes import ToolPromptFormat
from llama_models.schema_utils import json_schema_type, register_schema, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Protocol, runtime_checkable

from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol


@json_schema_type
class ToolParameter(BaseModel):
    name: str
    parameter_type: str
    description: str


@json_schema_type
class Tool(Resource):
    type: Literal[ResourceType.tool.value] = ResourceType.tool.value
    name: str
    tool_group: str
    description: str
    parameters: List[ToolParameter]
    provider_id: Optional[str] = None
    tool_prompt_format: Optional[ToolPromptFormat] = Field(
        default=ToolPromptFormat.json
    )


@json_schema_type
class ToolDef(BaseModel):
    name: str
    description: str
    parameters: List[ToolParameter]
    metadata: Dict[str, Any]
    tool_prompt_format: Optional[ToolPromptFormat] = Field(
        default=ToolPromptFormat.json
    )


@json_schema_type
class MCPToolGroup(BaseModel):
    type: Literal["mcp"] = "mcp"
    endpoint: str


@json_schema_type
class UserDefinedToolGroup(BaseModel):
    type: Literal["user_defined"] = "user_defined"
    tools: List[ToolDef]


ToolGroup = register_schema(
    Annotated[Union[MCPToolGroup, UserDefinedToolGroup], Field(discriminator="type")],
    name="ToolGroup",
)


@json_schema_type
class InvokeToolResult(BaseModel):
    content: InterleavedContent
    error_message: Optional[str] = None
    error_code: Optional[int] = None


class ToolStore(Protocol):
    def get_tool(self, tool_id: str) -> Tool: ...


@runtime_checkable
@trace_protocol
class Tools(Protocol):
    @webmethod(route="/tool-groups/register", method="POST")
    async def register_tool_group(
        self,
        name: str,
        tool_group: ToolGroup,
        provider_id: Optional[str] = None,
    ) -> None:
        """Register a tool group."""
        ...

    @webmethod(route="/tools/get", method="GET")
    async def get_tool(
        self,
        tool_id: str,
    ) -> Tool: ...

    @webmethod(route="/tools/list", method="GET")
    async def list_tools(self) -> List[Tool]:
        """List all registered tools."""
        ...

    @webmethod(route="/tools/unregister", method="POST")
    async def unregister_tool(self, tool_id: str) -> None:
        """Unregister a tool."""
        ...


@runtime_checkable
@trace_protocol
class ToolRuntime(Protocol):
    tool_store: ToolStore

    @webmethod(route="/tool-runtime/invoke", method="POST")
    async def invoke_tool(self, tool_id: str, args: Dict[str, Any]) -> InvokeToolResult:
        """Run a tool with the given arguments."""
        ...

View file

@@ -21,6 +21,7 @@ from llama_stack.apis.inference import Inference
from llama_stack.apis.memory import Memory
from llama_stack.apis.safety import Safety
from llama_stack.apis.scoring import Scoring
from llama_stack.apis.tools import Tool, ToolRuntime
from llama_stack.providers.utils.kvstore.config import KVStoreConfig

LLAMA_STACK_BUILD_CONFIG_VERSION = "2"
@@ -37,6 +38,7 @@ RoutableObject = Union[
    Dataset,
    ScoringFn,
    EvalTask,
    Tool,
]
@@ -48,6 +50,7 @@ RoutableObjectWithProvider = Annotated[
        Dataset,
        ScoringFn,
        EvalTask,
        Tool,
    ],
    Field(discriminator="type"),
]
@@ -59,6 +62,7 @@ RoutedProtocol = Union[
    DatasetIO,
    Scoring,
    Eval,
    ToolRuntime,
]

View file

@@ -47,6 +47,10 @@ def builtin_automatically_routed_apis() -> List[AutoRoutedApiInfo]:
            routing_table_api=Api.eval_tasks,
            router_api=Api.eval,
        ),
        AutoRoutedApiInfo(
            routing_table_api=Api.tools,
            router_api=Api.tool_runtime,
        ),
    ]

View file

@@ -30,6 +30,7 @@ from llama_stack.apis.scoring import Scoring
from llama_stack.apis.scoring_functions import ScoringFunctions
from llama_stack.apis.shields import Shields
from llama_stack.apis.telemetry import Telemetry
from llama_stack.apis.tools import ToolRuntime, Tools
from llama_stack.distribution.client import get_client_impl
from llama_stack.distribution.distribution import builtin_automatically_routed_apis
from llama_stack.distribution.store import DistributionRegistry
@@ -60,12 +61,15 @@ def api_protocol_map() -> Dict[Api, Any]:
        Api.eval: Eval,
        Api.eval_tasks: EvalTasks,
        Api.post_training: PostTraining,
        Api.tools: Tools,
        Api.tool_runtime: ToolRuntime,
    }


def additional_protocols_map() -> Dict[Api, Any]:
    return {
        Api.inference: (ModelsProtocolPrivate, Models, Api.models),
        Api.tools: (ToolsProtocolPrivate, Tools, Api.tools),
        Api.memory: (MemoryBanksProtocolPrivate, MemoryBanks, Api.memory_banks),
        Api.safety: (ShieldsProtocolPrivate, Shields, Api.shields),
        Api.datasetio: (DatasetsProtocolPrivate, Datasets, Api.datasets),
View file

@@ -17,6 +17,7 @@ from .routing_tables import (
    ModelsRoutingTable,
    ScoringFunctionsRoutingTable,
    ShieldsRoutingTable,
    ToolsRoutingTable,
)
@@ -33,6 +34,7 @@ async def get_routing_table_impl(
        "datasets": DatasetsRoutingTable,
        "scoring_functions": ScoringFunctionsRoutingTable,
        "eval_tasks": EvalTasksRoutingTable,
        "tools": ToolsRoutingTable,
    }

    if api.value not in api_to_tables:
@@ -51,6 +53,7 @@ async def get_auto_router_impl(api: Api, routing_table: RoutingTable, _deps) ->
        MemoryRouter,
        SafetyRouter,
        ScoringRouter,
        ToolRuntimeRouter,
    )

    api_to_routers = {
@@ -60,6 +63,7 @@ async def get_auto_router_impl(api: Api, routing_table: RoutingTable, _deps) ->
        "datasetio": DatasetIORouter,
        "scoring": ScoringRouter,
        "eval": EvalRouter,
        "tool_runtime": ToolRuntimeRouter,
    }

    if api.value not in api_to_routers:
        raise ValueError(f"API {api.value} not found in router map")

View file

@@ -15,6 +15,7 @@ from llama_stack.apis.safety import * # noqa: F403
from llama_stack.apis.datasetio import * # noqa: F403
from llama_stack.apis.scoring import * # noqa: F403
from llama_stack.apis.eval import * # noqa: F403
from llama_stack.apis.tools import * # noqa: F403


class MemoryRouter(Memory):
@@ -372,3 +373,23 @@ class EvalRouter(Eval):
            task_id,
            job_id,
        )


class ToolRuntimeRouter(ToolRuntime):
    def __init__(
        self,
        routing_table: RoutingTable,
    ) -> None:
        self.routing_table = routing_table

    async def initialize(self) -> None:
        pass

    async def shutdown(self) -> None:
        pass

    async def invoke_tool(self, tool_id: str, args: Dict[str, Any]) -> Any:
        return await self.routing_table.get_provider_impl(tool_id).invoke_tool(
            tool_id=tool_id,
            args=args,
        )
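
A hedged sketch of what a call through this router looks like; tool_runtime_router stands in for the instance the resolver constructs, and get_weather is the hypothetical tool from the earlier example:

# The router looks up the tool's provider in the routing table and
# forwards the invocation to it.
result = await tool_runtime_router.invoke_tool(
    tool_id="get_weather",
    args={"city": "Lisbon"},
)
# With a real provider this would be an InvokeToolResult; the stub added
# later in this commit (meta-reference) currently returns None.
print(result)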

View file

@@ -15,7 +15,7 @@ from llama_stack.apis.shields import * # noqa: F403
from llama_stack.apis.memory_banks import * # noqa: F403
from llama_stack.apis.datasets import * # noqa: F403
from llama_stack.apis.eval_tasks import * # noqa: F403
from llama_stack.apis.tools import * # noqa: F403
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.type_system import ParamType
@@ -45,6 +45,8 @@ async def register_object_with_provider(obj: RoutableObject, p: Any) -> Routable
        return await p.register_scoring_function(obj)
    elif api == Api.eval:
        return await p.register_eval_task(obj)
    elif api == Api.tool_runtime:
        return await p.register_tool(obj)
    else:
        raise ValueError(f"Unknown API {api} for registering object with provider")
@@ -57,6 +59,8 @@ async def unregister_object_from_provider(obj: RoutableObject, p: Any) -> None:
        return await p.unregister_model(obj.identifier)
    elif api == Api.datasetio:
        return await p.unregister_dataset(obj.identifier)
    elif api == Api.tool_runtime:
        return await p.unregister_tool(obj.identifier)
    else:
        raise ValueError(f"Unregister not supported for {api}")
@@ -461,3 +465,61 @@ class EvalTasksRoutingTable(CommonRoutingTableImpl, EvalTasks):
            provider_resource_id=provider_eval_task_id,
        )
        await self.register_object(eval_task)


class ToolsRoutingTable(CommonRoutingTableImpl, Tools):
    async def list_tools(self) -> List[Tool]:
        return await self.get_all_with_type("tool")

    async def get_tool(self, tool_id: str) -> Tool:
        return await self.get_object_by_identifier("tool", tool_id)

    async def register_tool_group(
        self,
        name: str,
        tool_group: ToolGroup,
        provider_id: Optional[str] = None,
    ) -> None:
        tools = []
        if isinstance(tool_group, MCPToolGroup):
            # TODO: first needs to be resolved to corresponding tools available in the MCP server
            raise NotImplementedError("MCP tool provider not implemented yet")
        elif isinstance(tool_group, UserDefinedToolGroup):
            for tool in tool_group.tools:
                tools.append(
                    Tool(
                        identifier=tool.name,
                        tool_group=name,
                        name=tool.name,
                        description=tool.description,
                        parameters=tool.parameters,
                        provider_id=provider_id,
                        tool_prompt_format=tool.tool_prompt_format,
                        provider_resource_id=tool.name,
                    )
                )
        else:
            raise ValueError(f"Unknown tool group: {tool_group}")

        for tool in tools:
            existing_tool = await self.get_tool(tool.name)
            # Compare existing and new object if one exists
            if existing_tool:
                # Compare all fields except provider_id since that might be None in new obj
                if tool.provider_id is None:
                    tool.provider_id = existing_tool.provider_id
                existing_dict = existing_tool.model_dump()
                new_dict = tool.model_dump()
                if existing_dict != new_dict:
                    raise ValueError(
                        f"Object {tool.name} already exists in registry. Please use a different identifier."
                    )
            await self.register_object(tool)

    async def unregister_tool(self, tool_id: str) -> None:
        tool = await self.get_tool(tool_id)
        if tool is None:
            raise ValueError(f"Tool {tool_id} not found")
        await self.unregister_object(tool)
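
The registration loop above makes re-registration idempotent for identical definitions but rejects conflicting ones. A small sketch of that behavior, where routing_table stands in for a constructed ToolsRoutingTable and weather_tool is the hypothetical ToolDef from earlier:

await routing_table.register_tool_group(
    name="weather", tool_group=UserDefinedToolGroup(tools=[weather_tool])
)
# Registering the identical definition again is accepted (all fields match).
await routing_table.register_tool_group(
    name="weather", tool_group=UserDefinedToolGroup(tools=[weather_tool])
)
# Changing any field while keeping the same tool name raises.
changed = weather_tool.model_copy(update={"description": "Updated description"})
try:
    await routing_table.register_tool_group(
        name="weather", tool_group=UserDefinedToolGroup(tools=[changed])
    )
except ValueError as err:
    print(err)  # Object get_weather already exists in registry. ...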

View file

@@ -17,6 +17,7 @@ from llama_stack.apis.memory_banks.memory_banks import MemoryBank
from llama_stack.apis.models import Model
from llama_stack.apis.scoring_functions import ScoringFn
from llama_stack.apis.shields import Shield
from llama_stack.apis.tools import Tool


@json_schema_type
@@ -29,6 +30,7 @@ class Api(Enum):
    scoring = "scoring"
    eval = "eval"
    post_training = "post_training"
    tool_runtime = "tool_runtime"

    telemetry = "telemetry"
@@ -38,6 +40,7 @@ class Api(Enum):
    datasets = "datasets"
    scoring_functions = "scoring_functions"
    eval_tasks = "eval_tasks"
    tools = "tools"

    # built-in API
    inspect = "inspect"
@@ -75,6 +78,12 @@ class EvalTasksProtocolPrivate(Protocol):
    async def register_eval_task(self, eval_task: EvalTask) -> None: ...


class ToolsProtocolPrivate(Protocol):
    async def register_tool(self, tool: Tool) -> None: ...

    async def unregister_tool(self, tool_id: str) -> None: ...


@json_schema_type
class ProviderSpec(BaseModel):
    api: Api

View file

@@ -0,0 +1,14 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .config import MetaReferenceToolRuntimeConfig
from .meta_reference import MetaReferenceToolRuntimeImpl


async def get_provider_impl(config: MetaReferenceToolRuntimeConfig, _deps):
    impl = MetaReferenceToolRuntimeImpl(config)
    await impl.initialize()
    return impl

View file

@@ -0,0 +1,11 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pydantic import BaseModel


class MetaReferenceToolRuntimeConfig(BaseModel):
    pass

View file

@@ -0,0 +1,30 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from llama_stack.apis.tools import InvokeToolResult, Tool, ToolRuntime
from llama_stack.providers.datatypes import ToolsProtocolPrivate

from .config import MetaReferenceToolRuntimeConfig


class MetaReferenceToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
    def __init__(self, config: MetaReferenceToolRuntimeConfig):
        self.config = config

    async def initialize(self):
        pass

    async def register_tool(self, tool: Tool):
        print(f"registering tool {tool.identifier}")

    async def unregister_tool(self, tool_id: str) -> None:
        pass

    async def invoke_tool(self, tool_id: str, args: Dict[str, Any]) -> InvokeToolResult:
        pass
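
To see the stub end to end, here is a minimal sketch of standing up the provider directly through its get_provider_impl factory, bypassing the distribution resolver; the tool_id and args are hypothetical:

import asyncio

from llama_stack.providers.inline.tool_runtime.meta_reference import (
    MetaReferenceToolRuntimeConfig,
    get_provider_impl,
)


async def main() -> None:
    impl = await get_provider_impl(MetaReferenceToolRuntimeConfig(), _deps={})
    # The stub's invoke_tool body is `pass`, so this returns None for now.
    result = await impl.invoke_tool(tool_id="get_weather", args={"city": "Lisbon"})
    print(result)


asyncio.run(main())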

View file

@@ -0,0 +1,21 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_stack.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec


def available_providers() -> List[ProviderSpec]:
    return [
        InlineProviderSpec(
            api=Api.tool_runtime,
            provider_type="inline::meta-reference",
            pip_packages=[],
            module="llama_stack.providers.inline.tool_runtime.meta_reference",
            config_class="llama_stack.providers.inline.tool_runtime.meta_reference.MetaReferenceToolRuntimeConfig",
        ),
    ]
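
Assuming this registry module lands alongside the other per-API provider registries at llama_stack.providers.registry.tool_runtime (the file path is not shown in the diff), the resolver discovers the stub by iterating available_providers(); a quick sketch of inspecting the entry:

from llama_stack.providers.registry.tool_runtime import available_providers

# Print the (api, provider_type, module) triple for each registered provider;
# with this commit there is exactly one: the inline meta-reference stub.
for spec in available_providers():
    print(spec.api, spec.provider_type, spec.module)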