Fix precommit check after moving to ruff (#927)
The lint check on the main branch is failing. This fixes the lint check after we moved to ruff in https://github.com/meta-llama/llama-stack/pull/921. We also need to move the configuration into a `ruff.toml` file and fix or ignore some additional checks.

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
parent 4773092dd1
commit 34ab7a3b6c

217 changed files with 981 additions and 2681 deletions
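The commit message refers to moving the ruff configuration into a `ruff.toml` file. As a rough illustration only, a minimal `ruff.toml` along these lines would raise the line length and select or ignore rule sets; the specific values below are assumptions for the sketch, not the settings added by this PR:

```toml
# Hypothetical ruff.toml sketch -- values are illustrative, not taken from this PR
line-length = 120          # long enough that the joined one-line signatures below pass the length check

[lint]
select = ["E", "F", "I"]   # pycodestyle errors, pyflakes, import sorting (assumed selection)
ignore = ["E501"]          # example of ignoring an additional check, as the message describes
```

With a file like this at the repository root, both `ruff check` and `ruff format` pick the settings up automatically.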
Bing search tool runtime (BingSearchToolRuntimeImpl):

@@ -23,9 +23,7 @@ from llama_stack.providers.datatypes import ToolsProtocolPrivate
 from .config import BingSearchToolConfig


-class BingSearchToolRuntimeImpl(
-    ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData
-):
+class BingSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: BingSearchToolConfig):
         self.config = config
         self.url = "https://api.bing.microsoft.com/v7.0/search"
@@ -67,9 +65,7 @@ class BingSearchToolRuntimeImpl(
             )
         ]

-    async def invoke_tool(
-        self, tool_name: str, kwargs: Dict[str, Any]
-    ) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         headers = {
             "Ocp-Apim-Subscription-Key": api_key,
@@ -88,9 +84,7 @@ class BingSearchToolRuntimeImpl(
         )
         response.raise_for_status()

-        return ToolInvocationResult(
-            content=json.dumps(self._clean_response(response.json()))
-        )
+        return ToolInvocationResult(content=json.dumps(self._clean_response(response.json())))

     def _clean_response(self, search_response):
         clean_response = []
@@ -99,9 +93,7 @@ class BingSearchToolRuntimeImpl(
             pages = search_response["webPages"]["value"]
             for p in pages:
                 selected_keys = {"name", "url", "snippet"}
-                clean_response.append(
-                    {k: v for k, v in p.items() if k in selected_keys}
-                )
+                clean_response.append({k: v for k, v in p.items() if k in selected_keys})
         if "news" in search_response:
             clean_news = []
             news = search_response["news"]["value"]
Brave search tool runtime (BraveSearchToolRuntimeImpl):

@@ -23,9 +23,7 @@ from llama_stack.providers.datatypes import ToolsProtocolPrivate
 from .config import BraveSearchToolConfig


-class BraveSearchToolRuntimeImpl(
-    ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData
-):
+class BraveSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: BraveSearchToolConfig):
         self.config = config

@@ -67,9 +65,7 @@ class BraveSearchToolRuntimeImpl(
             )
         ]

-    async def invoke_tool(
-        self, tool_name: str, kwargs: Dict[str, Any]
-    ) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         url = "https://api.search.brave.com/res/v1/web/search"
         headers = {
@@ -135,10 +131,7 @@ class BraveSearchToolRuntimeImpl(
             results = result_selector(results)

             if isinstance(results, list):
-                cleaned = [
-                    {k: v for k, v in item.items() if k in selected_keys}
-                    for item in results
-                ]
+                cleaned = [{k: v for k, v in item.items() if k in selected_keys} for item in results]
             else:
                 cleaned = {k: v for k, v in results.items() if k in selected_keys}

Model Context Protocol tool runtime (ModelContextProtocolToolRuntimeImpl):

@@ -42,9 +42,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
             tools_result = await session.list_tools()
             for tool in tools_result.tools:
                 parameters = []
-                for param_name, param_schema in tool.inputSchema.get(
-                    "properties", {}
-                ).items():
+                for param_name, param_schema in tool.inputSchema.get("properties", {}).items():
                     parameters.append(
                         ToolParameter(
                             name=param_name,
@@ -64,9 +62,7 @@ class ModelContextProtocolToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
                 )
         return tools

-    async def invoke_tool(
-        self, tool_name: str, kwargs: Dict[str, Any]
-    ) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
         tool = await self.tool_store.get_tool(tool_name)
         if tool.metadata is None or tool.metadata.get("endpoint") is None:
             raise ValueError(f"Tool {tool_name} does not have metadata")
Tavily search tool runtime (TavilySearchToolRuntimeImpl):

@@ -23,9 +23,7 @@ from llama_stack.providers.datatypes import ToolsProtocolPrivate
 from .config import TavilySearchToolConfig


-class TavilySearchToolRuntimeImpl(
-    ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData
-):
+class TavilySearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: TavilySearchToolConfig):
         self.config = config

@@ -66,18 +64,14 @@ class TavilySearchToolRuntimeImpl(
             )
         ]

-    async def invoke_tool(
-        self, tool_name: str, kwargs: Dict[str, Any]
-    ) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         response = requests.post(
             "https://api.tavily.com/search",
             json={"api_key": api_key, "query": kwargs["query"]},
         )

-        return ToolInvocationResult(
-            content=json.dumps(self._clean_tavily_response(response.json()))
-        )
+        return ToolInvocationResult(content=json.dumps(self._clean_tavily_response(response.json())))

     def _clean_tavily_response(self, search_response, top_k=3):
         return {"query": search_response["query"], "top_k": search_response["results"]}
Wolfram Alpha tool runtime (WolframAlphaToolRuntimeImpl):

@@ -23,9 +23,7 @@ from llama_stack.providers.datatypes import ToolsProtocolPrivate
 from .config import WolframAlphaToolConfig


-class WolframAlphaToolRuntimeImpl(
-    ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData
-):
+class WolframAlphaToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: WolframAlphaToolConfig):
         self.config = config
         self.url = "https://api.wolframalpha.com/v2/query"
@@ -67,9 +65,7 @@ class WolframAlphaToolRuntimeImpl(
             )
         ]

-    async def invoke_tool(
-        self, tool_name: str, kwargs: Dict[str, Any]
-    ) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         params = {
             "input": kwargs["query"],
@@ -82,9 +78,7 @@ class WolframAlphaToolRuntimeImpl(
             params=params,
         )

-        return ToolInvocationResult(
-            content=json.dumps(self._clean_wolfram_alpha_response(response.json()))
-        )
+        return ToolInvocationResult(content=json.dumps(self._clean_wolfram_alpha_response(response.json())))

     def _clean_wolfram_alpha_response(self, wa_response):
         remove = {
@@ -128,10 +122,7 @@ class WolframAlphaToolRuntimeImpl(
             for sub_key in key_to_remove:
                 if sub_key == "pods":
                     for i in range(len(wa_response[main_key][sub_key])):
-                        if (
-                            wa_response[main_key][sub_key][i]["title"]
-                            == "Result"
-                        ):
+                        if wa_response[main_key][sub_key][i]["title"] == "Result":
                             del wa_response[main_key][sub_key][i + 1 :]
                             break
                 sub_items = wa_response[main_key][sub_key]
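All of the hunks above follow the same pattern: statements that were wrapped across several lines are joined into single lines that fit under the new length limit. As a sketch of how one might reproduce or verify this kind of reformatting locally (assuming ruff and pre-commit are installed; these are standard CLI invocations, not commands quoted from this PR's CI setup):

```sh
# Apply lint autofixes and reformat the tree with ruff (assumed workflow, not taken from the PR)
ruff check --fix .
ruff format .

# Or run every configured pre-commit hook against the whole repository
pre-commit run --all-files
```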