diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
index 95c690147..a73b6bf68 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py
@@ -8,7 +8,8 @@ import uuid
 from collections.abc import AsyncIterator
 from typing import Any
 
-from llama_stack.core.telemetry import tracing
+from opentelemetry import trace
+
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 from llama_stack_api import (
@@ -77,6 +78,7 @@ from .utils import (
 )
 
 logger = get_logger(name=__name__, category="agents::meta_reference")
+tracer = trace.get_tracer(__name__)
 
 
 def convert_tooldef_to_chat_tool(tool_def):
@@ -1094,8 +1096,10 @@ class StreamingResponseOrchestrator:
                 "server_url": mcp_tool.server_url,
                 "mcp_list_tools_id": list_id,
             }
-            # List MCP tools with authorization from tool config
-            async with tracing.span("list_mcp_tools", attributes):
+
+            # TODO: follow semantic conventions for Open Telemetry tool spans
+            # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span
+            with tracer.start_as_current_span("list_mcp_tools", attributes=attributes):
                 tool_defs = await list_mcp_tools(
                     endpoint=mcp_tool.server_url,
                     headers=mcp_tool.headers,
@@ -1171,9 +1175,9 @@ class StreamingResponseOrchestrator:
         if mcp_server.require_approval == "never":
             return False
         if isinstance(mcp_server, ApprovalFilter):
-            if tool_name in mcp_server.always:
+            if mcp_server.always and tool_name in mcp_server.always:
                 return True
-            if tool_name in mcp_server.never:
+            if mcp_server.never and tool_name in mcp_server.never:
                 return False
         return True
diff --git a/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py b/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
index 4f294a979..3822fb5b2 100644
--- a/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
+++ b/src/llama_stack/providers/inline/agents/meta_reference/responses/tool_executor.py
@@ -9,8 +9,8 @@ import json
 from collections.abc import AsyncIterator
 from typing import Any
 
-from llama_stack.core.telemetry import tracing
-from llama_stack.log import get_logger
+from opentelemetry import trace
+
 from llama_stack_api import (
     ImageContentItem,
     OpenAIChatCompletionContentPartImageParam,
@@ -39,9 +39,12 @@ from llama_stack_api import (
     VectorIO,
 )
 
+from llama_stack.log import get_logger
+
 from .types import ChatCompletionContext, ToolExecutionResult
 
 logger = get_logger(name=__name__, category="agents::meta_reference")
+tracer = trace.get_tracer(__name__)
 
 
 class ToolExecutor:
@@ -296,8 +299,9 @@ class ToolExecutor:
                 "server_url": mcp_tool.server_url,
                 "tool_name": function_name,
             }
-            # Invoke MCP tool with authorization from tool config
-            async with tracing.span("invoke_mcp_tool", attributes):
+            # TODO: follow semantic conventions for Open Telemetry tool spans
+            # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span
+            with tracer.start_as_current_span("invoke_mcp_tool", attributes=attributes):
                 result = await invoke_mcp_tool(
                     endpoint=mcp_tool.server_url,
                     tool_name=function_name,
@@ -318,7 +322,7 @@ class ToolExecutor:
             # Use vector_stores.search API instead of knowledge_search tool
             # to support filters and ranking_options
             query = tool_kwargs.get("query", "")
-            async with tracing.span("knowledge_search", {}):
+            with tracer.start_as_current_span("knowledge_search"):
                 result = await self._execute_knowledge_search_via_vector_store(
                     query=query,
                     response_file_search_tool=response_file_search_tool,
@@ -327,7 +331,9 @@ class ToolExecutor:
             attributes = {
                 "tool_name": function_name,
             }
-            async with tracing.span("invoke_tool", attributes):
+            # TODO: follow semantic conventions for Open Telemetry tool spans
+            # https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-spans/#execute-tool-span
+            with tracer.start_as_current_span("invoke_tool", attributes=attributes):
                 result = await self.tool_runtime_api.invoke_tool(
                     tool_name=function_name,
                     kwargs=tool_kwargs,
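
For reviewers unfamiliar with the pattern: below is a minimal, self-contained sketch of how the new spans behave. Only `trace.get_tracer(__name__)` and `start_as_current_span(...)` mirror this diff; the SDK setup, span name, attributes, and dummy tool are illustrative, not part of the change. The key point is that `start_as_current_span` is a synchronous context manager, which is why the old `async with tracing.span(...)` becomes a plain `with`: OpenTelemetry stores the active span in a contextvar, so it propagates correctly across `await` boundaries.

```python
# Minimal sketch of the tracing pattern adopted in this diff.
# Names and attribute values here are illustrative, not from Llama Stack.
import asyncio

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

# One-time SDK setup; normally done by the host application, not the library.
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Module-level tracer, mirroring `tracer = trace.get_tracer(__name__)` above.
tracer = trace.get_tracer(__name__)


async def invoke_tool(tool_name: str) -> str:
    await asyncio.sleep(0.01)  # stand-in for a real MCP round trip
    return f"result of {tool_name}"


async def main() -> None:
    attributes = {"server_url": "https://example.com/mcp", "tool_name": "search"}
    # A synchronous `with` is safe around `await`: the active span lives in a
    # contextvar that follows the task across suspension points, so any child
    # spans created inside the awaited call are still parented correctly.
    with tracer.start_as_current_span("invoke_mcp_tool", attributes=attributes):
        print(await invoke_tool("search"))


asyncio.run(main())
```

Running this prints the tool result and then dumps the finished `invoke_mcp_tool` span (with its attributes) to stdout via the console exporter, which is roughly what the `list_mcp_tools`, `invoke_mcp_tool`, `knowledge_search`, and `invoke_tool` spans above will emit through whatever exporter the deployment configures.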