From 6a5b73ca7ce66e0dc2e111a7c60e6969517359b6 Mon Sep 17 00:00:00 2001
From: reluctantfuturist
Date: Wed, 9 Apr 2025 16:22:00 -0700
Subject: [PATCH] feat(agents): add agent naming functionality

Allow users to name an agent and use the name in telemetry instead of
relying on randomly generated agent_ids. This improves the developer
experience by making it easier to find specific agents in telemetry logs.

Closes #1832
---
 docs/_static/llama-stack-spec.html             | 19 +++--
 docs/_static/llama-stack-spec.yaml             | 10 +++
 llama_stack/apis/agents/agents.py              | 10 +++
 .../agents/meta_reference/agent_instance.py    |  6 ++
 tests/integration/telemetry/test_telemetry.py  | 74 +++++++++++++++++++
 5 files changed, 114 insertions(+), 5 deletions(-)

diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html
index 567110829..dca949b56 100644
--- a/docs/_static/llama-stack-spec.html
+++ b/docs/_static/llama-stack-spec.html
@@ -5112,17 +5112,25 @@
           "default": 10
         },
         "model": {
-          "type": "string"
+          "type": "string",
+          "description": "The model identifier to use for the agent"
         },
         "instructions": {
-          "type": "string"
+          "type": "string",
+          "description": "The system instructions for the agent"
+        },
+        "name": {
+          "type": "string",
+          "description": "Optional name for the agent, used in telemetry and identification"
         },
         "enable_session_persistence": {
           "type": "boolean",
-          "default": false
+          "default": false,
+          "description": "Whether to persist session data"
         },
         "response_format": {
-          "$ref": "#/components/schemas/ResponseFormat"
+          "$ref": "#/components/schemas/ResponseFormat",
+          "description": "Optional response format configuration"
         }
       },
       "additionalProperties": false,
@@ -5130,7 +5138,8 @@
         "model",
         "instructions"
       ],
-      "title": "AgentConfig"
+      "title": "AgentConfig",
+      "description": "Configuration for an agent."
     },
     "AgentTool": {
       "oneOf": [
diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml
index 1dfd17f55..ca4a1f47f 100644
--- a/docs/_static/llama-stack-spec.yaml
+++ b/docs/_static/llama-stack-spec.yaml
@@ -3615,18 +3615,28 @@ components:
           default: 10
         model:
           type: string
+          description: >-
+            The model identifier to use for the agent
         instructions:
           type: string
+          description: The system instructions for the agent
+        name:
+          type: string
+          description: >-
+            Optional name for the agent, used in telemetry and identification
         enable_session_persistence:
           type: boolean
           default: false
+          description: Whether to persist session data
         response_format:
           $ref: '#/components/schemas/ResponseFormat'
+          description: Optional response format configuration
       additionalProperties: false
       required:
         - model
         - instructions
       title: AgentConfig
+      description: Configuration for an agent.
     AgentTool:
       oneOf:
         - type: string
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index e13c4960b..28a0792fb 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -225,8 +225,18 @@ class AgentConfigCommon(BaseModel):
 
 @json_schema_type
 class AgentConfig(AgentConfigCommon):
+    """Configuration for an agent.
+
+    :param model: The model identifier to use for the agent
+    :param instructions: The system instructions for the agent
+    :param name: Optional name for the agent, used in telemetry and identification
+    :param enable_session_persistence: Whether to persist session data
+    :param response_format: Optional response format configuration
+    """
+
     model: str
     instructions: str
+    name: Optional[str] = None
     enable_session_persistence: Optional[bool] = False
     response_format: Optional[ResponseFormat] = None
 
diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
index f441d6eb6..b5714b438 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
@@ -178,6 +178,8 @@ class ChatAgent(ShieldRunnerMixin):
             span.set_attribute("request", request.model_dump_json())
             turn_id = str(uuid.uuid4())
             span.set_attribute("turn_id", turn_id)
+            if self.agent_config.name:
+                span.set_attribute("agent_name", self.agent_config.name)
 
             await self._initialize_tools(request.toolgroups)
             async for chunk in self._run_turn(request, turn_id):
@@ -190,6 +192,8 @@ class ChatAgent(ShieldRunnerMixin):
             span.set_attribute("session_id", request.session_id)
             span.set_attribute("request", request.model_dump_json())
             span.set_attribute("turn_id", request.turn_id)
+            if self.agent_config.name:
+                span.set_attribute("agent_name", self.agent_config.name)
 
             await self._initialize_tools()
             async for chunk in self._run_turn(request):
@@ -498,6 +502,8 @@ class ChatAgent(ShieldRunnerMixin):
         stop_reason = None
 
         async with tracing.span("inference") as span:
+            if self.agent_config.name:
+                span.set_attribute("agent_name", self.agent_config.name)
             async for chunk in await self.inference_api.chat_completion(
                 self.agent_config.model,
                 input_messages,
diff --git a/tests/integration/telemetry/test_telemetry.py b/tests/integration/telemetry/test_telemetry.py
index c46de3742..9f6d06b99 100644
--- a/tests/integration/telemetry/test_telemetry.py
+++ b/tests/integration/telemetry/test_telemetry.py
@@ -41,3 +41,77 @@ def test_agent_query_spans(llama_stack_client, text_model_id):
     assert len(agent_logs) == 1
     assert "Give me a sentence that contains the word: hello" in agent_logs[0]["input"]
     assert "hello" in agent_logs[0]["output"].lower()
+
+
+def test_agent_name_filtering(llama_stack_client, text_model_id):
+    # Create an agent with a specific name
+    agent_name = f"test-agent-{uuid4()}"
+    print(f"Using agent_name: {agent_name}")
+
+    agent = Agent(
+        llama_stack_client,
+        model=text_model_id,
+        instructions="You are a helpful assistant",
+        name=agent_name,
+    )
+    session_id = agent.create_session(f"test-session-{uuid4()}")
+    print(f"Created session_id: {session_id}")
+
+    agent.create_turn(
+        messages=[
+            {
+                "role": "user",
+                "content": "Give me a sentence that contains the word: hello",
+            }
+        ],
+        session_id=session_id,
+        stream=False,
+    )
+
+    # Wait for the span to be logged - increase the time to ensure it's processed
+    time.sleep(5)
+
+    # Query spans filtered by session_id to see what's available
+    all_spans = []
+    for span in llama_stack_client.telemetry.query_spans(
+        attribute_filters=[
+            {"key": "session_id", "op": "eq", "value": session_id},
+        ],
+        attributes_to_return=["input", "output", "agent_name", "agent_id", "session_id"],
+    ):
+        all_spans.append(span.attributes)
+
+    print(f"All spans for session {session_id}:")
+    for span in all_spans:
+        print(f"Span attributes: {span}")
+
+    # Query all spans to see if any have the agent_name attribute
+    agent_name_spans = []
+    for span in llama_stack_client.telemetry.query_spans(
+        attribute_filters=[],
+        attributes_to_return=["agent_name"],
+    ):
+        if "agent_name" in span.attributes:
+            agent_name_spans.append(span.attributes)
+
+    print("All spans with agent_name attribute:")
+    for span in agent_name_spans:
+        print(f"Span with agent_name: {span}")
+
+    # Query spans filtered by agent name
+    agent_logs = []
+    for span in llama_stack_client.telemetry.query_spans(
+        attribute_filters=[
+            {"key": "agent_name", "op": "eq", "value": agent_name},
+        ],
+        attributes_to_return=["input", "output", "agent_name"],
+    ):
+        if "output" in span.attributes and span.attributes["output"] != "no shields":
+            agent_logs.append(span.attributes)
+
+    print(f"Found {len(agent_logs)} spans filtered by agent_name")
+
+    assert len(agent_logs) == 1
+    assert agent_logs[0]["agent_name"] == agent_name
+    assert "Give me a sentence that contains the word: hello" in agent_logs[0]["input"]
+    assert "hello" in agent_logs[0]["output"].lower()
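
Usage sketch for reviewers: the snippet below shows the end-to-end flow this patch enables, creating a named agent and then filtering telemetry spans by the human-readable name instead of the randomly generated agent_id. It is illustrative rather than part of the change; the base URL, model id, and agent name are assumed placeholders, while the Agent constructor, create_session/create_turn calls, and telemetry.query_spans filter shape mirror the integration test above.

    from llama_stack_client import LlamaStackClient
    from llama_stack_client.lib.agents.agent import Agent

    # Assumed local deployment; adjust the base URL for your stack.
    client = LlamaStackClient(base_url="http://localhost:8321")

    # Naming the agent makes its spans carry an "agent_name" attribute.
    agent = Agent(
        client,
        model="meta-llama/Llama-3.1-8B-Instruct",  # assumed model id; use any served model
        instructions="You are a helpful assistant",
        name="billing-support-agent",
    )
    session_id = agent.create_session("demo-session")
    agent.create_turn(
        messages=[{"role": "user", "content": "Say hello."}],
        session_id=session_id,
        stream=False,
    )

    # Locate this agent's spans by its stable name rather than agent_id.
    for span in client.telemetry.query_spans(
        attribute_filters=[
            {"key": "agent_name", "op": "eq", "value": "billing-support-agent"},
        ],
        attributes_to_return=["input", "output", "agent_name"],
    ):
        print(span.attributes)

Because the attribute is only set when agent_config.name is present, unnamed agents emit no agent_name attribute and existing agent_id-based queries continue to work unchanged.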