diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html
index 24fde9054..a7a2fd0b2 100644
--- a/docs/_static/llama-stack-spec.html
+++ b/docs/_static/llama-stack-spec.html
@@ -5221,17 +5221,25 @@
"default": 10
},
"model": {
- "type": "string"
+ "type": "string",
+ "description": "The model identifier to use for the agent"
},
"instructions": {
- "type": "string"
+ "type": "string",
+ "description": "The system instructions for the agent"
+ },
+ "name": {
+ "type": "string",
+ "description": "Optional name for the agent, used in telemetry and identification"
},
"enable_session_persistence": {
"type": "boolean",
- "default": false
+ "default": false,
+ "description": "Whether to persist session data"
},
"response_format": {
- "$ref": "#/components/schemas/ResponseFormat"
+ "$ref": "#/components/schemas/ResponseFormat",
+ "description": "Optional response format configuration"
}
},
"additionalProperties": false,
@@ -5239,7 +5247,8 @@
"model",
"instructions"
],
- "title": "AgentConfig"
+ "title": "AgentConfig",
+ "description": "Configuration for an agent."
},
"AgentTool": {
"oneOf": [
diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml
index 27712ee74..0b6115c6f 100644
--- a/docs/_static/llama-stack-spec.yaml
+++ b/docs/_static/llama-stack-spec.yaml
@@ -3686,18 +3686,28 @@ components:
default: 10
model:
type: string
+ description: >-
+ The model identifier to use for the agent
instructions:
type: string
+ description: The system instructions for the agent
+ name:
+ type: string
+ description: >-
+ Optional name for the agent, used in telemetry and identification
enable_session_persistence:
type: boolean
default: false
+ description: Whether to persist session data
response_format:
$ref: '#/components/schemas/ResponseFormat'
+ description: Optional response format configuration
additionalProperties: false
required:
- model
- instructions
title: AgentConfig
+ description: Configuration for an agent.
AgentTool:
oneOf:
- type: string
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index e13c4960b..dec43280b 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -225,8 +225,18 @@ class AgentConfigCommon(BaseModel):
@json_schema_type
class AgentConfig(AgentConfigCommon):
+ """Configuration for an agent.
+
+ :param model: The model identifier to use for the agent
+ :param instructions: The system instructions for the agent
+ :param name: Optional name for the agent, used in telemetry and identification
+    :param enable_session_persistence: Whether to persist session data
+ :param response_format: Optional response format configuration
+ """
+
model: str
instructions: str
+ name: Optional[str] = None
enable_session_persistence: Optional[bool] = False
response_format: Optional[ResponseFormat] = None
diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
index f441d6eb6..b5714b438 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
@@ -178,6 +178,8 @@ class ChatAgent(ShieldRunnerMixin):
span.set_attribute("request", request.model_dump_json())
turn_id = str(uuid.uuid4())
span.set_attribute("turn_id", turn_id)
+ if self.agent_config.name:
+ span.set_attribute("agent_name", self.agent_config.name)
await self._initialize_tools(request.toolgroups)
async for chunk in self._run_turn(request, turn_id):
@@ -190,6 +192,8 @@ class ChatAgent(ShieldRunnerMixin):
span.set_attribute("session_id", request.session_id)
span.set_attribute("request", request.model_dump_json())
span.set_attribute("turn_id", request.turn_id)
+ if self.agent_config.name:
+ span.set_attribute("agent_name", self.agent_config.name)
await self._initialize_tools()
async for chunk in self._run_turn(request):
@@ -498,6 +502,8 @@ class ChatAgent(ShieldRunnerMixin):
stop_reason = None
async with tracing.span("inference") as span:
+ if self.agent_config.name:
+ span.set_attribute("agent_name", self.agent_config.name)
async for chunk in await self.inference_api.chat_completion(
self.agent_config.model,
input_messages,
diff --git a/tests/integration/agents/test_agents.py b/tests/integration/agents/test_agents.py
index 7def55291..f884d440d 100644
--- a/tests/integration/agents/test_agents.py
+++ b/tests/integration/agents/test_agents.py
@@ -115,6 +115,70 @@ def test_agent_simple(llama_stack_client_with_mocked_inference, agent_config):
assert "I can't" in logs_str
+def test_agent_name(llama_stack_client, text_model_id):
+ agent_name = f"test-agent-{uuid4()}"
+
+ try:
+ agent = Agent(
+ llama_stack_client,
+ model=text_model_id,
+ instructions="You are a helpful assistant",
+ name=agent_name,
+ )
+ except TypeError:
+ agent = Agent(
+ llama_stack_client,
+ model=text_model_id,
+ instructions="You are a helpful assistant",
+ )
+ return
+
+ session_id = agent.create_session(f"test-session-{uuid4()}")
+
+ agent.create_turn(
+ messages=[
+ {
+ "role": "user",
+ "content": "Give me a sentence that contains the word: hello",
+ }
+ ],
+ session_id=session_id,
+ stream=False,
+ )
+
+ all_spans = []
+ for span in llama_stack_client.telemetry.query_spans(
+ attribute_filters=[
+ {"key": "session_id", "op": "eq", "value": session_id},
+ ],
+ attributes_to_return=["input", "output", "agent_name", "agent_id", "session_id"],
+ ):
+ all_spans.append(span.attributes)
+
+ agent_name_spans = []
+ for span in llama_stack_client.telemetry.query_spans(
+ attribute_filters=[],
+ attributes_to_return=["agent_name"],
+ ):
+ if "agent_name" in span.attributes:
+ agent_name_spans.append(span.attributes)
+
+ agent_logs = []
+ for span in llama_stack_client.telemetry.query_spans(
+ attribute_filters=[
+ {"key": "agent_name", "op": "eq", "value": agent_name},
+ ],
+ attributes_to_return=["input", "output", "agent_name"],
+ ):
+ if "output" in span.attributes and span.attributes["output"] != "no shields":
+ agent_logs.append(span.attributes)
+
+ assert len(agent_logs) == 1
+ assert agent_logs[0]["agent_name"] == agent_name
+ assert "Give me a sentence that contains the word: hello" in agent_logs[0]["input"]
+ assert "hello" in agent_logs[0]["output"].lower()
+
+
def test_tool_config(llama_stack_client_with_mocked_inference, agent_config):
common_params = dict(
model="meta-llama/Llama-3.2-3B-Instruct",