From a9a7b11326e311315ae1b38a56827c73a30d40b9 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Mon, 3 Mar 2025 10:27:43 +0800 Subject: [PATCH] docs: update agent_execution_loop example code (#1350) # What does this PR do? [Provide a short summary of what this PR does and why. Link to relevant issues if applicable.] - add missing `import` - add client definition - update `attachments` to `documents`, https://github.com/meta-llama/llama-stack-client-python/commit/40da0d0e76a664aeb00333d536737f477be30299 [//]: # (If resolving an issue, uncomment and update the line below) [//]: # (Closes #[issue-number]) ## Test Plan [Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*] [//]: # (## Documentation) Signed-off-by: reidliu Co-authored-by: reidliu --- .../building_applications/agent_execution_loop.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/source/building_applications/agent_execution_loop.md b/docs/source/building_applications/agent_execution_loop.md index 7f5392f2c..67974e241 100644 --- a/docs/source/building_applications/agent_execution_loop.md +++ b/docs/source/building_applications/agent_execution_loop.md @@ -67,10 +67,17 @@ sequenceDiagram Each step in this process can be monitored and controlled through configurations. 
Here's an example that demonstrates monitoring the agent's execution: ```python +from llama_stack_client import LlamaStackClient +from llama_stack_client.lib.agents.agent import Agent from llama_stack_client.lib.agents.event_logger import EventLogger +from llama_stack_client.types.agent_create_params import AgentConfig from rich.pretty import pprint +# Replace host and port +client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}") + agent_config = AgentConfig( + # Check with `llama-stack-client models list` model="Llama3.2-3B-Instruct", instructions="You are a helpful assistant", # Enable both RAG and tool usage @@ -81,7 +88,7 @@ agent_config = AgentConfig( }, "builtin::code_interpreter", ], - # Configure safety + # Configure safety (optional) input_shields=["llama_guard"], output_shields=["llama_guard"], # Control the inference loop @@ -98,7 +105,7 @@ session_id = agent.create_session("monitored_session") # Stream the agent's execution steps response = agent.create_turn( messages=[{"role": "user", "content": "Analyze this code and run it"}], - attachments=[ + documents=[ { "content": "https://raw.githubusercontent.com/example/code.py", "mime_type": "text/plain", @@ -114,7 +121,7 @@ for log in EventLogger().log(response): # Using non-streaming API, the response contains input, steps, and output. response = agent.create_turn( messages=[{"role": "user", "content": "Analyze this code and run it"}], - attachments=[ + documents=[ { "content": "https://raw.githubusercontent.com/example/code.py", "mime_type": "text/plain",