From fcbd6dfae8b76e6a26330255df430b52b5697362 Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Mon, 3 Mar 2025 12:44:40 -0800 Subject: [PATCH] orchestrator agent --- .../Llama_Stack_Agent_Workflows.ipynb | 462 +++++++++++++++++- 1 file changed, 459 insertions(+), 3 deletions(-) diff --git a/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb b/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb index 83915104c..70f1c8b25 100644 --- a/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb +++ b/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb @@ -70,7 +70,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1.1 Prompt Chaining" + "#### 1.1 Prompt Chaining\n", + "\n", + "![](https://www.anthropic.com/_next/image?url=https%3A%2F%2Fwww-cdn.anthropic.com%2Fimages%2F4zrzovbb%2Fwebsite%2F7418719e3dab222dccb379b8879e1dc08ad34c78-2401x1000.png&w=3840&q=75)" ] }, { @@ -217,7 +219,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1.2 Routing" + "#### 1.2 Routing\n", + "\n", + "![](https://www.anthropic.com/_next/image?url=https%3A%2F%2Fwww-cdn.anthropic.com%2Fimages%2F4zrzovbb%2Fwebsite%2F5c0c0e9fe4def0b584c04d37849941da55e5e71c-2401x1000.png&w=3840&q=75)" ] }, { @@ -582,7 +586,9 @@ "source": [ "#### 2. Evaluator-Optimizer Workflow\n", "\n", - "In the evaluator-optimizer workflow, one LLM call generates a response while another provider evaluation and feedback in a loop. " + "In the evaluator-optimizer workflow, one LLM call generates a response while another provider evaluation and feedback in a loop. \n", + "\n", + "![](https://www.anthropic.com/_next/image?url=https%3A%2F%2Fwww-cdn.anthropic.com%2Fimages%2F4zrzovbb%2Fwebsite%2F14f51e6406ccb29e695da48b17017e899a6119c7-2401x1000.png&w=3840&q=75)" ] }, { @@ -941,6 +947,456 @@ "pprint(evaluator_agent_session.to_dict())" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 
Orchestrator-Workers Workflow\n", + "\n", + "In the orchestrator-workers workflow, a central LLM dynamically breaks down tasks, delegates them to worker LLMs, and synthesizes their results.\n", + "\n", + "![](https://www.anthropic.com/_next/image?url=https%3A%2F%2Fwww-cdn.anthropic.com%2Fimages%2F4zrzovbb%2Fwebsite%2F8985fc683fae4780fb34eab1365ab78c7e51bc8e-2401x1000.png&w=3840&q=75)" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List, Dict\n", + "class OrchestratorOutputSchema(BaseModel):\n", + " analysis: str\n", + " tasks: List[Dict[str, str]]\n", + "\n", + "orchestrator_agent_config = AgentConfig({\n", + " **base_agent_config,\n", + " \"instructions\": \"\"\"Your job is to analyze the task provided by the user and break it down into 2-3 distinct approaches:\n", + "\n", + " Return your response in the following JSON format:\n", + " {{\n", + " \"analysis\": \"\",\n", + " \"tasks\": [\n", + " {{\n", + " \"type\": \"formal\",\n", + " \"description\": \"Write a precise, technical version that emphasizes specifications\"\n", + " }},\n", + " {{\n", + " \"type\": \"conversational\",\n", + " \"description\": \"Write an engaging, friendly version that connects with readers\"\n", + " }}\n", + " ]\n", + " }}\n", + " \"\"\",\n", + " \"response_format\": {\n", + " \"type\": \"json_schema\",\n", + " \"json_schema\": OrchestratorOutputSchema.model_json_schema()\n", + " }\n", + "})\n", + "\n", + "worker_agent_config = AgentConfig({\n", + " **base_agent_config,\n", + " \"instructions\": \"\"\"You will be given a ,