mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 01:48:05 +00:00
Added migration steps from autogen to microsoft agent framework
This commit is contained in:
parent
53bc1f0fd9
commit
9bc1cf1248
2 changed files with 918 additions and 1018 deletions
@@ -0,0 +1,918 @@
|
||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# Microsoft Agent Framework + Llama Stack Integration\n",
|
||||||
|
"\n",
|
||||||
|
"[](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/notebooks/autogen/microsoft_agent_framework_llama_stack_integration.ipynb)\n",
|
||||||
|
"\n",
|
||||||
|
"## Overview\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook demonstrates how to use **Microsoft Agent Framework** (successor to AutoGen) with **Llama Stack** as the backend.\n",
|
||||||
|
"\n",
|
||||||
|
"> **Note:** This notebook uses Microsoft Agent Framework, which replaces AutoGen. For the migration guide, see: [Microsoft Agent Framework Migration Guide](https://learn.microsoft.com/en-us/agent-framework/migration-guide/from-autogen/)\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Cases Covered:\n",
|
||||||
|
"1. **Two-Agent Conversation** - Teams working together on tasks\n",
|
||||||
|
"2. **Code Generation & Execution** - Agents generating and running code\n",
|
||||||
|
"3. **Group Chat** - Multiple specialists collaborating \n",
|
||||||
|
"4. **Advanced Termination** - Stopping conditions\n",
|
||||||
|
"\n",
|
||||||
|
"---\n",
|
||||||
|
"\n",
|
||||||
|
"## Prerequisites\n",
|
||||||
|
"\n",
|
||||||
|
"```bash\n",
|
||||||
|
"# Install Microsoft Agent Framework\n",
|
||||||
|
"pip install agent-framework\n",
|
||||||
|
"\n",
|
||||||
|
"# Llama Stack should already be running\n",
|
||||||
|
"# Default: http://localhost:8321\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"**Migration Note:** If you're migrating from AutoGen, the main changes are:\n",
|
||||||
|
"- Package: `autogen-*` → `agent-framework`\n",
|
||||||
|
"- Client: `OpenAIChatCompletionClient` → `OpenAIResponsesClient`\n",
|
||||||
|
"- Agent: `AssistantAgent` → `ChatAgent`\n",
|
||||||
|
"- Team: `RoundRobinGroupChat` → `SequentialBuilder`"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 19,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Microsoft Agent Framework imports successful\n",
|
||||||
|
"Using Microsoft Agent Framework (successor to AutoGen)\n",
|
||||||
|
"✅ Llama Stack is running at http://localhost:8321\n",
|
||||||
|
"Status: 200\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# Imports for Microsoft Agent Framework\n",
|
||||||
|
"import os\n",
|
||||||
|
"import asyncio\n",
|
||||||
|
"from agent_framework import ChatAgent\n",
|
||||||
|
"from agent_framework.openai import OpenAIResponsesClient\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Microsoft Agent Framework imports successful\")\n",
|
||||||
|
"print(\"Using Microsoft Agent Framework (successor to AutoGen)\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Check Llama Stack connectivity\n",
|
||||||
|
"import httpx\n",
|
||||||
|
"\n",
|
||||||
|
"LLAMA_STACK_URL = \"http://localhost:8321\"\n",
|
||||||
|
"\n",
|
||||||
|
"try:\n",
|
||||||
|
" response = httpx.get(f\"{LLAMA_STACK_URL}/v1/models\")\n",
|
||||||
|
" print(f\"✅ Llama Stack is running at {LLAMA_STACK_URL}\")\n",
|
||||||
|
" print(f\"Status: {response.status_code}\")\n",
|
||||||
|
"except Exception as e:\n",
|
||||||
|
" print(f\"❌ Llama Stack not accessible: {e}\")\n",
|
||||||
|
" print(\"Make sure Llama Stack is running on port 8321\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Configuration: Microsoft Agent Framework with Llama Stack\n",
|
||||||
|
"\n",
|
||||||
|
"### How It Works\n",
|
||||||
|
"\n",
|
||||||
|
"Microsoft Agent Framework uses **OpenAIResponsesClient** to connect to OpenAI-compatible servers like Llama Stack.\n",
|
||||||
|
"\n",
|
||||||
|
"**Key Changes from AutoGen:**\n",
|
||||||
|
"- `OpenAIChatCompletionClient` → `OpenAIResponsesClient`\n",
|
||||||
|
"- Team-based architecture (similar to AutoGen v0.7.5)\n",
|
||||||
|
"- Async/await pattern for running tasks"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 20,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Model client configured for Llama Stack\n",
|
||||||
|
"Model: ollama/llama3.3:70b\n",
|
||||||
|
"Base URL: http://localhost:8321/v1\n",
|
||||||
|
"Client type: OpenAIResponsesClient\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# Create OpenAI Responses Client for Llama Stack\n",
|
||||||
|
"# Uses the /responses API (specialized for reasoning models)\n",
|
||||||
|
"chat_client = OpenAIResponsesClient(\n",
|
||||||
|
" model_id=\"ollama/llama3.3:70b\", # Choose any other model of your choice\n",
|
||||||
|
" api_key=\"not-needed\",\n",
|
||||||
|
" base_url=\"http://localhost:8321/v1\" # Llama Stack OpenAI-compatible endpoint\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Model client configured for Llama Stack\")\n",
|
||||||
|
"print(f\"Model: ollama/llama3.3:70b\")\n",
|
||||||
|
"print(f\"Base URL: http://localhost:8321/v1\")\n",
|
||||||
|
"print(f\"Client type: OpenAIResponsesClient\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Example 1: Simple Task with ChatAgent\n",
|
||||||
|
"\n",
|
||||||
|
"### Pattern: Single Agent Task\n",
|
||||||
|
"\n",
|
||||||
|
"Microsoft Agent Framework uses **ChatAgent** to create AI assistants powered by your model.\n",
|
||||||
|
"\n",
|
||||||
|
"**ChatAgent Features:**\n",
|
||||||
|
"- Multi-turn by default (keeps calling tools until complete)\n",
|
||||||
|
"- Stateless (use `AgentThread` for conversation history)\n",
|
||||||
|
"- Configured with `instructions` (replaces AutoGen's `system_message`)\n",
|
||||||
|
"- Can be created directly or via client factory method\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Case: Solve a Math Problem"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Agent created: MathAssistant\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Task Result:\n",
|
||||||
|
"To find the sum of the first 10 prime numbers, we need to follow these steps:\n",
|
||||||
|
"\n",
|
||||||
|
"**Step 1: List out the first few prime numbers**\n",
|
||||||
|
"Prime numbers are numbers greater than 1 that have no divisors other than 1 and themselves. Let's start listing them out:\n",
|
||||||
|
"2, 3, 5, 7, 11, ...\n",
|
||||||
|
"\n",
|
||||||
|
"**Step 2: Identify the first 10 prime numbers**\n",
|
||||||
|
"We need to find the next 4 prime numbers after 7 and 11.\n",
|
||||||
|
"13 is a prime number (only divisible by 1 and 13).\n",
|
||||||
|
"17 is a prime number (only divisible by 1 and 17).\n",
|
||||||
|
"19 is a prime number (only divisible by 1 and 19).\n",
|
||||||
|
"23 is a prime number (only divisible by 1 and 23).\n",
|
||||||
|
"\n",
|
||||||
|
"So, the first 10 prime numbers are:\n",
|
||||||
|
"2, 3, 5, 7, 11, 13, 17, 19, 23, 29\n",
|
||||||
|
"\n",
|
||||||
|
"**Step 3: Calculate the sum of these prime numbers**\n",
|
||||||
|
"Now, let's add them up:\n",
|
||||||
|
"2 + 3 = 5\n",
|
||||||
|
"5 + 5 = 10\n",
|
||||||
|
"10 + 7 = 17\n",
|
||||||
|
"17 + 11 = 28\n",
|
||||||
|
"28 + 13 = 41\n",
|
||||||
|
"41 + 17 = 58\n",
|
||||||
|
"58 + 19 = 77\n",
|
||||||
|
"77 + 23 = 100\n",
|
||||||
|
"100 + 29 = 129\n",
|
||||||
|
"\n",
|
||||||
|
"**Step 4: Write the final answer**\n",
|
||||||
|
"The sum of the first 10 prime numbers is:\n",
|
||||||
|
"129\n",
|
||||||
|
"\n",
|
||||||
|
"Therefore, the sum of the first 10 prime numbers is **129**.\n",
|
||||||
|
"==================================================\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"import asyncio\n",
|
||||||
|
"\n",
|
||||||
|
"# Create a ChatAgent (replaces AutoGen's AssistantAgent)\n",
|
||||||
|
"# Method 1: Direct creation\n",
|
||||||
|
"assistant = ChatAgent(\n",
|
||||||
|
" name=\"MathAssistant\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You are a helpful AI assistant that solves math problems. Provide clear explanations and show your work.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# Method 2: Using client factory (more convenient)\n",
|
||||||
|
"# assistant = chat_client.create_agent(\n",
|
||||||
|
"# name=\"MathAssistant\",\n",
|
||||||
|
"# instructions=\"You are a helpful AI assistant.\"\n",
|
||||||
|
"# )\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Agent created:\", assistant.name)\n",
|
||||||
|
"\n",
|
||||||
|
"# Define the task\n",
|
||||||
|
"task = \"What is the sum of the first 10 prime numbers? Please calculate it step by step.\"\n",
|
||||||
|
"\n",
|
||||||
|
"# Run the task (Agent Framework uses async)\n",
|
||||||
|
"# Note: ChatAgent is stateless - no conversation history between calls\n",
|
||||||
|
"result = await assistant.run(task)\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"Task Result:\")\n",
|
||||||
|
"print(result.text if result.text else \"No response\")\n",
|
||||||
|
"print(\"=\"*50)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Example 2: Multi-Agent Team Collaboration\n",
|
||||||
|
"\n",
|
||||||
|
"### Pattern: Sequential Workflow (Round-Robin Style)\n",
|
||||||
|
"\n",
|
||||||
|
"Agent Framework uses **SequentialBuilder** to create workflows where agents take turns.\n",
|
||||||
|
"This replaces AutoGen's `RoundRobinGroupChat`.\n",
|
||||||
|
"\n",
|
||||||
|
"**Key Concepts:**\n",
|
||||||
|
"- `SequentialBuilder`: Agents process messages sequentially\n",
|
||||||
|
"- Shared conversation history across all agents\n",
|
||||||
|
"- Each agent sees all previous messages\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Case: Write a Technical Blog Post"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 14,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Team agents created: Researcher, Writer, Critic\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Running Sequential Workflow:\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"\n",
|
||||||
|
"Turn 1 [user]:\n",
|
||||||
|
"Write a 200-word blog post about the benefits of using Llama Stack for LLM applications.\n",
|
||||||
|
"\n",
|
||||||
|
"Steps:\n",
|
||||||
|
"1. Researcher: Gather key information about Llama Stack\n",
|
||||||
|
"2. Writer: Create the blog post\n",
|
||||||
|
"3. Critic: Revi...\n",
|
||||||
|
"\n",
|
||||||
|
"Turn 2 [assistant]:\n",
|
||||||
|
"**Unlocking Efficient LLM Applications with Llama Stack**\n",
|
||||||
|
"\n",
|
||||||
|
"The rise of Large Language Models (LLMs) has transformed the artificial intelligence landscape, enabling cutting-edge natural language proces...\n",
|
||||||
|
"\n",
|
||||||
|
"Turn 3 [assistant]:\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"Turn 4 [assistant]:\n",
|
||||||
|
" \n",
|
||||||
|
"\n",
|
||||||
|
"---\n",
|
||||||
|
"\n",
|
||||||
|
"**Critic's Review:**\n",
|
||||||
|
"\n",
|
||||||
|
"The blog post effectively introduces the benefits of using Llama Stack for LLM applications, highlighting key advantages such as simplified model deployment and improved ...\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from agent_framework import SequentialBuilder, WorkflowOutputEvent\n",
|
||||||
|
"\n",
|
||||||
|
"# Create specialist agents\n",
|
||||||
|
"researcher = ChatAgent(\n",
|
||||||
|
" name=\"Researcher\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You are a researcher. Provide accurate information, facts, and statistics about topics.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"writer = ChatAgent(\n",
|
||||||
|
" name=\"Writer\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You are a technical writer. Write clear, engaging content based on research provided.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"critic = ChatAgent(\n",
|
||||||
|
" name=\"Critic\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You are an editor. Review content for clarity, accuracy, and engagement. Suggest improvements.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Team agents created: Researcher, Writer, Critic\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Create a sequential workflow (round-robin collaboration)\n",
|
||||||
|
"# Each agent processes the input and builds on previous agents' work\n",
|
||||||
|
"workflow = SequentialBuilder().participants([researcher, writer, critic]).build()\n",
|
||||||
|
"\n",
|
||||||
|
"task = \"\"\"Write a 200-word blog post about the benefits of using Llama Stack for LLM applications.\n",
|
||||||
|
"\n",
|
||||||
|
"Steps:\n",
|
||||||
|
"1. Researcher: Gather key information about Llama Stack\n",
|
||||||
|
"2. Writer: Create the blog post\n",
|
||||||
|
"3. Critic: Review and suggest improvements\n",
|
||||||
|
"\"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"Running Sequential Workflow:\")\n",
|
||||||
|
"print(\"=\"*50)\n",
|
||||||
|
"\n",
|
||||||
|
"# Run the workflow and collect results\n",
|
||||||
|
"turn = 1\n",
|
||||||
|
"async for event in workflow.run_stream(task):\n",
|
||||||
|
" if isinstance(event, WorkflowOutputEvent):\n",
|
||||||
|
" # Final output contains full conversation history\n",
|
||||||
|
" conversation_history = event.data\n",
|
||||||
|
" for msg in conversation_history:\n",
|
||||||
|
" print(f\"\\nTurn {turn} [{msg.role}]:\")\n",
|
||||||
|
" print(msg.text[:200] + \"...\" if len(msg.text or \"\") > 200 else msg.text)\n",
|
||||||
|
" turn += 1"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Example 3: Multi-Turn Conversations with AgentThread\n",
|
||||||
|
"\n",
|
||||||
|
"### Pattern: Stateful Conversations\n",
|
||||||
|
"\n",
|
||||||
|
"Unlike AutoGen, `ChatAgent` is **stateless by default**. To maintain conversation history across multiple interactions, use **AgentThread**.\n",
|
||||||
|
"\n",
|
||||||
|
"**AgentThread Features:**\n",
|
||||||
|
"- Stores conversation history\n",
|
||||||
|
"- Allows context to carry across multiple `agent.run()` calls\n",
|
||||||
|
"- Can be backed by external storage (Redis, databases)\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Case: Interactive Analysis"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 15,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Analyst agent created\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Multi-Turn Conversation with Thread:\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"\n",
|
||||||
|
"[Turn 1 - Initial Analysis]:\n",
|
||||||
|
"**Introduction**\n",
|
||||||
|
"\n",
|
||||||
|
"Large Language Models (LLMs) have revolutionized the field of natural language processing, enabling applications such as text classification, sentiment analysis, and language translation. Two popular approaches to deploying LLMs are using local models and cloud-based APIs. In this analysis, we will break down the trade-offs between these two approaches, highlighting their pros an...\n",
|
||||||
|
"\n",
|
||||||
|
"[Turn 2 - Follow-up on Cost]:\n",
|
||||||
|
"**Cost Implications: Local LLMs vs Cloud-based APIs**\n",
|
||||||
|
"\n",
|
||||||
|
"The cost implications of using local LLMs versus cloud-based APIs are significant and can vary greatly depending on the specific requirements and deployment scenarios. Here's a detailed breakdown of the costs associated with each approach:\n",
|
||||||
|
"\n",
|
||||||
|
"**Local LLMs**\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Initial Investment**:\n",
|
||||||
|
"\t* Hardware: High-performance GPUs, high-capacity storage, an...\n",
|
||||||
|
"\n",
|
||||||
|
"[Turn 3 - Summary]:\n",
|
||||||
|
"Organizations should choose between local LLMs and cloud-based APIs based on their specific requirements, weighing factors such as security, customizability, scalability, and cost, with local LLMs suitable for high-traffic or sensitive applications and cloud-based APIs ideal for low-traffic or prototyping scenarios.\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Thread maintained context across 3 turns\n",
|
||||||
|
"==================================================\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# Create an analyst agent\n",
|
||||||
|
"analyst = ChatAgent(\n",
|
||||||
|
" name=\"TechAnalyst\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"\"\"You are a technical analyst. Analyze technical topics deeply:\n",
|
||||||
|
" 1. Break down complex concepts\n",
|
||||||
|
" 2. Identify pros and cons\n",
|
||||||
|
" 3. Provide recommendations\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Analyst agent created\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Create a new thread to maintain conversation state\n",
|
||||||
|
"thread = analyst.get_new_thread()\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"Multi-Turn Conversation with Thread:\")\n",
|
||||||
|
"print(\"=\"*50)\n",
|
||||||
|
"\n",
|
||||||
|
"# First interaction\n",
|
||||||
|
"result1 = await analyst.run(\n",
|
||||||
|
" \"Analyze the trade-offs between using local LLMs versus cloud-based APIs.\",\n",
|
||||||
|
" thread=thread\n",
|
||||||
|
")\n",
|
||||||
|
"print(\"\\n[Turn 1 - Initial Analysis]:\")\n",
|
||||||
|
"print(result1.text[:400] + \"...\" if len(result1.text or \"\") > 400 else result1.text)\n",
|
||||||
|
"\n",
|
||||||
|
"# Second interaction - builds on previous context\n",
|
||||||
|
"result2 = await analyst.run(\n",
|
||||||
|
" \"What about cost implications specifically?\",\n",
|
||||||
|
" thread=thread\n",
|
||||||
|
")\n",
|
||||||
|
"print(\"\\n[Turn 2 - Follow-up on Cost]:\")\n",
|
||||||
|
"print(result2.text[:400] + \"...\" if len(result2.text or \"\") > 400 else result2.text)\n",
|
||||||
|
"\n",
|
||||||
|
"# Third interaction - continues the conversation\n",
|
||||||
|
"result3 = await analyst.run(\n",
|
||||||
|
" \"Summarize your recommendation in one sentence.\",\n",
|
||||||
|
" thread=thread\n",
|
||||||
|
")\n",
|
||||||
|
"print(\"\\n[Turn 3 - Summary]:\")\n",
|
||||||
|
"print(result3.text)\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(f\"Thread maintained context across {3} turns\")\n",
|
||||||
|
"print(\"=\"*50)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Example 4: Advanced Workflow with Custom Executors\n",
|
||||||
|
"\n",
|
||||||
|
"### Pattern: Data-Flow Workflow with Code Review Loop\n",
|
||||||
|
"\n",
|
||||||
|
"Agent Framework's **Workflow** enables complex orchestration using executors and edges.\n",
|
||||||
|
"Unlike AutoGen's event-driven model, workflows use **data-flow** architecture.\n",
|
||||||
|
"\n",
|
||||||
|
"**Key Concepts:**\n",
|
||||||
|
"- `Executor`: Processing units (agents, functions, or sub-workflows)\n",
|
||||||
|
"- `WorkflowBuilder`: Build typed data-flow graphs\n",
|
||||||
|
"- `@executor` decorator: Define custom processing logic\n",
|
||||||
|
"- Edges route messages between executors\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Case: Iterative Code Review Until Approved"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 21,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Code review team created\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Code Review Workflow:\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"\n",
|
||||||
|
"[Iteration 1 - Final Result]:\n",
|
||||||
|
"✅ APPROVED\n",
|
||||||
|
"\n",
|
||||||
|
"Code:\n",
|
||||||
|
"**Email Validation Function in Python**\n",
|
||||||
|
"=====================================\n",
|
||||||
|
"\n",
|
||||||
|
"The following Python function validates an email address, ensuring it meets the specified requirements.\n",
|
||||||
|
"\n",
|
||||||
|
"### Implementation\n",
|
||||||
|
"```python\n",
|
||||||
|
"import re\n",
|
||||||
|
"\n",
|
||||||
|
"def validate_email(email: str) -> bool:\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
" Validates an email address.\n",
|
||||||
|
"\n",
|
||||||
|
" Args:\n",
|
||||||
|
" - email (str): The email address to be validated.\n",
|
||||||
|
"\n",
|
||||||
|
" Returns:\n",
|
||||||
|
" - bool: True if the email is valid, False otherwise.\n",
|
||||||
|
" \"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
" # Define a regular expression pattern for email validation\n",
|
||||||
|
" pattern = r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\"\n",
|
||||||
|
"\n",
|
||||||
|
" try:\n",
|
||||||
|
" # Check if the input is a string\n",
|
||||||
|
" if not isinstance(email, str):\n",
|
||||||
|
" raise TypeError(\"Input must be a string.\")\n",
|
||||||
|
"\n",
|
||||||
|
" # Remove leading and trailing whitespaces\n",
|
||||||
|
" email = email.strip()\n",
|
||||||
|
"\n",
|
||||||
|
" # Check for spaces in the email address\n",
|
||||||
|
" if \" \" in email:\n",
|
||||||
|
" print(\"Error: No spaces are allowed in the email address.\")\n",
|
||||||
|
" return False\n",
|
||||||
|
"\n",
|
||||||
|
" # Check if the email matches the pattern\n",
|
||||||
|
" if not re.match(pattern, email):\n",
|
||||||
|
" print(\"Error: Invalid email format. Please use a valid email address.\")\n",
|
||||||
|
" return False\n",
|
||||||
|
"\n",
|
||||||
|
" return True\n",
|
||||||
|
"\n",
|
||||||
|
" except TypeError as e:\n",
|
||||||
|
" print(f\"Error: {e}\")\n",
|
||||||
|
" return False\n",
|
||||||
|
"\n",
|
||||||
|
" except Exception as e:\n",
|
||||||
|
" print(f\"An unexpected error occurred: {e}\")\n",
|
||||||
|
" return False\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"# Example usage\n",
|
||||||
|
"if __name__ == \"__main__\":\n",
|
||||||
|
" emails = [\n",
|
||||||
|
" \"test@example.com\",\n",
|
||||||
|
" \"invalid_email\",\n",
|
||||||
|
" \"test@ example.com\",\n",
|
||||||
|
" \"test@example\",\n",
|
||||||
|
" 123, # Test with an invalid input type\n",
|
||||||
|
" ]\n",
|
||||||
|
"\n",
|
||||||
|
" for email in emails:\n",
|
||||||
|
" print(f\"Email: {email}\")\n",
|
||||||
|
" is_valid = validate_email(email)\n",
|
||||||
|
" print(f\"Is Valid: {is_valid}\")\n",
|
||||||
|
" print(\"-\" * 50)\n",
|
||||||
|
"\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"### Explanation\n",
|
||||||
|
"\n",
|
||||||
|
"This function uses a regular expression pattern to match the general format of an email address. The `validate_email` function checks for the following:\n",
|
||||||
|
"\n",
|
||||||
|
"* The input is a string.\n",
|
||||||
|
"* There are no spaces in the email address.\n",
|
||||||
|
"* The email address matches the defined pattern, which includes:\n",
|
||||||
|
" * One or more alphanumeric characters, dots, underscores, pluses, or hyphens before the `@` symbol.\n",
|
||||||
|
" * The `@` symbol.\n",
|
||||||
|
" * One or more alphanumeric characters, hyphens, or dots after the `@` symbol and before the first dot in the domain.\n",
|
||||||
|
" * At least one dot in the domain.\n",
|
||||||
|
"\n",
|
||||||
|
"If any of these conditions are not met, the function prints an error message indicating what went wrong and returns `False`. If all checks pass, it returns `True`, indicating a valid email address.\n",
|
||||||
|
"\n",
|
||||||
|
"### Error Handling\n",
|
||||||
|
"\n",
|
||||||
|
"The code includes proper error handling:\n",
|
||||||
|
"\n",
|
||||||
|
"* **TypeError**: Raised if the input is not a string.\n",
|
||||||
|
"* **Exception**: Catches any unexpected errors and prints an error message.\n",
|
||||||
|
"\n",
|
||||||
|
"Each error case provides informative messages to help with debugging.\n",
|
||||||
|
"\n",
|
||||||
|
"Review:\n",
|
||||||
|
"**Code Review**\n",
|
||||||
|
"\n",
|
||||||
|
"Overall, the provided Python function for email validation looks clean and well-structured. It effectively checks if the input string matches the typical format of an email address using a regular expression pattern. However, there are some areas that can be improved upon:\n",
|
||||||
|
"\n",
|
||||||
|
"### Bugs and Edge Cases\n",
|
||||||
|
"1. **Input Validation**: In the current implementation, `TypeError` is raised when the input is not a string, but then immediately caught and handled within the same function. Consider letting it propagate up instead of catching it here since you're already returning an error message and `False`.\n",
|
||||||
|
"2. **Email Length Limitation**: The regular expression does not account for email length limits. As per [RFC 5321](https://tools.ietf.org/html/rfc5321#section-4.5.3), the maximum total length of a mailbox (local part plus domain) is 320 characters, but some mail servers may enforce shorter limits.\n",
|
||||||
|
"3. **Internationalized Domain Names (IDNs)**: The function does not support IDNs which are domain names that contain non-ASCII characters.\n",
|
||||||
|
"\n",
|
||||||
|
"### Performance Issues\n",
|
||||||
|
"1. **Regular Expression Compilation**: If this function is called frequently, compiling the regular expression pattern once outside of it and storing the result in a variable can improve performance.\n",
|
||||||
|
"2. **Exception Handling Overhead**: Python exceptions come with some overhead. Using if-statements to handle anticipated conditions (like type checking) instead of using exceptions for control flow might be more efficient.\n",
|
||||||
|
"\n",
|
||||||
|
"### Security Vulnerabilities\n",
|
||||||
|
"No significant security vulnerabilities were identified, but it's essential to note that validating an email address does not necessarily mean the provided email exists or can receive mail; a separate verification process (usually involving sending a confirmation link) should be implemented if you need to confirm the existence of the email account.\n",
|
||||||
|
"\n",
|
||||||
|
"### Best Practices\n",
|
||||||
|
"1. **Direct Printing**: Instead of directly printing error messages from within the function, consider raising exceptions with descriptive messages. This approach allows the caller to handle errors as needed.\n",
|
||||||
|
"2. **Variable Naming and Comments**: Although comments are provided in the docstring and around code blocks explaining what each segment does, variable names like `email` could be more descriptive (e.g., `input_email_address`). However, given the context, `email` is sufficient here.\n",
|
||||||
|
"3. **Example Usage**: The example usage section provides a simple demonstration of how to use the `validate_email` function with different inputs.\n",
|
||||||
|
"\n",
|
||||||
|
"### Code Improvements\n",
|
||||||
|
"\n",
|
||||||
|
"```python\n",
|
||||||
|
"import re\n",
|
||||||
|
"\n",
|
||||||
|
"EMAIL_PATTERN = re.compile(\n",
|
||||||
|
" r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$\",\n",
|
||||||
|
" flags=re.IGNORECASE,\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"def validate_email_address(input_string: str) -> bool:\n",
|
||||||
|
" \"\"\"Validate an email address.\"\"\"\n",
|
||||||
|
" \n",
|
||||||
|
" # Input type check\n",
|
||||||
|
" if not isinstance(input_string, str):\n",
|
||||||
|
" raise TypeError(\"Input must be a string.\")\n",
|
||||||
|
"\n",
|
||||||
|
" input_string = input_string.strip()\n",
|
||||||
|
" \n",
|
||||||
|
" if \" \" in input_string:\n",
|
||||||
|
" raise ValueError(\"No spaces are allowed in the email address.\")\n",
|
||||||
|
" \n",
|
||||||
|
" if len(input_string) > 320: # Basic length validation as per RFC5321\n",
|
||||||
|
" raise ValueError(\"Email address exceeds maximum length limit.\")\n",
|
||||||
|
"\n",
|
||||||
|
" # Regular expression match for basic email format validity\n",
|
||||||
|
" return bool(EMAIL_PATTERN.match(input_string))\n",
|
||||||
|
"\n",
|
||||||
|
"# Example usage with error handling\n",
|
||||||
|
"if __name__ == \"__main__\":\n",
|
||||||
|
" emails = [\n",
|
||||||
|
" \"test@example.com\",\n",
|
||||||
|
" \"invalid_email\",\n",
|
||||||
|
" \"test@ example.com\",\n",
|
||||||
|
" \"test@example\",\n",
|
||||||
|
" 123, \n",
|
||||||
|
" ]\n",
|
||||||
|
"\n",
|
||||||
|
" for email in emails:\n",
|
||||||
|
" print(f\"Email: {email}\")\n",
|
||||||
|
" try:\n",
|
||||||
|
" is_valid = validate_email_address(email)\n",
|
||||||
|
" print(f\"Is Valid: {is_valid}\")\n",
|
||||||
|
" except (TypeError, ValueError) as e:\n",
|
||||||
|
" print(f\"Error: {e}\")\n",
|
||||||
|
" print(\"-\" * 50)\n",
|
||||||
|
"```\n",
|
||||||
|
"\n",
|
||||||
|
"In summary, while the original code provides a good basic structure for validating email addresses, a few adjustments can enhance its flexibility and robustness. These changes primarily focus on improving exception handling, avoiding unnecessary overhead, and adhering to best practices in Python coding standards.\n",
|
||||||
|
"\n",
|
||||||
|
"LGTM with the suggested modifications.\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from agent_framework import WorkflowBuilder, executor, WorkflowContext, WorkflowOutputEvent\n",
|
||||||
|
"from typing_extensions import Never\n",
|
||||||
|
"\n",
|
||||||
|
"# Create code review agents\n",
|
||||||
|
"code_developer = ChatAgent(\n",
|
||||||
|
" name=\"Developer\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"\"\"You are a developer. When you receive code review feedback:\n",
|
||||||
|
" - Address ALL issues mentioned\n",
|
||||||
|
" - Explain your changes\n",
|
||||||
|
" - Present the improved code\n",
|
||||||
|
"\n",
|
||||||
|
" If no feedback is given, present your initial implementation.\"\"\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"code_reviewer = ChatAgent(\n",
|
||||||
|
" name=\"CodeReviewer\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"\"\"You are a senior code reviewer. Review code for:\n",
|
||||||
|
" - Bugs and edge cases\n",
|
||||||
|
" - Performance issues\n",
|
||||||
|
" - Security vulnerabilities\n",
|
||||||
|
" - Best practices\n",
|
||||||
|
"\n",
|
||||||
|
" If the code looks good, say 'LGTM' (Looks Good To Me).\n",
|
||||||
|
" If issues found, provide specific feedback for improvement.\"\"\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Code review team created\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Define custom executors for workflow\n",
|
||||||
|
"@executor(id=\"developer\")\n",
|
||||||
|
"async def developer_executor(task: str, ctx: WorkflowContext[str]) -> None:\n",
|
||||||
|
" \"\"\"Developer creates or improves code based on input.\"\"\"\n",
|
||||||
|
" result = await code_developer.run(task)\n",
|
||||||
|
" await ctx.send_message(result.text)\n",
|
||||||
|
"\n",
|
||||||
|
"@executor(id=\"reviewer\")\n",
|
||||||
|
"async def reviewer_executor(code: str, ctx: WorkflowContext[str, str]) -> None:\n",
|
||||||
|
" \"\"\"Reviewer checks code and either approves or requests changes.\"\"\"\n",
|
||||||
|
" result = await code_reviewer.run(f\"Review this code:\\n{code}\")\n",
|
||||||
|
"\n",
|
||||||
|
" # Check if approved\n",
|
||||||
|
" if \"LGTM\" in result.text or \"looks good\" in result.text.lower():\n",
|
||||||
|
" await ctx.yield_output(f\"✅ APPROVED\\n\\nCode:\\n{code}\\n\\nReview:\\n{result.text}\")\n",
|
||||||
|
" else:\n",
|
||||||
|
" # Send feedback back to developer for revision\n",
|
||||||
|
" await ctx.send_message(f\"Feedback: {result.text}\\n\\nOriginal code:\\n{code}\", target_id=\"developer\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Build workflow: developer → reviewer (with feedback loop)\n",
|
||||||
|
"workflow = (\n",
|
||||||
|
" WorkflowBuilder()\n",
|
||||||
|
" .add_edge(developer_executor, reviewer_executor)\n",
|
||||||
|
" .add_edge(reviewer_executor, developer_executor) # Feedback loop\n",
|
||||||
|
" .set_start_executor(developer_executor)\n",
|
||||||
|
" .build()\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# task = \"Implement a Python function to check if a string is a palindrome.\"\n",
|
||||||
|
"task = \"\"\"Implement a Python function to validate email addresses with these requirements:\n",
|
||||||
|
"- Must have @ symbol\n",
|
||||||
|
"- Must have domain with at least one dot\n",
|
||||||
|
"- No spaces allowed\n",
|
||||||
|
"- Handle edge cases\n",
|
||||||
|
"- Include error messages\n",
|
||||||
|
"Make it production-ready with proper error handling.\"\"\"\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"Code Review Workflow:\")\n",
|
||||||
|
"print(\"=\"*50)\n",
|
||||||
|
"\n",
|
||||||
|
"# Run workflow with streaming\n",
|
||||||
|
"iteration = 1\n",
|
||||||
|
"async for event in workflow.run_stream(task):\n",
|
||||||
|
" if isinstance(event, WorkflowOutputEvent):\n",
|
||||||
|
" print(f\"\\n[Iteration {iteration} - Final Result]:\")\n",
|
||||||
|
" print(event.data)\n",
|
||||||
|
" iteration += 1"
|
||||||
|
]
|
||||||
|
},
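{
"cell_type": "markdown",
"metadata": {},
"source": [
"**A note on stopping conditions:** the developer → reviewer → developer edges above form a cycle, and the run only finishes when the reviewer yields an approval. If the reviewer never says \"LGTM\", the loop has no bound. One way to cap it, using only the pieces already defined in this example, is to count review rounds inside the reviewer executor. The sketch below is illustrative; `MAX_REVIEW_ROUNDS` and the counter are ad-hoc names, not framework settings:\n",
"\n",
"```python\n",
"MAX_REVIEW_ROUNDS = 3  # ad-hoc cap, not a framework setting\n",
"review_rounds = 0\n",
"\n",
"@executor(id=\"reviewer\")\n",
"async def reviewer_executor(code: str, ctx: WorkflowContext[str, str]) -> None:\n",
"    \"\"\"Approve the code, request changes, or stop once the round limit is hit.\"\"\"\n",
"    global review_rounds\n",
"    review_rounds += 1\n",
"    result = await code_reviewer.run(f\"Review this code:\\n{code}\")\n",
"    approved = \"LGTM\" in result.text or \"looks good\" in result.text.lower()\n",
"    if approved or review_rounds >= MAX_REVIEW_ROUNDS:\n",
"        status = \"✅ APPROVED\" if approved else \"⏹️ STOPPED (round limit reached)\"\n",
"        await ctx.yield_output(f\"{status}\\n\\nCode:\\n{code}\\n\\nReview:\\n{result.text}\")\n",
"    else:\n",
"        await ctx.send_message(f\"Feedback: {result.text}\\n\\nOriginal code:\\n{code}\", target_id=\"developer\")\n",
"```"
]
},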
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Example 5: Concurrent Workflow Pattern\n",
|
||||||
|
"\n",
|
||||||
|
"### Pattern: Parallel Processing\n",
|
||||||
|
"\n",
|
||||||
|
"Agent Framework's **ConcurrentBuilder** enables parallel agent execution.\n",
|
||||||
|
"All agents process the input simultaneously and results are aggregated.\n",
|
||||||
|
"\n",
|
||||||
|
"### Use Case: Multi-Perspective Analysis"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 25,
|
||||||
|
"metadata": {
|
||||||
|
"scrolled": true
|
||||||
|
},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"✅ Analyst team created: Technical, Business, Security\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"Concurrent Analysis (Parallel Processing):\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"\n",
|
||||||
|
"[Analysis 1]:\n",
|
||||||
|
"Evaluate the proposal to deploy a customer service chatbot.\n",
|
||||||
|
"--------------------------------------------------\n",
|
||||||
|
"\n",
|
||||||
|
"[Analysis 2]:\n",
|
||||||
|
"**Proposal Evaluation: Deploying a Customer Service Chatbot**\n",
|
||||||
|
"\n",
|
||||||
|
"**Executive Summary:**\n",
|
||||||
|
"The proposal to deploy a customer service chatbot aims to enhance customer experience, reduce support queries, and optimize resource allocation. This evaluation assesses the technical feasibility and implementation complexity of the proposed solution.\n",
|
||||||
|
"\n",
|
||||||
|
"**Technical Feasibility:**\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Natural Language Processing (NLP) Capabilities:** The chatbot's ability to understand and respond to customer inquiries accurately is crucial. Modern NLP libraries (e.g., NLTK, spaCy) and machine learning frameworks (e.g., TensorFlow, PyTorch) can support this requirement.\n",
|
||||||
|
"2. **Integration with Existing Systems:** Seamless integration with the CRM, helpdesk software, and other relevant systems is necessary. APIs and webhooks can facilitate data exchange and synchronization.\n",
|
||||||
|
"3. **Security and Compliance:** The chatbot must ensure customer data protection and adhere to regulations (e.g., GDPR, HIPAA). Implementing encrypti...\n",
|
||||||
|
"--------------------------------------------------\n",
|
||||||
|
"\n",
|
||||||
|
"[Analysis 3]:\n",
|
||||||
|
"**Evaluation of Customer Service Chatbot Proposal**\n",
|
||||||
|
"\n",
|
||||||
|
"**Introduction:**\n",
|
||||||
|
"The proposal to deploy a customer service chatbot aims to improve customer experience, reduce support costs, and increase efficiency in handling customer inquiries. This evaluation assesses the business value, ROI, and market impact of implementing a customer service chatbot.\n",
|
||||||
|
"\n",
|
||||||
|
"**Benefits:**\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Improved Customer Experience:** A chatbot can provide 24/7 support, quick responses to common queries, and personalized experiences, leading to increased customer satisfaction.\n",
|
||||||
|
"2. **Cost Savings:** Automating routine inquiries can reduce the workload of human customer support agents, resulting in cost savings and allowing them to focus on complex issues.\n",
|
||||||
|
"3. **Increased Efficiency:** Chatbots can handle multiple conversations simultaneously, reducing wait times and improving response rates.\n",
|
||||||
|
"4. **Data Collection and Analysis:** Chatbots can collect valuable data on customer interactions, providing insights for business improv...\n",
|
||||||
|
"--------------------------------------------------\n",
|
||||||
|
"\n",
|
||||||
|
"[Analysis 4]:\n",
|
||||||
|
"**Evaluation of Customer Service Chatbot Proposal**\n",
|
||||||
|
"\n",
|
||||||
|
"**Introduction:**\n",
|
||||||
|
"The proposal to deploy a customer service chatbot aims to enhance customer experience, reduce support queries, and improve response times. This evaluation assesses the feasibility, security implications, risks, and compliance of implementing a chatbot solution.\n",
|
||||||
|
"\n",
|
||||||
|
"**Benefits:**\n",
|
||||||
|
"\n",
|
||||||
|
"1. **24/7 Support**: A chatbot can provide round-the-clock support, improving customer satisfaction and reducing the workload on human customer support agents.\n",
|
||||||
|
"2. **Faster Response Times**: Chatbots can respond to queries instantly, ensuring that customers receive timely assistance.\n",
|
||||||
|
"3. **Cost Savings**: Automating routine inquiries can lead to significant cost savings by reducing the need for human customer support agents.\n",
|
||||||
|
"4. **Personalization**: Chatbots can be programmed to offer personalized recommendations and solutions based on customer interactions.\n",
|
||||||
|
"\n",
|
||||||
|
"**Security Implications:**\n",
|
||||||
|
"\n",
|
||||||
|
"1. **Data Security**: The chatbot will handle sensitive cust...\n",
|
||||||
|
"--------------------------------------------------\n",
|
||||||
|
"\n",
|
||||||
|
"==================================================\n",
|
||||||
|
"All agents completed in parallel\n",
|
||||||
|
"==================================================\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from agent_framework import ConcurrentBuilder, WorkflowOutputEvent\n",
|
||||||
|
"\n",
|
||||||
|
"# Create specialized analysts\n",
|
||||||
|
"technical_analyst = ChatAgent(\n",
|
||||||
|
" name=\"TechnicalAnalyst\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You analyze technical feasibility and implementation complexity.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"business_analyst = ChatAgent(\n",
|
||||||
|
" name=\"BusinessAnalyst\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You analyze business value, ROI, and market impact.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"security_analyst = ChatAgent(\n",
|
||||||
|
" name=\"SecurityAnalyst\",\n",
|
||||||
|
" chat_client=chat_client,\n",
|
||||||
|
" instructions=\"You analyze security implications, risks, and compliance.\"\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"✅ Analyst team created: Technical, Business, Security\")\n",
|
||||||
|
"\n",
|
||||||
|
"# Create concurrent workflow - all agents process in parallel\n",
|
||||||
|
"workflow = (\n",
|
||||||
|
" ConcurrentBuilder()\n",
|
||||||
|
" .participants([technical_analyst, business_analyst, security_analyst])\n",
|
||||||
|
" .build()\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"# task = \"Evaluate the proposal to deploy Llama Stack for our customer service chatbot.\"\n",
|
||||||
|
"task = \"Evaluate the proposal to deploy a customer service chatbot.\"\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"Concurrent Analysis (Parallel Processing):\")\n",
|
||||||
|
"print(\"=\"*50)\n",
|
||||||
|
"\n",
|
||||||
|
"# Run workflow - agents work in parallel\n",
|
||||||
|
"async for event in workflow.run_stream(task):\n",
|
||||||
|
" if isinstance(event, WorkflowOutputEvent):\n",
|
||||||
|
" # Combined results from all agents\n",
|
||||||
|
" results = event.data\n",
|
||||||
|
" for i, result in enumerate(results, 1):\n",
|
||||||
|
" print(f\"\\n[Analysis {i}]:\")\n",
|
||||||
|
" print(result.text[:1000] + \"...\" if len(result.text or \"\") > 1000 else result.text)\n",
|
||||||
|
" print(\"-\" * 50)\n",
|
||||||
|
"\n",
|
||||||
|
"print(\"\\n\" + \"=\"*50)\n",
|
||||||
|
"print(\"All agents completed in parallel\")\n",
|
||||||
|
"print(\"=\"*50)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.12.7"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}