Update tool_Calling101.ipynb

This commit is contained in:
Sanyam Bhutani 2024-11-04 19:35:31 -08:00
parent 800a9dc134
commit 6d3769aedd


@@ -1,335 +1,5 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
" let's explore how to have a conversation about images using the Memory API! This section will show you how to:\n",
"1. Load and prepare images for the API\n",
"2. Send image-based queries\n",
"3. Create an interactive chat loop with images\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "80d8f04a91744c7fa3b1e0cd2dab542b",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"VBox(children=(FileUpload(value=(), accept='.jpg,.jpeg,.png,.gif', description='Upload Image'), Output()))"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import ipywidgets as widgets\n",
"from IPython.display import display, Image\n",
"import base64\n",
"from io import BytesIO\n",
"from PIL import Image as PILImage\n",
"\n",
"def create_image_uploader():\n",
" # Create upload widget\n",
" upload_widget = widgets.FileUpload(\n",
" accept='.jpg,.jpeg,.png,.gif',\n",
" multiple=False,\n",
" description='Upload Image'\n",
" )\n",
" \n",
" # Create output widget to display the image\n",
" output_widget = widgets.Output()\n",
" \n",
" def handle_upload(change):\n",
" with output_widget:\n",
" output_widget.clear_output()\n",
" \n",
" if not upload_widget.value:\n",
" return\n",
" \n",
" # Get uploaded file data\n",
" file = upload_widget.value[0] # Get the first (and only) file\n",
" content = file.content\n",
" \n",
" try:\n",
" # Convert bytes to image\n",
" image = PILImage.open(BytesIO(content))\n",
" \n",
" # Display image\n",
" display(image)\n",
" \n",
" # Print image details\n",
" print(f\"Image size: {image.size}\")\n",
" print(f\"Image format: {image.format}\")\n",
" print(f\"Image mode: {image.mode}\")\n",
" except Exception as e:\n",
" print(f\"Error processing image: {str(e)}\")\n",
" \n",
" # Register callback\n",
" upload_widget.observe(handle_upload, names='value')\n",
" \n",
" # Display widgets\n",
" return widgets.VBox([upload_widget, output_widget])\n",
"\n",
"# Usage example\n",
"uploader = create_image_uploader()\n",
"display(uploader)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Create an Interactive Image Chat\n",
"\n",
"Let's create a function that enables back-and-forth conversation about an image:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def image_to_data_url(image_path: str) -> str:\n",
" \"\"\"Convert image file to data URL\"\"\"\n",
" with open(image_path, 'rb') as f:\n",
" img_data = f.read()\n",
" base64_str = base64.b64encode(img_data).decode('utf-8')\n",
" mime_type = f\"image/{os.path.splitext(image_path)[1][1:]}\"\n",
" return f\"data:{mime_type};base64,{base64_str}\"\n"
]
},
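{
"cell_type": "markdown",
"metadata": {},
"source": [
"Quick sanity check for the helper above: the sketch below converts an image file to a data URL and previews the result. The path `temp_upload_example.png` is only a placeholder - point it at any image you have on disk."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal usage sketch for image_to_data_url (assumes an image exists at the placeholder path below)\n",
"example_path = \"temp_upload_example.png\"  # placeholder - replace with a real image path\n",
"\n",
"if os.path.exists(example_path):\n",
"    data_url = image_to_data_url(example_path)\n",
"    # A data URL packs the MIME type and base64-encoded bytes into a single string\n",
"    print(data_url[:50] + \"...\")\n",
"    print(f\"Total length: {len(data_url)} characters\")\n",
"else:\n",
"    print(f\"{example_path} not found - save or upload an image first.\")"
]
},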
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"class ImageChatInterface:\n",
" def __init__(self):\n",
" # Create upload widget\n",
" self.upload_widget = widgets.FileUpload(\n",
" accept='.jpg,.jpeg,.png,.gif',\n",
" multiple=False,\n",
" description='Upload Image'\n",
" )\n",
" \n",
" # Create output widget for image display\n",
" self.image_output = widgets.Output()\n",
" \n",
" # Create chat interface\n",
" self.chat_output = widgets.Output()\n",
" self.text_input = widgets.Text(\n",
" value='',\n",
" placeholder='Type your question about the image...',\n",
" description='Ask:',\n",
" disabled=True # Disabled until image is uploaded\n",
" )\n",
" \n",
" # Store the current image path\n",
" self.current_image_path: Optional[str] = None\n",
" \n",
" # Initialize the client\n",
" self.client = LlamaStackClient(\n",
" base_url=f\"http://localhost:8000\", # Adjust host/port as needed\n",
" )\n",
" \n",
" # Set up the upload handler\n",
" self.upload_widget.observe(self.handle_upload, names='value')\n",
" # Set up the chat handler\n",
" self.text_input.on_submit(lambda x: asyncio.create_task(self.on_submit(x)))\n",
" \n",
" # Display the interface\n",
" display(widgets.VBox([\n",
" self.upload_widget,\n",
" self.image_output,\n",
" self.text_input,\n",
" self.chat_output\n",
" ]))\n",
" \n",
" def handle_upload(self, change):\n",
" with self.image_output:\n",
" self.image_output.clear_output()\n",
" \n",
" if not self.upload_widget.value:\n",
" return\n",
" \n",
" # Get uploaded file data\n",
" file = self.upload_widget.value[0]\n",
" content = file.content\n",
" \n",
" try:\n",
" # Convert bytes to image\n",
" image = PILImage.open(BytesIO(content))\n",
" \n",
" # Save image temporarily\n",
" temp_path = f\"temp_upload_{file.name}\"\n",
" image.save(temp_path)\n",
" self.current_image_path = temp_path\n",
" \n",
" # Display image\n",
" display(image)\n",
" \n",
" # Print image details\n",
" print(f\"Image size: {image.size}\")\n",
" print(f\"Image format: {image.format}\")\n",
" print(f\"Image mode: {image.mode}\")\n",
" \n",
" # Enable chat input\n",
" self.text_input.disabled = False\n",
" \n",
" except Exception as e:\n",
" print(f\"Error processing image: {str(e)}\")\n",
" self.text_input.disabled = True\n",
" \n",
" async def on_submit(self, change):\n",
" with self.chat_output:\n",
" question = self.text_input.value\n",
" if question.lower() == 'exit':\n",
" print(\"Chat ended.\")\n",
" return\n",
" \n",
" if not self.current_image_path:\n",
" print(\"Please upload an image first.\")\n",
" return\n",
" \n",
" message = UserMessage(\n",
" role=\"user\",\n",
" content=[\n",
" {\"image\": {\"uri\": image_to_data_url(self.current_image_path)}},\n",
" question,\n",
" ],\n",
" )\n",
" \n",
" print(f\"\\nUser> {question}\")\n",
" response = self.client.inference.chat_completion(\n",
" messages=[message],\n",
" model=\"Llama3.2-11B-Vision-Instruct\",\n",
" stream=True,\n",
" )\n",
" \n",
" print(\"Assistant> \", end='')\n",
" async for log in EventLogger().log(response):\n",
" log.print()\n",
" \n",
" self.text_input.value = '' # Clear input after sending\n",
" \n",
" def cleanup(self):\n",
" \"\"\"Clean up temporary files\"\"\"\n",
" if self.current_image_path and os.path.exists(self.current_image_path):\n",
" os.remove(self.current_image_path)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'LlamaStackClient' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[12], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# Create and display the interface\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m chat_interface \u001b[38;5;241m=\u001b[39m \u001b[43mImageChatInterface\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[11], line 26\u001b[0m, in \u001b[0;36mImageChatInterface.__init__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcurrent_image_path: Optional[\u001b[38;5;28mstr\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 25\u001b[0m \u001b[38;5;66;03m# Initialize the client\u001b[39;00m\n\u001b[0;32m---> 26\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient \u001b[38;5;241m=\u001b[39m \u001b[43mLlamaStackClient\u001b[49m(\n\u001b[1;32m 27\u001b[0m base_url\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttp://localhost:8000\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# Adjust host/port as needed\u001b[39;00m\n\u001b[1;32m 28\u001b[0m )\n\u001b[1;32m 30\u001b[0m \u001b[38;5;66;03m# Set up the upload handler\u001b[39;00m\n\u001b[1;32m 31\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mupload_widget\u001b[38;5;241m.\u001b[39mobserve(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_upload, names\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvalue\u001b[39m\u001b[38;5;124m'\u001b[39m)\n",
"\u001b[0;31mNameError\u001b[0m: name 'LlamaStackClient' is not defined"
]
}
],
"source": [
"# Create and display the interface\n",
"chat_interface = ImageChatInterface()"
]
},
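{
"cell_type": "markdown",
"metadata": {},
"source": [
"When you are done chatting, you can remove the temporary image file the interface saved during upload. A minimal sketch using the `cleanup` helper defined above:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Remove the temporary upload file, if one was created (safe to call even if nothing was uploaded)\n",
"chat_interface.cleanup()"
]
},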
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: 'your_image.jpg'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[2], line 6\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# Display the image we'll be chatting about\u001b[39;00m\n\u001b[1;32m 5\u001b[0m image_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myour_image.jpg\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;66;03m# Replace with your image path\u001b[39;00m\n\u001b[0;32m----> 6\u001b[0m display(\u001b[43mImage\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mimage_path\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# Initialize the client\u001b[39;00m\n\u001b[1;32m 9\u001b[0m client \u001b[38;5;241m=\u001b[39m LlamaStackClient(\n\u001b[1;32m 10\u001b[0m base_url\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttp://localhost:8000\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;66;03m# Adjust host/port as needed\u001b[39;00m\n\u001b[1;32m 11\u001b[0m )\n",
"File \u001b[0;32m~/.conda/envs/quick/lib/python3.13/site-packages/IPython/core/display.py:1050\u001b[0m, in \u001b[0;36mImage.__init__\u001b[0;34m(self, data, url, filename, format, embed, width, height, retina, unconfined, metadata, alt)\u001b[0m\n\u001b[1;32m 1048\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39munconfined \u001b[38;5;241m=\u001b[39m unconfined\n\u001b[1;32m 1049\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39malt \u001b[38;5;241m=\u001b[39m alt\n\u001b[0;32m-> 1050\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mImage\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mdata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43murl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43murl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfilename\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1051\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetadata\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1053\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mwidth \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmetadata\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mwidth\u001b[39m\u001b[38;5;124m'\u001b[39m, {}):\n\u001b[1;32m 1054\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mwidth \u001b[38;5;241m=\u001b[39m metadata[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mwidth\u001b[39m\u001b[38;5;124m'\u001b[39m]\n",
"File \u001b[0;32m~/.conda/envs/quick/lib/python3.13/site-packages/IPython/core/display.py:370\u001b[0m, in \u001b[0;36mDisplayObject.__init__\u001b[0;34m(self, data, url, filename, metadata)\u001b[0m\n\u001b[1;32m 367\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmetadata \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 368\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmetadata \u001b[38;5;241m=\u001b[39m {}\n\u001b[0;32m--> 370\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreload\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 371\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_check_data()\n",
"File \u001b[0;32m~/.conda/envs/quick/lib/python3.13/site-packages/IPython/core/display.py:1085\u001b[0m, in \u001b[0;36mImage.reload\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1083\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Reload the raw data from file or URL.\"\"\"\u001b[39;00m\n\u001b[1;32m 1084\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39membed:\n\u001b[0;32m-> 1085\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mImage\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mreload\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1086\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mretina:\n\u001b[1;32m 1087\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_retina_shape()\n",
"File \u001b[0;32m~/.conda/envs/quick/lib/python3.13/site-packages/IPython/core/display.py:396\u001b[0m, in \u001b[0;36mDisplayObject.reload\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 394\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilename \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 395\u001b[0m encoding \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mb\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_read_flags \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m--> 396\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_read_flags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mencoding\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mencoding\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 397\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdata \u001b[38;5;241m=\u001b[39m f\u001b[38;5;241m.\u001b[39mread()\n\u001b[1;32m 398\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39murl \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 399\u001b[0m \u001b[38;5;66;03m# Deferred import\u001b[39;00m\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'your_image.jpg'"
]
}
],
"source": [
"from IPython.display import Image, display\n",
"import ipywidgets as widgets\n",
"\n",
"# Display the image we'll be chatting about\n",
"image_path = \"your_image.jpg\" # Replace with your image path\n",
"display(Image(filename=image_path))\n",
"\n",
"# Initialize the client\n",
"client = LlamaStackClient(\n",
" base_url=f\"http://localhost:8000\", # Adjust host/port as needed\n",
")\n",
"\n",
"# Create chat interface\n",
"output = widgets.Output()\n",
"text_input = widgets.Text(\n",
" value='',\n",
" placeholder='Type your question about the image...',\n",
" description='Ask:',\n",
" disabled=False\n",
")\n",
"\n",
"# Display interface\n",
"display(text_input, output)\n",
"\n",
"# Handle chat interaction\n",
"async def on_submit(change):\n",
" with output:\n",
" question = text_input.value\n",
" if question.lower() == 'exit':\n",
" print(\"Chat ended.\")\n",
" return\n",
"\n",
" message = UserMessage(\n",
" role=\"user\",\n",
" content=[\n",
" {\"image\": {\"uri\": image_to_data_url(image_path)}},\n",
" question,\n",
" ],\n",
" )\n",
"\n",
" print(f\"\\nUser> {question}\")\n",
" response = client.inference.chat_completion(\n",
" messages=[message],\n",
" model=\"Llama3.2-11B-Vision-Instruct\",\n",
" stream=True,\n",
" )\n",
"\n",
" print(\"Assistant> \", end='')\n",
" async for log in EventLogger().log(response):\n",
" log.print()\n",
"\n",
" text_input.value = '' # Clear input after sending\n",
"\n",
"text_input.on_submit(lambda x: asyncio.create_task(on_submit(x)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -622,451 +292,6 @@
"# Run the example\n",
"await weather_example()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Multi-Tool Agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"async def create_multi_tool_agent(client: LlamaStackClient) -> Agent:\n",
" \"\"\"Create an agent with multiple tools.\"\"\"\n",
" tools = [\n",
" # Brave Search tool\n",
" AgentConfigToolSearchToolDefinition(\n",
" type=\"brave_search\",\n",
" engine=\"brave\",\n",
" api_key=os.getenv(\"BRAVE_SEARCH_API_KEY\"),\n",
" ),\n",
" # Weather tool\n",
" {\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"get_weather\",\n",
" \"description\": \"Get weather information for a location\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"location\": {\"type\": \"string\"},\n",
" \"date\": {\"type\": \"string\", \"format\": \"date\"}\n",
" },\n",
" \"required\": [\"location\"]\n",
" }\n",
" },\n",
" \"implementation\": WeatherTool()\n",
" }\n",
" ]\n",
"\n",
" return await create_tool_agent(\n",
" client=client,\n",
" tools=tools,\n",
" instructions=\"\"\"\n",
" You are an assistant that can search the web and check weather information.\n",
" Use the appropriate tool based on the user's question.\n",
" For weather queries, always specify location and conditions.\n",
" For web searches, always cite your sources.\n",
" \"\"\"\n",
" )\n",
"\n",
"# Interactive example with multi-tool agent\n",
"async def interactive_multi_tool():\n",
" client = LlamaStackClient(base_url=\"http://localhost:8000\")\n",
" agent = await create_multi_tool_agent(client)\n",
" session_id = agent.create_session(\"interactive-session\")\n",
"\n",
" print(\"🤖 Multi-tool Agent Ready! (type 'exit' to quit)\")\n",
" print(\"Example questions:\")\n",
" print(\"- What's the weather in Paris and what events are happening there?\")\n",
" print(\"- Tell me about recent space discoveries and the weather on Mars\")\n",
"\n",
" while True:\n",
" query = input(\"\\nYour question: \")\n",
" if query.lower() == 'exit':\n",
" break\n",
"\n",
" print(\"\\nThinking...\")\n",
" try:\n",
" response = agent.create_turn(\n",
" messages=[{\"role\": \"user\", \"content\": query}],\n",
" session_id=session_id,\n",
" )\n",
"\n",
" async for log in EventLogger().log(response):\n",
" log.print()\n",
" except Exception as e:\n",
" print(f\"Error: {e}\")\n",
"\n",
"# Run interactive example\n",
"await interactive_multi_tool()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Memory "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Getting Started with Memory API Tutorial 🚀\n",
"Welcome! This interactive tutorial will guide you through using the Memory API, a powerful tool for document storage and retrieval. Whether you're new to vector databases or an experienced developer, this notebook will help you understand the basics and get up and running quickly.\n",
"What you'll learn:\n",
"\n",
"How to set up and configure the Memory API client\n",
"Creating and managing memory banks (vector stores)\n",
"Different ways to insert documents into the system\n",
"How to perform intelligent queries on your documents\n",
"\n",
"Prerequisites:\n",
"\n",
"Basic Python knowledge\n",
"A running instance of the Memory API server (we'll use localhost in this tutorial)\n",
"\n",
"Let's start by installing the required packages:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Install the client library and a helper package for colored output\n",
"!pip install llama-stack-client termcolor\n",
"\n",
"# 💡 Note: If you're running this in a new environment, you might need to restart\n",
"# your kernel after installation"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"1. Initial Setup\n",
"First, we'll import the necessary libraries and set up some helper functions. Let's break down what each import does:\n",
"\n",
"llama_stack_client: Our main interface to the Memory API\n",
"base64: Helps us encode files for transmission\n",
"mimetypes: Determines file types automatically\n",
"termcolor: Makes our output prettier with colors\n",
"\n",
"❓ Question: Why do we need to convert files to data URLs?\n",
"Answer: Data URLs allow us to embed file contents directly in our requests, making it easier to transmit files to the API without needing separate file uploads."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"import json\n",
"import mimetypes\n",
"import os\n",
"from pathlib import Path\n",
"\n",
"from llama_stack_client import LlamaStackClient\n",
"from llama_stack_client.types.memory_insert_params import Document\n",
"from termcolor import cprint\n",
"\n",
"# Helper function to convert files to data URLs\n",
"def data_url_from_file(file_path: str) -> str:\n",
" \"\"\"Convert a file to a data URL for API transmission\n",
"\n",
" Args:\n",
" file_path (str): Path to the file to convert\n",
"\n",
" Returns:\n",
" str: Data URL containing the file's contents\n",
"\n",
" Example:\n",
" >>> url = data_url_from_file('example.txt')\n",
" >>> print(url[:30]) # Preview the start of the URL\n",
" 'data:text/plain;base64,SGVsbG8='\n",
" \"\"\"\n",
" if not os.path.exists(file_path):\n",
" raise FileNotFoundError(f\"File not found: {file_path}\")\n",
"\n",
" with open(file_path, \"rb\") as file:\n",
" file_content = file.read()\n",
"\n",
" base64_content = base64.b64encode(file_content).decode(\"utf-8\")\n",
" mime_type, _ = mimetypes.guess_type(file_path)\n",
"\n",
" data_url = f\"data:{mime_type};base64,{base64_content}\"\n",
" return data_url"
]
},
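{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before moving on, here is a small, self-contained sketch of the helper in action: it writes a throwaway text file, converts it to a data URL, previews the prefix, and cleans up. The file name `memory_api_demo.txt` is just an example."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Demo of data_url_from_file with a temporary text file (example file name, removed afterwards)\n",
"demo_path = \"memory_api_demo.txt\"\n",
"with open(demo_path, \"w\") as f:\n",
"    f.write(\"Hello, Memory API!\")\n",
"\n",
"url = data_url_from_file(demo_path)\n",
"cprint(f\"Preview: {url[:40]}...\", \"green\")  # e.g. data:text/plain;base64,...\n",
"\n",
"os.remove(demo_path)  # tidy up the throwaway file"
]
},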
{
"cell_type": "markdown",
"metadata": {},
"source": [
"2. Initialize Client and Create Memory Bank\n",
"Now we'll set up our connection to the Memory API and create our first memory bank. A memory bank is like a specialized database that stores document embeddings for semantic search.\n",
"❓ Key Concepts:\n",
"\n",
"embedding_model: The model used to convert text into vector representations\n",
"chunk_size: How large each piece of text should be when splitting documents\n",
"overlap_size: How much overlap between chunks (helps maintain context)\n",
"\n",
"✨ Pro Tip: Choose your chunk size based on your use case. Smaller chunks (256-512 tokens) are better for precise retrieval, while larger chunks (1024+ tokens) maintain more context."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Available providers:\n"
]
}
],
"source": [
"# Configure connection parameters\n",
"HOST = \"localhost\" # Replace with your host if using a remote server\n",
"PORT = 5001 # Replace with your port if different\n",
"\n",
"# Initialize client\n",
"client = LlamaStackClient(\n",
" base_url=f\"http://{HOST}:{PORT}\",\n",
")\n",
"\n",
"# Let's see what providers are available\n",
"# Providers determine where and how your data is stored\n",
"providers = client.providers.list()\n",
"print(\"Available providers:\")\n",
"#print(json.dumps(providers, indent=2))\n",
"\n",
"# Create a memory bank with optimized settings for general use\n",
"client.memory_banks.register(\n",
" memory_bank={\n",
" \"identifier\": \"tutorial_bank\", # A unique name for your memory bank\n",
" \"embedding_model\": \"all-MiniLM-L6-v2\", # A lightweight but effective model\n",
" \"chunk_size_in_tokens\": 512, # Good balance between precision and context\n",
" \"overlap_size_in_tokens\": 64, # Helps maintain context between chunks\n",
" \"provider_id\": providers[\"memory\"][0].provider_id, # Use the first available provider\n",
" }\n",
")\n",
"\n",
"# Let's verify our memory bank was created\n",
"memory_banks = client.memory_banks.list()\n",
"#print(\"\\nRegistered memory banks:\")\n",
"#print(json.dumps(memory_banks, indent=2))\n",
"\n",
"# 🎯 Exercise: Try creating another memory bank with different settings!\n",
"# What happens if you try to create a bank with the same identifier?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"3. Insert Documents\n",
"The Memory API supports multiple ways to add documents. We'll demonstrate two common approaches:\n",
"\n",
"Loading documents from URLs\n",
"Loading documents from local files\n",
"\n",
"❓ Important Concepts:\n",
"\n",
"Each document needs a unique document_id\n",
"Metadata helps organize and filter documents later\n",
"The API automatically processes and chunks documents"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Documents inserted successfully!\n"
]
}
],
"source": [
"# Example URLs to documentation\n",
"# 💡 Replace these with your own URLs or use the examples\n",
"urls = [\n",
" \"memory_optimizations.rst\",\n",
" \"chat.rst\",\n",
" \"llama3.rst\",\n",
"]\n",
"\n",
"# Create documents from URLs\n",
"# We add metadata to help organize our documents\n",
"url_documents = [\n",
" Document(\n",
" document_id=f\"url-doc-{i}\", # Unique ID for each document\n",
" content=f\"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}\",\n",
" mime_type=\"text/plain\",\n",
" metadata={\"source\": \"url\", \"filename\": url}, # Metadata helps with organization\n",
" )\n",
" for i, url in enumerate(urls)\n",
"]\n",
"\n",
"# Example with local files\n",
"# 💡 Replace these with your actual files\n",
"local_files = [\"example.txt\", \"readme.md\"]\n",
"file_documents = [\n",
" Document(\n",
" document_id=f\"file-doc-{i}\",\n",
" content=data_url_from_file(path),\n",
" metadata={\"source\": \"local\", \"filename\": path},\n",
" )\n",
" for i, path in enumerate(local_files)\n",
" if os.path.exists(path)\n",
"]\n",
"\n",
"# Combine all documents\n",
"all_documents = url_documents + file_documents\n",
"\n",
"# Insert documents into memory bank\n",
"response = client.memory.insert(\n",
" bank_id=\"tutorial_bank\",\n",
" documents=all_documents,\n",
")\n",
"\n",
"print(\"Documents inserted successfully!\")\n",
"\n",
"# 🎯 Exercise: Try adding your own documents!\n",
"# - What happens if you try to insert a document with an existing ID?\n",
"# - What other metadata might be useful to add?"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"4. Query the Memory Bank\n",
"Now for the exciting part - querying our documents! The Memory API uses semantic search to find relevant content based on meaning, not just keywords.\n",
"❓ Understanding Scores:\n",
"\n",
"Scores range from 0 to 1, with 1 being the most relevant\n",
"Generally, scores above 0.7 indicate strong relevance\n",
"Consider your use case when deciding on score thresholds"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Query: How do I use LoRA?\n",
"--------------------------------------------------\n",
"\n",
"Result 1 (Score: 1.242)\n",
"========================================\n",
"Chunk(content='.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model\\'s final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models), but\\n other modelling tasks may have different projections - classifier models will project\\n to the number of classes, for example\\n\\n.. note::\\n\\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\\n final output projection do not support ``apply_lora_to_output``.\\n\\nThese are all specified under the ``model`` flag or config entry, i.e:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"]\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n model.lora', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 1.221)\n",
"========================================\n",
"Chunk(content=' adds a small overhead to LoRA training due to the addition of the magnitude parameter, but it has been shown to\\nimprove the performance of LoRA, particularly at low ranks.\\n\\n*Sounds great! How do I use it?*\\n\\nMuch like LoRA and QLoRA, you can finetune using DoRA with any of our LoRA recipes. We use the same model builders for LoRA\\nas we do for DoRA, so you can use the ``lora_`` version of any model builder with ``use_dora=True``. For example, to finetune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA <glossary_lora>` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 1.093)\n",
"========================================\n",
"Chunk(content='64\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 32\\n lora_alpha: 64\\n\\n.. note::\\n\\n To get a deeper sense of how LoRA parameters affect memory usage during training,\\n see the :ref:`relevant section in our Llama2 LoRA tutorial<lora_tutorial_memory_tradeoff_label>`.\\n\\n.. _glossary_qlora:\\n\\nQuantized Low Rank Adaptation (QLoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n*What\\'s going on here?*\\n\\n`QLoRA <https://arxiv.org/abs/2305.14314>`_ is an enhancement on top of `LoRA <https://arxiv.org/abs/2106.09685>`_\\nthat maintains the frozen model parameters from LoRA in 4-bit quantized precision, thereby reducing memory usage.\\nThis is enabled through a novel 4-bit NormalFloat (NF4) data type proposed by the authors, which allows for 4-8x less\\nparameter memory usage whilst retaining model accuracy. You can read our tutorial on :ref:`finetuning Llama2 with QLoRA<qlora_finetune_label>`\\nfor a deeper understanding of how it works.\\n\\nWhen considering using QLoRA to reduce memory usage, it\\'s worth noting that QLoRA prevents accuracy degradation during quantization\\nby up-casting quantized parameters to the original higher precision datatype during model forward passes - this up-casting may\\nincur penalties to training speed. The :ref:`relevant section <qlora_compile_label>` in our QLoRA tutorial demonstrates the usage of ``torch.compile``\\nto address this by speeding up training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using QLoRA with any of our LoRA recipes, i.e. recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nQLoRA-enabled model builders, which we support for all our models, and also use the ``qlora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3_8b` model has', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Query: Tell me about memory optimizations\n",
"--------------------------------------------------\n",
"\n",
"Result 1 (Score: 1.218)\n",
"========================================\n",
"Chunk(content='.. _memory_optimization_overview_label:\\n\\n============================\\nMemory Optimization Overview\\n============================\\n\\n**Author**: `Salman Mohammadi <https://github.com/SalmanMohammadi>`_\\n\\ntorchtune comes with a host of plug-and-play memory optimization components which give you lots of flexibility\\nto ``tune`` our recipes to your hardware. This page provides a brief glossary of these components and how you might use them.\\nTo make things easy, we\\'ve summarized these components in the following table:\\n\\n.. csv-table:: Memory optimization components\\n :header: \"Component\", \"When to use?\"\\n :widths: auto\\n\\n \":ref:`glossary_precision`\", \"You\\'ll usually want to leave this as its default ``bfloat16``. If you\\'re struggling with training stability or accuracy due to precision, fp32 may help, but will significantly increase memory usage and decrease training speed.\"\\n \":ref:`glossary_act_ckpt`\", \"Use when you\\'re memory constrained and need to handle larger batch sizes or longer context lengths. Be aware that it may slow down training speed.\"\\n \":ref:`glossary_act_off`\", \"Similar to activation checkpointing, this can be used when memory constrained, but comes at the cost of training speed due to the overhead of moving tensors between GPU VRAM and CPU. This can also be used alongside activation checkpointing.\"\\n \":ref:`glossary_grad_accm`\", \"Helpful when memory-constrained to simulate larger batch sizes. Often preferable to activation checkpointing for better training speed.\"\\n \":ref:`glossary_low_precision_opt`\", \"When you need to further reduce memory usage beyond using ``bf16`` by reducing the precision in the optimizer states. Note that lower precision optimizers may reduce training stability/accuracy.\"\\n \":ref:`glossary_opt_in_bwd`\", \"Helps reduce memory usage when using stateful optimizers, particularly when full-finetuning large models with high gradient memory usage. This is not compatible with ``gradient_accumulation_steps``, so training may slow down due to reduced model throughput.\"\\n \":ref:`glossary_cpu_offload`\", \"Offloads optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed, as CPU optimizer steps can be slow and bottleneck training performance.\"\\n \":ref:`glossary_lora`\", \"When you', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 1.079)\n",
"========================================\n",
"Chunk(content=' optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed, as CPU optimizer steps can be slow and bottleneck training performance.\"\\n \":ref:`glossary_lora`\", \"When you want to significantly reduce the number of trainable parameters, saving gradient and optimizer memory during training, and significantly speeding up training.\"\\n \":ref:`glossary_qlora`\", \"When you need even more memory savings than LoRA, at the potential cost of some training speed. Useful for very large models or limited hardware.\"\\n \":ref:`glossary_dora`\", \"Like LoRA, DoRA can provide significant memory savings and training speed-ups. DoRA may improve performance over LoRA, particularly when using small rank updates.\"\\n\\n\\n.. note::\\n\\n In its current state, this tutorial is focused on single-device optimizations. Check in soon as we update this page\\n for the latest memory optimization features for distributed fine-tuning.\\n\\n.. _glossary_precision:\\n\\n\\nModel Precision\\n---------------\\n\\n*What\\'s going on here?*\\n\\nWe use the term \"precision\" to refer to the underlying data type used to represent the model and optimizer parameters.\\nWe support two data types in torchtune:\\n\\n.. note::\\n\\n We recommend diving into Sebastian Raschka\\'s `blogpost on mixed-precision techniques <https://sebastianraschka.com/blog/2023/llm-mixed-precision-copy.html>`_\\n for a deeper understanding of concepts around precision and data formats.\\n\\n* ``fp32``, commonly referred to as \"full-precision\", uses 4 bytes per model and optimizer parameter.\\n* ``bfloat16``, referred to as \"half-precision\", uses 2 bytes per model and optimizer parameter - effectively half\\n the memory of ``fp32``, and also improves training speed. Generally, if your hardware supports training with ``bfloat16``,\\n we recommend using it - this is the default setting for our recipes.\\n\\n.. note::\\n\\n Another common paradigm is \"mixed-precision\" training: where model weights are in ``bfloat16`` (or ``fp16``), and optimizer\\n states are in ``fp32``. Currently, we don\\'t support mixed-precision training in torchtune.\\n\\n*Sounds great! How do I use it?*\\n\\nSimply use the ``dtype`` flag or config entry in all our recipes! For example, to use half-precision', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 0.937)\n",
"========================================\n",
"Chunk(content=\",\\n lr=1e-5,\\n fused=True\\n )\\n\\nSome helpful hints from the ``torchao`` `CPUOffloadOptimizer page <https://github.com/pytorch/ao/tree/main/torchao/prototype/low_bit_optim#optimizer-cpu-offload>`_:\\n\\n* The CPU optimizer step is often the bottleneck when optimizer CPU offload is used. To minimize the slowdown, it is recommended to (1) use full ``bf16`` training so that parameters, gradients, and optimizer states are in ``bf16``; and (2) give GPU more work per optimizer step (e.g. larger batch size with activation checkpointing, gradient accumulation).\\n* Gradient accumulation should always be set to 1 when ``offload_gradients=True``, as gradients are cleared on GPU every backward pass.\\n* This optimizer works by keeping a copy of parameters and pre-allocating gradient memory on CPU. Therefore, expect your RAM usage to increase by 4x model size.\\n* This optimizer is only supported for single-device recipes. To use CPU-offloading in distributed recipes, use ``fsdp_cpu_offload=True`` in any distributed recipe. See :class:`torch.distributed.fsdp.FullyShardedDataParallel` for more details\\n\\n\\n.. _glossary_peft:\\n\\nParameter Efficient Fine-Tuning (PEFT)\\n--------------------------------------\\n\\n.. _glossary_lora:\\n\\nLow Rank Adaptation (LoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n\\n*What's going on here?*\\n\\nYou can read our tutorial on :ref:`finetuning Llama2 with LoRA<lora_finetune_label>` to understand how LoRA works, and how to use it.\\nSimply stated, LoRA greatly reduces the number of trainable parameters, thus saving significant gradient and optimizer\\nmemory during training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using any of our recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nLoRA-enabled model builders, which we support for all our models, and also use the ``lora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with\", document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Query: What are the key features of Llama 3?\n",
"--------------------------------------------------\n",
"\n",
"Result 1 (Score: 0.964)\n",
"========================================\n",
"Chunk(content=\"8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. You will need to follow the instructions\\non the `official Meta page <https://github.com/meta-llama/llama3/blob/main/README.md>`_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here <https://huggingface.co/settings/tokens>`_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3-8B-Instruct \\\\\\n --output-dir <checkpoint_dir> \\\\\\n --hf-token <ACCESS TOKEN>\\n\\n|\\n\\nFine-tuning Llama3-8B-Instruct in torchtune\\n-------------------------------------------\\n\\ntorchtune provides `LoRA <https://arxiv.org/abs/2106.09685>`_, `QLoRA <https://arxiv.org/abs/2305.14314>`_, and full fine-tuning\\nrecipes for fine-tuning Llama3-8B on one or more GPUs. For more on LoRA in torchtune, see our :ref:`LoRA Tutorial <lora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial <qlora_finetune_label>`.\\n\\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides <cli_override>` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora\", document_id='url-doc-2', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 0.927)\n",
"========================================\n",
"Chunk(content=\".. _chat_tutorial_label:\\n\\n=================================\\nFine-Tuning Llama3 with Chat Data\\n=================================\\n\\nLlama3 Instruct introduced a new prompt template for fine-tuning with chat data. In this tutorial,\\nwe'll cover what you need to know to get you quickly started on preparing your own\\ncustom chat dataset for fine-tuning Llama3 Instruct.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` You will learn:\\n\\n * How the Llama3 Instruct format differs from Llama2\\n * All about prompt templates and special tokens\\n * How to use your own chat dataset to fine-tune Llama3 Instruct\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`configuring datasets<chat_dataset_usage_label>`\\n * Know how to :ref:`download Llama3 Instruct weights <llama3_label>`\\n\\n\\nTemplate changes from Llama2 to Llama3\\n--------------------------------------\\n\\nThe Llama2 chat model requires a specific template when prompting the pre-trained\\nmodel. Since the chat model was pretrained with this prompt template, if you want to run\\ninference on the model, you'll need to use the same template for optimal performance\\non chat data. Otherwise, the model will just perform standard text completion, which\\nmay or may not align with your intended use case.\\n\\nFrom the `official Llama2 prompt\\ntemplate guide <https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-2>`_\\nfor the Llama2 chat model, we can see that special tags are added:\\n\\n.. code-block:: text\\n\\n <s>[INST] <<SYS>>\\n You are a helpful, respectful, and honest assistant.\\n <</SYS>>\\n\\n Hi! I am a human. [/INST] Hello there! Nice to meet you! I'm Meta AI, your friendly AI assistant </s>\\n\\nLlama3 Instruct `overhauled <https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3>`_\\nthe template from Llama2 to better support multiturn conversations. The same text\\nin the Llama3 Instruct format would look like this:\\n\\n.. code-block:: text\\n\\n <|begin_of_text|><|start_header_id|>system<|end_header_id|>\\n\\n You are a helpful,\", document_id='url-doc-1', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 0.858)\n",
"========================================\n",
"Chunk(content='.. _llama3_label:\\n\\n========================\\nMeta Llama3 in torchtune\\n========================\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` You will learn how to:\\n\\n * Download the Llama3-8B-Instruct weights and tokenizer\\n * Fine-tune Llama3-8B-Instruct with LoRA and QLoRA\\n * Evaluate your fine-tuned Llama3-8B-Instruct model\\n * Generate text with your fine-tuned model\\n * Quantize your model to speed up generation\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune<overview_label>`\\n * Make sure to :ref:`install torchtune<install_label>`\\n\\n\\nLlama3-8B\\n---------\\n\\n`Meta Llama 3 <https://llama.meta.com/llama3>`_ is a new family of models released by Meta AI that improves upon the performance of the Llama2 family\\nof models across a `range of different benchmarks <https://huggingface.co/meta-llama/Meta-Llama-3-8B#base-pretrained-models>`_.\\nCurrently there are two different sizes of Meta Llama 3: 8B and 70B. In this tutorial we will focus on the 8B size model.\\nThere are a few main changes between Llama2-7B and Llama3-8B models:\\n\\n- Llama3-8B uses `grouped-query attention <https://arxiv.org/abs/2305.13245>`_ instead of the standard multi-head attention from Llama2-7B\\n- Llama3-8B has a larger vocab size (128,256 instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken <https://github.com/openai/tiktoken>`_ instead of `sentencepiece <https://github.com/google/sentencepiece>`_)\\n- Llama3-8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3', document_id='url-doc-2', token_count=512)\n",
"========================================\n"
]
}
],
"source": [
"def print_query_results(query: str):\n",
" \"\"\"Helper function to print query results in a readable format\n",
"\n",
" Args:\n",
" query (str): The search query to execute\n",
" \"\"\"\n",
" print(f\"\\nQuery: {query}\")\n",
" print(\"-\" * 50)\n",
"\n",
" response = client.memory.query(\n",
" bank_id=\"tutorial_bank\",\n",
" query=[query], # The API accepts multiple queries at once!\n",
" )\n",
"\n",
" for i, (chunk, score) in enumerate(zip(response.chunks, response.scores)):\n",
" print(f\"\\nResult {i+1} (Score: {score:.3f})\")\n",
" print(\"=\" * 40)\n",
" print(chunk)\n",
" print(\"=\" * 40)\n",
"\n",
"# Let's try some example queries\n",
"queries = [\n",
" \"How do I use LoRA?\", # Technical question\n",
" \"Tell me about memory optimizations\", # General topic\n",
" \"What are the key features of Llama 3?\" # Product-specific\n",
"]\n",
"\n",
"for query in queries:\n",
" print_query_results(query)\n",
"\n",
"# 🎯 Exercises:\n",
"# 1. Try writing your own queries! What works well? What doesn't?\n",
"# 2. How do different phrasings of the same question affect results?\n",
"# 3. What happens if you query for content that isn't in your documents?"
]
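},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As noted above, a useful score threshold depends on your embedding model and documents. Here is a minimal sketch, assuming the `tutorial_bank` and `client` created earlier, that keeps only results above a cutoff you choose:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal threshold-filter sketch: keep only chunks scoring above a cutoff tuned for your data\n",
"SCORE_THRESHOLD = 1.0  # example value - tune empirically for your embedding model and documents\n",
"\n",
"response = client.memory.query(\n",
"    bank_id=\"tutorial_bank\",\n",
"    query=[\"How do I use LoRA?\"],\n",
")\n",
"\n",
"strong_matches = [\n",
"    (chunk, score)\n",
"    for chunk, score in zip(response.chunks, response.scores)\n",
"    if score >= SCORE_THRESHOLD\n",
"]\n",
"\n",
"print(f\"{len(strong_matches)} of {len(response.chunks)} chunks passed the threshold\")\n",
"for chunk, score in strong_matches:\n",
"    print(f\"- score {score:.3f}, document_id={chunk.document_id}\")"
]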
}
],
"metadata": {