agent kind of working

Kai Wu 2024-11-06 17:54:38 -08:00
parent 5a3e1c5f63
commit 9e24f31b8d
4 changed files with 111 additions and 78 deletions

View file

@@ -39,7 +39,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "1d293479-9dde-4b68-94ab-d0c4c61ab08c",
"metadata": {},
"outputs": [],
@@ -128,16 +128,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "64d36476-95d7-49f9-a548-312cf8d8c49e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[32mUser> Sending image for analysis...\u001b[0m\n",
"\u001b[36mAssistant> \u001b[0m\u001b[33mThe\u001b[0m\u001b[33m image\u001b[0m\u001b[33m features\u001b[0m\u001b[33m a\u001b[0m\u001b[33m styl\u001b[0m\u001b[33mized\u001b[0m\u001b[33m,\u001b[0m\u001b[33m mon\u001b[0m\u001b[33moch\u001b[0m\u001b[33mromatic\u001b[0m\u001b[33m logo\u001b[0m\u001b[33m for\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mLL\u001b[0m\u001b[33mAMA\u001b[0m\u001b[33m STACK\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m against\u001b[0m\u001b[33m a\u001b[0m\u001b[33m solid\u001b[0m\u001b[33m black\u001b[0m\u001b[33m background\u001b[0m\u001b[33m.\u001b[0m\u001b[33m The\u001b[0m\u001b[33m logo\u001b[0m\u001b[33m is\u001b[0m\u001b[33m centered\u001b[0m\u001b[33m and\u001b[0m\u001b[33m consists\u001b[0m\u001b[33m of\u001b[0m\u001b[33m a\u001b[0m\u001b[33m simple\u001b[0m\u001b[33m line\u001b[0m\u001b[33m drawing\u001b[0m\u001b[33m of\u001b[0m\u001b[33m a\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m's\u001b[0m\u001b[33m head\u001b[0m\u001b[33m and\u001b[0m\u001b[33m neck\u001b[0m\u001b[33m,\u001b[0m\u001b[33m with\u001b[0m\u001b[33m its\u001b[0m\u001b[33m body\u001b[0m\u001b[33m replaced\u001b[0m\u001b[33m by\u001b[0m\u001b[33m a\u001b[0m\u001b[33m stack\u001b[0m\u001b[33m of\u001b[0m\u001b[33m three\u001b[0m\u001b[33m rounded\u001b[0m\u001b[33m rectangles\u001b[0m\u001b[33m resembling\u001b[0m\u001b[33m a\u001b[0m\u001b[33m pile\u001b[0m\u001b[33m of\u001b[0m\u001b[33m pancakes\u001b[0m\u001b[33m or\u001b[0m\u001b[33m a\u001b[0m\u001b[33m stack\u001b[0m\u001b[33m of\u001b[0m\u001b[33m books\u001b[0m\u001b[33m.\u001b[0m\u001b[33m The\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m's\u001b[0m\u001b[33m head\u001b[0m\u001b[33m is\u001b[0m\u001b[33m depicted\u001b[0m\u001b[33m in\u001b[0m\u001b[33m profile\u001b[0m\u001b[33m,\u001b[0m\u001b[33m facing\u001b[0m\u001b[33m to\u001b[0m\u001b[33m the\u001b[0m\u001b[33m left\u001b[0m\u001b[33m,\u001b[0m\u001b[33m with\u001b[0m\u001b[33m a\u001b[0m\u001b[33m small\u001b[0m\u001b[33m circle\u001b[0m\u001b[33m representing\u001b[0m\u001b[33m the\u001b[0m\u001b[33m eye\u001b[0m\u001b[33m and\u001b[0m\u001b[33m a\u001b[0m\u001b[33m curved\u001b[0m\u001b[33m line\u001b[0m\u001b[33m indicating\u001b[0m\u001b[33m the\u001b[0m\u001b[33m ear\u001b[0m\u001b[33m.\u001b[0m\u001b[33m The\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m's\u001b[0m\u001b[33m neck\u001b[0m"
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: 'logo.png'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[4], line 17\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[38;5;28mprint\u001b[39m(models_response)\n\u001b[1;32m 16\u001b[0m \u001b[38;5;66;03m# Execute the main function\u001b[39;00m\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mawait\u001b[39;00m main()\n",
"Cell \u001b[0;32mIn[4], line 9\u001b[0m, in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 4\u001b[0m client \u001b[38;5;241m=\u001b[39m LlamaStackClient(\n\u001b[1;32m 5\u001b[0m base_url\u001b[38;5;241m=\u001b[39m\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttp://\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mHOST\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mPORT\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 6\u001b[0m )\n\u001b[1;32m 8\u001b[0m \u001b[38;5;66;03m# Process image\u001b[39;00m\n\u001b[0;32m----> 9\u001b[0m \u001b[38;5;28;01mawait\u001b[39;00m process_image(client, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlogo.png\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 11\u001b[0m \u001b[38;5;66;03m# Query available models\u001b[39;00m\n\u001b[1;32m 12\u001b[0m models_response \u001b[38;5;241m=\u001b[39m client\u001b[38;5;241m.\u001b[39mmodels\u001b[38;5;241m.\u001b[39mlist()\n",
"Cell \u001b[0;32mIn[3], line 29\u001b[0m, in \u001b[0;36mprocess_image\u001b[0;34m(client, image_path, stream)\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mprocess_image\u001b[39m(client: LlamaStackClient, image_path: \u001b[38;5;28mstr\u001b[39m, stream: \u001b[38;5;28mbool\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m):\n\u001b[1;32m 21\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 22\u001b[0m \u001b[38;5;124;03m Process an image through the LlamaStack Vision API.\u001b[39;00m\n\u001b[1;32m 23\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[38;5;124;03m stream (bool): Whether to stream the response\u001b[39;00m\n\u001b[1;32m 28\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 29\u001b[0m data_url \u001b[38;5;241m=\u001b[39m \u001b[43mencode_image_to_data_url\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimage_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 31\u001b[0m message \u001b[38;5;241m=\u001b[39m UserMessage(\n\u001b[1;32m 32\u001b[0m role\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 33\u001b[0m content\u001b[38;5;241m=\u001b[39m[\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 36\u001b[0m ],\n\u001b[1;32m 37\u001b[0m )\n\u001b[1;32m 39\u001b[0m cprint(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUser> Sending image for analysis...\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgreen\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"Cell \u001b[0;32mIn[3], line 15\u001b[0m, in \u001b[0;36mencode_image_to_data_url\u001b[0;34m(file_path)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mime_type \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCould not determine MIME type of the file\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 15\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfile_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrb\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m image_file:\n\u001b[1;32m 16\u001b[0m encoded_string \u001b[38;5;241m=\u001b[39m base64\u001b[38;5;241m.\u001b[39mb64encode(image_file\u001b[38;5;241m.\u001b[39mread())\u001b[38;5;241m.\u001b[39mdecode(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mutf-8\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdata:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmime_type\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m;base64,\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mencoded_string\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n",
"File \u001b[0;32m~/miniconda3/envs/stack/lib/python3.10/site-packages/IPython/core/interactiveshell.py:324\u001b[0m, in \u001b[0;36m_modified_open\u001b[0;34m(file, *args, **kwargs)\u001b[0m\n\u001b[1;32m 317\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m {\u001b[38;5;241m0\u001b[39m, \u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m2\u001b[39m}:\n\u001b[1;32m 318\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 319\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mIPython won\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt let you open fd=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfile\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m by default \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 320\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mas it is likely to crash IPython. If you know what you are doing, \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myou can use builtins\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m open.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 322\u001b[0m )\n\u001b[0;32m--> 324\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mio_open\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfile\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'logo.png'"
]
}
],
@@ -188,7 +195,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.0"
"version": "3.10.15"
}
},
"nbformat": 4,

View file

@@ -28,7 +28,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -38,7 +38,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -63,9 +63,10 @@
" client: LlamaStackClient,\n",
" tools: List[Dict],\n",
" instructions: str = \"You are a helpful assistant\",\n",
" model: str = \"Llama3.1-8B-Instruct\",\n",
" model: str = \"Llama3.2-11B-Vision-Instruct\",\n",
") -> Agent:\n",
" \"\"\"Create an agent with specified tools.\"\"\"\n",
" print(\"Using the following model: \", model)\n",
" agent_config = AgentConfig(\n",
" model=model,\n",
" instructions=instructions,\n",
@@ -96,25 +97,24 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using the following model: Llama3.2-11B-Vision-Instruct\n",
"\n",
"Query: What are the latest developments in quantum computing?\n",
"--------------------------------------------------\n",
"\u001b[30m\u001b[0m\u001b[33minference> \u001b[0m\u001b[33mF\u001b[0m\u001b[33mIND\u001b[0m\u001b[33mINGS\u001b[0m\u001b[33m:\n",
"\u001b[0m\u001b[33mThe\u001b[0m\u001b[33m latest\u001b[0m\u001b[33m developments\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m computing\u001b[0m\u001b[33m include\u001b[0m\u001b[33m advancements\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processors\u001b[0m\u001b[33m,\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m algorithms\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m error\u001b[0m\u001b[33m correction\u001b[0m\u001b[33m.\u001b[0m\u001b[33m Researchers\u001b[0m\u001b[33m have\u001b[0m\u001b[33m made\u001b[0m\u001b[33m significant\u001b[0m\u001b[33m progress\u001b[0m\u001b[33m in\u001b[0m\u001b[33m developing\u001b[0m\u001b[33m more\u001b[0m\u001b[33m powerful\u001b[0m\u001b[33m and\u001b[0m\u001b[33m reliable\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m computers\u001b[0m\u001b[33m,\u001b[0m\u001b[33m with\u001b[0m\u001b[33m some\u001b[0m\u001b[33m companies\u001b[0m\u001b[33m already\u001b[0m\u001b[33m showcasing\u001b[0m\u001b[33m \u001b[0m\u001b[33m100\u001b[0m\u001b[33m-q\u001b[0m\u001b[33mubit\u001b[0m\u001b[33m and\u001b[0m\u001b[33m \u001b[0m\u001b[33m127\u001b[0m\u001b[33m-q\u001b[0m\u001b[33mubit\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processors\u001b[0m\u001b[33m (\u001b[0m\u001b[33mIBM\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m;\u001b[0m\u001b[33m Google\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\u001b[0m\u001b[33m These\u001b[0m\u001b[33m advancements\u001b[0m\u001b[33m have\u001b[0m\u001b[33m led\u001b[0m\u001b[33m to\u001b[0m\u001b[33m breakthrough\u001b[0m\u001b[33ms\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m simulations\u001b[0m\u001b[33m,\u001b[0m\u001b[33m machine\u001b[0m\u001b[33m learning\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m optimization\u001b[0m\u001b[33m problems\u001b[0m\u001b[33m (\u001b[0m\u001b[33mB\u001b[0m\u001b[33mhart\u001b[0m\u001b[33mi\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m;\u001b[0m\u001b[33m Zhang\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\u001b[0m\u001b[33m Additionally\u001b[0m\u001b[33m,\u001b[0m\u001b[33m there\u001b[0m\u001b[33m have\u001b[0m\u001b[33m been\u001b[0m\u001b[33m significant\u001b[0m\u001b[33m improvements\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m error\u001b[0m\u001b[33m correction\u001b[0m\u001b[33m,\u001b[0m\u001b[33m which\u001b[0m\u001b[33m is\u001b[0m\u001b[33m essential\u001b[0m\u001b[33m for\u001b[0m\u001b[33m large\u001b[0m\u001b[33m-scale\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m computing\u001b[0m\u001b[33m (\u001b[0m\u001b[33mG\u001b[0m\u001b[33mottes\u001b[0m\u001b[33mman\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\n",
"\u001b[0m\u001b[33mThe\u001b[0m\u001b[33m latest\u001b[0m\u001b[33m developments\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m computing\u001b[0m\u001b[33m include\u001b[0m\u001b[33m the\u001b[0m\u001b[33m creation\u001b[0m\u001b[33m of\u001b[0m\u001b[33m more\u001b[0m\u001b[33m powerful\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processors\u001b[0m\u001b[33m,\u001b[0m\u001b[33m advancements\u001b[0m\u001b[33m in\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m error\u001b[0m\u001b[33m correction\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m the\u001b[0m\u001b[33m development\u001b[0m\u001b[33m of\u001b[0m\u001b[33m new\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m algorithms\u001b[0m\u001b[33m.\u001b[0m\u001b[33m For\u001b[0m\u001b[33m example\u001b[0m\u001b[33m,\u001b[0m\u001b[33m Google\u001b[0m\u001b[33m has\u001b[0m\u001b[33m announced\u001b[0m\u001b[33m the\u001b[0m\u001b[33m development\u001b[0m\u001b[33m of\u001b[0m\u001b[33m a\u001b[0m\u001b[33m \u001b[0m\u001b[33m53\u001b[0m\u001b[33m-q\u001b[0m\u001b[33mubit\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processor\u001b[0m\u001b[33m,\u001b[0m\u001b[33m which\u001b[0m\u001b[33m is\u001b[0m\u001b[33m the\u001b[0m\u001b[33m largest\u001b[0m\u001b[33m and\u001b[0m\u001b[33m most\u001b[0m\u001b[33m complex\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processor\u001b[0m\u001b[33m ever\u001b[0m\u001b[33m built\u001b[0m\u001b[33m (\u001b[0m\u001b[33mSource\u001b[0m\u001b[33m:\u001b[0m\u001b[33m Google\u001b[0m\u001b[33m Blog\u001b[0m\u001b[33m).\u001b[0m\u001b[33m Additionally\u001b[0m\u001b[33m,\u001b[0m\u001b[33m researchers\u001b[0m\u001b[33m have\u001b[0m\u001b[33m made\u001b[0m\u001b[33m progress\u001b[0m\u001b[33m in\u001b[0m\u001b[33m developing\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m error\u001b[0m\u001b[33m correction\u001b[0m\u001b[33m techniques\u001b[0m\u001b[33m,\u001b[0m\u001b[33m which\u001b[0m\u001b[33m are\u001b[0m\u001b[33m necessary\u001b[0m\u001b[33m for\u001b[0m\u001b[33m large\u001b[0m\u001b[33m-scale\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m computing\u001b[0m\u001b[33m (\u001b[0m\u001b[33mSource\u001b[0m\u001b[33m:\u001b[0m\u001b[33m NASA\u001b[0m\u001b[33m).\u001b[0m\u001b[33m Furthermore\u001b[0m\u001b[33m,\u001b[0m\u001b[33m new\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m algorithms\u001b[0m\u001b[33m have\u001b[0m\u001b[33m been\u001b[0m\u001b[33m developed\u001b[0m\u001b[33m for\u001b[0m\u001b[33m solving\u001b[0m\u001b[33m specific\u001b[0m\u001b[33m problems\u001b[0m\u001b[33m,\u001b[0m\u001b[33m such\u001b[0m\u001b[33m as\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m simulation\u001b[0m\u001b[33m and\u001b[0m\u001b[33m machine\u001b[0m\u001b[33m learning\u001b[0m\u001b[33m (\u001b[0m\u001b[33mSource\u001b[0m\u001b[33m:\u001b[0m\u001b[33m IBM\u001b[0m\u001b[33m Research\u001b[0m\u001b[33m).\n",
"\n",
"\u001b[0m\u001b[33mS\u001b[0m\u001b[33mOURCES\u001b[0m\u001b[33m:\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m IBM\u001b[0m\u001b[33m Quantum\u001b[0m\u001b[33m:\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Process\u001b[0m\u001b[33mors\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mwww\u001b[0m\u001b[33m.ibm\u001b[0m\u001b[33m.com\u001b[0m\u001b[33m/\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m/com\u001b[0m\u001b[33mputer\u001b[0m\u001b[33m/)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m Google\u001b[0m\u001b[33m Quantum\u001b[0m\u001b[33m AI\u001b[0m\u001b[33m Lab\u001b[0m\u001b[33m:\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Process\u001b[0m\u001b[33mors\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33mai\u001b[0m\u001b[33m.google\u001b[0m\u001b[33m/al\u001b[0m\u001b[33mphabet\u001b[0m\u001b[33m/sub\u001b[0m\u001b[33m-page\u001b[0m\u001b[33m-\u001b[0m\u001b[33m1\u001b[0m\u001b[33m/)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m Bh\u001b[0m\u001b[33marti\u001b[0m\u001b[33m,\u001b[0m\u001b[33m K\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Computing\u001b[0m\u001b[33m:\u001b[0m\u001b[33m A\u001b[0m\u001b[33m Review\u001b[0m\u001b[33m of\u001b[0m\u001b[33m Recent\u001b[0m\u001b[33m Advances\u001b[0m\u001b[33m.\"\u001b[0m\u001b[33m Journal\u001b[0m\u001b[33m of\u001b[0m\u001b[33m Physics\u001b[0m\u001b[33m:\u001b[0m\u001b[33m Conference\u001b[0m\u001b[33m Series\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m218\u001b[0m\u001b[33m5\u001b[0m\u001b[33m(\u001b[0m\u001b[33m1\u001b[0m\u001b[33m),\u001b[0m\u001b[33m \u001b[0m\u001b[33m012\u001b[0m\u001b[33m001\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mi\u001b[0m\u001b[33mop\u001b[0m\u001b[33mscience\u001b[0m\u001b[33m.i\u001b[0m\u001b[33mop\u001b[0m\u001b[33m.org\u001b[0m\u001b[33m/article\u001b[0m\u001b[33m/\u001b[0m\u001b[33m10\u001b[0m\u001b[33m.\u001b[0m\u001b[33m108\u001b[0m\u001b[33m8\u001b[0m\u001b[33m/\u001b[0m\u001b[33m174\u001b[0m\u001b[33m2\u001b[0m\u001b[33m-\u001b[0m\u001b[33m659\u001b[0m\u001b[33m6\u001b[0m\u001b[33m/\u001b[0m\u001b[33m218\u001b[0m\u001b[33m5\u001b[0m\u001b[33m/\u001b[0m\u001b[33m1\u001b[0m\u001b[33m/\u001b[0m\u001b[33m012\u001b[0m\u001b[33m001\u001b[0m\u001b[33m)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m Zhang\u001b[0m\u001b[33m,\u001b[0m\u001b[33m Y\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Algorithms\u001b[0m\u001b[33m for\u001b[0m\u001b[33m Machine\u001b[0m\u001b[33m Learning\u001b[0m\u001b[33m.\"\u001b[0m\u001b[33m Journal\u001b[0m\u001b[33m of\u001b[0m\u001b[33m Machine\u001b[0m\u001b[33m Learning\u001b[0m\u001b[33m Research\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m23\u001b[0m\u001b[33m,\u001b[0m\u001b[33m \u001b[0m\u001b[33m1\u001b[0m\u001b[33m-\u001b[0m\u001b[33m36\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mj\u001b[0m\u001b[33mml\u001b[0m\u001b[33mr\u001b[0m\u001b[33m.org\u001b[0m\u001b[33m/p\u001b[0m\u001b[33mapers\u001b[0m\u001b[33m/v\u001b[0m\u001b[33m23\u001b[0m\u001b[33m/\u001b[0m\u001b[33m20\u001b[0m\u001b[33m-\u001b[0m\u001b[33m065\u001b[0m\u001b[33m.html\u001b[0m\u001b[33m)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m G\u001b[0m\u001b[33mottes\u001b[0m\u001b[33mman\u001b[0m\u001b[33m,\u001b[0m\u001b[33m D\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33m202\u001b[0m\u001b[33m2\u001b[0m\u001b[33m).\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Error\u001b[0m\u001b[33m Correction\u001b[0m\u001b[33m.\"\u001b[0m\u001b[33m In\u001b[0m\u001b[33m Encyclopedia\u001b[0m\u001b[33m of\u001b[0m\u001b[33m Complexity\u001b[0m\u001b[33m and\u001b[0m\u001b[33m Systems\u001b[0m\u001b[33m Science\u001b[0m\u001b[33m (\u001b[0m\u001b[33mpp\u001b[0m\u001b[33m.\u001b[0m\u001b[33m \u001b[0m\u001b[33m1\u001b[0m\u001b[33m-\u001b[0m\u001b[33m13\u001b[0m\u001b[33m).\u001b[0m\u001b[33m Springer\u001b[0m\u001b[33m,\u001b[0m\u001b[33m New\u001b[0m\u001b[33m York\u001b[0m\u001b[33m,\u001b[0m\u001b[33m NY\u001b[0m\u001b[33m.\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mlink\u001b[0m\u001b[33m.spring\u001b[0m\u001b[33mer\u001b[0m\u001b[33m.com\u001b[0m\u001b[33m/reference\u001b[0m\u001b[33mwork\u001b[0m\u001b[33mentry\u001b[0m\u001b[33m/\u001b[0m\u001b[33m10\u001b[0m\u001b[33m.\u001b[0m\u001b[33m100\u001b[0m\u001b[33m7\u001b[0m\u001b[33m/\u001b[0m\u001b[33m978\u001b[0m\u001b[33m-\u001b[0m\u001b[33m0\u001b[0m\u001b[33m-\u001b[0m\u001b[33m387\u001b[0m\u001b[33m-\u001b[0m\u001b[33m758\u001b[0m\u001b[33m88\u001b[0m\u001b[33m-\u001b[0m\u001b[33m6\u001b[0m\u001b[33m_\u001b[0m\u001b[33m447\u001b[0m\u001b[33m)\u001b[0m\u001b[97m\u001b[0m\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m Google\u001b[0m\u001b[33m Blog\u001b[0m\u001b[33m:\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mInt\u001b[0m\u001b[33mroducing\u001b[0m\u001b[33m B\u001b[0m\u001b[33mrist\u001b[0m\u001b[33mle\u001b[0m\u001b[33mcone\u001b[0m\u001b[33m,\u001b[0m\u001b[33m a\u001b[0m\u001b[33m \u001b[0m\u001b[33m72\u001b[0m\u001b[33m-q\u001b[0m\u001b[33mubit\u001b[0m\u001b[33m quantum\u001b[0m\u001b[33m processor\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mblog\u001b[0m\u001b[33m.google\u001b[0m\u001b[33m/\u001b[0m\u001b[33mtechnology\u001b[0m\u001b[33m/\u001b[0m\u001b[33mai\u001b[0m\u001b[33m/\u001b[0m\u001b[33mbr\u001b[0m\u001b[33mistle\u001b[0m\u001b[33mcone\u001b[0m\u001b[33m-\u001b[0m\u001b[33m72\u001b[0m\u001b[33m-q\u001b[0m\u001b[33mubit\u001b[0m\u001b[33m-\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m-\u001b[0m\u001b[33mprocessor\u001b[0m\u001b[33m/)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m NASA\u001b[0m\u001b[33m:\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Error\u001b[0m\u001b[33m Correction\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mn\u001b[0m\u001b[33masa\u001b[0m\u001b[33m.n\u001b[0m\u001b[33mengu\u001b[0m\u001b[33mage\u001b[0m\u001b[33mcenter\u001b[0m\u001b[33m.org\u001b[0m\u001b[33m/topics\u001b[0m\u001b[33m/\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m-com\u001b[0m\u001b[33mput\u001b[0m\u001b[33ming\u001b[0m\u001b[33m/\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m-error\u001b[0m\u001b[33m-cor\u001b[0m\u001b[33mrection\u001b[0m\u001b[33m/)\n",
"\u001b[0m\u001b[33m-\u001b[0m\u001b[33m IBM\u001b[0m\u001b[33m Research\u001b[0m\u001b[33m:\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mQuant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m Algorithms\u001b[0m\u001b[33m\"\u001b[0m\u001b[33m (\u001b[0m\u001b[33mhttps\u001b[0m\u001b[33m://\u001b[0m\u001b[33mwww\u001b[0m\u001b[33m.ibm\u001b[0m\u001b[33m.com\u001b[0m\u001b[33m/\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m/com\u001b[0m\u001b[33mput\u001b[0m\u001b[33ming\u001b[0m\u001b[33m/\u001b[0m\u001b[33mdeveloper\u001b[0m\u001b[33m-resource\u001b[0m\u001b[33m/\u001b[0m\u001b[33mquant\u001b[0m\u001b[33mum\u001b[0m\u001b[33m-al\u001b[0m\u001b[33mgorithms\u001b[0m\u001b[33m/)\u001b[0m\u001b[97m\u001b[0m\n",
"\u001b[30m\u001b[0m"
]
}
@@ -127,10 +127,17 @@
" engine=\"brave\",\n",
" api_key=\"dummy_value\"#os.getenv(\"BRAVE_SEARCH_API_KEY\"),\n",
" )\n",
" \n",
" models_response = client.models.list()\n",
" for model in models_response:\n",
" if model.identifier.endswith(\"Instruct\"):\n",
" model_name = model.llama_model\n",
" \n",
"\n",
" return await create_tool_agent(\n",
" client=client,\n",
" tools=[search_tool],\n",
" model = model_name,\n",
" instructions=\"\"\"\n",
" You are a research assistant that can search the web.\n",
" Always cite your sources with URLs when providing information.\n",
@@ -146,7 +153,7 @@
"\n",
"# Example usage\n",
"async def search_example():\n",
" client = LlamaStackClient(base_url=\"http://{HOST}:{PORT}\")\n",
" client = LlamaStackClient(base_url=f\"http://{HOST}:{PORT}\")\n",
" agent = await create_search_agent(client)\n",
"\n",
" # Create a session\n",
@@ -185,7 +192,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 34,
"metadata": {},
"outputs": [
{
@@ -194,35 +201,28 @@
"text": [
"\n",
"Query: What's the weather like in San Francisco?\n",
"--------------------------------------------------\n",
"\u001b[30m\u001b[0m\u001b[33minference> \u001b[0m\u001b[33m{\n",
"\u001b[0m\u001b[33m \u001b[0m\u001b[33m \"\u001b[0m\u001b[33mtype\u001b[0m\u001b[33m\":\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mfunction\u001b[0m\u001b[33m\",\n",
"\u001b[0m\u001b[33m \u001b[0m\u001b[33m \"\u001b[0m\u001b[33mname\u001b[0m\u001b[33m\":\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mget\u001b[0m\u001b[33m_weather\u001b[0m\u001b[33m\",\n",
"\u001b[0m\u001b[33m \u001b[0m\u001b[33m \"\u001b[0m\u001b[33mparameters\u001b[0m\u001b[33m\":\u001b[0m\u001b[33m {\n",
"\u001b[0m\u001b[33m \u001b[0m\u001b[33m \"\u001b[0m\u001b[33mlocation\u001b[0m\u001b[33m\":\u001b[0m\u001b[33m \"\u001b[0m\u001b[33mSan\u001b[0m\u001b[33m Francisco\u001b[0m\u001b[33m\"\n",
"\u001b[0m\u001b[33m \u001b[0m\u001b[33m }\n",
"\u001b[0m\u001b[33m}\u001b[0m\u001b[97m\u001b[0m\n"
"--------------------------------------------------\n"
]
},
{
"ename": "AttributeError",
"evalue": "'WeatherTool' object has no attribute 'run'",
"evalue": "'async_generator' object has no attribute 'completion_message'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[27], line 113\u001b[0m\n\u001b[1;32m 110\u001b[0m nest_asyncio\u001b[38;5;241m.\u001b[39mapply()\n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# Run the example\u001b[39;00m\n\u001b[0;32m--> 113\u001b[0m \u001b[38;5;28;01mawait\u001b[39;00m weather_example()\n",
"Cell \u001b[0;32mIn[27], line 105\u001b[0m, in \u001b[0;36mweather_example\u001b[0;34m()\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m50\u001b[39m)\n\u001b[1;32m 100\u001b[0m response \u001b[38;5;241m=\u001b[39m agent\u001b[38;5;241m.\u001b[39mcreate_turn(\n\u001b[1;32m 101\u001b[0m messages\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: query}],\n\u001b[1;32m 102\u001b[0m session_id\u001b[38;5;241m=\u001b[39msession_id,\n\u001b[1;32m 103\u001b[0m )\n\u001b[0;32m--> 105\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mfor\u001b[39;00m log \u001b[38;5;129;01min\u001b[39;00m EventLogger()\u001b[38;5;241m.\u001b[39mlog(response):\n\u001b[1;32m 106\u001b[0m log\u001b[38;5;241m.\u001b[39mprint()\n",
"File \u001b[0;32m~/new_task/llama-stack-client-python/src/llama_stack_client/lib/agents/event_logger.py:55\u001b[0m, in \u001b[0;36mEventLogger.log\u001b[0;34m(self, event_generator)\u001b[0m\n\u001b[1;32m 52\u001b[0m previous_event_type \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 53\u001b[0m previous_step_type \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m---> 55\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mfor\u001b[39;00m chunk \u001b[38;5;129;01min\u001b[39;00m event_generator:\n\u001b[1;32m 56\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(chunk, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mevent\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[1;32m 57\u001b[0m \u001b[38;5;66;03m# Need to check for custom tool first\u001b[39;00m\n\u001b[1;32m 58\u001b[0m \u001b[38;5;66;03m# since it does not produce event but instead\u001b[39;00m\n\u001b[1;32m 59\u001b[0m \u001b[38;5;66;03m# a Message\u001b[39;00m\n\u001b[1;32m 60\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(chunk, ToolResponseMessage):\n",
"File \u001b[0;32m~/new_task/llama-stack-client-python/src/llama_stack_client/lib/agents/agent.py:76\u001b[0m, in \u001b[0;36mAgent.create_turn\u001b[0;34m(self, messages, attachments, session_id)\u001b[0m\n\u001b[1;32m 74\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 75\u001b[0m tool \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcustom_tools[tool_call\u001b[38;5;241m.\u001b[39mtool_name]\n\u001b[0;32m---> 76\u001b[0m result_messages \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexecute_custom_tool(tool, message)\n\u001b[1;32m 77\u001b[0m next_message \u001b[38;5;241m=\u001b[39m result_messages[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 79\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m next_message\n",
"File \u001b[0;32m~/new_task/llama-stack-client-python/src/llama_stack_client/lib/agents/agent.py:84\u001b[0m, in \u001b[0;36mAgent.execute_custom_tool\u001b[0;34m(self, tool, message)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mexecute_custom_tool\u001b[39m(\n\u001b[1;32m 82\u001b[0m \u001b[38;5;28mself\u001b[39m, tool: CustomTool, message: Union[UserMessage, ToolResponseMessage]\n\u001b[1;32m 83\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m List[Union[UserMessage, ToolResponseMessage]]:\n\u001b[0;32m---> 84\u001b[0m result_messages \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m \u001b[43mtool\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m([message])\n\u001b[1;32m 85\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m result_messages\n",
"\u001b[0;31mAttributeError\u001b[0m: 'WeatherTool' object has no attribute 'run'"
"Cell \u001b[0;32mIn[34], line 142\u001b[0m\n\u001b[1;32m 139\u001b[0m nest_asyncio\u001b[38;5;241m.\u001b[39mapply()\n\u001b[1;32m 141\u001b[0m \u001b[38;5;66;03m# Run the example\u001b[39;00m\n\u001b[0;32m--> 142\u001b[0m \u001b[38;5;28;01mawait\u001b[39;00m weather_example()\n",
"Cell \u001b[0;32mIn[34], line 133\u001b[0m, in \u001b[0;36mweather_example\u001b[0;34m()\u001b[0m\n\u001b[1;32m 126\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m-\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m50\u001b[39m)\n\u001b[1;32m 128\u001b[0m response \u001b[38;5;241m=\u001b[39m agent\u001b[38;5;241m.\u001b[39mcreate_turn(\n\u001b[1;32m 129\u001b[0m messages\u001b[38;5;241m=\u001b[39m[{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: query}],\n\u001b[1;32m 130\u001b[0m session_id\u001b[38;5;241m=\u001b[39msession_id,\n\u001b[1;32m 131\u001b[0m )\n\u001b[0;32m--> 133\u001b[0m \u001b[38;5;28mprint\u001b[39m(response\u001b[38;5;241m.\u001b[39mcompletion_message\u001b[38;5;241m.\u001b[39mcontent)\n\u001b[1;32m 134\u001b[0m \u001b[38;5;28;01masync\u001b[39;00m \u001b[38;5;28;01mfor\u001b[39;00m log \u001b[38;5;129;01min\u001b[39;00m EventLogger()\u001b[38;5;241m.\u001b[39mlog(response):\n\u001b[1;32m 135\u001b[0m log\u001b[38;5;241m.\u001b[39mprint()\n",
"\u001b[0;31mAttributeError\u001b[0m: 'async_generator' object has no attribute 'completion_message'"
]
}
],
"source": [
"from typing import TypedDict, Optional, Dict, Any\n",
"from datetime import datetime\n",
"\n",
"from llama_stack_client.types.tool_param_definition_param import ToolParamDefinitionParam\n",
"from llama_stack_client.types import CompletionMessage\n",
"class WeatherTool:\n",
" \"\"\"Example custom tool for weather information.\"\"\"\n",
"\n",
@@ -245,20 +245,45 @@
" required=False\n",
" )\n",
" }\n",
"\n",
" async def run(self,messages):\n",
" for message in messages:\n",
" print(\"tool_calls starting: \", message.tool_calls[0])\n",
" return_dict = message.tool_calls[0].arguments\n",
" location = return_dict.get(\"location\",None)\n",
" date = return_dict.get(\"date\",None)\n",
" print(\"Using Location:\",location)\n",
" if date:\n",
" print(\"Using date:\",date)\n",
" return await self.run_impl(location,date)\n",
" \n",
" async def run_impl(self, location: str, date: Optional[str] = None) -> Dict[str, Any]:\n",
" \"\"\"Simulate getting weather data (replace with actual API call).\"\"\"\n",
" # Mock implementation\n",
" return {\n",
" print(\"\"\" {\n",
" \"temperature\": 72.5,\n",
" \"conditions\": \"partly cloudy\",\n",
" \"humidity\": 65.0\n",
" }\n",
" }\"\"\")\n",
" return [CompletionMessage(\n",
" content=\"\"\"{\n",
" \"temperature\": 72.5,\n",
" \"conditions\": \"partly cloudy\",\n",
" \"humidity\": 65.0\n",
" }\"\"\",\n",
" role='assistant',\n",
" stop_reason='end_of_message',\n",
" tool_calls=[],\n",
" )],\n",
" \n",
"\n",
"async def create_weather_agent(client: LlamaStackClient) -> Agent:\n",
" \"\"\"Create an agent with weather tool capability.\"\"\"\n",
" models_response = client.models.list()\n",
" for model in models_response:\n",
" if model.identifier.endswith(\"Instruct\"):\n",
" model_name = model.llama_model\n",
" agent_config = AgentConfig(\n",
" model=\"Llama3.1-8B-Instruct\",\n",
" model=model_name,\n",
" instructions=\"\"\"\n",
" You are a weather assistant that can provide weather information.\n",
" Always specify the location clearly in your responses.\n",
@@ -307,7 +332,7 @@
"\n",
"# Example usage\n",
"async def weather_example():\n",
" client = LlamaStackClient(base_url=\"http://{HOST}:{PORT}\")\n",
" client = LlamaStackClient(base_url=f\"http://{HOST}:{PORT}\")\n",
" agent = await create_weather_agent(client)\n",
" session_id = agent.create_session(\"weather-session\")\n",
"\n",
@@ -324,7 +349,7 @@
" messages=[{\"role\": \"user\", \"content\": query}],\n",
" session_id=session_id,\n",
" )\n",
"\n",
" \n",
" async for log in EventLogger().log(response):\n",
" log.print()\n",
"\n",
@@ -362,7 +387,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.0"
"version": "3.10.15"
}
},
"nbformat": 4,

View file

@@ -40,7 +40,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -50,7 +50,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -65,7 +65,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"1. Initial Setup\n",
"1. **Initial Setup**\n",
"\n",
"First, we'll import the necessary libraries and set up some helper functions. Let's break down what each import does:\n",
"\n",
"llama_stack_client: Our main interface to the Memory API\n",
@@ -79,7 +80,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -125,7 +126,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"2. Initialize Client and Create Memory Bank\n",
"2. **Initialize Client and Create Memory Bank**\n",
"\n",
"Now we'll set up our connection to the Memory API and create our first memory bank. A memory bank is like a specialized database that stores document embeddings for semantic search.\n",
"❓ Key Concepts:\n",
"\n",
@@ -138,14 +140,15 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Available providers:\n"
"Available providers:\n",
"{'inference': [ProviderInfo(provider_id='meta-reference', provider_type='meta-reference'), ProviderInfo(provider_id='meta1', provider_type='meta-reference')], 'safety': [ProviderInfo(provider_id='meta-reference', provider_type='meta-reference')], 'agents': [ProviderInfo(provider_id='meta-reference', provider_type='meta-reference')], 'memory': [ProviderInfo(provider_id='meta-reference', provider_type='meta-reference')], 'telemetry': [ProviderInfo(provider_id='meta-reference', provider_type='meta-reference')]}\n"
]
}
],
@@ -164,7 +167,7 @@
"providers = client.providers.list()\n",
"print(\"Available providers:\")\n",
"#print(json.dumps(providers, indent=2))\n",
"\n",
"print(providers)\n",
"# Create a memory bank with optimized settings for general use\n",
"client.memory_banks.register(\n",
" memory_bank={\n",
@@ -175,18 +178,15 @@
" \"provider_id\": providers[\"memory\"][0].provider_id, # Use the first available provider\n",
" }\n",
")\n",
"\n",
"# Let's verify our memory bank was created\n",
"memory_banks = client.memory_banks.list()\n",
"#print(\"\\nRegistered memory banks:\")\n",
"#print(json.dumps(memory_banks, indent=2))"
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"3. Insert Documents\n",
"3. **Insert Documents**\n",
" \n",
"The Memory API supports multiple ways to add documents. We'll demonstrate two common approaches:\n",
"\n",
"Loading documents from URLs\n",
@@ -201,7 +201,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"metadata": {},
"outputs": [
{
@@ -262,7 +262,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"4. Query the Memory Bank\n",
"4. **Query the Memory Bank**\n",
" \n",
"Now for the exciting part - querying our documents! The Memory API uses semantic search to find relevant content based on meaning, not just keywords.\n",
"❓ Understanding Scores:\n",
"\n",
@@ -272,7 +273,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 18,
"metadata": {},
"outputs": [
{
@@ -283,37 +284,37 @@
"Query: How do I use LoRA?\n",
"--------------------------------------------------\n",
"\n",
"Result 1 (Score: 1.242)\n",
"Result 1 (Score: 1.322)\n",
"========================================\n",
"Chunk(content='.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model\\'s final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models), but\\n other modelling tasks may have different projections - classifier models will project\\n to the number of classes, for example\\n\\n.. note::\\n\\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\\n final output projection do not support ``apply_lora_to_output``.\\n\\nThese are all specified under the ``model`` flag or config entry, i.e:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"]\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n model.lora', document_id='url-doc-0', token_count=512)\n",
"Chunk(content=\"_peft:\\n\\nParameter Efficient Fine-Tuning (PEFT)\\n--------------------------------------\\n\\n.. _glossary_lora:\\n\\nLow Rank Adaptation (LoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n\\n*What's going on here?*\\n\\nYou can read our tutorial on :ref:`finetuning Llama2 with LoRA<lora_finetune_label>` to understand how LoRA works, and how to use it.\\nSimply stated, LoRA greatly reduces the number of trainable parameters, thus saving significant gradient and optimizer\\nmemory during training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using any of our recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nLoRA-enabled model builders, which we support for all our models, and also use the ``lora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model's final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models),\", document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 1.221)\n",
"Result 2 (Score: 1.322)\n",
"========================================\n",
"Chunk(content=' adds a small overhead to LoRA training due to the addition of the magnitude parameter, but it has been shown to\\nimprove the performance of LoRA, particularly at low ranks.\\n\\n*Sounds great! How do I use it?*\\n\\nMuch like LoRA and QLoRA, you can finetune using DoRA with any of our LoRA recipes. We use the same model builders for LoRA\\nas we do for DoRA, so you can use the ``lora_`` version of any model builder with ``use_dora=True``. For example, to finetune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA <glossary_lora>` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for', document_id='url-doc-0', token_count=512)\n",
"Chunk(content=\"_peft:\\n\\nParameter Efficient Fine-Tuning (PEFT)\\n--------------------------------------\\n\\n.. _glossary_lora:\\n\\nLow Rank Adaptation (LoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n\\n*What's going on here?*\\n\\nYou can read our tutorial on :ref:`finetuning Llama2 with LoRA<lora_finetune_label>` to understand how LoRA works, and how to use it.\\nSimply stated, LoRA greatly reduces the number of trainable parameters, thus saving significant gradient and optimizer\\nmemory during training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using any of our recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nLoRA-enabled model builders, which we support for all our models, and also use the ``lora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model's final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models),\", document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 1.093)\n",
"Result 3 (Score: 1.322)\n",
"========================================\n",
"Chunk(content='64\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 32\\n lora_alpha: 64\\n\\n.. note::\\n\\n To get a deeper sense of how LoRA parameters affect memory usage during training,\\n see the :ref:`relevant section in our Llama2 LoRA tutorial<lora_tutorial_memory_tradeoff_label>`.\\n\\n.. _glossary_qlora:\\n\\nQuantized Low Rank Adaptation (QLoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n*What\\'s going on here?*\\n\\n`QLoRA <https://arxiv.org/abs/2305.14314>`_ is an enhancement on top of `LoRA <https://arxiv.org/abs/2106.09685>`_\\nthat maintains the frozen model parameters from LoRA in 4-bit quantized precision, thereby reducing memory usage.\\nThis is enabled through a novel 4-bit NormalFloat (NF4) data type proposed by the authors, which allows for 4-8x less\\nparameter memory usage whilst retaining model accuracy. You can read our tutorial on :ref:`finetuning Llama2 with QLoRA<qlora_finetune_label>`\\nfor a deeper understanding of how it works.\\n\\nWhen considering using QLoRA to reduce memory usage, it\\'s worth noting that QLoRA prevents accuracy degradation during quantization\\nby up-casting quantized parameters to the original higher precision datatype during model forward passes - this up-casting may\\nincur penalties to training speed. The :ref:`relevant section <qlora_compile_label>` in our QLoRA tutorial demonstrates the usage of ``torch.compile``\\nto address this by speeding up training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using QLoRA with any of our LoRA recipes, i.e. recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nQLoRA-enabled model builders, which we support for all our models, and also use the ``qlora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3_8b` model has', document_id='url-doc-0', token_count=512)\n",
"Chunk(content=\"_peft:\\n\\nParameter Efficient Fine-Tuning (PEFT)\\n--------------------------------------\\n\\n.. _glossary_lora:\\n\\nLow Rank Adaptation (LoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n\\n*What's going on here?*\\n\\nYou can read our tutorial on :ref:`finetuning Llama2 with LoRA<lora_finetune_label>` to understand how LoRA works, and how to use it.\\nSimply stated, LoRA greatly reduces the number of trainable parameters, thus saving significant gradient and optimizer\\nmemory during training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using any of our recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nLoRA-enabled model builders, which we support for all our models, and also use the ``lora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model's final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models),\", document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Query: Tell me about memory optimizations\n",
"--------------------------------------------------\n",
"\n",
"Result 1 (Score: 1.218)\n",
"Result 1 (Score: 1.260)\n",
"========================================\n",
"Chunk(content='.. _memory_optimization_overview_label:\\n\\n============================\\nMemory Optimization Overview\\n============================\\n\\n**Author**: `Salman Mohammadi <https://github.com/SalmanMohammadi>`_\\n\\ntorchtune comes with a host of plug-and-play memory optimization components which give you lots of flexibility\\nto ``tune`` our recipes to your hardware. This page provides a brief glossary of these components and how you might use them.\\nTo make things easy, we\\'ve summarized these components in the following table:\\n\\n.. csv-table:: Memory optimization components\\n :header: \"Component\", \"When to use?\"\\n :widths: auto\\n\\n \":ref:`glossary_precision`\", \"You\\'ll usually want to leave this as its default ``bfloat16``. If you\\'re struggling with training stability or accuracy due to precision, fp32 may help, but will significantly increase memory usage and decrease training speed.\"\\n \":ref:`glossary_act_ckpt`\", \"Use when you\\'re memory constrained and need to handle larger batch sizes or longer context lengths. Be aware that it may slow down training speed.\"\\n \":ref:`glossary_act_off`\", \"Similar to activation checkpointing, this can be used when memory constrained, but comes at the cost of training speed due to the overhead of moving tensors between GPU VRAM and CPU. This can also be used alongside activation checkpointing.\"\\n \":ref:`glossary_grad_accm`\", \"Helpful when memory-constrained to simulate larger batch sizes. Often preferable to activation checkpointing for better training speed.\"\\n \":ref:`glossary_low_precision_opt`\", \"When you need to further reduce memory usage beyond using ``bf16`` by reducing the precision in the optimizer states. Note that lower precision optimizers may reduce training stability/accuracy.\"\\n \":ref:`glossary_opt_in_bwd`\", \"Helps reduce memory usage when using stateful optimizers, particularly when full-finetuning large models with high gradient memory usage. This is not compatible with ``gradient_accumulation_steps``, so training may slow down due to reduced model throughput.\"\\n \":ref:`glossary_cpu_offload`\", \"Offloads optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed, as CPU optimizer steps can be slow and bottleneck training performance.\"\\n \":ref:`glossary_lora`\", \"When you', document_id='url-doc-0', token_count=512)\n",
"Chunk(content='.. _memory_optimization_overview_label:\\n\\n============================\\nMemory Optimization Overview\\n============================\\n\\n**Author**: `Salman Mohammadi <https://github.com/SalmanMohammadi>`_\\n\\ntorchtune comes with a host of plug-and-play memory optimization components which give you lots of flexibility\\nto ``tune`` our recipes to your hardware. This page provides a brief glossary of these components and how you might use them.\\nTo make things easy, we\\'ve summarized these components in the following table:\\n\\n.. csv-table:: Memory optimization components\\n :header: \"Component\", \"When to use?\"\\n :widths: auto\\n\\n \":ref:`glossary_precision`\", \"You\\'ll usually want to leave this as its default ``bfloat16``. It uses 2 bytes per model parameter instead of 4 bytes when using ``float32``.\"\\n \":ref:`glossary_act_ckpt`\", \"Use when you\\'re memory constrained and want to use a larger model, batch size or context length. Be aware that it will slow down training speed.\"\\n \":ref:`glossary_act_off`\", \"Similar to activation checkpointing, this can be used when memory constrained, but may decrease training speed. This **should** be used alongside activation checkpointing.\"\\n \":ref:`glossary_grad_accm`\", \"Helpful when memory-constrained to simulate larger batch sizes. Not compatible with optimizer in backward. Use it when you can already fit at least one sample without OOMing, but not enough of them.\"\\n \":ref:`glossary_low_precision_opt`\", \"Use when you want to reduce the size of the optimizer state. This is relevant when training large models and using optimizers with momentum, like Adam. Note that lower precision optimizers may reduce training stability/accuracy.\"\\n \":ref:`glossary_opt_in_bwd`\", \"Use it when you have large gradients and can fit a large enough batch size, since this is not compatible with ``gradient_accumulation_steps``.\"\\n \":ref:`glossary_cpu_offload`\", \"Offloads optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed. Prioritize using it only if the other techniques are not enough.\"\\n \":ref:`glossary_lora`\", \"When you want to significantly reduce the number of trainable parameters, saving gradient and optimizer memory', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 1.079)\n",
"Result 2 (Score: 1.260)\n",
"========================================\n",
"Chunk(content=' optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed, as CPU optimizer steps can be slow and bottleneck training performance.\"\\n \":ref:`glossary_lora`\", \"When you want to significantly reduce the number of trainable parameters, saving gradient and optimizer memory during training, and significantly speeding up training.\"\\n \":ref:`glossary_qlora`\", \"When you need even more memory savings than LoRA, at the potential cost of some training speed. Useful for very large models or limited hardware.\"\\n \":ref:`glossary_dora`\", \"Like LoRA, DoRA can provide significant memory savings and training speed-ups. DoRA may improve performance over LoRA, particularly when using small rank updates.\"\\n\\n\\n.. note::\\n\\n In its current state, this tutorial is focused on single-device optimizations. Check in soon as we update this page\\n for the latest memory optimization features for distributed fine-tuning.\\n\\n.. _glossary_precision:\\n\\n\\nModel Precision\\n---------------\\n\\n*What\\'s going on here?*\\n\\nWe use the term \"precision\" to refer to the underlying data type used to represent the model and optimizer parameters.\\nWe support two data types in torchtune:\\n\\n.. note::\\n\\n We recommend diving into Sebastian Raschka\\'s `blogpost on mixed-precision techniques <https://sebastianraschka.com/blog/2023/llm-mixed-precision-copy.html>`_\\n for a deeper understanding of concepts around precision and data formats.\\n\\n* ``fp32``, commonly referred to as \"full-precision\", uses 4 bytes per model and optimizer parameter.\\n* ``bfloat16``, referred to as \"half-precision\", uses 2 bytes per model and optimizer parameter - effectively half\\n the memory of ``fp32``, and also improves training speed. Generally, if your hardware supports training with ``bfloat16``,\\n we recommend using it - this is the default setting for our recipes.\\n\\n.. note::\\n\\n Another common paradigm is \"mixed-precision\" training: where model weights are in ``bfloat16`` (or ``fp16``), and optimizer\\n states are in ``fp32``. Currently, we don\\'t support mixed-precision training in torchtune.\\n\\n*Sounds great! How do I use it?*\\n\\nSimply use the ``dtype`` flag or config entry in all our recipes! For example, to use half-precision', document_id='url-doc-0', token_count=512)\n",
"Chunk(content='.. _memory_optimization_overview_label:\\n\\n============================\\nMemory Optimization Overview\\n============================\\n\\n**Author**: `Salman Mohammadi <https://github.com/SalmanMohammadi>`_\\n\\ntorchtune comes with a host of plug-and-play memory optimization components which give you lots of flexibility\\nto ``tune`` our recipes to your hardware. This page provides a brief glossary of these components and how you might use them.\\nTo make things easy, we\\'ve summarized these components in the following table:\\n\\n.. csv-table:: Memory optimization components\\n :header: \"Component\", \"When to use?\"\\n :widths: auto\\n\\n \":ref:`glossary_precision`\", \"You\\'ll usually want to leave this as its default ``bfloat16``. It uses 2 bytes per model parameter instead of 4 bytes when using ``float32``.\"\\n \":ref:`glossary_act_ckpt`\", \"Use when you\\'re memory constrained and want to use a larger model, batch size or context length. Be aware that it will slow down training speed.\"\\n \":ref:`glossary_act_off`\", \"Similar to activation checkpointing, this can be used when memory constrained, but may decrease training speed. This **should** be used alongside activation checkpointing.\"\\n \":ref:`glossary_grad_accm`\", \"Helpful when memory-constrained to simulate larger batch sizes. Not compatible with optimizer in backward. Use it when you can already fit at least one sample without OOMing, but not enough of them.\"\\n \":ref:`glossary_low_precision_opt`\", \"Use when you want to reduce the size of the optimizer state. This is relevant when training large models and using optimizers with momentum, like Adam. Note that lower precision optimizers may reduce training stability/accuracy.\"\\n \":ref:`glossary_opt_in_bwd`\", \"Use it when you have large gradients and can fit a large enough batch size, since this is not compatible with ``gradient_accumulation_steps``.\"\\n \":ref:`glossary_cpu_offload`\", \"Offloads optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed. Prioritize using it only if the other techniques are not enough.\"\\n \":ref:`glossary_lora`\", \"When you want to significantly reduce the number of trainable parameters, saving gradient and optimizer memory', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 0.937)\n",
"Result 3 (Score: 1.260)\n",
"========================================\n",
"Chunk(content=\",\\n lr=1e-5,\\n fused=True\\n )\\n\\nSome helpful hints from the ``torchao`` `CPUOffloadOptimizer page <https://github.com/pytorch/ao/tree/main/torchao/prototype/low_bit_optim#optimizer-cpu-offload>`_:\\n\\n* The CPU optimizer step is often the bottleneck when optimizer CPU offload is used. To minimize the slowdown, it is recommended to (1) use full ``bf16`` training so that parameters, gradients, and optimizer states are in ``bf16``; and (2) give GPU more work per optimizer step (e.g. larger batch size with activation checkpointing, gradient accumulation).\\n* Gradient accumulation should always be set to 1 when ``offload_gradients=True``, as gradients are cleared on GPU every backward pass.\\n* This optimizer works by keeping a copy of parameters and pre-allocating gradient memory on CPU. Therefore, expect your RAM usage to increase by 4x model size.\\n* This optimizer is only supported for single-device recipes. To use CPU-offloading in distributed recipes, use ``fsdp_cpu_offload=True`` in any distributed recipe. See :class:`torch.distributed.fsdp.FullyShardedDataParallel` for more details\\n\\n\\n.. _glossary_peft:\\n\\nParameter Efficient Fine-Tuning (PEFT)\\n--------------------------------------\\n\\n.. _glossary_lora:\\n\\nLow Rank Adaptation (LoRA)\\n^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n\\n*What's going on here?*\\n\\nYou can read our tutorial on :ref:`finetuning Llama2 with LoRA<lora_finetune_label>` to understand how LoRA works, and how to use it.\\nSimply stated, LoRA greatly reduces the number of trainable parameters, thus saving significant gradient and optimizer\\nmemory during training.\\n\\n*Sounds great! How do I use it?*\\n\\nYou can finetune using any of our recipes with the ``lora_`` prefix, e.g. :ref:`lora_finetune_single_device<lora_finetune_recipe_label>`. These recipes utilize\\nLoRA-enabled model builders, which we support for all our models, and also use the ``lora_`` prefix, e.g.\\nthe :func:`torchtune.models.llama3.llama3` model has a corresponding :func:`torchtune.models.llama3.lora_llama3`.\\nWe aim to provide a comprehensive set of configurations to allow you to get started with training with LoRA quickly,\\njust specify any config with\", document_id='url-doc-0', token_count=512)\n",
"Chunk(content='.. _memory_optimization_overview_label:\\n\\n============================\\nMemory Optimization Overview\\n============================\\n\\n**Author**: `Salman Mohammadi <https://github.com/SalmanMohammadi>`_\\n\\ntorchtune comes with a host of plug-and-play memory optimization components which give you lots of flexibility\\nto ``tune`` our recipes to your hardware. This page provides a brief glossary of these components and how you might use them.\\nTo make things easy, we\\'ve summarized these components in the following table:\\n\\n.. csv-table:: Memory optimization components\\n :header: \"Component\", \"When to use?\"\\n :widths: auto\\n\\n \":ref:`glossary_precision`\", \"You\\'ll usually want to leave this as its default ``bfloat16``. It uses 2 bytes per model parameter instead of 4 bytes when using ``float32``.\"\\n \":ref:`glossary_act_ckpt`\", \"Use when you\\'re memory constrained and want to use a larger model, batch size or context length. Be aware that it will slow down training speed.\"\\n \":ref:`glossary_act_off`\", \"Similar to activation checkpointing, this can be used when memory constrained, but may decrease training speed. This **should** be used alongside activation checkpointing.\"\\n \":ref:`glossary_grad_accm`\", \"Helpful when memory-constrained to simulate larger batch sizes. Not compatible with optimizer in backward. Use it when you can already fit at least one sample without OOMing, but not enough of them.\"\\n \":ref:`glossary_low_precision_opt`\", \"Use when you want to reduce the size of the optimizer state. This is relevant when training large models and using optimizers with momentum, like Adam. Note that lower precision optimizers may reduce training stability/accuracy.\"\\n \":ref:`glossary_opt_in_bwd`\", \"Use it when you have large gradients and can fit a large enough batch size, since this is not compatible with ``gradient_accumulation_steps``.\"\\n \":ref:`glossary_cpu_offload`\", \"Offloads optimizer states and (optionally) gradients to CPU, and performs optimizer steps on CPU. This can be used to significantly reduce GPU memory usage at the cost of CPU RAM and training speed. Prioritize using it only if the other techniques are not enough.\"\\n \":ref:`glossary_lora`\", \"When you want to significantly reduce the number of trainable parameters, saving gradient and optimizer memory', document_id='url-doc-0', token_count=512)\n",
"========================================\n",
"\n",
"Query: What are the key features of Llama 3?\n",
@@ -324,14 +325,14 @@
"Chunk(content=\"8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. You will need to follow the instructions\\non the `official Meta page <https://github.com/meta-llama/llama3/blob/main/README.md>`_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here <https://huggingface.co/settings/tokens>`_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3-8B-Instruct \\\\\\n --output-dir <checkpoint_dir> \\\\\\n --hf-token <ACCESS TOKEN>\\n\\n|\\n\\nFine-tuning Llama3-8B-Instruct in torchtune\\n-------------------------------------------\\n\\ntorchtune provides `LoRA <https://arxiv.org/abs/2106.09685>`_, `QLoRA <https://arxiv.org/abs/2305.14314>`_, and full fine-tuning\\nrecipes for fine-tuning Llama3-8B on one or more GPUs. For more on LoRA in torchtune, see our :ref:`LoRA Tutorial <lora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial <qlora_finetune_label>`.\\n\\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides <cli_override>` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora\", document_id='url-doc-2', token_count=512)\n",
"========================================\n",
"\n",
"Result 2 (Score: 0.927)\n",
"Result 2 (Score: 0.964)\n",
"========================================\n",
"Chunk(content=\".. _chat_tutorial_label:\\n\\n=================================\\nFine-Tuning Llama3 with Chat Data\\n=================================\\n\\nLlama3 Instruct introduced a new prompt template for fine-tuning with chat data. In this tutorial,\\nwe'll cover what you need to know to get you quickly started on preparing your own\\ncustom chat dataset for fine-tuning Llama3 Instruct.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` You will learn:\\n\\n * How the Llama3 Instruct format differs from Llama2\\n * All about prompt templates and special tokens\\n * How to use your own chat dataset to fine-tune Llama3 Instruct\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`configuring datasets<chat_dataset_usage_label>`\\n * Know how to :ref:`download Llama3 Instruct weights <llama3_label>`\\n\\n\\nTemplate changes from Llama2 to Llama3\\n--------------------------------------\\n\\nThe Llama2 chat model requires a specific template when prompting the pre-trained\\nmodel. Since the chat model was pretrained with this prompt template, if you want to run\\ninference on the model, you'll need to use the same template for optimal performance\\non chat data. Otherwise, the model will just perform standard text completion, which\\nmay or may not align with your intended use case.\\n\\nFrom the `official Llama2 prompt\\ntemplate guide <https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-2>`_\\nfor the Llama2 chat model, we can see that special tags are added:\\n\\n.. code-block:: text\\n\\n <s>[INST] <<SYS>>\\n You are a helpful, respectful, and honest assistant.\\n <</SYS>>\\n\\n Hi! I am a human. [/INST] Hello there! Nice to meet you! I'm Meta AI, your friendly AI assistant </s>\\n\\nLlama3 Instruct `overhauled <https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3>`_\\nthe template from Llama2 to better support multiturn conversations. The same text\\nin the Llama3 Instruct format would look like this:\\n\\n.. code-block:: text\\n\\n <|begin_of_text|><|start_header_id|>system<|end_header_id|>\\n\\n You are a helpful,\", document_id='url-doc-1', token_count=512)\n",
"Chunk(content=\"8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. You will need to follow the instructions\\non the `official Meta page <https://github.com/meta-llama/llama3/blob/main/README.md>`_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here <https://huggingface.co/settings/tokens>`_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3-8B-Instruct \\\\\\n --output-dir <checkpoint_dir> \\\\\\n --hf-token <ACCESS TOKEN>\\n\\n|\\n\\nFine-tuning Llama3-8B-Instruct in torchtune\\n-------------------------------------------\\n\\ntorchtune provides `LoRA <https://arxiv.org/abs/2106.09685>`_, `QLoRA <https://arxiv.org/abs/2305.14314>`_, and full fine-tuning\\nrecipes for fine-tuning Llama3-8B on one or more GPUs. For more on LoRA in torchtune, see our :ref:`LoRA Tutorial <lora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial <qlora_finetune_label>`.\\n\\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides <cli_override>` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora\", document_id='url-doc-2', token_count=512)\n",
"========================================\n",
"\n",
"Result 3 (Score: 0.858)\n",
"Result 3 (Score: 0.964)\n",
"========================================\n",
"Chunk(content='.. _llama3_label:\\n\\n========================\\nMeta Llama3 in torchtune\\n========================\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` You will learn how to:\\n\\n * Download the Llama3-8B-Instruct weights and tokenizer\\n * Fine-tune Llama3-8B-Instruct with LoRA and QLoRA\\n * Evaluate your fine-tuned Llama3-8B-Instruct model\\n * Generate text with your fine-tuned model\\n * Quantize your model to speed up generation\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune<overview_label>`\\n * Make sure to :ref:`install torchtune<install_label>`\\n\\n\\nLlama3-8B\\n---------\\n\\n`Meta Llama 3 <https://llama.meta.com/llama3>`_ is a new family of models released by Meta AI that improves upon the performance of the Llama2 family\\nof models across a `range of different benchmarks <https://huggingface.co/meta-llama/Meta-Llama-3-8B#base-pretrained-models>`_.\\nCurrently there are two different sizes of Meta Llama 3: 8B and 70B. In this tutorial we will focus on the 8B size model.\\nThere are a few main changes between Llama2-7B and Llama3-8B models:\\n\\n- Llama3-8B uses `grouped-query attention <https://arxiv.org/abs/2305.13245>`_ instead of the standard multi-head attention from Llama2-7B\\n- Llama3-8B has a larger vocab size (128,256 instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken <https://github.com/openai/tiktoken>`_ instead of `sentencepiece <https://github.com/google/sentencepiece>`_)\\n- Llama3-8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3', document_id='url-doc-2', token_count=512)\n",
"Chunk(content=\"8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings <https://arxiv.org/abs/2104.09864>`_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. You will need to follow the instructions\\non the `official Meta page <https://github.com/meta-llama/llama3/blob/main/README.md>`_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here <https://huggingface.co/settings/tokens>`_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3-8B-Instruct \\\\\\n --output-dir <checkpoint_dir> \\\\\\n --hf-token <ACCESS TOKEN>\\n\\n|\\n\\nFine-tuning Llama3-8B-Instruct in torchtune\\n-------------------------------------------\\n\\ntorchtune provides `LoRA <https://arxiv.org/abs/2106.09685>`_, `QLoRA <https://arxiv.org/abs/2305.14314>`_, and full fine-tuning\\nrecipes for fine-tuning Llama3-8B on one or more GPUs. For more on LoRA in torchtune, see our :ref:`LoRA Tutorial <lora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial <qlora_finetune_label>`.\\n\\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides <cli_override>` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora\", document_id='url-doc-2', token_count=512)\n",
"========================================\n"
]
}
@@ -345,7 +346,6 @@
" \"\"\"\n",
" print(f\"\\nQuery: {query}\")\n",
" print(\"-\" * 50)\n",
"\n",
" response = client.memory.query(\n",
" bank_id=\"tutorial_bank\",\n",
" query=[query], # The API accepts multiple queries at once!\n",
@@ -364,6 +364,7 @@
" \"What are the key features of Llama 3?\" # Product-specific\n",
"]\n",
"\n",
"\n",
"for query in queries:\n",
" print_query_results(query)"
]
@@ -394,7 +395,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.13.0"
"version": "3.10.15"
}
},
"nbformat": 4,
@@ -117,7 +117,7 @@
"os.environ[\"BRAVE_SEARCH_API_KEY\"] = \"YOUR_SEARCH_API_KEY\"\n",
"\n",
"async def agent_example():\n",
" client = LlamaStackClient(base_url=\"http://{HOST}:{PORT}\")\n",
" client = LlamaStackClient(base_url=f\"http://{HOST}:{PORT}\")\n",
" models_response = client.models.list()\n",
" for model in models_response:\n",
" if model.identifier.endswith(\"Instruct\"):\n",