From 624910b8801f97fd178305dfe0903195dacdecff Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Tue, 4 Mar 2025 09:55:11 -0800 Subject: [PATCH] update json --- .../recorded_responses/chat_completion.json | 4026 +++++++++++++++-- .../recorded_responses/chat_completion.pickle | Bin 297188 -> 541735 bytes .../recorded_responses/invoke_tool.json | 97 +- .../recorded_responses/invoke_tool.pickle | Bin 41560 -> 52685 bytes 4 files changed, 3682 insertions(+), 441 deletions(-) diff --git a/tests/api/fixtures/recorded_responses/chat_completion.json b/tests/api/fixtures/recorded_responses/chat_completion.json index a74773dbb..8a4bae93d 100644 --- a/tests/api/fixtures/recorded_responses/chat_completion.json +++ b/tests/api/fixtures/recorded_responses/chat_completion.json @@ -10665,7 +10665,7 @@ { "event": { "delta": { - "text": " provided function is unable to", + "text": " provided function definitions are", "type": "text" }, "event_type": { @@ -10680,7 +10680,7 @@ { "event": { "delta": { - "text": " find the boiling point of \"polyjuice\" as", + "text": " not suitable", "type": "text" }, "event_type": { @@ -10695,7 +10695,7 @@ { "event": { "delta": { - "text": " it is not a real liquid.", + "text": " for this task. Please re", "type": "text" }, "event_type": { @@ -10710,7 +10710,7 @@ { "event": { "delta": { - "text": " Polyjuice is a fictional substance from the", + "text": "work them to", "type": "text" }, "event_type": { @@ -10725,37 +10725,7 @@ { "event": { "delta": { - "text": " Harry Potter series. If you'd like", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " to know the boiling point of a real liquid, please let me", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " know and I can try to assist you.", + "text": " align with the task requirements.", "type": "text" }, "event_type": { @@ -10823,7 +10793,7 @@ { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice', celci", + "text": "get_boiling_point(liquid_name='polyjuice', celcius", "type": "text" }, "event_type": { @@ -10838,7 +10808,7 @@ { "event": { "delta": { - "text": "us=True)]", + "text": "=True)]", "type": "text" }, "event_type": { @@ -10862,7 +10832,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "3d18758c-2e41-4ba9-8808-ab6209c700d0", + "call_id": "3cb5e131-c553-494b-ae31-7d3836fbb4d8", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -10980,7 +10950,7 @@ { "event": { "delta": { - "text": "ice is a fictional substance from the Harry Potter series. The", + "text": "ice is a fictional substance from the", "type": "text" }, "event_type": { @@ -10995,7 +10965,7 @@ { "event": { "delta": { - "text": " boiling point of a substance is a", + "text": " Harry Potter series. The boiling", "type": "text" }, "event_type": { @@ -11010,7 +10980,7 @@ { "event": { "delta": { - "text": " physical property that can be measured,", + "text": " point of a substance is a physical", "type": "text" }, "event_type": { @@ -11025,7 +10995,7 @@ { "event": { "delta": { - "text": " but it only applies to real substances. 
If", + "text": " property that can be measured, but it", "type": "text" }, "event_type": { @@ -11040,7 +11010,7 @@ { "event": { "delta": { - "text": " you'd like to know the", + "text": " only applies to real substances. If you", "type": "text" }, "event_type": { @@ -11055,7 +11025,7 @@ { "event": { "delta": { - "text": " boiling point of a different liquid,", + "text": "'d like to know the boiling point of a different", "type": "text" }, "event_type": { @@ -11070,7 +11040,22 @@ { "event": { "delta": { - "text": " I can try to help with that.", + "text": " liquid, I can", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " try to help with that.", "type": "text" }, "event_type": { @@ -11138,7 +11123,7 @@ { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice', cel", + "text": "get_boiling_point(liquid", "type": "text" }, "event_type": { @@ -11153,7 +11138,22 @@ { "event": { "delta": { - "text": "cius=True)]", + "text": "_name='polyjuice', celcius=True", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ")]", "type": "text" }, "event_type": { @@ -11177,7 +11177,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "7ffd8d9c-1828-46de-9153-a6f9aeacc73d", + "call_id": "4c62a314-448c-4cd5-a921-610583007faa", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -11250,22 +11250,7 @@ { "event": { "delta": { - "text": " I answered", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the", + "text": " I answered the", "type": "text" }, "event_type": { @@ -11343,7 +11328,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')]), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)\\n# Sample of data\\nprint(\"Data sample from file:\")\\nprint(df.head())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), 
('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')]), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)\\n# Sample of data\\nprint(\"Data sample from file:\")\\nprint(df.head())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -11438,7 +11423,22 @@ { "event": { "delta": { - "text": " the file path is correct and the file exists in the specified location. If", + "text": " the file path is correct", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " and the file exists in the specified location. If", "type": "text" }, "event_type": { @@ -11483,7 +11483,7 @@ { "event": { "delta": { - "text": " file.\n\nAdditionally, you can use the", + "text": " file.\n\nAdditionally, you can use the `os`", "type": "text" }, "event_type": { @@ -11498,7 +11498,7 @@ { "event": { "delta": { - "text": " `os` module to check if the file exists before attempting", + "text": " module to check if the file exists before attempting to", "type": "text" }, "event_type": { @@ -11513,7 +11513,7 @@ { "event": { "delta": { - "text": " to", + "text": " read it. 
Here's an example:\n\n```", "type": "text" }, "event_type": { @@ -11528,7 +11528,7 @@ { "event": { "delta": { - "text": " read it. Here", + "text": "python\nimport os\nimport pandas as pd\n\nfile", "type": "text" }, "event_type": { @@ -11543,7 +11543,7 @@ { "event": { "delta": { - "text": "'s an example:\n\n```python\nimport os\nimport", + "text": "_path", "type": "text" }, "event_type": { @@ -11558,7 +11558,7 @@ { "event": { "delta": { - "text": " pandas as pd\n\nfile_path", + "text": " = \"/var/folders/rb/qvq", "type": "text" }, "event_type": { @@ -11573,7 +11573,7 @@ { "event": { "delta": { - "text": " = \"/var/folders/rb/qv8vwgyj6y", + "text": "vwgyj6yjd3t4pwsy9t0", "type": "text" }, "event_type": { @@ -11588,7 +11588,7 @@ { "event": { "delta": { - "text": "jd3t4pwsy9t0rm0000gn/T", + "text": "rm0000gn/T/tmpdcpkc9", "type": "text" }, "event_type": { @@ -11603,7 +11603,7 @@ { "event": { "delta": { - "text": "/tmpvg_5g", + "text": "_f/15dhK1rDinflation.csv\"\n\nif", "type": "text" }, "event_type": { @@ -11618,7 +11618,7 @@ { "event": { "delta": { - "text": "2pg/D2mMw3Z4", + "text": " os.path.isfile(file_path):\n df = pd.read_csv(file_path", "type": "text" }, "event_type": { @@ -11633,7 +11633,7 @@ { "event": { "delta": { - "text": "inflation.csv\"\n\nif os.path.isfile(file", + "text": ")\n print(\"Number of rows and columns in the", "type": "text" }, "event_type": { @@ -11648,7 +11648,7 @@ { "event": { "delta": { - "text": "_path):\n df = pd.read_csv(file_path", + "text": " data:\", df.shape)\n print(\"Columns of the data are:\", len", "type": "text" }, "event_type": { @@ -11663,7 +11663,7 @@ { "event": { "delta": { - "text": ")\n print(\"Number", + "text": "(df.columns))\n print(\"Columns of the data are:\", df.columns)\n", "type": "text" }, "event_type": { @@ -11678,7 +11678,7 @@ { "event": { "delta": { - "text": " of rows and columns in the", + "text": " print(\"Datatype of the columns are:\", df.dtypes)\n ", "type": "text" }, "event_type": { @@ -11693,7 +11693,7 @@ { "event": { "delta": { - "text": " data:\", df.shape)\n", + "text": " print(\"Data sample from file:\")\n print(df.head())\nelse:\n ", "type": "text" }, "event_type": { @@ -11708,7 +11708,7 @@ { "event": { "delta": { - "text": " print(\"Columns of", + "text": " print(\"The file does not exist\")\n```\n\nThis code checks if", "type": "text" }, "event_type": { @@ -11723,7 +11723,7 @@ { "event": { "delta": { - "text": " the data are:\", len(df.columns))\n print", + "text": " the file exists before attempting to read it. 
If the file does not exist", "type": "text" }, "event_type": { @@ -11738,82 +11738,7 @@ { "event": { "delta": { - "text": "(\"Columns of the data are:\", df.columns)\n print(\"Datatype", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " of the columns are:\", df.dtypes)\n print(\"Data sample from", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " file:\")\n print(df.head())\nelse:\n print(\"The file does", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " not exist\")\n```\n\nThis code checks if the file exists before attempting", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " to read it. If the file does not exist, it prints a message", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " indicating that the file does not exist.", + "text": ", it prints a message indicating that the file does not exist.", "type": "text" }, "event_type": { @@ -11846,7 +11771,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -11927,7 +11852,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "8vwgyj6yjd3t4pwsy9t", + "tool_call": "qvwgyj6yjd3", "type": "tool_call" }, "event_type": { @@ -11946,7 +11871,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "0rm0000gn/T/tmpvg_5g2pg/D2", + "tool_call": "t4pwsy9t0rm0000gn/T/tmpd", "type": "tool_call" }, "event_type": { @@ -11965,7 +11890,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "mMw3Z4inflation.csv\")\n# Rows\nprint(\"", + "tool_call": "cpkc9_f/15dhK1rDinflation.csv\")\n", "type": "tool_call" }, "event_type": { @@ -11984,7 +11909,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "Number of rows and columns in the data:\", df.shape)\n# Columns\n", + "tool_call": "# Rows\nprint(\"Number of rows and columns in the data:\", df", "type": "tool_call" }, "event_type": { @@ -12003,7 +11928,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "print(\"Columns of the data are:\", len(df", + "tool_call": ".shape)\n# Columns\nprint(\"Columns of the data are:\", len(df", "type": "tool_call" }, "event_type": { @@ -12100,9 +12025,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpvg_5g2pg/D2mMw3Z4inflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n# Sample of data\nprint(\"Data sample from file:\")\nprint(df.head())" + "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/rb/qvqvwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpdcpkc9_f/15dhK1rDinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n# Sample of data\nprint(\"Data sample from 
file:\")\nprint(df.head())" }, - "call_id": "471afd08-0685-4d57-97f6-7ca09bab4d2d", + "call_id": "bdb9c5e1-2082-49c8-ab7a-15aae2135656", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -12143,7 +12068,890 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file being in a different location.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists in the specified location.\\n2. Check the file name: Ensure that the file name is correct and it matches the one specified in the code.\\n3. Check the file extension: Verify that the file extension is correct and it matches the one specified in the code.\\n4. Try a different file path: If the file exists in a different location, try using that path instead.\\n5. 
Check for typos: Ensure that there are no typos in the file path or name.\\n\\nIf you are still unable to resolve the issue, please provide more details about the file and its location, and I\\'ll be happy to help further.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file being in a different location.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists in the specified location.\\n2. Use a relative path: If the file is in the same directory as your Python script, you can use a relative path instead of an absolute path.\\n3. Check file permissions: Make sure you have the necessary permissions to read the file.\\n4. 
Use a try-except block: You can use a try-except block to catch the FileNotFoundError and handle it accordingly.\\n\\nHere is an example of how you can modify the code to handle the FileNotFoundError:\\n\\n```\\nimport pandas as pd\\n\\ntry:\\n df = pd.read_csv(\"\")\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nexcept FileNotFoundError:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will print \"The file does not exist\" if the file is not found, instead of raising an error.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "/rb/qv8vwgyj6y", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "jd3t4pwsy9t0", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "rm0000gn/T/tmpdcpkc9", + "type": "text" + }, + "event_type": { + "__enum__": 
"ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_f/FKWQnYoVinflation.csv\"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " does not exist. This could be due to a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " number of reasons such as the file being deleted,", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the path being incorrect, or the file being in", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " a different location.\n\nTo resolve this issue, you can try the following:\n\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "1. Check the file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " path: Make sure the file path is correct and", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the file exists in the specified location.\n2. Use a relative path:", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " If the file is in the same directory as your Python script, you can", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " use a relative path instead of an absolute path.\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "3. Check file permissions: Make sure you have the necessary permissions to read", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the file.\n4. 
Use a try-except block: You can use", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " a try-except block to catch the FileNotFoundError and handle it accordingly.\n\nHere", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " is an example of how you can modify the code to handle the FileNotFoundError:\n\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "```\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ntry:\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " df = pd.read_csv(\"/var/folders/rb/q", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "v8vwgyj6yjd3t4pwsy9", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "t0rm0000gn/T/tmpdcpkc9_f/FKW", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "QnYoVinflation", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".csv\")\n df['Year'] = pd.to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_datetime(df['Year'], format='%Y')\n df_avg_inflation =", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " df.groupby('Year')['Inflation'].mean().reset_index()\n plt", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".figure(figsize=(10,6))\n plt.plot(df_avg_inflation['", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "Year'], df_avg_inflation['Inflation'], marker='o')\n ", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": 
null + }, + { + "event": { + "delta": { + "text": " plt.title('Average Yearly Inflation')\n ", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " plt.xlabel('Year')\n plt.ylabel('Inflation')\n plt.grid", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "(True)\n plt.show()\nexcept FileNotFoundError:\n print(\"The file does", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " not exist\")\n```\n\nThis code will print \"The file does not exist", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\" if the file is not found, instead of raising an error.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file being in a different location.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists in the specified location.\\n2. Use a relative path: If the file is in the same directory as your Python script, you can use a relative path instead of an absolute path.\\n3. Check file permissions: Make sure you have the necessary permissions to read the file.\\n4. 
Use a try-except block: You can use a try-except block to catch the FileNotFoundError and handle it accordingly.\\n\\nHere is an example of how you can modify the code to handle the FileNotFoundError:\\n\\n```\\nimport pandas as pd\\n\\ntry:\\n df = pd.read_csv(\"\")\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nexcept FileNotFoundError:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will print \"The file does not exist\" if the file is not found, instead of raising an error.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "\n\n# Load the CSV file\ndf = pd", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": ".read_csv(\"/var/folders/rb/qv", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "8vwgyj6yjd3t4", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "pwsy9t0rm0000gn", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": 
{ + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "/T/tmpdcpkc9_f/FKWQ", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "nYoVinflation.csv\")\n\n# Convert the '", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "Year' column to datetime\ndf['Year'] = pd", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": ".to_datetime(df['Year'], format", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_in", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "flation['Inflation'], marker='o')\nplt.title('Average Yearly", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " Inflation')\nplt.xlabel('Year')\nplt.ylabel", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "('Inflation')\nplt.grid(True)\nplt.show()", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpdcpkc9_f/FKWQnYoVinflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 
'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" + }, + "call_id": "619c3b2c-3e23-485f-85bd-38a5ecf398b2", + "tool_name": { + "__enum__": "BuiltinTool", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you should ensure that the file exists and the path is correct. If the file does not exist, you will need to create it or obtain it from the relevant source. 
If the path is incorrect, you will need to update the path to the correct location of the file.\\n\\nAdditionally, you can use the `os` module to check if the file exists before trying to load it:\\n\\n```\\nimport os\\nimport pandas as pd\\n\\nfile_path = \"\"\\n\\nif os.path.isfile(file_path):\\n df = pd.read_csv(file_path)\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nelse:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will check if the file exists before trying to load it, and will print a message if the file does not exist.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -12208,7 +13016,7 @@ { "event": { "delta": { - "text": "rm0000gn/T", + "text": "rm0000gn/T/tmp5zsm1ywy/RKBk", "type": "text" }, "event_type": { @@ -12223,7 +13031,7 @@ { "event": { "delta": { - "text": "/tmpoy7lzz", + "text": "Al1zinflation.csv\" does not exist. This could be due to", "type": "text" }, "event_type": { @@ -12238,7 +13046,7 @@ { "event": { "delta": { - "text": "an/HWxJnIYlinflation.csv\" does not exist", + "text": " a number of reasons such as the file being deleted, the path being incorrect", "type": "text" }, "event_type": { @@ -12253,7 +13061,7 @@ { "event": { "delta": { - "text": ". This could be due to a number of reasons such as the file being", + "text": ", or the file not being accessible.\n\nTo resolve this issue, you should", "type": "text" }, "event_type": { @@ -12268,7 +13076,7 @@ { "event": { "delta": { - "text": " deleted, the path being incorrect, or the file being in a different location", + "text": " ensure that the file exists and the path is correct. 
If the file does", "type": "text" }, "event_type": { @@ -12283,7 +13091,7 @@ { "event": { "delta": { - "text": ".\n\nTo resolve this issue, you can try the", + "text": " not exist, you will need to create it or obtain it from the relevant", "type": "text" }, "event_type": { @@ -12298,7 +13106,7 @@ { "event": { "delta": { - "text": " following:\n\n1. Check the file path: Make sure the file path is", + "text": " source. If the path is incorrect, you will need to update the path", "type": "text" }, "event_type": { @@ -12313,7 +13121,7 @@ { "event": { "delta": { - "text": " correct and the file exists in the specified location.\n2. Check the file", + "text": " to the correct location of the file.\n\nAdditionally, you can use the `", "type": "text" }, "event_type": { @@ -12328,7 +13136,7 @@ { "event": { "delta": { - "text": " name: Ensure that the file name is correct and", + "text": "os` module to check if the file exists before trying to load it:\n\n", "type": "text" }, "event_type": { @@ -12343,7 +13151,7 @@ { "event": { "delta": { - "text": " it matches the one specified in the code.\n3", + "text": "```\nimport os\nimport pandas as pd\nimport", "type": "text" }, "event_type": { @@ -12358,7 +13166,7 @@ { "event": { "delta": { - "text": ". Check the file extension: Verify that the file extension is correct and it", + "text": " matplotlib.pyplot as plt\n\nfile_path = \"/var/folders/rb/q", "type": "text" }, "event_type": { @@ -12373,7 +13181,7 @@ { "event": { "delta": { - "text": " matches the one specified in the code.\n4. Try a different file path", + "text": "v8vwgyj6yjd3t4pwsy9", "type": "text" }, "event_type": { @@ -12388,7 +13196,7 @@ { "event": { "delta": { - "text": ": If the file exists in a different location, try using that path instead", + "text": "t0rm0000gn/T/tmp5zsm1ywy/R", "type": "text" }, "event_type": { @@ -12403,7 +13211,7 @@ { "event": { "delta": { - "text": ".\n5. 
Check for typos: Ensure that there are no typos", + "text": "KBkAl1zinflation.csv\"\n\nif os.path.isfile(file_path):\n", "type": "text" }, "event_type": { @@ -12418,7 +13226,7 @@ { "event": { "delta": { - "text": " in the file path or name.\n\nIf you are", + "text": " df = pd.read_csv(file_path)\n df['", "type": "text" }, "event_type": { @@ -12433,7 +13241,7 @@ { "event": { "delta": { - "text": " still unable to resolve the issue, please provide more", + "text": "Year'] = pd.to_datetime(df['Year'], format='%Y')\n ", "type": "text" }, "event_type": { @@ -12448,7 +13256,7 @@ { "event": { "delta": { - "text": " details about the file and its location, and I'll be happy to help", + "text": " df_avg_inflation = df.groupby('Year')['Inflation'].mean().", "type": "text" }, "event_type": { @@ -12463,7 +13271,97 @@ { "event": { "delta": { - "text": " further.", + "text": "reset_index()\n plt.figure(figsize=(10,6))\n plt.plot", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "(df_avg_inflation['Year'], df_avg_inflation['Inflation'],", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " marker='o')\n plt.title('Average Yearly Inflation')\n ", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " plt.xlabel('Year')\n plt.ylabel('Inflation')\n plt.grid", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "(True)\n plt.show()\nelse:\n print(\"The file does not", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " exist\")\n```\n\nThis code will check if the file exists before trying to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " load it, and will print a message if the file does not exist.", "type": "text" }, "event_type": { @@ -12496,7 +13394,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 
'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file being in a different location.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists in the specified location.\\n2. Check the file name: Ensure that the file name is correct and it matches the one specified in the code.\\n3. Check the file extension: Verify that the file extension is correct and it matches the one specified in the code.\\n4. Try a different file path: If the file exists in a different location, try using that path instead.\\n5. Check for typos: Ensure that there are no typos in the file path or name.\\n\\nIf you are still unable to resolve the issue, please provide more details about the file and its location, and I\\'ll be happy to help further.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you should ensure that the file exists and the path is correct. If the file does not exist, you will need to create it or obtain it from the relevant source. 
If the path is incorrect, you will need to update the path to the correct location of the file.\\n\\nAdditionally, you can use the `os` module to check if the file exists before trying to load it:\\n\\n```\\nimport os\\nimport pandas as pd\\n\\nfile_path = \"\"\\n\\nif os.path.isfile(file_path):\\n df = pd.read_csv(file_path)\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nelse:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will check if the file exists before trying to load it, and will print a message if the file does not exist.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -12596,7 +13494,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "0rm0000gn/T/tmpoy7lzzan/HWx", + "tool_call": "0rm0000gn/T/tmp5zsm1ywy/RKB", "type": "tool_call" }, "event_type": { @@ -12615,7 +13513,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "JnIYlinflation.csv\")\n\n# Convert", + "tool_call": "kAl1zinflation.csv\")\n\n# Convert the 'Year'", "type": "tool_call" }, "event_type": { @@ -12634,7 +13532,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " the 'Year' column to datetime\ndf['Year", + "tool_call": " column to datetime\ndf['Year'] = pd.to_datetime(df['Year", "type": "tool_call" }, "event_type": { @@ -12653,7 +13551,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group", + "tool_call": "'], format='%Y')\n\n# Group by", "type": "tool_call" }, "event_type": { @@ -12672,7 +13570,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " by 'Year' and calculate the average inflation\ndf", + "tool_call": " 'Year' and calculate the average inflation\ndf_avg_in", "type": "tool_call" }, "event_type": { @@ -12691,7 +13589,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_avg_inflation = df.groupby('Year')['Inflation'].", + "tool_call": "flation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n", "type": "tool_call" }, "event_type": { @@ -12710,7 +13608,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "mean().reset_index()\n\n# Plot the average yearly inflation", + "tool_call": "# Plot the average inflation as a time series\nplt.figure(figsize=(10", "type": "tool_call" }, "event_type": { @@ -12729,7 +13627,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " as a time series\n", + "tool_call": ",6))\nplt.plot(df_avg_inflation['Year'], df_avg_in", "type": "tool_call" }, "event_type": { @@ -12748,7 +13646,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "plt.figure(figsize=(10,6))\nplt.plot", + "tool_call": "flation['Inflation'], marker='o')\nplt.title('Average Yearly", "type": "tool_call" }, "event_type": { @@ -12767,7 +13665,7 @@ 
"__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "(df_avg_inflation['Year'], df_avg_inflation['Inflation'],", + "tool_call": " Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt", "type": "tool_call" }, "event_type": { @@ -12786,7 +13684,643 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " marker='o')\nplt.title('Average Yearly", + "tool_call": ".grid(True)\nplt.show()", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmp5zsm1ywy/RKBkAl1zinflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" + }, + "call_id": "61b988d6-45f4-4147-8b62-69c3abbb03a9", + "tool_name": { + "__enum__": "BuiltinTool", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you should ensure that the file exists and the path is correct. If the file does not exist, you will need to create it or obtain it from the relevant source. 
If the path is incorrect, you will need to update the path to the correct location of the file.\\n\\nAdditionally, you can use the `os` module to check if the file exists before trying to read it. Here is an example:\\n\\n```\\nimport os\\nimport pandas as pd\\n\\nfile_path = \"\"\\n\\nif os.path.isfile(file_path):\\n df = pd.read_csv(file_path)\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nelse:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will check if the file exists before trying to read it. If the file does not exist, it will print \"The file does not exist\".', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders/rb/qv8", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "vwgyj6y", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "jd3t4pwsy9t0", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + 
"text": "rm0000gn/T/tmp1ugde3u9/FSj", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "wY288inflation.csv\" does not exist. This could be due", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to a number of reasons such as the file being deleted, the path being", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " incorrect, or the file not being accessible.\n\nTo resolve this issue, you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " should ensure that the file exists and the path is correct. If the file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " does not exist, you will need to create it", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " or obtain it from the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " relevant source. If the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " path is incorrect, you will need to update the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " path to the correct location of the file.\n\nAdditionally", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ", you can use the `os` module to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " check if the file exists before trying to read it", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". 
Here is an example:\n\n```\nimport os", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "file_path = \"/var/folders/rb/qv8vwgyj", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "6yjd3t4pwsy9t0rm0000", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "gn/T/tmp1ugde3u9/FSjwY288", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "inflation.csv\"\n\nif os.path.isfile(file_path):\n df = pd", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".read_csv(file_path)\n df['Year'] = pd.to_datetime(df", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "['Year'], format='%Y')\n df_avg", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_inflation = df.groupby('Year')['Inflation", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'].mean().reset_index()\n plt.figure(figsize=(10,6))\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " plt.plot(df_avg_inflation['Year'], df_avg_inflation['", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "Inflation'], marker='o')\n plt.title('Average Yearly In", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "flation')\n plt.xlabel('Year')\n plt", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": 
".ylabel('Inflation')\n plt.grid(True)\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " plt.show()\nelse:\n print(\"The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " file does not exist\")\n```\n\nThis code will", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " check if the file exists before trying to read it. If the file does", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " not exist, it will print \"The file does not exist\".", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\"), CompletionMessage(role='assistant', content='The error message indicates that the file \"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you should ensure that the file exists and the path is correct. If the file does not exist, you will need to create it or obtain it from the relevant source. If the path is incorrect, you will need to update the path to the correct location of the file.\\n\\nAdditionally, you can use the `os` module to check if the file exists before trying to read it. Here is an example:\\n\\n```\\nimport os\\nimport pandas as pd\\n\\nfile_path = \"\"\\n\\nif os.path.isfile(file_path):\\n df = pd.read_csv(file_path)\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nelse:\\n print(\"The file does not exist\")\\n```\\n\\nThis code will check if the file exists before trying to read it. 
If the file does not exist, it will print \"The file does not exist\".', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "started" + }, + "tool_call": "", "type": "tool_call" }, "event_type": { @@ -12805,7 +14339,235 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " Inflation')\nplt.xlabel('Year')\nplt.ylabel", + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " file\ndf = pd.read_csv(\"/var/folders/rb/qv", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "8vwgyj6yjd3t4pwsy9t", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "0rm0000gn/T/tmp1ugde3u9/FS", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "jwY288inflation.csv\")\n\n# Convert the 'Year' column", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " to datetime\ndf['Year'] = pd.to_datetime(df['Year'],", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " format='%Y')\n\n# Group 
by", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " 'Year' and calculate the average inflation\ndf_avg_in", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "flation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "# Plot the average yearly inflation as a time series\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "plt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "Year'], df_avg_inflation['Inflation'], marker='o')\nplt", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": ".title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel", "type": "tool_call" }, "event_type": { @@ -12845,9 +14607,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpoy7lzzan/HWxJnIYlinflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average yearly inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmp1ugde3u9/FSjwY288inflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average 
yearly inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" }, - "call_id": "dda25133-4be1-418e-917a-dc8c7822e679", + "call_id": "da5760dd-614a-4c19-954c-b4e354e75d79", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -12923,7 +14685,22 @@ { "event": { "delta": { - "text": " error message indicates that the file \"/var/folders/rb/qv8", + "text": " error message indicates that the file \"/var/folders", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "/rb/qv8", "type": "text" }, "event_type": { @@ -12953,7 +14730,7 @@ { "event": { "delta": { - "text": "rm0000gn/T/tmpoy7lzzan/HWxJ", + "text": "rm0000gn/T/tmp5zsm1ywy/RKBk", "type": "text" }, "event_type": { @@ -12968,7 +14745,7 @@ { "event": { "delta": { - "text": "nIYlinflation.csv\" does not exist. This could be", + "text": "Al1zinflation.csv\" does not exist. This could be", "type": "text" }, "event_type": { @@ -12983,7 +14760,7 @@ { "event": { "delta": { - "text": " due to a number of reasons such as the file being deleted,", + "text": " due to a number of reasons such as the file being deleted, the", "type": "text" }, "event_type": { @@ -12998,7 +14775,7 @@ { "event": { "delta": { - "text": " the path being", + "text": " path being incorrect, or the file", "type": "text" }, "event_type": { @@ -13013,7 +14790,7 @@ { "event": { "delta": { - "text": " incorrect, or the file being in a different location.\n\nTo resolve this", + "text": " not being accessible.\n\nTo resolve this issue, you should ensure", "type": "text" }, "event_type": { @@ -13028,7 +14805,7 @@ { "event": { "delta": { - "text": " issue, you can try the following:\n\n1. Check the file path", + "text": " that the file exists and the", "type": "text" }, "event_type": { @@ -13043,7 +14820,7 @@ { "event": { "delta": { - "text": ": Make sure the file path is correct and the file exists in the specified", + "text": " path is correct. If the file does not", "type": "text" }, "event_type": { @@ -13058,7 +14835,7 @@ { "event": { "delta": { - "text": " location.\n2. Check the file name: Ensure", + "text": " exist, you will need to create it or obtain it", "type": "text" }, "event_type": { @@ -13073,7 +14850,7 @@ { "event": { "delta": { - "text": " that the file name is correct and it matches", + "text": " from the relevant", "type": "text" }, "event_type": { @@ -13088,7 +14865,7 @@ { "event": { "delta": { - "text": " the one specified in the code.\n3.", + "text": " source. 
If the path is", "type": "text" }, "event_type": { @@ -13103,7 +14880,7 @@ { "event": { "delta": { - "text": " Check the file extension: Verify that the file extension is", + "text": " incorrect, you will need to update the path", "type": "text" }, "event_type": { @@ -13118,7 +14895,7 @@ { "event": { "delta": { - "text": " correct and it matches the one specified in the code.\n4.", + "text": " to the correct location of the file.\n\nAdditionally,", "type": "text" }, "event_type": { @@ -13133,7 +14910,7 @@ { "event": { "delta": { - "text": " Try a different file path: If the file exists in", + "text": " you can use the `os` module to", "type": "text" }, "event_type": { @@ -13148,7 +14925,7 @@ { "event": { "delta": { - "text": " a different location, try using that", + "text": " check if the file exists before trying to load it:\n\n``", "type": "text" }, "event_type": { @@ -13163,7 +14940,7 @@ { "event": { "delta": { - "text": " path instead.\n5. Check for typos", + "text": "`\nimport os\nimport pandas as pd\n\nfile_path", "type": "text" }, "event_type": { @@ -13178,7 +14955,7 @@ { "event": { "delta": { - "text": ": Ensure that there are no typos in the file path or", + "text": " = \"/var/folders", "type": "text" }, "event_type": { @@ -13193,7 +14970,7 @@ { "event": { "delta": { - "text": " name.\n\nIf you are still unable to", + "text": "/rb/qv8vwgyj6y", "type": "text" }, "event_type": { @@ -13208,7 +14985,7 @@ { "event": { "delta": { - "text": " resolve the issue, please provide more details about", + "text": "jd3t4pwsy9t0rm0000gn/T", "type": "text" }, "event_type": { @@ -13223,7 +15000,600 @@ { "event": { "delta": { - "text": " the file and its location, and I'll be happy to help further.", + "text": "/tmp5zsm1ywy/RKBkAl1zinflation.csv", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\"\n\nif os.path.isfile(file_path):\n df =", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " pd.read_csv(file_path)\n print(df.head())\n print", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "(df.info())\n print(df.describe())\nelse:\n print(\"The file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " does not exist\")\n```\n\nThis code will check if the file exists before", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " trying to load it, and will print a message if", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the file does not exist.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": 
"progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')]), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders/rb/qv8", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "vwgyj6yjd3t4p", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "wsy9t0rm0000gn/T/tmpdcpkc9", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_f/FKWQnYoVinflation.csv\"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " does not exist. 
This could be", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " due to a number of reasons such as the file being deleted, the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " path being incorrect, or the file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " being in a different location.\n\nTo resolve this issue, you can try", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the following:\n\n1. Check the file path", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ": Make sure the file path is correct and the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " file exists in the specified location.\n2. Use a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " relative path", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ": If the file is in the same directory as", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " your Python script, you can use", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " a relative path instead of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " an absolute path.\n3. 
Check file permissions", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ": Make sure you have the necessary permissions to read the file.\n4.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Use a try-except block: You can use a try-except", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " block to catch the FileNotFoundError and handle it accordingly.\n\nHere is an example of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " how you can modify the code to handle the FileNotFoundError:\n\n```\nimport pandas", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " as pd\n\ntry:\n df =", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " pd.read_csv(\"/var/folders/rb/qv8vwgyj", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "6yjd3t", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "4pwsy9t0rm0000", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "gn/T/tmpdcpkc9_f/FKWQnYoVinflation", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".csv\")\n print(df.head())\n print(df.info())\n print(df", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".describe())\nexcept FileNotFoundError:\n print(\"The file does not exist\")\n``", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "`\n\nThis code will print \"The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + 
"event": { + "delta": { + "text": " file does not exist\" if the file is not found, instead", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " of raising an error.", "type": "text" }, "event_type": { @@ -13356,7 +15726,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "/tmpoy7lzzan/HWxJnIYlinflation", + "tool_call": "/tmpdcpkc9_f/FKWQnYoVinflation.csv\")\n\n", "type": "tool_call" }, "event_type": { @@ -13375,7 +15745,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head", + "tool_call": "# Print the first few rows of the dataframe\n", "type": "tool_call" }, "event_type": { @@ -13394,7 +15764,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "())\n\n# Print information about", + "tool_call": "print(df.head())\n\n# Print information about", "type": "tool_call" }, "event_type": { @@ -13413,7 +15783,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " the dataframe\nprint(df.info())\n\n# Print summary", + "tool_call": " the dataframe\nprint(df", "type": "tool_call" }, "event_type": { @@ -13432,7 +15802,26 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " statistics of the dataframe\nprint(df.describe())", + "tool_call": ".info())\n\n# Print summary statistics of the dataframe\nprint(df.describe", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "())", "type": "tool_call" }, "event_type": { @@ -13453,9 +15842,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpoy7lzzan/HWxJnIYlinflation.csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head())\n\n# Print information about the dataframe\nprint(df.info())\n\n# Print summary statistics of the dataframe\nprint(df.describe())" + "code": "import pandas as pd\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpdcpkc9_f/FKWQnYoVinflation.csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head())\n\n# Print information about the dataframe\nprint(df.info())\n\n# Print summary statistics of the dataframe\nprint(df.describe())" }, - "call_id": "a46e3fb0-048c-436b-a64e-cc27f662a7f5", + "call_id": "4208ff16-c9e6-4754-8566-8aeb587afcb3", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -13496,6 +15885,598 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:3e3a0\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:fd0f6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help. What's your first question about Torchtune?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'using LoRA in Torchtune'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:7da0c\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:7da0c\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:7da0c\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune\\'s LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we\\'ve loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. 
code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\"\"\"\\n {total_params} total params,\\n {trainable_params}\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \"\"\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe , tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "To", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " use LoRA in Torchtune, you can follow", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " these steps:\n\n1. Import the necessary modules: `", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "from torchtune.models.llama2 import llama2_7b", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ", lora_llama2_7b`\n2. 
Create a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Llama2 model with LoRA: `lora", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_model = lora_llama2_7b(lora_attn_modules", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "=[\"q_proj\", \"v_proj\"])`\n3. Load the pre-trained", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Llama2 weights into the LoRA", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " model: `lora_model.load_state_dict(base_model.state_dict(), strict", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "=False)`\n4. Set only LoRA parameters to trainable:", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " `from torchtune.modules.peft.peft_utils import get", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "_adapter_params, set_trainable_params`\n5. 
Run the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " LoRA finetune using torchtune's Lo", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "RA recipe: `tune run --", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "nnodes 1 --nproc_per_node 2 lora_finet", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "une_distributed --config llama2/7B_lora`\n\nYou can", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " also experiment with different LoRA configurations, such as applying Lo", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "RA to all linear layers in the self-attention, increasing", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the rank, and scaling alpha and rank together.\n\nNote: You", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " need to have the Llama2 weights and tokenizer downloaded", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " and installed, and you need to have the necessary dependencies", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " installed, including torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " and PyTorch.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + 
"('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:3e3a0\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:fd0f6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help. What's your first question about Torchtune?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "[k", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "nowledge_search(query=\"using LoRA in Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\")]", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "using LoRA in Torchtune" + }, + "call_id": "62b19206-ed9f-42d1-a614-1582d8598193", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', 
content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:3e3a0\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:fd0f6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'m ready to help. What", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'s your first question about Torchtune?", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { @@ -13554,7 +16535,7 @@ "arguments": { "query": "Torchtune documentation" }, - "call_id": "f4f09cc4-7ea8-47f3-a07d-562a7241b41e", + "call_id": "42e0a687-a52e-4208-8181-db6e7a84faeb", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -13642,22 +16623,7 @@ { "event": { "delta": { - "text": " attention instead of", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the standard multi-head attention.", + "text": " attention instead of the standard multi-head attention.", "type": "text" }, "event_type": { @@ -13691,6 +16657,89 @@ "type": "generator" }, "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Instead of the standard multi-head attention, what attention type does Llama3-8B use?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Llama3-8B attention type'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:num-1\\nContent: 3 `_ is a new family of models released by Meta AI that improves upon the performance of the Llama2 family\\nof models across a `range of different benchmarks `_.\\nCurrently there are two different sizes of Meta Llama 3: 8B and 70B. In this tutorial we will focus on the 8B size model.\\nThere are a few main changes between Llama2-7B and Llama3-8B models:\\n\\n- Llama3-8B uses `grouped-query attention `_ instead of the standard multi-head attention from Llama2-7B\\n- Llama3-8B has a larger vocab size (128,256 instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-\\n'), TextContentItem(type='text', text=\"Result 2:\\nDocument_id:num-1\\nContent: instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings `_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. You will need to follow the instructions\\non the `official Meta page `_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here `_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3\\n\"), TextContentItem(type='text', text=\"Result 3:\\nDocument_id:num-0\\nContent: :`download Llama3 Instruct weights `\\n\\n\\nTemplate changes from Llama2 to Llama3\\n--------------------------------------\\n\\nThe Llama2 chat model requires a specific template when prompting the pre-trained\\nmodel. 
Since the chat model was pretrained with this prompt template, if you want to run\\ninference on the model, you'll need to use the same template for optimal performance\\non chat data. Otherwise, the model will just perform standard text completion, which\\nmay or may not align with your intended use case.\\n\\nFrom the `official Llama2 prompt\\ntemplate guide `_\\nfor the Llama2 chat model, we can see that special tags are added:\\n\\n.. code-block:: text\\n\\n [INST] <>\\n You are a helpful, respectful, and honest assistant.\\n <>\\n\\n Hi! I am a human. [/INST] Hello there! Nice to meet you! I'm Meta AI, your friendly AI assistant \\n\\nLlama3 Instruct `overhauled `\\n\"), TextContentItem(type='text', text='Result 4:\\nDocument_id:num-0\\nContent: \\'m Meta AI, your friendly AI assistant<|eot_id|>\\n\\nThe tags are entirely different, and they are actually encoded differently than in\\nLlama2. Let\\'s walk through tokenizing an example with the Llama2 template and the\\nLlama3 template to understand how.\\n\\n.. note::\\n The Llama3 Base model uses a `different prompt template\\n `_ than Llama3 Instruct\\n because it has not yet been instruct tuned and the extra special tokens are untrained. If you\\n are running inference on the Llama3 Base model without fine-tuning we recommend the base\\n template for optimal performance. Generally, for instruct and chat data, we recommend using\\n Llama3 Instruct with its prompt template. The rest of this tutorial assumes you are using\\n Llama3 Instruct.\\n\\n.. _prompt_template_vs_special_tokens:\\n\\nTokenizing prompt templates & special tokens\\n--------------------------------------------\\n\\nLet\\'s say I have a sample of a single user-assistant turn accompanied with a system\\nprompt:\\n\\n.. code-block:: python\\n\\n sample = [\\n {\\n \"role\": \"system\",\\n \"\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:num-3\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. note::\\n\\n Calling :func:`lora_llama_2\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "L", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "lama3-8B uses grouped-query attention instead of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the standard multi-head attention.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Instead of the standard multi-head attention, what attention type does Llama3-8B use?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Llama3-8B attention type'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:num-1\\nContent: 3 `_ is a new family of models released by Meta AI that improves upon the performance of the Llama2 family\\nof models across a `range of different benchmarks `_.\\nCurrently there are two different sizes of Meta Llama 3: 8B and 70B. In this tutorial we will focus on the 8B size model.\\nThere are a few main changes between Llama2-7B and Llama3-8B models:\\n\\n- Llama3-8B uses `grouped-query attention `_ instead of the standard multi-head attention from Llama2-7B\\n- Llama3-8B has a larger vocab size (128,256 instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-\\n'), TextContentItem(type='text', text=\"Result 2:\\nDocument_id:num-1\\nContent: instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings `_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. 
You will need to follow the instructions\\non the `official Meta page `_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here `_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3\\n\"), TextContentItem(type='text', text=\"Result 3:\\nDocument_id:num-0\\nContent: :`download Llama3 Instruct weights `\\n\\n\\nTemplate changes from Llama2 to Llama3\\n--------------------------------------\\n\\nThe Llama2 chat model requires a specific template when prompting the pre-trained\\nmodel. Since the chat model was pretrained with this prompt template, if you want to run\\ninference on the model, you'll need to use the same template for optimal performance\\non chat data. Otherwise, the model will just perform standard text completion, which\\nmay or may not align with your intended use case.\\n\\nFrom the `official Llama2 prompt\\ntemplate guide `_\\nfor the Llama2 chat model, we can see that special tags are added:\\n\\n.. code-block:: text\\n\\n [INST] <>\\n You are a helpful, respectful, and honest assistant.\\n <>\\n\\n Hi! I am a human. [/INST] Hello there! Nice to meet you! I'm Meta AI, your friendly AI assistant \\n\\nLlama3 Instruct `overhauled `\\n\"), TextContentItem(type='text', text='Result 4:\\nDocument_id:num-0\\nContent: \\'m Meta AI, your friendly AI assistant<|eot_id|>\\n\\nThe tags are entirely different, and they are actually encoded differently than in\\nLlama2. Let\\'s walk through tokenizing an example with the Llama2 template and the\\nLlama3 template to understand how.\\n\\n.. note::\\n The Llama3 Base model uses a `different prompt template\\n `_ than Llama3 Instruct\\n because it has not yet been instruct tuned and the extra special tokens are untrained. If you\\n are running inference on the Llama3 Base model without fine-tuning we recommend the base\\n template for optimal performance. Generally, for instruct and chat data, we recommend using\\n Llama3 Instruct with its prompt template. The rest of this tutorial assumes you are using\\n Llama3 Instruct.\\n\\n.. _prompt_template_vs_special_tokens:\\n\\nTokenizing prompt templates & special tokens\\n--------------------------------------------\\n\\nLet\\'s say I have a sample of a single user-assistant turn accompanied with a system\\nprompt:\\n\\n.. code-block:: python\\n\\n sample = [\\n {\\n \"role\": \"system\",\\n \"\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:num-3\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. 
note::\\n\\n Calling :func:`lora_llama_2\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='insert_into_memory', description='Insert documents into memory', parameters={}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -13740,7 +16789,75 @@ { "event": { "delta": { - "text": " attention instead of", + "text": " attention instead of the standard multi-head attention.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Instead of the standard multi-head attention, what attention type does Llama3-8B use?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Llama3-8B attention type'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:num-1\\nContent: 3 `_ is a new family of models released by Meta AI that improves upon the performance of the Llama2 family\\nof models across a `range of different benchmarks `_.\\nCurrently there are two different sizes of Meta Llama 3: 8B and 70B. In this tutorial we will focus on the 8B size model.\\nThere are a few main changes between Llama2-7B and Llama3-8B models:\\n\\n- Llama3-8B uses `grouped-query attention `_ instead of the standard multi-head attention from Llama2-7B\\n- Llama3-8B has a larger vocab size (128,256 instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-\\n'), TextContentItem(type='text', text=\"Result 2:\\nDocument_id:num-1\\nContent: instead of 32,000 from Llama2 models)\\n- Llama3-8B uses a different tokenizer than Llama2 models (`tiktoken `_ instead of `sentencepiece `_)\\n- Llama3-8B uses a larger intermediate dimension in its MLP layers than Llama2-7B\\n- Llama3-8B uses a higher base value to calculate theta in its `rotary positional embeddings `_\\n\\n|\\n\\nGetting access to Llama3-8B-Instruct\\n------------------------------------\\n\\nFor this tutorial, we will be using the instruction-tuned version of Llama3-8B. First, let's download the model from Hugging Face. 
You will need to follow the instructions\\non the `official Meta page `_ to gain access to the model.\\nNext, make sure you grab your Hugging Face token from `here `_.\\n\\n\\n.. code-block:: bash\\n\\n tune download meta-llama/Meta-Llama-3\\n\"), TextContentItem(type='text', text=\"Result 3:\\nDocument_id:num-0\\nContent: :`download Llama3 Instruct weights `\\n\\n\\nTemplate changes from Llama2 to Llama3\\n--------------------------------------\\n\\nThe Llama2 chat model requires a specific template when prompting the pre-trained\\nmodel. Since the chat model was pretrained with this prompt template, if you want to run\\ninference on the model, you'll need to use the same template for optimal performance\\non chat data. Otherwise, the model will just perform standard text completion, which\\nmay or may not align with your intended use case.\\n\\nFrom the `official Llama2 prompt\\ntemplate guide `_\\nfor the Llama2 chat model, we can see that special tags are added:\\n\\n.. code-block:: text\\n\\n [INST] <>\\n You are a helpful, respectful, and honest assistant.\\n <>\\n\\n Hi! I am a human. [/INST] Hello there! Nice to meet you! I'm Meta AI, your friendly AI assistant \\n\\nLlama3 Instruct `overhauled `\\n\"), TextContentItem(type='text', text='Result 4:\\nDocument_id:num-0\\nContent: \\'m Meta AI, your friendly AI assistant<|eot_id|>\\n\\nThe tags are entirely different, and they are actually encoded differently than in\\nLlama2. Let\\'s walk through tokenizing an example with the Llama2 template and the\\nLlama3 template to understand how.\\n\\n.. note::\\n The Llama3 Base model uses a `different prompt template\\n `_ than Llama3 Instruct\\n because it has not yet been instruct tuned and the extra special tokens are untrained. If you\\n are running inference on the Llama3 Base model without fine-tuning we recommend the base\\n template for optimal performance. Generally, for instruct and chat data, we recommend using\\n Llama3 Instruct with its prompt template. The rest of this tutorial assumes you are using\\n Llama3 Instruct.\\n\\n.. _prompt_template_vs_special_tokens:\\n\\nTokenizing prompt templates & special tokens\\n--------------------------------------------\\n\\nLet\\'s say I have a sample of a single user-assistant turn accompanied with a system\\nprompt:\\n\\n.. code-block:: python\\n\\n sample = [\\n {\\n \"role\": \"system\",\\n \"\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:num-3\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. 
note::\\n\\n Calling :func:`lora_llama_2\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "L", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "lama3-8B uses grouped-query attention instead of", "type": "text" }, "event_type": { @@ -13861,7 +16978,7 @@ "arguments": { "query": "Llama3-8B attention type" }, - "call_id": "1a4d353b-697b-433f-94b8-d01299cd1003", + "call_id": "b3019313-870b-42e5-a2a3-02f933f153b1", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -13972,7 +17089,7 @@ "arguments": { "query": "Llama3-8B attention type" }, - "call_id": "95048731-e3a9-42ea-a6c1-04147bb303b4", + "call_id": "e4659511-69a4-412b-b995-fa90f43a25c7", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -14010,7 +17127,75 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Search the web and tell me who the current CEO of Meta is.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'query': 'current CEO of Meta'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content='{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\\\u2018Boz\\\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, 
Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\\\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\\\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\\\u2019s finance and facilities team to keep track of the company\\\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"The 11 People Running Meta\\'s $1 Trillion Social Media and ... - Observer\", \"url\": \"https://observer.com/2024/01/meta-facebook-top-executives/\", \"content\": \"He returned after just a year, saying Facebook is \\\\u201cthe best place for me to roll up my sleeves and dig in to help.\\\\u201d\\\\u00a0As Meta\\\\u2019s chief product officer, Cox is currently focusing on Meta\\\\u2019s A.I. initiatives, most recently undertaking Meta\\\\u2019s Fundamental AI Research (FAIR) team, led by Yann LeCun and Joelle Pineau. Cathcart started at Meta in 2010 as vice president of product management, notably leading the ads initiative of Facebook\\\\u2019s News Feed product. Filed Under: Business, Social Media, Media, Technology, Big Tech, Will Cathcart, Dave Wehner, Tom Alison, Erin Egan, Andrew Mccollum, Javier Olivan, Andrew Bosworth, Susan Li, Dustin Moskovitz, Chris Cox, Adam Mosseri, Yann LeCun, Eduardo Saverin, Chris Hughes, Leadership, Mark Zuckerberg, Facebook, Instagram, Meta\", \"score\": 0.7591723, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Forbes\", \"url\": \"https://www.forbes.com/profile/mark-zuckerberg/\", \"content\": \"Meta CEO Mark Zuckerberg \\\\u201cloved\\\\u201d an image on Facebook known as \\\\\"Challah Horse\\\\\" that happens to be AI-generated, highlighting the amount of AI spam on the platform. 
### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President Elect Meta has donated $1 million to President-elect Donald Trump\\\\u2019s inaugural fund, the company confirmed to various news outlets on Wednesday, a move that comes just weeks after its CEO Mark Zuckerberg met with Trump at his Mar-a-Lago residence in an apparent bid to mend years of strained ties. ### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President-Elect Read the full profile on Forbes: https://www.forbes.com/sites/kerryadolan/2023/09/26/mark-gets-meta-zuckerberg-talks-ai-and-that-musk-mma-fight-thats-never-going-to-happen/?sh=671046e73037\", \"score\": 0.6410185, \"raw_content\": null}]}')])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Search the web for information', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Search the web and tell me who the current CEO of Meta is.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'query': 'current CEO of Meta'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content='{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\\\u2018Boz\\\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. 
In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\\\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\\\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\\\u2019s finance and facilities team to keep track of the company\\\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"Meta to spend up to $65 billion this year to power AI goals, Zuckerberg ...\", \"url\": \"https://www.reuters.com/technology/meta-invest-up-65-bln-capital-expenditure-this-year-2025-01-24/\", \"content\": \"Meta Platforms plans to spend as much as $65 billion this year to expand its AI infrastructure, CEO Mark Zuckerberg said on Friday, aiming to bolster the company\\'s position against rivals OpenAI\", \"score\": 0.73914057, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Forbes\", \"url\": \"https://www.forbes.com/profile/mark-zuckerberg/\", \"content\": \"Meta CEO Mark Zuckerberg \\\\u201cloved\\\\u201d an image on Facebook known as \\\\\"Challah Horse\\\\\" that happens to be AI-generated, highlighting the amount of AI spam on the platform. ### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President Elect Meta has donated $1 million to President-elect Donald Trump\\\\u2019s inaugural fund, the company confirmed to various news outlets on Wednesday, a move that comes just weeks after its CEO Mark Zuckerberg met with Trump at his Mar-a-Lago residence in an apparent bid to mend years of strained ties. 
### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President-Elect Read the full profile on Forbes: https://www.forbes.com/sites/kerryadolan/2023/09/26/mark-gets-meta-zuckerberg-talks-ai-and-that-musk-mma-fight-thats-never-going-to-happen/?sh=671046e73037\", \"score\": 0.6410185, \"raw_content\": null}]}')])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Search the web for information', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " current CEO of Meta is Mark Zuckerberg.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Search the web and tell me who the current CEO of Meta is.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'query': 'current CEO of Meta'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content='{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\\\u2018Boz\\\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": 
\"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\\\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\\\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\\\u2019s finance and facilities team to keep track of the company\\\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"Meta to spend up to $65 billion this year to power AI goals, Zuckerberg ...\", \"url\": \"https://www.reuters.com/technology/meta-invest-up-65-bln-capital-expenditure-this-year-2025-01-24/\", \"content\": \"Meta Platforms plans to spend as much as $65 billion this year to expand its AI infrastructure, CEO Mark Zuckerberg said on Friday, aiming to bolster the company\\'s position against rivals OpenAI\", \"score\": 0.73914057, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Forbes\", \"url\": \"https://www.forbes.com/profile/mark-zuckerberg/\", \"content\": \"Meta CEO Mark Zuckerberg \\\\u201cloved\\\\u201d an image on Facebook known as \\\\\"Challah Horse\\\\\" that happens to be AI-generated, highlighting the amount of AI spam on the platform. ### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President Elect Meta has donated $1 million to President-elect Donald Trump\\\\u2019s inaugural fund, the company confirmed to various news outlets on Wednesday, a move that comes just weeks after its CEO Mark Zuckerberg met with Trump at his Mar-a-Lago residence in an apparent bid to mend years of strained ties. 
### Meta Donates $1 Million To Trump\\\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President-Elect Read the full profile on Forbes: https://www.forbes.com/sites/kerryadolan/2023/09/26/mark-gets-meta-zuckerberg-talks-ai-and-that-musk-mma-fight-thats-never-going-to-happen/?sh=671046e73037\", \"score\": 0.6410185, \"raw_content\": null}]}')])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Search the web for information', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -14144,7 +17329,7 @@ "arguments": { "query": "current CEO of Meta" }, - "call_id": "7edab4db-ac58-4935-9722-0fae4e3ea80a", + "call_id": "ccadcdbb-cfa1-4f69-9c60-0fc50ae35f11", "tool_name": { "__enum__": "BuiltinTool", "value": "brave_search" @@ -14356,7 +17541,7 @@ { "event": { "delta": { - "text": " boiling point of polyjuice is -100\u00b0C.", + "text": " boiling point of polyjuice is -100 degrees Celsius.", "type": "text" }, "event_type": { @@ -14463,7 +17648,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "004c78cd-faf7-407d-ba1e-3979baf32bd3", + "call_id": "cbea2158-ad0a-4faf-a2ec-3e411bd5aa50", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -14575,7 +17760,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "a974f1f5-8dc0-439a-9fa3-7ebdebfdeb3f", + "call_id": "ac3bf39b-16e7-46e9-a243-130939094e24", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -14648,7 +17833,7 @@ { "event": { "delta": { - "text": "juice Potion is a", + "text": "juice is a fictional potion from", "type": "text" }, "event_type": { @@ -14663,7 +17848,7 @@ { "event": { "delta": { - "text": " fictional substance from the Harry Potter book series by J.K. Rowling", + "text": " the Harry Potter series by J.K. Rowling. As it", "type": "text" }, "event_type": { @@ -14678,7 +17863,7 @@ { "event": { "delta": { - "text": ". As it's not a", + "text": "'s not a real substance, it doesn't have a boiling point", "type": "text" }, "event_type": { @@ -14693,7 +17878,7 @@ { "event": { "delta": { - "text": " real-world substance, it doesn't have a boiling point or any", + "text": ". Polyjuice Potion is a magical concoction", "type": "text" }, "event_type": { @@ -14708,7 +17893,7 @@ { "event": { "delta": { - "text": " other physical properties that can be measured.\n\nIn the Harry Potter", + "text": " that allows the drinker to assume the form and", "type": "text" }, "event_type": { @@ -14723,7 +17908,7 @@ { "event": { "delta": { - "text": " universe, Polyjuice Potion is a magical brew that", + "text": " appearance of another person, but", "type": "text" }, "event_type": { @@ -14738,7 +17923,7 @@ { "event": { "delta": { - "text": " allows the drinker to assume the form and", + "text": " it's not a physical substance that can", "type": "text" }, "event_type": { @@ -14753,7 +17938,7 @@ { "event": { "delta": { - "text": " appearance of another person. 
Its properties and", + "text": " be measured or analyzed in the same way as real-world", "type": "text" }, "event_type": { @@ -14768,7 +17953,7 @@ { "event": { "delta": { - "text": " behavior are", + "text": " chemicals.\n\nIf you have any other questions or if there", "type": "text" }, "event_type": { @@ -14783,7 +17968,7 @@ { "event": { "delta": { - "text": " governed by the rules of magic in the", + "text": "'s anything else I can help you with, feel free to ask", "type": "text" }, "event_type": { @@ -14798,97 +17983,7 @@ { "event": { "delta": { - "text": " wizarding world, rather", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " than the laws", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " of physics and chemistry that apply to real-world", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " substances.\n\nSo, I'm afraid there's no boiling point to report for", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " Polyjuice Potion! Would you like to know more about", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the Harry Potter series or is there something else I can help", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " you with?", + "text": "!", "type": "text" }, "event_type": { @@ -14956,7 +18051,22 @@ { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice', celcius=True)]", + "text": "get_boiling_point(liquid_name='polyjuice", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "', celcius=True)]", "type": "text" }, "event_type": { @@ -14980,7 +18090,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "9e8b3842-e12c-4cda-a78c-e0b96290a94f", + "call_id": "fc32cf9f-db3f-42a8-baad-da88903b53be", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -15019,6 +18129,89 @@ "type": "generator" }, "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Write code and execute it to find the answer for: What is the 100th prime number?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'def is_prime(n):\\n if n <= 1:\\n return False\\n if n <= 3:\\n return True\\n if n % 2 == 0 or n % 3 == 0:\\n return False\\n i = 5\\n while i * i <= n:\\n if n % i == 0 
or n % (i + 2) == 0:\\n return False\\n i += 6\\n return True\\n\\ndef nth_prime(n):\\n count = 0\\n num = 2\\n while True:\\n if is_prime(num):\\n count += 1\\n if count == n:\\n return num\\n num += 1\\n\\nprint(nth_prime(100))'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " 100th prime number is 541", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Write code and execute it to find the answer for: What is the 100th prime number?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'def is_prime(n):\\n if n <= 1:\\n return False\\n if n <= 3:\\n return True\\n if n % 2 == 0 or n % 3 == 0:\\n return False\\n i = 5\\n while i * i <= n:\\n if n % i == 0 or n % (i + 2) == 0:\\n return False\\n i += 6\\n return True\\n\\ndef nth_prime(n):\\n count = 0\\n num = 2\\n while True:\\n if is_prime(num):\\n count += 1\\n if count == n:\\n return num\\n num += 1\\n\\nprint(nth_prime(100))'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', 
None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -15182,7 +18375,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\n if n % 2 == 0", + "tool_call": "\n if n % 2 == 0 or n %", "type": "tool_call" }, "event_type": { @@ -15201,7 +18394,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " or n % 3", + "tool_call": " 3 == 0:\n ", "type": "tool_call" }, "event_type": { @@ -15220,7 +18413,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " == 0:\n return False\n i", + "tool_call": " return False\n i", "type": "tool_call" }, "event_type": { @@ -15296,7 +18489,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 6\n return True\n\ndef nth_prime(n):\n count =", + "tool_call": " 6\n return", "type": "tool_call" }, "event_type": { @@ -15315,7 +18508,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 0\n num = 2\n ", + "tool_call": " True\n\ndef nth_prime(n):\n count = ", "type": "tool_call" }, "event_type": { @@ -15334,7 +18527,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " while True:\n if is_prime(num):\n count += 1\n", + "tool_call": "0\n num = 2\n ", "type": "tool_call" }, "event_type": { @@ -15353,7 +18546,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " if count == n:\n return num\n num += 1", + "tool_call": " while True:\n if is_prime(num):\n ", "type": "tool_call" }, "event_type": { @@ -15372,7 +18565,45 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\n\nprint(nth_prime(100))", + "tool_call": " count += 1\n if count == n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": ":\n return num\n num += 1\n\nprint(nth_prime", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "(100))", "type": "tool_call" }, "event_type": { @@ -15395,7 +18626,7 @@ "arguments": { "code": "def is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef nth_prime(n):\n count = 0\n num = 2\n while True:\n if is_prime(num):\n count += 1\n if count == n:\n return num\n num += 1\n\nprint(nth_prime(100))" }, - "call_id": "152c5638-b06c-4b7c-8e66-3e71d47a9083", + "call_id": "11645d4d-35d0-4542-bc8d-d01ed1758163", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -15436,7 +18667,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was Perplexity the company founded?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, 
tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Perplexity the company founding date'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was Perplexity the company founded?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Perplexity the company founding date'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -15471,22 +18702,7 @@ { "event": { "delta": { - "text": "plexity the company", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " was founded in 2022.", + "text": "plexity the company was founded in 2022.", "type": "text" }, "event_type": { @@ -15519,7 +18735,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was Perplexity the company founded?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was Perplexity the company founded?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -15592,7 +18808,7 @@ "arguments": { "query": "Perplexity the company founding date" }, - "call_id": "bd92ebc5-f219-4604-bb3e-6ba58756f291", + "call_id": "42bca45b-e3d6-40a8-b110-d9d77328089e", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -15630,7 +18846,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'NBA creation date'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=, system_message_behavior=)), ('tool_prompt_format', ), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'NBA creation date'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -15665,7 +18881,7 @@ { "event": { "delta": { - "text": " NBA was created on August 3, 194", + "text": " NBA was created on August 3, 1949, with", "type": "text" }, "event_type": { @@ -15680,22 +18896,7 @@ { "event": { "delta": { - "text": "9, with the merger of the Basketball", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " Association of America (BAA) and the National", + "text": " the merger of the Basketball Association of America (BAA) and the National", "type": "text" }, "event_type": { @@ -15743,7 +18944,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)}), ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.3-70B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -15778,22 +18979,7 @@ { "event": { "delta": { - "text": "nowledge_search(query=\"NBA", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " creation date\")]", + "text": "nowledge_search(query=\"NBA creation date\")]", "type": "text" }, "event_type": { @@ -15816,7 +19002,7 @@ "arguments": { "query": "NBA creation date" }, - "call_id": "d1fecd80-0ad5-4d1e-8719-ece63aafa913", + "call_id": "bc879653-70ed-4c38-8a7f-fa8a4621b088", "tool_name": "knowledge_search" }, "type": "tool_call" diff --git a/tests/api/fixtures/recorded_responses/chat_completion.pickle b/tests/api/fixtures/recorded_responses/chat_completion.pickle index 3e435911d072cee576bc2ad0ca617cc157240a1a..aef1aa45db1fbad744a507b0f021e2d2009e06f8 100644 GIT binary patch literal 541735 zcmeFa+ixVtb6Mw`@6-Rqael%w%TrP8UnVx^+{oVwbAeD#^uCGBV<1 zMie6>vLhnNVz+xS!a~9VCg1^ve)2vnFi(CEGy|+27Vy5zAD|gyFn*991PhDZr^W6c zun+tDzH{Q17hdWXWOY%QI#@Y8*Df4t1T>j&-VPaE14<2rY_g?K8VF|m*t1@RY~w@^Pn;wW%`xI_Ti)H zM@L|^!#8N)jt)Q96)p{!=N}#Zo?#8#-QegEe!k4y*8-y$yLh7=)NHpSe478{)6wC# zuY(t^qe~73(sS>rX~yhe@UByHNB{n4`ltNH&$e2oZG^2BfBOu0+_#P3X!Gz*u?N;V zvOaV8&GF6MP1@Q|4jMJ8Q65ilvGIUt4VCR_Wgz{!!}pzHje=-|4wQ`|z`l*9FhCX`DVE5G!M31JA{e z*5ys>ZEE0~)>r?XRmR=Pr7y_~*5~Bd^N?9z+_b)AeIKKa1wOSf2q83>y1Z z`i|GF5_|-;vJ;c%&AK#x&JMpOxxVLN6$CX? z4|Q5cVDqW_V6l6W!?0pLj{$9YofhP&%dbh=1Jg~MY%%!J&g~FvCECnH zA@(cU+V(BUK*qp_fD5(jH{a8|j&=u|um-^gsma2i*wN1Cjz&A~4bib(-pS=|?CL%2 z#n%S56WT7_P;2}8o`BRPF2LTI{z{v(DFVZ{`?QIzm4|o*I*W$gk2M$k4t`)&1xwEl z);=nKFcAJhd6hK+R#{TS@Qu8OKsm#H=aTA>c^4$B#x0N-ljLc56>jsz2Lsy|<{D`r z^i)S5I7+Mf7EDZ;chOlVn6zu0NAh|C7VtrzyCuJD4Zhb%%G* z&yjZ0GVuG6^)u_z;TJG$&=s*=hlAkA`jf*yjGo8%2I9WF4*B#M-qD3QB%6L_{e0pM z^-MkaX?AP;nf0^73}e-(4;7xywbB@IQkp)pes29who85hai+K3M6Mo3`$U0z^5oO* zI!e7|9DX($+U%?xeulczvdyEz%k2ey&RAY(R*eS!nx9))s& zp&{`k(!x7Jv!73`J<`3tkX~>+U5B63{qCR#fg3;}JbaT!?db3i#|+z1kc`hStY2Ec zQnO1#`MF?V7($pQFo9(yO&qs%ojj61Iny#a?R&9hF8|RhyJgO3iH(poHxp|#Wwb*0 zrBq$mL9tR&ODQCf;;C=3g++WDtUZj_x}x}at$gME_cv}_McXl>o@s}Oxiuy7%VU_e zOtM+phmkpy?J~U{R6H-bl%cX#{;0%DsI;n$6+JUriOh#x{QmHx63kvl(5*M02xAb! z$PctCt2`y#IPe|%%?iVQusS=dx4l7FBc}w+qTXyzn6_T+Vw|z@&5F2zS$b5FLyzCl z{ax*^2F9-N+rsb8Xt%t9YYHFSVCgpe04}Rx*DYIgw3rU_qpf#3cs_{lygvxg)@?hw?UwE8u3_WOxGh-JX!1P=wpHBPd+aK}o*dtH z-SX`Ky!s?+>8|gY@VCS_th=T!_OKdorjyZ}y+(25@15{Bh^Fy0R0 zXTL98*M<$PZHj%kZgexc_qM=T#<$&oo}xVv7R>VKhRE3w-}AtR=6YQZEdvJajP@=X zBaQU|jPYQ--`6&EuV-Vlgq`{|>dW^%%f%yIfeDD-aF49%frwwQZt6Ylju*fggi(#! 
z>sV$OW59?JzB%vlop|&1)TXPtIHN`Tyyh`4;W?V#4pa%X4TItMY48X*10*8?7SePu zJ{|afSBSob-4wN=(JDQSQ6Ny&xB+UKR!2gH1;z?3s%(L~nM5yE97ag|epb<#OlUjW zafmdkO5O2Ms@jLLG4QeD5d9wL^W*hdBV<8RJ`z=fR9kQz)DS4lSh?sZgoLCq;cfI( zDpU$xkK2ZtgDtA>xq;or=0)oin1LoO$&oQ#zc>5Cdyq)j?(Gj5%54T4np|zS^>z?b zW)c1#HdsjvmS+$n98$xkjmq3uMI?~az+hQoB?H;8=e2Fn0c1h!_0das5(TYu03y;C zG$=Rz=t4Y-&=Ny*0<4C{sVC_-bM(vZV%ygvg+Y$6l6}k`=mssMm=TOBuL8+B#3fJ- zF~Es*lj6r@wkIhi49FME!3-6=p#DPaK%iws7zNdmshNrRbp|v;36xY3`}Oq47$_i1 z0*{XbN^MAfEY_Rz4IuQLLVRG~!a`qTW!&9(lF2CGsg~UWU-SmpIPveqzTu6;LbTDWqr!u?Vnf*dA1fXw1NEwre;87D3XC+lsg6*e;Dv9hv6YYuu`JrZs@6r~>Na;l+?d-t{Xcw6mSA>=%5wEQqX zBp+T#4;&T%+t#OPpsC&tEs_Rni;!#s$gUtIz9)_~O@thUks+dY(_VQKOGIe5^#K8} zf>W^kXe-`ujT%N#>q=W5M(6+=H3BoFs>ebT!b*R@++ts8TCC>dLivEK zhmHo9HhC{9J&)`QaX~A8Z-h`X1zGD+Mbi$M|D|;0Y|ZqbE>dYxlsF5*nntw@wmg-| zip5Qw2lj;xO+I>Z;G&iSyE|dm@4@}fcaUxg_aL|5N63V7S?Ni>0BcY<9k@|ty-G4x zWQk2A!Fq77q}nFg5*LYdu$o%L!Kvzyte8{FJHvCmgN$0JC(!(%veTn60kTuE6m!v4 zo#vhR)AIJYQ}kjJr7J@&=CHG~v+a&u0ox!A% z4`QEUTJhjS^UBhOn^R-vgY7%;f{`d9Ghz%y(nbqB#HnMg4}N6CuBgZaqD6Al@4_U8 zL)sT`Cct1A>3CB8K(P>W4`vXY2BSkUo@{Fj|M;Tk!AbiZn3ETR(?#x6w~L1}4R6ubfksInOV2H}|H>F-nXLNPt)Jn)OV-b=zeyDRpIJYD zF}C37s}E<#Y*$n2T2BxA5C+&EYyAWIgw=lpQj_{|pnWo_6OEOVtz|M?#}Ip2>pS7p zRBDTy8r7F`rEofyl(a#Yris|=9vtn^`k|5#W0P_Y*3Om8?=UeW#xZ~o%S2iEku*F) zk8~#ldU^s%!ozZ|bY7Q~@i5Bl50v%dfCzabp#vHNs6%i!s_iNs{V8u^fVJjA}Uw1u2d4Xi}ZAa2?bKtMt?x`30X7bJcm!+U(0u)&3k1z zhMap?DX*&q^<(fR^uLYB>`-?-arCxuavkf%N|}gL1RXFz3Ejg4cM&G6Y|#OsJom3J z$T1FJJyxdkq$E>))%wNZWmBIwI`xJ5>f#(iyz}$*1+8OnS)9m1}6hCPXJ^e2B9QKi?kmRS8r4yyuv&fUD zS!nYoz zUGs%+-5faLsw{NN-q-fNulEtgprDlWyY`R>wC&zOR*EAwy^v2p-1L195uHsB>27qL zV;m+sVnE13!gtU3%ps-})J44VTqrOP8C1e2X_HhC9A#*}LG2ST`8x$0Xf4I$#vF!<- zvG`XNggcXeYTF%g2#mOh;uj=NqEs9VWGP3D?9i~Q27$iVS2Jv1$lu6a@(c#XcG=?q zB)vv@4Ynt`4dNT6;{bt7vfbmW%6dIsrlybpM4Jj{gQ)%OnX_ai zbJ@WF1B@|?_4;KO3+&E5 z&CJVw-}&Q8GBcT37n0S{mb1uU#2^3l{O3RCJKyQUn~kZyIR%gn?JW_It>1+|rs`=> zxuZJeV+SnV4)80@TpClomQH{SM5mJ>PWv%c^}A`Xz)5bFG1cxpg@+yqCUB~IXH504 z;EXo}AP}AWxQ_bIeKTXK>+;FR&B!@Fc-kCOW%B^Pj^vgfQ~ee;bLVp3jj5VT#o#=D z4p;n`s;U8e6F>lx?n>D}XNYkgQ@t*qfUOoluE53uEO!Dr7VQILs`HQLH9;~lClA3( zeNvVc4z>U2sG`u$_{ols2m}_5sR=g&_;PV(rVnb~rv{t~G#^vte zo51K^99BQ3O23;vT}9L3ykPz@RsS2Z_6CCp##Eh6MeoDlh4UK%lI)Pv6?*x`q zs35mdJ9Lp;{VM0DqS9*XyB4``I*tr<$ZIpEx_+I!(V(2Uj|>7}g;bOv(`(1ey*xV} z5*Qrny^!eH2;2-%u0*xsQbk~}bm}2kn2K6%B<_}X13L1PGsmhIk##tD)j&R&3?rJU zX3r;;>?^C9O%a}TvW#KBGTaz8PD{?wDvh_aYz^p4ryIk@T4^md#nBiBt6g7e*s<2` z^+~hNt*us!#!|gfsMS}NkcM`#Tv%UP#wVj>R_f+@eW|i~igoT{320xT_;>H+rJJ2x z#pu;D_GOu9OT~34)@}W1)pz!h5M;ANrnwI2uXwzwA~yu`%k(zO#nK$^%jdvYhm0{mJIQq~#P&y@{>AK=Y!U@>NK-uutzp!TPSo+nRmszJ%RcRH zOqKk4N~>%jQH!`;V;-Lug3=n!{W#!oZLAy1%Zp2TVa327uvgU!wbG(d(94UZN^N<) ztgo#+vr6|_Y4mY7Zt|S;gJNb1a(GO@!62gizD3o8Ea43u*{3(<>_wo;ljDz(ducjZ1zz834heqw zgHOE%Vom(l983fCjC|Tdl=SZn9@t`MDE!65>Y&$mksx+l@ljt)oE4|nvH0&^e>L!K zpJ3l$D2xEz3O_Q-lbC-HEY$A!Rpz+W{mpyGlMCvt9#Sl}t8HZJL?Vr9&1@otqvLK~ zekeZ#sJd&xh=iQoGY+`3S!=+(($O;EdI`HD3+D6;%Ncoo>e&YfZnB zwjxa!V1Bi1tXEcbqhM5wl|n@?FBjI#`f|ZouQk@zOQum-uAgGR`Gf)HzxVPSaHAoO zGQhk-|m zzSF68AXj0p=pgMU4!`DRsaPtN%Cv9bH+*~PFc}+04(#~+C$qC;Fd5T$6?gl+-~Xl8 zUW>s6FAPsH#P(l6Zh>yguyaV+%#bYImNdU&!UXh9!~YTnurcjV--|c*C=TkRkf4P+jo@|chtl2$X} zXhX(zKc|wr>sqJT^CaNR>(-E@+eR|(Lko%8#}ys7ns!-5+2LVb2kRW>up?I750P4t z2u|czyqz<&+7MUUOHrxcNT>ClUJus)k!e)Zs8J2|{|q{nGM|-ErGo)n7<8p&kmJuZ zKl=pGiZ3LAyL(k$`cjV6*U$1&jo>XB zliB6g33Nj`OmITn6!HF?>X#W$?f3429p?qxDqmOv_pj$C41jxE)7u&de)M`14jHk( zA(30X+cs@qSJY03lihOMdeiUPu*tn=wmMpyHZJcFro+g5VAKA(vV z%h|V`!DsR27!XsSqPm5N2}|p?UGE~la2qMKNJthurXAvDMQvr9 zCYat}Tke>3tI^YTc08x+*3F%rd3@&m)rQqFJ$~2GVfY>1Nq=WYgI!u|xj~uRcCBl* 
z49t4k*rRGoMMiDSyj09EB(5K(_+FIxFR0$Oy|a_-FR;hAx@}wx>X!P=^Ls6{qp1D3 z>q^T}<=DKxvoq^8b=TbO__L}tfQ17Q?rhuWow{yET_TPFppLP=ueN*#3^e#Muc4bp zA0{3dG^E3YGiTdwInF*p?)Oc+4YOn0E*3Eh46{(fDD7`-Xq}$lbZ~v!*6^S5{zp9; z^fy@W+8xLBF@fXxF!X^oif(zuR!eW|OW{-Mn<7-OJ~G|#Wx}w)3rxEvPsI1+XBYf- zXW_2t>xDb~t*}&ByH>bu!++ha`wNb1?ONbsScnQK%DZQg4wwxwJQSL&ZeCsIZ_y*0 z(Wm+NS2ZU$XZLVGn@wDg=jY}uKQFXljS0vnmwM!jw1)>!F5gL*pof*W#htkj0o9s_-TmCbXNw z6kbd9yc!~u5Dd}Kn`C%4u-w%0X3H=tpo=s&f@Xby+$UV(9o@q^vPF*Vo4rHFg^$(U zl?`45+)k8w5Ykq=-1Pm9x3RF$?CxTw?>6+hS*$w?&Zm9W;=hD%0!M;U@G#Sw}4gMJQ=0 zgP|OW8x&6)f#@eUh?>M*;85s>rn6C%7u-lQ{SNy*$a}#-L_?E?1JouE^_pS zZ$i13jIDaj>H69Zi;0`9yHBlyqT}u^=rwO)v0PdymeyC+ma#&i55>!=U^Z|%v>>)+ zJ_gg#H|tFs)H)_cmop3sz@fY={FgF_!yf5NGFU;6pnRFni)zxvt;7ll^NF>+^!d6&9o50;Lu1i6O|9Lge0RGxSIf4I6i}|_9NOxsa3oR+oLZW!=U%tn zVQCu>W*e%Tf+skIFmbGonR@iHjIa7Zjqa5lR9%EI^ZY^4S>#u4dzcQqRIs4K7wWpp zA2%Evw;^aYcG}Q$bhl7*@WXGG&Gv8ZXcF4M6jm~dEu2kIk}6Bn-cdg%`+_|V+F%a| z1P@C9C1PcD0r7wgQ2i}E!n*MJtH9PkD?hNzLmmmFq-bZBEh8GT_@(dR9q1r*FNcov zDC*L#$2Xn)jQJFxK4jX%fC+NLuo|*s1Ou89w-DW@!-m7PjfT{ zA!1-kay#UX0YM&2I4Ycrh1F}}YA`vSz02XvRnb=C^0)0U`h_F${cXFD{EuCz=5Vy9 zpR&}2)ft0o=pAgGg&K}t1||`AaJ7R(+v*8&fGZO_AAm*FYy>0?oKXSro_)x7nOGPR zZlyScxNxfBP6tX(I0;&M4_h{8qK+dGw%r!~@u9BlL$aWcY#b6y^CLcn4hw$5o`hU~ zcps-;R70<*rY2-W+wpLG5XAsHE@E0+mTb}}G6Wt)JKj8(_F>b4u&kr`0fJgR%(`-4 z=ewh3g$+Sr3c1AE%N62%7g|P3^Y2}b)H)282psjT8F>q zPViVY^%!0xU2v@z$`%f;ELagZdFXOZr{i#fqTgfDCW?>fBZ;XnhuOnbNAXSxRG00+ zGtE#!LR_MJ50X+Aw~l2l05c!zH~Q1SsgKR%iN9%X+jh5I!;C?zTm9_x+QQom};c~ctE2E@(wyKPiV4#+!K4ZZy+-j6%5Xw06={<74kh?q;O^`(UU zWry;l+ky_NW;YK1-l5)+;pE)mR2i;$;UTZsF&lo7U4KVr#12Y9^LBQyt5*%^H^PGf zBSbK7tGg`+vF0w0sZ95rkM*g%}U<@_iLJ^mA6H3 zq9HOXm0lkS;&5b=$HP#&fQU=M+b%m_9rT8f6LKA5#! zYhrM7?C^*q>xMdqYqrb-Dr_RQRNe!g=P9n`$e7;uM8U(ER8BEbW76vmErwOl$My`X z0}eAvL}7KR^%@MJbbw3>rI05THW?0SZ$gp;O=lsYQL1luP0ZKCM-J^R+J{;_G4#m+ z94icJnbH-MWldTZyp9JVCA9h#DC`Vs{|;&sN$p*nMY}be%G(Ptqp^Z8M+FA@1sK?B z3zfB%#TBExZa|%1YpgHUS1aq4a=B46mK&u;qg*MKKv}oAyr7EqaAr@OQ;MCQ62b|! 
zhueFky)-Nr+iIwmP!4m>i1q^WrYJ4a#OOfFL!PosQYHp^SJUJs8(7x3U!0)mVDvr+ zBskt)hm(1&@Hrg@4xS9dD)mri-EfR^ug7; zvX*=e8aG;5UU~Q0Tqt@{CQRr^FbSv`$K#z;tWdCR7!o|K43TDIH>`ua)vfZHwg}ZB z(alL|gPWBCtSpj?3#->;ssrlp>T#`1sSl% zsg{5Xl|}#z81x+(EoB&q*R1KnA_l!f-mOgMn~!0Ylv!L&{KU2W1cMS701`iK;`U+J zt9%4j6-o0A%*xT9o*;zHuRKEi3f;ECY zU8&DIvw>((@8S-K^Ah?&1RWF%8`dUukQL2A_biCBdW+a}&NNkq!ot!_InY0fjP$rXkn~(Z4Py;P`UD;A~RkD!@h`ieUSjXtlQ zu-vC7^ecj5h1#|QUsbecC>KWPJ_$|lYc*(k46(QY)h}tjq&4TPA}Hsi-Kg zF!kZ+2nSqI3xy4s>Kr)dvG8#6kOjmeeV}QCd$gQCpyVh)8I}AYi{7F1RVvBOPFPT& z>T{!oRSqWz^8g#NNX|gg1jniG4C~lD=wgO73@ZJu&M6fNqNgb-6=P06sZBqi6DVEAO|VHYIdm}-3ZL(u+k=ai@4x9M%C z_;14PPn8mN;wM2Z(LNKVhK(Xx5OQ%4=_Ub^^lDMyJpi?>l^aR{+n7Z0YA61d+fY#422MeVve zG{I0!k^UdV9OMPtW3kHXOM-?F&-%B(M_@I zVvCVIA~pzwDHrfF#27{|Kr3>Gz}+rR92i)M%-xuo8B^s;rxOhJFNoRsG1U`kE8<0k zsH|Vi%A#O=R^Se#DU0|79O1@P$6RRZ5Irg3El8U1maegQwqn{!yA`J`Y zMFpM*oi`#%?s-6{$n5++pq6RGmVhr4V680^_)G+OG1v}=Fm@kJ(jlG;+=c>?&0yRa zjQg2#Vq`FGWYPHI$N2O0CX+J^NyPQ(U(CK}bnFRl-eCGa!ihuJHj|l$dDi6b2niua|%`9-d&M`S+7qQU3Vot+m=pdDUE5DU{Y~6(ovS2b<+$dA+c>UR$fJ zRqBe0>kML@L98=w=8T&e+X~6c$it!bk(f&&gIH$}>kv}i z|8EAd7G}SQ{xsueCV3?#Nx+7jBzDQTnKN!?5WpnT%f(ca$h($tGiTh))^5hloN+T} z+{{Y~blMNq&bXN~Zss7FL9|__?W1Wz?R2_=QE0aZD2N9+kq3x~Gj8UAF3Gr=vy4T^ z(LAvki!zAyaHokRBnlH|03bMV5bJXS-k(SP$;x~khQT};0FgL;Gk5v@k>FQDSvLDH ziaF9MBa1!Wfna##oA%5!6rBh!Lb=ZQ?vQ76a-m;ge%f+hHjvB*io`fsepibB!y>0Jxk+RwAfT62_1jaT}_R6tMJ&6SGw5KDp zJHR6rQsFYbZ(cKtyF#svvs|jzV+_IbbBXZmy=`wXfx6&(n1mW`S;@*91LH`A5N7@i zlCk58*F_p}p=V=)b+4%1Vj6B{8fRLo#M6gOtKMYGeb;Gi+v;lzIhu5&*42Bat9XBD z3+c8Drhle$#$q0CN9>d+POy5?E$<+(ZHOD<4l?gNMpEG?=uW*}H#=yAt`YUi+f}}zWKyw;i2es{nM(A^uf&7O2$38cUV2x zhDS}Sg$@leaSPF0Fo0-3#w|*`g^bk5vuw3dC*;Re-HK6xxuVWx0T4_RY|p|64$eai zxG&vTchqnpFD=t0VFmnPCnn8=R_pE)Z~=3t4ce@-n_S}QrIEgQM`26|Lx!xr0*|8>1C48D*J z$u37{9zgl1bz3@9Sa)GmPbtF)F$1a`IIRuC7&=P zRx(}cT~-Cf$?|8czRxvnwri$ja!00nuM5GTd)m8ZyRFOc{^ZcK7qxP6`I>w!;lrG~ z290A~FvEuRDH6u>)sRV@j^p|R6G08a&W_BJt)_{}Fi|nOJ`f44SvSh=y0atQh*{HU z=}3C+hH@L-=R&|)Mf0VJXf2h&cVK=$quY9ZR&|ZaG>i=4Vv9}fJMnHWoq~qbPI(56 z93yr5u^sBcffj@bcUh{uA>biPr!+q;1MrYWJT$T27~G6b+K5EpcyPBwDcvEikTss$ zg-Cr!LyGjU(MlS~&ZDvnTeeTJf>l+=Uc+0%($1SFy@EbH%^(r(LR!+72b*u<)c-h^ zkC-q$BKc#&V#!bm*=kYfFh6yF;iAUq+JWv`rr%?4#F8UzbjD)3WBQbIzP^vegv`y( zK1F%Jo`k!x?{?9psd~$n9=aZtCPcx`_hK$eb!3_L>pz_yZa|wv47g4^ZdVvtC@7-E zR803`b+tCHQc9~|@m&>hNxRmypl$?p3tvwqO@?1>Y&Gg76Z`h(1@P?XZj3I_2I zNd{FlIfhDALZ?Xy=*MtR^UPMG5N+yAdN zxipyT9KDcU@Jie$sfG5Xr=Z%xlxnD26Q~z>%0pIp&PG{isdhm!jv{k!CxAS08gUuVzAa2cZe6vT26^$~lB=9r+U00*r6*wm*2ep;Kv1+C4y1)G zhdWghJqyvp*2oxJO8sgCL!FRX+^9s`(Q7SJ+Nx;}O;g)&&Bn$~&1^Y`mxF0njqVHb zN8`PmWWj>OCP}>uWkf62vUxi3l?tSK7c)cpk@|BDD!!cNRRGX&}}b&C>=GV*l5uX*=~c4olmLE$1*0M*Sc1?9qz)ivxlmUt|=jbLoW+WQzy8sz}qyOv|SgQq^Di3x2$UT zm0J%?OVB4wLySvj$nfXcalz4r9j0mZrdi+bI1uYNTgP9jh$-8=oc?|#^`P&-AH~dj z#g6Vb2ku>ne^>ke_I4;9`d%6c&|xp+5X+fd89=o&rB_j0Q#H(C<<#Kq?DRJU z!-})75$HH>BG)%SRo$F!4kaH1}hFqk+ZlU0 zG&==@tNpSZu9+O$sK)o0uEZ>n!Vw3MQ13==cFeD0!}uaxPqJ z;gN^-heY4kJ3+74J5{chmJUSCicXvWue>$W;r= zm`ni?z(KsAS9dMiC;|K9W`)&_3c{MIRC`pT%`=tKy&yE9sn?-0?wYcx(td_>7Rsh7 zNrP# zt{#nF90 z%vlq)^y8@&n6c0|TroyMB)0;;UVKN(+1-T=k;?&lEVPWskcn5R;s@4SV7s)jZq&+y z-4KOfx{2zePEOnoF_|!RnP`dHfMar8DDY6S9~vK zcLn9^g$Z9zU=9BCmNO>y-?zEDh$NYp9fVts+W09M@ssD99dAsP+t`?yDHj3A!+`)Y zPh_=i)=kf&ZvzI`eRy`D5yR!v>WY#a9hAmY<8Ow0ManKwoLMZ6sX|M1Lnal{#bCFi zwi7y+Q|?TETvM@_Hs}>+mN4`;lns-+pKPwNZEH*&`Zl!I!1KgP(VQ=1E~!so`EGS_ zES(us<%$(_*%rN<3B9I|ZX-MfLp>Q&Ex$92_o6tnjJm%MiGyw(Q^kIubVO)kN+aai zz_P{j3H=dY8eJE)!@q&41btYXnR$;02q=}hJ*Ik>LqiL-#lT)0igo2Ak@b^cz#CIV zOAzQgJ3Go7GovgP;b|~!0}mWAW2)yjV^>X|187VYdZ#usGn&(BT%xBEo7>Z#E>?UC 
zk}^F1CcRu^>diO%oIDBd+L&rofYclC>Y?L?yY$9X$&JxmiL7?NA8|}IerI(5<(O*t zJ>@21CJ^Q6!6&L_eSTA!3S+9`Vr)Gg)mb~HnqH~;PE{FdM{p9OPtzXaG1YqE1kdzL z?DR^xrN>mwOY}vb;SL{D)i#2q^JI>jP3&Y$xRFotB9E!YWgNQ!-bdAGviW^*2Fz}sBHFQJh**Y+Zj_o6=c38@j*MWcZnXM6i8`ImE1_c@YI%v zQ!BjC;t@IBQZS|(&l8A1U(FDP%-B%LGw%4T1-XQWG;pS?<1H~wGvcMCkG?ggF8p2! zU~)QgVN6vlu3b~8zf|p%oQ@sp(JUQPE$0%}pwj^>W2)edFw@L1iA+~#n^K(KY4HNf zf{&b`;c+EA~tMhTe54#Id~8&OVp3VfhSLxe*I0X>$)O36e4U;sdc z?zpVSZ$wiAa}>Jfiyq_iC6C4n2jR1! z<@zt>uEeRJN8!`DBpIctRUZI%hv_YilJG->R1so>S*n42)myjk+`KZo zZHH<%S3`d__>`<^iu@3uZTZC<7{oFpM~38>d?bfjvEug?IC=ir)}#^+9Y#oO=59VU z>s?>18m#u%nY{T6S#31MA#@T%X z>B)`8O0B$DHVW&yu~?`W<>kV9LpKYHEA^#vqf##~)t8Z;eD9xQ1~MV}-v5dZNB{Zg zQ~88BbjB!-)Q_)Z3iN;Y`Amg<7W%j+uX(7q{OWgs=M{bBX*tQ3^w&ZyLAqJ5| zd>%x>7io$dxsXGCHpJB%+cpSQu;sa+1ppweZq|U-1c03+9kA(=N`h>~a3y#Mz+(V_ z#33z9Ji}!eBM@qPMa2aOOsIC~>K&ZJh_q6>e;X*bZTqg=dC&20IfxRu>ADUMXZH{R zA=iZ!$Ix=|(>YXZA*k7Gb|T7UC!%N4`4wjmlztC{M6gf-F<3;{w}kEJn0<&EhL<;@ zUGXsQO~#_YtO?4VL&pX8(U}K@>_qfMo|C?OulYvvtI3nd*PGdi2#Wb{A616z8z-EI zVl{G{>hsYzQwO1>5=7i)t@w7+|iwRPg`K4pH5 zGQvaq_?0}qeS0Q5zGcTZgr7N`D*60izPMijkkG;qCK*SD)BD7T0Q9JUScEi81;vW$NItzTP{bhJa2jK( z`%PIY*B|~g0RJU8KTz!$H*7n0!6vVsqGG@VR{W%Or`2tbD?on}XHwLXe1@9e>p<8d zP+yqiRkcFKr?{8pe!9uvKNwSwmyr1itmbx|*(VHykbo}4^@Qj@s62YzbcK46efz>G zPR<8-Z8js^4`5SYKM~afwCkhKOqi|duxZ*LbMv(OrVF03rxwVxIf3Y2CCRjGho&)r zcFho;*&!u6q-2Mb^B+BYWn9pnbwsQ!Te%Gujw>y7} zvApB-IUSWzzhBG>EPypl$nTWwW@sa{0TbEN$64J07n!kCuQ%38tA$d{SSeIi>y^U# zTD?@j^_9|6ZLPLc*Wn^N`bz&0923)c^jT^6@lkT$8rtsTSJG!SJ83?vi|P@RYFN+p zPdgYelYSVPe#Ft8weFsW^?O)lLLl5I5kUQI5x136?X)GtYIf~?56HM{H{ZQ|kLQKq z_k$rEtisI8kV87BEd)MYR}AyFeY2gX6y3}*911;^N@QE^qo;>%-vNJY@bE@*1t&F6 zDy4z;$)nX`X>pOf!B<`V0G1@}Au=ym2RaNLS8b!G{m8KoVd>bn=0W6V1m{7r8mlhuwdjO@v*8)x=n%4svWgBJdu8*Wsbu!*F zC<{q8b_G?Hzj}L4&MIwk8iUrn1ymNp$kA4XC5-FGMqR)8V^GsO?mpwMeC@{_)4qC} z@dPl?-~+l=P+gMFiwwn4b3`}S!Bvk;Stv(3gd*KH^)^eofrDNRC!>9i3!hZ&R;R%0 zTYT%lTaW?B!g-HL=TO33kZK3dvWKL$4->VOI)?qm2Y%8;ytmv4rg!`b{s!cEv3y0C zk7Fdjn55`_MAYR&-!)CY_;?xU%=1l~{`|-N(?-T~Vrtu;h0(6*)x*Rj-@@SCyLMGZ zgIVKzOa%1N)$T5HFD}h%<@L%s21_y_;a=fsMmMTVGS`qK&Gc*R(5s&3U_ORAeUK^H2 z=r|jhIWIHk(P4m~n+_FSvyB-cxsRC{w^S-q`NePewhnhgTYn15b!6D|Hjy^hH9ICO z&M-<f}HG=jVAtHhzmjvO}h)x=1CW@0gtXQ@R~Z$%xL zVHpat9|ifU0K^^$DpWfQt%vj9i07~jvngP)V}cZZ>lC>9n5y)p6ij|hm5{)PQQ`Sxs$==wkdQG|^z%VD z!i#?W3ybIf;-s10NkFLWntrwBSl~I|t#%v)E=j18ZhMD_%z4yA3=aaNcnA${($r0W z5_OzbZ?6kluFO7`vaTXm}|8SifCP1m%W zrp55SRNRNUT|lbOLHM)SSWi|D#_UZa!BA?6F#ezoX3#ET;+L z0C-F6=ZO_aCV)tWv!YlbW9`5%A84FO;~-dDTC6YYwR)klw6b2P=!<5dwzk|T zn5&iL#m34~#ayY=p#If?IUpni`J-P>bp7$s7owg&KKgthnvRb?!RXRo>!rv3bE(sFmcFzshzE%F016Wdghrywyw$yN}}P~4W- zJ#(C+&&5VRtUe)CL9S3Pm6#s1`Z%9M$a}}JJ+rC+X{gUVq`X!LKE2&hx+}v-J^b`A zxFT_-ULFuX1W@nxHn*J4{qQ$Ln#;wIi68?O3hohUU)vpjNH6nO_O0-iCt`KwDQ>-+ zxnYBu(x5U;k{Hy@+$UI*oGhzV?vx`f551H?|L9MyPcPO_|MUq#pG$?+(zU{EkkGo_ zIuB`&9}0=dUB$kecbyhiv?4aauaQhMWHAW_$%3J`g3$*` zEeLN2%txBmqt7X-pyU1&!bhKPn_zXQd&l>VJ`3ht+dodF&pMisi$VZ#+g@?_(X2KrLU2}DWWnlVXieu314Bf*0bA|vq>swpUD&+ism!ISCh$v}>OVhoK@OK4 zi3%Rgwpw~yuVSC8?-#*5Z??RmTq=?}nWE1-ZW_OT+-&^z@q@&WM55)4l?#3b{q;%P zsvoMF(CPs$*fXGyS}-CQ#w2XQScb%L&^#8Ldc)+lGE(}k-=S5hrJ&_5ba&XpMLG+ z^xgfHQ^ScmTzZ*+v-mv!< z_%o82exVhY%F3UkVsiEs$$XNRNrcSKgCb~l?pRvT{KJbj>kM&yesyqQP~x`j{ap^p zfA{;x30f-{gcZ5V@zGoLx^C2sTCGrT=w;xzVS8Q&>20CZs4tgvb7{FzE<^E-3^B*5 zz(2um^bffk9ep9P@Es?paKARB;}Xd3y}#r<__zG?SMg-vyhzxfRU?K@mc2j1t#S8{ zx)GkEXP?8qe!)s&xBhp(^6Ku^lWM&_&*l_O`9$1UZpe&(9#-u0)GIQiB)GVTRExei z#DbcogW7lobW!Cilj<>O}fFvnhzlbBYjrLGPoDp=6 z;JUruw9JMU)kFJn1BAEmivQ3tTiTEG4pNHq3EuQ>%K>SDcD2#K^Iml4!>)(2?pWb% 
zw=7$?g)=uQ3!J?u^2aSO_C}?F1Q&J%lG%p0U2nRu(Oq|*h9%v1krS#8zTBWrp!#*q zLv4GzyI3l(U2{HCpVqZ&jt3Ky-wYqzGV4v-fvKPuJ^Mq`wyg$ganC#iyRUA9_uetl zm(guEpdx9H%qEN;;SHf@A-d<01*Q4A?lP6b=nqs3uuY> z*RU;QBgP}UCI%pU!voq!K+YPq(*V2gUB`o|4ZRwcPvf4x3zvv?6BL@-&E2?w`!0GG z=~IFN=C%7y%fk1&RvqU}4e$2-XnY254!Zd&5`btAVdQD~AB9!pECSa;Vw5;(etKVs zhd{m1^I`V^I=HP}ZCS{j8dvw0UN-@|*w^k^$UUQDc4PLnJTyPPCzw&!m=FqvO6^^c zF?Y19%{F@UYO8QVZ?+0wT3=tpHCI2Z1}hxDFx$_^^Wa*HmhUsJ zv1eZL$dt5S_$XTA&shZgmOB5*A@C<%6GX)i^j_f_7Q0-MKkqrN^%0CQ@Z^6yVE zZyOeD8}jYGj)};FWO(l*5MjHH)ArP54hplCfuR@}9FcaK({-7Hrroufk>+93>8N|9 z990)pHdZ}WZ>spdK?%WbicB1p{S%_1FdJN2b+O~H{>ro*uFoR#3{d%qRSll#FzcX(AX6Y#E*c6Uq3RgVZM0M= zQ~}q%lnpf}06FNe=Ys)dvedeE^As?cVxGn7HS$p{dn&N3VHN&6;>S zfQjVKMrA}|g}nVN*sxs49!dl!96>BgtYqXlXgf6v^+28h?DgSGWDrdjg!Bmfg7VeI zA6|%h5n7^8;}GSCv?R*3fH#X}$=b!XuZIeQ>fxYoWqP0)6yniDs25}vsHwwQ0@V;5 z97`i5ehiuRlv1G@Jg+sGAz~MokX5cvgb~#m56yfyU*}P0D1oY0oPKS6PYhlL*!d|? zihc59rLHmr{Y z&@1E&l0_C+g#ravp;Rs`R{F&Zheo>}4Kfb}GSsSBm>2CXHctF|V%zXWP9bi2Xkh<( zqq(_%+xFnl;TCtxwG6$7v9a2+J{YvLHx zw04_FY|<1hpW+AkMzE1mL}RGD4!1ewZ3~xqQM+{MlG18!I5uEc82B;{8){B(Ipkhz zccQg?+lJ22g~4VLVLd{+LBB@3O7UEW$<0liPZXW;@bR_Ue2Ht)SzGoQtBNbBS=K6X1YvDng0NL z1lEhbHGw4xQw3}iZcjIGEVLIOb(R)N>kEr3YM}si<|(BoKG+l>wD!G%ZWSOzAhC46 z(C&Ks1*D2CfYz@`6$*#%f{hK$E$q^dTJW8Mn)M6c_nMn4tL0K<#avw~Ev=?S!AhkJ zW5V)qN$}+346vx>PC=+#-U5#BzJLrN7C|jo?DTwi?W?c`_(Yt>-{@mf0HkV=6TDh8 zxmi2z=H(Px=~{5PC{hc^p`7}VyP`NA#WapNmqIq4t0r@#iexdTLm0!6Q7E%r5zBj;rhKj{+dSmunZu5SY8_G z!^-ZlR`TcQbsR$yaDceFdtbyo_x8TTzi~C9`pFOdzO@%s$-Fki0!=eW-)@_LK}zca z2jR_V(E!a$ z3c4`yh>Qvhlh`DxB1ZD|+{O&^Nm>of)-G>qW%ZSpf?!z*f}JK@#?hrzi|P1{vY>~L-YS<@D$7O%6x7Q`sZd$2EEZ~YFj5<( zvT2l8m)FWGOUDXi{J+s~_D?_c+G~U?3*;DB<@_0=cJ-1pPeA#;(J-eq%|o1h364%MSTzM zPz~4yCbCa$R(FMZb4h&-R8jRMq1~wqd@(ATR`24U_;ai=rLW_$SY_&K3!jU%kodV! z4cWH0?LI|B3B2bXbugmgH;g`*g+JYKx9!KCZveFL1fOoYuI*@xPr$3Ac|_+BibCs9 zhy&|kuGS7+y_3V;3-N>agwifn1RtEu7=AK`IZ*+?K)oW~3vmMf2P(Ax5q|~(OkbnZ z_avwCo6qj$9Oh7dS-#|{aCpC|4SGQ7?_REyC*4`!N2f4@sq~3)!@qkHd-B-GjNUUr zg@lO%G`x2JB@_reMy4oe4}g=FKj2tLqFKl~L>LL3U0R`~4hsD>(mL^3)KZ~WyNJFS z?)Q(kfNs97V*PF-cWy{}hpT)C{@Ki367nWO^9~RT1>~sG0Q5Q{Lp*_<16sh=0fjhw zdF$rg`_)^w@7%mHyKR#cifsqi6O^U25GNp^A4kB|4>!snmehRg0k zd-qwp;qd)sG>IL`g;8W+Gy~6;DkrSd=QQ&2lZoI} z_x=r$k@Ol3IpM$)mN?=lF_|>O%&F(@kE-Q`uBljYN)|AnI;DkN;^AO_Pd+@t4xcC> z+~OhVGQXH|tz~|(uJ6FEbc%WGT;ZE9)Q)zpnp`B1wBNcmwFbA$FZR~G=G(t_oF7N{ z)T zv5}95u;q`ucDZ-hTX^uJYx@&l`O?)^`6DPW3Bw*(L%`5sJK!LD3hP7>L{f0@0RjY$ zklN(Oa(`blKnw|Ih#o}t(z>UxaGt{Tz=j?zi+s^*DQ3Y#_YZTNxAFV%$DzgnD`*%C$Rf0*VSvv6a*;`NSG$R zg};u$fS))V5w(^SD;Ta#eD%MpCYdaJnO~r8R7P?^#f2hb;@h#HI$q77L(Uc#4&Z_t0DH9+Gc^U8(P0zL5L*ZCWu+U&Y@Gnzvpz$|a#JDWU* zk$j~LO3k2d2d)XyTA8v~CcZTlPJ7liXcAB<&~3^87I+`Hv>}798X&sEW#GSi^Aq2y z-;m8A1mCcEJC3&l#xbJ{McsEMz5%OXz%)Vd37j=+FY;A1FAsY>m`l(Iw-*rk3699} zH)dvbb|$`I@9fM3#y!UiKc!&|Y-^RmVB*{I6<{l->(KY*J`6=1U^E8ECiE((z|f z*<>E#f5Tf&%R|f?_Wlxog81ftijVj9{u%$q^?NxL-;=uV&(ZZEs&i=FMK*&D1X#uX_nT>@=0RSkGE z5ne|lY`ME@m`mODg7+!Y?yidqSh2T&6g>oN28^}RZXm#6Q zE~H$Tk_9AwN4iR>xgbpSv1-Z_6|U+#)$li1GsEBJr>ceDWI{R!zs=m+ALJzbPUU5- zA#`<$25LE+WMnD;2SdFJSeSBfhbn0_NimL9{a_bd`_H$DfAHFC2%N%y0Z#NqN%bED zkT*<1*wA;GMz_|>tC>0XICndQ#O0nKk+g2-=MY!znLgTUm{0Q({5nk|i@+~##}p*t zP@Z|ZKosm0HF2fW@{34q1#lQVSG%6|k+})3vH2Cm!}2GpBM>mbqzy*@ac;WWD0{#Y zFhC5nBQar-osv1I-OZc>VT86%RU3a$lw{HQ+|}R|%r@f`95+yn=hK#6Gh5+`l_BoQ zrKxIdE}sxJs$siG-J&dA+c{T&Wjo6?191Vy-S5tLw){>WoCjpX0rMj=kXM3lVALam?hT$w4pomj4We};3#9*rmRXShXW zFSbNnq=tx6{^K_?AL+#N@(wZd(3y}$=8nc)clrJIZ{N5A6aDjHu$wH#+RR6aTR*0ZHIR1dWNBruT2|$(k8#K ztU1&zvN3F*VD?iTb$(MC$UaWAPO0C9o2SCc4Kw+qs~Z`8wsVej7+R5=j16ht*$f1OGj31AmS`1FZh1 
zLL%OLc3Z=XMm?Nj2kB2^AEQeWwvb`-)-xJ@?+ew&q zg1uxoH2pMgkcn^VdujHPi<^ZezNO#mTY#rAZ%lk+ekH{X5``a3e7%1aghz@Sq%S~X z;%mMT1&)lyyfN_&T2l==Kl8@Kw@gz@`(y*}#J6Da{3hs$Z_`^t1Bu8Nl3?SQ_*Sfh zi1K8ZQ+VxUY{$g6>+LA2ieXwqj$v}hnfPY>jg?;VkfV+e6JP6ZG7n21P+S3ZCcf(H zR)|LxaQLWf9pJ*4_@=E&!lGep9Wfew7*c%V8}^+D%N{y26gr#%lFB79@lALKaOV+W zVH4k$)e~GgiWufZ@661^H*8%ZZqb0@E~FsXVi;z4@|zYpmzCY2t@Ng)8=1VTQ9QO?+E! zod19pag0*amR^|nR#sxNZ4i!*NJ%i>R*eri{8yiuH01Cj(x8ASnFzORX@ z5VWctr`6l*T6J?E4RVGKI0YRrNvVHxy9oOW2DT~#3rv_IHtCQicQg0E^t-No!VJ*0 zmi1xR0>_K3T{l~GtBZV-u69drx~APUE#{w2Wdl~XR~Z2FOi6)RK*;Q?%EX&a%L_b@ zG{&leBfPh!GIgi=;Zf7XA^;6NN!-~bL)Jvx3(4Bt(m`CvUTx^zmcJ>{PalIYg~+)Z zX2Y_DkT`z=lDg(1rm^ZFPORQUYMA8LfjrC);W?@lP_ggWrSbln3~DPx8zq@C+GUm9LiGlZmMPDe1dU$k(aY zmuiir^;)64Vy+e{E9QDZU#u(@%1fp7rS;N!#ayf$@BLfM*`u!xjNWmOzB7OH%gJ#( z`a;xA#|UOr0`mB8cVigJUvhD0UU*X=rUC zdb2{gRLbEc)yMf90%$sp?U_}z`$7iE_BXHPa~|pMphtrn9_hGAbM`Q}a$hdwvEPE3 zVz;-s<#g_czdZ~t=KWsB+|2n-r`o}zzS-`8PYwErxmhYA9ebG}S@;d#*3sOy{t7n zv%^v46jKL{Ar$nO5xcI5Tyi+}dN}qL$J?s0YC-vzXtvU>whr~4hpmhotO1Hsq>JND zMNX#{Q~P1h^L-rD4`I40YB_9fXET$&YbyGnlTBReMIa~*Uq#+j+4?hn(rn|;Y+ew7 zSP+o~Zn(`Wv;FM-x4GnbcruMYiv#9^r158{X=S{--w2Aw=cn8FvsAAwo8_f)p}xGl zRH&?1Y6acI*HvR_ZE2-Zt5u9OW&GLu7not8wP){N;!D!Vv-huXA+zr&d+!-WDA;%0 z&%QkS&g5z~nQ=&LAyoLFV^%F=GdG0)W~eQZr0KTYHR?-?^~QRmVAPfxh03D7R;cN^Q84tiwe`|cZF#8%E_gv$WtO{U?q0XH z7o#*Uj?CJ8iI%$;s-$Gbx6JtV%*d=z$IH~cEI1)^)G~+VSpfw7q==JgsK}5?h$I;m zR1SaM|CO08cBdU>Y&*7zeT@HPCW|b<&NMbNu+ee8`Nb%xj$4%k!5w|(J|c7{{;01> z3^9SVIx?1tCrNK1xL90D4HzRQz6A>sa`~R_x;^c_Bj{6=!(OZPv>z6KRMZ|ghm83s zYF9l(!B2dH)^bGBBEvPD9vBO+Tl0L~t|OWgA^e78dUnp&ng~qA9VyK0#JB7nc+JAF zb&eI?mNN6&`mP0d2Qj^MM<(IKw}sOV;rYO%IEN}g+i)!#$uVKaM{;tnix_FR+=QWt z*(4^uLGZhEI*4KBRAY3vjycGfYlJv?jyghClp3JIf1*steujIrJ z#)?)mHKYgj5L;$2s$WOMSnnfzMwmON6$05z4PbmsH4zg9_;_2BKgrg}yVB;Uc*U8S z+YPPfbOTvt_cS%(5Z&WZGzgo4)nId{z{$RXd8gYw00h9&fv^-)yA8yetu@V7M_S2X z31pu!%@*i>O$Y~yiT#P6iJ$t;c&j+p;F`bAj;vF8WE~0z&HU>b8_ZBPm@G0T^RJ%~ zj_`a5US1jhdc9`qi{<6Df^L+McfFxEK#OA53rl9DT&@|*y1rb>{Ofr!2t6-)7Fg03 zGXFY`q??ytSfcBp`a{NiGC9mALkB2R@G=E2Q}Bj+Bc>v(C6~5mr)W+??vH=;C)Yp8 zbRYlK#91V6+n#UgFj9)8PNT2F&|O1ogKyI+H2D)bL2R1z=7ks}{;rWwOEF5{Y3Xf! 
zsjzlU>v|cd?G_2Yrjpoe--fr%GzJ>mI6_=)-|WG-Zg{L!h~*PlbVjclQpv_% z?@vFTe3SM&eN!|q(`$1!7D17~8n5kp1f<+Ai3ptquWwBBIX<}D`H5A{%F6Qka=Bbs zS=TFtN_nwXsI9Lr7aIC{sZm+d7nkd+0kP^=`bA5~tGf5s09_pY>X6}+&N=qtuE`ix z1?3-iNKCT~yTaGMHEF?xFGabSSd@SNgvSPyG(v>CV(z4wpL-s(t3xUg5^Fr(GN1a_ z9d4EWZLpz+{HVB!s`~h6GuQvPggF8g(FK3pGQ7cEd*9pGwy)iM_x3&Nki+i>;~dmU zuefb9-eZKI#|Vf)|?*|59qLV4Q`>b#+8OWNh8?{~b7g@qP1&|=&4 z^ack!Th^|rzoSZxOAgQ2hs zqC9cf14bcqv;n$;K71Lr#h}z}J1$nQyN*Xh)(+4Z&_?i!VKo|XgCqAt4SGqF8H>EQ z>do%%F8t07y>6oJ3v5F{nzO)K734()D%Y2&UKKhZ(RX#rHjRR62wI7*z20^C2DDII z?qm_r{6i=4ZqND%Z9;XVu~MwIw4p(1htjXDmaZX53Y*Iy`L6G{7IdmZ;1sMDoZc7) zJga&GPp~Z2k9hSKef;u5z|hdsZPX5vgRS?_V*k)I?V!I3tLkm?b~!MpHejT;?Si@~ zJV$ui&aUfpJ7}mZmc+>;8g|`%Y8@0EcXvUrc?*k6rR8FIX|b{_BY%#wjY{GxwOT|S z({*VArh$A7SiL~h95{8orrMt`FRslmF0VlUjZRTZi}TZC)VnQ~4lw|khLy;YcU;HUfwAm3p0HSB zX`AhuX&4YZ2~pIyoXVw2v9!LnQo&r{0{t!i0pq7nn2cVpn~-xr!bFR?z#ZT1)_u(N z5&mOow*Zeyi5yrN0-1xuUDxBAC~i4hSTcI6;CCVXv;%Y#`wjvvsz6b@Wx1X|ueD4< z$qeVvZaJ`Fumo93YGJ;oBrdY8K;&v40L3Ii&Nfjj+S71|3+#Wb2O+QI-;UD&+Lq!` zBx#3d&S9%6A<0`}AX%ujoZ14#&lL5$QJnBWgUbY5%JKWSdAv_XcM(iV3}L-u8%VPU_u2(1+HM&9LA@>^kvYA2O<7b5`X=F9EVx zA@L4%C>Us&+eKxOy@nbG(V%M5%Bv(dcMkh`IMpFaWS$Emk(BsSZQ6GnWHD-EiYHVb zSpZT)+IouGk1`pTYY)iJx;Ktke;{70bE zF@ucEaLP=ZY)PzaofamJ#w`vfUmLGRnIDTm|7_!_pgFt9vy@n&GFxI*(QxYCg7i+I z4%4SsfHnjri%cE`QTlxR9O-0*MUjQnwKCm6 zV?^QUUu^84aIszXt~|bd@8Q-H?egV^A3VHrMXf;!D=9>|P+tNo%@v_~4ZtXUa(FsEu$){%< z6lnUR${nQTlEX#i47syCS}QOAmg(?t@>|l>Eli1MoS}_dIAio;B@ZV+j1M{%$X`x` zy5H4XxB=@FGS`T26fGW;$O^k}v8dfK{cY@NhkA>2P*99_N#6~5Z+#@&v8|a;nGS=y zCz-wD*%>W_;O)_nL_c7BaiqhBO8Y-1&>`lG>;+hl<_3C-6Nrrq#_}3<4z-~M%E(Tv z$@a;oVD_DGP&?=9lOtWMAjh*xy`^T>bxH}uT2N`HH0fd5)_4F3H3-?p;MPzhflh-o zju)w9{E*S<+JTss+>x01>J=O~NLUEiT@=|TWCy#4W8rjtgy-0h`(1eq3XW?+4nq4= zstHv%ReCfcCy3yp_O59oZ3$)#;K&d{QLEUVNH&WUir*n>lzKt`VwAZ;4Y6<3iZa{~ zEt0P=rJ-?&<4o>}BEs%Cx%hAzRaNQLuzA%3uNuh3Ajyiz!>xEa_f^N!zS}oOqwd^F z4;4A27LbQ+=5`o@gME)H20A3>0EQHwF@M}bEX5>Y9TYR{2fA%xkxGx@?jFv3?QL6C zbf`G*08s8=9jTrVUbU$`R^Pd#ei47<*ICzTnX?<(tSWqVzVGKz&tRd9TOK0gmJ+&^ zcE@>emBL+S#bR($J*#KgphqHN9Aws`QW$M|h`M4VS_QF~w1U>9;+UsKC=jAtagUwNd zKI3gBTOG9ZiHs*yy>KsO1vI!XTa0VeHpl_Gu@etzb#dfne)HZ9*8FU%D?rj&axK_2 zPY8RLR9yE(<-a94KZ{BxR9n!XnHx<6xqM>@N;+4u1$sF8S8FEXIW5Npq*3;SyXyM2 z;BpaD5gR>R@P6)B!6#?*7Di_5VFXyZ;0K z{73v5NSgZEcV0t!(@)`VI%(=>dF9Q$&+(sB;v*?<2AB8vCz&Xb-vhYt7xD4l-k10{ zcG|J5J(jC(ihtnS)wMKW{nWZu&aB(sKb}b@&KS9RQty{k$D>jbK;&sB+5M^QD!5X@ zV?LQJx}-Lg(*RXTDBKtt@$)mM&Y9D?*^H-eG_TK?IB->e{w1cp8jWI(HpL7FmnFCq z#@fvsQN zI&5>kg^RChM)PTa-2KKGkh{cQJU;YVN;~^L=TeMo{2D+9;bC>kA$PT=|Q54<@8*^T6X4{gL5$Swh1$Ut_9jWY7h_Mw6@ zkR9Dr#O=i92yG{Yos0e28OZL1BNH=_-Dy}mI+I}3hBYdI>&-xR^dr*aBc2^NdNPn* z0F}HWme?4rlY#6AiOWECA>>u&M+UOXKz4H0J1KA|1KDLDJAg+rkR4+d6c#B1*<~O* zqR2$!L@>!|O1la;$Ut@($S(3*L>S8qWS4>LsvQ`*38n@V8~|l=vs6UXX&KjGb?@NY zI&8pg{b>NM-YhXnq=Tp%;%lpn7H#>x&2q7%plCqTntB^WZn<4ELY18>Ev^o99U*R? 
zSdd)?8Bjsmu&a1L*#aaC6!ME>>B;bX_nL429u9g6us92{%HlmIRqdjKo$LEmG7F^Vk7= z3ny0Z5JB~jWkDK?+bX;Q*&Yx(@zf`enE1ItkHQ4+8L z>xby~fxxB#X;(f0X%`8qv=EzK<~RV?O8Uy$YC&IKGy!8PtrgbFYh|Q*UoltpHIUJp zwUc0M-#DQ+GOD=O_I^a*<3s%E58Oym#TgSvLH`~Zq$HbM-8(?AbPL-?9T9~HyAEQN zzy@Ra-|ydjab!i<6TA&z>hjDqG>rjPydQyf2+$W)n#5e$dfVK*%pgth*zj?1C>bWf z-lj{!4b#Ozw*M02zCwK4D9mY>wD$oJXH@k8;tOF3mqrB)FIEk4#yBXkbqH&ny}Whv z?)~bm+jnkWnccQOz*V-Lf$aU&;8VhAL(4jJQxZR*Uy3>CAz6suDb|Nf6KPk;*Swj7 zu?H$K7Dg%tkmL?8%31BW^x5>s?S^an+!K$si0_(76o}7x$4Tv7M-a z+;?X0uuv{PQ@sS`XDSy|u2WcJo0sVH7$m!fzj_b8$B?|k9fii0CAu7A-H#`NMAQw zb&DwkT(E98!A#sVE#{d>*=W@5!ol%OrW$I0p?0gvJi}Tu4}-6&;0W)nDZ6sY^7IH~ z>)^|!eFzq0bjgr45%)r}Hn*~*heMMdPDNJhUmaK=c{Se>7?;VU#Q2SX=_ZoWn3KU0 z5DvwOZ@{OgHfvU;z5>RMG7@H0*6M{y-2jEh`XZ8Ol}pQOOQq6kL)Qbv_+R8A#W=tB zFY)E*3(gX#46Z;cJ(@W0=L^%c zCN??NfZT-mNfLLYHGy0->2X_frv_a4d1T2NR-4e-n=6z{CAwFukMlY3VT+@=3T!I~ zcTt-NfF!4S(UR$1A|t05EO15KO;Hw2RI_BOYi&i(MWhrz`#`4-{Zcy!93-4o?p z9X6OLT`kjDX}aIcWjgD*5Rv zu70E?nVtCh#yi(64JDG^1|5!cFlvV@YWKxVGVu+0n`vMKpoIiSoI7ZO*R3&O2!2qs z`Au`;oADj(J>5kfwEGVD05OeBBqQ)tu8Y8cHStZj6r0%KONfR*#-`Kkc@_X~6W@sM z5*df+H6((NBuT`(0pZsMVI0EQjCrkwj187Q@$JYXy;hL$0uxD@f08Q$bL+Nb=bHGI zlyU}S64GtJghf`pAqFfMbrTsYC%#2r4rJ^+(!F4QO?-X7q4|(Z=>8^SQ6yao*G7^h zd-!Nt~D-r*ba@ z6pULi5R*5@TUTQ-nIv!X-Vv}WJrO>>XQ3w)MXC_yooZC=86u1?qsF7z6p~>d1D~!zk7c zUA>dT-3#%9_=M6fRsKYP=hgdIRru9^J0Ph)dPhe( zJ#`(Y4hLzr>&V9{lzKsB_-1v3%HT?TKRD%(GL-Hh&uJZCTV<6jWKKh519ATzl2W01 zNC3>7uIa`}D-;$js(>^|-YqIcRN-{fRoSU*wsjmgfQd#*JXG1K`Ajs6FQLkg3bxDx zrhW2|PL}y_p|B+1nrJiYohrNA!CGYPgI>bs#m_+}AyNTD$K@9+T#lUd%T**ER)+}Y zv173mqzriA<2#PGLz)z1RN{76Z<_UeCOPGAGWbFN)J!aJ+~W2)X1G%#BP?H%vF+?2 zC1fC)9MG+tAf;GdRYrKTKMn)WG&IL6^4Fr}@y>j7`y5`vG)@BoExtqDFest;SadZK zJ*le+d9rOISG^g$O+7Zd6?Q~=@Mj^xq}j|>EAs`2bs4X7ccIEuDbLC~v0+9lGBFV8 z_cJ#3nR%a|KxQy>CcRT)YPiXL{C*2dPDdV5=3efBZvaYogi0lVTi1}jmI=Lhzzd(u zVs?vX^K?v-G4~=HVanD!kr5%UICn)V(t>S0`hvvWF%a)~?<+-|CN3R)5x@^*Kcsrq z^^TJvBD`VmtN0Uyh`hWcS_2iA%F3Uk>qGXBncx~cK^@4WmQ_REIDV1{l9oRM03KsV zWaEMkd@RjsCSpm>*VZ!ag^Dr#@Rg~VN$_Tt10 zP8bJsgI-vc>eDpuI)VF%Q!6enT`G!~7uS&eL_(;3K%i(Q)zfZ74|CWqBm2G+(UIU( z0vrwxonfuuL->$(z#GCuFKlbGkUK~3a*3jMfV;10)igcp*$t;>ma33>qJJ8@^-?kH zHz9=FR#EUKg($egj*Ccpx;&a>_kSdEo}T|9tsB6Mym=qxScrS|Pt-3edE-g04S{R;B$6(q_jYc!G4^?zxr|C)dP zH8$8~3I8e%%{|@7eVOvX7qD{QT!G{N@@M(E6$UOnG1sSiaHFe{S9A|<(Y=C>Dl(o?%iK4Mw~?<+dvekaJk!FB1}9b<^Dn(7&&^`fp#cQ%<`!9P6S{jq*VE}XrBFC!IuarVaCII$TM-Kbq7=5@MH z+B=dO5Pd0jIjHU{pF^(rBo&`L&+QCshAm3N2SY@2hIb^i5a+!iDgoAeh&qF1llVr< zMunf&b>K|B;dJdI*B{olQn9Kx^v`8`fWj;-G^0KguB+lKV)!6h9xsj44lmS`sG1rTE?{$F5ujlp$NxVHrmYnd+l)ssgya1eqmpvbjkB~ zW)1k9#IRgXg!E^VxNl6x8Z2eL)89#BRVL#L4m02RYm+erGerkyp0IleX?%V0%b6$l z&I}ROb91}r#aCya&eszGo_RWN$Km;}5Tl-XGH^AhUe=c;}ckuYH5SpC3!N&%rO2TbNE*+4wVX>qtwQU?4dlVIqXC+!5x*g+Ufpf;Y41hi#hwG%@T1rHHIhiq;006 zEX|}%jkJti^oU)&49jgx7;r4Y(vUbU%FAjxc08Z6*yN&0A=*~x@0BzlM&24vF5BP2_?u^>P5hj}au;&6Z!rb;Dkh}o& z01yUq+s*C}00cMvXXX~1nzlx$RcUV6_F6;d*xK70RhO36Z}^|6Pa77TF30{b?3xF6 zoo3hbdw%C6diE!d2WSTL;=XengeaI|?!D_^ETh|QfeWq2PS;ZdMY0XtYIM(I7t8P_ z03Mq55TG9KqcJW&7B&dB2LtQA?e|@b7QpJ`tA>`J_+1Z=bR0~8>B2+j$nk(T64BYR z`_?@_AP_;^!j6Z;=MErjtlQXxt=pZrfd@m3Y%?NukpO`%zTa~J;?M&&4v_1j`I)>~ z5A$}14LrXSLAlxW`_?wjvwe_chU1Ribbxw) zXx(>FPsj#$gZp{`$dB*Q9Cc#^XhvYS=v2V%ZXYAM-7DR)yR7MoxACQ>;rASGdxs@Z zMc$E50r~L&4cH^xOUS_ouo_T6Rh5*$y$_L>W)Fd3lWgYB_Rd4o3YSg*$E1UWvs7MR z-dNh$sN%PweO!;1lsD@2K3lXB%l8%6*efr2CNq^jiPrdQ76HFhBP_%GQ`WY(9@;0R z8(8e}ll*z#AG)6atVO5pE)T+bGeH- zGN4O9m4TzU-yeA{(Ec8E{eiky+EIN`ZDZAA^`@F1n-l_2auaPE@z+d=zj*6h#J-C4 z0+26&-nbn|u^s?I)MKc{8=wY-=4heA9GP;WfzLtr5hAE>%iU51P`w+tO@9AJ3l 
z@%yTK_;={Ed#t^ShPi&+5H%y!+EchaSno&RUwml}vHInPw%vEp4cv|h4|N;L0{cyt z#qfYRbma8>0oP~Xi_CSNL9V(E05${MhT;++r;pJ`ejAW-l)zjCP|gvbX*ef7M!$N? z3V`?nSRwTG*dC(zM!%2yv8_OBj@uKcKYP^aV`&NWYs)%vhv5h_KzLG46{r*>x&|sG zD+~kAc?O83Ba~YcR3D6;?r9ha>9U&P6nAxc06KF5^BWQa2+Cq)7b8gtrVX5+N29?2 zT!8)nyU9bHS`ZF0mzL}ePRdmo{qzFA#`V2XU72NtJC4c;jd#eKk47$1eRmy5VoZzh zUeQeag7t(ieL7_yZ-J-4tSzT~;LY9$G4-++t-SK zCK1r&dY`v^;UzpL%L=N5-iE<&@-z}?!BprnDgcFoaLJ(pXB;}t0LV)=keSdbpT-); zvB62y0GsCOh`WJE;vS%sfX;=xrHh0BRf@wz7k`pgG*0hm!|XU5{NT@!>ybVf~LS;5AjtxW-wkV>) zOI2^K(;l=4F3E{8-F{g4*+WPqZ1?Ub!tsmP(9~+XYd7Mab%|hv4OVqVB$nerj5&}8 zXxQMkaU%4f$UQv+wO;#Zcigc7KFMfU642bdBhvX8(N;NbF@{8(G6t}buP zBtEe3VN6hS9lZt20Y1OAcGbdg_lR*O0B?k%;)Q{K435Jl+VL@uiwWi3Dwm}TW0t}3 z@iB7lQM`H>h&+i&gIs~sa1o~*gr(7-w05=B=y|24J)j7Juwj2(GCPEi8f%x}S+VDnUd~A6jaD2VdfF0kq0~pQP}l8rd-$Jd z`4m5PpBpJfG^Q#TaGO)5smwAjTUV}JQ5)YafO->X@vUVT8;YlQeG4ht{Xw*rcRc6} zBgl2@4lGbuSI!~0pv@^9pL8%>)ilX({X~9BKj7aT?P#+jq zv;pD|ASU3XAz8MNMa^#7jAB(Pj9%#-tq(%(VGv4o>8{<8z$6+jgiWV;PMr_PvzUwq z76ZBm%?rvh7ONnOLO6lb*>TFK&u3CfYErNRDfO-mqlj88IbE1i_?KqVM801I;bYtDTRJXaed|8pj%tJw!E~u=3HA@TDg`M1#7Fza3)-xE(xA~ zmRE5>Z22hkqpVxZbsSY*%PjEUIexoT4EmU>mFif+z(D+as^Fk;KT2Fb>-Ho9 z>bZS;J{Foj6*hcY|IVrI^?Ey5w5GU!sql|&bzf$L_7DCrYlkh5hcxL4pCV?3GA9K8 z7^@Ukg4*kJ1zRlFP4V^`*Aj|-0qq%>w>XhqS_I z)MX5co&h>W=#yt{)vRR=Ma*MmUFuXyn5z6>bJxn`UC@&N*v`k~rXN^Ut5&m?h)V5Q zKd@Hh+vH=(#t{v3RW$+k9&{AfdK>>kr@X{-x^*|zdeOChWK|c&-!YvC>7(_dnzg20 zrpZ(UASV;!0`uTYHQ8M@knU(a%%i>;Xuh5QsrJx|Wz~lIRYp(0 zDowY!u6v93$Ir1dswD%N9}@tM&r$0Q@V!ER$de#1qnb#O55z*)q64e}2ZCLoUXnji z^G&I6%EPL*i*e;p*SC0i-B9)*Y&v=i%Jsr8h+gCpAx>ux zerk9I-LOn=&KF%3*Vre?bevbiJHeqVJdpZeDle@xo5o-kLAFXO-+jf(g_$RC{`}gCnWy1qR2mWnPs?p1E}e(`212xa zCP)dZ9-@3<=7}sQlfm!x{9_!1ZqCgumyt=px{S(}fSt!OiJ2#4aTAAkC~~cfZt8lH zQvVSS9D`>;OA~Zuo|+|PVe4|342Z*tG!0}GvK{kawM^UGvNKQ7*Knr9Ee1h3mx7-b zHch!y@pXWQ%q-YAKs0fhCkW`u{LowU%?zyvf@sJfa(9577y4^-ZPv_F@Eu6t6#HN@ zBtHo8Z<+5rBUVwS8pVPU1S-s5o?9_OR+tO1(tQ*VhWeF#X1bY_v*aN}@~V7o356xtqzjji_DMfe8;QoR>J;3 z`B$cSFEUR;R{b7GTG zGD%T-dc}JP-ptecSCb$mLY76cGT-}a%E)Z_$d2$(Ra30Zc?R)uFEnNkuSk9^Gw?*| z&OBj%!L%4;T_9n-L**N>U;gk7Yn^{{Zir0xN_@aEb;mgPCme;u_e7!H=Y&o>qn{uD zr>w>DM!KYh&7N@$o~UR>d_ej9(naUyApyzV&n(VA5je-+&iI(!(_z82$6f7Rkw2RcP4{Rg{TR`HcZ$Dw zMsMt%XWX%AZ`I7M#Pb|=XSl&8olJY)sa~ce?JwiQ+4F#_A%5qC3*V`^0Dg9k5^<%$kt$yrRv+F@=-UAYc_?srR z=Hx@CtIe7kEEQ(0ijU)%P)M;}`jT4TRGkVTf&n43V73ES2=#AXqBQ8|QC}?&smepD z@{lTIrbb4?>&yrJyJQlp0UYb7yk2KW4lGJ@kiB#gNOKdUu>9Zd7+dkFxhvLPUmwNXeDpKf2j=vycDYD7 z(@lJ-Ncqzg{{N-w`k$rT#~trdb^aSuT*;S;fVU>Y_7=Fpk;(yh4V4K{xKtDs1^FK; zYj4y?z>9CS09q3wT^lm$iH811;_aYysi^wva_H9$CGhqeN=y-hOGV88tErL;;IpXg zt(qA*I7W6-bA%ut6%Rl(LxLbhv89;DvufU+RRaM)Y)*)L&zAG7C~W><_G-6tXA~na zg$QPW`M*>%_0H6y1S!aW(moaL|56e9cB(i5X&Qt5=T8dq|5DLaC>C~1P&TSAkczm< zv3ymq|CfoVgI`0{PudEcVGgmnnU1Oaq#tQ_QkU&${oO zvy*(8W%DaYyV>Q(tSd5z?LjiQ;#@MVHh9G#{#AU_=?3vjp7>qBaD1c3lK=W=`LX27 zcubiScoPVc#V8$o>!Xi$Ze4%nr0`?`2fB_~zhR1WrO$kdF985LZBft@0yKHN1m`xN zdyr=;19uxho}5Ntkj{i{k{?S(5h`WCOv};62neD8N^PMi1FXf@SwuNqqPXEOj)YjB z)6#&*)cLXGY0>%lv82kfxM0eUO{Uyk^jrwH;V_1uRY5JX4+an-y-XO zukaLwrEIUb0KF>V2GiQq?2a%X1j35|C}tqa7x7Qve&W=M%S(%Ej62{p*5sUYg_&MG z1;pA|_s9B)A_Xi(G{jK(GE!_*`IzYPmRm6r*=K-Zue^M;yLKaRPu&7vg5G2He2Fg;g5HOPm3Qk>Mkpo6N%d=$VWaY~P8eIuJ_^I1Fk#H3V`V~ zXMPT<8hKreHVGLG0NA0b5=AlC%H4u8lz{qeEoPpkibR}C;J9%VX|$2)UZl(u^ucsL z*DP>1W5M@xbD1ah>l4WVneTE*GAWWF{L#{xlD?36a&E@ihO@)njOABkp2%CXg2|kf z0g`zttJa8EMCr*fS)-KslRzk$r|HAjoJ5m(f|sN9NT`-svV1a6(j)L>4At_gc}NNf zCG$i-AaFgCpChQ2SIU#R1bg}Q38)qggmR9@Sp%hHentySFq=GLPg>?;_hz1uZ$y@P zZ1-lq(|7;E6lagwmgLx8_EC|popL~?(0NWhBP(Qn-qs|{blMqSeAHOLWqpEI@DX*4 zNy|K4Z(H#p42MlKcYi0 
z<3V}PDPt9U)elwN^GQtSh4OUR%0RM$iQ{Xv@&Gd)BdI3A{@i z(|~8yXy6I9rN$Aj-i=;}%550wK6(e{AoT$vsKaB&@hml7rEBVK@=eyXq5({_=atk= zW~m3(zSsdER255)*GDw%_VAf|RQ89R3J|a>)s>~I<>i&?>Q$Nf3y2@9jf(bhAJcv5 z0`JzgdRV>A&^q#)c0bin(Wu`27HU0lsqvE+Dk0Hqi0?PS2^NZbU1| zKF_*%=k?;PVoJ|kyfMVO^@SZYwPz)`N2<*BotDci<`$CAJ;Jgx_}>jt9{%nFu-OSh zV3JILH8YiWx^4#x&!SMUj_jUFQ%L1VL#FAL_lJHc(CL040dZK`PQT%_2)>;VMPtip zd1{U!gr=Y&Wu0q|v$c( z`M#34)*Xsm!CV(1=Q(H=^dJlgc_sh${dU`JQap+z9WV<$l1PbVnULf? z(P1h;d2duG9xD%TZ{NCiyWDSavErQ_%FK93qYjt%y!$AUNxRupA0eVzX9yvL4?`vS4rW~Z<4#Z{GFB{$2}g?1a%w+ zvrp}L^D}MoMozHc`?#DyCG=>M-`tO8QB5;8Da8t46s!O&n=LiE^`6~ude?!q#{b(z zsHk8Cn2jhQ;bW$qjZsS_X%YM&U8r(%3pGOxosUN@G#u;@x`0Yk!c^tm$6ZVZl!^X8 zmJDYV+jmLt-+P|$WZ8P;QnBFGM8n5KS&#O5Y{MZz^|3jt_M(>nH@7X|rpGobbOOO} z1oDB3z2=a97`kS~diSpNL&+_uP)B z=oJp9%mgw+OHO~AkalIx#vN|hd~LiM_2j7W*~3$s13Dv=m5o=Za7(Nzw)|#Lk-nFj zuug&!dJ83sb5>H}%(xusWTmReLXHq$KEV$nzK9hWI2>89iEsr5GPEISsg>C+5F-lD z_+sM#kv1;2t>F5To%@e=pITd6kACs!`gQg5FR=Kf5v%L;2JKOgVa`(8Rz6dc>mu|T z!*8SF2Zuhya7lw(72S^d{=WNtYsa$tVBGGgZ+m6yNkzTzeFyT74cM21!t)iDOG~HK4gSuII?@V0qYbd zNh`ilw0JO)YlaWNpLd;b4}03N-DB-6ILJGY2bA~jC$b&KN3mK|MBEuI1pW4CN}>yx zUzF&;hRUVNb6oI@>;=e*bT%#XkBoD~!j65x-VHSyi zRj=U0LBc}7GG}v4$W9Iq=fWQW;v5DW6&F4R1!w3$4nq4=g=?t7sn(+zIYk7Qt@j)c zxHsS$$s3BPB0Z687AX|JL)0ksg7L*Dyh7b`Y*%3xEV~Io0tG9jp>c`BCJ$uO;c%SZ zykVodYQ1jAs~-h*Ef;mZhRDO+n4QPE6IeeSo1V%gHv=#kL1nKB&FuNDAzCy9%m8yOi)Wwt!aHK^9fbY?4|60 zWf5S5o55y#t9?!MY*xg^OjlxaurO8 zr&F(_Dxh4jSXt^cgYQR?L9A4`X@HGkW1obP(}UA@M1P2JC32x2Iw!}d^A@mI+FWkQ zLgi~jywxOK^fPej(-)s1nNCvOe#}(2ZTxxqqIdzwp>H1$gt&V~>fE`NP=41y!DsaC z;D660i?B>B)H1DmuN~|6j6ObJ5^t71kqh9ALbT2lFFiGG9`a?Yh~__j^(+73EBvqf zClRZ~S(R64_r@-ZX-Ab`=1M>1@8pNftU}sxiqzF>g~(8VY=d6PgUMO}pI^Cbfh3wk zM>rFNYuaX!A2QG6e>OJK@)R2>KV*&_&-o#9W$aWeKV+txhUZrC25;>fg)cv3t|X2C zKaZ37{E+!9e-)i6cVL91T$tjq5VuKw$c$JhI6Cq}W*sUZF2(p5X-yfdf{?wW8j@*H8ZT{f4g5#BEEjWU-I+p?pyb}Klpou1#pB^k)sR51FW7K5AgguXPjrh z*^J^DI=`MsnoT(5^JDD7{ZS*Xf)(?QGy3^AiT_=iJ(%ucWj>^8egZ^sWhn* zYkxmL0$bDB-(SRMzNX%G5k$%F3=hS1grJ7|`@;H+pVeL2^BRn>1=lAdepN?wIOVz2 z$O#ZKadb8$!j;Fmx99EeC&vq=0a!Q~y-|HjT?@jK9_Ta2vJJwyWo`vm2y z_K^LE$myQT)IooS81=F6XFcA~{Z!<0k{?tQ>(~Ryc)S(m#=S@Hdun2eQ-SO!cE4u^_X_f@xZV*~uPBdb1siY_EDIcsY4s4XRNssP=TkB7D=PFo zF)`+0LIH1DFE>f1kD9&C>yY8XlsGc|P}8+(aTd&P@lGk=`6-ChT#E4-hFvc||#<17hI;?g+Sr2sng*+btt);Hf zaz;^kOUZnz*+Z|H-vx0DQFNkI3nUoIfx6SD0~Z;o_n8~WGq_8l#WRFfgW_CX9lnzG;rCaXsO^_T&3n7JA0^clS<-J=fo=WHF`jg4~7eg zL>y5SpDoG1__*f*M1x~F)+v=2%8XkdGEFDe>TH|{!2TYxIT2#b9KcPPm(BgeE~h1L zU)@xWrEOV1w`-*BK12v}T_XDF(5>EthOn#iZ1)#1a+=~j(1Dy3 zrCx%(CNS&CjJyLw><)pBVp8B#(of9sv!*|o4DxZLiEAx;shP-f!h%#;BFuPUx{^v1 zm+i22#(#(#Vbb2NO4`%szLLen1erbWj7Mt}1hO<#?y+d(h=oSnK2m5cyTOE{eaW+l zGW5ry%$AqdfW5G`PAC)TL-BGdIdT*Yu`T?la|%PJ+4Wpl0hnEK7{SlvUFKiPAWnOv z=E-E~5kdQ!K9%VbQHd3x*OlEv!K4;s7hsJPR+DreOP{YZx#!SW5c&$4io|@rKxazs zc_14(<;MUvVal1AiIf7TmL~goFzOHZ^d^MarWz*w1X&1-;{qEhsrzk-lT=+WFaYSf z2xDT`ZEjk~s!?(S?7*K&E;wH@zHxEW2eJeN&E`H_VF1b~HGHIkzq9Q0-`TewGmD$k zCbJ04pdngPZCTda>T+@{IO33IflUY@dnaTLkdl3jm#2s{bP}anc z>tD`#nzPx({J?@#VL#k4Ao(@JN$MP-iu7twI1^juW|i?5w~us4vIv#}NATUT{7I6o zczBkju2Hq^mOVh~PKn7p08En|oT3%^rmULAw4PJs0DFR+50(ct8@qvx2!==rSX76Q z@4{FL0)`obLnQZ$Nf$GLNsI%UKK4>c>Vecm{>~vJ3&x1>B922ODGr#vggt5K5Z8oE z9`!Il(mGia1Jn1BNURhCogb=l3(+Kc7I2${9iTQb#2R6rm&8P(J#a0d1+7(J-np;y z-4R)a5HyDLBzjJCI-g4T<8FK42y~1rClSNR*9yrXVIuo4rOYWVRML>jz{oRP3K7nRAoH%<~_ zOD$NL)glt?{lNeUpG*=>-XmK-G1d~SumJAis-t+P1Zv9mK#&YAAt5foJizQR<(7j{ zL*OfjvjeH3uzy7Acaba)EVu?Z1|u1>o9Y~(aaw?*fcpZX8SJX$AzmwpCW5df+gl%u z;)SC=Iwl74{&vgmKaSOq0X!PBLD`sSpK&Ogc5A72y6h3`SQ8^(WdY!8I7 z$U9gCr>J(!EiDh6c39@n-<3SpeY67d_V=-?*IP(Tm6SR-AvkH&c{)Ad&;j(R4lcn; 
zZ-~Mpmv%qA^`XA&mbx}VEhM>=IEB}Dq?&36t%0 zeDThsTMuBQ;2QwI7fp$^XKYO0SOuv6&f@2+UP%v?~ad7SKYP z1n|>4IE02QG_CJ&y|sWt0~c9KZ&dUYO;dev9>=vN9xdN9CJv;Kl3@fsJYv|1h^>JS z!w3BhyA;=AGN$)Eku-4SDF~xzS4piqPzUv3&|7PkAP}Q7dgn z?O0TrG`?DJilT$r`-PU^c*dh{FLN6=$UR!UF9LrW*G|C_s)JOoL=F?A^1TNi1y)fu zW)Q}{{p{iPJy}b@TL7v=Xf3b3cVoebo|M}QI#L>ok+4mHJ5D0ym1 zvC*j*)oH!si)(QLgja6L=+si|dI?T-#4$CP-YW z;r56*62GHZZ`g%bg%S#z5?upEIH7g7eZmfrRNW~(N@aQ=;jV{>0;n1Gb`n27ulP)j zYu!MLs#U)~uSJ7;7k5BdU>FAxbb>gVG+;HpNVA7wlEtff1KJGihk6SkL`2RcC(JOf zc?R8Au!)!^<4j61%09XQWK9`;xSonO92|HkRy~AmDQRUj+DDMh+>+$SjW&#$lSsl+&_;YGNsGAXdk(EO(=pGVY8mZ@SJArsRd++r)vl(vKWjL=&}mn zZ_#Q%?m+|+UCmhO2@}w^8aTrl2VB7bBR&L-tZ-M7I<+3DQ&-SFdQA(`%s^_X#vbY` z);-Y>x3vot7NVu(FV;9mFXf$^`NH4IW$E{00vKRFqqN18UsO*R$K4rM1jP%r=jkJzhT)~@|AiHig3MHr_CQM^l?d=7}(S zlCv+olw7?D=$r^IXx>2RSQC$O9oLrECNIcxuB}v4R}2d%c%WosW<=|p7P&la#A3Pi zYOFXhFoq{7@g{}@0_`jirAe`-WSD75$+Z?1r^1hQr>J&t)4}`kPh`KXp=ogu$@TN7PS5CCh%B!%rPNlpx?o{2NCt z63Xg;VZbWvHNFssGrrbP*n@xgqwK>^eG|~U>M#=e4`Gn$>9w<8`>g=`OiVuIw=p4` z`Z##km~rpE*^uKmTEytj7xKVdx{mU|-0LvmWlL2FD^s#a;AarHt;<))C=bkqOQ9Ua zqb1`s5&H{g6Um++)sC^1xi~a^f%y%6VXJYjZqYf-xE5 z^fU+FiAaF6gKKfNp9kie(2+baHxJA`C7L-8%;gDg9+;a4=JN0=56s0`lISCepxHby zchkxPb0@-$3U);usb56sO2b4~V99++#OBl5u9 zJTNzrPo`DCJTMoD=y_nSDdd?4=H`L9dAh0q=43Hl^#Xypi`LBxpaIVBOb@-AF|ZA_ zfM2;T-cuyEeuU(tbc7V5+lL4kG9f{QU5BnN$5f2QWyz&Aw-_wMrwjEbMI`eFz`;Tm zHDL&B7IDx(iN%5x?M5mV65JTLtQd}8B#bpEGM$Uq#I;?2@W5Pqq<_o-iwoBm zFki)jL~YimVu0fY#3m$sS?RWDVbcG zSIJwFHHI9oD9ktW^!(mDsxikA=WnhQ@!usev86QhKQk;|{`?xBl9zvj@6X>_t2UN5 zs!MAnr?t^8tyWvhC3|gowX}S7z1mv8y0N~zvGU^J-`L7;-^QPR$EVL;9Q>9Kf9tgQ z3gWTgTh!xxQpM&U{0lyIe-+;({o{}NdK}#GRun=jh5gx^j-LOvG9QFKFC8cy+qK)ydJ7#k~c1 zzYZX-a&%>kMvktWqbn;kYdB|>`b2c*99>zX{F&_X99v81h z$~n3+QK=)4i!>zm#euUYK!hhzjVGXlb9Cj2A<5B|^O{4bvYZET8~oJE8pM6E5cUFM zxL-L4UErNz{}zGu->bW=T48Ipa3@poH>iDDknm7|A{R~xIQ;S!v^SMq#|zqzr}gIm zQY=2h(m{0IMZkP43t_a4ys~Rv*)>#kMG;VcD|GVQsZnBCxMC@rNDC&2fqdaD1uW09vidE`DcVg;~H!j7%Y}BqB zx=wh)I&yLK=Cq72kJoiG1>IOv54}~W7pSb8?suxRx~e_KT6TOdO-5!ip=GJBm%;@= z5zMZS-x+;#2L5!_pjJl(-4<2tqv*S8;)-=+6HJi7+e zqEs22$F!ilX&nic2lG$OYaG!Gr2rX-woY*yfx`3^)t2%pfk)CT=mSO%Lm~x;6*Zb< zJQbk)8M%?=pVD+mk7TSB`Wz-Hs?%2GOiegv68rK` zgRZ-qpJHR zB^!Gu80yJ|85exJVm%nL+%Kt8RY+w1_@eZbnZmuJV1)YHcct{AdMl^>`t|E5Q;CL( z@zN_RPI$8Pw9LA0m@Lb>hZX0(@7S&Fkfj<>*rLdvEvu;NG%jpfcZHXVu2*k6b~r*V zc5QWOV{MUp?)Xm)*A8lTz|oGhj%E}f#Va?X>dd`zGl^yR%59;rdfh>db1D8Bpt*;B zh+^Ik{6JSmxE)=jptCGl9h7$Q3?qA8$uK>CvU7+~(v=6(b15rAIO}ESGAin(rx`QU z8u|l_3kbNSJ{1SGrSj4u7;Mn9Q7bDspVmbyZH|{*ANLUCz<)XLUk?121OMd}x$}zL zc}4Cis$pE&_6rUCw~8o?TYhuYhZc(Snu$^r%3fu;dZjAudCF2r8lA^O*h19zp|7iY z&{nhSG!F+pHEP(>N4N+T7sgG;ZBA9hlNRz7YdZ)=eVQ&(XFS%VVTq{nhFv=lO&t~C z#cn{UU8Bi4Zl@atQfdB**bK^eZMi5OAEIbh&$l7w{>n9)y58s<+%`O6^bl3aB68A>n$Y)QK%(>@_O>kGl?wRkOCdEj8{< z4Q$a(b?KO7BPa;QLyiEh2lxW;q}x7G14c&x%BnD&f>VM@-b33ubWUh9DJz3k4&Q@u zi&{1$sEXYfVCNa_E2mYC-7qwVWUw%p?f@Bf+Ad>_W8+6Sa&7ezZFe5oJr{M8;D=CM zs>5YcM*$e!IVB+`A~i?we|wHx&!w(&xkg+0p~!5SRW`w~wk8<(rPQy>xYrubw~#@$X2j7ZPJfR=v3 z@{6A@E<6>(a(w(R-j3y>U**(rsHT+!mzQ8~A{B7_8LN^So`ubFwf)%-IED$HBL74W%6wUt?x>!OwQH-@wWSTf4NdlR zb$MfTWob2TA=x+dYVEw&O~_AE^atIFWW9H0q8Hf5~1G({{7uw=Mh=nK4ee za+ryat$~5HbL;d>>-3`TkgMmYIePifn_p}!aE5aLY`=N9xoKTeJvdTFP~&LAS^ra6 zi9ds<)<1xX1JyLpr#Th_wJGV=A<`cC@Dfr_8M;)D^*tg=y7mzrh$CjX$U!({0HRH> zbQ@6P+AelgWZsV-5>R}qBLYjG4p1Q@v!#8^YL~?0uGfUZ%McdaBVEIf%WKr?R;p#7 zafZBvN=|f+vz$}UJ#?VGBp;>j-nXA=eg>mAa8v~m2&aCB|Kd#M&~XMv5#(F8w#2~- zjkXUhlkbKmV&gPSE1Mt_qR$T9Bfbopx$n*bqDPjwrkg>51oj9jf;yCFKOwYR5s0@*w0 z-REf=b-CJ;y@GP(?-dsJ3d$WXAC*g8J|^7adxfXz#z~OVm zMP3?@aebL{u#Hi`WDVT_8kIU0v5G<8r()Gw0EyVZnxo+n#-QPZ$Bv`tilf7kCA&~l 
zCZAe94HC}3Iug0Mv;;fG8K4El?7-$C)8ib+Oo7`6@&?34MT{c1S#kTFiH@=pb=8UT zfS?2uyZJ8OX*ckCycv->JT?SZ2#-YC>f;~YC`Z{fsW>m1DVxn6_Ug_3zm1Q%L|uWt zUhczcbeovLPKRF~gYFx5@x>YLtLX&CnU`6C+jo05?Xqa(7A>wJY6j|1%mIhUl7q>I zC611nt$sAXC&3{ZK}`r8Dv==;P1(AGHQe{9t%xcD;~AqsLr^I#Yy-<8>O3`m%m*fC zUa2fNM{P9>3H$R{khI9mGzfRr!x5f`B&zovyy}DoL(B2o?L;}rUHl|=V!Lg2JaU23 zTXabjTkI*G$5Zy5cX&VTXrD8@Lq>D3s$ojYE1hRV40gl_1Gg(34NMyz`xulXto)#i zlWy2QkTdEsCl<_RdfEn8i>4f%50i_zfJ-VaW6z^mfhqJl5Fe^hJ-~e*XVq9LpwvaM zzh!_Ig6mUnIM{w&e^}dkBo{Q|^$q=VncgU`h4^9hA%|M##@e&J&Iqy@a>T=t zGqzO-AUK7N{b4I8BlcTcp~`GuH2Rawz`|#+c%Q!b>_zcFP-cG&l-7f7{CWDK*!|-q z%IqEdc>mx%{Ox`{`TGa_<=Y1YbnSjGc|qDEg6;wTbg#tVuQS8yDiqoHLB3u6c7eXl z0Au{sT|X13zh{i!^B+pUv#6-pJ$4PYjWT1oX}A3z1Zd{zSms^o3$&%q+O33CNDiJkfG zf6vkf21?V30zkk?RI4-J|AjcL@0^IS%#%`?9@Dlzx1#)LMr|yK{_eCgPtbP_ZEIQ7}gJWVUs{^TBW&cRaV ziCUiN=xcxOYT0@yekUjMBwbOau5mPJgETVr<%~7+M11Svw)QzdP$c|oneX@yXY&AN zo{nYsG-vTq?$6ErLM20Ho}~BT;l*Km?9x=v;zTAcgD{Hw8tFOWquMjg#fy*)iDNL_neheocHCqIUZwd(vzEc+BTx> z7C8VCl}_1jM%AP9G{)s_0eV$&rZzNvmXxq&^0e>np^x#CiI^{Rp-8WiB={J9rwK7WYBYuSEmbTRl zj&K0MQw@6CxPDYMJ{+04;k$6NV)qIWk*EBsi*V8Ty@$HYV!Co8D+BU=EJp#-z-K8I z=plEdw#5uU#Gdi-GLJp$urZw>;~||PJm2F#(Y-Ej_QKkeUA+G$yWxd9@T;3*jQExB zVCM=~q?9BN$d8b`)&gN9iU(Qn!MhE7ghU{9ID4Mo+P!`6LH*9o-P_j@s02AKwDUx6 zyc>Lq^3cc{N#JtBasmG3tfG~?%upxMOgIJvGtnNt0d@!~D}gm=?Rhi>FdT@FM9^fb zjR>+q3-MsEwZTZm)W{;L>I!n5j*&g9qQmCzN3m@~{48VI^n0il#6VBkh1u>Vii@_| z<)CX1m>qTnbxq}-s2Tm;92WB#3(;MCuIG3~JgZw>NIY(l4dbqy(MIC=`0Z9JJVE?u z)Q4MmDSt7Y&~G&TKpW}->48)em0Q)g>EU1-7BahVrl=ww$+DaCctqWRM!Fc;;=;n~ z0FZeZwlFJj-Wd-4;hy&-2wOPOe~M4HheOY|s!tKqp%hwKZ#be+!E89a1q(G8XPo((~P5GYhjKqkPb zwD{&m`*Neh1~@p5dI{RkPg>7D1%Cc};Ho#P-_CFxyk?0MM=Xj&C1A@%0tgPj7#}PV zd5m4cR-~o2L@*)YBlC3asJO;5z#&pGg$<`^OOzE>yb#7n=U7AJyVUhF4H`g`L0(*A*YK-x-YyHDdtm{%i3vYo~Cj<9heDO7g!%K zHtYn{PNcCeW}coOCD(;Ec7;SE$VA3r^Tsr0o|w0V(IBL@0gb_+c6bg@hk`4gz6c|` z_x&LI!7}SQvWJS3B-b!gYJ4p<9B7(QF#(~{A21OQcV~WG%WW0EXSnck^5D49!=7Y*s-49}NDW zGHg^nKKeNG9skDBap&Y>b0hP; z{%*ZpS-#rpey}{e<$7)A=lEV3e#H5?IXC;d#ir$>*ch`X-GHK_$Um|=7dGdZ7%|}< zjc&;N)RY&5f?55xnWtwa>X7hM(0e1kN70{Qd0t=( z_{UEwUy=VLW8;9?8wZb~w2I$-s>RMsx_+!lBl$lv4$|J7%+DtdW|8wMYmw*2W8OqD zYEtgRG8KW^MP4MoJduz9=eER-*$8hhtI* z@g2$Nzo?ekER6j+;?kyd$i$t^68Sk!56)Elo?3s_3eI9i-eCYRtsA%B+qutZz3KPk zkQ@m2#PKU2uyclhQ^dfs2*r3|vjG+9Jx@ogZd#{|BGF;Aa?R7o>Qc2zNXqRYa3T

0$SY z>m=4N`zi*cWB1t-10)*&zByF*iYQGjbR*`MZ{6iv8JU`T3y_yw#0fJl0xjGFR*ilu zM@XJ~l<2KG%+lk=2Yxz2cCy@vGh*7eACG{S=OWk>m@dS?!&wHQA0IOl=219woUr`l zDxw)L#x~<+XvWz_<})+B9mnSP8g~6S!Q{J`j{7&ZWj2WaMZ_bF(e|jr`YJ1n7QhoX zFj-O|2lw`!A)t#)=ug){K*pgWiY<2Vn)!s-navqFxPfjA~mn?QY8FGEH4#)8sLyxy75u zoQ@g?u{M5xiYESfq5J$EREta)TxY@i6sLI=rzk&PX1DN8cHP30dQ;E+!1=Do zgbR~uU~I+Q51eOWWMo&o41VArjd@^-Oqeb-A&$zJ8iZ_3IPkko!+(a`HN(pZ`I2{io;Y5|`d#I>ENNcXZr4 z`bBXZY3J{GUcB7-n>&A#AQhTz#@(qIb^hkw(cC*aL=AOnqbg&L$1$k1K=AEU%6V=F y7V?bz$ykLq>K(t&wUzdZHi@y&MqO-AmF%Ksm!r6AA% delta 8363 zcma)BdstP)7H`iC@3%aZM3ILGA|P*3-ttgGLOduSDX1taK2Q<$<^%lH%x)n&n#?lm z%gfCkYUp=JDapLwt+2XfWD102miK;IS;|8b?%w;%%-(08b2^8=eylZn_FBKSX3g5` zG(F|_$yY;dN&^nOpxD^8!|=l4!^zv{$5Cmo4U0DIDr#!O9sU)vwk5@@zXT+%_S7rpOBx`5J zsw5+3#i%5wW~HknOCL*8NnCP9DI|>WqdD!IdBS5K-^B=bDomk-zg8GcVMq}RgSKcI zm1Y;`QFyDkfWj9Ru)~#EqM_1XN(w1F^u&A$vllYy-Gy_g^rb~?hnb~|iL{|?4b^{K zMuq&=#pzV)wxoo@^GnJo+`N=YL&^gU5~#QQRR`-$=#fWOR^!?*;Dg&%fh7J~jW<6~ zm8(#UmQHJmq-k59WI|K3Aj4Sx_)$t}Y8#X1I(1g}#jA}}aO1=3r zn}bbgO18wA&^T@#V?=Z0#Q5gpTb~5hnjMlT3<%cvj++ZN@!R}(El283EI!kB`EUwSrXDKxK54lTPYk+ zggp6Bj6$Id`K?1aCN%H8>}5oA6#kW-&U8gWiM_>0<2 z9b(^$zDAxAO>Vzvajc!c7(q#qW9`yWOsHt>p!8_I)fhT7_41d9tSsp&C&;5ao1k~Q zmlIU+{W605dgc?<-V-VzJ&K;XcGd(%rgNt9CsWxp=4WFCMWpEKpDm`y^S_8vRQYQ< zF`oNv0YS%qGZN`n?o`Tc)W*Iv#ce3w=>h%GD#?X@e}#nJnEcX#HRdl&JlH&%Trl`m zPmW%Fc%~N@uD)Wl4kNkSwDZKXeK>l};XiyhdOhNo$8gi7Cv}|?&KS#?w?kXw%enhd zLwcrhGbnyCky}V{L=rdNz^I!^TovOBUr6UvtrLD?IyYUxkH8^z&fq2}M3gezoWZR& z%bE7mG3&J1kHvHsVs1!FjK8Yb-1?+VVrhcJDyG{{BbW z2_Hj04);98xoV$IwOL_{>x+=9R``FEEpA%|T=CEzL1otgi)^*e{lIm(6Kg)~<4FC+ zrTyGeA{{xvJx1W^0U;%%j|E&Z$Somo#C?uj0QmHMA;`J~c@MZ`0(&2DsRVBPQ%FbM z0-uN6Gy;!3iip!v;_%m)QbxAz~{# zkV4jCJ3wc>}FC}sW4)z3olFD0q)Ctu> z?SIySR-m|X7)Z7c{dp?Y^L7;8e6G?Y82Y6e>D-@2lP6W{k4RI9bg@WmZVzE8PPLsem6RM_3Ak2g+H_W8!UKlXe z{fSO~5e|keb@H{zz|VnXqGuu?7d}3p2*?$O=O=+hWPx2WSU{jQnH{3i6tIyvw)G$7JOnBFxEkfOjSJw zc38^Pg&g2Ones1W9(_^>l8DQNMZ{%YEq5-qeDkse;Ib>(W`89q9(u&ATn+qBopmQ6HvL*Wd+(xj@Qr|CCgJ0~4c^e+v87r1V` zrNfWz1L-F0|8^gUHKE%5G>9^y5?#yK4{9xS?cg&Yh4ghelqbbp&vLuxL7y;OlZJFy z^Og`}3D3nYX)ws3hk5&L@VSH3kOR$tNfZ!GRb=HUg2sbxT_ab5UiLH;mOx!*Kb@0E9vq#egwk2R+o3tH^ep4 zNSIA=wG(~<&1}+$Gj9Ra*bd*kWsn{RZv!vG__^Y+U%)c+{c_>20NPNSPV|9nigWsf zo{8An-48M;t{IRi4HyJd41D-?5DdsYf8{cy?B>O+|9Kc>D-0GyjW5H=`Vu^qgYF6u z`w9>SjhoGTWeZ`kHMLl42k8J2m)OggbAUSQ$;Z%P2e`%j)?JS9BqQtx5130|v)dC! 
diff --git a/tests/api/fixtures/recorded_responses/invoke_tool.json b/tests/api/fixtures/recorded_responses/invoke_tool.json
index 1bd2b578b..2dde8c83c 100644
--- a/tests/api/fixtures/recorded_responses/invoke_tool.json
+++ b/tests/api/fixtures/recorded_responses/invoke_tool.json
@@ -71,6 +71,15 @@
         "metadata": null
       }
     },
+    "()_[('kwargs', {'session_id': '', 'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'}), ('tool_name', 'code_interpreter')]": {
+      "type": "value",
+      "value": {
+        "content": "error\n[stdout]\n[Errno 2] No such file or directory: 'bwrap'\n[/stdout]\n[stderr]\n[Errno 2] No such file or directory: 'bwrap'\n[/stderr]",
+        "error_code": null,
+        "error_message": null,
+        "metadata": null
+      }
+    },
     "()_[('kwargs', {'session_id': '', 'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'}), ('tool_name', 'code_interpreter')]": {
       "type": "value",
       "value": {
@@ -89,23 +98,23 @@
       "type": "text"
     },
     {
       "text": "Result 1:\nDocument_id:606ad\nContent: .. _lora_finetune_label:\n\n============================\nFine-Tuning Llama2 with LoRA\n============================\n\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. 
grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. 
_lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "text": "Result 2:\nDocument_id:cbc88\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. 
code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", "type": "text" }, { - "text": "Result 3:\nDocument_id:e37c3\nContent: with training with LoRA quickly,\njust specify any config with ``_lora`` in its name, e.g:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\n\n\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\nwhich linear layers LoRA should be applied to in the model:\n\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\n LoRA to:\n\n * ``q_proj`` applies LoRA to the query projection layer.\n * ``k_proj`` applies LoRA to the key projection layer.\n * ``v_proj`` applies LoRA to the value projection layer.\n * ``output_proj`` applies LoRA to the attention output projection layer.\n\n Whilst adding more layers to be fine-tuned may improve model accuracy,\n this will come at the cost of increased memory usage and reduced training speed.\n\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\n* ``apply_lora_to_output: Bool`` applies LoRA to the model's final output projection.\n This is usually a projection to vocabulary space (e.g. in language models), but\n other modelling tasks may have different projections - classifier models will project\n to the number of classes, for example\n\n.. note::\n\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\n final output projection do not support ``apply_lora_to_output``.\n\nThese are all specified under the ``model`` flag or config entry, i.e:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\",\"output_proj\"]\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.llama3.lora_llama3_8b\n apply_lora_to_mlp: True\n model.lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\",\"output_proj\"]\n\nSecondly, parameters which control the scale of the impact of LoRA on the model:\n\n* ``lora_rank: int`` affects the scale of\n", + "text": "Result 3:\nDocument_id:8892b\nContent: with training with LoRA quickly,\njust specify any config with ``_lora`` in its name, e.g:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\n\n\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\nwhich linear layers LoRA should be applied to in the model:\n\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\n LoRA to:\n\n * ``q_proj`` applies LoRA to the query projection layer.\n * ``k_proj`` applies LoRA to the key projection layer.\n * ``v_proj`` applies LoRA to the value projection layer.\n * ``output_proj`` applies LoRA to the attention output projection layer.\n\n Whilst adding more layers to be fine-tuned may improve model accuracy,\n this will come at the cost of increased memory usage and reduced training speed.\n\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\n* ``apply_lora_to_output: Bool`` applies LoRA to the model's final output projection.\n This is usually a projection to vocabulary space (e.g. 
in language models), but\n other modelling tasks may have different projections - classifier models will project\n to the number of classes, for example.\n\n.. note::\n\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\n final output projection do not support ``apply_lora_to_output``.\n\nThese are all specified under the ``model`` flag or config entry, i.e.:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\",\"output_proj\"]\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.llama3.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\",\"output_proj\"]\n\nSecondly, parameters which control the scale of the impact of LoRA on the model:\n\n* ``lora_rank: int`` affects the scale of\n",
"type": "text"
},
{
"text": "Result 4:\nDocument_id:cbc88\nContent: LoRA to Llama2 models\n------------------------------\n\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\nLet's take a look at how to construct Llama2 models in torchtune with and without LoRA.\n\n
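As a quick sanity check when experimenting with these flags, you can count how many linear layers actually became LoRA layers. The following is a hedged sketch (the builder :code:`lora_llama2_7b` and :class:`LoRALinear` are the names discussed in this tutorial, but the keyword values here are illustrative choices rather than defaults):\n\n.. code-block:: python\n\n from torchtune.models.llama2 import lora_llama2_7b\n from torchtune.modules.peft import LoRALinear\n\n model = lora_llama2_7b(\n lora_attn_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"output_proj\"],\n apply_lora_to_mlp=True,\n lora_rank=8,\n lora_alpha=16,\n )\n # Count the linear modules that were swapped out for LoRALinear\n num_lora = sum(1 for m in model.modules() if isinstance(m, LoRALinear))\n print(f\"{num_lora} LoRALinear modules\")\n\nWith that in mind, here is the construction itself:\n\n.. 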
code-block:: python\n\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\n\n # Build Llama2 without any LoRA layers\n base_model = llama2_7b()\n\n # The default settings for lora_llama2_7b will match those for llama2_7b\n # We just need to define which layers we want LoRA applied to.\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\n # layers outside of the self-attention.\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\n\n.. note::\n\n Calling :func:`lora_llama_2_7b ` alone will not handle the definition of which parameters are trainable.\n See :ref:`below` for how to do this.\n\nLet's inspect each of these models a bit more closely.\n\n.. code-block:: bash\n\n # Print the first layer's self-attention in the usual Llama2 model\n >>> print(base_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (pos_embeddings): RotaryPositionalEmbeddings()\n )\n\n # Print the same for Llama2 with LoRA weights\n >>> print(lora_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): LoRALinear(\n (dropout): Dropout(p=0.0, inplace=False)\n \n", "type": "text" }, { - "text": "Result 5:\nDocument_id:0b7ba\nContent: ora_finetune_label>`.\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial `.\n\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\n\n.. note::\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\n\nWe can also add :ref:`command-line overrides ` as needed, e.g.\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n checkpointer.checkpoint_dir= \\\n tokenizer.path=/tokenizer.model \\\n checkpointer.output_dir=\n\nThis will load the Llama3-8B-Instruct checkpoint and tokenizer from ```` used in the :ref:`tune download ` command above,\nthen save a final checkpoint in the same directory following the original format. For more details on the\ncheckpoint formats supported in torchtune, see our :ref:`checkpointing deep-dive `.\n\n.. note::\n To see the full set of configurable parameters for this (and other) configs we can use :ref:`tune cp ` to copy (and modify)\n the default config. :ref:`tune cp ` can be used with recipe scripts too, in case you want to make more custom changes\n that cannot be achieved by directly modifying existing configurable parameters. For more on :ref:`tune cp ` see the section on\n :ref:`modifying configs ` in our \":ref:`finetune_llama_label`\" tutorial.\n\nOnce training is complete, the model checkpoints will be saved and their locations will be logged. 
For\nLoRA fine-tuning, the final checkpoint will contain the merged weights, and a copy of just the (much smaller) LoRA weights\nwill\n", + "text": "Result 5:\nDocument_id:9dcb7\nContent: ora_finetune_label>`.\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial `.\n\nLet's take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\n\n.. note::\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\n\nWe can also add :ref:`command-line overrides ` as needed, e.g.\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n checkpointer.checkpoint_dir= \\\n tokenizer.path=/tokenizer.model \\\n checkpointer.output_dir=\n\nThis will load the Llama3-8B-Instruct checkpoint and tokenizer from ```` used in the :ref:`tune download ` command above,\nthen save a final checkpoint in the same directory following the original format. For more details on the\ncheckpoint formats supported in torchtune, see our :ref:`checkpointing deep-dive `.\n\n.. note::\n To see the full set of configurable parameters for this (and other) configs we can use :ref:`tune cp ` to copy (and modify)\n the default config. :ref:`tune cp ` can be used with recipe scripts too, in case you want to make more custom changes\n that cannot be achieved by directly modifying existing configurable parameters. For more on :ref:`tune cp ` see the section on\n :ref:`modifying configs ` in our \":ref:`finetune_llama_label`\" tutorial.\n\nOnce training is complete, the model checkpoints will be saved and their locations will be logged. For\nLoRA fine-tuning, the final checkpoint will contain the merged weights, and a copy of just the (much smaller) LoRA weights\nwill\n", "type": "text" }, { @@ -117,11 +126,11 @@ "error_message": null, "metadata": { "document_ids": [ - "606ad61f-350d-46ba-8b8d-87d78e3d23f7", - "606ad61f-350d-46ba-8b8d-87d78e3d23f7", - "e37c3510-37ee-479d-abae-6721363c3db3", - "606ad61f-350d-46ba-8b8d-87d78e3d23f7", - "0b7babf3-9483-45d0-ae22-74c914d8cdbc" + "cbc884b1-9d88-4d5c-aff4-7a4b3a56618c", + "cbc884b1-9d88-4d5c-aff4-7a4b3a56618c", + "8892b092-6394-471e-b143-a23c6cc374f8", + "cbc884b1-9d88-4d5c-aff4-7a4b3a56618c", + "9dcb747d-0627-40cc-a23c-0bee2b6b05af" ] } } @@ -289,23 +298,23 @@ "type": "text" }, { - "text": "Result 1:\nDocument_id:c4b2d\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\nlook like so:\n\n.. 
code-block:: python\n\n from torchtune.datasets import chat_dataset\n from torchtune.models.llama3 import llama3_tokenizer\n\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\n ds = chat_dataset(\n tokenizer=tokenizer,\n source=\"json\",\n data_files=\"data/my_data.json\",\n split=\"train\",\n conversation_column=\"dialogue\",\n conversation_style=\"sharegpt\",\n )\n\n.. code-block:: yaml\n\n # In config\n tokenizer:\n _component_: torchtune.models.llama3.llama3_tokenizer\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\n\n dataset:\n _component_: torchtune.datasets.chat_dataset\n source: json\n data_files: data/my_data.json\n split: train\n conversation_column: dialogue\n conversation_style: sharegpt\n\n.. note::\n You can pass in any keyword argument for `load_dataset `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. 
note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `.\n.. .. _glossary_fsdp2:\n\n", + "text": "Result 3:\nDocument_id:fd0f6\nContent: ` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. _glossary_fsdp2:\n\n", "type": "text" }, { - "text": "Result 4:\nDocument_id:606ad\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. 
This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n",
"type": "text"
},
{
"text": "Result 5:\nDocument_id:fd0f6\nContent: etune\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.use_dora=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.llama3.lora_llama3_8b\n use_dora: True\n\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize_base=True`` to reap\neven more memory savings!\n\n.. 
code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\n model.lora_rank=16 \\\n model.lora_alpha=32 \\\n model.use_dora=True \\\n model.quantize_base=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.llama3.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\n lora_rank: 16\n lora_alpha: 32\n use_dora: True\n quantize_base: True\n\n\n.. note::\n\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. 
_glossary_fsdp2:\n\n", "type": "text" }, { @@ -317,11 +326,11 @@ "error_message": null, "metadata": { "document_ids": [ - "c4b2d1f8-ea4d-44f9-b375-ea97dba3ebcb", - "606ad61f-350d-46ba-8b8d-87d78e3d23f7", - "e37c3510-37ee-479d-abae-6721363c3db3", - "606ad61f-350d-46ba-8b8d-87d78e3d23f7", - "e37c3510-37ee-479d-abae-6721363c3db3" + "3e3a05a7-23d4-461e-a304-8aa7cb35a4f5", + "7da0c755-7ffa-4c1a-9ab0-cfdda7cce00f", + "fd0f6ee9-15d2-43b3-8500-25bc5bdfd365", + "7da0c755-7ffa-4c1a-9ab0-cfdda7cce00f", + "fd0f6ee9-15d2-43b3-8500-25bc5bdfd365" ] } } @@ -329,10 +338,56 @@ "()_[('kwargs', {'session_id': '', 'query': 'current CEO of Meta'}), ('tool_name', 'web_search')]": { "type": "value", "value": { - "content": "{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\u2018Boz\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\u2019s finance and facilities team to keep track of the company\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\u2019s AR/VR organization, Reality Labs. 
Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"The 11 People Running Meta's $1 Trillion Social Media and ... - Observer\", \"url\": \"https://observer.com/2024/01/meta-facebook-top-executives/\", \"content\": \"He returned after just a year, saying Facebook is \\u201cthe best place for me to roll up my sleeves and dig in to help.\\u201d\\u00a0As Meta\\u2019s chief product officer, Cox is currently focusing on Meta\\u2019s A.I. initiatives, most recently undertaking Meta\\u2019s Fundamental AI Research (FAIR) team, led by Yann LeCun and Joelle Pineau. Cathcart started at Meta in 2010 as vice president of product management, notably leading the ads initiative of Facebook\\u2019s News Feed product. Filed Under: Business, Social Media, Media, Technology, Big Tech, Will Cathcart, Dave Wehner, Tom Alison, Erin Egan, Andrew Mccollum, Javier Olivan, Andrew Bosworth, Susan Li, Dustin Moskovitz, Chris Cox, Adam Mosseri, Yann LeCun, Eduardo Saverin, Chris Hughes, Leadership, Mark Zuckerberg, Facebook, Instagram, Meta\", \"score\": 0.7591723, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Forbes\", \"url\": \"https://www.forbes.com/profile/mark-zuckerberg/\", \"content\": \"Meta CEO Mark Zuckerberg \\u201cloved\\u201d an image on Facebook known as \\\"Challah Horse\\\" that happens to be AI-generated, highlighting the amount of AI spam on the platform. ### Meta Donates $1 Million To Trump\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President Elect Meta has donated $1 million to President-elect Donald Trump\\u2019s inaugural fund, the company confirmed to various news outlets on Wednesday, a move that comes just weeks after its CEO Mark Zuckerberg met with Trump at his Mar-a-Lago residence in an apparent bid to mend years of strained ties. 
### Meta Donates $1 Million To Trump\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President-Elect Read the full profile on Forbes: https://www.forbes.com/sites/kerryadolan/2023/09/26/mark-gets-meta-zuckerberg-talks-ai-and-that-musk-mma-fight-thats-never-going-to-happen/?sh=671046e73037\", \"score\": 0.6410185, \"raw_content\": null}]}", + "content": "{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\u2018Boz\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\u2019s finance and facilities team to keep track of the company\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. 
Meta\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"Meta to spend up to $65 billion this year to power AI goals, Zuckerberg ...\", \"url\": \"https://www.reuters.com/technology/meta-invest-up-65-bln-capital-expenditure-this-year-2025-01-24/\", \"content\": \"Meta Platforms plans to spend as much as $65 billion this year to expand its AI infrastructure, CEO Mark Zuckerberg said on Friday, aiming to bolster the company's position against rivals OpenAI\", \"score\": 0.73914057, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Forbes\", \"url\": \"https://www.forbes.com/profile/mark-zuckerberg/\", \"content\": \"Meta CEO Mark Zuckerberg \\u201cloved\\u201d an image on Facebook known as \\\"Challah Horse\\\" that happens to be AI-generated, highlighting the amount of AI spam on the platform. ### Meta Donates $1 Million To Trump\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President Elect Meta has donated $1 million to President-elect Donald Trump\\u2019s inaugural fund, the company confirmed to various news outlets on Wednesday, a move that comes just weeks after its CEO Mark Zuckerberg met with Trump at his Mar-a-Lago residence in an apparent bid to mend years of strained ties. ### Meta Donates $1 Million To Trump\\u2019s Inaugural Fund Weeks After Mark Zuckerberg Met President-Elect Read the full profile on Forbes: https://www.forbes.com/sites/kerryadolan/2023/09/26/mark-gets-meta-zuckerberg-talks-ai-and-that-musk-mma-fight-thats-never-going-to-happen/?sh=671046e73037\", \"score\": 0.6410185, \"raw_content\": null}]}", "error_code": null, "error_message": null, "metadata": null } + }, + "()_[('kwargs', {'session_id': '', 'query': 'using LoRA in Torchtune', 'vector_db_ids': ['vector_db_']}), ('tool_name', 'knowledge_search')]": { + "type": "value", + "value": { + "content": [ + { + "text": "knowledge_search tool found 5 chunks:\nBEGIN of knowledge_search tool results.\n", + "type": "text" + }, + { + "text": "Result 1:\nDocument_id:7da0c\nContent: .. _lora_finetune_label:\n\n============================\nFine-Tuning Llama2 with LoRA\n============================\n\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. 
LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\n See :ref:`below` for how to do this.\n\nLet's inspect each of these models a bit more closely.\n\n.. code-block:: bash\n\n # Print the first layer's self-attention in the usual Llama2 model\n >>> print(base_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (pos_embeddings): RotaryPositionalEmbeddings()\n )\n\n # Print the same for Llama2 with LoRA weights\n >>> print(lora_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): LoRALinear(\n (dropout): Dropout(p=0.0, inplace=False)\n \n", + "type": "text" + }, + { + "text": "Result 3:\nDocument_id:7da0c\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "type": "text" + }, + { + "text": "Result 4:\nDocument_id:7da0c\nContent: from our Llama2\nmodel without any wrappers or custom checkpoint conversion logic.\n\n.. 
code-block:: python\n\n # Assuming that base_model already has the pretrained Llama2 weights,\n # this will directly load them into your LoRA model without any conversion necessary.\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\n\n.. note::\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\n :func:`validate_missing_and_unexpected_for_lora() `.\n\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\n\n.. _setting_trainable_params:\n\n.. code-block:: python\n\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n\n # Fetch all params from the model that are associated with LoRA.\n lora_params = get_adapter_params(lora_model)\n\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\n set_trainable_params(lora_model, lora_params)\n\n # Print the total number of parameters\n total_params = sum([p.numel() for p in lora_model.parameters()])\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\n print(\n f\"\"\"\n {total_params} total params,\n {trainable_params} trainable params,\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\n \"\"\"\n )\n\n 6742609920 total params,\n 4194304 trainable params,\n 0.06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.
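\n\nTo make the behaviour of :code:`set_trainable_params` concrete, here is a rough, framework-free sketch of the freeze/unfreeze logic described above (an illustration only, not torchtune's actual implementation, which works off the adapter parameters returned by :code:`get_adapter_params` rather than parameter names):\n\n.. code-block:: python\n\n import torch.nn as nn\n\n def freeze_all_but_adapters(model: nn.Module, adapter_tag: str = \"lora\") -> None:\n # Enable gradients only for parameters whose names mark them as\n # adapter weights; every other parameter stays frozen.\n for name, param in model.named_parameters():\n param.requires_grad = adapter_tag in name\n\nAfter this, an optimizer built from ``filter(lambda p: p.requires_grad, model.parameters())`` only allocates state for the low-rank adapter weights, which is exactly where the optimizer-state memory savings discussed earlier come from.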