From 6cf79437b37a4ec0ddb2c27c9a882d0dc28ae57e Mon Sep 17 00:00:00 2001 From: ehhuang Date: Wed, 5 Mar 2025 14:30:27 -0800 Subject: [PATCH] feat: support ClientTool output metadata (#1426) # Summary: Client-side change in https://github.com/meta-llama/llama-stack-client-python/pull/180 Changes the resume_turn API to accept `ToolResponse` instead of `ToolResponseMessage`: 1. `ToolResponse` contains `metadata`. 2. `ToolResponseMessage` is a concept for model inputs; here we are just submitting the outputs of tool execution. # Test Plan: Ran integration tests with a newly added test that uses a client tool with metadata: LLAMA_STACK_CONFIG=fireworks pytest -s -v tests/integration/agents/test_agents.py --safety-shield meta-llama/Llama-Guard-3-8B --record-responses --- docs/_static/llama-stack-spec.html | 20 +- docs/_static/llama-stack-spec.yaml | 13 +- llama_stack/apis/agents/agents.py | 5 +- .../agents/meta_reference/agent_instance.py | 25 +- .../inline/agents/meta_reference/agents.py | 3 +- tests/integration/agents/test_agents.py | 29 +- .../recorded_responses/chat_completion.json | 5941 +++++++++++------ .../recorded_responses/chat_completion.pickle | Bin 620451 -> 888589 bytes .../recorded_responses/invoke_tool.json | 120 +- .../recorded_responses/invoke_tool.pickle | Bin 53549 -> 67524 bytes 10 files changed, 3984 insertions(+), 2172 deletions(-) diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html index 68f27ef3b..1a8169090 100644 --- a/docs/_static/llama-stack-spec.html +++ b/docs/_static/llama-stack-spec.html @@ -9321,11 +9321,21 @@ "type": "object", "properties": { "tool_responses": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolResponseMessage" - }, - "description": "The tool call responses to resume the turn with." + "oneOf": [ + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolResponse" + } + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolResponseMessage" + } + } + ], + "description": "The tool call responses to resume the turn with. NOTE: ToolResponseMessage will be deprecated. Use ToolResponse." }, "stream": { "type": "boolean", diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml index bb994b0c5..d6001c00d 100644 --- a/docs/_static/llama-stack-spec.yaml +++ b/docs/_static/llama-stack-spec.yaml @@ -6287,11 +6287,16 @@ components: type: object properties: tool_responses: - type: array - items: - $ref: '#/components/schemas/ToolResponseMessage' + oneOf: + - type: array + items: + $ref: '#/components/schemas/ToolResponse' + - type: array + items: + $ref: '#/components/schemas/ToolResponseMessage' description: >- - The tool call responses to resume the turn with. + The tool call responses to resume the turn with. NOTE: ToolResponseMessage + will be deprecated. Use ToolResponse. stream: type: boolean description: Whether to stream the response. 
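To make the schema change above concrete, here is a minimal sketch of the two `tool_responses` payload shapes the resume API now accepts. It is illustrative only: the constructors and field names are taken from the agent_instance.py hunk below, and the call_id value is a placeholder.

```python
# Minimal sketch (not part of the patch): the two payload shapes that
# resume_agent_turn accepts after this change. Field names follow the
# agent_instance.py diff; the call_id value is a placeholder.
from llama_stack.apis.inference import ToolResponse, ToolResponseMessage

# New-style payload: carries the client tool's output metadata.
tool_response = ToolResponse(
    call_id="call-0",
    tool_name="get_boiling_point_with_metadata",
    content="-100",
    metadata={"source": "https://www.google.com"},
)

# Legacy payload (to be deprecated): the model-input view of the same
# result; it has no metadata field, which is why ToolResponse is preferred.
tool_response_message = ToolResponseMessage(
    call_id=tool_response.call_id,
    tool_name=tool_response.tool_name,
    content=tool_response.content,
)
```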
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py index def61b617..dbe35ac09 100644 --- a/llama_stack/apis/agents/agents.py +++ b/llama_stack/apis/agents/agents.py @@ -353,7 +353,7 @@ class AgentTurnResumeRequest(BaseModel): agent_id: str session_id: str turn_id: str - tool_responses: List[ToolResponseMessage] + tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]] stream: Optional[bool] = False @@ -432,7 +432,7 @@ class Agents(Protocol): agent_id: str, session_id: str, turn_id: str, - tool_responses: List[ToolResponseMessage], + tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]], stream: Optional[bool] = False, ) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]: """Resume an agent turn with executed tool call responses. @@ -443,6 +443,7 @@ class Agents(Protocol): :param session_id: The ID of the session to resume. :param turn_id: The ID of the turn to resume. :param tool_responses: The tool call responses to resume the turn with. + NOTE: ToolResponseMessage will be deprecated. Use ToolResponse. :param stream: Whether to stream the response. :returns: A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects. """ diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py index f868bee2c..720e73503 100644 --- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py +++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py @@ -216,13 +216,25 @@ class ChatAgent(ShieldRunnerMixin): steps = [] messages = await self.get_messages_from_turns(turns) if is_resume: - messages.extend(request.tool_responses) + if isinstance(request.tool_responses[0], ToolResponseMessage): + tool_response_messages = request.tool_responses + tool_responses = [ + ToolResponse(call_id=x.call_id, tool_name=x.tool_name, content=x.content) + for x in request.tool_responses + ] + else: + tool_response_messages = [ + ToolResponseMessage(call_id=x.call_id, tool_name=x.tool_name, content=x.content) + for x in request.tool_responses + ] + tool_responses = request.tool_responses + messages.extend(tool_response_messages) last_turn = turns[-1] last_turn_messages = self.turn_to_messages(last_turn) last_turn_messages = [ x for x in last_turn_messages if isinstance(x, UserMessage) or isinstance(x, ToolResponseMessage) ] - last_turn_messages.extend(request.tool_responses) + last_turn_messages.extend(tool_response_messages) # get steps from the turn steps = last_turn.steps @@ -238,14 +250,7 @@ class ChatAgent(ShieldRunnerMixin): step_id=(in_progress_tool_call_step.step_id if in_progress_tool_call_step else str(uuid.uuid4())), turn_id=request.turn_id, tool_calls=(in_progress_tool_call_step.tool_calls if in_progress_tool_call_step else []), - tool_responses=[ - ToolResponse( - call_id=x.call_id, - tool_name=x.tool_name, - content=x.content, - ) - for x in request.tool_responses - ], + tool_responses=tool_responses, completed_at=now, started_at=(in_progress_tool_call_step.started_at if in_progress_tool_call_step else now), ) diff --git a/llama_stack/providers/inline/agents/meta_reference/agents.py b/llama_stack/providers/inline/agents/meta_reference/agents.py index db33bca4a..a46fa8eb7 100644 --- a/llama_stack/providers/inline/agents/meta_reference/agents.py +++ b/llama_stack/providers/inline/agents/meta_reference/agents.py @@ -27,6 +27,7 @@ from llama_stack.apis.agents import ( from 
llama_stack.apis.inference import ( Inference, ToolConfig, + ToolResponse, ToolResponseMessage, UserMessage, ) @@ -168,7 +169,7 @@ class MetaReferenceAgentsImpl(Agents): agent_id: str, session_id: str, turn_id: str, - tool_responses: List[ToolResponseMessage], + tool_responses: Union[List[ToolResponse], List[ToolResponseMessage]], stream: Optional[bool] = False, ) -> AsyncGenerator: request = AgentTurnResumeRequest( diff --git a/tests/integration/agents/test_agents.py b/tests/integration/agents/test_agents.py index f221582c8..277b37448 100644 --- a/tests/integration/agents/test_agents.py +++ b/tests/integration/agents/test_agents.py @@ -4,6 +4,7 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. +from typing import Any, Dict from uuid import uuid4 import pytest @@ -40,6 +41,25 @@ def get_boiling_point(liquid_name: str, celcius: bool = True) -> int: return -1 +@client_tool +def get_boiling_point_with_metadata(liquid_name: str, celcius: bool = True) -> Dict[str, Any]: + """ + Returns the boiling point of a liquid in Celcius or Fahrenheit + + :param liquid_name: The name of the liquid + :param celcius: Whether to return the boiling point in Celcius + :return: The boiling point of the liquid in Celcius or Fahrenheit + """ + if liquid_name.lower() == "polyjuice": + if celcius: + temp = -100 + else: + temp = -212 + else: + temp = -1 + return {"content": temp, "metadata": {"source": "https://www.google.com"}} + + @pytest.fixture(scope="session") def agent_config(llama_stack_client_with_mocked_inference, text_model_id): available_shields = [shield.identifier for shield in llama_stack_client_with_mocked_inference.shields.list()] @@ -551,8 +571,9 @@ def test_rag_and_code_agent(llama_stack_client_with_mocked_inference, agent_conf assert expected_kw in response.output_message.content.lower() -def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_config): - client_tool = get_boiling_point +@pytest.mark.parametrize("client_tools", [(get_boiling_point, False), (get_boiling_point_with_metadata, True)]) +def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_config, client_tools): + client_tool, expects_metadata = client_tools agent_config = { **agent_config, "input_shields": [], @@ -577,7 +598,9 @@ def test_create_turn_response(llama_stack_client_with_mocked_inference, agent_co assert len(steps) == 3 assert steps[0].step_type == "inference" assert steps[1].step_type == "tool_execution" - assert steps[1].tool_calls[0].tool_name == "get_boiling_point" + assert steps[1].tool_calls[0].tool_name.startswith("get_boiling_point") + if expects_metadata: + assert steps[1].tool_responses[0].metadata["source"] == "https://www.google.com" assert steps[2].step_type == "inference" last_step_completed_at = None diff --git a/tests/integration/fixtures/recorded_responses/chat_completion.json b/tests/integration/fixtures/recorded_responses/chat_completion.json index 4b0d9b1c1..9e70e3df0 100644 --- a/tests/integration/fixtures/recorded_responses/chat_completion.json +++ b/tests/integration/fixtures/recorded_responses/chat_completion.json @@ -102,7 +102,22 @@ { "event": { "delta": { - "text": " boiling point of polyjuice is -100 degrees Fahrenheit.", + "text": " boiling point of polyjuice is -100 degrees", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { 
+ "delta": { + "text": " Fahrenheit.", "type": "text" }, "event_type": { @@ -312,7 +327,7 @@ { "event": { "delta": { - "text": "type\": \"function\", \"name\": \"get_boiling_point", + "text": "type\": \"function\", \"name\": \"", "type": "text" }, "event_type": { @@ -327,7 +342,7 @@ { "event": { "delta": { - "text": "\", \"parameters\": {\"liquid_name\": \"polyjuice\",", + "text": "get_boiling_point\", \"parameters", "type": "text" }, "event_type": { @@ -342,7 +357,22 @@ { "event": { "delta": { - "text": " \"celcius\": \"false\"}}", + "text": "\": {\"liquid_name\": \"polyjuice\", \"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "celcius\": \"false\"}}", "type": "text" }, "event_type": { @@ -366,7 +396,7 @@ "celcius": "false", "liquid_name": "polyjuice" }, - "call_id": "b9ded2e6-bef1-40bc-8a5b-a8c1018d0ba2", + "call_id": "00c0968b-d7d4-450d-a6ff-03d64ae9f772", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -590,7 +620,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"", + "tool_call": "{\"type\": \"function\", \"name\": \"get_boiling", "type": "tool_call" }, "event_type": { @@ -609,7 +639,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "name\": \"get_boiling_point\",", + "tool_call": "_point\", \"parameters\": {\"liquid_name\": \"polyjuice", "type": "tool_call" }, "event_type": { @@ -628,45 +658,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " \"parameters\": {\"liquid_name\": \"polyju", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": "ice\", \"celcius\":", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": " \"true\"}}", + "tool_call": "\", \"celcius\": \"true\"}}", "type": "tool_call" }, "event_type": { @@ -690,7 +682,7 @@ "celcius": "true", "liquid_name": "polyjuice" }, - "call_id": "98c011b5-f5de-416e-9a06-c2e3d0fa5581", + "call_id": "eda85f20-da80-4e11-a0e4-3849159ae70f", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -831,7 +823,7 @@ { "event": { "delta": { - "text": " boiling point of polyjuice is -100\u00b0C", + "text": " boiling point of polyjuice is -100\u00b0C.", "type": "text" }, "event_type": { @@ -846,7 +838,60 @@ { "event": { "delta": { - "text": ".", + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Call get_boiling_point and answer What is the boiling point of polyjuice?', context=None), CompletionMessage(role='assistant', content='', 
stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='get_boiling_point_with_metadata', arguments={'liquid_name': 'polyjuice', 'celcius': 'true'})]), ToolResponseMessage(role='tool', call_id='', tool_name='get_boiling_point_with_metadata', content='-100')])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='get_boiling_point_with_metadata', description='Returns the boiling point of a liquid in Celcius or Fahrenheit', parameters={'liquid_name': ToolParamDefinition(param_type='string', description='The name of the liquid', required=True, default=None), 'celcius': ToolParamDefinition(param_type='bool', description='Whether to return the boiling point in Celcius', required=False, default=True)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " boiling point of polyjuice is -100\u00b0C.", "type": "text" }, "event_type": { @@ -1103,7 +1148,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\": {\"liquid_name\": \"polyjuice\", \"celci", + "tool_call": "\": {\"liquid_name\": \"poly", "type": "tool_call" }, "event_type": { @@ -1122,7 +1167,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "us\": \"true\"}}", + "tool_call": "juice\", \"celcius\": \"true\"}}", "type": "tool_call" }, "event_type": { @@ -1146,7 +1191,7 @@ "celcius": "true", "liquid_name": "polyjuice" }, - "call_id": "15326d2e-d284-4c7e-86b1-5bfbba74a914", + "call_id": "8b8b3ad5-5e47-4f56-a823-e2d82fa72d9c", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -1184,6 +1229,168 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Call get_boiling_point and answer What is the boiling point of polyjuice?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='get_boiling_point_with_metadata', description='Returns the boiling point of a liquid in Celcius or Fahrenheit', parameters={'liquid_name': ToolParamDefinition(param_type='string', description='The name of the liquid', required=True, default=None), 'celcius': ToolParamDefinition(param_type='bool', description='Whether to return the boiling point in Celcius', required=False, default=True)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + 
"metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "{\"type\": \"function\", \"name\": \"", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "get_boiling_point_with_metadata\", \"", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "parameters\": {\"liquid_name\": \"poly", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "juice\", \"celcius\": \"true\"}}", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "celcius": "true", + "liquid_name": "polyjuice" + }, + "call_id": "3438f2d7-895f-4a94-8e1f-c2f01860ce88", + "tool_name": "get_boiling_point_with_metadata" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Give me a sentence that contains the word: hello', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [])]": { "chunks": [ { @@ -1219,7 +1426,22 @@ { "event": { "delta": { - "text": " customer smiled and said \"hello\" to the friendly store clerk.", + "text": " customer smiled and said \"hello\" to the friendly store clerk", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + 
"logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".", "type": "text" }, "event_type": { @@ -1673,7 +1895,7 @@ { "event": { "delta": { - "text": " error message indicates that the `bwrap.core` module is", + "text": " error message indicates that the `b", "type": "text" }, "event_type": { @@ -1688,7 +1910,7 @@ { "event": { "delta": { - "text": " not found. This is likely because the", + "text": "wrap.core` module is not found", "type": "text" }, "event_type": { @@ -1703,7 +1925,7 @@ { "event": { "delta": { - "text": " `bwrap` package is not installed. To fix this,", + "text": ". This is likely because the `", "type": "text" }, "event_type": { @@ -1718,7 +1940,7 @@ { "event": { "delta": { - "text": " you can install the `bwrap` package", + "text": "bwrap` package is not installed", "type": "text" }, "event_type": { @@ -1733,7 +1955,7 @@ { "event": { "delta": { - "text": " using pip:\n\n```\npip install bwrap", + "text": ". To fix this, you can install the", "type": "text" }, "event_type": { @@ -1748,7 +1970,7 @@ { "event": { "delta": { - "text": "\n```\n\nHowever, if you don't", + "text": " `bwrap` package using pip:\n\n```\npip install", "type": "text" }, "event_type": { @@ -1763,7 +1985,7 @@ { "event": { "delta": { - "text": " have permission to install packages, you can use", + "text": " bwrap\n```\n\nHowever, if", "type": "text" }, "event_type": { @@ -1778,7 +2000,7 @@ { "event": { "delta": { - "text": " the `knowledge_search` function to get information about", + "text": " you don't have the `bwrap` package installed,", "type": "text" }, "event_type": { @@ -1793,7 +2015,7 @@ { "event": { "delta": { - "text": " the CSV file instead:\n\n```\n{\n ", + "text": " you can't use the `", "type": "text" }, "event_type": { @@ -1808,7 +2030,7 @@ { "event": { "delta": { - "text": " \"type\": \"function\",\n \"name\": \"", + "text": "b", "type": "text" }, "event_type": { @@ -1823,7 +2045,7 @@ { "event": { "delta": { - "text": "knowledge_search\",\n \"parameters\": {\n", + "text": "wrap.core` module.", "type": "text" }, "event_type": { @@ -1838,7 +2060,7 @@ { "event": { "delta": { - "text": " \"query\": \"describe a csv file\"\n }\n", + "text": " In this case, you can", "type": "text" }, "event_type": { @@ -1853,7 +2075,7 @@ { "event": { "delta": { - "text": "}\n```\n\nThis will return a description of", + "text": " try to load the CSV file using the `p", "type": "text" }, "event_type": { @@ -1868,7 +2090,142 @@ { "event": { "delta": { - "text": " the CSV file.", + "text": "andas` library directly.\n\nHere is the corrected code:\n\n```", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "python\nimport pandas as pd\ndf = pd.read_csv", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "(\"/var/folders/cz/vyh7y1d11x", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "g881lsxsshnc5c000", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + 
"stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "0gn/T/tmp8d5c", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "8spc/zOZSE5", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "zcinflation.csv\")\nprint(df.head())\nprint(df.info())\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "print(df.describe())\n```\n\nThis code will", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " load the CSV file and print the first few rows, information about", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the data, and summary statistics.", "type": "text" }, "event_type": { @@ -2162,7 +2519,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "import pandas as pd\ndf = pd.read", + "tool_call": "import pandas as pd\ndf = pd.read_csv(\"/var/folders/c", "type": "tool_call" }, "event_type": { @@ -2181,7 +2538,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_csv(\"/var/folders/cz/vyh7y1d11", + "tool_call": "z/vyh7y1d11xg881lsxsshnc", "type": "tool_call" }, "event_type": { @@ -2200,7 +2557,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "xg881lsxsshnc5c0000gn/T/tmpc_", + "tool_call": "5c0000gn/T/tmp8d5c8spc", "type": "tool_call" }, "event_type": { @@ -2219,7 +2576,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "ozqkdv/GwQ6oJB4inflation", + "tool_call": "/zOZSE5zcinflation.csv\")\nprint(df.head())\nprint", "type": "tool_call" }, "event_type": { @@ -2238,26 +2595,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".csv\")\nprint(df.head())\nprint(df.info())\nprint(df.describe", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": "())", + "tool_call": "(df.info())\nprint(df.describe())", "type": "tool_call" }, "event_type": { @@ -2278,9 +2616,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmpc_ozqkdv/GwQ6oJB4inflation.csv\")\nprint(df.head())\nprint(df.info())\nprint(df.describe())" + "code": "import pandas as pd\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmp8d5c8spc/zOZSE5zcinflation.csv\")\nprint(df.head())\nprint(df.info())\nprint(df.describe())" }, - "call_id": "551648f3-c903-44ef-84ae-0f1dcbaaa68f", + "call_id": "09b4d9a1-8ee4-4de4-a5a3-91cad464e668", 
"tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -2523,6 +2861,592 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')]), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport code_interpreter\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print the data types of each column\\nprint(df.dtypes)\\n\\n# Print the summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport code_interpreter\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print the data types of each column\\nprint(df.dtypes)\\n\\n# Print the summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'m unable to access the file you provided. 
However, I can", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " suggest a general approach to describe a CSV file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".\n\nYou can use the pandas", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " library in Python to load and inspect the CSV", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " file. Here's a general outline of the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " steps you can follow:\n\n1. Import the pandas library:", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " `import pandas as pd`\n2. Load the CSV file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " into a dataframe: `df = pd.read_csv('file.csv", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "')`\n3. Print the first few rows", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " of the dataframe: `print(df.head())`\n4", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". Print the data types of each column", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ": `print(df.dtypes)`\n5", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". 
Print the summary statistics of the dataframe:", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " `print(df.describe())`\n\nThis will give you a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " general idea of the structure and content of the CSV file.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " If you need more specific information, you can use other pandas functions", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to inspect the dataframe.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')]), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport code_interpreter\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print the data types of each column\\nprint(df.dtypes)\\n\\n# Print the summary statistics of the dataframe\\nprint(df.describe())'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "import pandas as pd\nimport code_interpreter\n\n#", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " Load the CSV file\ndf = pd.read_csv(\"/", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "var/folders/cz/vyh7y", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "1d11xg881lsxsshnc5c000", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "0gn/T/tmpjxdo91ce/g1r3", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "WGZRinflation.csv\")\n\n# Print the first few rows of", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " the dataframe\nprint(df.head())\n\n#", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " Print the data types of each column", + "type": 
"tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "\nprint(df.dtypes)\n\n# Print the summary statistics", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " of the dataframe\nprint(df.describe())", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport code_interpreter\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmpjxdo91ce/g1r3WGZRinflation.csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head())\n\n# Print the data types of each column\nprint(df.dtypes)\n\n# Print the summary statistics of the dataframe\nprint(df.describe())" + }, + "call_id": "fbc1b233-207f-4f7b-8298-8d72a86d6f2c", + "tool_name": { + "__enum__": "BuiltinTool", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv file, can you describe it?', context=None), ToolResponseMessage(role='tool', call_id='', tool_name=, content=[TextContentItem(type='text', text='# User provided a file accessible to you at \"\"\\nYou can use code_interpreter to load and inspect it.')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { @@ -2566,7 +3490,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "import pandas as pd\ndf = pd.read", + "tool_call": "import pandas as pd\ndf = pd.read_csv", "type": "tool_call" }, "event_type": { @@ -2585,7 +3509,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_csv(\"/var/folders/cz/vyh", + "tool_call": "(\"/var/folders/cz/vyh7y1d11x", "type": "tool_call" }, "event_type": { @@ -2604,7 +3528,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "7y1d11xg881lsxsshnc5c", + "tool_call": "g881lsxsshnc5c0000gn/T", "type": "tool_call" }, "event_type": { @@ -2623,7 +3547,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "0000gn/T/tmpc_ozqkdv/Gw", + "tool_call": "/tmp8d5c8spc/zOZSE5zcin", "type": "tool_call" }, "event_type": { @@ -2642,26 +3566,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "Q6oJB4inflation.csv\")\n", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": "print(df.head())", + "tool_call": "flation.csv\")\nprint(df.head())", "type": "tool_call" }, "event_type": { @@ -2682,9 +3587,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmpc_ozqkdv/GwQ6oJB4inflation.csv\")\nprint(df.head())" + "code": "import pandas as pd\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmp8d5c8spc/zOZSE5zcinflation.csv\")\nprint(df.head())" }, - "call_id": "204b3ad9-ff20-4fab-a055-13da99874d88", + "call_id": "c19a0d1e-6b44-408f-9839-819436425778", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -2927,6 +3832,555 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If the file is too large to be uploaded, you can provide a sample of the file or the code you used to create the file. 
\\n\\nHere is an example of how you can describe a csv file using pandas:\\n\\n```\\nimport pandas as pd\\n# Load data\\ndf = pd.read_csv(\\'inflation.csv\\')\\n# Print the first 5 rows of the data\\nprint(df.head())\\n# Print the last 5 rows of the data\\nprint(df.tail())\\n# Print the summary statistics of the data\\nprint(df.describe())\\n# Print the data types of each column\\nprint(df.dtypes)\\n```\\n\\nThis will give you an idea of what the csv file contains.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv('inflation.csv')\\n\\n# Convert 'date' column to datetime\\ndf['date'] = pd.to_datetime(df['date'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\\n\\n# Plot the time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Average Inflation')\\nplt.grid(True)\\nplt.show()\"})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "This", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " code will create a line plot of the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " average yearly inflation over time. The x-axis", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " represents the year and the y-axis represents", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the average inflation. 
The plot will also", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " include a title, labels", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " for the x and y axes, and a grid to make it", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " easier to read.\n\nPlease note that you need to replace '", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "inflation.csv' with the actual path", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to your csv file. Also, this code assumes that the csv", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " file has a column named 'date' and", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " another column named 'inflation'. 
If your csv file has", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " different column names, you need to adjust the code accordingly.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If the file is too large to be uploaded, you can provide a sample of the file or the code you used to create the file. 
\\n\\nHere is an example of how you can describe a csv file using pandas:\\n\\n```\\nimport pandas as pd\\n# Load data\\ndf = pd.read_csv(\\'inflation.csv\\')\\n# Print the first 5 rows of the data\\nprint(df.head())\\n# Print the last 5 rows of the data\\nprint(df.tail())\\n# Print the summary statistics of the data\\nprint(df.describe())\\n# Print the data types of each column\\nprint(df.dtypes)\\n```\\n\\nThis will give you an idea of what the csv file contains.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " data\ndf = pd.read_csv('inflation.csv')\n\n#", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " Convert 'date' column to datetime\ndf['", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "date'] = pd.to_datetime(df", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "['date'])\n\n# Group by year and", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": 
{ + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " calculate average inflation\naverage_inflation =", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": " df.groupby(df['date'].dt.year)['inflation'].mean", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "()\n\n# Plot the time series\nplt.figure(figsize=(10,", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "6))\nplt.plot(average_inflation.index, average_inflation", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": ".values, marker='o')\nplt.title('Average Yearly In", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "flation')\nplt.xlabel('Year')\nplt.ylabel('Average In", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "flation')\nplt.grid(True)\nplt.show()", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load data\ndf = pd.read_csv('inflation.csv')\n\n# Convert 'date' column to datetime\ndf['date'] = pd.to_datetime(df['date'])\n\n# Group by year and calculate average inflation\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\n\n# Plot the time series\nplt.figure(figsize=(10,6))\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Average Inflation')\nplt.grid(True)\nplt.show()" + }, + "call_id": "6b6c11d8-75d5-4b34-b97b-ee523c7a8168", + "tool_name": { + "__enum__": "BuiltinTool", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": 
"ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If you are running this code in a notebook, you can use the `upload` button to upload the file. If you are running this code in a script, you need to provide the file path.\\n\\nHere is an example of how you can describe the csv file if you have it in the same directory as your script:\\n\\n```python\\nimport pandas as pd\\n\\n# Load data\\ndf = pd.read_csv(\\'inflation.csv\\')\\n\\n# Print summary of the data\\nprint(df.head()) # Print the first few rows of the data\\nprint(df.info()) # Print information about the data\\nprint(df.describe()) # Print summary statistics about the data\\n```\\n\\nThis will print the first few rows of the data, information about the data, and summary statistics about the data.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv('inflation.csv')\\n\\n# Convert date column to datetime\\ndf['date'] = pd.to_datetime(df['date'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\\n\\n# Plot time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Average Inflation')\\nplt.grid(True)\\nplt.show()\"})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), 
('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { @@ -4205,7 +5659,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If you are using a remote server, you can use the `requests` library to download the file and then load it into a pandas dataframe. \\n\\nHere is an example of how you can do it:\\n\\n```\\nimport pandas as pd\\nimport requests\\n\\n# Download the csv file\\nurl = \"https://example.com/your_file.csv\"\\nresponse = requests.get(url)\\n\\n# Load the csv file into a pandas dataframe\\ndf = pd.read_csv(response.content)\\n\\n# Print the description of the dataframe\\nprint(df.describe())\\n```\\n\\nPlease replace the `url` variable with the actual URL of your csv file. 
\\n\\nIf you are using a local file, you can simply use the `pd.read_csv()` function with the file path:\\n\\n```\\nimport pandas as pd\\n\\n# Load the csv file into a pandas dataframe\\ndf = pd.read_csv(\\'your_file.csv\\')\\n\\n# Print the description of the dataframe\\nprint(df.describe())\\n```\\n\\nPlease replace `\\'your_file.csv\\'` with the actual path to your csv file.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv(\"\")\\n\\n# Convert \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation[\\'Year\\'], average_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation Rate\\')\\nplt.grid(True)\\nplt.show()'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If you are using a remote server or a local machine, you can use the `pd.read_csv()` function to load the csv file. 
\\n\\nHere is an example:\\n\\n```python\\nimport pandas as pd\\n# Load data\\ndf = pd.read_csv(\\'inflation.csv\\')\\n# Print the first 5 rows of the dataframe\\nprint(df.head())\\n# Print the summary of the dataframe\\nprint(df.info())\\nprint(df.describe())\\n```\\n\\nThis will print the first 5 rows of the dataframe, the summary of the dataframe (including the index dtype and column count), and the description of the dataframe (including count, mean, std, min, 25%, 50%, 75%, max for each column).', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv('inflation.csv')\\n\\n# Convert 'date' column to datetime\\ndf['date'] = pd.to_datetime(df['date'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\\n\\n# Plot the time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Average Inflation')\\nplt.grid(True)\\nplt.show()\"})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\")])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -4225,7 +5679,7 @@ { "event": { "delta": { - "text": "It", + "text": "This", "type": "text" }, "event_type": { @@ -4240,7 +5694,7 @@ { "event": { "delta": { - "text": " seems that the file \"/var/f", + "text": " code will create a line plot of", "type": "text" }, "event_type": { @@ -4255,7 +5709,7 @@ { "event": { "delta": { - "text": "olders/cz/vyh7y", + "text": " the average yearly inflation over time. The x-axis", "type": "text" }, "event_type": { @@ -4270,7 +5724,7 @@ { "event": { "delta": { - "text": "1d11xg881lsx", + "text": " represents the year and the y-axis represents the average", "type": "text" }, "event_type": { @@ -4285,7 +5739,7 @@ { "event": { "delta": { - "text": "sshnc5c0000gn", + "text": " inflation. The plot also includes a title, labels for the x", "type": "text" }, "event_type": { @@ -4300,7 +5754,7 @@ { "event": { "delta": { - "text": "/T/tmpc_ozqkdv/EzGU", + "text": " and y axes, and a grid for", "type": "text" }, "event_type": { @@ -4315,7 +5769,7 @@ { "event": { "delta": { - "text": "QEnJinflation.csv\" does", + "text": " better visibility.\n\nPlease note that you need", "type": "text" }, "event_type": { @@ -4330,7 +5784,7 @@ { "event": { "delta": { - "text": " not exist. 
\n\nTo plot the average yearly inflation as a", + "text": " to replace 'inflation.csv' with the actual path to your", "type": "text" }, "event_type": { @@ -4345,7 +5799,7 @@ { "event": { "delta": { - "text": " time series, you need to provide the actual file path or", + "text": " csv file. Also, this code assumes that the 'date", "type": "text" }, "event_type": { @@ -4360,7 +5814,7 @@ { "event": { "delta": { - "text": " the file itself. If you are using a remote server,", + "text": "' column in your csv file is in a format that can be", "type": "text" }, "event_type": { @@ -4375,7 +5829,7 @@ { "event": { "delta": { - "text": " you can use the `requests` library to download the file", + "text": " parsed by pandas' `to_datetime` function. If your date", "type": "text" }, "event_type": { @@ -4390,7 +5844,7 @@ { "event": { "delta": { - "text": " and then load it into a pandas dataframe. \n\nHere", + "text": " column is in a different format, you may need to specify the", "type": "text" }, "event_type": { @@ -4405,502 +5859,7 @@ { "event": { "delta": { - "text": " is an example of how you can do it:\n\n```\nimport", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " pandas as pd\nimport matplotlib.pyplot as plt\nimport requests\n\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "# Download the csv file\nurl = \"https://example.com", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "/your_file.csv\"\nresponse = requests.get(url)\n\n# Load", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the csv file into a pandas dataframe\ndf = pd.read_csv", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "(response.content)\n\n# Convert 'Year' column to datetime\ndf", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "['Year'] = pd.to_datetime(df['Year'])\n\n# Group", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " by year and calculate average inflation\naverage_inflation = df.groupby", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "('Year')['Inflation'].mean().reset_index()\n\n# Plot", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - 
"logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " average yearly inflation as a time series\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "plt.figure(figsize=(10,6))\nplt.plot(average_in", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "flation['Year'], average_inflation['Inflation'], marker='", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "o')\nplt.title('Average Yearly Inflation')\nplt.xlabel", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "('Year')\nplt.ylabel('Inflation Rate')\nplt.grid(True", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": ")\nplt.show()\n```\n\nPlease replace the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " `url` variable with the actual URL of", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " your csv file. 
\n\nIf you", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " are using a local file, you can", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " simply use the `pd.read_csv()` function with the file", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " path:\n\n```\nimport pandas as pd\nimport matplotlib.pyplot as", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " plt\n\n# Load the csv file into a pandas dataframe\ndf", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " = pd.read_csv('your_file.csv')\n\n# Convert 'Year", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "' column to datetime\ndf['Year'] = pd.to_datetime", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "(df['Year'])\n\n# Group by", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " year and calculate average inflation\naverage_inflation = df.groupby('", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "Year')['Inflation'].mean().reset_index()\n\n# Plot average", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " yearly inflation as a time series\nplt.figure", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "(figsize=(10,6))\nplt.plot(average_inflation", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "['Year'], average_inflation['Inflation'], marker='o", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - 
}, - { - "event": { - "delta": { - "text": "')\nplt.title('Average Yearly Inflation')\nplt.xlabel('", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "Year')\nplt.ylabel('Inflation Rate')\nplt.grid(True)\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "plt.show()\n```\n\nPlease replace `'", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "your_file.csv'` with the actual", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " path to your csv file.", + "text": " format when calling `to_datetime`.", "type": "text" }, "event_type": { @@ -4933,7 +5892,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If you are using a remote server, you can use the `requests` library to download the file and then load it into a pandas dataframe. \\n\\nHere is an example of how you can do it:\\n\\n```\\nimport pandas as pd\\nimport requests\\n\\n# Download the csv file\\nurl = \"https://example.com/your_file.csv\"\\nresponse = requests.get(url)\\n\\n# Load the csv file into a pandas dataframe\\ndf = pd.read_csv(response.content)\\n\\n# Print the description of the dataframe\\nprint(df.describe())\\n```\\n\\nPlease replace the `url` variable with the actual URL of your csv file. 
\\n\\nIf you are using a local file, you can simply use the `pd.read_csv()` function with the file path:\\n\\n```\\nimport pandas as pd\\n\\n# Load the csv file into a pandas dataframe\\ndf = pd.read_csv(\\'your_file.csv\\')\\n\\n# Print the description of the dataframe\\nprint(df.describe())\\n```\\n\\nPlease replace `\\'your_file.csv\\'` with the actual path to your csv file.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='Here is a csv, can you describe it?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name=, arguments={'code': 'import pandas as pd\\n# Load data\\ndf = pd.read_csv(\"\")\\n# Rows\\nprint(\"Number of rows and columns in the data:\", df.shape)\\n# Columns\\nprint(\"Columns of the data are:\", len(df.columns))\\n# Column names\\nprint(\"Columns of the data are:\", df.columns)\\n# Column dtypes\\nprint(\"Datatype of the columns are:\", df.dtypes)'})]), ToolResponseMessage(role='tool', call_id='', tool_name=, content=\"completed\\n[stderr]\\nTraceback (most recent call last):\\n line 5, in \\n from bwrap.core import main\\nModuleNotFoundError: No module named 'bwrap.core'\\n[/stderr]\"), CompletionMessage(role='assistant', content='It seems that the file \"\" does not exist. \\n\\nTo describe the csv file, you need to provide the actual file path or the file itself. If you are using a remote server or a local machine, you can use the `pd.read_csv()` function to load the csv file. 
\\n\\nHere is an example:\\n\\n```python\\nimport pandas as pd\\n# Load data\\ndf = pd.read_csv(\\'inflation.csv\\')\\n# Print the first 5 rows of the dataframe\\nprint(df.head())\\n# Print the summary of the dataframe\\nprint(df.info())\\nprint(df.describe())\\n```\\n\\nThis will print the first 5 rows of the dataframe, the summary of the dataframe (including the index dtype and column count), and the description of the dataframe (including count, mean, std, min, 25%, 50%, 75%, max for each column).', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Plot average yearly inflation as a time series', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -4976,7 +5935,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load", + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load data", "type": "tool_call" }, "event_type": { @@ -4995,7 +5954,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " data\ndf = pd.read_csv(\"/var/folders/cz", + "tool_call": "\ndf = pd.read_csv('inflation.csv')\n\n#", "type": "tool_call" }, "event_type": { @@ -5014,7 +5973,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "/vyh7y1d11x", + "tool_call": " Convert 'date' column to datetime\ndf['date']", "type": "tool_call" }, "event_type": { @@ -5033,7 +5992,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "g881lsxsshnc5c0000gn/T/tmpc", + "tool_call": " = pd.to_datetime(df['date'])\n\n# Group by", "type": "tool_call" }, "event_type": { @@ -5052,7 +6011,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_ozqkdv/EzGUQEnJinflation", + "tool_call": " year and calculate average inflation\naverage_in", "type": "tool_call" }, "event_type": { @@ -5071,7 +6030,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".csv\")\n\n# Convert 'Year' column", + "tool_call": "flation = df.groupby(df['date'].dt.year", "type": "tool_call" }, "event_type": { @@ -5090,7 +6049,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " to datetime\ndf['Year'] = pd.to_datetime(df['", + "tool_call": ")['inflation'].mean()\n\n# Plot the time series", "type": "tool_call" }, "event_type": { @@ -5109,7 +6068,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "Year'])\n\n# Group by year and calculate average inflation\naverage_in", + "tool_call": "\nplt.figure(figsize=(10,6))\nplt.plot(average_in", "type": "tool_call" }, "event_type": { @@ -5128,7 +6087,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "flation = df.groupby('Year')['Inflation'].mean().reset", + "tool_call": "flation.index, average_inflation.values, marker", "type": "tool_call" }, "event_type": { @@ -5147,7 +6106,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_index()\n\n# Plot average yearly 
inflation as a time series\nplt", + "tool_call": "='o')\nplt.title('Average Yearly Inflation')\n", "type": "tool_call" }, "event_type": { @@ -5166,7 +6125,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".figure(figsize=(10,6))\nplt", + "tool_call": "plt.xlabel('Year')\nplt.ylabel('Average", "type": "tool_call" }, "event_type": { @@ -5185,64 +6144,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".plot(average_inflation['Year'], average_inflation['In", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": "flation'], marker='o')\nplt.title('Average Yearly Inflation')\n", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": "plt.xlabel('Year')\nplt.ylabel('Inflation Rate')\nplt.grid(True", - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "in_progress" - }, - "tool_call": ")\nplt.show()", + "tool_call": " Inflation')\nplt.grid(True)\nplt.show()", "type": "tool_call" }, "event_type": { @@ -5263,9 +6165,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load data\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmpc_ozqkdv/EzGUQEnJinflation.csv\")\n\n# Convert 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'])\n\n# Group by year and calculate average inflation\naverage_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot average yearly inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(average_inflation['Year'], average_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation Rate')\nplt.grid(True)\nplt.show()" + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load data\ndf = pd.read_csv('inflation.csv')\n\n# Convert 'date' column to datetime\ndf['date'] = pd.to_datetime(df['date'])\n\n# Group by year and calculate average inflation\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\n\n# Plot the time series\nplt.figure(figsize=(10,6))\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Average Inflation')\nplt.grid(True)\nplt.show()" }, - "call_id": "7e62f796-c5cd-4021-a651-b0048b75a083", + "call_id": "65691869-f741-420c-bb73-23a1f8c0d82a", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -5356,7 +6258,7 @@ { "event": { "delta": { - "text": "olders/cz/vyh7y1d11x", + "text": "olders/cz/vyh7y1d11", "type": "text" }, "event_type": { @@ -5371,7 +6273,7 @@ { "event": { "delta": { - "text": "g881lsxsshnc5c000", + "text": "xg881lsxsshnc5c0000gn/T/tmp8", "type": "text" }, "event_type": { @@ -5386,7 +6288,7 @@ { "event": { "delta": { 
- "text": "0gn/T/tmpc", + "text": "d5c8spc/Q8Y9qzV", "type": "text" }, "event_type": { @@ -5401,7 +6303,7 @@ { "event": { "delta": { - "text": "_ozqkdv/EzGUQEnJinflation", + "text": "Xinflation.csv\" does not exist", "type": "text" }, "event_type": { @@ -5416,7 +6318,7 @@ { "event": { "delta": { - "text": ".csv\" does not exist. \n\nTo", + "text": ". \n\nTo describe the csv file, you need to provide", "type": "text" }, "event_type": { @@ -5431,7 +6333,7 @@ { "event": { "delta": { - "text": " describe the csv file, you need to provide the actual file", + "text": " the actual file path or the file itself", "type": "text" }, "event_type": { @@ -5446,7 +6348,7 @@ { "event": { "delta": { - "text": " path or the file itself. If you", + "text": ". If you are using a remote server or a local machine,", "type": "text" }, "event_type": { @@ -5461,7 +6363,7 @@ { "event": { "delta": { - "text": " are using a remote server, you can use the `requests` library", + "text": " you can use the `pd.read_csv()` function to load the", "type": "text" }, "event_type": { @@ -5476,7 +6378,7 @@ { "event": { "delta": { - "text": " to download the file and then load it into a pandas dataframe. \n\nHere", + "text": " csv file. \n\nHere is an example:\n\n```python\nimport", "type": "text" }, "event_type": { @@ -5491,7 +6393,7 @@ { "event": { "delta": { - "text": " is an example of how you can do it:\n\n```\nimport pandas as", + "text": " pandas as pd\n# Load data\ndf", "type": "text" }, "event_type": { @@ -5506,7 +6408,7 @@ { "event": { "delta": { - "text": " pd\nimport requests\n\n# Download the csv file\nurl = \"https", + "text": " = pd.read_csv('inflation.csv", "type": "text" }, "event_type": { @@ -5521,7 +6423,7 @@ { "event": { "delta": { - "text": "://example.com/your_file.csv\"\nresponse = requests.get(url)\n\n#", + "text": "')\n# Print the first 5 rows of the dataframe\nprint", "type": "text" }, "event_type": { @@ -5536,7 +6438,7 @@ { "event": { "delta": { - "text": " Load the csv file into a pandas dataframe\ndf", + "text": "(df.head())\n# Print the summary of the dataframe\nprint(df", "type": "text" }, "event_type": { @@ -5551,7 +6453,7 @@ { "event": { "delta": { - "text": " = pd.read_csv(response.content)\n\n# Print", + "text": ".info())\nprint(df.describe())\n```\n\nThis will print the first", "type": "text" }, "event_type": { @@ -5566,7 +6468,7 @@ { "event": { "delta": { - "text": " the description of the dataframe\nprint", + "text": " 5 rows of the dataframe,", "type": "text" }, "event_type": { @@ -5581,7 +6483,7 @@ { "event": { "delta": { - "text": "(df.describe())\n```\n\nPlease replace the `url`", + "text": " the summary of the dataframe (including the", "type": "text" }, "event_type": { @@ -5596,7 +6498,7 @@ { "event": { "delta": { - "text": " variable with the actual URL of your csv file. 
\n\nIf", + "text": " index dtype and column count), and the description of the dataframe", "type": "text" }, "event_type": { @@ -5611,7 +6513,7 @@ { "event": { "delta": { - "text": " you are using a", + "text": " (including count, mean, std,", "type": "text" }, "event_type": { @@ -5626,7 +6528,7 @@ { "event": { "delta": { - "text": " local file, you can simply use the `pd.read_csv", + "text": " min, 25%, 50%, 75%, max", "type": "text" }, "event_type": { @@ -5641,112 +6543,7 @@ { "event": { "delta": { - "text": "()` function with the file path:\n\n```\nimport pandas as", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " pd\n\n#", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " Load the csv file into a pandas", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " dataframe\ndf = pd.read_csv('your", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "_file.csv')\n\n# Print the description of", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the dataframe\nprint(df.describe())\n``", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "`\n\nPlease replace `'your_file.csv'` with the actual path", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " to your csv file.", + "text": " for each column).", "type": "text" }, "event_type": { @@ -5822,7 +6619,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "import pandas as pd\n# Load data\ndf = pd", + "tool_call": "import pandas as pd\n# Load data", "type": "tool_call" }, "event_type": { @@ -5841,7 +6638,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": ".read_csv(\"/var", + "tool_call": "\ndf =", "type": "tool_call" }, "event_type": { @@ -5860,7 +6657,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "/folders/cz/vyh7y1d11xg881", + "tool_call": " pd.read_csv(\"/var/folders/cz/vyh7", "type": "tool_call" }, "event_type": { @@ -5879,7 +6676,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "lsxsshnc5c0000gn/T/tmpc_oz", + "tool_call": "y1d11xg881lsx", "type": "tool_call" }, "event_type": { @@ -5898,7 +6695,45 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "qkdv/EzGUQEnJinflation.csv\")\n", + "tool_call": "sshnc5c0000gn/T", + "type": "tool_call" + }, + "event_type": { + "__enum__": 
"ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "/tmp8d5c8spc", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "/Q8Y9qzVXinflation.csv\")\n", "type": "tool_call" }, "event_type": { @@ -5955,7 +6790,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " are:\", len(df.columns))\n# Column names\n", + "tool_call": " are:\", len(df.columns))\n# Column names\nprint", "type": "tool_call" }, "event_type": { @@ -5974,7 +6809,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "print(\"Columns of the data are:\", df.columns)\n", + "tool_call": "(\"Columns of the data are:\", df.columns)\n", "type": "tool_call" }, "event_type": { @@ -5993,7 +6828,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "# Column dtypes\nprint(\"Datatype of", + "tool_call": "# Column dtypes\nprint(\"Datatype of the columns are", "type": "tool_call" }, "event_type": { @@ -6012,7 +6847,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " the columns are:\", df.dtypes)", + "tool_call": ":\", df.dtypes)", "type": "tool_call" }, "event_type": { @@ -6033,9 +6868,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmpc_ozqkdv/EzGUQEnJinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)" + "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/cz/vyh7y1d11xg881lsxsshnc5c0000gn/T/tmp8d5c8spc/Q8Y9qzVXinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)" }, - "call_id": "e57ec9d1-68d8-4493-b3d3-0fb683a4663a", + "call_id": "15893b4c-5a55-4ea7-9902-8a2f28fa3659", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -6076,7 +6911,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:71183\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. 
For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. 
note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:84988\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content='You can ask your question now. I will help you answer it using the knowledge_search tool results.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:98cad\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. 
note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:84988\\nContent: with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model\\'s final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models), but\\n other modelling tasks may have different projections - classifier models will project\\n to the number of classes, for example\\n\\n.. note::\\n\\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\\n final output projection do not support ``apply_lora_to_output``.\\n\\nThese are all specified under the ``model`` flag or config entry, i.e:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\",\"output_proj\"]\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n model.lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\",\"output_proj\"]\\n\\nSecondly, parameters which control the scale of the impact of LoRA on the model:\\n\\n* ``lora_rank: int`` affects the scale of\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:98cad\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. 
code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. note::\\n\\n Calling :func:`lora_llama_2_7b ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:9c730\\nContent: ora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial `.\\n\\nLet\\'s take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides ` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n checkpointer.checkpoint_dir= \\\\\\n tokenizer.path=/tokenizer.model \\\\\\n checkpointer.output_dir=\\n\\nThis will load the Llama3-8B-Instruct checkpoint and tokenizer from ```` used in the :ref:`tune download ` command above,\\nthen save a final checkpoint in the same directory following the original format. For more details on the\\ncheckpoint formats supported in torchtune, see our :ref:`checkpointing deep-dive `.\\n\\n.. note::\\n To see the full set of configurable parameters for this (and other) configs we can use :ref:`tune cp ` to copy (and modify)\\n the default config. :ref:`tune cp ` can be used with recipe scripts too, in case you want to make more custom changes\\n that cannot be achieved by directly modifying existing configurable parameters. For more on :ref:`tune cp ` see the section on\\n :ref:`modifying configs ` in our \":ref:`finetune_llama_label`\" tutorial.\\n\\nOnce training is complete, the model checkpoints will be saved and their locations will be logged. 
For\\nLoRA fine-tuning, the final checkpoint will contain the merged weights, and a copy of just the (much smaller) LoRA weights\\nwill\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:255c3\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. 
code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:3b16c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA in Torchtune'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:14b97\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. 
grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:14b97\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. 
See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:14b97\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune\\'s LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we\\'ve loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\"\"\"\\n {total_params} total params,\\n {trainable_params}\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \"\"\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe , tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -6111,7 +6946,7 @@ { "event": { "delta": { - "text": " use LoRA, you can follow these steps", + "text": " use LoRA in Torchtune, you can follow these", "type": "text" }, "event_type": { @@ -6126,7 +6961,7 @@ { "event": { "delta": { - "text": ":\n\n1. 
Install the necessary packages", + "text": " steps:\n\n1. Install Torchtune and its dependencies", "type": "text" }, "event_type": { @@ -6141,7 +6976,7 @@ { "event": { "delta": { - "text": ", including torchtune and the Llama2 model.\n", + "text": ".\n2. Download the Llama", "type": "text" }, "event_type": { @@ -6156,7 +6991,7 @@ { "event": { "delta": { - "text": "2. Load the Llama2 model and specify which", + "text": "2 weights and tokenizer.\n3. Use the `l", "type": "text" }, "event_type": { @@ -6171,7 +7006,7 @@ { "event": { "delta": { - "text": " layers to apply LoRA to.\n3.", + "text": "ora_llama2_7b` model in Torchtune", "type": "text" }, "event_type": { @@ -6186,7 +7021,7 @@ { "event": { "delta": { - "text": " Define the LoRA parameters, such as the rank and", + "text": ", which applies LoRA to the", "type": "text" }, "event_type": { @@ -6201,7 +7036,7 @@ { "event": { "delta": { - "text": " alpha values.\n4. Train the model using", + "text": " Q and V projections by default.\n4.", "type": "text" }, "event_type": { @@ -6216,7 +7051,7 @@ { "event": { "delta": { - "text": " the LoRA fine-tuning recipe in torchtune", + "text": " Set the `lora_attn_modules` argument to", "type": "text" }, "event_type": { @@ -6231,7 +7066,7 @@ { "event": { "delta": { - "text": ".\n5. Use the trained model for inference or further fine", + "text": " apply LoRA to all linear", "type": "text" }, "event_type": { @@ -6246,7 +7081,7 @@ { "event": { "delta": { - "text": "-tuning.\n\nHere is an example of how to apply Lo", + "text": " layers in the self-attention.\n", "type": "text" }, "event_type": { @@ -6261,7 +7096,7 @@ { "event": { "delta": { - "text": "RA to Llama2-7B:\n\n", + "text": "5. Increase the rank and", "type": "text" }, "event_type": { @@ -6276,7 +7111,7 @@ { "event": { "delta": { - "text": "```python\nfrom torchtune.models.llama2 import", + "text": " alpha values to experiment with different LoRA", "type": "text" }, "event_type": { @@ -6291,7 +7126,7 @@ { "event": { "delta": { - "text": " llama2_7b, lora_llama2", + "text": " configurations.\n6. 
Run the LoRA finetuning", "type": "text" }, "event_type": { @@ -6306,7 +7141,7 @@ { "event": { "delta": { - "text": "_7b\n\n# Build Llama2 without any Lo", + "text": " recipe in Torchtune using the `lora_finet", "type": "text" }, "event_type": { @@ -6321,7 +7156,7 @@ { "event": { "delta": { - "text": "RA layers\nbase_model = llama2_7b()\n\n", + "text": "une_distributed` command.\n7.", "type": "text" }, "event_type": { @@ -6336,7 +7171,7 @@ { "event": { "delta": { - "text": "# The default settings for lora_llama", + "text": " Monitor the loss curves and adjust the Lo", "type": "text" }, "event_type": { @@ -6351,7 +7186,7 @@ { "event": { "delta": { - "text": "2_7b will match those for", + "text": "RA configuration as needed to trade off memory and model performance.\n\n", "type": "text" }, "event_type": { @@ -6366,7 +7201,7 @@ { "event": { "delta": { - "text": " llama2_7b\n# We just need to define", + "text": "By following these steps, you can effectively use LoRA in", "type": "text" }, "event_type": { @@ -6381,7 +7216,7 @@ { "event": { "delta": { - "text": " which layers we want LoRA applied to.\n# Within each", + "text": " Torchtune to fine-tune Llama", "type": "text" }, "event_type": { @@ -6396,292 +7231,7 @@ { "event": { "delta": { - "text": " self-attention, we can choose from [\"q_proj\",", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " \"k_proj\", \"v_proj\", and \"output_proj\"]", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": ".\n# We can also set apply_lora_to_mlp=True", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " or apply_lora_to_output=True to apply LoRA to other", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " linear\n# layers outside of the self-", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "attention.\nlora_model = lora_llama2_7", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "```\n\nYou can also customize the LoRA parameters", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " by specifying the rank and alpha values:\n\n```python", - "type": "text" - }, - 
"event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "\nlora_model = lora_llama2_7b", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "(lora_attn_modules=[\"q_proj\", \"v_proj\"],", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " lora_rank=8, lora_alpha=16)\n``", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "`\n\nTo train the model using the LoRA", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " fine-tuning recipe in torchtune, you can use", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the following command:\n\n```bash\ntune run lora_f", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "inetune_single_device --config llama3/8B_l", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "ora_single_device\n```\n\nThis will", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " load the Llama3-8B-Instruct checkpoint and", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " tokenizer from the specified directory, then save a final checkpoint in the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " same directory following the original format.", + "text": "2 models with a low memory footprint.", "type": "text" }, "event_type": { @@ -6714,854 +7264,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:71183\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:84988\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content='You can ask your question now. I will help you answer it using the knowledge_search tool results.', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { - "chunks": [ - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "start" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "{\"", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "type\": \"function\", \"name\": \"knowledge_search", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "\", \"parameters\": {\"query\": \"How to use Lo", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "RA\"}}", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "value": "succeeded" - }, - "tool_call": { - "arguments": { - "query": "How to use LoRA" - }, - "call_id": "ee82ce77-7143-4b2f-8eb8-de5f31517b84", - "tool_name": "knowledge_search" - }, - "type": "tool_call" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": { - "__enum__": "StopReason", - "value": "end_of_turn" - } - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - 
"event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "complete" - }, - "logprobs": null, - "stop_reason": { - "__enum__": "StopReason", - "value": "end_of_turn" - } - }, - "metrics": null - } - ], - "type": "generator" - }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:71183\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. 
grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:84988\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. 
code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { - "chunks": [ - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "start" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "You", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " can ask your question now. I will help you answer it using", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the knowledge_search tool results.", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "complete" - }, - "logprobs": null, - "stop_reason": { - "__enum__": "StopReason", - "value": "end_of_turn" - } - }, - "metrics": null - } - ], - "type": "generator" - }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:7bdfa\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:0c95c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content='You can use the following function call to answer the user\\'s question:\\n\\n{\"type\": \"function\", \"name\": \"knowledge_search\", \"parameters\": {\"query\": \"How to fine-tune a Llama2 model with LoRA in torchtune\"}}', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:64211\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. 
When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:0c95c\\nContent: with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model\\'s final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models), but\\n other modelling tasks may have different projections - classifier models will project\\n to the number of classes, for example\\n\\n.. note::\\n\\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\\n final output projection do not support ``apply_lora_to_output``.\\n\\nThese are all specified under the ``model`` flag or config entry, i.e:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\",\"output_proj\"]\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n model.lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\",\"output_proj\"]\\n\\nSecondly, parameters which control the scale of the impact of LoRA on the model:\\n\\n* ``lora_rank: int`` affects the scale of\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:64211\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. note::\\n\\n Calling :func:`lora_llama_2_7b ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:1d70c\\nContent: ora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial `.\\n\\nLet\\'s take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides ` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n checkpointer.checkpoint_dir= \\\\\\n tokenizer.path=/tokenizer.model \\\\\\n checkpointer.output_dir=\\n\\nThis will load the Llama3-8B-Instruct checkpoint and tokenizer from ```` used in the :ref:`tune download ` command above,\\nthen save a final checkpoint in the same directory following the original format. For more details on the\\ncheckpoint formats supported in torchtune, see our :ref:`checkpointing deep-dive `.\\n\\n.. 
note::\\n To see the full set of configurable parameters for this (and other) configs we can use :ref:`tune cp ` to copy (and modify)\\n the default config. :ref:`tune cp ` can be used with recipe scripts too, in case you want to make more custom changes\\n that cannot be achieved by directly modifying existing configurable parameters. For more on :ref:`tune cp ` see the section on\\n :ref:`modifying configs ` in our \":ref:`finetune_llama_label`\" tutorial.\\n\\nOnce training is complete, the model checkpoints will be saved and their locations will be logged. For\\nLoRA fine-tuning, the final checkpoint will contain the merged weights, and a copy of just the (much smaller) LoRA weights\\nwill\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { - "chunks": [ - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "start" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "To", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " use LoRA, you can follow these steps:\n\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "1. Install the necessary packages", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": ", including torchtune and the Llama", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "2 model.\n2. Load the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " Llama2 model and specify which layers", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " to apply LoRA to.\n3. 
Define the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " LoRA parameters, such as the rank", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " and alpha values.\n4. Train the model using", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " the LoRA fine-tuning recipe in torchtune.\n\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "Here is an example of how to use Lo", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "RA with the Llama2 model:\n\n```python\nfrom", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " torchtune.models.llama2 import", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " llama2_7b, lora_llama2_7", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "b\n\n# Build Llama2 without any LoRA layers\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "base_model = llama2_7b()\n\n# The default settings", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " for lora_llama2_7b will match those", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " for llama2_7b\n#", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " We just need to define which layers we", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " want LoRA applied to.\n# Within each self-attention", - "type": "text" - }, - "event_type": { - "__enum__": 
"ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": ", we can choose from [\"q_proj\", \"k_proj", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "\", \"v_proj\", and \"output_proj\"].\n#", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " We can also set apply_lora_to_mlp=True or", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " apply_lora_to_output=True to apply LoRA to other", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " linear\n# layers outside of the self-attention.\nl", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "ora_model = lora_llama", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "2_7b(lora_attn", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "_modules=[\"q_proj\", \"v_proj\"])\n\n# Print the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " first layer's self-attention in the usual Llama2", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " model\nprint(base_model.layers[0", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "].attn)\n# Print the same for Llama2 with", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " LoRA weights\nprint(lora_model.layers[0].", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "attn)\n```\n\nThis code will load the Llama", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": 
"progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "2 model and apply LoRA to the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " specified layers. You can then train the model using the Lo", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "RA fine-tuning recipe in torchtune", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": ".\n\nNote that you will need to modify the code to suit", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " your specific use case and requirements. Additionally,", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " you may need to adjust the LoRA parameters and the training", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " settings to achieve the desired results.", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "complete" - }, - "logprobs": null, - "stop_reason": { - "__enum__": "StopReason", - "value": "end_of_turn" - } - }, - "metrics": null - } - ], - "type": "generator" - }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:7bdfa\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. 
Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. 
code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:0c95c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content='You can use the following function call to answer the user\\'s question:\\n\\n{\"type\": \"function\", \"name\": \"knowledge_search\", \"parameters\": {\"query\": \"How to fine-tune a Llama2 model with LoRA in torchtune\"}}', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:255c3\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. 
This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. 
code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:3b16c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -7611,7 +7314,22 @@ { "event": { "delta": { - "text": "parameters\": {\"query\": \"How to use LoRA\"}}", + "text": "parameters\": {\"query\": \"How to use LoRA in", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Torchtune\"}}", "type": "text" }, "event_type": { @@ -7632,9 +7350,9 @@ }, "tool_call": { "arguments": { - "query": "How to use LoRA" + "query": "How to use LoRA in Torchtune" }, - "call_id": "ce86a63d-964a-49a0-8488-29c28ecb2f80", + "call_id": "41f1d05b-cfca-4d54-a0de-38a968017c8b", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -7672,7 +7390,7 @@ ], "type": "generator" }, - "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:7bdfa\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. 
code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:0c95c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:255c3\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. 
For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. 
note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:3b16c\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { "event": { @@ -7692,7 +7410,7 @@ { "event": { "delta": { - "text": "You", + "text": "I", "type": "text" }, "event_type": { @@ -7707,7 +7425,7 @@ { "event": { "delta": { - "text": " can use the following function call to answer", + "text": "'m ready to help you answer questions about Torchtune based", "type": "text" }, "event_type": { @@ -7722,7 +7440,7 @@ { "event": { "delta": { - "text": " the user's question:\n\n{\"type\": \"function\", \"", + "text": " on the documentation you provided. What's your first question?", "type": "text" }, "event_type": { @@ -7737,7 +7455,45 @@ { "event": { "delta": { - "text": "name\": \"knowledge_search\", \"parameters\": {\"query\":", + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:292ee\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. 
code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. 
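The quoted tutorial above builds a ``chat_dataset`` from a local JSON file but stops short of showing what a tokenized sample looks like. A minimal sketch of peeking at one sample; the ``tokens``/``labels`` sample keys and the tokenizer ``decode`` call are assumptions about torchtune's sample format, not taken from the quoted text:

.. code-block:: python

    from torchtune.datasets import chat_dataset
    from torchtune.models.llama3 import llama3_tokenizer

    tokenizer = llama3_tokenizer("/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model")
    ds = chat_dataset(
        tokenizer=tokenizer,
        source="json",
        data_files="data/my_data.json",
        split="train",
        conversation_column="dialogue",
        conversation_style="sharegpt",
    )

    sample = ds[0]                    # one tokenized conversation (assumed dict shape)
    print(len(sample["tokens"]))      # sequence length after chat templating
    print(tokenizer.decode(sample["tokens"]))  # round-trip back to text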
See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:2513e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA in Torchtune'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:47152\\nContent: .. 
_lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:47152\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. 
code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:47152\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune\\'s LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we\\'ve loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\"\"\"\\n {total_params} total params,\\n {trainable_params}\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \"\"\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. 
_lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe , tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "To", "type": "text" }, "event_type": { @@ -7752,7 +7508,7 @@ { "event": { "delta": { - "text": " \"How to fine-tune a L", + "text": " use LoRA in Torchtune, you can follow these steps", "type": "text" }, "event_type": { @@ -7767,7 +7523,7 @@ { "event": { "delta": { - "text": "lama2 model with LoRA in torch", + "text": ":\n\n1. Install Torchtune and its dependencies.\n", "type": "text" }, "event_type": { @@ -7782,7 +7538,988 @@ { "event": { "delta": { - "text": "tune\"}}", + "text": "2. Download the Llama2 weights and tokenizer.\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "3. Use the `lora_llama2_", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "7b` model in Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ", which applies LoRA to the Q", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " and V projections by default.\n4", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". Load the base model weights into the LoRA model without any", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " conversion necessary.\n5. Set only LoRA parameters to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " trainable.\n6. 
Run the LoRA finetuning recipe", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " in Torchtune with the desired configuration.\n\nYou", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " can also experiment with different LoRA configurations, such as", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " applying LoRA to all linear layers in the self", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "-attention, increasing the rank, or scaling alpha and rank", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " together.\n\nBy following these steps, you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " can use LoRA in Torchtune to fine-tune a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Llama2 model with a low memory footprint and achieve good", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " performance.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:292ee\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. 
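The ``chunks`` arrays recorded in this fixture stream the assistant's answer as ``text`` deltas between ``start`` and ``complete`` events. A minimal consumer sketch that reassembles them into the final message; the dict shapes mirror the recorded JSON entries above, not the llama-stack client API:

.. code-block:: python

    # Sketch: concatenate the streamed text deltas from one recorded entry.
    def collect_text(chunks: list[dict]) -> str:
        parts = []
        for chunk in chunks:
            event = chunk["event"]
            delta = event["delta"]
            if delta.get("type") == "text":
                parts.append(delta["text"])
            if event["event_type"]["value"] == "complete":
                break
        return "".join(parts)

    # e.g. collect_text(recorded_entry["chunks"]) would yield
    # "To use LoRA in Torchtune, you can follow these steps: ..."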
For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. 
note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:2513e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "{\"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "type\": \"function\", \"name\":", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " \"knowledge_search\", \"parameters\": {\"query\": \"How to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " use LoRA in Torchtune\"}}", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "How to use LoRA in Torchtune" + }, + "call_id": "5beb7c24-953b-4ad7-b834-a26522fb5ac7", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
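The entry just above also records a ``tool_call`` delta whose ``parse_status`` is ``succeeded``. A companion sketch that separates plain text deltas from parsed tool calls, again assuming only the fixture's JSON shape rather than any public API:

.. code-block:: python

    # Sketch: split one recorded stream into raw text and parsed tool calls.
    def split_stream(chunks: list[dict]) -> tuple[str, list[dict]]:
        text_parts, tool_calls = [], []
        for chunk in chunks:
            delta = chunk["event"]["delta"]
            if delta["type"] == "text":
                text_parts.append(delta["text"])
            elif delta["type"] == "tool_call":
                # Only keep calls the parser fully reconstructed.
                if delta["parse_status"]["value"] == "succeeded":
                    tool_calls.append(delta["tool_call"])
        return "".join(text_parts), tool_calls

    # For the entry above this yields the raw '{"type": "function", ...}' text
    # plus one parsed call: knowledge_search(query="How to use LoRA in Torchtune").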
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:292ee\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:2513e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'m ready to help you answer questions about Torchtune based", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " on the documentation you provided. What's your first question", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "?", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
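The DoRA configuration quoted above maps directly onto the ``lora_llama3_8b`` builder named in the docs. A sketch instantiating it with the keyword values from that YAML; treat the call as illustrative under those quoted settings rather than a verified API surface:

.. code-block:: python

    from torchtune.models.llama3 import lora_llama3_8b

    model = lora_llama3_8b(
        lora_attn_modules=["q_proj", "k_proj", "v_proj"],
        apply_lora_to_mlp=True,
        lora_rank=16,
        lora_alpha=32,
        use_dora=True,       # swaps LoRALinear for DoRALinear under the hood
        quantize_base=True,  # QLoRA-style quantization of the frozen base weights
    )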
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:ab1b9\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:8bcf6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA in Torchtune'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:cc646\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. 
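The quoted passage attributes LoRA's memory savings to the reduced trainable-parameter count, and the tutorial elsewhere prints 6742609920 total versus 4194304 trainable parameters. A back-of-envelope sketch of what that means for AdamW's two per-parameter state tensors (the fp32-state assumption is ours):

.. code-block:: python

    # Numbers quoted from the tutorial's printout in this fixture.
    total_params = 6_742_609_920
    trainable_params = 4_194_304

    # AdamW keeps exp_avg and exp_avg_sq per trainable parameter (fp32 assumed).
    bytes_per_param_state = 2 * 4
    full_ft_state_gib = total_params * bytes_per_param_state / 2**30
    lora_state_gib = trainable_params * bytes_per_param_state / 2**30

    print(f"{100 * trainable_params / total_params:.2f}% of params trainable")  # ~0.06%
    print(f"optimizer state: {full_ft_state_gib:.1f} GiB -> {lora_state_gib:.3f} GiB")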
When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:cc646\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:cc646\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. 
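The note above asks you to verify missing and unexpected keys whenever loading with ``strict=False``. In plain PyTorch, ``load_state_dict`` returns those two lists, so the check fits in a few lines; ``base_model`` and ``lora_model`` here are the objects from the quoted tutorial, assumed already constructed:

.. code-block:: python

    # Sketch of the recommended sanity check: with strict=False, PyTorch's
    # load_state_dict returns (missing_keys, unexpected_keys).
    missing, unexpected = lora_model.load_state_dict(
        base_model.state_dict(), strict=False
    )
    # Nothing in the base checkpoint should be unknown to the LoRA model.
    assert not unexpected, f"unexpected keys: {unexpected}"
    # Only adapter parameters should be absent from the base checkpoint;
    # eyeball the list (or assert on your adapter naming scheme).
    print("keys missing from base checkpoint:", missing)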
torchtune\\'s LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we\\'ve loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\"\"\"\\n {total_params} total params,\\n {trainable_params}\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \"\"\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe , tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "To", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " use LoRA in Torchtune, you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " can follow these steps:\n\n1. Install Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " and its dependencies.\n2. Download the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Llama2 weights and tokenizer.\n3", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". 
Use the `lora_llama", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "2_7b` model in Torchtune, which", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " applies LoRA to the Q and V", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " projections by default.\n4. Load the base model weights into", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the LoRA model without any conversion necessary.\n5. Set", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " only LoRA parameters to trainable.\n6. Run the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " LoRA finetuning recipe in Torchtune with the desired", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " configuration.\n\nYou can also experiment with different LoRA configurations, such", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " as applying LoRA to all linear layers in the self-attention", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ", increasing the rank, or scaling alpha and rank together.\n\nBy", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " following these steps, you can use LoRA in Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to fine-tune a Llama2", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " model with parameter-efficient finetuning and memory savings.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + 
"stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:ab1b9\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. 
grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:8bcf6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. 
code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "{\"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "type\": \"function\", \"name\":", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " \"knowledge_search\", \"parameters", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\": {\"query\": \"How to use LoRA in Tor", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "chtune\"}}", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "How to use LoRA in Torchtune" + }, + "call_id": "5af3ef1f-98c0-4c60-9b8b-892b5e921040", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:ab1b9\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. 
Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. 
When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:8bcf6\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'m ready to help you answer questions about Torchtune based on", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the documentation you provided. What's your first question?", "type": "text" }, "event_type": { @@ -8737,6 +9474,568 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:f3963\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. 
code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. 
See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:e075f\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA in Torchtune'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:0484f\\nContent: .. 
_lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:0484f\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. 
code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:0484f\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune\\'s LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we\\'ve loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\"\"\"\\n {total_params} total params,\\n {trainable_params}\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \"\"\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. 
_lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune\\'s `LoRA recipe , tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "To", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " use LoRA in Torchtune, you can follow these steps", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ":\n\n1. Install Torchtune and its dependencies.\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "2. Download the Llama2 weights and tokenizer.\n3", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ". Use", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " the `lora_llama2_7", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "b` model in Torchtune, which applies", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " LoRA to the Q and V projections by default", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": ".\n4. 
Load the base model weights into the LoRA", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " model without any conversion necessary.\n5.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Set only LoRA parameters", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to trainable.\n6.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Run the LoRA fin", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "etuning recipe in Torcht", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "une with the desired configuration.\n\nYou can also experiment", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " with different Lo", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "RA configurations, such as applying LoRA to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " all linear layers in the self-attention, increasing the rank,", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " or scaling alpha and rank together.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. 
Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:f3963\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:e075f\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. 
code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content=\"I'm ready to help you answer questions about Torchtune based on the documentation you provided. What's your first question?\", stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "{\"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "type\": \"function\", \"name", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "\": \"knowledge_search\", \"parameters\":", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " {\"query\": \"How to use Lo", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "RA in Torchtune\"}}", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "How to use LoRA in Torchtune" + }, + "call_id": "42e1de09-f47e-44b0-9331-9b878556970d", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + 
"event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:f3963\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. 
code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:e075f\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { + "chunks": [ + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "'m ready to help you answer questions about", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " Torchtune based on the documentation you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " provided. What's your first question?", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "value": "end_of_turn" + } + }, + "metrics": null + } + ], + "type": "generator" + }, "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='I am attaching some documentation for Torchtune. Help me answer questions I will ask next.', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'Torchtune documentation'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:f4fd3\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\"json\",\\n data_files=\"data/my_data.json\",\\n split=\"train\",\\n conversation_column=\"dialogue\",\\n conversation_style=\"sharegpt\",\\n )\\n\\n.. 
code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we\\'re fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral\\'s :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. 
See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:8892b\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we\\'ve enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), CompletionMessage(role='assistant', content='You can use the following function call to answer the user\\'s question:\\n\\n{\"type\": \"function\", \"name\": \"knowledge_search\", \"parameters\": {\"query\": \"How to fine-tune a Llama2 model with LoRA in torchtune\"}}', stop_reason=, tool_calls=[]), UserMessage(role='user', content='Tell me how to use LoRA', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'How to use LoRA'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text=\"Result 1:\\nDocument_id:cbc88\\nContent: .. 
_lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune\\'s factory settings, but we may want to experiment a bit.\\nLet\\'s take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: [\\'q_proj\\', \\'v_proj\\']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:8892b\\nContent: with training with LoRA quickly,\\njust specify any config with ``_lora`` in its name, e.g:\\n\\n.. 
code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n\\nThere are two sets of parameters to customize LoRA to suit your needs. Firstly, the parameters which control\\nwhich linear layers LoRA should be applied to in the model:\\n\\n* ``lora_attn_modules: List[str]`` accepts a list of strings specifying which layers of the model to apply\\n LoRA to:\\n\\n * ``q_proj`` applies LoRA to the query projection layer.\\n * ``k_proj`` applies LoRA to the key projection layer.\\n * ``v_proj`` applies LoRA to the value projection layer.\\n * ``output_proj`` applies LoRA to the attention output projection layer.\\n\\n Whilst adding more layers to be fine-tuned may improve model accuracy,\\n this will come at the cost of increased memory usage and reduced training speed.\\n\\n* ``apply_lora_to_mlp: Bool`` applies LoRA to the MLP in each transformer layer.\\n* ``apply_lora_to_output: Bool`` applies LoRA to the model\\'s final output projection.\\n This is usually a projection to vocabulary space (e.g. in language models), but\\n other modelling tasks may have different projections - classifier models will project\\n to the number of classes, for example\\n\\n.. note::\\n\\n Models which use tied embeddings (such as Gemma and Qwen2 1.5B and 0.5B) for the\\n final output projection do not support ``apply_lora_to_output``.\\n\\nThese are all specified under the ``model`` flag or config entry, i.e:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\",\"output_proj\"]\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.llama3.lora_llama3_8b\\n apply_lora_to_mlp: True\\n model.lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\",\"output_proj\"]\\n\\nSecondly, parameters which control the scale of the impact of LoRA on the model:\\n\\n* ``lora_rank: int`` affects the scale of\\n'), TextContentItem(type='text', text='Result 4:\\nDocument_id:cbc88\\nContent: LoRA to Llama2 models\\n------------------------------\\n\\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\\nLet\\'s take a look at how to construct Llama2 models in torchtune with and without LoRA.\\n\\n.. code-block:: python\\n\\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\\n\\n # Build Llama2 without any LoRA layers\\n base_model = llama2_7b()\\n\\n # The default settings for lora_llama2_7b will match those for llama2_7b\\n # We just need to define which layers we want LoRA applied to.\\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\\n # layers outside of the self-attention.\\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\\n\\n.. note::\\n\\n Calling :func:`lora_llama_2_7b ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet\\'s inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer\\'s self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n'), TextContentItem(type='text', text='Result 5:\\nDocument_id:9dcb7\\nContent: ora_finetune_label>`.\\nFor more on QLoRA in torchtune, see our :ref:`QLoRA Tutorial `.\\n\\nLet\\'s take a look at how we can fine-tune Llama3-8B-Instruct with LoRA on a single device using torchtune. In this example, we will fine-tune\\nfor one epoch on a common instruct dataset for illustrative purposes. The basic command for a single-device LoRA fine-tune is\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device\\n\\n.. note::\\n To see a full list of recipes and their corresponding configs, simply run ``tune ls`` from the command line.\\n\\nWe can also add :ref:`command-line overrides ` as needed, e.g.\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n checkpointer.checkpoint_dir= \\\\\\n tokenizer.path=/tokenizer.model \\\\\\n checkpointer.output_dir=\\n\\nThis will load the Llama3-8B-Instruct checkpoint and tokenizer from ```` used in the :ref:`tune download ` command above,\\nthen save a final checkpoint in the same directory following the original format. For more details on the\\ncheckpoint formats supported in torchtune, see our :ref:`checkpointing deep-dive `.\\n\\n.. note::\\n To see the full set of configurable parameters for this (and other) configs we can use :ref:`tune cp ` to copy (and modify)\\n the default config. :ref:`tune cp ` can be used with recipe scripts too, in case you want to make more custom changes\\n that cannot be achieved by directly modifying existing configurable parameters. For more on :ref:`tune cp ` see the section on\\n :ref:`modifying configs ` in our \":ref:`finetune_llama_label`\" tutorial.\\n\\nOnce training is complete, the model checkpoints will be saved and their locations will be logged. For\\nLoRA fine-tuning, the final checkpoint will contain the merged weights, and a copy of just the (much smaller) LoRA weights\\nwill\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)})])]": { "chunks": [ { @@ -9841,7 +11140,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\", \"parameters\": {\"query\": \"", + "tool_call": "\", \"parameters\": {\"query\": \"Torchtune", "type": "tool_call" }, "event_type": { @@ -9860,7 +11159,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "Torchtune documentation\"}}", + "tool_call": " documentation\"}}", "type": "tool_call" }, "event_type": { @@ -9883,7 +11182,7 @@ "arguments": { "query": "Torchtune documentation" }, - "call_id": "6ec2bf0f-42f3-453d-ad5f-52bc6e0267b7", + "call_id": "0f0eb27a-1126-4d26-8b33-b630a9518093", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -9941,7 +11240,7 @@ { "event": { "delta": { - "text": "L", + "text": "The", "type": "text" }, "event_type": { @@ -9956,7 +11255,7 @@ { "event": { "delta": { - "text": "lama3-8B uses grouped-query attention instead of the standard multi-head", + "text": " attention type used by Llama3-8B is grouped", "type": "text" }, "event_type": { @@ -9971,7 +11270,7 @@ { "event": { "delta": { - "text": " attention from Llama2-7B.", + "text": "-query attention.", "type": "text" }, "event_type": { @@ -10039,7 +11338,22 @@ { "event": { "delta": { - "text": " attention type used by Llama3-8B is grouped-query attention.", + "text": " attention type used by Llama3-", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "8B is grouped-query attention.", "type": "text" }, "event_type": { @@ -10107,7 +11421,7 @@ { "event": { "delta": { - "text": " \"type\": \"function\",\n ", + "text": " \"type\": \"function\",\n \"name\": \"knowledge", "type": "text" }, "event_type": { @@ -10122,7 +11436,7 @@ { "event": { "delta": { - "text": " \"name\": \"knowledge_search\",\n \"parameters\": {\n \"", + "text": "_search\",\n \"parameters\": {\n \"query\": \"L", "type": "text" }, "event_type": { @@ -10137,37 +11451,7 @@ { "event": { "delta": { - "text": "query\": \"Llama3", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "-8B attention type\"\n }\n", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "}", + "text": "lama3-8B attention type\"\n }\n}", "type": "text" }, "event_type": { @@ -10190,7 +11474,7 @@ "arguments": { "query": "Llama3-8B attention type" }, - "call_id": "95471ab3-196c-45ba-a7f1-7585026662c2", + "call_id": "ce62cb6d-fcb0-437a-abd9-b0bed88628ed", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -10271,7 +11555,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"name\": \"knowledge_search\", \"", + "tool_call": "{\"type\": \"function\", \"name\": \"knowledge_search\",", "type": "tool_call" }, "event_type": { @@ -10290,7 +11574,26 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "parameters\": {\"query\": \"Llama3-8B attention type\"}}", + "tool_call": " \"parameters\": {\"query\": \"L", + "type": "tool_call" + }, + "event_type": { 
+ "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "lama3-8B attention type\"}}", "type": "tool_call" }, "event_type": { @@ -10313,7 +11616,7 @@ "arguments": { "query": "Llama3-8B attention type" }, - "call_id": "f026154f-72fb-47aa-828c-065bd5a16267", + "call_id": "25fcc4f2-72a8-4175-82ca-c7a692d13d66", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -10613,7 +11916,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "brave_search.call(query=\"current CEO of", + "tool_call": "brave_search.call(query=\"current", "type": "tool_call" }, "event_type": { @@ -10632,7 +11935,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " Meta\")", + "tool_call": " CEO of Meta\")", "type": "tool_call" }, "event_type": { @@ -10655,7 +11958,7 @@ "arguments": { "query": "current CEO of Meta" }, - "call_id": "b9ee4732-1663-429c-ae7d-186578174556", + "call_id": "f5d644f1-3ada-4a5a-a088-736c89428fe9", "tool_name": { "__enum__": "BuiltinTool", "value": "brave_search" @@ -10829,7 +12132,7 @@ { "event": { "delta": { - "text": " function `get_boiling_point` is not able to find", + "text": " function `get_boiling_point` is", "type": "text" }, "event_type": { @@ -10844,7 +12147,7 @@ { "event": { "delta": { - "text": " the boiling point of polyjuice as it is a fictional", + "text": " not able to find the boiling point of", "type": "text" }, "event_type": { @@ -10859,7 +12162,7 @@ { "event": { "delta": { - "text": " liquid from the Harry Potter series. The", + "text": " polyjuice as it is a fictional", "type": "text" }, "event_type": { @@ -10874,7 +12177,22 @@ { "event": { "delta": { - "text": " function only works with real-world liquids.", + "text": " liquid from the Harry Potter series. The function is only able", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " to find the boiling point of real liquids.", "type": "text" }, "event_type": { @@ -11070,7 +12388,7 @@ { "event": { "delta": { - "text": " function `get_boiling_point` is not", + "text": " function `get_boiling_point`", "type": "text" }, "event_type": { @@ -11085,7 +12403,7 @@ { "event": { "delta": { - "text": " able to find the boiling point of polyjuice as it is", + "text": " is not able to find the boiling point of", "type": "text" }, "event_type": { @@ -11100,7 +12418,7 @@ { "event": { "delta": { - "text": " not a real liquid. Polyjuice is a magical potion from", + "text": " polyjuice as it is not a", "type": "text" }, "event_type": { @@ -11115,7 +12433,7 @@ { "event": { "delta": { - "text": " the Harry Potter series.", + "text": " real liquid.", "type": "text" }, "event_type": { @@ -11296,7 +12614,7 @@ { "event": { "delta": { - "text": " function `get_boiling_point` is", + "text": " function `get_boiling_point` is not able", "type": "text" }, "event_type": { @@ -11311,7 +12629,7 @@ { "event": { "delta": { - "text": " not able to find the boiling point of polyjuice as it", + "text": " to find the boiling point of polyju", "type": "text" }, "event_type": { @@ -11326,7 +12644,7 @@ { "event": { "delta": { - "text": " is not a real liquid. 
Polyjuice is", + "text": "ice as it is not a real", "type": "text" }, "event_type": { @@ -11341,7 +12659,7 @@ { "event": { "delta": { - "text": " a magical potion from the Harry Potter series.", + "text": " liquid.", "type": "text" }, "event_type": { @@ -11559,7 +12877,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"name\":", + "tool_call": "{\"type\": \"function", "type": "tool_call" }, "event_type": { @@ -11578,7 +12896,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " \"get_boiling_point\", \"parameters\":", + "tool_call": "\", \"name\": \"get_boiling_point\",", "type": "tool_call" }, "event_type": { @@ -11597,7 +12915,26 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " {\"liquid_name\": \"polyjuice\"}}", + "tool_call": " \"parameters\": {\"liquid_name\": \"", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "polyjuice\"}}", "type": "tool_call" }, "event_type": { @@ -11620,7 +12957,7 @@ "arguments": { "liquid_name": "polyjuice" }, - "call_id": "a994859b-38d2-45d5-913e-359409ee8ae2", + "call_id": "22050f4b-36df-48fb-ac11-e3a47fa0beaf", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -11843,7 +13180,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"name\": \"get_boiling", + "tool_call": "{\"type\": \"function\", \"name", "type": "tool_call" }, "event_type": { @@ -11862,7 +13199,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "_point\", \"parameters\": {\"liquid_name\": \"polyjuice", + "tool_call": "\": \"get_boiling_point\", \"parameters", "type": "tool_call" }, "event_type": { @@ -11881,7 +13218,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\"}}", + "tool_call": "\": {\"liquid_name\": \"polyjuice\"}}", "type": "tool_call" }, "event_type": { @@ -11904,7 +13241,7 @@ "arguments": { "liquid_name": "polyjuice" }, - "call_id": "e48d4312-1a88-4759-9b9c-bc573c23fee6", + "call_id": "11302682-7a3a-45f3-955b-6709444fd626", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -12120,7 +13457,7 @@ { "event": { "delta": { - "text": " couldn't find any information on the boiling point of Poly", + "text": " couldn't find any information on the boiling point", "type": "text" }, "event_type": { @@ -12135,7 +13472,7 @@ { "event": { "delta": { - "text": "juice. Polyjuice is a magical potion in", + "text": " of Polyjuice. Polyjuice is a magical potion in the", "type": "text" }, "event_type": { @@ -12150,7 +13487,7 @@ { "event": { "delta": { - "text": " the Harry Potter series that allows the drinker", + "text": " Harry Potter series that allows the drinker to transform into someone else. It's", "type": "text" }, "event_type": { @@ -12165,7 +13502,7 @@ { "event": { "delta": { - "text": " to transform into someone else. It's not a physical substance", + "text": " not a physical substance with a boiling point. If", "type": "text" }, "event_type": { @@ -12180,7 +13517,7 @@ { "event": { "delta": { - "text": " with a boiling point. 
If you have any other questions, I'd", + "text": " you have any other questions, I'd be", "type": "text" }, "event_type": { @@ -12195,7 +13532,7 @@ { "event": { "delta": { - "text": " be happy to help.", + "text": " happy to help.", "type": "text" }, "event_type": { @@ -12413,7 +13750,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"name\": \"get_boiling_point\",", + "tool_call": "{\"type\": \"function\", \"name\": \"get_boiling", "type": "tool_call" }, "event_type": { @@ -12432,7 +13769,26 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " \"parameters\": {\"liquid_name\": \"polyjuice\"}}", + "tool_call": "_point\", \"parameters\": {\"liquid_name", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "\": \"polyjuice\"}}", "type": "tool_call" }, "event_type": { @@ -12455,7 +13811,7 @@ "arguments": { "liquid_name": "polyjuice" }, - "call_id": "cd0e926b-b1c8-468b-8c55-b3e42e7ae89d", + "call_id": "e704d0f9-45a1-4ed1-90b0-8a05c504da6c", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -12528,22 +13884,7 @@ { "event": { "delta": { - "text": " 100th prime number is ", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": "541.", + "text": " 100th prime number is 541.", "type": "text" }, "event_type": { @@ -12619,7 +13960,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "def is_prime(n):\n if n <= 1:\n ", + "tool_call": "def is_prime(n):\n if n", "type": "tool_call" }, "event_type": { @@ -12638,7 +13979,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " return False\n if n <= 3:\n return True", + "tool_call": " <= 1:\n return False\n if n <= 3:\n return", "type": "tool_call" }, "event_type": { @@ -12657,7 +13998,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "\n if n % 2 ==", + "tool_call": " True\n if n % 2 == 0 or n", "type": "tool_call" }, "event_type": { @@ -12676,7 +14017,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 0 or n % 3 == 0:\n ", + "tool_call": " % 3 == 0:\n ", "type": "tool_call" }, "event_type": { @@ -12695,7 +14036,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " return False\n i = 5\n", + "tool_call": " return False\n i = 5\n while i *", "type": "tool_call" }, "event_type": { @@ -12714,7 +14055,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " while i * i <= n:\n if n % i", + "tool_call": " i <= n:\n if n % i", "type": "tool_call" }, "event_type": { @@ -12733,7 +14074,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " == 0 or n % (i + 2) ==", + "tool_call": " == 0 or n % (i + 2", "type": "tool_call" }, "event_type": { @@ -12752,7 +14093,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 0:\n return False\n i += 6\n", + "tool_call": ") == 0:\n return False", "type": "tool_call" }, "event_type": { @@ -12771,7 +14112,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - 
"tool_call": " return True\n\ndef get_nth_prime(n):\n count =", + "tool_call": "\n i += 6\n ", "type": "tool_call" }, "event_type": { @@ -12790,7 +14131,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 0\n num = 2\n while True:\n", + "tool_call": " return True\n\ndef get_nth_prime(n):\n count = ", "type": "tool_call" }, "event_type": { @@ -12809,7 +14150,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " if is_prime(num):\n count += 1\n ", + "tool_call": "0\n num = 2\n ", "type": "tool_call" }, "event_type": { @@ -12828,7 +14169,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " if count == n:\n return num\n num +=", + "tool_call": " while True:\n if is_prime(num):\n count += ", "type": "tool_call" }, "event_type": { @@ -12847,7 +14188,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": " 1\n\nprint(get_nth_prime(", + "tool_call": "1\n if count == n:\n return num\n ", "type": "tool_call" }, "event_type": { @@ -12866,7 +14207,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "100))", + "tool_call": " num += 1\n\nprint(get_nth_prime(100))", "type": "tool_call" }, "event_type": { @@ -12889,7 +14230,7 @@ "arguments": { "code": "def is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef get_nth_prime(n):\n count = 0\n num = 2\n while True:\n if is_prime(num):\n count += 1\n if count == n:\n return num\n num += 1\n\nprint(get_nth_prime(100))" }, - "call_id": "a184cbe8-b941-472d-9254-fda5ed8d770f", + "call_id": "6d57c323-7679-447f-9928-ccab76c0bdc9", "tool_name": { "__enum__": "BuiltinTool", "value": "code_interpreter" @@ -12965,7 +14306,22 @@ { "event": { "delta": { - "text": "plexity the company was founded in 2022.", + "text": "plexity the company was founded in 202", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": "2.", "type": "text" }, "event_type": { @@ -13101,7 +14457,7 @@ { "event": { "delta": { - "text": "type\": \"function\", \"name\": \"", + "text": "type\": \"function\", \"name\": \"knowledge_search\", \"", "type": "text" }, "event_type": { @@ -13116,22 +14472,7 @@ { "event": { "delta": { - "text": "knowledge_search\", \"parameters\":", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - }, - { - "event": { - "delta": { - "text": " {\"query\": \"Perplexity company founding date\"}}", + "text": "parameters\": {\"query\": \"Perplexity company founding date\"}}", "type": "text" }, "event_type": { @@ -13154,7 +14495,7 @@ "arguments": { "query": "Perplexity company founding date" }, - "call_id": "9ad1f31d-4fb3-40e6-8037-0cc50794d6ce", + "call_id": "22d5440e-2873-4956-a81f-f114fc78671d", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -13361,7 +14702,7 @@ "__enum__": "ToolCallParseStatus", "value": "in_progress" }, - "tool_call": "{\"type\": \"function\", \"name\": \"knowledge_search\", \"parameters", + "tool_call": "{\"type\": \"function\", \"name", "type": "tool_call" }, "event_type": { @@ -13380,7 +14721,26 @@ "__enum__": "ToolCallParseStatus", "value": 
"in_progress" }, - "tool_call": "\": {\"query\": \"Perplexity company founding date\"}}", + "tool_call": "\": \"knowledge_search\", \"parameters\": {\"", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "value": "in_progress" + }, + "tool_call": "query\": \"Perplexity company founding date\"}}", "type": "tool_call" }, "event_type": { @@ -13403,7 +14763,7 @@ "arguments": { "query": "Perplexity company founding date" }, - "call_id": "11c1dca5-6754-4ba6-8337-1bb8a538342f", + "call_id": "98d3790b-1b84-4ab7-ad66-117fea68d5db", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -13618,7 +14978,7 @@ { "event": { "delta": { - "text": " NBA was created on August 3, ", + "text": " NBA was created on August ", "type": "text" }, "event_type": { @@ -13633,7 +14993,7 @@ { "event": { "delta": { - "text": "1949, with the merger of the Basketball Association of America", + "text": "3, 1949, with", "type": "text" }, "event_type": { @@ -13648,7 +15008,37 @@ { "event": { "delta": { - "text": " (BAA) and the National Basketball League (NBL).", + "text": " the merger of the Basketball Association of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " America (BAA) and the National Basketball League", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + }, + { + "event": { + "delta": { + "text": " (NBL).", "type": "text" }, "event_type": { @@ -13794,6 +15184,245 @@ ], "type": "generator" }, + "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'when was the nba created'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')]), 
CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'when was the nba created'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. 
Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": {
+    "chunks": [
+      {
+        "event": {
+          "delta": {
+            "text": "",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "start"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "The",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": " NBA was created on August 3, 1949,",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": " with the merger of the Basketball Association of America",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": " (BAA) and the National Basketball",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": " League (NBL).",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "complete"
+          },
+          "logprobs": null,
+          "stop_reason": {
+            "__enum__": "StopReason",
+            "value": "end_of_turn"
+          }
+        },
+        "metrics": null
+      }
+    ],
+    "type": "generator"
+  },
+  "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None), CompletionMessage(role='assistant', content='', stop_reason=, tool_calls=[ToolCall(call_id='', tool_name='knowledge_search', arguments={'query': 'when was the nba created'})]), ToolResponseMessage(role='tool', call_id='', tool_name='knowledge_search', content=[TextContentItem(type='text', text='knowledge_search tool found 3 chunks:\\nBEGIN of knowledge_search tool results.\\n'), TextContentItem(type='text', text='Result 1:\\nDocument_id:nba_w\\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\\n'), TextContentItem(type='text', text='Result 2:\\nDocument_id:perpl\\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\\n\\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\\n Konwinski was among the founding team at Databricks.\\n Yarats, the CTO, was an AI research scientist at Meta.\\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='Result 3:\\nDocument_id:perpl\\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\\n'), TextContentItem(type='text', text='END of knowledge_search tool results.\\n')])])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": {
+    "chunks": [
+      {
+        "event": {
+          "delta": {
+            "text": "",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "start"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "{\"",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "type\": \"function\", \"name\":",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": " \"knowledge_search\", \"parameters\": {\"query",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "\": \"when was the nba created\"}}",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "parse_status": {
+              "__enum__": "ToolCallParseStatus",
+              "value": "succeeded"
+            },
+            "tool_call": {
+              "arguments": {
+                "query": "when was the nba created"
+              },
+              "call_id": "c132966d-e4be-47de-9512-7e9e2e6d896c",
+              "tool_name": "knowledge_search"
+            },
+            "type": "tool_call"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": {
+            "__enum__": "StopReason",
+            "value": "end_of_turn"
+          }
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "text": "",
+            "type": "text"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "complete"
+          },
+          "logprobs": null,
+          "stop_reason": {
+            "__enum__": "StopReason",
+            "value": "end_of_turn"
+          }
+        },
+        "metrics": null
+      }
+    ],
+    "type": "generator"
+  },
   "('meta-llama/Llama-3.1-8B-Instruct', [SystemMessage(role='system', content='You are a helpful assistant'), UserMessage(role='user', content='when was the nba created?', context=None)])_[('response_format', None), ('sampling_params', SamplingParams(strategy=TopPSamplingStrategy(type='top_p', temperature=0.0001, top_p=0.9), max_tokens=0, repetition_penalty=1.0)), ('stream', True), ('tool_config', ToolConfig(tool_choice=, tool_prompt_format=None, system_message_behavior=)), ('tool_prompt_format', None), ('tools', [ToolDefinition(tool_name='knowledge_search', description='Search for information in a database.', parameters={'query': ToolParamDefinition(param_type='string', description='The query to search for. Can be a natural language sentence or keywords.', required=True, default=None)}), ToolDefinition(tool_name=, description='Execute code', parameters={'code': ToolParamDefinition(param_type='string', description='The code to execute', required=True, default=None)})])]": {
     "chunks": [
       {
@@ -13837,7 +15466,7 @@
               "__enum__": "ToolCallParseStatus",
               "value": "in_progress"
             },
-            "tool_call": "{\"type\": \"function\", \"name\": \"knowledge_search\", \"parameters\":",
+            "tool_call": "{\"type\": \"function\", \"name",
             "type": "tool_call"
           },
           "event_type": {
@@ -13856,7 +15485,45 @@
               "__enum__": "ToolCallParseStatus",
               "value": "in_progress"
             },
-            "tool_call": " {\"query\": \"NBA creation date\"}}",
+            "tool_call": "\": \"knowledge_search\", \"parameters",
+            "type": "tool_call"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "parse_status": {
+              "__enum__": "ToolCallParseStatus",
+              "value": "in_progress"
+            },
+            "tool_call": "\": {\"query\": \"when was",
+            "type": "tool_call"
+          },
+          "event_type": {
+            "__enum__": "ChatCompletionResponseEventType",
+            "value": "progress"
+          },
+          "logprobs": null,
+          "stop_reason": null
+        },
+        "metrics": null
+      },
+      {
+        "event": {
+          "delta": {
+            "parse_status": {
+              "__enum__": "ToolCallParseStatus",
+              "value": "in_progress"
+            },
+            "tool_call": " the nba created\"}}",
             "type": "tool_call"
           },
           "event_type": {
@@ -13877,9 +15544,9 @@
             },
             "tool_call": {
               "arguments": {
-                "query": "NBA creation date"
+                "query": "when was the nba created"
               },
-              "call_id": "9ffcb7be-c9ba-478a-af1c-8f68d4033c4f",
+              "call_id": "0145ecf7-ff15-4e06-8684-d9c60e0e2966",
               "tool_name": "knowledge_search"
             },
             "type": "tool_call"
diff --git a/tests/integration/fixtures/recorded_responses/chat_completion.pickle b/tests/integration/fixtures/recorded_responses/chat_completion.pickle
index c4f1c7efdc966d90b11b6df1c5f7d19982624c80..eb7534e6a7222e3faf9edc4a07629989e0466697 100644
GIT binary patch
literal 888589
[base85-encoded binary pickle data omitted]
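The recorded chunks above exercise both streaming shapes the agent sees: plain text deltas, and tool_call deltas whose parse_status moves from "in_progress" (raw string fragments of the call's JSON) to "succeeded" (a fully parsed call carrying arguments, call_id and tool_name). As a reading aid only, here is a minimal Python sketch of how such serialized chunks could be folded back into one tool call; the name accumulate_tool_call and the plain-dict chunk shape are assumptions mirroring the fixture JSON above, not the live client types.

    import json
    from typing import Any, Dict, Iterable, Optional

    def accumulate_tool_call(chunks: Iterable[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """Fold streamed tool_call deltas (fixture-JSON shape, assumed) into one call."""
        fragments = ""
        for chunk in chunks:
            delta = chunk["event"]["delta"]
            if delta.get("type") != "tool_call":
                continue  # plain text deltas are not part of the tool call
            status = delta["parse_status"]["value"]
            if status == "in_progress":
                # in_progress deltas carry raw string fragments of the call's JSON,
                # e.g. '{"type": "function", "name' then '": "knowledge_search", ...'
                fragments += delta["tool_call"]
            elif status == "succeeded":
                # the succeeded delta already carries the parsed call:
                # {"arguments": ..., "call_id": ..., "tool_name": ...}
                return delta["tool_call"]
        # fall back to parsing the accumulated fragments, if any
        return json.loads(fragments) if fragments else None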
z6(sa@@xXzOajf|((W{>l8TuBMa8HgC%kq?m^TK-RXac?#cXyny9k?T!5cSey#||Jr z+&sFTY5=`nn8vn6CTObFDjrI9lJ#GdJ{~*4X$c7#c@i|G{75O>?2?vOt9&n>_d;zV;Gzn*;6l>-&E05sBZ9f!rx}`)2Ei}JtxmSl?~9wM2wdakOOfWS&~ zR+#%i*?pPQ%;v&I)#CAwJ~_*M>$Y&*n;79Voki&E*1*AtCF`_V1>?iH+_&s)IWWEO z_%QeF_#Uh1EA(*Eh#kcIO*;<12g5frdhW zfNizIW3ZXRqs>XKj$sg%;70G@CaFA87{DEGa4vcX&V4MZ@jX34xQG z_v|vuHgY|l%v2d0-hdCn2^e$i1UOxqyu>3cd9xSQ=V9%-A}pdJ@?(W~d|FCiA?!DXB5e%qJJ#qF=R)bGbzsuY+Ns?|6Aeg0nv3%tiV6PX}wD4<*{BDhjzdRRw0|6ZncjB%S)L8(#L@aYrvT{ zSJzn&^UjHf5t}+heq{zsksu?W60XP7UaJZw~9TAMoVVkMVq>Z?kMW|B(oUP1 z)%=gPZktuNjm`I+d#^tbh!@GsKx8_zii`mKai8a&$9KMiSO^d|5fCxx*_qEUy8+L6 z8&pZJGdX4-l$Wb zqRWH-SA4(6zp_De{;un*{%`p4;okqwztdPbpDj-w52CXf>a%1XENCTeV*u zY-DuB>FnI^9US(K_STN}I?M6O@Gv^sj2EW9(LV|Ys0P2#^J_b`cra|WmKGP*msV?? z(bK!}-bYj4{t8U$E)lbVUxltwLgExw6WKJJ7dX?a06apg1xgU9T&?S{K{_K5fxs6f z2{uG36G?46byRU=o3tEyhB}T1#krGpXiet^`JO0eLmPNUY5CMIu0Ml-Y6l_w^fA~$ z3}6@XNe%n}Fj_i*h(dF_U06_nqMF2SIOcM&rsU&2(uj1& zkBAbcc%J&{LOPV$8M67I&eK7M&t%Ps9FWNb=1HuKsqfKyYM+ZtlP;yMw-|ZQ|Na%5~hA7Z3qh&r7it!1kCF?CdLp-7{MUB zz``^VJqM`GsqfiVv)P=P;53?|na;ywZU)T{Kq!IzP<11AqMU4y zxvd$dUMWBE)bH%2oFI~xpKxZUU7O(rQ{SgMIexx#obA%(aew zF!giviLARMYLTA>9k`Iymdxc@p^K^S?(H-{lkaLKzB1#0b5jWqwH`}5MncjWn)*I& zr$$1xG&e(aXhyrD;NupHL8zQ2$bO9%xdvTCUbcgrLV;E3*eK$m+PF?Fx^yVS5R}f# zq)vSom*LtO^HnPhS=L!S=5f5*PW|%SNWr;LjDCMCjB$Z5fiwDx|NA$;7MO-R$dZLW z75?Nrq5(PY@F&A8Cai%l2!Ij3y2?JU6Sphk_*ANsQ#Au5$ec4p9A7~JR2=`3Fcu!k z7YyiSVK!&SXsp+dfQTBE#uwhidrx%BPH+kD__zgkp6@wxJf7~^y^P;9d&<(Kbe>mo zqR-EG)Y$!rKDF2LSn+XCfp+}~E+K^@I>#xr>nC;>eJkWOA1pg6{vhN>b7 znTTkjb#aCv$QEP`5M|4Upuso;)x;adEv@kfov>u`c7l3q@(^t(j6dWoQz0uIa}CN! z$X#Dl@$Ad;x_G+qpy}#)#^zCR*9$7Ufa1NGD)}0U8V>R(4u%aCUIpg`et9gR)+$&o z8bKE{FQE_2ROYvYO@qira1jyt_gUnGd0hdVIbl{!c;q1-ZG-ZMG!aK2lK{CBW=n;w zCHKOLT~K1-HrF9nMh<6LfUB+vkICyOY2PGp)g;`~xz@;+jj8&}EFw(sUjHXiQT(fa zWAE?z&)=l>5Pz5NzH;o>8SQ*^XTq^+Jjf_6G=%u^+*WmX#GeGc>&5gwXTiJ2s;__d zI(@x>fp~ea+gTlcU$m#06KC=S+}O41*Y9nd#E@O94*Y20Og!1O>dOZojRkDxBw{11 z>k4N+`wN`qvpB82(E#*nU{^!vpC|duwd!cK(|xA8@?0VJ2@4FsXOo;HGmpvJXF(8HW-)u}bH1B^nMXp)eJ4f~*nY#O6l)?v0( zd+c}Xs62bE`glk7*4(q%`=&Ru&%rNg`{g-gmXO2%;9=)QS*BJLY?@tWOq7y+tvZY% zg{G$`OoExA7grTwCt|C;$o<@@gJt|$^>1;0ZEj9FLM;SxCBIgU1Oh>x`Q}=6?*qN( zatn4I1i|6H6V_(~$RJK)?8bs#Ac(9+u~>Qy=<&pS5oO^u{~yyn5GOTU%LMTiB@Bs1#6H*;v`E zY;3GJmBo$KrOv9;U9s138s5OQd;b-=FnjOfr3eV0E?E-jzmK;pfD3^`?2MycV-b*>gL*RHnI#ghN$ z^rF)(`PYemUc$V+x(My&Zx<2T*KzVPq45sKgh{Nv3(~{f$#~g! 
z#j&tE$7+L?K<{4HT@N5&Ef;8jQca!~Wrg6_Ro;G92<(r#$uXbkx z8L-rWwL9j)O6|GiLhThYuhVVPj-lL(^jE33MKxc!TMB?)7NA#PK?*EL!pe1>iFT$E z3z93Kljr^^_;tpCIA03r<7FU($Z8v1?e`}*+&_L{W&7vqKzYh~(tdQ0uc*Ucc9`u>6DeX?)h zV$3VZahUopeteR7#bjZe!fSGhhi~e8`ZFNq6v5+Zye6lyhMe#ihB23-F$t#VRuO9bC!r zp2!(y=#7N&-~@1;dfC+XYD+ke0Jv1ai>!}ULuw}=p?Pg*`l+~94E}JU6Ipc+9#a+V zG1#A}pRqqvGnF{G^)%&>&$2oVZWa+Lm6fKK>D2eIewH6zb@vPm@zi(kUW#&7Q+fF~ z0cUdR`?+bI9?X!X$gd~HGp4?4t46X+h_|HCp;-u*ELM2xyLF4}F%1Zv8V>^Q*<6{$ z2~U0hHZu*Mgl0CKN~2b$pVQy9stT>0!?BbJ#2W^qsqfmyZ!7J5>U)~;=j92A#yFO& z^BIh$zOTPM?sdM}7GE8L_XpoT1;M*m*|=S~@1vUKs7;2VXHSkI5Gv$p2_pFn-zNM; zEO%ss-rKhNjyLR%JY*gjnxhLb8xR(Y5^8ay6}T)@*cb+=`;9|4?l%TbY?FHl%Pq%o@E8RJLliTZ zxE^fFbAL5*Sv=9Y<9Ka%go03^bj%X708@HO zJri#_tN6@3%9N=FUf{hgRShjS4o~|Ih5$7MNHWABnJ^;hC}dl64^>`4(%*5q_Q;Ft zlArVvC77NczdSDYf4%?h{&&Vp;Pij%VgGx%;y3;8_kYm;p(%SK(Rkci!hbiC9EpBO z7pf^g?9Zj)U)=gb^+kH+_w<$D&Ey+ReCY~an$7ITOaBW0-RS>yhGqf3{MA?W>Q_Jd zq7)wuozj+7>W=(2ncqtDR*5TupBFzMaOR^gCzN-tktWy5lbdHsKQ_3rY@@buD~d;IEO>VY_Z^=*#B z@hIKI|Iq)({y*t`_t*V2S88n0XY!*U3f^SZ|MF52=%Vih7v;bE&NoF*npWuX1=NCU z_EToLvfN~{UUBYa&e>V6*tY=)y+G>{9l5f&u)q?Zjc4Uq5DyGdiOFdwKQLPNFz_8{ z%H`R}25+cnz&6TOlEmwY{^XH-P=;;}o{8R3eJ2<`GT%MXAC`Fp)n`dZHpH#uLIYbE z)B>lzP(^*NMW&kJJN&kZ*A46!4JkYw)fcFz48c0WgR3!gP@y?KsxMX-=2Wv{pj!rb zq^JpOsHCi+ci<7=$INY$U&`u>egJj6ep5b|pQ}_vF(-vSTIi#RR*sdEXvf!HAI*Xe zl@dIL_B>q?ev{LWs_=wY&z@l>(~zb-`ltWnPNsJDm(#p2?Qs5|et*gi=O;)XY4^qc zI&@l!xYdDQ118~NUlpfmk5D)aj*UAXeo8a(6BxIciz>E(oD*qD>}x?_LL79czKyXdn>AOzXO4?z4+U??~ z4XvLAj%WRAd+6Cd@8Crrc|ptetnF?W_eaTYk`+ndBy`$vbpE*95cF~H5&LNzTcE?=%z`Faf3skDUX@ zM}9ZChrRRxk3x)WJL&Aa^(gRM{QilHat@w_ zcgJxuKPNBN!@TV|FRUl+KKwVIo2GFUL5<7I5*N+SAF;#R(CUt)K>1EEu(mxH^+40+ z-m}{dTul4cLl*!H8><_u&x_Fg^cuaQZjU0@NAX=tK?1h>1B~dlSGjBVJ@}OHHonx1 z^GA-qeIL}kO5Smu0zLc)53onL7K<1Cbt1Pz$;ejTw;qF;ON43mG55CbKQ>R{)`?I6 zkb82Wy0N&qu(`Q}&qDjKp?5gG@kgHbavuJKWc$A19(&_0Kfvr%K2P@eTNVNTof=^| zR{nhCfI0fHeN?%P%`P9wUmpjd`#I8#4U*NDG% zPW+`u=RD3;>=y(b;N5k5kYXMfk=1P-lAk3j8oO_4VPOR|MSSbib{s&y;)5laL&+tf zgM$MLUMjIl^Tegv^-#6d!h^Xo-8cf-wum@Y6cqdY0P7dOv_tHExu9zgTr>lh+ry!{ z%toWy^g;V@w;niNFy#J>0+G4S3&>U1@%b3ogQ7nSF#4eOiX1IES5VdOfcLbVqX477 zbk~YdAre(pVb2}fAw1oK0j|ffg2ILA^E`JSQJGP1fUSj|apDf#5Cs4|=-sx*sX{Lq zw6R(v7qyM)PWPP`D0_SWHcHxp^r7pP#)*)wsu|9$uAYaWj}w{Cz>q=~Y9qTc8SYO; z!y#4y+Jkz^e$1=b&4mSflasP4izC0m*SWtpmzFQG!yQMlYNjXTohKs~wKR8N!E#z; za?57o=d5RZ>GKKacn50=ixnR7Bfld@`Tak~Y3{@o3{xdJ$SduL&#&N&8dQRAg_Bq5 zYGFFQ!#}n|)Fkg%pZS<8Oc-V$Z79S(j|(hZ%&j%y(mU8@7-MT2>&j}&8Q)C~??5X8 zx{L4}=yTpefnnT+`#8NIEEwox7zvr%4tKsU=HT3 zJ{AG`3$X)%mKR}^KIzl|IH}g_JlYH;P_>H7Z(#3Xpg`q0!lJ+mu8zr%wS}dX#mx(e z51e}#lL#)bjs>Du{QU0Ps)gb9C=I2)HEBWl#V}xK8i!~vz&y@ply|FIl_rc?28V}- zU>zOB*p4Dk(yl?SKx()?1|+VGhLyF|O3U*rZF@)&1YyJWpmb{pIfIf26*i$l!BtsU ztSqgJiy4t|kCI8IQ6N*Tx`lN&7~$Z=zo+&MZ?JMCToiz=ER;W;1J2KZ8@1M8az>e_i)8m3B2`0)`?o1yKopu75+iymh)3pQ|M zjE24M2Z!i3&%F|iJI}NGRJI}qzhj9(^7NrRsN)@HK4zfOb70}X^9wVm@Amp0{;w*g zLCo3%9;6h}n5t;VV@}0^%{H%EH*VZe2jAVm$5Sxzi!e6Sn%)WM)*TF!y?oz?&M<;p zx9-6Lg>~iZV-<8ch2veuLd$6HABwgli;u#sBk$_)UC@ct{t42{E2t0+<%%tW>XB~( z_PK5XqN(U}=Io)bgohXe9rhqjQ#XKa<-w0YS6WBwgY`%-2o<~X!0t(99~v&OVIZk!p$`$tq#@J7 zWJtujfU<+R7rF39FMO!Sk0z zkmcG0SuW$Z%b{MzT&)n{JfRGQ7Tl>Ro&e|`<9=i?N}_0zLMKO0SuSj-uo+bcpV6$` zyQs&?6S$qVTLQ1bWl89QL9rA`BeA?|g`}AfKKONXJB3GnIiZ15A{S+Af@wr<#?7#sQ;V5*#XocGDQWhkAjQ*X^p;E+q z2)^Zgp5ucMAF3=1iA3b@GPhgApd`I5~L*nomyJVO{i zI65IlMo7J~;bK*9p_DOiuq^?;BnwI@r^aGL6DW>Q4^Nr}&nHZn7`qxK7IIuwTR*dPjdZ}4 zf#a+rw4T}fxE#;EEw)|n#BCF}KqtS{49bZoh+L6f&rR;wa2Kduo@ue(~M4ICxXeo{-PpBu#)1ph#3sP(J@crUan2 zICFu+)xMx0Jbv{r5dHP{qvLU?{;Pip*G1^IqvMCKz9U}a;~a4R)r@>7md|nj2d}<^ 
zP;YD>=a58uH*TZS$G2lRW{>w)@N)*#FTX30@2lTH+#Yk);=6t`vyYEcu>W2Sx9=_B zFLh6X7T)_w@>#z4!Tb39p?sTbw>+r7B9F>t0I0@(SC#+|^5eZbctZcitAB;L1Jn|s z_;?f@XVL@q?&1QuO0{WkOe*EZ} znw01HT7DAIffjhJ`m&JL9ZV|LC0gi==Hkef@w-;NTg*~nbHAt+2+Oj{*#aF$(pOE1 zeXY8;G*)sjQ@Q(;GJW&cs(bpM2bS@tdIeO588*bf5wij{oBO-+Hs^ee^6`Nb9?x9(B5AV$O`oXZ>YQ&r6?2mzVIbDKjrEh4Rb(Li;*@ z%jBJsDW3NqX-)hm{`H^nmo~6uBc%zUXBa1$Bu!;HzFq6&m&$(3#=XRT6sz_OQ#WHG zzD^5~Qri2iBJ_j23ZM~yJ+A^B(FnQ?@)p1;llhH4{KS-x>f-3R18H`3we!w)|K>dI z5_c&=KMaAZ%uvAjCEqpABg6TH>q{R}_1U*n4Y3IQeDbJq@BV`iZOu9TGk3FF7tGGwf*+IMM( zzlXoZqr`h>Mp=LG=2r5T9Y{UHf`9dGlDMWA)=b1YW5$m+@a_5;be#IER=z6`10)#$ zd!~U+q%Q>Yj7P2p*iC&eYQ~W-xeiEwP5k+ix+aR1N(p>Xv0q$+X{iE|0fb@1bUOiX zC8`C{Tu0K0%q3GlLGMc^w91jrj+zr$X|e*DjyUyw`2)1S?Tvt;p~jAkA0S`t7N`oQ zzU5N+L1yBG;D3)S`vs#%%(SyDDqMsG4Qym>=Sofyd~+^HH#B5P1VBGi`1xoHJ}2TUTk zNRzfBZ7$+vk#%1wxOTKJ^di@W@obiv+8~VpLPe$i!7Z5eo|)9b*<=v%1N1iXUnaG| z1Ua7V0GtbV8kiH}`=I+Y2y%RimJ+#JKXJPR))Y++G7A)b7`YF(Km6$aL)shTT~^ma z7Y5B#RNeJw8Pz%ewFvs)25S+IzN^;^un1dAtE=thU0=6(%fj|2STd6Jgp6}K66dWg za%gWg|1Ib*^xER*5i*UdU;~O9`nRS@f06VOXiz}_9uqgNGOHP@@MzZu?@o+#9WOwf zNgj@eAoUx>8;AB0)5cX!35XHeZuiJ)HY1#vZKv6s$IpCCgSjXXf2Wp)@4?R+H=Du% zf;X6F?)t)@18s=OTuHN~As6ip2#wIN4tO50X%Ou^8gCvmEwbYUt}C8?XpL%H?z<>H z^3ac}^(j;U_5@A#Jf@ye>NcAtw6>_6KbL;kffVxRSp3#UIgv^tu1SK6!!yob~EW#>4=(z zVA8I#>tW&z$h?AWPzA}tz+N_Ysoxa)E!?xAd704R@&ovU$FC`uSU9mpdV|UXKCQBx zt(Q?VBviHJAys*I3BF?1`2j0S9s4a^zGd|}ze{eZCqK=n->NlPFYO^{Udnu)uFraj zKi39FvNfvX7s{fogR&gyHrWT{7K~awS3EZy-6}{)XVF< z2ANfu6Gx^KYPz;8&VuJ2dRc0WSis$%H*4ZLOPzBcea*^`H@uUsJ?<{&jW?)STi)~n7LNOxHJ zDzgdaGm=x0BA}~K2P4p+M2NXT@T-|Y$r}y>o0M#o>aPC;{tuAKI=f&J$8qO_69~L} zXuC+IWyO|~+@Qw?Cj=OyIHDL2$a-Oq!H>H>*i(3Xj5P`$;^~3pL)jiq5sH1Lu|k+y zX52n;NonHkg3V`3<|TJ#>s>og7gf^@Th;gN1p5w1vYfC95rWym&`>@m17o#tzN!Kx zR{8uF=%?PnBX;!AHs>&My2pt8q4zL%Bdkeu&zKy%0-bY1tBFitEEE8dWMZRq#a~qI zDQUH52|F#T_AIUdZ<$ehupt{>5K_AUg%_0PhPbeB*REebtkMq5gneQdFMjr%^ zx6iv5sn6URJ=Dg*`k~7ibZW@#q-7?owN{glNxN-_FWiGFxXeIm8`YK;7uKo^n`;}Z z*rCve((P1nNSX!~!&ug%UXDUGvQi^#8yfgeg9D!NZ#}OpPHJO;?E;AE3 z1ujih-UCAt@7{ti+fu`%@8HhE!f}xgpVj?t%KWM?)a>4BqUj=x>FR@`v&3I{c40a2 zqYB7&1|aTg?GUGs8+Nz!x7ZI)e|IsvZ`nO=b~Zw-8TK&$6ud)z{gv z;D|$-MeqQF{L8@;v9m_PGne)vy?@&mzdcxh4`~v}laiBJ@f8>qH@)d&yh9Byr;h6= z8B&#JolNtIdlXF^?MgMxsS82BnmU6Px1b?V_n$pzV7CR68V&^{{X1sZKr}0LinVnIcf1tQWJ7G%uJ?T_=N7-2(kWjb&$=BvL?M zjIGYI6gw4#xQ3uysI*{+b+Cwdf>X2t9>CfZq+>Wm4p64!_f6Zbo*8pjG3Q{Ba;NC@)gdc z5TwnnoQz2eLAxbO3SAQJ2c;O`#07JgM+)2+77&{0M?5Bz6HjO$6y0o_9&YRN+XW|`!)DSV8eXVpET_CYS1*bK(2Cl7g z;xfvdT7^m)QW+TeW)%az)OX0+IT8W_G&xbiQl8l+4p)qiC^2ZB%tx{y9KU+#?nA0U z*_0!IzttXN&KhL~DDALedp*+T&fz#vEuq-$xDkx*2(4w8p+V}2#sG)G3y|TdPM`Ch z2l}=~?l!zghTt}Me$_ES;VTF|LD*j%5-w=U>BUC+To_CI&`{ml|%nPRzwr0FuK=l;#6;Rpx;PcIXEHo zx^474@Fd$I%v36cZc7v%S<*Y7-u+Zxbys}{o=niTV6|r?m%aiYrjP1IouNDjpWXwz z9r>qmQerRDWUwAVIDz=pvL+%>20#{tQpmFkn@orFHzP@O*R!hL*y);CubCe?wfAuPVS$RHk5Xrd z9fqTX!pV5nq-XkdG>BBU#}z1?4C?&Wtx2RQD69LUmZCE~l644oly;)l;9zUjRyNj_ z);f!u9jNmg-OZ);`pV|Y;$pYeS?w-#yP#PG!K1yjT2qgXV6$iJ6d*vA5YDJQJl+%S z1=O6NtTAc{4^*xh(OzrNy91?-^WFfwFr?0TCkvvN?T!rpIJ*#9Wuk5l{UCo z`5Tl+a;dg{Tb8PRZ+7QqCWZ9G!ak(@2Xug&a~^XQo;fAjg~Anz9RwOzy?0O`gH8ZU z7`@b-E#(+V_pB4bBW5q|Y0?1zr_O3mi&x z4G2RvLb;v=1PgdHO)3;J~qc<(9ffD>)bgI&rX8M8lTF z!SfS!1srMe4xSGzR2zyQUTXG}7@pj2ri& zO&BXZV*=V>&yDfw2Tsh}DZ%j;3^1Vi*d~Y`K;UF`YCTe??x3cg0y6`-t(tnML6IyP z;dgMjE3)L^{zN9zjyhl*Z$+#^ylINVz7|)TaiA>GUIn`sq}IdwUn ziU}XqGBpq7%+2UEqL7OZN`*Gv&$%l_Nx7M5*Bq5whzde#k)9fUB)V{oup!D1oVK)M zKIei$Np;$^(xc>z7W)HAjuMo~lYhlWHz|FUO44ka2WeHG2Q9pEupr1Vg@>XId2{Md zCk+xcEWs;L`Jxw{joX z7uQaHA=|mWyp;RKEP<2lFeBRFw8+ 
zl1i*88KxzjmTSv!eDdjL2(oX?b>4iT`)^K8x%)E~N0ZgtN7$b#HKZ#lONf67wjXy= zs;MZ&fT5%D5JhUo@)l#qD*0Vl#d$+n&%^fgTE!neyi48bd=LvN-514pq4y@K_RvvM zy2UzK%mD{ELg9`9qO}q;YqT6%LP1!+`9!|Z3Ej8#$5n_D#Lke)uYP!2Y>p=&Owgoq zY>k=^o<}3_lZFSLcD^mKT*^^m&eCJ_%c6y^qRyZgO>k7R@G|^_sF~0ku>uH_38Yj6 znyI7hLVbE6^2cv*CePmA{l+`*q>PNy35Ne*c0jcU!AR&oQc;aI*a;w{in8t)>qr!@ z=?5$S)D)C~J2Qj+WDg-V$PwM4W802lt%?1poInQu`O~XKV_zMj_t9K|=tY1;9~0#; z@KBu|Dj59xFg_rV;d@|Yt3HdLGt99#U%o7om`P`efjmW-Q=vfg4l!3m5Myc-khnnf z3dsE)j_(4|3x`61=q(Vv*8Kv}3nwoDqXnY3K=c-fUK7wO5WOmxC~=H40KN4B(ffEC z@hyWnDG6I)Hy#=B-AG!SH zFQ-3tSs;1~MDMo79;R^e0?{icTY=~;5WNb&S0H)|L~nuUMOoki(OV#Tm90}CdJ9A^ z(ew&dTOfK(W@>@x9k+tVf1iG<_!5ZTd6bL;eU8IKxAQ?qrTcGy*?U7oTzxEFQ zygV*{gHh41zV$`vYH_s}sBW+j9}S(-mQ^C3C#5pvUWx2z{A@f&gWpYkkKRvn2+)Tw zO2Pok-e~qCBVUnxF!g=;Z&L2w(#w}q(cHhB`%9FXE%*ObvbpcDcnSaN|9$p578rf? z?#g0!v9qw+sX41%Wk| zMM4JNCt8wQ4gz`7u?2KTb~x1 zfm?+f_yvJ=L13+6xnyk`Re)B{}*SZ`Scf%SMuU;RG) zf&BvRUclW8xO+ihZCD+t5*Gy4Na8C9tW!Gqg1}k|P80;z1>C(Lu>OTk?JeN$1>C)W zyVKuHdj5jI`s-BIsero&T@+*q{e+mBRD)6d*;|FXUoKN%3h`QJVHIazg4-NL;)1|~ zj|JtlBPX9D5aSyr0AOGnZl^wb^Rv(H-@QemPkhlJKDRyzWD8E~j-wnZLUzFd?R_!gy?_Re2E-Sj;)kPDaYHf`x zBC2o~j@>L$2zBQ&bwelo$k^UfqKT4|JEuvu?jfg5I&MR3MM%AXY>bklno?1qc zVD%i9J_MtlJ@Qa+P**P$u04UiC9g#f8J)M|Hm0PHGKK0I0e|V*fHy4E@7o)pXsA@c z#f%G6pM!$QF7DD5&QTLl>LE%wH_oA|NRH`XuQ=rxVJS(P7fi|Yw^0j=uje41S*UAX zNM%v{Xi)P&o~ZgL$F>^(q@O17m%$z(pE^P5N$8(^VQkB9c4 zh$)j3x0>mb8+^2NGg-;`MOaNQ8>5=)6uu$#`LdOm z*(#PORWo zr~%Cqo2odU)YbHkFw`eECiaIeV|&AT6iPW+^h#BCk@;hN&is^#48TRBXoTWNS+T}Z zzjf;tO2*(p<#g*+)k=7_@Vv^I#AG23h#lvCjCwlTF$;tstE#Lft*i?2&23q#Qd`;e z8(oK`mYk@*vaq=}PsGspM=jTm>i4js9eKKCl~p;vH-3yN2KdI0$!xvO6eW(ix860+8jU=TA-!5lXuLc785pN65;0 z-MLIE@9AmA40Xa_h;cy&?y8^4!}>yXVIB)?=-F*2H=ovJ%b_lxb1)ZW(~Gj{McMSC zYa1Xt&kw zJMH~pK#f}GJVP~uanp0#6BY5Sg?z)>j-t_kri&DVPc>;+BB};u-$qV3l<;Bbh~0qP zJ)_AvZm%Cl^Hkt&h|Qpk*Nz)H?btg)8*E7ZzB7QH1uIU|mpDmR0M z%w-?aRCVbixE|mOz@2Uv#W|T%LPx;BMHxao1yh0=ccIOStP#4*=36p$d=JJgd9vw( zDyxnGc3#lFV(H7oQlvQ~gN4C#hv4Pcb{Pv%8$ZHbK!%Q0209FS%H_j&zEF9$RrMh#x?_5?BuO z2LwKriS#mK%Hq4kLNzP6ahmOlJ$2I7%r5bhPN1~2L#G=zMzQN*x(34_jIEv%H|&l* zjGa&$$5>AIENPJb)OdC7Ids&(`d0;kQ9VI?P8b5U#?>N0-FlR&4B9T{M)|O$)99RD z;K~k%!(r`M=@|TI-O1?R^So|oo^O2=J zp^KRpYu}h^Wzve&#*1MVdX_(eH5k(Sl1?TcF5l`#p^K0=E))(Vz z;$dvs|?xJabnx`DE>mH zyVZp6&bcXbtpuA&_Py1F+-4wIT$&;lhS)Nco@zcq$8!!)={0-Y4ehvJ#ru>H*N2Ka zl*A>eWyW~V9c=DM9aUgTiS5EVJ<~e9tUF|!OL{y`p|=l(Q@J%K=i|T`1mO|Y7o57- z=rQ!0uF@C!2%#Th17O+j`a4PwX*+Uiw}ZiePZ4ivIAVo%S$hfWKwM3^1GQ_n@%s_w zU5q0;nzvdACcsrbu#c3x9ET5&%g`ObSEziOcy&I4A#P4;55c|#9Uuze;Zj~vbHvN~ zHtm46LS+JPaynl}vhhS6K|Lu9U*1{&=du%j&f?yCP;sD|a_b@9WLu%_?{nNXFu+eZ zhUviTYh*h6_5mG;Bh0j%rf@d1CJ(l53u;`~#i{C@d@seP3K5uq2o*BQr@W6@1uxKS z*Kb4Nh1$Yvq-pqYagAEt@>11$7{vSv(@p0%OJ{lRz60$gdnk^%Epc^_vvr71un>WS5iZwETmdSTR6TvFAw6ckALiE|Od%%}LQx8H6SF$3x z?4?2J*r9xSChC%O%l3wS+k%f9I)?9r7&8ncy3q@Q4rT|hbfLU^i7M+s#JU*7e&@0f5iN^8`xJ3er7GH z(|*zp!NrgR2q5YpT5VmGYOr>9)kz?GD<7xXomH;<-P!rwS>=wGAC*g8e$2SVcW0mH zAI5p8FY7i*$Lnh=^!+oQg%hq`t;tK%F>Wk!4z@80n5@u^piyZ7P>u!x6|2r1x`+d; zJqiyn1}!H(bR0cbBCD{Rxm>6zbDdfN4HC}33W=;PEWnO&hIoQn>=AF5IF6YDcVPD% zjDAgwBDY<02fY&wWg{A@5!IpJW3w~SE9yKuc&FXM>+$9uVjqx$SQWBH5^eSAk8f8k z1DVg^ylAFuwLP5GTg`u)$l9uiaY)$e09K>hhSkvn)ljec6f5fX1N`EIO*NfZapq-K zq=?iGqTZUYW$cFOioFuEU;KQEz^Z&Nte|z(;yZVV5aCiBvC`{eHsiM zC+K!FTqZmCNbJOJS4#+Ky3$*)HJ}OOJfl5D`AFG!UhsZGJio#jzR!gwCc8c=ofkIA zti=e!YFFnZ7HxdU;PC-=K1$!+hOp{E&Zw_Bu?QNPmUgkKMN>}V!(7GOz!jBMzU$Mh zz!dsDh!6EpJ-~wiv1*>yLB|DH;G;*MK@>`?IvNed>}s~UupVQG+nfD39!6WW+J)7Q zOQ4(!X6k~OdUi|D%#RrPz6G`*8CnY%sDb_%1NPjl|$08p)}KY zts?j@+K6rWj2+`(^=!X4f^3Ey@ey*yj)nk&Q|K@VJ5d$S)mEr7+xZ%Sh14GgzB5O_ 
z^>Y%}exnJgcY>+l5dKk?C-?>kYWG&mU$5p90(_nxJApQ$lo`uITa_83B#xY^@6EE( zHnwJF7OR$ZABGr@*Q8f+Qn|3pI?mAXJ6P1N6HR@m{-8Po)!n)q9Qt%l%AqLE>#1+~ z>{8;CrVbvPTIki8WpwB>ZKgE6sqe_2=0H}$u;Re<$ZVLY@4^qE@`$!V%NEOCX;08w zv9qVX@i(kV7I+#%sLrfZr@j+&7O-rzyit|-g!#uHN)OItDE;yDpBjHS<{wG@6jMJ7 zf0BAqWGeEGl-$?7s(1QQ(`?C*6t-Uq;_;7EXH}}&EYWQ6~l*g(|({FQSYQA4C z%G4mFm!||kEC2yd#OmScg7jk2MSTis4^rPoM`zU`$`<(FT*iZ0&7wKZc+g$mTwC7t z@2IKVvN)+pX%)CTS^r_)q&{V0KojdiG#Oke%G5MrwZgB{Y-Y!@C{xoe%G59}$v+W% zO>w;>fz*=5%<%Kvf@ufdXyDgN9Yh|3-pD!iLMeb$FGYaUIK5$Pt`qp!cr-tSV-wel zSR!7D(69;D0xso#?t}ndvN@+#g^{qPDK%8}sO&5^5r3j8<(k5^4M>^HAW&BDsYN;jkSKujvAhxSW3 z8E8lSj6b{nJ)$Ib6!*ReI0Sc6h{qYX)(Pg=A>OPvVq&{MU}DL_CL#u?9Y8jkf|5&c z6L$E*J*Wm@uV%NR+S1~}S{13x8>=AP0M;#qW>;WP13O2zx$cmd-cGyky9jgR#G!%$ z6xyL;(AEDku$cn@(HCh(1kcy~Q=pclm%z#x(6CM`VF^`)Re-0H=A(;zoyiRV*o;{m zHWN(v0$@#8xnS2vw}CJ})CiX3y%31-_$cimbs6l921DMhP(1RUOji~BIEVb1giAIs zAC^}E*lLMW!ebB1;f#tafEyuGy3z^&U;SXw8GKNbsX2epiZV4tnVO@*guEi{&~D~=Km@3q$bQGK9=iL;7K1$?M*x4TJqEnBQC48+hVAv7R%p-RI8bto zViy2)Boju=8w89M8suR`9*lr@7`y<^RN=PIdCvoVTO)T{LG*N4qNi~_31?w_HLHje zWoo`=WopJX%_1eMNXa@gC9A!@vbnOj*ll%Iy9?d!;>yCp(z3m@TGP40Dht49$JODH z62h5^C{#3PLa5{#Hv~Ytqfv4M*fTt;>P?!|XdV-Z_l7yh3f)MXL8;d&Xa}Y!aoRA? z=cajL!{f6|q8}7$l;e?1C&W zi+zNSv28zpy!{E!G<4TPc2A6qo{w(NO-vg8Dw0?xq+etv1>Kc2e)O9Ig5(-Gc-elj zd(N7YD)KmBMA1A$%1{SLVs*SbFf@@^f(f&Q#mVFzrK|x?SWSOvbdjnFhU~$S;<9UG zzM`-0dAbNPU)6DWvN$rIBgr-H>pTzWFGQUgZUE*2bdSX3L|2P4HR*0F%G7*a6D2R4 z(xL-#yIo(CKO_#29LM0YalLP}no3aKg&c zK$u_)tN6NF#YF-CqHGN!4bewuo_Bx z;G;4KU!rc=vab{Ofx$Xt0PtuT*5K8^Q%p0-^p6>X9I< z#0>oV&?3u<-ZI9gNX_Rzb>n|2FvlbW^*nIXmG3>d`v|r;&W*_t)$FGl(6^0c!_1^+ zGsm3M)U-g6q)avW;Ory`Vn=ZLl8%Wbg8~<|Zqyl8`|-e&5-Vs0{@3V{$ObiW8vK@( zfG>+B8ZSOHu4xvS;{tPBV2+7#E-=SDtP0FA`QJ4GydpqIIo>@RMEnU6ymlu8;5kda z3YaU1l2WM14cedHV7U-%e?M+);h@pSZ35TMfvlBKF)spVeK>;YV4S*yJ%Rxgianx5 z%#2aaTp_Vn<*k@6suFWlnMn6@iULWZxtS=FL}Ih(JUqC7Tc9>NO24FaJ_^infjKTP z#|7s2>u42U0&_fX-GSKl9Qtt1Cps+MzXYoA!kE2_A>v9beH#oLtYa4V^_x%d%VYVu zii4sNbfLk9K0w5o-)3*&=m5&N0nzUv@$Wg@MxRAanC~ghp)kwlxZ}`I=7@p|iwR%L z(PA!vV@Yb66!mrrxOM^8o_@IYt8aZ#x>`8x`H=Ege?aebY0D~gN4_}JA?`|qP)vQx z-({;wc!essJqM+%Sv!iYPxbZE*3|dn53MgsQvZs*`55e7iQu35rvEOiB}89#jLB7c z`En}S{Fm9(3Mnsjhe;6ptN-`e@BSk`dG+qflC#)x7B(y0m361GveH_pY%VV^RyJE3 z>l>@9Yn$r}o#Va##A|_OZa>3oqsvtlkWrLDXkQu)yILIQ)Y9Td3K$z_hEV*>yN!QuMFD;TJgk0*6n# z6F52-Xe-d&p{h8%nd1bd7gc*onrVrMs=K-hOryZz7u??jcnJPZaDWB(H`55M_bu%+ zIJ@>_fy3WwrsR|Y05HXzPoc!*nNOwz3k;47(f@SUuVnx5`=*NB_v3gNZPjXL(ERf$ zzzo|Rdl)<5pcgiXffM(Gj>_ap+>WQz0ANdC*PzTO82EjHQo%*3DWN+Qxx*k5qsKym zSO}sTDM*?oV|ezF6Gk*nZKNh6AH%~x;O7kY0a%9UMrriWaX#m505a*X!17Sw@CzJ% zfy2)!BIA^g1Ep50yBECu&6Snuu39Hgv6%GIE$ZD6<8Q-;PFyfgISG zRQ3d3L!v{4St9UFsr&%_P(MmCfRQl=NEEV@b28XCxBOY&?K8U zJynu}h}A`bC;)SDY!^8E0*7DV@C)v5r)oVxjvdpg3LHN4WSwk?Sdvu6LY82Xqyg7q z=Me5ggnL*o%mG}?X`=`X#4sHsQdx`g6=k$eFzHJobpl2vMFVOC;`bwDIEuWE=B*Yg z(je9`u#b`oT^SS5wpwnCS3huK-cAXQw;<60pfMN@!9LmdBcv>{z0~-1#zLM48Z~HO zOW;jTr>QMS=?*d>PJve6K&AH%685&k-iY~-5#tG`;;rRv!soNpSZ}G&gM0>#3!h~7 zB$eH=p6$;5s)1CBy!@pb#!xZAR&cZ6rzMNyy0;$pw z%|m`&-m$3a`;+zup=$Sd2Dz$`2hXICG-S5z)e z*!k?DaVSLt2RPBndJxQ#Q&KXijg^5@C`>t9_pa?0w>%jOkjih?yFFy6T@Ul`XaZSXwXV z#Vw+jMfB2Ab=+r>GCuOV_Q3UAJH+A2FhCK#{Dwy_&x27Z*$(`cf=4Ln0=Hcj>u~aC zo;6RwoeSWXj*JNMnx!t0+)7dv6r9H2a&V*%7ujxuCna>9_=qPm|&;4o!#@MHlHW3N@QIzT@_ii~$5_gAh-Tg5iP{kbt?dK5MEury>n4`SeO}Qxu`e;7N*)y zh(;hnfnU%72}^iKc>T$ZiTweBZZ%(zpstGJ9x5xVBg_0Typf+WP3nkv`v@qftXSh? 
zZr!?tPz)YaPPblFt%PR_&#R0)2ckRVx!h8Wkp=3cy}z|(rG$Q8Yt+ekEYZqdB_vC!O1r*x5N3?d!w|NR;QvEQaSvue zqOhgm>y1b;>jgcxJy8+QTF5u7?I;=zXu1fNCe@^2iKz0OeZX32e*pC)b_1+PqscjL zuOCNJBkP9P49a-zxEd$x0jUp`pw}kd02f zZUoyOz5v`Q+_I9%0Y|{VjRX)36AG-6p>6FuNA#>K^^;@A_Yf3JPsLU;3~aHo%dFN>;zu}eZS@jucY?OoMYN#;k=k{ph7|RKtB@NP_ z8cX(`Lq`p)e^t@|_&&NzI0S0%%C4eqZbwnjb}=^{waVD(bb%{79FC)t?^k{ad0Jv! zJ;Bh0&aXz;XiS@X?lfO*t9C8-3#o|Yyoo`L-V&=u#H>RGOv?8_f zVwiK2(xV>>x-NSN5?00ItUAr7?=8aPIz(4Q=|YAfO$Vke6Vn+GT6(t5C@PAbbQj; zsw2IG0xLN)9+t)7(afA4X<>{W4&70!iV$!OQj!gnv_|mKGN_ zx|>Uvb%%_DYscLodi$^&g&v%XU0hQ=I8jG{P&nbN|8rTdKPSiP9;@h00M z)SHrS14qykBG^;xfWAgnuyE)riY7Ttu^$+nLON3mYFyXFsmk3k79`-7Q6T~o5TQcG zgE{YGR;MIxM?FVUyDDe9Gz}jvuCdbO@>11$h|--n*9C8me&T;D4urs1_Esvm1N()B zWniHb+Yj{)PJL8ALZ=+)hR)C^f_%%(o8n-FMmvC($@W1LL5_e?HqlL_gMotKfG>ll z9)uXKB&J4)2EFx}u|s*IOw=XmmcZyO__(2C_zu&CF_5^i7a((a7~+-4HD+%jV7VYz zh`N`#(_^8e&z!t`T;EE4`#9~PZ%QsirQ-Y0cOz@j!jHKE3f7W3?Tr}A*7d=~Du8Ht z5+kXtE5>+Bal~tjAe&cE&!!dZ&MJ32G7QoL1N@kAi|@`p&p(VktS{>}NyqDJEA;&{ zo`n;xUag68lCICjBIjV62_Vo_To3}Api?;-1XQd#bLe6e!AJ`U)M+6_+d*!&%$4A3 zApW>qs3~)u3Q9-jUxh?g7Zza0I72)^Eq3HPaM2@lYd_PeS74%%GXF z)%Kv?=19Fn&w*K4MQS{ZF<2eF9)Eobv48skesMw+XXWd$;>^n|6kc^bn|4_;awQ+ep!XJ1js3L4Z#UaEphaI>2Gn zLlZ_e5v$5lK2r8wNN*U6>2rqfbD?Dum7EtgS++&5z-m{9B^E7s10cN*$TCnxq#O1R z!aQ(sF0`BSmC`))N&s*X1_*ef%$~4R0Pms($Ouw)3CO%MIlz#D(L7w8a* zNlbk^e@{lcnS(k_eWTxP2^kC$pz~W=sVxdl7fpRHexejTsMu5oCz;}DFk$LDQ>)Ib zV97r)`}bsw0du9hO67U#JM5I7PL4WE#f|or@u3|B)~K7t(G%;+*z&N zx$GU6B0ZP$Q@_C8r4yZQWj6a1$MO}6+^ef}@*jR<$|{|F4AMGbIkno+0S*?r4Qa*F z2Mf{H29FTFkAKRnZC60TgE*>jP)o(Jz2IO^2GDl#t<20bhIUxZ@PKbhf z;~%lAp1j$Q>lZ2daYJhEF#>gf?2rzelCr7CB`J3xF$maTurl*F@NV0bP8V`C&23rwQU%n>R-rIz=1Za(K17H z9l-+pZ`j%O^@ro*M2GZ^RBgZWLFbuv7mmh0&;Y^)jl3ro;&dXR?Dzh=S*&vT$Xm=jRt66a%)GvKXEHC zD+44xz7I%=epJf;PMj{)E?=G_yZSj8LJxri1x;7R4PW48gXANJQId*+?oa zi*KHUW~7^xps5`pQG)Ri2}`XqNy54b*F9iUw^Z0fFitH(5wTq8;PQj^~gv$QdB+A{jr- z2U%EHq+G&x_-zwFLd0?#;uDJMNEQj<@rs3-xdGRg?Zrp+#p=Qwu0yk8tl|M4$x_nt ziO4slqVQwLwmYIPmem*i06~}fP5E4YuF^G+tyOsFr{6=b=JPJV2fnNaHK(tAm??QN zBsiiz`{4_x4W2$ZZm@7tC|oLjxoBjyXIdcFHPrGZ{?+f{=G=pKpf6H3^W1XOOK^z> z#3=+J?~n{tRbiIgFVBy^0TGC^N(J87lX852lg2tuFUR*~M@r6U=d0?3GkNBeBY9@# z#&{=)^@k#Yz=A`yeyH#TK`+NuKb-hTx+Mb|vu`+uPg{6!ldTuuSG*pWWzkeqz%Ocvyq7meo&n~QAUOK*q^ zF0rC?^x5bmXY?2U&u!qdF)cx9McyJ(+3X~R z7ErS=uu0X*JUWeyu`PpfnEF2bVFtD+X`OJiDp1GNH@ymcLMtRF1A_dKu>NuuP9SpV zJH~uWeSdx|s2J=Dz^{;cnXWP9Aqx^i@vcsNKUQ_xGWR-RV=DPcr4!v(KoL%fh7RdD z+?x7s{lscc@NYI}kXDprBwU+qU&WrGcus>VTUPT#Ky{Wfo8hFXpPWCLoomi4S2K>; ze8g+&J1~2~{fh}1k8?F=R;Ioq?`0?9^x)RCt1-Wy9ey3sYR;@q{qQejhyMaTw&|C< zmCQac8WwPs&6$}U(w+z@1W5}NfOqOA=z~Oc1QgjeyB>fo6hI*N2C>8%yJ@~Or^*#0 zHk=>7g|f+r2LTE_A$4Io;rf$YTiNK5aZ=5m*3-8EKcCbH z124Zw`o*|=PJ(PO3xE!{mcd>-^7kX)&2NAB(fx;%d6TaPBph`d$X0o|QqSY%`>+fe zhr2$o*sOQ5CG1BJZ*R*o(aVGUnA@Ws3#=^9TVPMz1m_JYdAN)QP6+NIBb#sA(Y_P6 zh?@tIt`L0EwTND9<3hJ>Tjkr^+jELs0CztWh!(iFGB-YO?B2++$`5ZpAiLJ(x;I`F z2yphHm+9Uk{_eaW?G9VdYyj4-A;pu-0I>KlpO+SvmdGo+9oh#jrsWCn!|s8NoVsnl zb7cMNz(0hj+jr-!yN-`j_ZOsAf{ltxe-`w8Jn-XS-g10UJUJvMmeFE!L*g{vopeWr!5)yDjn9z*VTxJ|C_kTc z7m%|SW^rGUq*}V)2ov+IJACVjw;<2WWg#89iDK&$C${xlIYM&ZlT2%+jb_|deJ3`xA zVLog zgZ>Y5ELFI*g#S`bOv#*%+(v^uXef6y`N~r;I z`g`Zz{+oY5R6uFVDj7?Nb=DZ>EM-jfue3$)qvG3GfR$}ZgQaNg7bVd|uV~EvgyVIZ z23&gi@~dc8HhTpt_pk8Z-)9%!KfHRky|}!zxwh7+I4douva;TBDx0f|OOJLuq>2Zn;?f*mnAN&8LH^E=`|2c8D_5Zs6 zSD9VZ@2B5hs(B<0=af`W-UW z$Pcr(6a!8H;~ru3P&$HvB}6emd51cH2upM#g3Hh5S^1_B%3M>w;IHrWGkn`$sT=Nw zb$*oct8jE>!QaY7`)c#GqoY^U;L`a~*(++xzN zDCA4coYX{(L~dSypz((F7*vdgKC$#LDF7-AjS9;~f<*xUFr3WiZXtXClE$cS4;>IK z-mvZn31j*g{ky?f2#ORXFnOQn_+{LyXd5F^Jeu2uSnxv6$lRZ~U4cB4t}yh^P>gAW 
zej{sM>h^0?=-WWpyv~do1kQCMc)X-btUsfHwjZMV>#j0r!Chm%I=aPaLmXH2_Ce zWL3q(P{z<1ur^@ipwJtz87R<+BouH4fIU*+D|xshpV-Wjp3XHb6YFD(B!4B_VAY*O ze);I6)bbKAOtaZEwT>rbCnUV9r)=ll*&J)s?yT&mM*#nrp$-9KS-m*lYL;`#ppRAB zN}6e4wivb{F`>fKnjqaX2;W^Fos*eNaK3Vxo|7|Lrf@c=y`Loz+ulejv2mP%cXD1> zvYR#fmJ$m{DO8a|P=G>=mt{ijMka7ng?cvoNVyNpp^ojtvWh$S(Zx?otMAQQs|)yR zojB*TRy;?OQ4}WBMq4=yJn)?YXB*<_MrO=D@~~NKELU1AM?goR zv}Cdr6z(W?QIHe2p}LQ`(-1uuCiOMpXnmb?C9}?_lZ*5^n2rxO@|3;OMpovYrTb>~ISTXIep&H^a@$kK$(;FD+SJ<2(2J|88gUu_MDFKK zy}Yeh`fLDs$>9a4 z+;hsa+j>*|LTQ2bO{;O$m9BvwvyCsLcyf7`jnmCO(jVz|Uu_#v9#c=UZe1^QJ7p;W zAY&Z$gTwONRkra23UO_9ZF6yBZL`u{Usg(hHwumpx4`D9VDx26nqnz(=H$12>{rxRBWJHuBfS7K)0X{GeOOS;3%*_f{8eirj1{sJ9b4azL^3U)cDC<+g?Pr zF5*4_6+#tYI==xYvs;cwCLD$Id2vN;{-KQMk!8PNn1F~Au)o5S%!XZ2ld4wBiBW#r zI&e`U+x0*o1ghXiVB!H~8=zJQ77>tTuvAyn$_*l>utGhQGe%KrBr$BTLvg^8hO;ufFy|{e zKh<3F{I;$N8NW{A{s?$J5LhW1!C9-B32>+QIs@MeB`kkMLt8iK05gysx7!8hGcb2* zNEKdiU>_wYMMa`03|?2%=o`9imXJ@-A|OvzZj>{$hhI)&#FqwE|9*MOy!{|ABmY<| z_|cvf1%TppG+UP8CpagAkRPD8fy$oL24lOvDevLxSI&F5w7KLs8Dc=L11Or2Ata>R zXWrT(6nU%pZ{uWb@$<+5@nRe2JwZeKt!Y5n0mMF)l^}q!;sSbokN9W*XxDFpJ~0?* zB(%QGI>g1^p?w6jwgzAu(bjB6IAz*SvpH`yo4me3Aa=yxsSx4&A(W6Ngt>Zy0E*F+ zUB885cVxxGsgyQb8gkLzfX5FF>vW`+J7n7l$xAG9cDx!2;Dy$xw&lKy;v*0Js9K-m z*kn)8WY0sM1kOr0&zmJ=K7>wh7?)IU;FAOJH`w*j2p*(nNJJ6n<7A+&5C=U6x1(4F z595n=Onf&M+&v?&v+H|7un+rZAGqE?(BsuARsm>;TEMICZ*5ryO0H-UyC}(%8Bsll zIQvX0A5-7Q<g~rmB?$$4KO^hLwebJ)9oT`(Aky+FeI!+OAO{ReuX=~l=$MG=Q zs@3`^Km*0A3v&lKP&McTQO%4_8v`}5BIeu*ju2rUThmL5LC~D5<;Z7uZ$Niua z-7t4mvoE^UY@k&zkst#pXH+lmDw%J!J+zwEv&x77iL8su`hl;k=K;-a>a-CuW1Q{? znKbwU%L@Fe)X>3U2YV73T3j>H2>c!3H`TIENLQI;L1`(Ip{%zpil;3t`k7jz7V!w| zv!sM1&8&K!zK}$Dk~4C@o0z65(Ff8U=akBR2ba?TCSwc-0PWX9N=GY#ScBkKGZU*} zkZ6FBRMlPo2|Nh!Cpo*bNdM`=kyL^D#kPx7T2^@{$==fAgW>Xc?8$gQ*2la7L$m9T z)L4!+3LoO>!H_+)J)9yG`$+&%jJg3n1c2Ov1OXekaX6s!$P#0Cx4O`Xs?U+tA zAwo#P4r*-4uhIiUQ>x>1Z7?odo9Hp(H0V8STha@ld+j!o z;$_MM9*J&fHA9CB1+)S(u~E9>FG_4%LI{Jc9p^n+SzlaT%BVfqkPQzMyU;Fthstw9 zd_Zy>*3bB}>)+#i?_}nCNv+Wb!Q<^+|BPGf6a=z0dLxkjt7QR-Ve9}dKf8qjKUR~E zNxN-_FWiG_5cX$y5lF5RM>eKDItzcL>H} zJ`frU5}F`Wdwuym9QoWWyS_LSf> z39e3%RwG`!0lKo33ZT0l@pgy=F}=w(vZr)$TzDP4wk;t-iP`m zITdJfqJ*V^h9(YIjE{^wnkVy-EC|Q19=iLGYEU-i2;gtE$C$H5S#_dedp!rW@8)nE zsFqOdcH9U?cf{GmGoeB1iIy6LFI)@^zRDb}li%||-`2?8h8M{Y+&(h0g_A2HLuR!M zT`m|711?aEdn(#Q@ezF_vlQmAdbsO&3{e8L<#<4zw~>$#mw0{zNhzBfI^n2=osFr$ z3WQWau^e`jQoj$UhM({bq{~e;t_Q+=o{m_3Pq|!m&NfB}`qh92sJdiC256WV z$j$AJJ$Ra)$3u8DN|&Mt9VbIvT3uYu`d^xqCnE--)ar)uA0665nNF@9D$kQOFWeMW zhfX)Ha_Aq(vTiEJY_o||z0rYwQ)dzaoDh25HhNwF^(%y#N~O?kiNYgGdgs%-pX#gb zs_#(q72X1@_Kf7xS5SRQ9@ULHLwOE9y*HxUNt zL@%f{UR2XG9Iz|6CtE^cT+>`wdrt%>IwGr5>Gg>qHZzko4~=#Ofzol&(HKLjPjHAh z*=Snd;ePAD9|9LyM{7p(6irfnuxh#2#NihB#>A0*LrW}lJm-K4n~1Gpz}$b@^Qx>N zce&cVhw9uD1rKgFv0{?WWcD`p6M77sr{;_PzFF2g;L0~3Y$!a z^fx0(bl3S1wUv#vrM1rDW(Vs0Mt5_my}q)!vbfl7 zbym9z-R|Pb!UB?TmR4)((GhI+jGcn?TqT4vYLC%HMSD>d4~<$vIm|U9+6zopQCg&n zseK9~PkAOe%UbX1Bi4_3Ql2%Q7pEvXn7vvI2^H{=h03DNu`U&k6 zOF}h}>XjTO&@HTw9(@*BWjUBhM2EKh{PFfD|9^X5*W^Zap0~2BiZ!tnVjWVIv_Vpfir{oiuE9HHr{ONW2YfX%Q@db^}dn@S3*?)1lDK#oY#9)_8+PBsbTt zU6-k9wy5IDEer}?R~8Z8n;k+b5PQyJ9mX#l5)(q<48;oSARec05@2%SsKS6TO3l&I zz(~GkA#RiZLqq5F`-n=)EN&D&;@W zXKWB()ilcVQjHNvgzKpx>fOWH&X=!Lbe?IyHd0U%#yHc}d~R0E)}YVg2`CzJy3xVF zh!Aa3M^-EcKMAp&wFl@Xnc3Q!e0MO>B8vCOK2sbpJ9+JI(0nP=6nisIA6OLZIIe_hOMheBK=grl$As44sWLfuem=&Q zDNR1c{Qp7F<+<7tEb)^^9QMM0)@EZ(dK1qYR>`(Y2 zaQEOX)AG<@Zo!`rgIr=zn%WFM=c*JVC?k*eNjE4%?#*4vTy|ugCIz>-DDMOvl(cne9)-F2RPD^$8StOs9BRuM{>h z-PUvuGdrEx*R~M&I#_k$G{g1~Td_S(F 
z+)~kFf~lkF6h-dG@+a1h)&1Lf#brm?jKh0xuUG7(`kYkX*Gj4n)bY=sKCgZ?>n`MTy*eEg^7A?*-k2Vs!(2~4C@G=ogkwCAyPSHu$PEC}6tM30UK zRT*T-cEf=g-N=`0X|*`S?X)PIL);RZPDmHxPgD3| z3B)s&$kzjNaqAGbL@L|b zqbZnlxd`PlVCorQ=`7OKA#MeVY+v*W#;=s!N zS@drI$|XR3_F)<$L{eh}!?)i^`jZ zvYzwg@23#SsvLL~@?H{)hW)2P7O~2~f?BkV;vTRnp^^stZHY`-wka<}Lun#we`=lU6ZB#88Ec+^y!z;pjoZx8L`WqR?xcE-rQc@-`Q%dZu^~UtF67Q?Nxts zcV~06+uHHl*C2NJ?9ZmV{5hpBpM6WZ`T4US8ipl%MwH(6`PWhb@t=_fEwHt>+=p-)UlH#sqQ$DgJ38GT8^PB$`$49~aU$zE zl81F2C-Q;kIFYB4(B_|Se=gdnZ#Yim0@HrJwzV$RaUwfTWXFk2nkC1H>^PAT-gKPE zlteg(=GJi{J5J<+l!q|^&zUUZOqeu5i7Pu#63ACTC+#?qRfNlNA{QB$Eg|qMQoKY9 z>^PByGQ6ZFAc;1|iJaAfnw@G!W5<@c{2nObR|*p;4$_o6}_@h zO(s$asY43fNjdlQ&1G-*p;;G)L3&30xO}58@9{V&%X)5B$gQuk;rOV5>PA`-LT*=8lTC<^RoIQU zxZ+)(goBQ4n|-_=ooSD#LN&ih%_fyQ^y>LttI(a*`y5l>A?W&(0W|E4Af6Nh<{>dY zR&#X_OGc?6-UL_$77P|z#Wq0_?%@PFlp?B%5f?e*g7i&LVl}LI8!va}T6^43X9}|ekvIvjirFAaNigDo$ z%cKE%FO3G_{mN5tgd7gKM1+U+ZOkj#n1=*o+ zXsD`{X5~VVRaCNNx@hlWX6SkXG!d$es`n;Rje|2hes*EzLT^{RyD?>c`BtiABI8HN z$MTTL9)K@Rk_mLTi`yE9{pjcj@+i1bHD7u)oeB5XAJjOK7%b1bjTIOE9EvdSSvB=4 zFMD29HQrVZJQe?{hEby%_~|4LlKS@g{_a((s83&Ng?>`Mi5VTp-EkWgLwoJpsJ`55 z-=?q(U;ADVRBr^sRv@CU32yiCD20gk-BDtM6F$g3ez&r#jGHyC1Y4jYqVoP0cfi1K`~B!uDK4yyzic@!m_4$&jv#=!@r;G&~ViQSAGu zDTfgbC3x{0AhByLxgaFRtSHQ15uZVw*G>r0_!N;24sSKj1DPbT0HGNbT=a3*?6Z_ty;4Hx_tCzvP z<8#n&*`4hxs8TmNm{Id~N}0#ZQ=~g2orTT}$3X&dNbO^28{0U-Y3S=ijNb`D(-76T zh(lj>ZqAP7;PUr%i)FLmec+J^7rvVw5wL?4!yO;5Of`NE0v1=^+mDOPxT$l7|Vv| zd0$Mo>1idOw^LuOZmh%KM3!v&H{M#QdsYr=o86~BW*-*j)V$b^X{Z^D-D}&MyX*V= zo9nYJ-QL*W-df+zA6;x4TD5!8<7W0xTXBkMo$d7#*#f!t3!9e5D1IkM9c%sE*n9aR zx0JV$wf<$pAv@TTZ;aYl-ci~JXUo0K@^R?vuM7G*Ge=NhI}@z`g(L@lNg?ZpFmYg- zCTeE+UQ7`j>~woLg6@qFCCsq{<~y>L^!-x?5GTm~$Z3jTbK8$Rlryzp#&tuSs)O0* za{JU2foHM4WmUs|8 z3Shkye@feZ=s$t--Eb72VWPK?s=^kCpni}4kDSb-AQ)RikWV>0lmII%+99k=J{y*Z z-$sr|jZM5}91Q$4Jmte+sgGiGSC&(YCsYncYx2H>RCpollD(D?oqLFJ!^Vh$7=4D0 z#1DI;Q3s=gM}{!v@HR=?7x}_*-U`Z$5 z#_vRf!Bf1j@d(O!nv-P><)18HJz3Upy!@#lb@{Up7C%{jP`(-8u)eja7KrpyeRrF& zf9A7r!qsbQVw~iC+}Pk4ya$I1gB6DfEGkWlc-3S$qG8op!R?X+PFj45K4=B$;~+3& z#opoe$LT^#ne)^e(IMgZYf5BieI0&GFvcBpvM2nxap2{>=nJCV?5Ju;qiEK54jH*=$I8UM>1>m)1O?gc~(JzG=wEL<)A%) zd3(_OZ`swF*g3G*YeRUAVH;jY4^%_F+NXH$>mTADLEKbB3p36>%uB*yIPmG0Wj%M* z;~IkNsJ^Amcp@VOqjKP(VbYbb(I;d4<@xP4whE?LX+)-2G&S!g)-V<%b7(Y0;5?%j z`08r3@E*l%GcA6M2L`92*)!x(J1zSP%?cJIJu*8C;!!oy30@vMQA5{09fnRY>UJAB zm&q|MiJ#c*njJ!#u8snndRW3kU;HWNaoO8}JTiFV561(N zY|GxJHX0^v`k2Y%Q>^@?hD>+r^{~%G`fT{0NE+Ifb}_5PQqJsn58H>{)O7H0gj_Y23f?pXSm0-OKfzYm58*f^ld)vso2@Rq#}wObCW>}xv*RUD z&W=pkktr{ObRT?@cj>O*fcX$fDzhpsXM=RS^<{>Bq8{)!CH`jalS0TUhh#?PTto6- ztPud(m^;S7I`E_31bZ{~5mHm)Sj7f}pwQz{+(~MW4M_O8kl{GJX zBl{GiHw$X!!78E-u;?(%M2<n%`pZPWZ;X3B&}@gULruTpaudPI>Wo^&-IJN2J+ke5Se8Osx7F+ zf8rHF3AH!fh`)X*hoj%mMO(htU+JHxK{Xx?;Fu!H79#Ey zrSKuKg^TRLTUbz>dodl82a>b#_qUeVmhep168o)<*8WNKz7FMqXRSBsE(8+DtbWxN z6%?YYa~ap03|l$@h$cWaICY%>goe0ZPy@APK`#Ryaa)bdfoBMr;qmN(t!OHIr&?J{ zhvT)|h^Vj15xKgxx_5n*fsA<4PS+3&pQ|lU5jY351rpq}jeJ_eOy&%&UwCdkYk^4` zvsJ1nP_H~h)}>Oo&ig>bbb#WG3jbakp2@vy(_c(b3zgO@lALV1fuk6hfnpkAyPV5Z$X?*P$|i% zng1&e9y`ec$Rq6|H}pz_WG8?bnu{7_zLwOA4c2PHC2r^y4St zX>Anu*8Enowz;vsTSJlH-p*080ThX%>IiyV4Z1|7AW|R>tfog%%iZorAreS9vS^?< z3YD7Kj2(rF9N3OR#k=i4LOzZd6fQymesxUN34vC|*%XkWF`3nC*UK35L;K~1*hadG z906wniUTJA5eGxx2|#cbA>8N$AT(tqum?fv|2_&p?6un6q9Ni0Aiib-5Yv{XD|T?D z_^uQmrqsx0iKr|Gq^b2(09ET6Pm1Q1I8y|P3TCq^{t?VFW)Oe*@q4$WP6-@Jz|E(? 
z%Kz;8%G`3iF9RMvC$N)SlYCt%ki{RgBAoz)6M%rvMK`?KLXsR)LSfqs$RdCZI~mYB ztOU3r;6?J|&SQiR#a}(fcToy$aMc7g^DYuT5Lo}zYSIN{QGKlGMVt^w@iQcAq@Wex z3j*AM%O3h?S#4|qz9IBVcyyUWO_-Jllpj6vP?M6de8yBDm?~p|zhwcH4)TU^h$jV| zrinUR_q$X%n@6a>)}{EP_3J?YK&LA&nqSNt}PpS-)l8>qr8EQR6(YX zrBIx>hKp{Iagc2_vimff_+_JmkEE`**}Py^Wz(eOraa~f#cvoW{_3hG8rHr-5P!e~_NlH)`uE_#T>-Vt*atmRuWhg)fIQh@f0v#=qW|e^m8@{s`4>^+UNX zf7e(Ob>J<#(x|USY1CJ?BI@C9|LC1}asXuIZ~M!4Car$;PJiW2e)&bX$cqcWw&&t8 zpB*g^X@_*AT)iI*#*$$1qvSD)j**j0v@l=Oo;iGvrw+=Lqc@ErJ*U-&;+d5&gwk$s z37jHn3$`Stb>LHpo2GTs_GNzhrrZ3+kgs=F9P-s6Uq88Zjqy7lEp(dh9Y z==1_`Vfk=K@hMI@!2sVs2Jsk9IsW4o&Q^Qc;wtYUEb`MoS0g}qHFki+n?#Qim`K+@ z_}Q&H3?l@MqA7H% zf)yR|)xho)2OEMI(tkAp69lpj|^cR z$!myXLV07SL&PAYHO3&6f-?+;C$O^+^`xO=FyV}`@hc`7JWaORI0MAPZNG;aL?zz@ zV9J)tF*PUfs4X1)$W>%N&>V#kl)$M%NHbh~l4|nLlW3AiLS(8@xQM%t#(EajypM2G zvgvdJ@aUMbQSUaRG5&z*jX7LE`sErpN%^H(4ppz2-yM&Tl}3~+ZUP(0pt&!V0a?d* zc|o{Wz4!sD|&O%8^!*F=5?i5>7RNFh#= zW4$j+N@K+Xr!{+~@dyt2$|-MaK(joH3>K(`B(Z@6^-KvSQ`T4kmWa|&71Lw|#R2kR zT1~1GT2yc_wkU)pGq6w6Vm<)rI^``A?-4M=sni`x0kQ_=)FBTi!!dttfQW_EITw-^ zsw`%z^kc*bNDogkLz)8)`O2f8#!FtVkvQb*f#;B~4*5Dw z3~H?_Y$%TAJ1U|i&oT8bMb}Bw?`9}*y^Kl|JhNourC(wRTf`T$@GYEox@j4fb&E%$ z!_}0A8N)NHRqKI5bkagS>TnHuAN>AWW|6M72BX&6_TKL1Zf9e^gF?2w?*3-`+V=kT z#zwc*+3Bu#yBpi<>ziBt=FXblI#Fg>+oIa|3}tVyV+$Zl8s0SLm9ROCfeEz*Ii`BY z81hzbgUGquKZTR0ynO6kHLvx=BG)Tx$`K7s&MEOhL;lJ*68KFdU?}`ixS>Vr!Jshw zDCpGUm^$Puv|MD?WkXT}H!vzB>7Idxx&wAfH8cWDE(8HEV2ESLXsKjMu0#{Wh=^HP z8dH>iANBY$iyMWHSf&0Tp9F=E>iBxrlEQW157NHj4hG*6LjzWC26snvoDJgGn;^_J z#ktjpY=q7w_yKC5Lc?g2-`5cQDq0}t@DZZ{;MyqL^tNSfiE;U?#wL zoY~qMyqf_J8rY=@Yr^rj3ZnFY?Bun-LGvNRGLboHgGmU?w1Iz!8)VgP8e&MkEO>XT zzM@!j2@U~lXRFaa1@E!n0t2wVj6=SDUC8$@8K08C$>ig z{3+dP!iH}Igk2Wk9uCG9Zp!mXn-4{0p5WBaI(V@p_yav=H1rAn!vj&JsUEF!aUsbR8B}4(Pdha9hH~_-*GIV>DTdxPDlYRCTA#D{(8v-RuotjZp zKfH&3d@O(0kgIBpy0Bp52vF9-zm|^-hzP1KgQR}{0Wh+kBtcvz7Q_mS3wWaC<3Lhm zRuUc`i?3wA@+4KRdIuVIpkW6ZE`fK?qcnYG=UMi~eRaRJ*IM1%-)!v! z`v^yFuRrhq!?Qn|9_r`)e|-A<*|(&dpFjKI{9e|#eV*ii`oHi0L;oKQ*8VsBf671j zP5;*g2>xZci7#HrZwwj!&&LiKekmi(aUwfpc#2p?>Nt^w#?~Rj9WvZ;B0EmxMDVPT z3k7Ah<3vtqp?4>Oghp}>@{~~l6{Mj_i*PoTH#WD(?Zgv;YGwW~IXb!0qzsH^6rl#l zsFFj5J7l=6!*j@R10j*6t|z(D)o+m7zJNP*si-8H&4S zK`e}I4jJx{;Xqhk2bfR`h`6t0ng9SwVU|F1oV*qY(H6cVMPUvZelSgi=W^g*)f~8? 
zUf4zv^Nkm%Z#Yh5$BAqpsty?*vntvl!wHUl5m?_L!^LY;H^m{t0atgN$izvOxNimh zIFQ=)#TU2GOhWZz1+(0`Ag$*g| zaDa1k$Z*Gr47lJh{G2Y3lsX+|_-)R^BPgY!whv5t&@=};u$GmfIAnMO0BC_+mk`-A ztG9l?qHDKuwem<7F8hx8yzjEDmxzoFkH2skOQ7fY zqZ=O?SiIvzz6>FuLxvli_!~urzZ|IkEyQcR2wnL5D`ao8m(z5fj@JqWzfRHBo>z|7 ziq4?pwQ|`@l<5Om3~mj@K&f ztN-q@mlj54=?pF+*wlVQrnkr;nOsEFBhfUFoaE#k8C8#=`H*Vq>6=TM*#~n+90utb zNz`-hx)LCe93W*qH!I}U*U}+;TRCsa?Jf~ZYF1m-=h8k2-WcPtOQ=*M4ho?DDB@dr z-V124H$AYnE7yejKa{gk^%AIW?Dj%_8Mbj3sX^Wg-XLuA=yZXfpr z0sx+Mh+i3i2xRtz4pgFH9K%q)i%yd};6*@nAS##TjVT-cHj2#ocrf?AxB~vFaKKd1 zFCcf)$j}4(m_R6XB*=yu1#hPEUIS0)h4LqQFZ~ejSDu0+M10Y0Qns#dV_q4*N)Y0+ z>VW0ZLAZacbTdX!6>bx3I!kc7#6)pgIS5LTq z6fxBb2BXJ^Mu)MHea{FS;klfMW35mGJXWkmWNFC0KEDnnfL>{1ZDKt>qkdN zNX6hr)qLsIbSB(ie^A3o8)2|K?>1Ik_;V=2yl2nest#IJCxG!>G{>M5!sM zZ?Es~UL}|0^rcqlC-s|{(Sh9E^QsyReeK(5EcLZ-Q&@(teJ>bujbPXcg!(GM?LHo* z&=|XGxJ&)P2ib?*=sZtW2ltnJgdwJ-dzd+va)6TIT`7;HtOyz7R~^fY(!vZgMyL~y z#^@KkzzzMYO7$(sZG{o&$f1!{98d3Mvq@VHb&~BUg{lHQ$-t(T`VMvCP$y!jI6gv@ z$lw44zS*Hp;-GubRA<5Q5y}qgl(@ayVw}Gh_a+dfNa4PE%F<@!X}-#+lZ())>T2aX z!%3u`O3~?j9K#n9{67ls-~z%o%PB@#ig$_+S%)_m^}_aCL%irAU-8~clF3jCQi!F= zEopcns-oBjjGX=lP){;P;YC_YE}*_dB0QT{#AnbWKO-p}jC}0+{a^??3tpUVdL${E zeXl~xBPh;`l&yF%MSBppLqR9xV;Y^XT{8O%S3&xOcmh7pjJi^l0KK(Ok%5LtHC=CC zNL?)hpxCCiz_DoGwr0KWwHDqaj7#y|knUZ^b2T1be*#lH6_I7{%#>SeI+ z_#E_Gc4zwvib_ET2T$lq|AeTb1>2a#%sdQHS3 zv`G!Pj}0iMo4Zr$R+P}ID^o5hdG3@E?y|QyYRcsL$m;Tdht9kuUzqdAj5-|@#O$Y-TV38oXSWeqY{ttWdkV4w!xzZ$-Yi8-MQbdKvJfotq1$*3J- zY&sguG65{ZH>i(7Ok(!=8kdl}W!}|2bX^>LHUS%O(&)v0$AnS1>|;M`I0I(ouZf>S z7G1Vj;wbdsw{YO)jJ)|^^5*iXX%t=&`%U%!xQ1^B1589l z9>(eiO5LHi))<1Wqi&^A(Z9YZw)&~QgBfE(Ed0gv zoOoKv=k3&2s~hX^H<2Zq{*AX*>YkN@+Gh9Zk7*(n<`ki8*$-vBzz(GiHG{ExZF_Tf zeSd#*eYU0B8~fW^>)ZJqifu!yb}xF|%>L=v@=Xz|Y(BDm6{@QYKYdXc>7pRKNV;=N zx%n2d<-cq=WSVw)8ZhB_xgpnX&$ZIAMc#IMu-yLY*uj}O0s{S+VEr#7Iq*vg06v6? z1Jg9o1JL&f5#>&|ht9Y+LX?npN*s#apC6J&V_`I*f&xu01e*;_e+y<@H^iw*Es3cl z0k=<05!i$X4Kms$&oI$j%oX9QBbbNyf0#gzf?#Y7K|ba1Py(#5Xos*e**;hzK4R_0 z*hKs+4j`Erp7LR^)JL(*9TS;{vYT3kdEY@QypVOtUQ39GJ;bzx!a6Bpu@&W_=SqO`tEL*&? zxf$QE4z6mkK$N+9cbl<)=Ck0<)N85;lJ{|AgX{D?Se+QGI0QwlO*&PR;fRJ+X9c%Q zpeQkkPtgahAblJF_sYzQ^bTz^P8V9roTuK14hhF!QzAR->+oZOai&RudIxG#Adh2* zARPL=;NZYp6Q?L_uZ6?jOheg-hBl%$j(Ti%A$vt{=oruRTX;O4+?&AEc#N5+zqT;=Qkbh<`uBt?#2S|TRIX>C z&sIWr-lKS0)8fZ?U~r6P2J)z#mVJdxE5iUfhE6c*c9rcy425G{5HTY({rde}bnriL+pI6|&k zScf3=eddW?hbAzS2@2Gag>SaH@E%iax0$TZrOl3)Ksh@SXGh}fNSxm=iSzO3%V6LA ziz3zTH~W=-<=GFWX^52|S7LVyYSNmRs;c$HEg2`L9O?&4OB*%MyJb$XV@n3Kpk^-1 zK@=kW&6d z8$_ zj!w+E5xi^-X*_~67alFb?wtko=`Wb$6F*JpHfu{ue^r#Y$~{zVa1P3qc~kl=GaHkI zZ3~BthX}5dE|IhjTf@AljZFJ;&}KoM4}BREPGOrl85lVLN-&n72``=#Ho-VuKU+|% zcaUNkz>_m=wkC+CG#N>=k~gJ8V?m95mk}uIRU$57PW}b8=E&wS>5ykhsnwv%s3YJB z(o%JQ)s>gHpO^Tj=+&L%R!v51R z;?E&1+zA>u`Gc4sh#-dS)bu4*7|WBRw0{2>&wroIh|*hWFsxEsroPNp@fmE~@)c30 zq8V}u#CiuW#+PFB3fPAba&BA)d^-f3D~HD)+`ikmdF#Uuj*y+S8$OrqbTjxXG1eJg zw5aVctt}&s`nJ|7ze-p879UjqLWTlP{j$m6kMZg}S`u&EIf*)5WSqwx^)a{Kie@ zSU!m35hnPZ5w||v!Q*o1c+2^(m(j2_)3OTYRpWkj88`%4VMLgqb_LV?4zz(W<*LiT zf1~y!>Y2nnKDuYF+(m$Pm{bw@N1V2IRzDt%@7mAqnTvMqKniXwi)V;HsX+7z;9g(f z;G)H6_}4xjhvbVYDJONHyJI-Bsgz7WOc*lc=~;cFw!WgxN--sdxaDy?(T2+6$1uRU z;$nC``F#$8=dEC02fWN780SLHVr zwRQfhnXU7U=l;(*VBQ6$R2DzheS##p*#Ql=$08;P$d%x67w=RE~(&GiS z?m#ldO`I#O7_3%cFhp84MZw6@lEoXaeaM*dI+CDF#^`Rfpcel}Eo@)9O!#y`P5fcC zQqGYS!?p`*(|3#65eXMBs3Ct+%8M+h5&x3;7!w{}P!qNtb6MciVC(tJ;~An90oKoi3J!b?c^F#V%E`Rg#OPWMst2 zj3`D%WJg4j#cuavgoT6!Ouz#S{p5XEV4nOSXa-n4EZ}{aKR`3aVDun82o@H*PmA3@ zU?2APedok2FT8YJf~<~esVXxv;>35(`ObH~+xdO}_O*Zb!JmBU75ekhQRRm_pMK?) 
zO1UROz3Mo6PoKR_e^=*fjq38X>V_MH{=f*!Gup%Z2SF%$cSI2AT~YBpN34|tzJ^~7 z&kcneu9bh}4K&>snyy*G>30T>rU!u?gt{A+r)RYH0^w(y7~o2}fw%2Fq4lVN01XP) z5L#&IA-_Sl-2mT&w(t4oD!sw+qGz9kYnz@crXNkW9#+b}2>PBIh*rn*dpgn2Kh9{C za-jG6j_r0^ecji40e-r#uH5Ad6?9n-MfYHB%j@5be!H(OR>FfmIv#rcRv(XIMEU}^ z4}7s!uhr}I2CmU>__l)n_Vg#M(AyPmuvVYZe9;%79onAT>I+wQ!h^L&tv-$WKvw7r zy@w{Z{DI&rq31a*3~a~l(q(+R&R;6>2g|b!v37;8^XHmA2)$LUO#hUx5*>KH?|Z#| zsD^<@XhxH>*6PWrY_)}@@7bQeb|o{#*VN^j-UqQ5?y~$)zA6bGZ5~v{qfEcj*gkwT z{pbjccK8|%+|l9Zy27Oa^ZcX3KQOF;yBi!m!q1nP`&wWWV;66w)WVwL{g;h&^_@B8*1_??~`v=2Y)cwO*Jo5tz$0kJYhHt<~h zXkFg4-l7KHw7&Z9tTOISE`3Q}us$cpo`=l(;->W_>nrIiU$wq|_*pdH#rh0*V9?k% z(|5dTmC&UkiG+BTP+qO)b!EQ!BB+ zDPdu;5bF3g)`fGR;dVbSazCRPj_`MD&wI2mZ(vkfc4G3pS(nDo+2PkD*Y`ZEf}m#V zp-$@vY(8}#EOt+F7*@>ZF`zB4(}Fy8`87#TWYO*jWcC_=kqtT9gLv(DHcXGKKyLu0M z@wI{NgtkjJ)Y`tjCm?l+3$S;lztZMxioo#gK5b%agCAH`!P4`C zwGYeh4}^bEUS*AdRhASnd?W86P|mR5xuiN|-UZ33aSJ5IBzYQMh1-1b{=oKyxkefY zJ=M_%j?$_=f?mb@xb>>_n)Nz%?2|?P8)F6RBpH#B>mPrAA*udnhkr;@a9wvWPqga} z@1UO}?WASk_ap14)}_NQVA!B5V!I9p!4aPSyS^M;T>I=L$c|o*3TyG zP*2yBpJcbjpISdX%rI7s`cUENTq}(cC#C5l>u1*gbohB28fSXjP2}n^v`-eeCr>`j z!zlHZaroJ2XtT3&_!;U-%QlY=FSi%;Ib(UDSv4B?YkqEJsoJhL%&O6;w-**ny)nO7 zhla$HNDJ=>&3-<$_DJ{oLVCgRbRB+9_q&501a1I@@bGmWwWGs79y4r5K{7r+w|;5; zLd`A><>!KdVF+QKzyy|+G;!S4b@E95_)N>_wC}~1x%?+D?Up&CB{o9V+)S*|l+g;| zmr`|M2gOQBEv1k^il@HC78dbsu=X%w>x$yzwepqs-rKlw6>Z0idZryB=GK(RFOOl? zGRbCXA4cX-w#)Q-Q1PtjQijS}`NI+~q0*{0R`kqhB{Cm&@%zIMOE7yKLATy~B8))@ zBR|lptn!p_X6aEy4n2NH z_jk3w85q05ZwtRWquufbt|@$QgQeT>1Gub)UAJt}(PBEZcRL+e(!TaRPdM6N>wQOe z=?S{&ZO3ctj<(+E;Q1iF^Zp<}Tet1#wp+HVyM~QBs4w61EEkV-1tuVR!#%R52O@sKx~ccHJ6-^15Joj> zpWIJ+7hW3eCR7>iW;bczE?KQc+}RoJuIJeJe#bUorD=F~7{>E6db1klbw@nW?!yh? zgdaqt5f{O^keVej0Do|oI=l|2%>hj90LoWSTX$?&#tFH%bVIZ~Z&%y24bRcR-QYea zK=+e-6h~bf1hxwsSbN*|27PVa>S09Jo$3wUa-e(RZS>M;9Nrf0`i2H47@G*hr;gC> zq5*w?d%?WuuLx|Dj0vK0LxUuc_D8yzTk9M5qE=|>1P1I@ES!36xv^4TS!v>$ukVx3 ziJW=(#T_`#M>AS#`99?ud+H_M!|YT)h}ZZt76E@NN0^8C`vVb#+CBZCdJT)6FY#aR zc)tCCr>?9WQs`h||QL=U|@$#TYw9 zX&4LXnw;SbcXb^oCnAWhK?y;xf=G6y48r?^eji)_dJIeztUPoa`$E00uh66{@aSh3 z_!6z}mFCJRXlCmWKwgUyELNSIwQ zW?SFi->(^N+u%)|2%W7rXB+dg$#M>=U_&dU38~VXje}}n*`2Uzfpe-fsnzH`RYge^ zOyY=|751EAb%0hN`BsLO2X#v~XliKk;qLQ&FulG9*4ej1>9Xm^@)W*tp}}Dd^~KcS z?RUHbA%Yo9cc}G+hj-&yFs!Ys$1SWr(E%f3WC92RaD*@To#d*+U9ihJzEB&QqNT_gvFssFVC)~bCQr=9 z_n~l!u!{Dpv947$0&2esHKJGbI&vyHYQGy6!tdz*9xbW&Tr4N7DJ+tNQ0&aKqeHe} z`DtTry$(r;WuUENmNbLaF!)-Oe&s1$O?45m8>NbA9xr!1N8{3gwx={D$OL{G%NXk( zBx9;VS~1iWn*UuP`WmLh*rkD^r^(T&y9aRt)HDgOgbWL7KeTGv7TwJxda-UdLgM$b zy5M9&+mTX0q|J~E$k)Lv5S9op%`p8Q=<{O{T7yEu`eC9Cl9mrfg{BCb2Er8`g&dbO zCgKM@l{#;vQ_$FeZ^88xHnZEr)mRVUSCgdX$e6C*oBhE(Y#PwW><@HWOEb_pq(Ho_ zw}Y563vbt<14&|dQxPt98Uc}whD|t}u$@3?d(jL+a*~aX*qPUc{~iChP{*Q|@Fc?g z)^rmMgKD%db+hO~Jc>}{Lv#YHhQ_HUTI$X06|=h_TaR=)Il^jtV)j5cXd%UnU{rY( zNY)`Pf%c98PBsjX1CmT;d)i1MQ)fn_5F7f7odJ7Jc9)?IBAJ?*h+k(wGxT&x6|rAW ze~f{$T@&#MlKI2(W3k?xZ>*e3d_XW_OahAQX;44$`SRj|hT(Q-!wN}+9gwKP4}EVR z90xt8>tP;eQudWrt1$^f#v4g_Uku1u$?_!W8srKDxb1=|!|I@4U0kTP9k*&A+y@Z^ zVMFxT#Ox40c2Fe}swB9o^+vThKP+Zg#@&r4nd~;6YS}IDMQ;Gz2LDd%8{Wu_Py5-% z$a)*Ib;vjnhUk`WoB9Ff#_sX@@U%AKo#=B$fbjka`A37QcPV-uYGk)KFeTB~^^Kt& zU~Z)`Utd@nKZTDzDi^`+a~9ktr%%fJ{_z|=8Tb4J3I&`H($9D{566Vx(~;q9c>Rqi zT%L^$|0)W7|1KRw_?>jlo+@(vTkALS?!-C!YvtR`M6Un)ruFM=%o5z(dz zM1PkYv*d0mg1r|x*h>u2vN4T53`hKpP{Ht}k)W?Mi6k?D3pjEcg?Q=Vr;6}0hk_*) zgQESHCb~N&s(1-O&c91U`Y|ERXC96%!*a&*MsyX6y5df+PHFgKcQjYKvZgiUS6rbu z>_480#AhFMJ*;E)DWwL^Iwuer3g==9qkAa&pDVR*CIr5vHMO-htqw~Q1NxRWmn&hj zOv}iCr3O`7(-xR|_*I>pM^Ur2Z{z>qLN_6q8S|}NNAW8lDm|waJ6CdMwC`xm>7kjW zH9WFp@?OO>o8MW}7MZ~mb+lNgrbq_CE8B8IOA4Y~NA#Pq7-aV})&0J~w 
zH_7VcMC;7TNW}P{rvy1M0{xLa!RY5o{jCO*FG=EEMvZW0jYmH#7QwK`Ptg>7XD!!0 z{JGYcn!;}BhLsG1?Z?A6^!sSr_xodbQDME?4L0+oIZD6piZq z;!@KT#!6$+Tt2dXVg0AWFAr;bGDr58$TND4emeT$|A8YHr&JY2B~PSIjNhzzcBNn+ z-Y$p4+K5PlBu;Nw$7~rRLi;RY^Ynu2c+v5+i}%?qpW_*LV(QpGPtyL<_V77P6$?Le z&QismyB&^fkoc5IHl6AmWsnU^oe5@VI+KhZfn_;jwrsn!ZA5~mu`0jQwY1R4-ZeLR z7wIFZn@03^DUgnBoJ5kmftBdAOiG?p;%-=zObMmr-tnEFt5Tnmccv7oZFd`a3_~gv zNhWz`tkuY`O0AJ!B*`=isj&gES=B<`OBB$bShr|ouj8@Q_MZKXo=f)kZ^!!KL?iNC@qD^7gO3$M}i7?2uNdZj_nGvBJ zew^GB8e-&#nO(tgl7SL) z%x!vzEm8{_B@vs*(T|@lgRogeS*eJ+T*#=)DVZ`8eJOl$@gcucpXi%#j_y_DMm+CB zX%k-kHEq<<4hftSHLq(6^Nn-}=DD0^d~T-%ZoPKSRKItSPDKF~BnJh1l-BNYE~Q+@ zZHmQ=1Xa$G9S>LegfQ+Y1xmEbJ){%7*JUd02F{5`C&zJ>-a!$c@+CeHPx(B1$lgWv z2x3Usk93?c!Kr!5!hsK;GDVT6Q@XNs^UmGYt&Q6^ua>r5I%rRQ$N71v3EA)iJ>_^v zc;V~@7YPwa!L-8~B@h(nW@|s?jm-DYFB6t#2aZOE+5($d%G;u4KKA%+uLUt|a`4hjdy$m^D6_ z-8--=BApP~dbv{b)f(1OX&Q_Cp0^(;UwJ84lKyg&vsEZT+6PUnSKx@# zpoff0%3ehpG)aS8Y5c}2vMJ3@EwJ>ypwYW737pE6s<)&Urlg38Qp{=Oq zOon1PUy(PXY0l3#8_QK8bi~{0%VxDbHwQofbGf;&G%uR%d1P1o;_<&As%9a|_V~Zy z!w>&Q{54ryny9a^#xQQr9@#a=>|4V-$5`2YI_Wt{ew|8!oMsK5eHERFk7G`HDMMiY z^LYz_6@HMX@`JqW0lqPUaNCu3BTGVrxJFF3w1;V$?j{8^3M7?2h7_YVl+=lvIYtUW z6UxZ37hmZo5$vASY_RG3qjt zrbkZnR1;?zn>?E7;Uu&Rgb48jaL+YuYHG_<$b;>?0>Mpr8aLSEu-Xhq(s1&e5lU3d zjqd;&1R;7*>@BC^T*CPu91Wv`@C;voRS3ij;dFqw(vcBBL_G-~po<`(;}IZ7j^qOO zg`$#C{E=whp@3tH{8~F&XW%lvhQ>)b{A*6*x-c9#+tC~wI5j~0h+?GCfD_9qn<^(_ z=iyhy@*y;G0P=J+%iE8K+4R5=t5Z`uJ3CVu)jlU$7gQvQXLxv!qvWfYC8nZ?RX$rv zE#>=(X{wZCcRxw6@-z{7mlD%+8qy^#81#T|#n4B@m9&_Y7ZFwsFh|S^{Ik*>gbRcS zL>F=*_q#D6QR)Q|L;N|lMF0}}gc(Ppoe{zuI^ZXBBKxnEg!PeP*a5U2#WR58FmCK6 z^hN+IaJ7*Et`}fHKp+aS#SxZ#KJ5Hm5>nLVbAn`~_BP&LWHO1L z6WM=32|K)N5&fgvT25rGU<`X6PIc3MdMX_u+N*OS`0A*R5VOMsttO(Fp3I5r6&_!u z)yS2#A!P(|wh*%GCrE0fT0}Ah59CDfr!H>J-v2uFGUV*N+y^i+N}N=t$X|JzHcZ<1 z3;!&FPZMLNg@5+r9c4IU)MFzY?;uJC3Pvn{I(w3`{WI#{nfBMweJ-Um_yeFDGc+A^ zO)@|z5k)5t1Y_`yF;EA}WYl`4-y`%8*H(qzhEG zi*eH!9XB$EQtyj21Xvx(laU3L=;#VwH)#N4#2gyiC`^aqwMo=68sh|-l$oGxw~242 zx-L(RGEYeFC)ZHB>%b33__wIk#z206(~XT?;v2yK5e5;VZ40W>RX`A(;|gk@uo%v- ztp8&D*O3A<;}F^2I1}tx2953MVBj(kM88s%Na&#A(c=M*%Zyf(Nx+HthR1ujOX1Fq z0L48^(&XESro+D)vmr#DM2K4Kgdsemwf+HpVtv#JW49d>oWd_Uxff3hcb6UFR6o9* z^5v!T&a*ogd-IOY5!rLdg8=X;PNZVDHYGBhGW*<~s#7_<*0Vl16F!b&g(NTkL=CbZf56EpQ?;9fzO{PmV z8QKS_j@Hgo4=$Twji7O-A0|9seuQ!Y^>n(MDTag+QwAh32P{63Y6ap{t4_4 z#2T<)W){h{?VKyR*J~sTDMO<1eJY74H18>e4b`u-h)u2!RJyu zmGZjM4}Sz!f_}IWtpRn{6Gv|gY-qGqoe#LYRrDP&eU)=H#FwHm9Ujas;G!#p z3V~?NIT}%BhX7&5%JexRD}%$7t_!!^Fqib@rMc?d(qbER;g&^}Fqo?5!u-O#SQKV` zg(?ik$9Io7KYXa}o5ExI66#(*g+_FGCFEo5`%kmgeyrW?G#knpWcX8y&5t9g#tMg1 ztVfcFY6&EH>yJyXzw*lCx9G3S_)Dtla^WU0PGgneG?>$I;KmtrY-^l#^V~p{|IaTL z(Sa9I;BPoBV#1;AaqvuIMAJ#Wy9_=+k-d!*Skp!J_Sv$xi|9ZR9XKZbSVRX>IYa4K z#wRH{a7G#NEP@{70KX3J>gh}Ce*B%cGIzrU@mjZpwTbPT)q7Tn^-<-9H+K2vxC6wT+CV8Kko(UnKE=!9_WrRCOFf zqUDW}mQ+rX^E)VNlGB*hIq{vcP!7hD77b$@ipq)3>op!fo?&JH+VgnuAPo`Q!>mW7 z(r9K9OQ^Cb-I^2WSC|KbTAU0`xnQ6NM;j@bDmwTzK4U&7n&-;LX6nZoVW_8yBC6QQ z@{(S}g}sZ0x$U7^HIiKW9*(7v>_t)ZgU18g5F;43 z1+MD_!F4TD*&8*nEuL7gPX@U49v5qyzyUb@Yrj9RQLfw7t_#Po2LT-$zNK4`8kVq$ zRWr2&N#rEOSJ!*6|H36|SPky3>Y zMi)df1Zirx9vcm5M;|!h8e4B4Vd;@wc0-^@C)<0{W}|+sUAZ=}ozOqrU53skJvx&UU-q?|wNk_^3`gDlE#eOMy6ywa*xiVnF`4R2y2x`w5F zjuI&jSF&=3C{08I=>4#i5)MRvyuF;KUc6fR2nc`YCJ!l-p2CR&hnL$6oyE@l(n7V< z5N35ASt-?(m4y``7cPjUrlHr3hIz!P1czT45&uV=Xn;~-pHIyO@>r6|_=F@TeB%#Z z4pr>DhDgS09_j3Z6#r3;sF(qtMI3$X%VVv%ii#anouKuwQijK-@410!$wNTc+BZE{ zNS7>OV?ZHpu^6M1EBDovySlIUf(o>CxU0GcYg=CbZuDE>Db~t3U=8<8`Koknv%eZ= z#e4m5%qY{ZG`0^XYD{A%v(RBNW|=`KblB{8kR|c{KuGW%sbXtbXf?<76hz%EG>bNhU#J#=1LlBJVrw zW}$;nbO2XEo9WG_p#FMdSNK? 
zih6uj9WlMQ?!n71&OHbh|Gs`eK`YXKOcfZwUQ7q79O6JsHBF|r4;NugJDof6Z6Q6j zW7n4C@qHW6b2t(ly^2mMvu1`8RjhgydRgIHEPRW;FxSTW7B7%~ashm7mrwMu!L402wmh~V@EQXt1TxIX=1DJKs|^@Lz-HvX9t zk=jW*VNP`YQ7Yc6)W4i4`7&j(klNYNoq+YuSkJPao4a(PCG=0zKVfh47vOSM@mT&9}Q{Y^iBX6=2lw(&D-|;wJVu(B-hmCnoQ6_<3*;{OK{rY0y%Uh+QH{1>;9X^ zM+DDPG;MAq&FUoYk5)QQ^nW2v`aS>Qzd6y+n}%sr=NFB+>U^h3;XgyKE-%e@OrbZH z>UHBpugv7=-zOyNzVhONea}@jdHF2=kC6B(kFDKPDe>9Er=)sNsur@tTDG}XzVhCC z8#k`v3hN|%v~8>lr^vc@u1kEDNRcaay5a937(T zTL%H3SPKSi0!Bjla&-hLb4V#C!kcj!P%ghoJS-8!aB>w@-*Ga1M%yTxTB>YyPL%!u zLrh)VpxD0_+SEnVrpEe|iZB=_9|#=J4Awp@XOT3v;Hcx7o{*~m)Czd*GSC5ZRQ}?F zZWUoL>#e^BaKW!tvCq$?QcEYMYD@~q9bUMDahFeXFz!4>zrM;ac^M?E^B)GAU#S~N zNp4n~dIMoFAsW>cF}GA*YBV}}+iV-g@^TRdL#^P+HEG4*hi?>NFwaGD%JZs>HCAgl zRWflA1_LC-GcQvxc0LL%uF&Gd2z2TN(&ENAB619T{rR!*OF4NB7Rkc}*rziFzH*{= zNx?7a@F*JOeTngxw6Q=(7e2)FkLS%PLJ;Ue=gJS{yd{U7sr#nZ1GutXwdf~8C30zU zW*C|%`L5CVOwII=#m%tlbHG&B20?*dP96c3gF?_kl=h2%E~i21#8-M-6c1z4o^xr z@JVv$Zg+reY)A10O5LHn^5Tk}C8zG}$ksc~>2w$~cvtEA$yFO#j ze{s|LlJ%9eh5uFS4eL!SvnRc3mGIxC#3x@;u7S#?HNBZ!e$)D@{2{sYrEyM#aW9?6 zOEXS{SFNAozf0E7tOP7lUVT_UK7n_br%E$Np!FYI5ZFsqW67+~byntu zzBGSi{o?S;!xMjG{Sr9Xuk&yp9eyJo?Vgvok_3WamG(X}iO`iQOUXu86j*@+2mGCWmy#5D{hXKHL&d z!s{OGW8nr0(?oR;aTTP7_-Cy={J6w;?05qN=NB}?8o0Z`>b86B=Gz;aRNiC!{Q#td zt{2p{T}t3S#cPqSj0dy^n!e!~C>Q01NKjsN2fb=z+f_8LYTBH3#R|iIusS>IP}Q9p zmEozOyruj)cXg*F!||@z*E)L7M!7Li+4D@ql(APk0?Gs;u?N~6JgBX2$f!7~wfd+< z7H9+DXmW5I6(Ly_;T^xjmw4Kms?}{bX;wFUFCbji9fZ-LD&Uu-C{r6sgVodh-5?>| z8g#qRN;KW}ln_x>ZK-|@fotk6%)1cEq;Esv*{9ID;{avZpgJ*f zH1GtqCC3r3hW?~p2rCOC-2-)C4(fdX{VCiR!j<}av$`bT#&2hBUiJYqz3o=BN`9d2 zbbW8oM@MLQi;!Q6V*qRy7$zunB786j@ZrOei&S;LHbRpB*cO5E7Pc5*Z zkqh%3DRFT!93oevifb(7q6&{4kJ6%sUmHNkOZm6sb>K`Q@yL?2PX^(*Rh5$D4sB0! zvu(#~&yskYy?1l{#+{qBo=J-pW7s8`nG9)6aBbVymohj(FiqAD>LqwM++La6EI zYw5nAArvN-ecWN89FsM)$pmr|gIQV17)0Pj^k1#UwfU-`)Yx5JCGN85x`sbA?#Y;@ zdPl<$?Ni%s=~SD1Bq!#^$ccJtlV9D5XHiZwHYu3#UW=Kl zmO!ZwS_jca#O^+BqM0!5=@_+Ir8c*~MCy6887hWS7C$Y>pf3y?oB`!Yy?funbjUgx z5hG*!uB!ce+hv}tY4>eXENISC4etXwKH2LPnm29Evc32vGn(CDp4dl;KR`BknYuJfp#Ogw(1Q-ZrNHmI2`oqe_e?&|4K%Y1P7f%GdocGs2tWez8q>6%#a zM65yXADWF@s6N84O;)3t963JQcuLJe7v=6!E0nn*AW9tav)unBVo1~SwkX}PkaiO#UXGrZVnfTG2A zCAj)0uh3^Q+KkA+@zNKxVSR`>(PXGuW^MQxTy+~hXe)1u3 zY|Ic;PiWBbYqcx00f^pqd5q?gWxGRlel672!<5GI>FFi~n*M3!PFd+E^C!ri?bG%0 z%HIl)jFZ3Ru5N)P;(3NPZo|enNR&J|KN#2p$p}U`&>h@>bqbTjOm1W?9!z9M!-u5O z+alb?p0=+$yOHuupCIf`@P2M)7`k=-Fg9d4wEK=G)oV~-sB*!O6~K!?O3(Q1k}J@`|| z#{vi%{5{mT8n-_Sp{VLW->F06kNOhhg2R!f<= zIiefOF(FA!6BY%%msTK=gO*2C-1Icbm~hmu8jHA~=8*Q@)ZiEL{7{qMT1#!18J@4V z;_q^%FKJW`){Inx==F3CfJa0ccr>$QxwWJeT}xW6!0M)7FR{Q?RcN73u+vq9 zal{A<9S;P;=~UBETT;x(=i~9~Ni};|dY@MPV}dW)jX&*0pGaJlFfIK-C@(#VUL%_V zkgG?Xmb}#B9*175=kz(WM*1RiS^SG^Oa2HR$+Dn4fPu_4N_YCse&?FC_vr#1U{`mNDQz`R zz07ZJ-XO|P_PQeQSd>OA0_3Y)!}^4i1lR?04UqrCE3bU&75YOJ`z}DFd^9|NIFvFr zF+~3~VMGejk9l=7ASP?|o1_FPDKZZX$C zcyZ<3P6vKO&aoofihbNIii$Yl!CIqMmt||AH4424>9&R1C0r1g6PBg+NtY?Bid9ir z(gOMnydP^@Y^djgQlR22Kh(Y;X%f|9z zXHLvl8*`0jb$)qask+?O_3BDPH0RnKb8b#A^1C+W(NC4NiTtkWn?-&XVSmVSwhU1iWnd@HUcNO_vaSS0&?y@msi4P1lQy^n7z@B#PqCn~RZtzMMc&6e6NLh|x7FT5ZU0fy35Khan#V?y$u zy8R+TwPRlVRvbd+gB_VfvBW{26LEi)c6)Qa?r~sJ=|kAnrJP9q^tFqd-1fh`dfo!g zd|WLIk%CUgZ>T^(oVdhC2RQS6D^B0naFVT$TqYQU!9I>kL-0rhejEVdqFnI0PsSomO-Ywq1Z|g*Y1tY!%fwop8jlOWoPm4{%;x@PSJO zByynb>;!;#F~rW!3_jCqS{)n54CyK1|-WyR(xXua@CCgB~s_+A_Zd;emsG05k-cHAE#yPKuB1?34lv zC+oZYuq1nfbu-BhWL+>DkFh*dS1%4Y)<#b0P{7|d%G;;~ZQ7K#94A5wr z+={$Tzs2uei$A4~qD!7!D?LVnZTg6M!k=p;`aIh^;H6O;zLA-Bt;FyO=>{?kBbf;; z0cXkU4#dP8nX}ds{!x@hr)~*ZBStei(0fiaxR;P)Q<8H``@nf`qDMFy$bQ5B z;7=oFkEK1_K3p03HLVaXd32CP$^N#Y+Xb 
zClb7BUu>6wj6$FS5zL89=R#lreTW}W)CP?xoWGFuj3|eX+b&=xX)K3`l8`Kf zNkw~+f}T?GOrWC4al3OzsSd2y+C7n_@AXtzRM}ek~06ip>8gGYd$NWKhC(ZYkgtbxWS&Z{R z5E9$&F^{g1AmF7T(Ye?ihm2;-B*V(Ify|)oFcQV2Wbj6P5$J@A%LMibeJEK@Rlyl! z5Zla;3UwzGhUMCD0b+KE!=M1{^Sh#dNe0oh$AX?GON|KHSN&r?k}HwO!wOLAN_TMR z#Dweu!o93xz~p1;)9WI1HfSt3p#qr-=sJ3V!V=nc(QTlv?F0IgKv|-h$S81fX%d}} z2faQ$y$WHrDu+ouK`aEuu~9c6t@~YJfC>3S1e6ETSr~VHfa|Vmz@3nC1MEPTsx~;E zG12hjst4>v2%6O$91I1NWVP)9EbXrvqW4!j+5=L`XxexdfvW4HCE1pyeOvyV9t#?A zNV5PKJb^lDs+j|V=8yV~kID1vu11hydp59T3GI=_C*GNP%ZR5edFUO96@%fWsmlzh zLY0j6^O0x769hL2n^Otg1gA#5It^A6b@cc_b0Br918fS=$wf45cYS<$e_#WWQy?oN zq2n3^DWMhHx-S_`PIKJLE75R7GnqVkCx(fPxr2@hkgQ1_S67aEnr4$Rk|Oj)+A$!= z6~i$g4uScq)gX}=uS7RXOq`^B00t3@U@6cDes?GXg}N?z_%utM5uGuqrrrmZYL!5A zP)vk(a1yORWYT@?c=X$NIY5+P=Yyd}nvLGZMg+LeD$tsDA>Wy?BuFY@>i9SS&L&;L z1QB!$q01QrMBhP^Q8{MV0df;VN-zKt8M=q?5NR!;1*KJB-f3T_cgNj|I)cU!b)~S00=$us zIJDb7281AiFHzGhhSRU*E~5)0_&6D5PI94?hNKJ(+=z=2U9p6*3-t*y&k`UxR>CBr z5mCFtm_%%D5_voyF+&Kqwr}r3szKRgyl470dk=Hgs>n{Xbf+uYzCO(WjigwT*fs3{ zMt1}h3k_0DH1L6Z?+FktWv5Sg&u#TsEAtp$R3qNKc90TV$n9ZQ6e~=FduY{>cqa)2fK!?*z(~r&_#`gDJV3RTNWahtW%=unkZ2)QBswljeZu$y zT6!BCgONP73)0zr==iis*XOys`KHiouC4W}oG0sTf~vX|IC$Aua51!Rr89SiT5jL0TF*BKyDtz|$cWa(Pv5q&ywm-;^X#T~Die zL%VB=UsE4xYHyK$h=3%Oj}2ICw3X20M4mP7nR*==L~^?yR-mvm$o*SU6HC+75k#Qu zk-`Oclyaj;Fzor|#pa^fSTUQ8`f_KbX)MjJ%%kvZ+g#|>JDtXSz22PDn+vnD=>Rr6 zAon7QYW)K#gi~ryq>Hll(jho-glpspBa*|!GpxPD@X@qQB=#wcJn5OFjaunlykn8l zq!7r135pJ8??+04<2^3v_WZ)6LE59``yycF5$zN#Aqgb)isdksad`Xgdx2KrjTs#y z+J10v{SL1sAQIxRJIZA&zI|;v5YyhK0Np?~a@C>ILti&#M> z{Bd&a2&A@SgTsW8mzrivI!2N;3!i85MY_({9>OchT--|iL~Hv|!UU)Us3FA_CcDIg_5Shyw4v^%UJt}7QPEJRJp zU#xK&y^MF7-{!DjL*}}?fv=_EDOZ6#Tb8h#+sJs@E=!2Y?ee441EhDODopB%?({8v z74n6y)oM)l0|Lv$en!$3Q+`%GVHlZqSP^6|)V8ZaIx+K(cfBvSq3O{%mm5(1((X(0 z=A6_-jOaPnirX2{7*CL@uV%vXq;x>|NX2G{{(h!l`A{ zR<&FwFbh90jxR=sF4JK-%h8K~4+&s2dXseO(s&X;HdeT_+@&)&rPr`R&ORs^+LYkU ztfX9twQCxctB49hY5{8%sOX4()?&9mBfY*qfVKt8gT)7#^h(o+jsUa$0VPKY%DCnC zsp%b(zEUOG*@+rbsy=Nv@XEo05PR5=S#oC0sc(%N*kr?ePE(u3u8mP`)3ZOadQ;%C zF`{lM0-4oD9;J1>)>s_s;Xve_}kBT)Z!=jd?T$_XAlUk34ApOQn=c6A~|D&CgpMH`&VrY(H8_iKarEKI6kB4N2m zSWca@Q!3yDW_h|S(cx6hB4Jq%;6o=bdXcc4$=NLumN}n4bJT}}Ek(j|d>T?os*wY* z(W#G5&@tXKBrKm1B{4c%cIp(!I2Dl&ZN<5WG*{!Z5l@>Hc&hBY==cepD&o^DLs@b- zJ@wROh9BV&@_R_N%w}wH@@1-^@s=Rho6zWxAB->>J|YMN;HX@P!#M?D^&s?CwQ>Pi zeQ^M*FJ}7qr#~z`@9fwKiR*t79`psJ6G!Rdr5PsOBluf6gL|7HDC>W9ar*I#+%@bV(?sFw|6u4*(IdUd|7!?ZRHqbe5K z%VuM)y}Z2KIeL7V?)~!c$&;hUZ{hk!M~AO-zWLGNmsGlW)ZY{|HNTGU=f>noipP5( z&HCp!mop>zcx#fQUzgtD_cai)3-o}lq(2GLPb6TVgu9b%uOr$lr z;o8G3=uX#3g>u6U0Nqtc#IBV|qoOuQu^M@tn+80{0JsGUz=j#Py8(=ZYd7EC*d(-q z;n&IgeGv>CfGwT%jC_&%UF3cPD}qAr1z2?fR$YKq2L)Jl0aon;k{ysU3Q;+Y_ELaV z6Bdse7)GQL9UYp7R)AGgPIv)UU4T^=VAb-VO_BQ@PQq}c07C&*4bZFtthxZJX0-Am z_Z#(pP$6L$qp-;RE^@yKI6K0DqZ6zJSarH~OaWF+V4Kky)dH*s!dNx(>%+h0;^t z99t`2dGA!|S5ZOdVC};)CoLjrxjdA$_{oL+j0*M+r+y8i63|hV0{<#T)Q&7BIv^2w zTVkN);pL1xyvR#e6znYu_D-mBT;N|7_*X>?w7|dmgyUaP4cHr=F`z=g6ayVrL<)GM z0v@S=M=Icvih8a^J=db1D^dqxnNL8xd8y%%=7#iy;an*d+VpjsQ_8qnD`$tiKd_Bm z)F9t>9}iH&I_L`n#gkD53t?f;?bu!UBr4vMzXPT20mNXVHZYy$K%CaIuz8|*`(fk&AD_88K z2n^3BpcrVFG>zUZOAXUom`FuCo}mWV`-f7_-eVyvpmG0d(X+cP`f0+myW(K-ld>f0 zgePS^>xpmm24Q~?9`|YtC6%zH+U0!jI69~g4=mfkfU0Z{Ex}Mf18{(Q~{f~0&UWKXIqrx zv#K4_DEF509Bfy2Mn{ynVT<~Y{|$lxoXwC8=^@8p?m_uz4;-CpdiQlyTCPAW)QDI| zce?``4Sr7mycLxCrXlsffK*2vRhF^1x>~3Qy8+Jw*C|K+_70Xg>eVNV5@=NHv%5GiPmmp`4}4{1${UR!W! 
[GIT binary patch omitted: base85-encoded delta for tests/integration/fixtures/recorded_responses/chat_completion.pickle (Bin 620451 -> 888589 bytes); binary content not human-readable]

diff --git a/tests/integration/fixtures/recorded_responses/invoke_tool.json index 77995f72f..2878275da 100644 ---
a/tests/integration/fixtures/recorded_responses/invoke_tool.json +++ b/tests/integration/fixtures/recorded_responses/invoke_tool.json @@ -1,4 +1,13 @@ { + "()_[('kwargs', {'session_id': '', 'code': \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv('inflation.csv')\\n\\n# Convert 'date' column to datetime\\ndf['date'] = pd.to_datetime(df['date'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\\n\\n# Plot the time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Average Inflation')\\nplt.grid(True)\\nplt.show()\"}), ('tool_name', 'code_interpreter')]": { + "type": "value", + "value": { + "content": "completed\n[stderr]\nTraceback (most recent call last):\n line 5, in \n from bwrap.core import main\nModuleNotFoundError: No module named 'bwrap.core'\n[/stderr]", + "error_code": null, + "error_message": null, + "metadata": null + } + }, "()_[('kwargs', {'session_id': '', 'code': \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load data\\ndf = pd.read_csv('inflation.csv')\\n\\n# Convert date column to datetime\\ndf['date'] = pd.to_datetime(df['date'])\\n\\n# Group by year and calculate average inflation\\naverage_inflation = df.groupby(df['date'].dt.year)['inflation'].mean()\\n\\n# Plot time series\\nplt.figure(figsize=(10,6))\\nplt.plot(average_inflation.index, average_inflation.values, marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Average Inflation')\\nplt.grid(True)\\nplt.show()\"}), ('tool_name', 'code_interpreter')]": { "type": "value", "value": { @@ -80,6 +89,15 @@ "metadata": null } }, + "()_[('kwargs', {'session_id': '', 'code': 'import pandas as pd\\nimport code_interpreter\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print the data types of each column\\nprint(df.dtypes)\\n\\n# Print the summary statistics of the dataframe\\nprint(df.describe())'}), ('tool_name', 'code_interpreter')]": { + "type": "value", + "value": { + "content": "completed\n[stderr]\nTraceback (most recent call last):\n line 5, in \n from bwrap.core import main\nModuleNotFoundError: No module named 'bwrap.core'\n[/stderr]", + "error_code": null, + "error_message": null, + "metadata": null + } + }, "()_[('kwargs', {'session_id': '', 'code': 'import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\"\")\\n\\n# Convert the \\'Year\\' column to datetime\\ndf[\\'Year\\'] = pd.to_datetime(df[\\'Year\\'], format=\\'%Y\\')\\n\\n# Group by \\'Year\\' and calculate the average inflation\\ndf_avg_inflation = df.groupby(\\'Year\\')[\\'Inflation\\'].mean().reset_index()\\n\\n# Plot the average inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation[\\'Year\\'], df_avg_inflation[\\'Inflation\\'], marker=\\'o\\')\\nplt.title(\\'Average Yearly Inflation\\')\\nplt.xlabel(\\'Year\\')\\nplt.ylabel(\\'Inflation\\')\\nplt.grid(True)\\nplt.show()'}), ('tool_name', 'code_interpreter')]": { "type": "value", "value": { @@ -98,6 +116,52 @@ "metadata": null } }, + "()_[('kwargs', {'session_id': '', 'query': 'How to use LoRA in Torchtune', 'vector_db_ids': ['vector_db_']}), ('tool_name', 'knowledge_search')]": { + "type": "value", + "value": { + "content": [ + { + "text": "knowledge_search 
tool found 5 chunks:\nBEGIN of knowledge_search tool results.\n", + "type": "text" + }, + { + "text": "Result 1:\nDocument_id:cc646\nContent: .. _lora_finetune_label:\n\n============================\nFine-Tuning Llama2 with LoRA\n============================\n\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\n See :ref:`below` for how to do this.\n\nLet's inspect each of these models a bit more closely.\n\n.. code-block:: bash\n\n # Print the first layer's self-attention in the usual Llama2 model\n >>> print(base_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (pos_embeddings): RotaryPositionalEmbeddings()\n )\n\n # Print the same for Llama2 with LoRA weights\n >>> print(lora_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): LoRALinear(\n (dropout): Dropout(p=0.0, inplace=False)\n \n", + "type": "text" + }, + { + "text": "Result 3:\nDocument_id:cc646\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. 
_lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "type": "text" + }, + { + "text": "Result 4:\nDocument_id:cc646\nContent: from our Llama2\nmodel without any wrappers or custom checkpoint conversion logic.\n\n.. code-block:: python\n\n # Assuming that base_model already has the pretrained Llama2 weights,\n # this will directly load them into your LoRA model without any conversion necessary.\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\n\n.. note::\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\n :func:`validate_missing_and_unexpected_for_lora() `.\n\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\n\n.. _setting_trainable_params:\n\n.. code-block:: python\n\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n\n # Fetch all params from the model that are associated with LoRA.\n lora_params = get_adapter_params(lora_model)\n\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\n set_trainable_params(lora_model, lora_params)\n\n # Print the total number of parameters\n total_params = sum([p.numel() for p in lora_model.parameters()])\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\n print(\n f\"\"\"\n {total_params} total params,\n {trainable_params}\" trainable params,\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\n \"\"\"\n )\n\n 6742609920 total params,\n 4194304 trainable params,\n 0.06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. 
_lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe ', 'query': 'How to use LoRA', 'vector_db_ids': ['vector_db_']}), ('tool_name', 'knowledge_search')]": { "type": "value", "value": { @@ -307,23 +371,23 @@ "type": "text" }, { - "text": "Result 1:\nDocument_id:f76dc\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\nlook like so:\n\n.. code-block:: python\n\n from torchtune.datasets import chat_dataset\n from torchtune.models.llama3 import llama3_tokenizer\n\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\n ds = chat_dataset(\n tokenizer=tokenizer,\n source=\"json\",\n data_files=\"data/my_data.json\",\n split=\"train\",\n conversation_column=\"dialogue\",\n conversation_style=\"sharegpt\",\n )\n\n.. code-block:: yaml\n\n # In config\n tokenizer:\n _component_: torchtune.models.llama3.llama3_tokenizer\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\n\n dataset:\n _component_: torchtune.datasets.chat_dataset\n source: json\n data_files: data/my_data.json\n split: train\n conversation_column: dialogue\n conversation_style: sharegpt\n\n.. note::\n You can pass in any keyword argument for `load_dataset `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. 
grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `.\n.. .. _glossary_fsdp2:\n\n", + "text": "Result 3:\nDocument_id:8bcf6\nContent: ` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. _glossary_fsdp2:\n\n", "type": "text" }, { - "text": "Result 4:\nDocument_id:c4fc3\nContent: 06% of all params are trainable.\n\n.. 
note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "text": "Result 4:\nDocument_id:cc646\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. 
code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", "type": "text" }, { - "text": "Result 5:\nDocument_id:de2d4\nContent: etune\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.use_dora=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n use_dora: True\n\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\neven more memory savings!\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\n model.lora_rank=16 \\\n model.lora_alpha=32 \\\n model.use_dora=True \\\n model.quantize_base=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\n lora_rank: 16\n lora_alpha: 32\n use_dora: True\n quantize_base: True\n\n\n.. note::\n\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. _glossary_fsdp2:\n\n", + "text": "Result 5:\nDocument_id:8bcf6\nContent: etune\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.use_dora=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n use_dora: True\n\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\neven more memory savings!\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\n model.lora_rank=16 \\\n model.lora_alpha=32 \\\n model.use_dora=True \\\n model.quantize_base=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\n lora_rank: 16\n lora_alpha: 32\n use_dora: True\n quantize_base: True\n\n\n.. note::\n\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. 
_glossary_fsdp2:\n\n", "type": "text" }, { @@ -335,11 +399,11 @@ "error_message": null, "metadata": { "document_ids": [ - "f76dc7f5-9648-4272-a579-c8387fb1408a", - "c4fc3cb6-6172-489e-90a7-b39d343e14c0", - "de2d49de-55de-44dd-9bca-6f4f6d633b0a", - "c4fc3cb6-6172-489e-90a7-b39d343e14c0", - "de2d49de-55de-44dd-9bca-6f4f6d633b0a" + "ab1b9c78-180f-48cb-bbef-c70a4a59e42d", + "cc6460bf-74ab-4d11-8d32-bc02144a4e79", + "8bcf61e4-98c4-41a7-87f9-833c1a4d2b28", + "cc6460bf-74ab-4d11-8d32-bc02144a4e79", + "8bcf61e4-98c4-41a7-87f9-833c1a4d2b28" ] } } @@ -398,5 +462,41 @@ ] } } + }, + "()_[('kwargs', {'session_id': '', 'query': 'when was the nba created', 'vector_db_ids': ['test-vector-db-']}), ('tool_name', 'knowledge_search')]": { + "type": "value", + "value": { + "content": [ + { + "text": "knowledge_search tool found 3 chunks:\nBEGIN of knowledge_search tool results.\n", + "type": "text" + }, + { + "text": "Result 1:\nDocument_id:nba_w\nContent: The NBA was created on August 3, 1949, with the merger of the Basketball Association of America (BAA) and the National Basketball League (NBL).\n", + "type": "text" + }, + { + "text": "Result 2:\nDocument_id:perpl\nContent: Perplexity the company was founded in 2022 by Aravind Srinivas, Andy Konwinski, Denis Yarats and Johnny Ho, engineers with backgrounds in back-end systems, artificial intelligence (AI) and machine learning:\n\n Srinivas, the CEO, worked at OpenAI as an AI researcher.\n Konwinski was among the founding team at Databricks.\n Yarats, the CTO, was an AI research scientist at Meta.\n Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\n", + "type": "text" + }, + { + "text": "Result 3:\nDocument_id:perpl\nContent: Ho, the CSO, worked as an engineer at Quora, then as a quantitative trader on Wall Street.[5]\n", + "type": "text" + }, + { + "text": "END of knowledge_search tool results.\n", + "type": "text" + } + ], + "error_code": null, + "error_message": null, + "metadata": { + "document_ids": [ + "nba_wiki", + "perplexity_wiki", + "perplexity_wiki" + ] + } + } } } diff --git a/tests/integration/fixtures/recorded_responses/invoke_tool.pickle b/tests/integration/fixtures/recorded_responses/invoke_tool.pickle index 33bccd4d34f1167ba246bb389c88de885e55eee5..a03204511a05a9542d235aab184538a73de63b56 100644 GIT binary patch delta 1456 zcmah}Z%i9y80T&+*EnN4vldF*?2z(*S803Ft8Cy035(9+mmzWJa(BJU%LPjV?bu?L z7TgSDjch*0HUo~${mUjY@Is76q9bNsh&VIj5~Jcw{4x{cjEP@;-)p-8hIp6!?tOpH z`#kS^zvq3Pk3QJ))wrcJYnj+jQS+rEPKfF*`1q~sa98&jXI)n*cvg2lB;4*18dlEO zX;cH5u`-x*u2Ky~!rNGG6{VmQHO}!%HjCWMHygfgzYTu!dEjDug;kJbwb@u|U#9ZL zQyqUY1@i6Axzdd3EJp+`C^V@YFUS%nNs7wJktUHBpNT4bD6Z%HGKt(Isa%8?C612= zgPaf#hd4=Y3I%yyW$@?z&=EWXlzoob8ADtQ1>i>Zy)6OM051;D ztP#8dZjXGuZhr9S46KItK;P6Z_|fMzmX4LtSmg>jcNFF;U9e;N5avB_Zgh#zaAlFWFxA@_~h))tyG^mNy6usuD1Q}CL4t} z9deeGaww&18JHidg!8^#G#)xmxVGiPW$-aH0{P4s2X@!l3M=dzRv6-JC$pJtdEtjm zZC@_JrZE$@d_~*#4g2M4+GEedy|I9ooG-7s@*fTHEhvN6yZmA8BKei|P`s-|VIsIG zb1{FtB!A$9JHXuD$2JplqzLOX*n(-2!Fp{s9>ug)6Wa(r${<-(44s89#>4QEJ8i9v zizx+WCfx9|`$HNjFn2l(pH=+2_JY#6sZ)*+{L^JOESEinYJb=Yq*+hC{)cB(rq~I- zXW-^yf&XRkdZB&1bF0Tj598nJbjba2raSX|M0WTowB=VOi`7-+|gtw)92q_76NE{?V*n}jq@_>SsiNj5AKfHN4 zSSU^pom+r!r(E#DiEE@*X&Y^}h8@#(H521}R#mH!v|;Z(b?$L7X+s z+$<$|^E{J79Fq->^KDj-km8tpS5|uRVpl2FlvJY>lg)=*Pcwq$X1hOQhl;iNZ(vge zYc)?Z)wMJ;vCuU!GB?sqG&Q%>O|~$$Fi%S|G%>J9oHBX$d3CiO6|hm3DXF@qra;2P zBqc@HGATJx*DTE>%`C;t*f_}`aq^0=jcVAu5exLD1#(!3Pp%KIWEanvoiRJYWO9Rv z$@h#GPX2XG8j-+k@0)=+yz5ojC*P5knSAIOME3Qw=j>pa 
zmVKEVljj;qPfqzRv)SM{BL`5K^yE4JT{a6}C}LDF09uj-vK>sIWRxsZkOKmjbZBOo X16h7q7GTm6M8bU!Rs;!v(o#JDKWIan