From 95f204d05cb09dcbf9745cd7bc117e1e5b35d4cb Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:06:13 -0700 Subject: [PATCH 001/119] bump to openai==1.67.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0e90c69b73..0683d105e4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ # LITELLM PROXY DEPENDENCIES # anyio==4.4.0 # openai + http req. httpx==0.27.0 # Pin Httpx dependency -openai==1.66.1 # openai req. +openai==1.67.0 # openai req. fastapi==0.115.5 # server dep backoff==2.2.1 # server dep pyyaml==6.0.2 # server dep From d384e790269f36171812e80d072b27c983ba2a68 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:14:59 -0700 Subject: [PATCH 002/119] test_openai_o1_pro_incomplete_response --- .../test_openai_responses_api.py | 97 ++++++++++++++++++- 1 file changed, 96 insertions(+), 1 deletion(-) diff --git a/tests/llm_responses_api_testing/test_openai_responses_api.py b/tests/llm_responses_api_testing/test_openai_responses_api.py index 37674551fe..2dc33653a1 100644 --- a/tests/llm_responses_api_testing/test_openai_responses_api.py +++ b/tests/llm_responses_api_testing/test_openai_responses_api.py @@ -94,7 +94,7 @@ def validate_responses_api_response(response, final_chunk: bool = False): @pytest.mark.asyncio async def test_basic_openai_responses_api(sync_mode): litellm._turn_on_debug() - + litellm.set_verbose = True if sync_mode: response = litellm.responses( model="gpt-4o", input="Basic ping", max_output_tokens=20 @@ -826,3 +826,98 @@ async def test_async_bad_request_bad_param_error(): print(f"Exception details: {e.__dict__}") except Exception as e: pytest.fail(f"Unexpected exception raised: {e}") + + +@pytest.mark.asyncio +async def test_openai_o1_pro_incomplete_response(): + """ + Test that LiteLLM correctly handles an incomplete response from OpenAI's o1-pro model + due to reaching max_output_tokens limit. 
+ """ + # Mock response from o1-pro + mock_response = { + "id": "resp_67dc3dd77b388190822443a85252da5a0e13d8bdc0e28d88", + "object": "response", + "created_at": 1742486999, + "status": "incomplete", + "error": None, + "incomplete_details": {"reason": "max_output_tokens"}, + "instructions": None, + "max_output_tokens": 20, + "model": "o1-pro-2025-03-19", + "output": [ + { + "type": "reasoning", + "id": "rs_67dc3de50f64819097450ed50a33d5f90e13d8bdc0e28d88", + "summary": [], + } + ], + "parallel_tool_calls": True, + "previous_response_id": None, + "reasoning": {"effort": "medium", "generate_summary": None}, + "store": True, + "temperature": 1.0, + "text": {"format": {"type": "text"}}, + "tool_choice": "auto", + "tools": [], + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 73, + "input_tokens_details": {"cached_tokens": 0}, + "output_tokens": 20, + "output_tokens_details": {"reasoning_tokens": 0}, + "total_tokens": 93, + }, + "user": None, + "metadata": {}, + } + + class MockResponse: + def __init__(self, json_data, status_code): + self._json_data = json_data + self.status_code = status_code + self.text = json.dumps(json_data) + + def json(self): # Changed from async to sync + return self._json_data + + with patch( + "litellm.llms.custom_httpx.http_handler.AsyncHTTPHandler.post", + new_callable=AsyncMock, + ) as mock_post: + # Configure the mock to return our response + mock_post.return_value = MockResponse(mock_response, 200) + + litellm._turn_on_debug() + litellm.set_verbose = True + + # Call o1-pro with max_output_tokens=20 + response = await litellm.aresponses( + model="openai/o1-pro", + input="Write a detailed essay about artificial intelligence and its impact on society", + max_output_tokens=20, + ) + + # Verify the request was made correctly + mock_post.assert_called_once() + request_body = json.loads(mock_post.call_args.kwargs["data"]) + assert request_body["model"] == "o1-pro" + assert request_body["max_output_tokens"] == 20 + + # 
Validate the response + print("Response:", json.dumps(response, indent=4, default=str)) + + # Check that the response has the expected structure + assert response["id"] == mock_response["id"] + assert response["status"] == "incomplete" + assert response["incomplete_details"].reason == "max_output_tokens" + assert response["max_output_tokens"] == 20 + + # Validate usage information + assert response["usage"]["input_tokens"] == 73 + assert response["usage"]["output_tokens"] == 20 + assert response["usage"]["total_tokens"] == 93 + + # Validate that the response is properly identified as incomplete + validate_responses_api_response(response, final_chunk=True) From da87ec0966eee683fccabf2d2ed614204b4dccc2 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:18:38 -0700 Subject: [PATCH 003/119] test_openai_o1_pro_response_api --- tests/llm_responses_api_testing/test_openai_responses_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/llm_responses_api_testing/test_openai_responses_api.py b/tests/llm_responses_api_testing/test_openai_responses_api.py index 2dc33653a1..709fb316fe 100644 --- a/tests/llm_responses_api_testing/test_openai_responses_api.py +++ b/tests/llm_responses_api_testing/test_openai_responses_api.py @@ -829,7 +829,7 @@ async def test_async_bad_request_bad_param_error(): @pytest.mark.asyncio -async def test_openai_o1_pro_incomplete_response(): +async def test_openai_o1_pro_response_api(): """ Test that LiteLLM correctly handles an incomplete response from OpenAI's o1-pro model due to reaching max_output_tokens limit. 
From 14c54e44f7fd9cb1bdf3592bcdbaf343d8c8826c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:26:05 -0700 Subject: [PATCH 004/119] bump to openai==1.67.0 --- .circleci/config.yml | 18 +++++++++--------- .circleci/requirements.txt | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0a12aa73b8..a1a7e69c40 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.66.1 + pip install openai==1.67.0 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -168,7 +168,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.66.1 + pip install openai==1.67.0 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -268,7 +268,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.66.1 + pip install openai==1.67.0 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -513,7 +513,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.66.1 + pip install openai==1.67.0 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -1278,7 +1278,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.66.1" + pip install "openai==1.67.0" - run: name: Install Grype command: | @@ -1414,7 +1414,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip 
install "openai==1.66.1" + pip install "openai==1.67.0" # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -1536,7 +1536,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.66.1" + pip install "openai==1.67.0" - run: name: Build Docker image command: docker build -t my-app:latest -f ./docker/Dockerfile.database . @@ -1965,7 +1965,7 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "google-cloud-aiplatform==1.43.0" pip install aiohttp - pip install "openai==1.66.1" + pip install "openai==1.67.0" pip install "assemblyai==0.37.0" python -m pip install --upgrade pip pip install "pydantic==2.7.1" @@ -2241,7 +2241,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-asyncio==0.21.1" pip install aiohttp - pip install "openai==1.66.1" + pip install "openai==1.67.0" python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index e63fb9dd9a..fc0bbb2ee2 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -1,5 +1,5 @@ # used by CI/CD testing -openai==1.66.1 +openai==1.67.0 python-dotenv tiktoken importlib_metadata From 1567e521856124251056bab3ecde6e370ad86475 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:54:26 -0700 Subject: [PATCH 005/119] add should_fake_stream --- .../llms/base_llm/responses/transformation.py | 9 +++++++ .../llms/openai/responses/transformation.py | 24 +++++++++++++++++++ litellm/utils.py | 21 ++++++++++++++++ 3 files changed, 54 insertions(+) diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py index c41d63842b..53a7a21290 100644 --- a/litellm/llms/base_llm/responses/transformation.py +++ b/litellm/llms/base_llm/responses/transformation.py @@ -131,3 +131,12 @@ class BaseResponsesAPIConfig(ABC): message=error_message, 
headers=headers, ) + + def should_fake_stream( + self, + model: Optional[str], + stream: Optional[bool], + custom_llm_provider: Optional[str] = None, + ) -> bool: + """Returns True if litellm should fake a stream for the given model and stream value""" + return False diff --git a/litellm/llms/openai/responses/transformation.py b/litellm/llms/openai/responses/transformation.py index ce4052dc19..891b8849db 100644 --- a/litellm/llms/openai/responses/transformation.py +++ b/litellm/llms/openai/responses/transformation.py @@ -188,3 +188,27 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig): raise ValueError(f"Unknown event type: {event_type}") return model_class + + def should_fake_stream( + self, + model: Optional[str], + stream: Optional[bool], + custom_llm_provider: Optional[str] = None, + ) -> bool: + if stream is not True: + return False + if model is not None: + try: + if ( + litellm.utils.supports_native_streaming( + model=model, + custom_llm_provider=custom_llm_provider, + ) + is False + ): + return True + except Exception as e: + verbose_logger.debug( + f"Error getting model info in OpenAIResponsesAPIConfig: {e}" + ) + return False diff --git a/litellm/utils.py b/litellm/utils.py index 677cfe7684..6bf1560a3b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1975,6 +1975,27 @@ def supports_system_messages(model: str, custom_llm_provider: Optional[str]) -> ) +def supports_native_streaming(model: str, custom_llm_provider: Optional[str]) -> bool: + """ + Check if the given model supports native streaming and return a boolean value. + + Parameters: + model (str): The model name to be checked. + custom_llm_provider (str): The provider to be checked. + + Returns: + bool: True if the model supports native streaming, False otherwise. + + Raises: + Exception: If the given model is not found in model_prices_and_context_window.json. 
+ """ + return _supports_factory( + model=model, + custom_llm_provider=custom_llm_provider, + key="supports_native_streaming", + ) + + def supports_response_schema( model: str, custom_llm_provider: Optional[str] = None ) -> bool: From 6608770e645c99ec5fe76e646e8040bec57dba96 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 09:55:59 -0700 Subject: [PATCH 006/119] add fake_stream to llm http handler --- litellm/llms/custom_httpx/llm_http_handler.py | 2 ++ litellm/responses/main.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/litellm/llms/custom_httpx/llm_http_handler.py b/litellm/llms/custom_httpx/llm_http_handler.py index 01fe36acda..fa4cd5dcc6 100644 --- a/litellm/llms/custom_httpx/llm_http_handler.py +++ b/litellm/llms/custom_httpx/llm_http_handler.py @@ -978,6 +978,7 @@ class BaseLLMHTTPHandler: timeout: Optional[Union[float, httpx.Timeout]] = None, client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, _is_async: bool = False, + fake_stream: bool = False, ) -> Union[ ResponsesAPIResponse, BaseResponsesAPIStreamingIterator, @@ -1100,6 +1101,7 @@ class BaseLLMHTTPHandler: extra_body: Optional[Dict[str, Any]] = None, timeout: Optional[Union[float, httpx.Timeout]] = None, client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + fake_stream: bool = False, ) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]: """ Async version of the responses API handler. 
diff --git a/litellm/responses/main.py b/litellm/responses/main.py index 43f37bdbc6..aec2f8fe4a 100644 --- a/litellm/responses/main.py +++ b/litellm/responses/main.py @@ -232,6 +232,9 @@ def responses( timeout=timeout or request_timeout, _is_async=_is_async, client=kwargs.get("client"), + fake_stream=responses_api_provider_config.should_fake_stream( + model=model, stream=stream, custom_llm_provider=custom_llm_provider + ), ) return response From 69c70ad80a4bbf5a223679c026c61c01691c2e2a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 12:06:36 -0700 Subject: [PATCH 007/119] add AsyncMockResponsesAPIStreamingIterator --- litellm/responses/streaming_iterator.py | 61 +++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/litellm/responses/streaming_iterator.py b/litellm/responses/streaming_iterator.py index c016e71e7e..ec0ab13a2d 100644 --- a/litellm/responses/streaming_iterator.py +++ b/litellm/responses/streaming_iterator.py @@ -11,6 +11,7 @@ from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLogging from litellm.litellm_core_utils.thread_pool_executor import executor from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig from litellm.types.llms.openai import ( + ResponseCompletedEvent, ResponsesAPIStreamEvents, ResponsesAPIStreamingResponse, ) @@ -207,3 +208,63 @@ class SyncResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator): start_time=self.start_time, end_time=datetime.now(), ) + + +class AsyncMockResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator): + """ + Async mock iterator - some models like o1-pro do not support streaming, we need to fake a stream + """ + + def __init__( + self, + response: httpx.Response, + model: str, + responses_api_provider_config: BaseResponsesAPIConfig, + logging_obj: LiteLLMLoggingObj, + ): + self.raw_http_response = response + super().__init__( + response=response, + model=model, + 
responses_api_provider_config=responses_api_provider_config, + logging_obj=logging_obj, + ) + self.is_done = False + + def __aiter__(self): + return self + + async def __anext__(self) -> ResponsesAPIStreamingResponse: + if self.is_done: + raise StopAsyncIteration + self.is_done = True + transformed_response = ( + self.responses_api_provider_config.transform_response_api_response( + model=self.model, + raw_response=self.raw_http_response, + logging_obj=self.logging_obj, + ) + ) + return ResponseCompletedEvent( + type=ResponsesAPIStreamEvents.RESPONSE_COMPLETED, + response=transformed_response, + ) + + def __iter__(self): + return self + + def __next__(self) -> ResponsesAPIStreamingResponse: + if self.is_done: + raise StopIteration + self.is_done = True + transformed_response = ( + self.responses_api_provider_config.transform_response_api_response( + model=self.model, + raw_response=self.raw_http_response, + logging_obj=self.logging_obj, + ) + ) + return ResponseCompletedEvent( + type=ResponsesAPIStreamEvents.RESPONSE_COMPLETED, + response=transformed_response, + ) From 830c51805c2cc006114f13ecdc2a1f8041c069f3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 12:25:58 -0700 Subject: [PATCH 008/119] MockResponsesAPIStreamingIterator --- litellm/responses/streaming_iterator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/responses/streaming_iterator.py b/litellm/responses/streaming_iterator.py index ec0ab13a2d..3039efb9f7 100644 --- a/litellm/responses/streaming_iterator.py +++ b/litellm/responses/streaming_iterator.py @@ -210,9 +210,9 @@ class SyncResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator): ) -class AsyncMockResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator): +class MockResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator): """ - Async mock iterator - some models like o1-pro do not support streaming, we need to fake a stream + mock iterator - some models like o1-pro do not 
support streaming, we need to fake a stream """ def __init__( From 435a89dd7915cd4af4119033a07c3e0cf7c5cd36 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 12:28:55 -0700 Subject: [PATCH 009/119] transform_responses_api_request --- litellm/llms/base_llm/responses/transformation.py | 2 +- litellm/llms/openai/responses/transformation.py | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py index 53a7a21290..8e0a1f5285 100644 --- a/litellm/llms/base_llm/responses/transformation.py +++ b/litellm/llms/base_llm/responses/transformation.py @@ -97,7 +97,7 @@ class BaseResponsesAPIConfig(ABC): response_api_optional_request_params: Dict, litellm_params: GenericLiteLLMParams, headers: dict, - ) -> ResponsesAPIRequestParams: + ) -> Dict: pass @abstractmethod diff --git a/litellm/llms/openai/responses/transformation.py b/litellm/llms/openai/responses/transformation.py index 891b8849db..e062c0c9fa 100644 --- a/litellm/llms/openai/responses/transformation.py +++ b/litellm/llms/openai/responses/transformation.py @@ -65,10 +65,12 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig): response_api_optional_request_params: Dict, litellm_params: GenericLiteLLMParams, headers: dict, - ) -> ResponsesAPIRequestParams: + ) -> Dict: """No transform applied since inputs are in OpenAI spec already""" - return ResponsesAPIRequestParams( - model=model, input=input, **response_api_optional_request_params + return dict( + ResponsesAPIRequestParams( + model=model, input=input, **response_api_optional_request_params + ) ) def transform_response_api_response( From 6d4cf6581d5fcebe14540dc9f52c28d8075d8b8b Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 12:30:09 -0700 Subject: [PATCH 010/119] MockResponsesAPIStreamingIterator --- litellm/llms/custom_httpx/llm_http_handler.py | 51 +++++++++++++++++-- 1 file changed, 48 insertions(+), 3 
deletions(-) diff --git a/litellm/llms/custom_httpx/llm_http_handler.py b/litellm/llms/custom_httpx/llm_http_handler.py index fa4cd5dcc6..00caf55207 100644 --- a/litellm/llms/custom_httpx/llm_http_handler.py +++ b/litellm/llms/custom_httpx/llm_http_handler.py @@ -20,6 +20,7 @@ from litellm.llms.custom_httpx.http_handler import ( ) from litellm.responses.streaming_iterator import ( BaseResponsesAPIStreamingIterator, + MockResponsesAPIStreamingIterator, ResponsesAPIStreamingIterator, SyncResponsesAPIStreamingIterator, ) @@ -1004,6 +1005,7 @@ class BaseLLMHTTPHandler: extra_body=extra_body, timeout=timeout, client=client if isinstance(client, AsyncHTTPHandler) else None, + fake_stream=fake_stream, ) if client is None or not isinstance(client, HTTPHandler): @@ -1052,14 +1054,27 @@ class BaseLLMHTTPHandler: try: if stream: # For streaming, use stream=True in the request + if fake_stream is True: + stream, data = self._prepare_fake_stream_request( + stream=stream, + data=data, + fake_stream=fake_stream, + ) response = sync_httpx_client.post( url=api_base, headers=headers, data=json.dumps(data), timeout=timeout or response_api_optional_request_params.get("timeout"), - stream=True, + stream=stream, ) + if fake_stream is True: + return MockResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) return SyncResponsesAPIStreamingIterator( response=response, @@ -1147,22 +1162,36 @@ class BaseLLMHTTPHandler: "headers": headers, }, ) - # Check if streaming is requested stream = response_api_optional_request_params.get("stream", False) try: if stream: # For streaming, we need to use stream=True in the request + if fake_stream is True: + stream, data = self._prepare_fake_stream_request( + stream=stream, + data=data, + fake_stream=fake_stream, + ) + response = await async_httpx_client.post( url=api_base, headers=headers, data=json.dumps(data), timeout=timeout or 
response_api_optional_request_params.get("timeout"), - stream=True, + stream=stream, ) + if fake_stream is True: + return MockResponsesAPIStreamingIterator( + response=response, + model=model, + logging_obj=logging_obj, + responses_api_provider_config=responses_api_provider_config, + ) + # Return the streaming iterator return ResponsesAPIStreamingIterator( response=response, @@ -1179,6 +1208,7 @@ class BaseLLMHTTPHandler: timeout=timeout or response_api_optional_request_params.get("timeout"), ) + except Exception as e: raise self._handle_error( e=e, @@ -1191,6 +1221,21 @@ class BaseLLMHTTPHandler: logging_obj=logging_obj, ) + def _prepare_fake_stream_request( + self, + stream: bool, + data: dict, + fake_stream: bool, + ) -> Tuple[bool, dict]: + """ + Handles preparing a request when `fake_stream` is True. + """ + if fake_stream is True: + stream = False + data.pop("stream", None) + return stream, data + return stream, data + def _handle_error( self, e: Exception, From cfd075d709c11de905f1750ec30f1d259c137480 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 13:04:49 -0700 Subject: [PATCH 011/119] test_openai_o1_pro_response_api_streaming --- .../test_openai_responses_api.py | 123 +++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) diff --git a/tests/llm_responses_api_testing/test_openai_responses_api.py b/tests/llm_responses_api_testing/test_openai_responses_api.py index 709fb316fe..677e13b08a 100644 --- a/tests/llm_responses_api_testing/test_openai_responses_api.py +++ b/tests/llm_responses_api_testing/test_openai_responses_api.py @@ -829,7 +829,8 @@ async def test_async_bad_request_bad_param_error(): @pytest.mark.asyncio -async def test_openai_o1_pro_response_api(): +@pytest.mark.parametrize("sync_mode", [True, False]) +async def test_openai_o1_pro_response_api(sync_mode): """ Test that LiteLLM correctly handles an incomplete response from OpenAI's o1-pro model due to reaching max_output_tokens limit. 
@@ -921,3 +922,123 @@ async def test_openai_o1_pro_response_api(): # Validate that the response is properly identified as incomplete validate_responses_api_response(response, final_chunk=True) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("sync_mode", [True, False]) +async def test_openai_o1_pro_response_api_streaming(sync_mode): + """ + Test that LiteLLM correctly handles an incomplete response from OpenAI's o1-pro model + due to reaching max_output_tokens limit in both sync and async streaming modes. + """ + # Mock response from o1-pro + mock_response = { + "id": "resp_67dc3dd77b388190822443a85252da5a0e13d8bdc0e28d88", + "object": "response", + "created_at": 1742486999, + "status": "incomplete", + "error": None, + "incomplete_details": {"reason": "max_output_tokens"}, + "instructions": None, + "max_output_tokens": 20, + "model": "o1-pro-2025-03-19", + "output": [ + { + "type": "reasoning", + "id": "rs_67dc3de50f64819097450ed50a33d5f90e13d8bdc0e28d88", + "summary": [], + } + ], + "parallel_tool_calls": True, + "previous_response_id": None, + "reasoning": {"effort": "medium", "generate_summary": None}, + "store": True, + "temperature": 1.0, + "text": {"format": {"type": "text"}}, + "tool_choice": "auto", + "tools": [], + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 73, + "input_tokens_details": {"cached_tokens": 0}, + "output_tokens": 20, + "output_tokens_details": {"reasoning_tokens": 0}, + "total_tokens": 93, + }, + "user": None, + "metadata": {}, + } + + class MockResponse: + def __init__(self, json_data, status_code): + self._json_data = json_data + self.status_code = status_code + self.text = json.dumps(json_data) + + def json(self): + return self._json_data + + with patch( + "litellm.llms.custom_httpx.http_handler.AsyncHTTPHandler.post", + new_callable=AsyncMock, + ) as mock_post: + # Configure the mock to return our response + mock_post.return_value = MockResponse(mock_response, 200) + + litellm._turn_on_debug() + 
litellm.set_verbose = True + + # Verify the request was made correctly + if sync_mode: + # For sync mode, we need to patch the sync HTTP handler + with patch( + "litellm.llms.custom_httpx.http_handler.HTTPHandler.post", + return_value=MockResponse(mock_response, 200), + ) as mock_sync_post: + response = litellm.responses( + model="openai/o1-pro", + input="Write a detailed essay about artificial intelligence and its impact on society", + max_output_tokens=20, + stream=True, + ) + + # Process the sync stream + event_count = 0 + for event in response: + print( + f"Sync litellm response #{event_count}:", + json.dumps(event, indent=4, default=str), + ) + event_count += 1 + + # Verify the sync request was made correctly + mock_sync_post.assert_called_once() + request_body = json.loads(mock_sync_post.call_args.kwargs["data"]) + assert request_body["model"] == "o1-pro" + assert request_body["max_output_tokens"] == 20 + assert "stream" not in request_body + else: + # For async mode + response = await litellm.aresponses( + model="openai/o1-pro", + input="Write a detailed essay about artificial intelligence and its impact on society", + max_output_tokens=20, + stream=True, + ) + + # Process the async stream + event_count = 0 + async for event in response: + print( + f"Async litellm response #{event_count}:", + json.dumps(event, indent=4, default=str), + ) + event_count += 1 + + # Verify the async request was made correctly + mock_post.assert_called_once() + request_body = json.loads(mock_post.call_args.kwargs["data"]) + assert request_body["model"] == "o1-pro" + assert request_body["max_output_tokens"] == 20 + assert "stream" not in request_body From 4be40d1ba0eebcc1fee1a5870f1ea56ed9f90e8f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 13:34:57 -0700 Subject: [PATCH 012/119] supports_native_streaming --- litellm/utils.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/litellm/utils.py b/litellm/utils.py index 
6bf1560a3b..8696b6712d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1989,11 +1989,23 @@ def supports_native_streaming(model: str, custom_llm_provider: Optional[str]) -> Raises: Exception: If the given model is not found in model_prices_and_context_window.json. """ - return _supports_factory( - model=model, - custom_llm_provider=custom_llm_provider, - key="supports_native_streaming", - ) + try: + model, custom_llm_provider, _, _ = litellm.get_llm_provider( + model=model, custom_llm_provider=custom_llm_provider + ) + + model_info = _get_model_info_helper( + model=model, custom_llm_provider=custom_llm_provider + ) + supports_native_streaming = ( + model_info.get("supports_native_streaming", True) or True + ) + return supports_native_streaming + except Exception as e: + verbose_logger.debug( + f"Model not found or error in checking supports_native_streaming support. You passed model={model}, custom_llm_provider={custom_llm_provider}. Error: {str(e)}" + ) + return False def supports_response_schema( From 360931ef1bd041d30178957e98bed53f57ad4c35 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 13:52:30 -0700 Subject: [PATCH 013/119] supports_native_streaming --- litellm/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/litellm/utils.py b/litellm/utils.py index 8696b6712d..52dbccb0c8 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1997,9 +1997,9 @@ def supports_native_streaming(model: str, custom_llm_provider: Optional[str]) -> model_info = _get_model_info_helper( model=model, custom_llm_provider=custom_llm_provider ) - supports_native_streaming = ( - model_info.get("supports_native_streaming", True) or True - ) + supports_native_streaming = model_info.get("supports_native_streaming", True) + if supports_native_streaming is None: + supports_native_streaming = True return supports_native_streaming except Exception as e: verbose_logger.debug( From 3088204ac28f386a95007de7ece3808f6f8a6c2c Mon Sep 17 
00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 13:57:35 -0700 Subject: [PATCH 014/119] fix code quality checks --- litellm/llms/base_llm/responses/transformation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/litellm/llms/base_llm/responses/transformation.py b/litellm/llms/base_llm/responses/transformation.py index 8e0a1f5285..29555c55da 100644 --- a/litellm/llms/base_llm/responses/transformation.py +++ b/litellm/llms/base_llm/responses/transformation.py @@ -7,7 +7,6 @@ import httpx from litellm.types.llms.openai import ( ResponseInputParam, ResponsesAPIOptionalRequestParams, - ResponsesAPIRequestParams, ResponsesAPIResponse, ResponsesAPIStreamingResponse, ) From baf859f580c01a8d0ff82607108557580a8d758f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 20 Mar 2025 14:50:00 -0700 Subject: [PATCH 015/119] test_prepare_fake_stream_request --- .../custom_httpx/test_llm_http_handler.py | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 tests/litellm/llms/custom_httpx/test_llm_http_handler.py diff --git a/tests/litellm/llms/custom_httpx/test_llm_http_handler.py b/tests/litellm/llms/custom_httpx/test_llm_http_handler.py new file mode 100644 index 0000000000..26fc18de16 --- /dev/null +++ b/tests/litellm/llms/custom_httpx/test_llm_http_handler.py @@ -0,0 +1,77 @@ +import io +import os +import pathlib +import ssl +import sys +from unittest.mock import MagicMock + +import pytest + +sys.path.insert( + 0, os.path.abspath("../../../..") +) # Adds the parent directory to the system path +import litellm +from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler + + +def test_prepare_fake_stream_request(): + # Initialize the BaseLLMHTTPHandler + handler = BaseLLMHTTPHandler() + + # Test case 1: fake_stream is True + stream = True + data = { + "stream": True, + "model": "gpt-4", + "messages": [{"role": "user", "content": "Hello"}], + } + fake_stream = True + + result_stream, result_data = 
handler._prepare_fake_stream_request( + stream=stream, data=data, fake_stream=fake_stream + ) + + # Verify that stream is set to False + assert result_stream is False + # Verify that "stream" key is removed from data + assert "stream" not in result_data + # Verify other data remains unchanged + assert result_data["model"] == "gpt-4" + assert result_data["messages"] == [{"role": "user", "content": "Hello"}] + + # Test case 2: fake_stream is False + stream = True + data = { + "stream": True, + "model": "gpt-4", + "messages": [{"role": "user", "content": "Hello"}], + } + fake_stream = False + + result_stream, result_data = handler._prepare_fake_stream_request( + stream=stream, data=data, fake_stream=fake_stream + ) + + # Verify that stream remains True + assert result_stream is True + # Verify that data remains unchanged + assert "stream" in result_data + assert result_data["stream"] is True + assert result_data["model"] == "gpt-4" + assert result_data["messages"] == [{"role": "user", "content": "Hello"}] + + # Test case 3: data doesn't have stream key but fake_stream is True + stream = True + data = {"model": "gpt-4", "messages": [{"role": "user", "content": "Hello"}]} + fake_stream = True + + result_stream, result_data = handler._prepare_fake_stream_request( + stream=stream, data=data, fake_stream=fake_stream + ) + + # Verify that stream is set to False + assert result_stream is False + # Verify that data remains unchanged (since there was no stream key to remove) + assert "stream" not in result_data + assert result_data["model"] == "gpt-4" + assert result_data["messages"] == [{"role": "user", "content": "Hello"}] From bc71407c831dcb8c789e4b561a8786f953389328 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 20 Mar 2025 15:05:49 -0700 Subject: [PATCH 016/119] docs(release_cycle.md): clarify release cycle for stable releases on docs --- docs/my-website/docs/proxy/release_cycle.md | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git 
a/docs/my-website/docs/proxy/release_cycle.md b/docs/my-website/docs/proxy/release_cycle.md index 947a4ae6b3..c5782087f2 100644 --- a/docs/my-website/docs/proxy/release_cycle.md +++ b/docs/my-website/docs/proxy/release_cycle.md @@ -4,9 +4,17 @@ Litellm Proxy has the following release cycle: - `v1.x.x-nightly`: These are releases which pass ci/cd. - `v1.x.x.rc`: These are releases which pass ci/cd + [manual review](https://github.com/BerriAI/litellm/discussions/8495#discussioncomment-12180711). -- `v1.x.x` OR `v1.x.x-stable`: These are releases which pass ci/cd + manual review + 3 days of production testing. +- `v1.x.x:main-stable`: These are releases which pass ci/cd + manual review + 3 days of production testing. -In production, we recommend using the latest `v1.x.x` release. +In production, we recommend using the latest `v1.x.x:main-stable` release. -Follow our release notes [here](https://github.com/BerriAI/litellm/releases). \ No newline at end of file +Follow our release notes [here](https://github.com/BerriAI/litellm/releases). + + +## FAQ + +### Is there a release schedule for LiteLLM stable release? 
+ +Stable releases come out every week (typically Sunday) + From 45a65d71f33140888927f1f9f59107bb5ceaa1ce Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 20 Mar 2025 20:11:49 -0700 Subject: [PATCH 017/119] feat(pass_through_endpoints.py): support returning api-base on pass-through endpoints Make it easier to debug what the api base sent to provider was --- litellm/proxy/common_request_processing.py | 4 +++- .../pass_through_endpoints.py | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/litellm/proxy/common_request_processing.py b/litellm/proxy/common_request_processing.py index 7f131efb04..fcc13509ce 100644 --- a/litellm/proxy/common_request_processing.py +++ b/litellm/proxy/common_request_processing.py @@ -57,7 +57,9 @@ class ProxyBaseLLMRequestProcessing: "x-litellm-call-id": call_id, "x-litellm-model-id": model_id, "x-litellm-cache-key": cache_key, - "x-litellm-model-api-base": api_base, + "x-litellm-model-api-base": ( + api_base.split("?")[0] if api_base else None + ), # don't include query params, risk of leaking sensitive info "x-litellm-version": version, "x-litellm-model-region": model_region, "x-litellm-response-cost": str(response_cost), diff --git a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py index b13d614678..63398e553b 100644 --- a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py @@ -284,7 +284,9 @@ class HttpPassThroughEndpointHelpers: @staticmethod def get_response_headers( - headers: httpx.Headers, litellm_call_id: Optional[str] = None + headers: httpx.Headers, + litellm_call_id: Optional[str] = None, + custom_headers: Optional[dict] = None, ) -> dict: excluded_headers = {"transfer-encoding", "content-encoding"} @@ -295,6 +297,8 @@ class HttpPassThroughEndpointHelpers: } if litellm_call_id: return_headers["x-litellm-call-id"] = 
litellm_call_id + if custom_headers: + return_headers.update(custom_headers) return return_headers @@ -596,12 +600,20 @@ async def pass_through_request( # noqa: PLR0915 ) ) + custom_headers = ProxyBaseLLMRequestProcessing.get_custom_headers( + user_api_key_dict=user_api_key_dict, + call_id=litellm_call_id, + model_id=None, + cache_key=None, + api_base=str(url._uri_reference), + ) + return Response( content=content, status_code=response.status_code, headers=HttpPassThroughEndpointHelpers.get_response_headers( headers=response.headers, - litellm_call_id=litellm_call_id, + custom_headers=custom_headers, ), ) except Exception as e: From d41573886050d07840589edea7617f74092436da Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 20 Mar 2025 20:19:52 -0700 Subject: [PATCH 018/119] feat(pass_through_endpoints.py): return api base on pass-through exception enables easy debugging on backend api errors --- .../pass_through_endpoints.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py index 63398e553b..a13b0dc216 100644 --- a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py @@ -1,6 +1,7 @@ import ast import asyncio import json +import uuid from base64 import b64encode from datetime import datetime from typing import Dict, List, Optional, Union @@ -369,8 +370,9 @@ async def pass_through_request( # noqa: PLR0915 query_params: Optional[dict] = None, stream: Optional[bool] = None, ): + litellm_call_id = str(uuid.uuid4()) + url: Optional[httpx.URL] = None try: - import uuid from litellm.litellm_core_utils.litellm_logging import Logging from litellm.proxy.proxy_server import proxy_logging_obj @@ -420,8 +422,6 @@ async def pass_through_request( # noqa: PLR0915 ) async_client = async_client_obj.client - litellm_call_id = 
str(uuid.uuid4()) - # create logging object start_time = datetime.now() logging_obj = Logging( @@ -600,6 +600,7 @@ async def pass_through_request( # noqa: PLR0915 ) ) + ## CUSTOM HEADERS - `x-litellm-*` custom_headers = ProxyBaseLLMRequestProcessing.get_custom_headers( user_api_key_dict=user_api_key_dict, call_id=litellm_call_id, @@ -617,6 +618,13 @@ async def pass_through_request( # noqa: PLR0915 ), ) except Exception as e: + custom_headers = ProxyBaseLLMRequestProcessing.get_custom_headers( + user_api_key_dict=user_api_key_dict, + call_id=litellm_call_id, + model_id=None, + cache_key=None, + api_base=str(url._uri_reference) if url else None, + ) verbose_proxy_logger.exception( "litellm.proxy.proxy_server.pass_through_endpoint(): Exception occured - {}".format( str(e) @@ -628,6 +636,7 @@ async def pass_through_request( # noqa: PLR0915 type=getattr(e, "type", "None"), param=getattr(e, "param", "None"), code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST), + headers=custom_headers, ) else: error_msg = f"{str(e)}" @@ -636,6 +645,7 @@ async def pass_through_request( # noqa: PLR0915 type=getattr(e, "type", "None"), param=getattr(e, "param", "None"), code=getattr(e, "status_code", 500), + headers=custom_headers, ) From edf7eb867d5e6813f5280896db87db39bac59e2c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 20 Mar 2025 20:28:03 -0700 Subject: [PATCH 019/119] fix(team_endpoints.py): consistently return 404 if team not found in DB Fixes response on /team/delete --- litellm/proxy/management_endpoints/team_endpoints.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/litellm/proxy/management_endpoints/team_endpoints.py b/litellm/proxy/management_endpoints/team_endpoints.py index 1994e27ecf..f5bcc6ba11 100644 --- a/litellm/proxy/management_endpoints/team_endpoints.py +++ b/litellm/proxy/management_endpoints/team_endpoints.py @@ -470,7 +470,7 @@ async def update_team( if existing_team_row is None: raise HTTPException( - 
status_code=400, + status_code=404, detail={"error": f"Team not found, passed team_id={data.team_id}"}, ) @@ -1137,14 +1137,16 @@ async def delete_team( team_rows: List[LiteLLM_TeamTable] = [] for team_id in data.team_ids: try: - team_row_base: BaseModel = ( + team_row_base: Optional[BaseModel] = ( await prisma_client.db.litellm_teamtable.find_unique( where={"team_id": team_id} ) ) + if team_row_base is None: + raise Exception except Exception: raise HTTPException( - status_code=400, + status_code=404, detail={"error": f"Team not found, passed team_id={team_id}"}, ) team_row_pydantic = LiteLLM_TeamTable(**team_row_base.model_dump()) From fda69fb1901e4a3386e6ee11b381eb1fe458719f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 09:26:03 -0700 Subject: [PATCH 020/119] doc litellm MCP client --- docs/my-website/docs/mcp.md | 114 ++++++++++++++++++++++++++++++++++++ docs/my-website/sidebars.js | 1 + 2 files changed, 115 insertions(+) create mode 100644 docs/my-website/docs/mcp.md diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md new file mode 100644 index 0000000000..ef198de947 --- /dev/null +++ b/docs/my-website/docs/mcp.md @@ -0,0 +1,114 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# /mcp Model Context Protocol [Beta] + +Use Model Context Protocol with LiteLLM. 
+ +## Overview + +LiteLLM supports Model Context Protocol (MCP) tools by offering a client that exposes a tools method for retrieving tools from a MCP server + +## Usage + + + + +```python +import asyncio +from litellm import experimental_create_mcp_client, completion +from litellm.mcp_stdio import experimental_stdio_mcp_transport + +async def main(): + client_one = None + + try: + # Initialize an MCP client to connect to a `stdio` MCP server: + transport = experimental_stdio_mcp_transport( + command='node', + args=['src/stdio/dist/server.js'] + ) + client_one = await experimental_create_mcp_client( + transport=transport + ) + + tools = await client_one.list_tools(format="openai") + response = await litellm.completion( + model="gpt-4o", + tools=tools, + messages=[ + { + "role": "user", + "content": "Find products under $100" + } + ] + ) + + print(response.text) + except Exception as error: + print(error) + finally: + await asyncio.gather( + client_one.close() if client_one else asyncio.sleep(0), + ) + +if __name__ == "__main__": + asyncio.run(main()) +``` + + + + +```python +import asyncio +from openai import OpenAI +from litellm import experimental_create_mcp_client +from litellm.mcp_stdio import experimental_stdio_mcp_transport + +async def main(): + client_one = None + + try: + # Initialize an MCP client to connect to a `stdio` MCP server: + transport = experimental_stdio_mcp_transport( + command='node', + args=['src/stdio/dist/server.js'] + ) + client_one = await experimental_create_mcp_client( + transport=transport + ) + + # Get tools from MCP client + tools = await client_one.list_tools(format="openai") + + # Use OpenAI client connected to LiteLLM Proxy Server + client = openai.OpenAI( + api_key="sk-1234", + base_url="http://0.0.0.0:4000" + ) + response = client.chat.completions.create( + model="gpt-4", + tools=tools, + messages=[ + { + "role": "user", + "content": "Find products under $100" + } + ] + ) + + print(response.choices[0].message.content) + except 
Exception as error: + print(error) + finally: + await asyncio.gather( + client_one.close() if client_one else asyncio.sleep(0), + ) + +if __name__ == "__main__": + asyncio.run(main()) +``` + + + + diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 1df988f372..a1499cc0b0 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -293,6 +293,7 @@ const sidebars = { "text_completion", "embedding/supported_embedding", "anthropic_unified", + "mcp", { type: "category", label: "/images", From 941fe82e19ef0a736c14606d33ae2dd0e0af07ea Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 09:33:09 -0700 Subject: [PATCH 021/119] docs add central platform team control on MCP --- docs/my-website/docs/mcp.md | 204 ++++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index ef198de947..fa894a85a8 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -112,3 +112,207 @@ if __name__ == "__main__": +## Advanced + +### Expose MCP tools on LiteLLM Proxy Server + +This allows you to define tools that can be called by any MCP compatible client. Define your mcp_tools with LiteLLM and all your clients can list and call available tools. + +#### How it works + +LiteLLM exposes the following MCP endpoints: + +- `/mcp/list_tools` - List all available tools +- `/mcp/call_tool` - Call a specific tool with the provided arguments + +When MCP clients connect to LiteLLM they can follow this workflow: + +1. Connect to the LiteLLM MCP server +2. List all available tools on LiteLLM +3. Client makes LLM API request with tool call(s) +4. LLM API returns which tools to call and with what arguments +5. MCP client makes tool calls to LiteLLM +6. LiteLLM makes the tool calls to the appropriate handlers +7. LiteLLM returns the tool call results to the MCP client + +#### Usage + +#### 1. 
Define your tools on mcp_tools + +LiteLLM allows you to define your tools on the `mcp_tools` section in your config.yaml file. All tools listed here will be available to MCP clients (when they connect to LiteLLM and call `list_tools`). + +```yaml +model_list: + - model_name: gpt-4o + litellm_params: + model: openai/gpt-4o + api_key: sk-xxxxxxx + + + +mcp_tools: + - name: "get_current_time" + description: "Get the current time" + input_schema: { + "type": "object", + "properties": { + "format": { + "type": "string", + "description": "The format of the time to return", + "enum": ["short"] + } + } + } + handler: "mcp_tools.get_current_time" +``` + +#### 2. Define a handler for your tool + +Create a new file called `mcp_tools.py` and add this code. The key method here is `get_current_time` which gets executed when the `get_current_time` tool is called. + +```python +# mcp_tools.py + +from datetime import datetime + +def get_current_time(format: str = "short"): + """ + Simple handler for the 'get_current_time' tool. + + Args: + format (str): The format of the time to return ('short'). + + Returns: + str: The current time formatted as 'HH:MM'. + """ + # Get the current time + current_time = datetime.now() + + # Format the time as 'HH:MM' + return current_time.strftime('%H:%M') +``` + +#### 3. Start LiteLLM Gateway + + + + +Mount your `mcp_tools.py` on the LiteLLM Docker container. + +```shell +docker run -d \ + -p 4000:4000 \ + -e OPENAI_API_KEY=$OPENAI_API_KEY \ + --name my-app \ + -v $(pwd)/my_config.yaml:/app/config.yaml \ + -v $(pwd)/mcp_tools.py:/app/mcp_tools.py \ + my-app:latest \ + --config /app/config.yaml \ + --port 4000 \ + --detailed_debug \ +``` + + + + + +```shell +litellm --config config.yaml --detailed_debug +``` + + + + + +#### 3. 
Make an LLM API request + + + +```python +import asyncio +from langchain_mcp_adapters.tools import load_mcp_tools +from langchain_openai import ChatOpenAI +from langgraph.prebuilt import create_react_agent +from mcp import ClientSession +from mcp.client.sse import sse_client + + +async def main(): + # Initialize the model with your API key + model = ChatOpenAI(model="gpt-4o") + + # Connect to the MCP server + async with sse_client(url="http://localhost:4000/mcp/") as (read, write): + async with ClientSession(read, write) as session: + # Initialize the session + print("Initializing session...") + await session.initialize() + print("Session initialized") + + # Load available tools from MCP + print("Loading tools...") + tools = await load_mcp_tools(session) + print(f"Loaded {len(tools)} tools") + + # Create a ReAct agent with the model and tools + agent = create_react_agent(model, tools) + + # Run the agent with a user query + user_query = "What's the weather in Tokyo?" + print(f"Asking: {user_query}") + agent_response = await agent.ainvoke({"messages": user_query}) + print("Agent response:") + print(agent_response) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + + +### Specification for `mcp_tools` + +The `mcp_tools` section in your LiteLLM config defines tools that can be called by MCP-compatible clients. 
+ +#### Tool Definition Format + +```yaml +mcp_tools: + - name: string # Required: Name of the tool + description: string # Required: Description of what the tool does + input_schema: object # Required: JSON Schema defining the tool's input parameters + handler: string # Required: Path to the function that implements the tool +``` + +#### Field Details + +- `name`: A unique identifier for the tool +- `description`: A clear description of what the tool does, used by LLMs to determine when to call it +- `input_schema`: JSON Schema object defining the expected input parameters +- `handler`: String path to the Python function that implements the tool (e.g., "module.submodule.function_name") + +#### Example Tool Definition + +```yaml +mcp_tools: + - name: "get_current_time" + description: "Get the current time in a specified format" + input_schema: { + "type": "object", + "properties": { + "format": { + "type": "string", + "description": "The format of the time to return", + "enum": ["short", "long", "iso"] + }, + "timezone": { + "type": "string", + "description": "The timezone to use (e.g., 'UTC', 'America/New_York')", + "default": "UTC" + } + }, + "required": ["format"] + } + handler: "mcp_tools.get_current_time" +``` From 2983c9238ef83d033beb80094c2aac854986062a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 09:34:38 -0700 Subject: [PATCH 022/119] docs litellm mcp --- docs/my-website/docs/mcp.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index fa894a85a8..0d64aefb6e 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -16,7 +16,8 @@ LiteLLM supports Model Context Protocol (MCP) tools by offering a client that ex ```python import asyncio -from litellm import experimental_create_mcp_client, completion +import litellm +from litellm import experimental_create_mcp_client from litellm.mcp_stdio import experimental_stdio_mcp_transport async def 
main(): @@ -82,7 +83,7 @@ async def main(): tools = await client_one.list_tools(format="openai") # Use OpenAI client connected to LiteLLM Proxy Server - client = openai.OpenAI( + client = OpenAI( api_key="sk-1234", base_url="http://0.0.0.0:4000" ) @@ -224,7 +225,7 @@ litellm --config config.yaml --detailed_debug -#### 3. Make an LLM API request +#### 4. Make an LLM API request From 8d0d85577ca53e004a1f2877e2d76345c574eecf Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 09:37:04 -0700 Subject: [PATCH 023/119] docs litellm mcp --- docs/my-website/docs/mcp.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 0d64aefb6e..fbdeaa191f 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -7,7 +7,8 @@ Use Model Context Protocol with LiteLLM. ## Overview -LiteLLM supports Model Context Protocol (MCP) tools by offering a client that exposes a tools method for retrieving tools from a MCP server + +LiteLLM acts as a MCP bridge to utilize **MCP tools** with **all LiteLLM supported models**. LiteLLM offers a client that exposes a tools method for retrieving tools from a MCP server. 
## Usage From ea8ef3a0d6562820ec5864596c049f3f5fee80a7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:11:06 -0700 Subject: [PATCH 024/119] simple MCP interface --- tests/mcp_tests/mcp_server.py | 20 ++++++++++++ tests/mcp_tests/test_mcp_litellm_client.py | 38 ++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 tests/mcp_tests/mcp_server.py create mode 100644 tests/mcp_tests/test_mcp_litellm_client.py diff --git a/tests/mcp_tests/mcp_server.py b/tests/mcp_tests/mcp_server.py new file mode 100644 index 0000000000..99a67edd02 --- /dev/null +++ b/tests/mcp_tests/mcp_server.py @@ -0,0 +1,20 @@ +# math_server.py +from mcp.server.fastmcp import FastMCP + +mcp = FastMCP("Math") + + +@mcp.tool() +def add(a: int, b: int) -> int: + """Add two numbers""" + return a + b + + +@mcp.tool() +def multiply(a: int, b: int) -> int: + """Multiply two numbers""" + return a * b + + +if __name__ == "__main__": + mcp.run(transport="stdio") diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py new file mode 100644 index 0000000000..19a40f68c0 --- /dev/null +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -0,0 +1,38 @@ +# Create server parameters for stdio connection +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client +import os +from langchain_mcp_adapters.tools import load_mcp_tools +from langgraph.prebuilt import create_react_agent + +from langchain_openai import ChatOpenAI + +import pytest + + +@pytest.mark.asyncio +async def test_mcp_agent(): + server_params = StdioServerParameters( + command="python3", + # Make sure to update to the full absolute path to your math_server.py file + args=["./mcp_server.py"], + ) + + async with stdio_client(server_params) as (read, write): + async with ClientSession(read, write) as session: + # Initialize the connection + await session.initialize() + + # Get tools + tools = await load_mcp_tools(session) + print("MCP 
TOOLS: ", tools) + + # Create and run the agent + print(os.getenv("OPENAI_API_KEY")) + model = ChatOpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY")) + agent = create_react_agent(model, tools) + agent_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"}) + + # Add assertions to verify the response + assert isinstance(agent_response, dict) + print(agent_response) From b3b93dbd14b66b0d5f93ef1f98e1130417a9832c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 10:20:21 -0700 Subject: [PATCH 025/119] fix(anthropic/chat/transformation.py): correctly update response_format to tool call transformation Fixes https://github.com/BerriAI/litellm/issues/9411 --- litellm/llms/anthropic/chat/transformation.py | 2 +- tests/llm_translation/base_llm_unit_tests.py | 72 ++++++++++++++++++- .../test_anthropic_completion.py | 4 +- 3 files changed, 74 insertions(+), 4 deletions(-) diff --git a/litellm/llms/anthropic/chat/transformation.py b/litellm/llms/anthropic/chat/transformation.py index 383c1cd3e5..aff70a6e62 100644 --- a/litellm/llms/anthropic/chat/transformation.py +++ b/litellm/llms/anthropic/chat/transformation.py @@ -387,7 +387,7 @@ class AnthropicConfig(BaseConfig): _input_schema["additionalProperties"] = True _input_schema["properties"] = {} else: - _input_schema["properties"] = {"values": json_schema} + _input_schema.update(json_schema) _tool = AnthropicMessagesTool( name=RESPONSE_FORMAT_TOOL_NAME, input_schema=_input_schema diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py index 32f631daad..f3614fdb4c 100644 --- a/tests/llm_translation/base_llm_unit_tests.py +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -20,6 +20,7 @@ from litellm.utils import ( get_optional_params, ProviderConfigManager, ) +from litellm.main import stream_chunk_builder from typing import Union # test_example.py @@ -338,7 +339,7 @@ class BaseLLMChatTest(ABC): @pytest.mark.flaky(retries=6, delay=1) def 
test_json_response_pydantic_obj(self): - litellm.set_verbose = True + litellm._turn_on_debug() from pydantic import BaseModel from litellm.utils import supports_response_schema @@ -995,3 +996,72 @@ class BaseOSeriesModelsTest(ABC): # test across azure/openai ), "temperature should not be in the request body" except Exception as e: pytest.fail(f"Error occurred: {e}") + + +class BaseAnthropicChatTest(ABC): + """ + Ensures consistent result across anthropic model usage + """ + + @abstractmethod + def get_base_completion_call_args(self) -> dict: + """Must return the base completion call args""" + pass + + @property + def completion_function(self): + return litellm.completion + + def test_anthropic_response_format_streaming_vs_non_streaming(self): + litellm.set_verbose = True + args = { + "messages": [ + { + "content": "Your goal is to summarize the previous agent's thinking process into short descriptions to let user better understand the research progress. If no information is available, just say generic phrase like 'Doing some research...' with the given output format. 
Make sure to adhere to the output format no matter what, even if you don't have any information or you are not allowed to respond to the given input information (then just say generic phrase like 'Doing some research...').", + "role": "system", + }, + { + "role": "user", + "content": "Here is the input data (previous agent's output): \n\n Let's try to refine our search further, focusing more on the technical aspects of home automation and home energy system management:", + }, + ], + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "final_output", + "strict": True, + "schema": { + "description": 'Progress report for the thinking process\n\nThis model represents a snapshot of the agent\'s current progress during\nthe thinking process, providing a brief description of the current activity.\n\nAttributes:\n agent_doing: Brief description of what the agent is currently doing.\n Should be kept under 10 words. Example: "Learning about home automation"', + "properties": { + "agent_doing": {"title": "Agent Doing", "type": "string"} + }, + "required": ["agent_doing"], + "title": "ThinkingStep", + "type": "object", + "additionalProperties": False, + }, + }, + }, + } + + base_completion_call_args = self.get_base_completion_call_args() + + response = self.completion_function( + **base_completion_call_args, **args, stream=True + ) + + chunks = [] + for chunk in response: + print(f"chunk: {chunk}") + chunks.append(chunk) + + print(f"chunks: {chunks}") + built_response = stream_chunk_builder(chunks=chunks) + + non_stream_response = self.completion_function( + **base_completion_call_args, **args, stream=False + ) + + assert json.loads(built_response.choices[0].message.content) == json.loads( + non_stream_response.choices[0].message.content + ), f"Got={json.loads(built_response.choices[0].message.content)}, Expected={json.loads(non_stream_response.choices[0].message.content)}" diff --git a/tests/llm_translation/test_anthropic_completion.py 
b/tests/llm_translation/test_anthropic_completion.py index da47e745e7..8f8f4084bb 100644 --- a/tests/llm_translation/test_anthropic_completion.py +++ b/tests/llm_translation/test_anthropic_completion.py @@ -36,7 +36,7 @@ from litellm.types.llms.openai import ChatCompletionToolCallFunctionChunk from litellm.llms.anthropic.common_utils import process_anthropic_headers from litellm.llms.anthropic.chat.handler import AnthropicChatCompletion from httpx import Headers -from base_llm_unit_tests import BaseLLMChatTest +from base_llm_unit_tests import BaseLLMChatTest, BaseAnthropicChatTest def streaming_format_tests(chunk: dict, idx: int): @@ -462,7 +462,7 @@ def test_create_json_tool_call_for_response_format(): from litellm import completion -class TestAnthropicCompletion(BaseLLMChatTest): +class TestAnthropicCompletion(BaseLLMChatTest, BaseAnthropicChatTest): def get_base_completion_call_args(self) -> dict: return {"model": "anthropic/claude-3-5-sonnet-20240620"} From 50ec2bd5c9dc5046e6b775fe05c87f8196a8458a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:25:22 -0700 Subject: [PATCH 026/119] basic MCP client structure --- .../litellm_core_utils/mcp_client/Readme.md | 6 ++++ .../litellm_core_utils/mcp_client/__init__.py | 0 .../litellm_core_utils/mcp_client/client.py | 0 .../litellm_core_utils/mcp_client/tools.py | 30 +++++++++++++++++++ 4 files changed, 36 insertions(+) create mode 100644 litellm/litellm_core_utils/mcp_client/Readme.md create mode 100644 litellm/litellm_core_utils/mcp_client/__init__.py create mode 100644 litellm/litellm_core_utils/mcp_client/client.py create mode 100644 litellm/litellm_core_utils/mcp_client/tools.py diff --git a/litellm/litellm_core_utils/mcp_client/Readme.md b/litellm/litellm_core_utils/mcp_client/Readme.md new file mode 100644 index 0000000000..4fbd624369 --- /dev/null +++ b/litellm/litellm_core_utils/mcp_client/Readme.md @@ -0,0 +1,6 @@ +# LiteLLM MCP Client + +LiteLLM MCP Client is a client that allows you to use 
MCP tools with LiteLLM. + + + diff --git a/litellm/litellm_core_utils/mcp_client/__init__.py b/litellm/litellm_core_utils/mcp_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/litellm/litellm_core_utils/mcp_client/client.py b/litellm/litellm_core_utils/mcp_client/client.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/litellm/litellm_core_utils/mcp_client/tools.py b/litellm/litellm_core_utils/mcp_client/tools.py new file mode 100644 index 0000000000..6dc9863853 --- /dev/null +++ b/litellm/litellm_core_utils/mcp_client/tools.py @@ -0,0 +1,30 @@ +from typing import List, Literal, Union + +from mcp import ClientSession +from mcp.types import Tool as MCPTool + +from litellm.types.llms.openai import Tool + + +def transform_mcp_tool_to_openai_tool(tool: MCPTool) -> Tool: + """Convert an MCP tool to an OpenAI tool.""" + raise NotImplementedError("Not implemented") + + +async def load_mcp_tools( + session: ClientSession, format: Literal["mcp", "openai"] = "mcp" +) -> Union[List[MCPTool], List[Tool]]: + """ + Load all available MCP tools + + Args: + session: The MCP session to use + format: The format to convert the tools to + By default, the tools are returned in MCP format. + + If format is set to "openai", the tools are converted to OpenAI tools. 
+ """ + tools = await session.list_tools() + if format == "openai": + return [transform_mcp_tool_to_openai_tool(tool) for tool in tools.tools] + return tools.tools From 039129676ca42d1d45d0ad9b2c4e92640d7c742f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:30:57 -0700 Subject: [PATCH 027/119] change location of MCP client --- litellm/{litellm_core_utils => }/mcp_client/Readme.md | 0 .../{litellm_core_utils => }/mcp_client/__init__.py | 0 litellm/{litellm_core_utils => }/mcp_client/client.py | 0 litellm/{litellm_core_utils => }/mcp_client/tools.py | 0 tests/mcp_tests/test_mcp_litellm_client.py | 11 ++++++++--- 5 files changed, 8 insertions(+), 3 deletions(-) rename litellm/{litellm_core_utils => }/mcp_client/Readme.md (100%) rename litellm/{litellm_core_utils => }/mcp_client/__init__.py (100%) rename litellm/{litellm_core_utils => }/mcp_client/client.py (100%) rename litellm/{litellm_core_utils => }/mcp_client/tools.py (100%) diff --git a/litellm/litellm_core_utils/mcp_client/Readme.md b/litellm/mcp_client/Readme.md similarity index 100% rename from litellm/litellm_core_utils/mcp_client/Readme.md rename to litellm/mcp_client/Readme.md diff --git a/litellm/litellm_core_utils/mcp_client/__init__.py b/litellm/mcp_client/__init__.py similarity index 100% rename from litellm/litellm_core_utils/mcp_client/__init__.py rename to litellm/mcp_client/__init__.py diff --git a/litellm/litellm_core_utils/mcp_client/client.py b/litellm/mcp_client/client.py similarity index 100% rename from litellm/litellm_core_utils/mcp_client/client.py rename to litellm/mcp_client/client.py diff --git a/litellm/litellm_core_utils/mcp_client/tools.py b/litellm/mcp_client/tools.py similarity index 100% rename from litellm/litellm_core_utils/mcp_client/tools.py rename to litellm/mcp_client/tools.py diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index 19a40f68c0..b42278b7fa 100644 --- 
a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -1,10 +1,15 @@ # Create server parameters for stdio connection +import os +import sys +import pytest + +sys.path.insert( + 0, os.path.abspath("../../..") +) # Adds the parent directory to the system path + from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from langchain_mcp_adapters.tools import load_mcp_tools -from langgraph.prebuilt import create_react_agent - from langchain_openai import ChatOpenAI import pytest From 6e888474e1db5e4320e75114991144a07b606e3f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:32:51 -0700 Subject: [PATCH 028/119] litellm MCP client 1 --- tests/mcp_tests/test_mcp_litellm_client.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index b42278b7fa..97b637af59 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -10,9 +10,10 @@ sys.path.insert( from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from langchain_openai import ChatOpenAI - +from litellm.mcp_client.tools import load_mcp_tools +import litellm import pytest +import json @pytest.mark.asyncio @@ -34,10 +35,14 @@ async def test_mcp_agent(): # Create and run the agent print(os.getenv("OPENAI_API_KEY")) - model = ChatOpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY")) - agent = create_react_agent(model, tools) - agent_response = await agent.ainvoke({"messages": "what's (3 + 5) x 12?"}) + llm_response = await litellm.acompletion( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY"), + messages=[{"role": "user", "content": "what's (3 + 5) x 12?"}], + tools=tools, + ) + print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) # Add assertions to verify the 
response - assert isinstance(agent_response, dict) - print(agent_response) + assert isinstance(llm_response, dict) + print(llm_response) From 6c51bdebe73d3d14b894b1f3e610955532c827ff Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 10:35:36 -0700 Subject: [PATCH 029/119] test: add unit testing --- .../test_anthropic_chat_transformation.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 tests/litellm/llms/anthropic/chat/test_anthropic_chat_transformation.py diff --git a/tests/litellm/llms/anthropic/chat/test_anthropic_chat_transformation.py b/tests/litellm/llms/anthropic/chat/test_anthropic_chat_transformation.py new file mode 100644 index 0000000000..04f2728284 --- /dev/null +++ b/tests/litellm/llms/anthropic/chat/test_anthropic_chat_transformation.py @@ -0,0 +1,35 @@ +import json +import os +import sys + +import pytest +from fastapi.testclient import TestClient + +sys.path.insert( + 0, os.path.abspath("../../../../..") +) # Adds the parent directory to the system path +from unittest.mock import MagicMock, patch + +from litellm.llms.anthropic.chat.transformation import AnthropicConfig + + +def test_response_format_transformation_unit_test(): + config = AnthropicConfig() + + response_format_json_schema = { + "description": 'Progress report for the thinking process\n\nThis model represents a snapshot of the agent\'s current progress during\nthe thinking process, providing a brief description of the current activity.\n\nAttributes:\n agent_doing: Brief description of what the agent is currently doing.\n Should be kept under 10 words. 
Example: "Learning about home automation"', + "properties": {"agent_doing": {"title": "Agent Doing", "type": "string"}}, + "required": ["agent_doing"], + "title": "ThinkingStep", + "type": "object", + "additionalProperties": False, + } + + result = config._create_json_tool_call_for_response_format( + json_schema=response_format_json_schema + ) + + assert result["input_schema"]["properties"] == { + "agent_doing": {"title": "Agent Doing", "type": "string"} + } + print(result) From b6756b78d21f986518a3d216a2ba9ea3297736b1 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 10:46:35 -0700 Subject: [PATCH 030/119] fix(handle_error.py): make cooldown error more descriptive --- litellm/router_utils/handle_error.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/litellm/router_utils/handle_error.py b/litellm/router_utils/handle_error.py index e1055a9d0f..132440cbc3 100644 --- a/litellm/router_utils/handle_error.py +++ b/litellm/router_utils/handle_error.py @@ -1,7 +1,9 @@ from typing import TYPE_CHECKING, Any, Optional from litellm._logging import verbose_router_logger -from litellm.router_utils.cooldown_handlers import _async_get_cooldown_deployments +from litellm.router_utils.cooldown_handlers import ( + _async_get_cooldown_deployments_with_debug_info, +) from litellm.types.integrations.slack_alerting import AlertType from litellm.types.router import RouterRateLimitError @@ -75,7 +77,7 @@ async def async_raise_no_deployment_exception( _cooldown_time = litellm_router_instance.cooldown_cache.get_min_cooldown( model_ids=model_ids, parent_otel_span=parent_otel_span ) - _cooldown_list = await _async_get_cooldown_deployments( + _cooldown_list = await _async_get_cooldown_deployments_with_debug_info( litellm_router_instance=litellm_router_instance, parent_otel_span=parent_otel_span, ) From cd6055cfb0b70826503dcc0b2a353410f77e4273 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:49:06 -0700 Subject: [PATCH 031/119] 
transform_mcp_tool_to_openai_tool --- litellm/mcp_client/tools.py | 22 ++++++++++++++++------ tests/mcp_tests/test_mcp_litellm_client.py | 2 +- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/litellm/mcp_client/tools.py b/litellm/mcp_client/tools.py index 6dc9863853..e6b403f975 100644 --- a/litellm/mcp_client/tools.py +++ b/litellm/mcp_client/tools.py @@ -2,18 +2,26 @@ from typing import List, Literal, Union from mcp import ClientSession from mcp.types import Tool as MCPTool - -from litellm.types.llms.openai import Tool +from openai.types.chat import ChatCompletionToolParam +from openai.types.shared_params.function_definition import FunctionDefinition -def transform_mcp_tool_to_openai_tool(tool: MCPTool) -> Tool: +def transform_mcp_tool_to_openai_tool(mcp_tool: MCPTool) -> ChatCompletionToolParam: """Convert an MCP tool to an OpenAI tool.""" - raise NotImplementedError("Not implemented") + return ChatCompletionToolParam( + type="function", + function=FunctionDefinition( + name=mcp_tool.name, + description=mcp_tool.description or "", + parameters=mcp_tool.inputSchema, + strict=False, + ), + ) async def load_mcp_tools( session: ClientSession, format: Literal["mcp", "openai"] = "mcp" -) -> Union[List[MCPTool], List[Tool]]: +) -> Union[List[MCPTool], List[ChatCompletionToolParam]]: """ Load all available MCP tools @@ -26,5 +34,7 @@ async def load_mcp_tools( """ tools = await session.list_tools() if format == "openai": - return [transform_mcp_tool_to_openai_tool(tool) for tool in tools.tools] + return [ + transform_mcp_tool_to_openai_tool(mcp_tool=tool) for tool in tools.tools + ] return tools.tools diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index 97b637af59..8efb728017 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -30,7 +30,7 @@ async def test_mcp_agent(): await session.initialize() # Get tools - tools = await load_mcp_tools(session) + 
tools = await load_mcp_tools(session=session, format="openai") print("MCP TOOLS: ", tools) # Create and run the agent From 7d606f0b75cbedf927503b240c6ddd9557252f7a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:50:55 -0700 Subject: [PATCH 032/119] fix llm responses --- tests/mcp_tests/test_mcp_litellm_client.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index 8efb728017..a4ca90eb1f 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -44,5 +44,10 @@ async def test_mcp_agent(): print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) # Add assertions to verify the response - assert isinstance(llm_response, dict) - print(llm_response) + assert llm_response["choices"][0]["message"]["tool_calls"] is not None + assert ( + llm_response["choices"][0]["message"]["tool_calls"][0]["function"][ + "name" + ] + == "add" + ) From 47a18285f1bf6f02ebbbda77089c8a40b5cf8c55 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 10:51:34 -0700 Subject: [PATCH 033/119] test: fix test - handle llm api inconsistency --- tests/llm_translation/base_llm_unit_tests.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py index f3614fdb4c..82a1ef40fb 100644 --- a/tests/llm_translation/base_llm_unit_tests.py +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -1062,6 +1062,7 @@ class BaseAnthropicChatTest(ABC): **base_completion_call_args, **args, stream=False ) - assert json.loads(built_response.choices[0].message.content) == json.loads( - non_stream_response.choices[0].message.content + assert ( + json.loads(built_response.choices[0].message.content).keys() + == json.loads(non_stream_response.choices[0].message.content).keys() ), 
f"Got={json.loads(built_response.choices[0].message.content)}, Expected={json.loads(non_stream_response.choices[0].message.content)}" From bde703b90cb5582e0ad9d458aa8ee650648679fd Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 10:53:10 -0700 Subject: [PATCH 034/119] fix beta caps --- docs/my-website/docs/mcp.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index fbdeaa191f..3f7d6fbc93 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -1,7 +1,7 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# /mcp Model Context Protocol [Beta] +# /mcp Model Context Protocol [BETA] Use Model Context Protocol with LiteLLM. From f6981606f7975a50fba2ee97cc5f600ec67e8466 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 12:20:21 -0700 Subject: [PATCH 035/119] fix: fix linting error --- litellm/llms/anthropic/chat/transformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/llms/anthropic/chat/transformation.py b/litellm/llms/anthropic/chat/transformation.py index aff70a6e62..1a77c453f4 100644 --- a/litellm/llms/anthropic/chat/transformation.py +++ b/litellm/llms/anthropic/chat/transformation.py @@ -387,7 +387,7 @@ class AnthropicConfig(BaseConfig): _input_schema["additionalProperties"] = True _input_schema["properties"] = {} else: - _input_schema.update(json_schema) + _input_schema.update(cast(AnthropicInputSchema, json_schema)) _tool = AnthropicMessagesTool( name=RESPONSE_FORMAT_TOOL_NAME, input_schema=_input_schema From 068102233c800ebc4f11aaef4b4a5450e0826ec2 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 14:19:05 -0700 Subject: [PATCH 036/119] docs(image_handling.md): architecture doc on image handling on the proxy --- docs/my-website/docs/proxy/image_handling.md | 21 +++++++++++++++++++ docs/my-website/img/image_handling.png | Bin 0 -> 68110 bytes 
docs/my-website/sidebars.js | 2 +- 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 docs/my-website/docs/proxy/image_handling.md create mode 100644 docs/my-website/img/image_handling.png diff --git a/docs/my-website/docs/proxy/image_handling.md b/docs/my-website/docs/proxy/image_handling.md new file mode 100644 index 0000000000..300ab0bc38 --- /dev/null +++ b/docs/my-website/docs/proxy/image_handling.md @@ -0,0 +1,21 @@ +import Image from '@theme/IdealImage'; +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Image URL Handling + + + +Some LLM API's don't support url's for images, but do support base-64 strings. + +For those, LiteLLM will: + +1. Detect a URL being passed +2. Check if the LLM API supports a URL +3. Else, will download the base64 +4. Send the provider a base64 string. + + +LiteLLM also caches this result, in-memory to reduce latency for subsequent calls. + +The limit for an in-memory cache is 1MB. \ No newline at end of file diff --git a/docs/my-website/img/image_handling.png b/docs/my-website/img/image_handling.png new file mode 100644 index 0000000000000000000000000000000000000000..bd56206911c54adbcfc43a61cbadda60bcad8136 GIT binary patch literal 68110 zcmdSBWn5HS`#%gLASsQMfJiFc9fEWzDlHvD3?MBzpdd<04>c+&CEX?6DcuNA@l z--G9z`<(ms@O$&TdGO<~*?X_JVqLYqYl1b@z9He&110T)0qBei)&k zh{U6y+;m8)dME}wcxkS!XrZc#au>M9L_tR-Lb(Q9p#uLBgP0Tsm?HrJ;P{iCtfJ-}bXJa~d zJ6nj8h`TubBIi&Nkxo+Nv6KGWO5S=>#~qIJoE~u<7XN#Gad3h&+;& z`=dGVmpHwZv$KN;C#Rd68;2V&hy8O)PHtggVNNa{P97e1;0<;s4~Vm|J3GYb&aY1X z>_^tz$@Dqc!5M51p+oj-Y+~=?EKX049O$o~U-NVZTl{Avh|?cr0R(a)pKx+>aB=>w znYlaoUz#DG{AzYd*RSEkke!L>JTiB(w{<~IOM;t^PwaAtf871g1b?;oPlG2Au(Jfu zA5H%2_J6n1{?GHjjq~@r|J_3UIoKRv<*%9jG05NV{q21ueIgoQcXL~9S+Jcs#Oab6 zK|ujA&fh-z@1`>Lw)W389gI!QCAj}+^0!<6{r2TRI{z6+n4jzKpxA*W(e=*1j(Pdu8jc24x?X5BWcNh; zwd#kI~L zfX*OU|KFy0IsbTCRO=@tIXVN77X|Vo5Yd*^ZSlbAeNC52$YetFk&9@mqT^4Yf29?J znEVM|RAbnui_)AFb+(&LW2W1l<*VHFgZ-3(G4c8e|H7invB_&uO2s~8$m3q0wyEAA 
z$#lb^Y}qN)_X;=uy0~8ERescRR-#uoy|-}eqxG*u7{#ImK9NbEcKuQ`d8*A3Sm=fO zu`ltEri(PFP+!Hq+^c%E@Y$-v!`0!{cx`@-LAy%G?pw-@SbM?Rin>|Se-)yJkr&7t z{qgde1D1enXsq;s4z4~){mld7-M3A^FgbP=^MN>Oq8lab2)FtZajzc!QLi~lo7}?H z4-ei7=vO$*PvDTTX}?upoQuPK0m?7<7wI9@u#{N4@cB|_{Z{2h*(9Xu=ZK!~&v4(< z#m@S(4g31Tp9Q{Wv(><(aNnOfhJ@WmE1!77DfziWq|_bBHINKjn(z)^O_%X!BBWzA zfCmK=`UO9EGyC==_^8FWZfZ3%)hG3q>zHwz9NZ#^Q8`wuj4(-uSzuk`O6dej-GT3B zlHPmjRZiG`WN|0Ca4o<3yzkS7oD>s?yTy22BK9q!P`?_%UF)pSpGL01YVg_2gnRCh zepK@mb<$n z*8}^h7uL^t)pBbP!{M`D(7(u(JX<|$qxC%v4i#!Oq^jn`aG*yaZW8+9fj=dSmmuUv zPG&RQd=O8&mRDx|<%3=Mv}-AEuh633&r!pR2Dt`{Y3=^^zqKQOjLwAx3PtIqEvP>~ zUUVBbkHgn@9(aIe4>54jZ;?`?hM+xv(-cqJG>D=u$qxaBrV2Agvqs)pTY@Gkb5xq0 z1WD|@N9dMV4OkTV$^72p{L*40B6LXY&k<$L>h&Tm$pymj!~IW-REl#<{;W@MwFL}P zP*h{^p_CHJhqm=+`!*LxeZD?@%Zc{qPXYw+2&1l`LZt?U8_$HQS?IM3-wmN~Prg9H z>)AxS!JV8*_S~CaO2oF_VVLd&+nuv=#6iT$o(P2ej7{?1AE(-=SPWz4d{*QBR^Y^; z=!y|oxCTJwQ_s89>$XGfYqnO^eUCY*u2-3SZIm0bY+)K)@J!O&_RNjzV?HE;#%SLT zB#aL>c6{N*nNQorsaq-~ae97Gf033~K67juT4%dSbWh~*zuFAoLB>P)z(G$cXWik# zS=qQn;_lO1zI)!pO<}08R*{yYqk|*NDYm+0m`zIUb}N-{g(JaZDjwE9mWB-zOf849A$YgfW-5dyzG&`KPX0s-T-9Cl3TmO0*lY}B z_f^syAscs_9!uDP1Z^3nH2^#7Xqp>^;bl_U61wV@1ui~5s5_{=MKzF)Pa13I#eXHa zr9|2aDlf+r7IUB4d%cd4onlyEas1LEq^!~xr+kfz+q8e)e@IQ#ZV|9so6W6(>q8bA zs-ypI$EBcP0`2>@FUb`bjj+D##b`U0KCx+)Gtr(h;%+DIBIKrLUq}VS`(i^*cC|}6 zmdyigR`tpio{O2QUAXYBO>CK4)X;0YMKDif<@?%l?33BuI$NUf$#>gg3%uP8J^obo zHQUL6ZQJx&RinOQhN$Qk59W||xoy+upt?T8u?`LV0(&a0)GAAwxYX*AXslw~TLhZd zVBe3~4rHxNIYtWp4Pxq9W< z?84q5O;b&FiK^hVbDlD1?OsyL+nbVn2IG!b+JX@@Naa#&m%yZLapp6Rr5Ilwm(794 zx_qHzP4JF-Vy|HJZY#kMN@4C0OhRZPqh+f1{yB!-#DK8D71Om)^oz$TjA|9;1be+s zm(^!nVB@wm4%X#mC-yuoG8!*3x0$^-Ke#xc8FjjHdhjw(v)b$Td}P+wPSM@hhRm2luu4YdewL>u4okUk}MtVuVc2{#VKDl@yjWwHxd ze(nCmo!HiU@u3KW!6?^*OY5_c@3(h03xxIXUopFJyywJqUxJK!X zwdim~rBe*4zT8!dQv*&IT`68hL~2RDJa|89qyn5Ua7c7d&Vl)?Zo(fOhwP@-R`r!k zl&Wm_G=`0uSmUZ{s<58Uc!Sumv%KHO8qINnbDsKY?tPGIBgf%~OrYDpxmTp`^=9{t z=+CQ1YtTnEVI#DEpPH!Pv7TobORH&eVtL!sef-!%TsQpa5S_sV0Fz(or 
z@I(`W%nJHh(}_+sPv?}Yn`BEhYY$f7;4E{;n|#iwc!fF4r=?vgB~n`vc2)KUnAT48 zLGrF7N9(CH;WkZJp#>#hk!&{cEu15KkVEpj}unMy-Vbgnsp7~H>dmC=zZn6j_B$L>20 z@q8>4m|bjYH91$Ri@(Y;qM{!Yz1CK4eg@l)hwwuNNlp5=+3T(fM}aA1aYVH0nuyW5 zVox>uw@h=@PS-3H{a^k}2CkW8E}hwcm&hgMbUn@VD#{zO(01ZgCfZcjv&g$y<$Q3L zUq)Z4&~P_kB@j8r*UToGL}VPORtH`sJ$Qo#T9zX*Yv^Wki8DE2sxxfyx3dXRnfrpn zNj+XR{TbO^!v<*5t~{A!k&q>{It`k*_j!;`O(cb5vEsLbUBe=X7v8K`)Gc#tM88?$ zb@2AGlT$3QSTaoa{rqk~sRKv)Dc0YyX@54FY$b!(ctLde zYdpS2fXhp9rnr*vrKv!|AygG$y5B0s3t|PEujIL_J-z8bbp@(OD)b#5up?6}poY-O zQk}CfH&wo{^}2fZbHnxO^>_qhFoldW^O;tvmb&W z!4he$r;8bU4#cIln8UJpRL701IgX}t8OJUTivPQUj4aTOvBqSH#h}sqg+@_g?-!7U zl)O%g&$uJ@Ep&gWE3PkLWEB&4&UvuX^jTs99zO(P=k=5*f^}_?$zK4?0e6azK;?Jx zokkB6-yaJ(x2a+T@eXu{K@%Xly~qTj^!X-@#Hn35ZBp-Oj`Ey^nkE>{7IQ>< zF{3!g>34M}BIsSzM9dNLEX6r$WN4V7;zubm+`UTHT)3}#f-}kJ5H(s+Ir-`9kgfoD z%c8W^Yk8mzZ_$Qd0!J%fAFK(+!~9JS&$R}c_7||2={df&aC8qYWq^KqGCLci3KH}M z+d_TpAP_IByP=NP1o6=RYOSFMEy-J$NJ(SP09<;AP6Ok%poxgz#GRRv z9>bin9X)i_TO{U(oDYyAv=>_&dT#lI{gtH>GZOGySt*a9^vD`e;v?gU?<`VG0+;H% zs<70}N@Nu$Y*}oj=}aj87?_k18fYU0zvencP35hcxCYhXqTqA8LByUTFu({TWT>qN z{Z2&&1oBd6_6Xu;1CELrFnk?lvTi6xL zo)|FiwBY(qZZ6Pz0tj-lx8EBkW;r&MSkMZhSu7B%ex#@X!~a@BP)97;hE}7VihREl zr1Y2N8*oF899=n^NXM)`hA!LodwSdmCPv2P31n6`YlMFeYg*P}JdbJUsmj?*!JI0| zadgUTeGaA-9H~fHy>LzWv&J2w1W)x- z41pWgQ{)vL!L1kS$4y43@_=0fB=(bp>tl7ws0(H9C$>9j>lGdYi~%pdo#`@t<3n=& zIojc*+6UhvNO1keY3PByry%P}xF%eCvK8JVR1kRb6lsSH;p=XjxssxRgcZS3YI}~w zPZytl4o@UJ_kI1SY?b2;WBK4;Dnyr8DV6z!Ve%tw-Vqz5cKt!wcY;Pr-rYAmmmVcU z-f!0Pv~G>8IpJq_$ZF+>Vz)GN1lvqOu^X*>E3mAk3P9C}x|1YWd;RGWr{}baT`VWo zlq{{_o?Fy`<4|5}C^V69_Qu12z?*K@`0lj2?jlV&0eZVEbA)O3*zFI!-!nRSq#j}O z$~?krL-O_*^YEZvV)Xgb-n8|UpW^0E^Xpc!0d>b^lA<{*GVjtA;OXj%Fh+xr@$-T2 z1IuNqk{c457^|0izzNQy4B)RlsYLo8u3%7&Lj<5=!CIEj!?W40F@%j!3f!JY8nCQ# zsdCFk*36_w_9%T2YqZYIuR0v;;`}#zuOZckNfw?v(vzK;A`+$A%mhuW7vR7zwf)fA zlNY+J#1Fxp!rg8S^9yZ+-TqL;iWCb7-J){19qn0Iuy|TgH~x(gzn72eSa$Ue#HJ+{o&ej(nrVOdiF@gATw1QjRY^M0^3uoj3 zv+FPE+PD}MKavh`U(iDlNME;gK?OT$clJk?06sF@`t50olA 
zGAiphDNsZTRtoBamen1>1WQkCB;k%FFC*Batk4-RDl=3<7eTkHVD&iKN6(Dz4MvJ& z7{?iBE@J$?XMV;wDT_q~MM=W6XCp7t6INjn)Uc9a-@aCxOCVKlCu|5c#wOvN3aqp% zPX}hfqTvfLHt{&sWs;XIv=WGk2HSf-^|Xiua?iM`7mIxaawm|$$mfKm>Lw>GDX0$7 zcanzNO)YahY}0sdNjVgDZrVafC&i+n>gV>xgvq0jK&q6zG8%M-LxQMGSB~4S=N|o4 z8->gISqNKlrEkZ^Rwz?eG%_Y9`J(Lv5qD2{K9o)ILgTy!z!URFPm|`*@H!1-Qj%f$LzU>@raP!6}uu zuuz<)tpGpAKBCAdj3w_|#6L%0I*O>(=X(x zNZ^Ovk2FQT;yW027W;#qmk-oI8$5!YJj0$7pKBafUKg38gB>y}5@^UsTCuu4i9n z`?xyr^~#6)qR#ErE>75HEjK=PZ9Yi2B!lGp6ZoMR>phnKHG#DfFWtqA^_oFHHIZj! z>)mSiaz6BiTCz?g)G~@83w`j%aLug@XLRNf-G+mgx`xlx)sTh*@F#gxyh4G!>llAV zJWxl>w8A*A8ojwd*RR?U>LrB&Vb@Z>dy^>(18VcVYpD@Qn<**XPeeo7nZnZyrW5ErspK^!3m1y=Tbs0CYw!5kC4 z7`2Ru#23>~Z5chUv)f$u*pLhGAx?Curn-~vNH2Nm>0!COMcZKkR36zr3qeg9<#5ub9E0CL6D?#j3l&6s+9<QP!=Do->fV2QUG(}Dde<`{EV5<`{D!#y!)Q8{KHH`E@ID-wU+!{l+>Uj>%Y+CwdpB2vP$qYl@e*L<}Id>@%Y+M=m`q9JKamf(x z%Fv^mEI7>xEjhW3z`yBe$#4Zl8>-5Z zMAx%7c0f$anSTA!RKJueT+KDHxR9kzyZ-)C@&56F`>;X6Xwp`ckBUhnemPG-Fe2DvJhJjMmVXhY$w&ZAERjrf789ociD0-;Vz!*y zh2UnC2mi`2n+Uje&B@;^&dTYqPjA!4TCE*bxWNh7yqwOVF6|MSY9{TH)EU_jO0wO_h@so<9%84vy5@b`4`gXc1Y9Vz=dO_8A_V4EPIk+G~v(w|omZeJncXFCqTHLJdVDAVrUfb-Ml6Orfj9bPhxL_K9kirk^vT!gl{N`~SUyJ_pKW zWHwXLA7j)?3!sn4n19Vm5w87qXjwQ2ZO!60jaZ4kkF@oWBzL;s_$qeRd(w9)9XJ`ND~_|E#lxNGzm)*Vq_60mK<=G4Qs`a!Bhl?Qnf71Z z0_*=1$Q+vZX{{>h4}HRYh~$OL6fE(V2mRZ-z_jA!pwB>pBh3k|_tv@pBb9&Z6;i2z z;IGx-1=f_xf}+Q7Q}RD2E@g*C#%O6$FMt*J@y~m|)7b_u9PLGmx|V5VzVlDT|J$Ga za&&o)Ll%*_C84FRIHG@Q;(xRA7kA-BQp#_wMk=z`m;JBk6W(Bjjyn}HGMaY;E%0jN3Uc5N8rhv+3s(a?uLCC*rhKA97U|^N+ zueLw`p2|xB8ttRu0zL8n|FI`PnQ@r*w8h!3;w*dwsmHphpP4g%r#*k(Q~3m}BgI-C znz$>ps5=RGDWUpX%4IF^&!&IVvP*aN|6xxJAHFq^F)XgK3{CbZ$2ncO53K;lx10pB4_LsB#U#kKv{~16p_Li3huhjo`;H8j|5xgt)zZQ$UVzs9W zSxfxgNSF7Zkyva$J};q%dU(Cb2Jm9f|Bq6#6{*!t%MMc>z#X>iT)AbzC4hQO(Db8Q z+f{@}*R76Mbk6VE4oa^$!lQ3l3gxHE1j~TvSh%R zEZ*dD;61mQyDXXgLt&)w(A0#NqSwbv!+YozhBl*3e9#;*4{ZVH*#?!CC3vhfG?cPma;iTF4k6xK*oESI(WRC)-4OjI9f#!W_=HV? 
zfqu==8MEY(Vi`~;TRvO;e4{ie9{{wi1DhM&al-R2iD6x>?JYMAr+(UNvaJE-saDR< z4yQrCAr2@H62MtY49|DsJn~>!Izy}7m@{m8Wo(l`k#h&Go@t((*}#Rq>9ry%C@9|( z?xR_i93*)?_Bhku6jl2o7C(U~& zD%EzJmtTKSwkLzVY>YnnvB-Iky$`SG!s|Oc)U*3#<>vKAJ@(6N@_3X$dA~AiY4>Y^ z55MSQ2ro=JQTSdiY^#OShr1epVNUkARsrc&yc8!C*d{k!G5sreX=ReikUJjtKA7(L zMn3t~;ZFn6ujYrMM%fH5*B42qQ$usesx*CM^cduO)MdB>qp?ke_W%&Zp@aOa4#L&Q za4)lveg0Emw1Fa;v*pw8@{-mGu3w*Mm3n19?G+mGSV@jE0KhQpK^l8$9f15N^UMWOjDVO?`JZTsJ7ljvU93W^xR)3yTDWYokGZDZ+&6-OdHsbGm&1QU*3m#l^EvVV)6wWza z@jbWP&yp9vo2rKLu>zNWsx>C7@qS*a$z7Xew!Dq|L(cLv9%g0k+Y7b;^r!;@m?vAH zBuF7m2YQHvy(T7l0n0LVAhS;o08`Z{mH@l_-_92MWv7t_!!`gWNAgg34o%m9|FT5_6%cy@i7O4%is?syHWsFpoY+bVN*|**>r#0YWMa*vl zn)NQwF@(X+tZfr_IFbNt%;^XF$x(wNbT}ZyJ%wEWWDf(a%&@ z0HFTH7Xj;DgK?wY_&e{4aG!@-cP|n^yl&3vsk5sOK-<=8zCva2#rZtvaSY?n0501& z>GMv$C`A@nEK+#z_6RVsF)C@cia$gPQlyu$W(gCHOty}U= z1cl4)(ieF2*eLPJUG5WeQKRDA$89UDAcaZMbAL;I1rwSk>N@fnQtddJMmq=ala_`@ zomx9B-%|tWr0RoVw<1#xo0(g!3}vi`ND?2I$_z_Xt`~Lf7&YryCVw3*lP??u@V$wn z{;ZSx0P>mJa;FbimdhoF7qdwkJ?_LhxGaE7vm_GV-#R) zSX%YACIcalDIs78$z0t40bH8F08Ck@J5NAD>&!gDA0FpOCrR%aXhDx|!o5%UteOb> zvAEJ-fe(@&UR<`J^HMjL}#z!X9#In0C4jh97{xT`LXs_!3`As$-q$BPsmyhTE`(u2#xy}sS? 
z@RdUQh(rg9Y3#l=Jf$zt*&D4t=NA~_nKF6H&SOm-$5=+Y3h0Ax2mqpmvXTJ;YPs1b z^9M8%I*Hz%`&wWz`<2Wa3t5Pv$!0xw>TP~UtH1`8@Jh8Onf(aN4Oe@h|0i0k2AlT2 zCwg)l*h;rKbRQRJJrO}H$xEcOP#}?X=@@FZr0{0z3Fr<0aD6Afo6y%S0eqODK@?7D zpB0)sD6<_e8i_d@dFU5X4M2j2#!;_RN0jHxIvQK{rup@?tX8ZKzuwVRZ7YSn{K&xK z&`M$XE$F@>3lMkM%?8Eni|>Y5ihV?EG`~T598q=pe0r<-n;J94mqFBL}oh28~Y}FVH`nD@U^0!FdJ~_UD6d}f(xT6LC@vcvqyKmk z;&@yTWce%O2FloGUlJfUU#m${5G7>~%1L%TV5crKu~0auUJalNcZ8~QlyUe+jQO!Pi*-PPdrpAoAD3?Om#QK!&UCZNO213 zM{#`C3SuyMpMkYsMldB4uuo_&S zy*QgqR85%%+;{KH>9U}pTf%;66o#Yr+@Lb29mmZ(HG)VV+e>rQF2CH!X-&GD;xt-4 z!%vni_HCRxctR2|h4DT>6hGvPCmz5`atH)Uk+eUNHu;r2;`TkDZh|ug-P+2(LBWy+ z;2kV8lb&~O3FtSmM?$HBiQ>*OoJH|{51US=RqAGt0O#1BhCe@Pne*Yq2&l)#3VCH5KQPS_2cphZ&-$$9BM)-)qBERa#r^xFN7PfaI*z}lX`x7=RCo8EXv0N8z2-osz{_xcm$qNss%Cm!6 z$w{X^@!dsxHEIV2YWJI*7@-)z7COMHcr@F)b}Cz&uCC}v?_48`o_;rL(;6B11u`~- z2i)aX3M{SH8NZP+2J1w}l#)%HKGi@%T03v%a>aDNREQR%W*DfnfqfTbK6~FI<};TW ztSkIc1w0qHEcIO>ZZLB8sMl>LW60|M2G#7L-2PDNxpQ<7x69;t{l!WBGIwUeou1&( zNUge^nDP3v$$Ea~ThH5Xd!!2XV1+r8!aX_?AyusP7rj6rDc0X{5p3_hrZ=8?zL)9c zy_|WWo|W#}geT}cT?XJe0l-m>x<-3ohE9ij{?3ft2Bme_qt`FPOKocsF$fh-(t?+3 zkH=?@R#Ini4<(nm!@(HL$Kzyc{V#2G{pj-0dqjg4uSx<{nvxdMfny7jCE3oF)fqis#1PmG2&G`0rFsCLJ5F@(0jK_@yEL$C4 z&H`smX2j%xoYSZZJ=c+UdbICjDi5m2X!>KTv>|m#@q&G-*;{uBKxQ0+djy=>k6Q#- zH6Aq3?mUS;){~puhk!G;1BM+A0R@XYo4YRA?k3Bsti*ETU8hftrqRN~@E4EkbW%E6 z%-btyo_rX=Y=kMG#fp@oUjU&91m5d!D(OO=fm+}SrRv>%eJ89Ph(%JDpig0))#t}+ z#V0_wE_CF_!u`=WcJuN~_@dwU>ZCKMRTpxB*p0#)Th@qV1sQxJ%31Sz1op1E24Jfc zqBO#?TJ#t-nzAQICP)mouq5|r&$ohl?u58~Sbj+($5Hw1-SXg6K|q=$pzq_=Ge7f} znXDw%Df-;yc17<5(Y&zQ2cpp1)!#q7Eph`Qf*#oYq$Xr;<~9D1%xnJa!`SRi)UOcl zwTGJV883z?^QqVA(35wf*iU8ZMI%2bjvsCU`BsKjLqpHrv+7`x=QJkK73tsZa)P26 z;AZz2?ZM%D!RlTnWnZ7_E!l;s4@3KQ8#VX^&Y(3we%mVy@=++zF8q)_B+Ro7eq#b; z*4@a`!1|ke`R(l}NO*DdK*Ai|8CJJ23bV>fl#P=VAe1|@%uIIuG_`yxOI>rF`zflA zhU{{KHh!9%C}KY#S1xm6G&`*)6pyGv(dPYN#t}T5JlfbZn#UVRtW)h_xIudf-OVKi zJ6`Wm&ySj!B_c=DZqv0Ck{o=s8!8@5x=TU9SAHKut~%txSp>D*6+Hjtl}E9`Lz#Td zh|Fn$A0lT!aNQLqd43pj?tczs8kyXyvLOQa<)vk9$(=FF6*8=Hy$cMY{ 
zICzyogrB*!d6l)ii~3?D_$>B(p9l@il#nTd*x&Ta(?nvtNrc5Xnu-VOd?1Gutr zPe9$Z(}dF!Cgat5mda~4?CfTlVfTIteJLchIek2ihA2h=g@fJu_+rdMvQgIGR&ej7 zm$qMLKjq6ceJ%<_>lHxk*NA8N0>`;X*>eE_bf|Re@OE3!QS?hV$%C(I>dJybOT-@; z6q4i$Wk%g^(`m>=1mXtOUz{O4mc`f7?Alue;$8R0Emv+exAFxIaYn>)Aft52p$q$r z3+rA41z^+l5$!_8VF}Dm-WUmAcz@h8JW{H|I?fz_v6go-L)}-TKypaV5Q(K>OBT8j z3GVU4LRYrJwtgI(!FN7;apD^zs18<9%6ZiFZUVPNcsAe3pf_~+D@Xn5P7VBeX_VFT zAb1NOgHqQZH8m~vxvb~pE$E zTr#m+rzTCidGQGhARc>Wr@@d{CvTa2?rfhQSB=(keC$Krl-U#y5O}|+0c8{2A2Ypo z^FuHUdtYxcnU@v>c6fPK3mgRNPNej1xJDCE?n;0M|+M)nBYde z@SY6;!7=jppPO)=ZTSycvZy9+*L+J3SYZ(O7XIKpC$_k(##<+KVuU0z5){csID(!J z4~IF=c&wOjHo(qS%e*EQ$*HWNB}uKg!Nm z%C5c}ysVB4M?!jiQ!h?e`t&!beAa!D6^i$FV_(84O53gIWj-n`K-E=KjO&sfj~8@> zJO#F*w%*x30;;I!^hTziGu)~_*?0kowjD=R7e}>Obl^I3Ej ze1bP4mV+r2^A(gKhi5lGH2hS5Wr6#-+VG6hH9^C}FE@*T<3=Up_1J69I8zbEvfxSA zVE7G$F4`@f*s#z^^o~J0l@H!8ypmfxgmQ>VJdi=GrCKEFgB}?57@jRz)D$4D@uiJk+FXl1s)RrxE8momrPZDpv)8Khv1qPpKl7Kn& z=qobutrso#o)Ja?_@suHIOG)xTR42#LGrDwKytLO+{7)ersPA^Qr{-N89BgqExG$a z;!id=K&VUUWY10}Ml*80`XoKVZwX1Va-~wHdFVwf%Af8QEl*qfDcqVr!K`V0*rDpH*bDTc8hacbl^0kea~6uD=bI_KePYFx z3%#Of_q(2j^o$Sfjm2 z*@@0{&MRyZ#m(nCk)C_od_qUTzc((waQ5<`uEdanN5h>n_Z`a|aj=CczVNq*!wKpl z@frRO^7$iqtU(ACEy%@;i*&X{J5G0gc-RB*s|Hos(i_Txy7BF`(o7oVp0rN>;b>hf z=qebvUe(r5|NJh1&D<{0+lZi+<`dAt1dxRO^djrpy@+|&NRssiw+2}}(b0WgS&{s# zZny5s1u9R>Aa21JMq8(SaFM_K=~;$i0`dTFkA)coIF5Zv`Oq{>Hjq}e^a&PMi^Q$F z0CWn?ymLkdccgoi)Io9s&AoMtLweW1oHgaT`L4-?B6D(dz2sIM88k6|v@605TNir_ zs21xz%{lIk$d^^!z?|AjX>+}Kd#PQgr@qf*s+E{)4ICriZe3wmYg30rF{o zr>IVUCq9GzSanaFID3r3P6c6p2t?17l|qt_I?!IPu=?`e*)6Z^{}gHb)q#N*2BIP{ zdM|4&hQK0!FqaWG0ixE@k(1Nu(6*M*^xyyfF&tv?V#upCQl4h~-BCn}s6b0dl(WR5 zZQO-T5Ur2hgQ9a=u))ijvKksK-t<>3Q@ILos&0k_^J9E z>T46?BJ&mb$i>iETz*?XU_S1SA5mwniX+`B6A-FvuP;)0b{~z$L`@z$ja{S zQl(^8dW$EY#Oj3}&na;J58wPcjr&}Zov1WW8{HD@PisiS)|m5OVJ|fpgv)ZaDWl%N z&fXG)W$topwJqAmJ_{7|)4>rO=r;Zm;I0mw`nZ0@Mm++oN znuCh`#5B$YzJ~-*OU_qng9k9_@bH5-YQblAHQTL6ycfp!p4xR6clkp^?S?#n;Nk)V zp0hYV*rdsPzY7m_r+AKEbBH`fyyM?S)@U( 
zV=<6G_Y{VPFohhV>h0xK8yNH=G7=%3lL?GYcU8?aG(`QDH$Na=Is245@+jXa{y16G zs|8mBnbB#&T!8v)db(~`?^h$$1f`21tC71zXO@q%j|jRlWvu-i#1(6ZBaS2#Ogzox z`%osR3t;VYYP?$RvkT&Hn6!kS z^(GK~Cj6F3p{JC(Su&*Nf$>83B-1w4%yo3?Yt#}2l}8?1|PSyi%e4kkgWMF?KZDp2YHr2 zryChkn6I!{ggWiNX1R112d`kGU!x%UzH%g`N|eHVnzvFLE_>cwUiUMv27_fcp$(x2 zdzkL0RCOz5jpY054Gs7Lvb20YBmF)=ny8V+8ciM#lEN;wl2bEOGRe83v_OsZ{0Sb* z!?&q0g>48S!pA1B^yzti>j^djWB3IP!vpvaU^lOWW+x)(8^3*a@LOR-UGY@;9$kKZ zPD2{78=Fvaj|aW22|vdj)3M^zYB9(xddkKPo3BM$Zxfl^!`^Pmu<=m9h;0P!uj<0s zS(OKl49CSX(tFIelOLt;#<;)#Kyf3`%zA*KgG#el(PK69dE^xfxnkAQFdc(Jh;N2`%0JrSA0IQwZuY1tvo zQhZH^dtx_Y5El7jHAG;9^+xtmB9l+M=9h?fq2Og7HktW1mxULt8GB5DkWaEE#|N+# zP~Q>XN0G2>A(a#2ZwcMScVMsSHT^AkvyEl)rht>OqP{-6HMboi1%mE#y`tpW!@qA# z2zqK(67@YOe6ZN7^O5e@OStSYM21QF7^09Nak8MjdctCD(c;>n0cYYtja5syJFH@? z!}I;f*N}gJft+8mbHXh%n8O;|v75nqSVJU*l2xDB-!1xl>Dle53|dc~jIB9p;p_f< zgeAtQ>p^9ktgrNPBb&u$ZVg&2Vs54Zhi}yPT$;VRYL%9C&H6Q*Ui(cTt6;8Q`P2Db zJ+YWfRG`GxEDt^3xXEs27Qyk_b_V0>N{;lPA&ep)moI37aq|r?jz>K<@+t}v!difu zQ{DPYVzoJ<E8C)85hg4Im~5%9vRkQd0HS_3p#u;5hd%-ufY)LXN(#4@svO?Sn0h zS*5=0z8^YyK&_s6`xPgq#&$^dPCnV>0(JIkx6?b9I}S}=K5h7qWW@IyBitmd(Sfob zdxg0WRe=N(h0I?l=Q}H#aOOypvAmn0yBG;II1(Lmels0sYOF4zql<`mlOxP8hTIbeW0rGeuQBSjOYH%$(+ zk|(WTl{h$h#>h*TD~!)WG}vvI@qnI}fkzfI1rY8`y;a~nb}P-?7OCJzDch?&g?2mx zchaS)u<0YDrFo4A^=}U`z&?NIVB>Kb94xVY8{(CZuj)(4c6XBbbf}J~=VZj*|Bz_fQFEz=Kkt6JQ^ak2i?uGui`|pUM%hHXLZT^Oe9OKI^Q* zvL>Qa>IGg)_jVDhk^>>sT_nW(_6r7Y>EtP==4<0aV0+$l;&NpVh)qs5$_DI zA;7oaJBt})*oxIY)4zV+7i~U4=)8C)s=BxOnw@{!W2M>8DT9d!(HR-9w#5w`aoW7& zoMpNJnTVP*74Yv={z1}K1ArB#%J9*W0-0|ahGLCU9v7}+P$?X-iO=4HM< zgV8A#+}b@Uw5*jTMwQAJq(C+M@{{tZPZ|I%J^PhR&i!)y{C#b0kR*|rBf!s^j0L^iOtR+dNJQO6dT=~IiZbC5pcXd zgJZ_vIc$DK`iK3{FV}}V>EVsVd_*UUFap~TMwqJAF{B|1v*X%==NSERlBp&2@nZr_ zC9woh0{>?lL@W2D?qWYBkcA6Z`K%9l_dizFp+6)zpx*V^C$6DOhmu>TwQv(Kq)RCl zHfU}q$JuR3cmrR?!#%+bpdUU6r=AqDaVOhE)Rkwb<`PmbUO=`h+*jDE<4kCWX_y@3 z^KKFGhfbhFkMN`}K*LX04ERBU!?=RKa5MxUJ}L`j)%6Fx(Od?H*7KC!J3Q~kZzbF^ 
zm&~ueg>9E5_hHz`Hh_uU3P#UPLD99nn1G)!VHVLQ0(|vQbS{IFe+Jb=I{c{ow9}99lV#dfQ?mCC4Sda#*WC7+j zJ)%RO&eY#g>E|GWA}q4`;*S6GsUfooS-D$`;Kd6)mA7Z2kBwr{F*_e;XA5-6?K*A( z@CN+E>Uo_R`>Jp6lwmn?s@Xv4P(BMB8QaTzC*|M`Uu2}ph62csB;&P;N$+hQe~-G* zyQI8j*^bcysjZ?|{F3gy{a(XBc{N2aJBCIb=8>YN-o~mx_zHyirhmkfd*DDB4SUdI zk5%SEh>$wi!yIwlEKS}x*mHXAS*A(@v+E6 z8Mk<1+F(bM5SKm&O7I2wJ#%RYhN4PHm%6+92n8^!ne|-E*+uAXuuErPw#@K{ zyBuv7=GZO|h?~Ghr9+~=j-r1DaT&qT$kc`!U*s;5!p$tq7@Qc)CHp^7-%ekp-@YbJ zBq`VBF92*pNqQ@MmCk~y=diGjbIYhPQk^SC{@?g z=?@&t+hdgSJOnQ@B=|%YK8g+QK9iFVwdkAx#%%#(0#%u8h6z2l+A1JyN#7tj+ zdG3zn@R(g}Q=5B;TvH=f12SE-Gzqp!QtD$s~RLPAlBg<01>i;lk(`7{YbhVY9+rS!J&zpPR<{wKj z8~kA+xQOAyj|R1#J9^F>ld@dZp^>jzX?YHk!)KtlgNm5qXelYF0$d(BhdXyTnH?H7 zw%BESe?uobOpCbi9}^8G8uQOE5(|}79*!DrWhlCRTC=H;(t6<f~fM7ETnTwW@`VD<%1EVqO$b znSVa5LA6m=6r_g1O~L*MRnXGi#>qd7ywx~+@~&7GdDae*aIv)b$Fw~NAWv7o*+ zm1DjOK*S(;Q?~ zVX6~gSh`3xB27OjHbMGH(49c<+Wv=Mzu`4kr^ORhY+JG$c9+4KV4Z0KotHSAw%01^ zr*pe2kI7Z-0cBJ(K0hw3&#wBZneXfQQv3XAIx;UjP{338$lDgbUGebF+l(z8>EGh@ zC^wp=Lqe*hXGgK8jv`!SKOYje(4O@p+mI%6w?B9-Vy;mE1)%M|@$d2=Q7ymh!Nn0w_I>X;mAJo>3T3D) zwumsLSDm0ofBKnJ4}P&9(&jEq;dd1T8srh*l0Gd#k%1*vI~gLML4l|4n|q-cl<2je zvld^WS#@2;OeAs0J@hxeu9vCuH`4q zhmq{jtY;FE1z%HgMw`@B?fx7ZcIVIID`2}d#lg)}0tF?>vKfb+8Oq9P{z$Wu#}%^% zB5MRZM>1BOgVNeZ>~#iiDzbGZk}h4MN+4L=QKZ+bjxy^Z@xR87nwaSz>-BW{M(taf zPFdGwB2l=Pb$kJ?(-?n!5%=YuPa8xlE$J$-2Kxzr+9_@kgX^(Q)W1HvDqahDDCF&6 zbj}G>ui`YG_7_#`;hEBYF8tsoi}^t6yZACCK}xgUK%dM7j%KW;;^vvHFv>g@?S9py3nw zA`=S7UL~xWyO(FaeM3gJe(k);p4w|?yx0Oc+@aScs4-i69T{i}8n>dIL%T%f6dn@B zC%sAz;Sul5a=s$Bn_%B1+u906S+--`+#I&!j1mcscwHbcYEHIYRiv$e#`h)#IhH^7+abmZZj0~Lp1Swl02RM~?IV;yJgzS0PF zwWJvMkM`3lZl^Za+aH=cx^pwrl$U$(HY1F^38w2E6C(C1U%c3l9+?lh#$d=oAdsS% zxO%Ttwwfh$;yPs&rX8XC$9%avx|_d?N+Ei)=qv zQMuTos%({|pOC9rpYY=~EB+tJl(4 ze~1xBejilzxtSv-IgXp_$W9bDz#^=WMkK+wJ*I>JMmcBqoq^{U3 z;7$C}6?5efb_n?wX>BLMv2NSaojZfnQg; zp<#prP2A5uC{kRnAJ+3CSviI)th&z~i;jFX!ehE?s?O!xmG;{lpD(P~3GP)Y?_SC( zuQaWS_>r})64|Kjpcx+t1%PN8!7f&O{(O%D*1X?h_^Wysf&4R%>@H*>s)1v~$FZ1aN1ok2gWZglhxg=@{fs-#7I 
z<{NC&=sAAH_{#U#QIK!ZWIVUa&Jy*G6Xh5`=eT7J@gcWl+0q zrbhzo{GBnf;P^}B%fjMbPh)==)pCrmUI@8eL#4P7wU6~wR&{VqLt*+i=F;cTGJ@_+ z^Ym^@ow<)(C0EZ37iWpVr15-f3m2l*2_9TDbO*t+?H1Fi?t9CnUfwD_RSXBZ`rl-r z@OZ@1H*BmxXQ&DUGFt7F-BW=DUzwv^_IWwgZYrG2RFs%rqb3~dt|w%^*pYfSwrMb( zrbsxp;ki*a*>LB9P_2)QA&kUn4DM`2?(r*aH!X(O10&2Vft0b82T%2bl1k$iYqL!T z59T|17ezw~6^6oWapNFHj>lau0(CZTJ8XOKk6rH(zI+p_I2rYci&bVk9ueWJv)=XQ ztDM4LP*S-b#nO$dXWtIfaoo{)F429{kkDr74nJ2cnxSwE@$fv?&}}Aa;(1Q0ZtoTu z;o+fTS!FC%9BxPSu?~r*5!a>bVVb^Xv=v6Rl5#ifJb%?@Mbq~dvR6Y{HbCr=YY~EE z-HRQ2^yhdD1Oz?5Uw>Nt&IcR@II~EHMHv>VexXX>^|2Jtj#8M<90*mKCK6}9%kegF z=^WQrSrjil0&d~AG~R-!RGGE%x|G@N8QpxgZd_OW&eopYAZSlz*hec+HW$tigT-=x z-$UO?-QTk3$Gk|Bs#vsQnHG6P@j0`0d2#m}yQxl8BC?=a}) za$Hm!YY`&$|4m@VY0EBj{c(Jj*{hw~HUVDOHif{2zH!v<)vzc2cDpSm!eN`|9KJd9 z{SXU(()#mGbA0-2K$fOz9uz)!z)Nm!RlkF3P}Z=m_mc13(`^CK%Rk=rYB1t6U+-E_ zk4SH)W0Nd;9zW73=Qt*DDq$#HG--e60FR`?@8)mVp3Ss0PMSL9ZH921cuOPHRnt5 za|Jh#y9Asj*1DX{P&Nf&tAK2}EG9oyx-2J0&KHH`_g7)J%}i=qBV0&ihw_vWiu1)W zVvAX3pDJ&q^iUId8cKz{mzM1;d@`)gV`UQGg3Yo1Wrh-)j*8QFR*MnFSAR`_N!m=8 zgTwSElQ|E2JPOVxaT@P-$aoF+)X21pgPc>xl&8i@#O~8O8K_qq?@l^VNLI@mf%<`q zPHjHlpSO^4By>&5chSfh3>_9^*~TLOUS_`NSM1z4QLyY7wH+Vn@9-sm%H06ZU%>@N zB8YIxwb6PlBS`S2XDX&<^_dqow`{jY>a2$|4c@N4{IugW6U1^`m)U!J(j_yl@!m&r zK357Zo9o(Cp}qo_?#5jys=I2w9|?Q-lhLl82{O?09?YynvoaXMD3C#H$xXi_Uh1p* zwyXVNKefAgFS@-Tb?N*pztQ*M>U-v<%N`)b=z!}{6VpywGX9k%!*6#=HO4DGthxMl z7&2*@-W#kOZQwYqQ=;!f0f{a@K06JEdroHwhJZ3?A7hp(&7+Na(%l+JR3Tw-igtC7zMYYQsnC;=MtcRY(q-%kXOljBF|}@qCX4WZE>PQ^Zz12g&T-+<=t=!5$?gG zyFyhrm5}#2fdBRzR`tQQ<+>DvAYy4@7=}g7oWC`!g?9_{lIW9~CS6t*> z1MPUf5ZbB5G)I@8A4L0u@8x~3smsDEp}~_e?e{h$XwdiFvApOn4dRu8Z~*t5v>aEC zwLmdqet3p4Dm`<&d`|e=yej@gopmy~_fnihmG<6ps;pf5p=eg723DKj9NRgmn!I^P zaJjuGuOxr}8ZsDkxpzm7KGCO!F?1_$co0u7A*Yv?Hfk@j!x1|Gt!hNf|`fFv*V%NQM zPQA-44?EJ|L^q#KtEYw8hnV01ILIPdkuvy~!EvRLw$!pjTa_w{ zl@ZV4UBM#5rdD&?zt!2qW!6(Q!*zG2`8}2*v?DesrrgfK@uTrex=4s)<-E2tmhZ;s zkVCq>Cxu~Ccc`HMH8#{B)1Lt-n5~hWaroVhTj@{EiUrSv`94``2Rm%J#YY+ONnNFw z{^)5&*;Qvr&mt*EG@$)3IeKdQy9;F*>VLUlf 
zng=9j;UO|k9glFiwM|EY=09;$s!4f0SLS-~nV-v5TDlk2X9kg>W=B_|N%9li&ROL# zi;=>f)@v9-X$`C2mCr`kFYf$H`g6J}2$8hcDUIPX-`SVpTpWtOS&IeZw0}Z83yPFC zZ`sRZGtHHdtY#HpyN8m1pe^S`Qu8|{&oWZ@L{#413JQOGbf@QlSvdj-=i zw$h0H@AEF21F~p)<{r2GY*cyZope5C`;o^g0`X40+`j@OrL@h6rzJvB^G0qvvRwNS zJiouT^UxT@Zx?3Ze}(S4_lkr0=Q0;o6Mwr&<(owN(+r{GL1}gIs?BNNJe~mmAiJLz zr$N%{x-iuad-?VhVl`H`l9aQ#+YgiMq+;Ar96xe9+oP*j_C8^Ka|J`1Dj&qwP80p$ zl5_xpfa~7|b`o5F#)O6*q_Miw3(RL1t)w@k$a0zYs&tPR1iM=vB&Un##mrFh?`v2- z>DfRP8vz%TQ};n0Yj7bHggJmMO~P*f!K!3{MqsI8b;ZLrv(^EW)qjU-JHzYeI%6B` zs#O9q_u6L>84F%z?G=4ZhH-}lg8n8EasVZPfs(|o4wm#f&`Kc-hQg{fK^*sv@ReAS zp@+djE11N|C1)F7tj6i5{(jMwP!%u&id&n2u=D0syPF{Ekz+?#ZubpQZ)4}=z?GNj z-CH3uCeOVlCh7xMmky96ERZ{VBy(QK>5XUdEIhe7BRK}r=XC9sLDEn%pYRK83jqxA zf@&*_ArB})_X>!!nyQU+o{DK*z8%!@J6H^H`{D;^414;tuHa%SoS0wV z^9Q3YU+;I@=`co{G5C37WtXn7l>_Tu9oNN1A(ddLrq>_e|Ie=(ad5UuW}3_420t(S z3*Pz?IyZ1$eZXVYOJ$o%E-J$5_HQ*8M8Bc(b4QdU*%pFVUTEn7vefU$2I_4CIFXBYpqm&u&JbjJsVI1t0%+ULTGqGX{hR z$7-u6OaII8ptp`gy9I*(SN^^^4Y(sSCg|xttVi_U*#)1y3KF?0``Re~M~!=UEQs*P z8aTIZ|Bun1{!`BdstvL9ZuQHlJ>)+-O)>`8{pL@^gs%S&4n*rfPU8Qu>HpaD|D!aa zfMo8s!rTJf|8w2pMY8~VK)|fLzA#Ssb7YwELQ0L)9XYPyhaqOw%{`bZl22E{5NQn89Ke$nIOToqyOpAfm!tZI3x9+jDYW4P}_O$(bvNX#Ujv%K?R-Tp4LV8 zKLq?Mi28i?-00h{|Rc zzH9w+QfQQtqMfTisENOT6lNwV?EH0mm1h6w&fbsX1kk(i8yEVyOc+54#nYuoC~@;I zkA`-Z>mN54Eq!LxYzliw8|sn!>P@J;bX&_UI%blXzdt->*|Kqxs^JFoGX};8wWG&t z6e{;olD4vEja(hHdZ7#1H`B<10uKrDRqG3MVVICyWxn%jz4vuz#pUtQhdmle3qqeq zJ946iYZTx5jsBA<(fnuRvy{G3Bi@{4nV(M2E?&DEAAbHV!(}#uUL9N zu^cn@X7Tky+WB>SI>vi!~8m;22n?5O4$Jc+X2sHeBsZTXet&^nB$jKvUyC`D=4^#FH(GK2z zLq)TG!v_jf9y6FNU$o%<*BWB`vxg5j>Gf7AIGqO?Ji3hbF{8J6Hkv06Yb~o%tK1(- zU)bijN6Ve!!)?=jHk&%yrRMSBf`ccVYk4{wV>$7FuFA{hCGEj0XD`rP&E1Q5i2T^5 zuwO~EI$naIg^t7)-xT)JR#Bjl`eV~w+8R>O*#5z8qxy5=?vaFiX3?W*>1VB#xl+^e zGBm#&YF{rMH6Vs`Bal}$$}^i*GxGiY3bF^*y30t5IRLwJVD1sWbit5H%rHeOpA=RT-f*Mf$ z%-NHk5sOWeVX+-ZmMfx*uL>)w@aFBq7ewKO3ejhHXXlPPP!We0e)Qp)*RH!HRGT3| zqsjj0Y0J$~6WXm5XH$p+(V%GAUN7NKx7!%Q_8YdjM{IwH%Z6)gxhDS2Rz2%3Cy%e( 
z0#sNM9)qJ+<>uVHji1QL%(L5Y34lzZn8ajS$B$-J-e?3C;ZG@Mc|7@ut*+sZ-;oO&50OaNW&Eur8a1G= zNNvzm!S9Flq0FT#BxT27a|)*~Q1s5&1a4Et#nvLOY3nIVf?f$#lC(R3$gq#)n!hiH zBW`HjM+gwXQV@`LJMUX0!x}oBxOIR;FoDREpF4Mto zE-Rx=J#I7ZCC}g&$9O&tCBUcO?IZDCvUhqn&*nXQM$W(xKz?8w`t-RsFTX$volYj< zuaR|8vPboq0{#KKw|_m){IGnV%^c70_U(!k)XwLHoK8t}*j(4{V@obiR2lwWWy~rm zPcP4KO*bhIQWk3O`07x#G`;vueQs!J7soVMw7TPSloHx53A#0!-5kli-s}A$2?B!m z(Xq%y_0GVIFnOzrKQEb_hDM!@2w}-Clwb# z2e}bKPv2_S<3%6#t(CNpT2OR5Fr!haS?B!g@9^n_4FgM9T?rq|%P|Oo0inOpN+Miq2zg_q zi%z)xQahvu3$~nXxH{G9Ui~y2(`Jv@6kW>?hg>x277V-h)95%ml|~*Z&+Xx@O}VE# z2d#zqpEcQC2iuB7mSdmY)Eu?Ra(cNqF#m*?q-cL(zKL10#La!<-8*?VwpGl*2_L7A z7if!n#p^FXbEOYk4vwFwi{2vBk+(Y+@Z0x!>}L^UclOlxR=LK2BWf>12db~S**UPM zv}s>~GlkP12$U|2EDIVL7*M#^Opl5i-73Kb4OPIfdp_ZKW=tpgZ8ApuZdlA%jdEwa zpWW~IWU+i8kH`G!gJLVM!(dp4YJ^F!*zqn>5*`9X{gfU!?G+F}pmNye@V zA9NCr$~Cc8p}gwL2eo_7z+${QXu^JJJb)Lr{~~wr_aruwf6Bj2UCmnkjUQqllNR|U z!vOL3CPU{d3t6Ku7`Q>bPaDpgy=>AhGaN4>A3|eIQb=0xG~B1mP|g)UOI5X?cqz{~jqaebh7_okf5Vv!##gPmHs>{B1NJsA1 ziSyL7|Q`e9N+d;|rF# zLv|tuE}%parmlwTfZvZXy5fx(Z}sJWz17irHY$0GMB%W>aG{W8wsdVWKb>(!x-ANs z55kws<-va{z+_+>RD04+Mr89!iiUKDN8ZJRp$?K`wO6*v@{~v755H(DT$3~Uow!Qz~stn{rVi@B+w1yW$M1|q5~?lz6zRXPNn4F z`^(__E?08ZI$sjo(B=c?+ZE5+Wp96S1@HH0>Q&Xym`^VqxRe%j0lpnBf0qmB)-H`N zoLtrOh{s!>hiV6b#<@W;TB{T}1n;9<=To4C^XR58b(Yv`28tu08RNjJF-&@_N0UH_Exw0U^ZIK!VARjC2A4-w>s&W_N`^*ijWS6}UHTkAOUY0b+t#GM`+Hm?x#V?%+^mBMXce@tsqFL=c za-Vr5w(jxu9;z}Y_}2BrBXRZEoOZ}Y3Jm&Fud#GkqK|$R_B=xEswtZ!Bn)J5Cs9)Q zy!RGOO5e(CU+b$h=~*Hf3>MAd(9nwkk5rST$Wk{I)oqI&nb%h>3~3J7{xwaGx2hU$ zX|WFp&wRZc$deiV9kPK(pCA8N#Ks!Fo1a5)ZR~D7Lx2SIKWZ5E8)?&rrr|gwi2rz> znE<@Mb(XCK%lvAtZwkbwnru=0UR2+j&(hxD>vi1c$!z=k+SN}~4G!1UVSf&ISi?P; zUs3tE&6y;rMU&Pcjr=QOc}`4sUgQc2Fn{lB2H#l-)al1YWlz1#+JXe_h*EPf16g8} z5m(u(mLJ5)-w^4$Wve^DeaH~_vkMZ0$7;Ykwj#*kaAhW05`J$*@b-PUBL`H)@%r%+ zO*=Pr$@>2N@-=1_EkV3Bz5IGIlzX&Db`T=G^t(SxEq(seGkVKKOG)@i1K@|%)&6|_ zy0|w{RtUS0%ctJNQlkKi38%K43E3SAod1FZYgh^M07+QA3vpx_-GtD||IWag>4Cmc zvf89T_D`-N27hPKM`O{OEqgylBchnTX11>=UGd7*JsAd^lP}@-FQHp&eA{C<8Ck~@ 
zI6B>mkiyP0BE`a3)@ZXp@vyk2*m?KZAt z#F(LXYCJs}K)B|oRGt1zAu2uvn@n)5SdjXc%6RcVd_{qK0L|pVzkysbpp%Z9@b&pV z-drVyLy0Z3USr034CjGUfqbt66|V0q zN<7t1)1P_vgcoN(QADia7uA=;k?(;I;6^xvPoj%w93GzP#;fKr&;*gHLzPGl0Fnsb z)&P&HV>2-!fpmw27AbGg8+yrLNY9%1f{}EBb_XxuxppWhDYa#Z+akuhrmzN1$OCHt z2WlUyFAZAl$JAs7i4L%t&G+UYO1-c*W*j(K!_J=WJ`@a7B824>D@EU(3lK7KdB#&& z^5DZ0Iq5&c5>*AfQ#6xZKWQCUYEbx4$oPJqPCOw?4sF`?Q`5&(0NVD40|eg_X2F*) z;Z2wL<0vXBt~03>^t~MKa=9}g_NR-Q=jTv)DbXDGPEGk6TDBQBGxf&TeTug9IY_qPV(`$@38xe%~T(#`&~Amn55q*pDj<~J;g69 zLTFk(C9#~ISch}K3-Dzb{`Bcn$Mns=w9TcfSi?+r+ni4|(IcnVKf@?&kM|>lwh5?Irr4XUUNkm;^KaUAfQsGO?FUZn zaSNcTbA5)o87P4KqP6+HzshO#_=ZIfd81LL-kNq94 zV)c&3zt#f^JQ*x%aa+E_-P<9@MBUG&^eJFC8=n$hpnB$jsJ$8Dk&`;1NN*CxK0J`$ ze5;LUgEZ*Gh(Rk6UX6}!?S}Jb2DsGp9zQO8`=oQ;IGWm5w&*X;)~o=RHL`s${?GeN zA8N`S=BI|cZOrUsEKe^#bAQNVbAf+-`+&Ayhqn&H{l4In)>!V>tf`Er zL=()OG%!HP`L|A}Pr45Cxe|4sM)JLraOat z69rC9K(`U;Ki;+TjMJzt3n)frXTEPIlt4eMfPE?6$4xpok+mGG=Q_OgX*cmSJUnf6 z*|!?WEdJ68s8Y0`pZGM!KT?ocTr)X2Df&aR+GAg|fe*RgI2yAQL{??iDTZaHiWUGF z@Vn1LrJ+84IqZJA0r3gV;KzUaq9&;>jff`aRcnUI38kp$#Xu8wzw*7W_=gQQ*Z?b? 
zbDqV8iHsiZPEUjZg9Q*W+!XrfdvjJ55*DkFixKNRM3aIICvQSOYJP!f8gGsJV?O{w zGxkS}ZXe?tV#-#wc+fucq^YY|z-dgv#`~k)MDIar?~Ml6kJA=HloFVrmzMV<{ygl^ z)a%gU@*(ZpRoeKQf_y-Mem%*FA0(PI1*-#6wK~Tje5!9$b86r}-m{}W<(YM&V{{=t zvzfgvcmi!5PTIww1U!&xF8K548Uqhyv%MF$yyXB7oUYu^C`@0j;ftq~?zxVcjE4@1^k<|t3 zsk(#JpDAr@gUcv+z6iS1Tc>CjO_GF7{k@QpA0fYH(>Y@|{GxC4+aDI5ZUB1i>?J}5tG^w`KZ@nB zok%bZMa5OQr(BSK^h2lyCW%UWa7T7Vg^vC3X5qG*L zJu|T0aEXxUCb`iKys7;tj;Vo zAG%{7rGo{IZp>rZ-*InN5y9h7x#@dI!V7V*VmT@oKYfze;pgV&4l*dCMGC6}Sbr83 zhwHB{-OAnPR>kLy1&0sBK?5Ee4nHE9HLHYo%})#=;VtrcFbtaqoKc=T<7s zpK{C7FAhR{%^!w!GSa$CqT5Dlj_e;X8qN>qcW&6F=~$Y3G{`SmKIMLAE%gkNP57B} zDYQW-CZbG!$F?X3$W(Fp_Pyg>0ij>bEf`{fxLMt~$obqvWn&8G)uRKRaM*xNLOm-9 za9%LDD>6__=rtSP9myG-26z{-9RYL4E2~FJZ+~Z9wndk76sJ~!N7_rzSeSVidN=7J zY*~OMQp}^h-WC?8l8>CuDIm8~vHeJIfbe|{Ib0FH7u{T{ZOI!ND;pYI%$sDqmGIhU z=HpJAz(^TJB`zL|5QzDy>i8`Fw!Acd1)6YC5%`wS%~2b3VqR;mH4_nu-D3{j2hNSg z*I|ZZg~e0~?gpeV5kIUiZ$jrOaNCP(yYYlSRwR9GOM+aFW#=9auvS^z;m?kw)HEC% zvE2pdO58qrl8I`Z;EE`C~SoWRlbem>sAbh4=JfFiI{v_uO08Xnz73HIPxYN5{| zn^IxymKrSwda$*lyA@81@L>^auYN z-D~A$<9=ch$MfwBI$OozLn@4r)Q8Ii)kN8)FO|^_i6+!a>I*UR(XB6&NO@fpNjUC}(m zTxlrIu?PGeX!<`^3w{k9ulj+bG$-xxf)U_xh-fSe4Fd=L|pyE%efRD;0enAy$06*w9TTb}3uuUB&wz2MR_lH-+X zj~Y<@3KCZx@GiyVrh6gZvy@3O)tQHaPnjM;N7ubCq=HZ z(KpCoA}Syt?C`Q{&suNt^#5acz|jaA!=*Frj! zDy-m}%0YRv<4G~X0~`3+a?Ry4s(nh2j+H?&svD639n1OSWzGzid2wdJy}wQzju>$? z)tM!LJA;d_?N2`htA`sLCWM!yuP`7zn=&yy>I=t1MS(v)gBkqllh)ZEiI+72Ic#!p z9!w%0KE9YOcY!A5FpFbltO$H!uH>s+0xlGJsf3Nv$G0PQ*@ceAp z_9!=S4`(Yg($gc~VOXDR0)a)DwkZTbq&JUl{SHNV z4s!k3Z=&Z$GNB$gWca*QZ;n^OI{wVnK|}aUQl52d-87)OYm#95usFi| zicXxFsV8>^U!h&x>+9jrDL+I%8!(I=G`_9a{TPz7df*Y46ge!eaJ>5|?@Kd-tH+i? 
z1DxTR$%?}3A{p98Aq&8U1E>ZIE(Dem|@ zeMofxZ2u!-qG05B;Hc<#<$)>8*-*K5EWDz+aDzw0$%?ux0zg_j%;D&d0_;WzIdZ;| zU5Gd~YUW@>S+e5v$0LG*1}-0MBRiDIeLLgg;uiQ*x2U~x<4@Ex`U`Eat=OPZK|wB( zJs9>N9$E3M6=yYq5}HHunC^KR1vjD^+9QJsX78UA>6G534tja^(^ zs#F(|<`#?42W+}9W~`4DG8Um!(t}3`{oHXisAJPoQQKTZ2i?Y`WogJDk_m6Xt~X0b zlMZ>XqNjJJ(h9k6Xt@}^UOrH4hD&@*ISTwDaNrpF>5eXoK&xLa;Z0Zz!>JYh9> zf6poj6*;RQL;S-1pCLarLr&tNx4lvn)$$&8(gc+;gZ(a|%m74e6C>#3codwhj)3VGD`AN#IL z_;juz=?$&keDR0kS_ETnS6p_KU6%hJ-MCx~zpSguQ()hIssGOz%8}LWQ35jgfkqWt zQh+^4%&__kuLD=W&(IAfxPzTj)muMHbG?8$NAJ);J^*q z!MiVuB!w9l_!il<_YR{p{>;G1P_Z&|fh{SwdTn^jMkEa^MR_@Hb4TH*y?Y{^0^`a(8S&+^#(BMCT1}`s7Iy^`_HuxbmP^EJc`wp?j+Vf;_ zagHO-m*1$bzg+=^GzGUIjLN;eX%S^50A&`6zxC#AuAC025KHR`K+X~c=WYkfd6ELq z2U-XY+;c)B{5)wtU$0yxDi1)b z$*76a^>AL9RrciHZ+rr>A~6+GN~KWwc>yzuT34&80I;SURN8H=jI-?~<@-geM*>TH z-*1w;R4f3HlE$Wp=G|(G2U@n08Rey*PME-apgP~~LQ&YX;51)jp2zxIz9{EkK4ftv z<8jxii9G;73hlCqC)iL^yZ$buzm)`iB)cc{3(OMFfXD8lE2+i#6LSn_xWu&d`@&uI zTLULv2?MPSh@zl@@UOy#Kyk4~z_HC@HwZwgYdp^7Di7&!E;zUBvmTPB!xC$LtF!)w zi^~EXd*fMV$1NZ=SY*{D_1eu6VXPD7YgtwD4hc-@5U( zEY6undjrALE-P-kL!(L?0Q{lrq1B&@W)%nE9DUW4*+%k#`Rem+#fe%srp~iBJkZpu zw(40gTBE_Q)v_-q?X7hyE9b2x{@zY(#7+416rSq>XnZorSzSh|YAdX@xfQ-nu z2ak8A@Jg-4Js+d+t>*u|w;DE6d028`o|J5VMmlWExhpIooW%AMb|XmO?t3TVird!` zN%$nE&vtjNau2veB=r3?simMwMJ%D47N;V>g5qX2~5- zo3Wa1z}SgPExfC~6!kiYpJ5x6nhy8Xl>fF%^T8wpV`n<=1?yCk^I&&EZ)wGjxU2c0wkD=q(; z)8)6x-qQu*8M?+Jegga*M+SGq&@jy+d|Bl8s>p2WwC?`g0|9Xbhbx7JhA@Dq^?PXc z0yeKdp9z|_!SiBEP!B}D>#IfXdzzteG>&@zg`#-uIYCfkQ3=3pE{@@Lh7+K-pHu4956ZeHjmVKxd?@sMNmN#|I!LT z(_-0%aGu+y(A6mIiKg1kQSWY((|BjQ0tA?N3;YKuj!MEafjaUh2SDIx58PGtyk8GT z9&UDUz%BABOH{uDM3lEh)<)asl=x>;+Z4%GqZ{md&#O23q1w8k@;U;Y@j>Zj2L&t& zLR*quOPzgZ36jfa^R>43w-c@v2>Esfc+o{h3z&eK)XIHt_Yz$Ja8n~xSc7ksyFSFG z^DW(!C((};fZ0=J_kQMUaR~Jwqj&G#3RJ~#z$_zv1+kMWo#{9kD*Ytr5WAaO@)0L-U3mov+8WgWAK-UeBf6BI|FsX7w5KjfXOQ_V%7R{V}v>fLQ1k+FENN5+aGYr zPoU$ZpHcGme$HHW@MG$q1pr06&9!lc8>jBsK0r0p<-g$S)+{ph!{7O)L1sE(lg0Z; zI?(gp)Z}EC^3QT346^lFOb<4HQg#T%E-a^Xq}<~7tx*ZKaAsFvj5~zX4B8?s8)0Q` 
zkKXhX2w5KjW9Qq`WuwEUG!2APfme}Xr-mxHJtBtb(&59uJ6E{kbzYF%UxjLF!#{b< zz<<0JI=8+;K(JaXA;4tZxH+aS6PyZA8HLrWJ=`B)Q=kOAcW#Q52Gw;M0UY0FKJiRR z!YF2_)}O}cUYyK8)xtVdeYdZ}XO z*1KS=Cxf$kC{yfedRjEy7t z1zWy&UeWq-*s%J#`ZwSTbve5jRAuaS=wVyeE4x`kC||5)x75k;2)>j`Vmu@e(xX^o zYr1@Em$~u-)z{{C;a9uzTbt`O{( zHK2s1)hmEo#k6mSv-x5~cbj^Sot-^&)DWo(zcI}{>3-1nzdDj8gfKqomECp$>C)4`~G(!$=5y7XFFPH-=Xnv_A> z1}iLOI*P7t2sj{ot4@=_{_A}J_eypR^*M@3eZbZFZ`w-_O|uB9em#pXuWv&q=CK}* zmRJgPQW>@IAGuK9K?6(71mK_9A`xm9lX~H~f@o*)=JD{r6x=TMlq6XaV;r(D~(v3JCCnKymE1$^j@b z^%9-1KiZ?4jH(3c3EfQQ711?OMe(lIF;*Y=B@RBI6on zR2ONqo3bg7A&sB-fzGu=A{e35ylCzCq1X~~FjoMMcmZyzWN?0T5iQwcso+dP7)v$Y zeC+50EzEgB>sI77A=o6e!J2tJ#N#7Q^s%Vk0&F=%KyyW0-)N{w=e48aTFIA>F#z!B z_Y6N1sZztrGgdc!B^lhx$19sJzEcc0wrAk{Z~}ye;dP;#n^P;^^cu-`=ID%Hmqmj3IWdkvlobGBGN0{K6SucMy>P^!zshv6#X8?V4-3I{-2-G0tOW*6gMOs7;?%DKo{W zyXHnujK5fO7BgWGwU(PO$!V4j^ZBc^>hVT_9uBLfC80G0y#D(}((oDXBp*LR0#W zcg_#%6rjd3OaLt&yCs{xa42?79a$% z6@Z{rZoz_ZqD#~r0r>UQ>>{!QZoJs}676w&IF&{QK|wQVtB^nael>n6PQnWtv+=d# zM=NVPggq%D-f@J_{a0eOE(RmMUBIWhHA3q-0IxG&?hz*-!c};9R$fJ=u3sM2Y4)d0=pVSc`u!7| zX7l@}-ASt#C5JYN1DKljp*nGk@T+iTj|;EU<`nVzf;)2+ndC+j z0EV@;s*OKP0-#MBiAR3B695pG<8&GNBZ{#=k+VQC^S&~8 zl)X>`ZV0$!nnoHd*6y5&e9im-RPj&d%sr$kjzl*U_mFh(yKHUn^QF@lEv<>z?ZxFW z!CRq(RZNlSZiS>bw)MMHq?;{(!pv=!>w-F1|Qo=K3md ze9P^IyAw8w?S)(p4B?V-1ZVD}aDzsY}Dq8p0EqsUuzA%wrOCq%Q=X<&$ zzb4ffY49k?Ed z?K5ZanjxI(>6~>+*ot(zUq#`_udS1gruP7m`mlrk<<3p51(c zzKu<+WLq`oUlk_j8lYzxh~nBcedoANgd$?V=FD+&N@g^;jEN$3|#L(}1LY%RcI#E`1-; z#m6CVHSHnkWg6e~z!9VR?U*NE=Ik?TOnodb?4ny+yL2OH*zNTW>|);Ceqy=d0@d|; zJ3D`C9!p=XM52bFxRL9Re*mJI-j+WQ-9BOL;0>%w-NGNnydTs{_}Au(64tZ3>q0A5xA9A1P@&*iHk*29u?aC3R1xTiLwb<%`~A}Ldl|HM!YO^L0`xnY zB4_;9ASYz;a_A_T-8o6ZFy_v;G}VS1+g1gnXF35q@;24){NK=45J$Z4$8;lo?38%? 
z1ozu5VPDc+2&%QMSrx+y+K8m@uAl|xh#wT;>Q9{`&{A<}Bz(+PJ^7mn@3&>WNgm;d z z4dq)ZvBeZyWO0&#S3nX*;<>jx@UvKp?Ekg*)=^cpUHhma>INh>AfN~cNJ)!ygMdm) zNtdLwGy-l#Ldu{7=|;Mx5flOGE|Ko;KKDX4`aIwBKHnH;jC00sjNj{D2z#$}ult^B z=5@`v+v?S>n~_V(Wqkl+iL1N2uIEjPzB2)hisZQ{yI$e_=}sJ>sC91v*C8MTXgRA1 z+FoWt8y*dojooA9t{zHlY&Rz>eW;vSzr5CM%cDVwO)pZk zm$|fZSv2p9oB^Eg3u_y7sD{nGt2_rFJ16A^`uHXb!o&H-$+7RnP1P5NfMC;8+_$Ac zA51uXLr6bMT(e;ynQ!?tGtb`V%p`{Pw#X{;(JFHv`OIA8qHS{X9#H_3NPfD5+9iF0 z8dQM^I@x<(4}1l&+ctNoK1XV~YpvAaxZ7w&Q??KU)Nilxi*K=E7CIex;yaY+`Ah2=L z0eq|9GMy|a9BHv)LnyMjjUvzX%Yo;}!X98DV(bL2|^7e%yCEM+T!hCdar)?g|cG&sAr540yf z-R6b*%`cBVsNr`erbgrpT)#NkS`n0$|{qdR3)0W=T zQr;8hu(~EKMzjfqlbGeV2DaJo0}LcDjf5WMsn|wm#43 z)Z=>Y0+%slUxTS{bHdU8`aI}9H!M^HqRqC)X$<*382Db5jo0=x`J9=>s%% zI?vWFt3!D}BNuBb41!k!YfIWY4Jy>2)Vd?xSQr&uM<)^yCTfx@5|N_#@T*a(W1!Vd zfLkGy{!Xu`K@r5akl(S}U&r0Xn{GxLIl8f1UipR5qOEV}v2l6x+2a-tze<#ycL4|K zDtkyAl4;UUVFl7Dc1;Tpl$RW7$;hQIbp;)ayTsLU2f7T7z2z*Qkv0*+D?g{_gG24C zbtf=tT3(Lh!^2BO1fo`7inik!a=>nm1@JPFEHYqR)N|oebg#zHdVTDD6b@JrNAO5` zTx4~S8RU%!q@2|C8EFL@n;19WR!Tw2SGTMM-fP$X8uEtAq6h;?H^%}aFo?tCF8;^D1(Bw4ny6ws*3_FP9U%LzPanc z{DNYcdnc!7uSTtA=PW|+0}7cTT_1c=f0$93$Czdh4%ifp|?3RVo`GQ)Dq!ird%m8%1J zJ=>LyT!wpn(ZTA4W;?s{ACdXE*1L$AnVB2IHRLz1@H2-zzOr=?`Z4NY5pTFZFgBaw zk+&7)=K#moB<0c+Dc@RnJC=B$xR=LE9F?Pnf%T}qOGTTXrq^eIbzJqkKW@3{D2+2c zh$u4@Ali>zXT@`*fAI?v8xva%KHXN{Ioef zcB6jO8zK>g=9bU#MD2`%MskN&F*E~pKTRaxWX-Vipy|Oge8O$<`mJZ^7CREC`d6Ts z-=_LJVZs93eG%k#i@4oMFJwH)7tWu{j`kAH6x=&XlC&8TUeuh@@6OU5X3uIHAAe*6 z`du)%1izZ_Mq!v*8ZgMpCPoUh;d#R&&9b9;$)}Rbull0rjNCKB5BhHHtxY+SEFNhY zwVWcVK_?6>#B?fL&{wm;x$}}ulVi5Fy@i=}foa$E(CKShunXKk3wS>J;poU`7<_S0BC4>S@2; zWBi$Qr7ti__DgIBfm)IAi$t{$ic4x~xg|`$%L~G&xmp}AuFVl4l@$0>$i3^%8;>NP z;#CyiA+lX>lSsy^#KyC)wR*(r5r!0)Ux5AUKU5v`Agtg)zTQrJby_& zG&>up4)JOqWb8S)$NbpZw}yASi+BdW%#N1kl0Wi~VBN+Q&tludhrgO4L3I%09P~vs zlu6Z#3WvsF)6_fPy8Vw<_yT4TRZ2-#Z*ym6dMG6Jt6*}Jtn(W<*{Rw6oke6m+93l6* z_7+lOiF;n@$4EbNzxT>i5pOc{@usSlAAkI?uS#4k|XWY9P2OtmF6YR z2moRQDRZYHg!3?_YO!=FstlCw)PMHL{H|_7>!pSf-ONDCNWnUll)wETLI|L 
zCpe^2Q9u1595g&I5us1}lj#1Ji?qT(+MhC9{(nNJPd@f{lEz=o^0+{98&J19ybTpk zVauoQFmm_MfW%Sf$NgEV{SEtp4%Qz6Fvnd{Lc&4+`d6?LmQvctrIF$1gpSsjq zzonfx8lB=GJs6Sn;3RU5SwI2NpgGMi9uX1ZSuCf-vuRx9!l(fFn1MV~66wbgP-a^U zm1K7C8_$g{AG0>aZ|E2|!-Y#Oo!nOE4lq@J{+}E5yJYsYhD8XkY=~jjn;rY{IbhIe zE|s4;b^im8PKUFGiC=kM^J#DQ+9TfM%@y(y>LWb!s`BxDnYP-I zL!u3IJ^)gX#aF`fzssLpP-mrN#vqTA&#*|um_G@Ozm^M=|DQW4ZKEqR=qRJ7%JZ(5QoH%7wttczzCN+teO_QkLSNU2( z%fq{YXHNJEG1Op-!*2)aodhQfmuVy1t7?ii-AIo+%3;81=gbv0_YBB;>`%LbR2hR? z#BbQLBX9BTyk1nqXbpS0g?RJ)b7*b(LaIN3yDQH5so})GVu$Mf#VhzH;taYu_SF>R zdRZ;Nbs+{HDL+o<8=QxN`28ZbM6e(!Mku4US zvV?hzvl$}tswb-vdI?r{+2WcQ(tjI2KT597XV%EP!eZg<%u`wai;X-D3p&3Kq$l#= zw@aP|_ofmceb7n*d{37w=X5m$jf zYuao4>g4^;SAmGjPn?IK)+DN>_U10~#~X?|2@873Vg0(*5GxqjX9{5N0r8^e20F5N z%`~%I_nf$6$MAP8u#WtDF)GI34C1AkW|8o|6ko7AOpJ*ikh?>S3Uc3oA~fZA$1%^e z>*kXm-{0gw6to5SG+{sJ78uon2tPUG4d^IJ4}K{ga9F8A9-QSB+~tN%;WtPJRgWK> zA0j8tWWWEyZ7xz(#<8$-ErssarnFuHPAXf!JmW7xMG&CZawfaH(G40KfSkDHUMhyXFZV$c0A%rf3jmL`dZIs$ZRNK;NFV^BnT( zvC?_{8s;ZA*@UH=$traPoxF0bQV1drbI{YqWGOtC+$OF&huq~pv2Rf&EA|)EE!_7RY7P*cGh^Isy%vC<`^xgS-BLEsu zn}3j=ssL&rqkq{uhB{=Mx73U%Klig3vy^*sXnik|0h1C4uJkAM%jx_DG|ODyTG6WQ zdM`fs0PLF$&*LVzol2inYJ>KInM zTf;_Qo3qvu`1yW`2kHR`TH?Tn2+W;63?bv~><{3Dp6Ik^K6x9eZefg{`%ql4Z15g1 zD!^`Lqe5*ULpd1wanF?AKx*3W;9IPm&jygnIYjh2u?VA!e-u{^%qB#1m>>Ek7XoJb z{azuLs1fpGv!F{$s={MBB;y!pY}IFYS*$yD)(`XdH88%oQr>*t2$?|8OV0W*+srM; ztdx{pfEu~$n?UmWdnQehRuk~Syq=R@vVY@<)OyfM2Uhu&I=icPasJOHZl{*@hiUmw7Z@=nEcMPDILLTZ&Gz3TTYJmArk#_W_mpcC8 ze=-AGMRfp5IFdh9(_Z!O**ZWO1w_4*w#2`rdmX13WW;$~GRsyQnw`~^;|^N@Ch;Bb z(2PudXyyI{RqEzOk9PD2fWWHL4EdWDXSYs+2V;+_f*!4b z)H*Pa~{Vyr%Fjh zvn%xg6o&>>wHJdyk%7#Lq33||ZhZ(;dQ;czocltk{ef?%UREZ6f3ugn$L9wp#9){* zV;;a6Y1yhgI=G?b!UzGUZ9v&pun!1GNJ5=l)?WXgWJLtTJO8tLDB?T}AT^1Hro}Oe zKaa}#{Ged<=;dFu4jd2D511iVWH_`KQrQM@wBEk#wPxO9@I0uMkUOo%lSS6znC(m~ z*I>&wgPCpP3emfYtDVUL?3EvF`8~}`0SX`js<77A(?n}HOPTF|lMr}&O!n9!19fr! 
zz{g_&u<~Q{TVR52D6A0zr_R*hX87>CwUrvkn3~wFeLU@qSe1ghDw=3sd9WE@GB4NT zL*5Uij+3UY0en^c7yz2D$L`1xs{X9r(Gmbf`$m+k)zFU5tK2P4GBkJVnFvOzb-hSJ z2Imbq8#y*yp-HvTWguTJ4agAMiFF$hF4O6G&9fShK>=eiUH#tMcQQP5Bq2U+CYmweaOBgfH*U8wo~iQqP)wFq!~R*5P*$ zrZ_Slz<`ny%Pn|_ZRp$VjXqe58#RLAaD&Yy!IkJAY#Qj?UD>MC);sG#fqxFb)I;{4 za0%sH`vZEn3jCD@&NnE?x)7{W_&SQ0*_9qS6YHa7z6rYebEM19(rIVr-bj$XhZ-*U zIBA5K)^8>CTGx>fK8=8e`PD=9lja?GKbS%oeHTKn*cUbx;Uc=`w6;o`b=wBUz+6c4 zDOc+W2q6@7>)qt1wJPLHSXko6z#fs?{H!Gka4DmdME0Jtm9`qT+u|k z?e<^*OD)VJpA+urYc-;J9-$!1&khkrcqCj`e6{D()lc1e5VHNX2M*8|H$Yaf2I_N$6sw~rcq5U z_}Fj%*s2J{;wIR7#RGoY(o9p|EVEeu05}i&xHYv0n?;q<(qyZzy%MUmavUBUvPG(H zlvxFm6jf|wyxYF=$fu4jWQHQ6>JZEeUd$5IGo6 z7anU%LQJdi!lH6>p5#W0(ghCN_i-1$0?f82 z=x`o)X6|rrDH>u`M9NWBJ^{r*=|guH2b4z=N?WdFqPVzvuQgM%D`>yf7~;ZF8YpoY z^hnP#YMmc9pgRi&zCNQ+63IcM5D__h^XeNh)|sf;S{&?K{Qgw7t7%5S)p>95y~rNt)H*yt6y&}u$i`Jxny>*cyV;^n)w^}*^hS|8y2bxyp8T>yh~ zhYe-IB}{S5ZwD2q)GyCU_`yAoTh?cn7svAJ7IgP5VH=!!I?? zN%#?5)_P{M@=ep;w5$f#=AK>z?hw6rD9&wLc3iLP42+u6Q=a^kQ5ImvH$x=4#e8qh z4EZZL4|xTt9I&fzh8PGe^*7@)h?q+Yyc24-nasW!}n&Mx%B(dwy0Qc zCL?=>uXvO22LajS)tomD^@7Ud9RR?kPrCjHm8cp%GV+p8>mizXuUB-Nf($nm7%ZUG z6ixP=H0joONhpKr15{PAoo^DaTC=*j%@v@jO24*RhZaUOcHQFI=StqRZxmOj0$C%LYVCNe@1fUc-KjZ5j53_ZIS9Ewuck!OK5-8kd zCw5bm31_epNIDD`ba!Jl$B)ORmJU~32Y%Y2yx*&zuD7J((8Suf#1NqP6Jty&i+D+3Q4 z>}@(nJSHna`RPVc7t&FQSna2rZOU1b{NjK?r4lWz`8CJ|)o`M95-)oD&WbB8sW^G@ z&5N9a|&9suMJ#&$!rg@z?;Vfm|r2}`ii zs5q8#l32y3G{xHP2OC%F?zWp*oWchN4qzM+fPX-S`(?l;(A->jiU?-NYs{I}SCH|ob z%>Vqj_~~P4KimChbm4KeFQ#5%5GwPJ8;w^(_M+joB43r^E)%H~^ky=N&p`o&*iXE= z`(zI#)brSDt8^3 z`_n{OSwe`pv1uDxa!|SAZ;3xFZ~M~?=E+pKze*u=FFDg?XCgFSJ#o^A0n;4D1Y700 z5dWclivSKF3*Gd7G8BA1pIc@i8>N@V<@VL0c*L_;5tYj$OupTuL$9_8oRKD~=}IY{ z>kjvXqY3bt-5y+fvN_*p@+2fcrgY^iaULKHrsbCJyVNdes1^SRbX$*Cij^+mXK>WU zBBv)0gC-aubxJJ7Pw$A>RcWWl-`lT=zHG(8C^ev%l|V@{jmKO%rE7~%`gW;k*eyp* zi7gQ?UVK}weakH&9xu(_R4=h@D+K2&3#OV1l{g-pItZh~C^-2F{g#Pgq>%R#A~g^8 z#8D9ErGg|@HEi3&`oL6N)h?P%+2ILM;w67w^iQ-JWRwWliv~*duYqJ_L$^>0 
zVdkbUpV-z0zu}r#+5~Y~>NT&Iwnw*Lk4<^X3wp4@H(6#q9nQ*I83f6z5t)Ek%;hp= zK-g+PoBq?f{jzN4y78#tgGGnr(ogB9syRkT5}jx;f4pZ@?Dv@Mb;W01`SOluTJZG> z(6Ku^Ok$um;G;t{>(a^lqlruYAw(~Qj@HE%{TH>fhEtQH!ddT+fdGR+{1VPHv%Dsb zKI#aw2OeaAus7`MFgIViRB>hLb*c)H7(^n8g>knF@Wlv%>Aks7$Yk;)E0mjQiBQk9 z5Ox(7;0PnrMIFJU6bNg7wF*3oA|f$-8o^O-N}iV-X8Jc-eyHG8V_qmg?`T*#jLW=# zb#U1t_zIh0=fS6$S+0UYR%d!fKh*o8#D(yXApqO@;_xtvhl!*>1l$&xmur~^q`)e;C^cE>6# zDs=A%1XfDd!k3=E;qv&1DhNmIx;k`6TO4Jnd|xadUPZ5?^Wqqk{opI$Mdrq( z^p%!WgcyC#THr;*^BQ-gIG;VQrcw1X=}YBxX4z9Ak6}f?VAD1 zn(IL$?e1-LJf3BNh}>mS&;>5eRFCl3NsFQ}G*nB?3&~Z}FqZLBi+8Z#Qe$3?ABL#vH?xe5?p#*zPCTo( zQ#uSLB*(cZHMOYopwn<9#TxT`x!xnaEj}%-+g}An>J~hlt?KCgKU8;FywTO8}jZei(bfWcsr=e$o@)nZ|9>_on{( ztZ^?|r$Bp%w5yC?_><~>hWUrs;-gVY@0J7&=h%KD_rhQLF+$qWrd!YqQZjSO;a93f zwzoazcVF;=)dCM{c@MB!VAzb7)rrV z=8_M3%+2zok$3ih*Q6{pM(COXX4Y%aSBgr_6}5>IE0tJl-QU^rnAs2jG-s9FFDOfC zP$vuJ^jqjYZ&Xc2w4ja;2JdKOnLgi2YNnSZmpB3f%Q4hsaB5b-d#DE9!^BEbW|`Cd zj*&l`B)~QZvu>iCtc)fxOS~_kr-4-v#3BYWE~)~+021h8E#Z%l&`->b2p@nZ z6JexDMC7(d2U{xTzBK%X{qNt<8bvu7_IJh`d`_VC^B%d{qiPgsJZmS7^W{Kny6SA@ zMuw#UsD3pH9IgZ!d1y+#;4o=VkeQC$oYb>LoiA-a3g+E=t@-5tJY-U{za>Mh0*Hj9$Z!vSyA`bva z>lLK1-oOXy*ROU5Fee7-@F{OVQpiJ+JR7rvj`#x2ZPz-_IFmvU5G@m_ETEk?2jM*7 zVTpTexG$sSKZgN>!AKd#wfc=R4}w3!sI^=-x<*~nHnbwvxjf)y+MYUoB3#c+cFWn= zuTv@#4Sn7-bgWcR+jg6-`SxL^BiTsGNuu@h-D~2zIAJc5Z z?x0_T$;r?UAes>kFp?kg3Q-gp(?Lg~F?YPhe72^RpL|`iich?T!%-4_a6#hPForL3 zpIF+@gGQmdvd^}Thv5R}6BA<)5acko3dVk-eGg-e-=1?J?f!aBTigMMVGu;tVmfT5 zohkF%V}Z;`p>eWh5|ktvH7Q;j-RfB*#P=k`1|7%zF_(JCNYLIow$y{c@Jf8?Q?6tO znqh~&*$A&)KhIR9ZPKt=-`2z^M#^L&c~8;8P@-?q;Y>&1RZ+ zNOLy9@@b_$6@tq<6iEgau{po2^4q#;y;Oh>xD-I+>)pocNQDdF2H_YkGlj3_+o!YH z%mNZ*(+`03RVH~$HvxH&4K4h{SNd?5ipk49%_K#<(pOmsOCNL?c0DG}>h<3eKfjWh z@B&+)7#FreQn#d(t8VZ{j_HS89vF)o7deAo6MEhAGy4i#LrbF=p{8Pf*RGeF)PZ>Q zjRs)vB8C9AxQ{ASgJE0KP~ulH#$@wTC~JL+ z_#bqTZfpk#;H^4K30Q7MI+>rEC(n#30(CQaJ4-32TJ2Bjo<(P{E1V=QEG^SkCMSI4 z1#oSpl}xyJD9eGO?B>F3v8Xk1^|__1BL=JZN#`iPo7msHm6WaTA(3HH<~&!k_YAgr 
zAr9vdw;d;-cF(wBiYiR^RSrzuQ3X>WiWS#Op8aV_sn+Lk$c4Ki_^ymN`P!kCjJOD^ z=GFM7L(slQB?*&3yM?i6@PVJR?oDp-MVu%*6-Oid*h@>a{XkZH7d0>h@%-{<7Iz<$ zxcB86^m(A|Yqu>n0@SSrS|HIvO}=+X_vJ8t7yB+_7V$xr#F$eToTwRP=ZN}nY5!1p zBHCalnem7v)XL>hS7yroMTAB*atbecCoW5IYNB2Q3i|rXGfTqGtMxQ_K%=L_Zbvze z`cS0n_UGD~=2UR0IMjJtO#{2fnCUY|LCdq=D(dUdZCm92(cCD%8?d$%G`ay1NNGdh zgL0{I7EL1-&veW@@R3vHScZOpoaoItreCK8UWFR@n9`qyQf?gyTm*mUcHnIx&f@-b z74XwCd4wwIu8rF<2v=OYmrOagw6@u^awQi3%T}U?M*;FBi>Z>s^`2?-Y-e}LK!zDr z&tO4h+llUQzUzcP?;SgE&J4?d|EsuZ@=_<=hLx!kf4MS)UDfO)21me|bgbAuyPqj@ z{mWW(xX_lFB5BLC>rXd}7TgdIg z9Vr(Mf_S`*Q&>y+L!R9Pw1M{K3m-RTkv=8j5YAFOL#7gVJW>{1>EsDoWl?}`mf^6Q z?KE2*ukYL6S)(^1|2`sW9iERa*~kQG5k?nttuW*lSi(>)aK1=g`g9V~#w4DX;ajOq zn|nffcoRzn+Eror!`w&rI2;xS8$JbcPliFsA*Svn;E>+m>sGhaRJD&~KW}>AVAr2- zlHru26?}T_XW-iBycbeWR;@FQvE5TGdc?bq1&03`WB8Hq7It7l^@=O;R!~2XhC;@| z-niU(k;mvk$=4Z=s_#73Do{y8KHYldvuf!hVwVdiTTK*)vfpLtlzG7YWb#s5=}Inm z2E+(?cJ&3~N5`EiB6RVai7m_IO@@J2akG&rKS?M9&Y9tq_{SQvp*7Ae^dWcl#4~WF zTLx5Y>QbQM@|-(NA@{p)cP?wVF|XLg9sPS0e`&`^qdP-e>I=+M;emlOxk9d;UzB>X z;wH49Wb=`%7m-XE+>%=lKrUzZfezr7G-U!yH4j|U3?2qb8yUfa^)wC@Pfwxp>lVfu z!w$0~bTL)Sa3X$ZTZpo1z-H9^9m;;rgN_;v zP}0;a-k;+tAQeb0)ou-3>R|33IJpKw01n*CXclK2un@(vXO7)YA$;4di zZ>{VmyKC!kt>{$Q*Qi$gG~{wG{Q(r#u{v3Qe+E0aVX1^0Wc~K$w@L3FZ!=FM6Z2dC>ZT(X7qaiq~Yor@Lc$Q2bXI?$Y)&!=bc=AJu*^9v{8wNNw~ z(ct=(#EZ!7A7NrK@6VTsw{V2n0fr&gmipG=$)baWG6Zc?HZH=;d3<<(FZvExblB53 zKRizseaTNK!%Ir)y`du;%VE^9&eoYm!|CVhH=K60)XG7AEDG%K&psDJ=5c zZ02eiPWxWH3r?R-PT_u4Ym&;-MdZFxYUn7s9W6BocE(VR^iyjRql(DWDx6*ybIJke}k&7ZGoEn_PxZ1cfCAn=&hBtQlh4s1RN5_9TccWEPr^aA= zxa#cgEk1wbDO~|yCtp)?iba0C^L$A-DfOcNOou(ljwZ`v;5>#Fk07VP(ZwXxvh=_l z*tR5%_3!YE(>?V~4k>l4*2pkIe@PN#3E{>!s(dg;5CmVgMm)Tajh@g)wA)uZYwd`8 z|FD8`>L4U4^gXqJLX{ZnH>&8gFJYI*v53_DvR*>&=nNXg*|W=<{t2nG57n&I@^QUC zo~$@V2opTRRa%bIgFil|5);v^ zBpcghN#-c#3kwSllLe<ewAUoYZX>3cj+gs_0%s|6WE{jR#6gdj6OfNk zWrZ_MQLGv~tj$~fy>u0OV|riIr~jv6f!EQg);VAjdX1OJm!Y;(ey*)W?U$;kAn@Kw 
z`{EtWRUs^W_u=VFTTTe9b9t=9Nka2Pdm~ThaBxfnI}~M?kh}j+AFulPH5&Pif4pIg7|_0I`!uOCnY>M=>I0>&)*_Z9s4lwfm=keB?& zRsA2gIYbI$JpGUJ`x21O&#C{5lv(Hxtt}`4#WO}a*vL5X&p)2p$J;l7B2Ec9_{S$d z*J<}V-;84`tt$FdcjsC|6u*IFerEgs;O~Xw5ug_2opT1`$ilTTA z(LXStg4ImrqWq^6{9TZM*$m1JP8+0JOroilo>%Aujm@0%(yA~2+n0n8B`E%n=%p`m z(yL_NE>4wPLb{`Wzx=Vl#i+%+F-v^&GpT$x`(w|l;e+ zf%Lz<9&#~xLK21T-8odZ2$ks@O{_%q4Mgi?CT`+Jvq!mZs=kQG4LEg#{z)K(NDu*L zRJrfJX{0&VeNFqyq)gFkqF!t@634Xr>E6HI`Z|32o3g2Yaq2onlatC}Y``NUsk-hq z67x<}zWfd&^X@u-;;rT$kfGXT)&DB>z2{fB^p86S?S>sH7{9!+TkC^&w6!|XsNGbG z^P?=poronbP5;2W+l6P8df3RW9(%CxQTH8hLa z@&W38Zdi1~Z4;6?OTo=vefQ?j|7Ag%Fxy8r{nC5}U;cQ+oTD{<%Rivlr!}e9hf*FC zBAys&SquFy=W329=6&UN9BDEogtsUPjxTll#_`$<=F;66Rkkg;2h0CHJlHWr^(#;>m1pS zfV2g>x4+@=;7KQphCN>0h(nYlC-Mu967eM#kPF5a!6{llwK8w@1Y zc$AMRzF&zX^DZQE!kc4VH593{7ysd>$;%AVwSyl(r&*)Mr$hh#^bZjL4KOhS%2xBW zJ*#giS<#RK{!*cj6}3g4Vbo{uaLiR%o?L2BjOvwZptR5%$D}mxwafr% zwKeDO=Rk|im-X*xj$|v$P94~Px;iMWgWR9vS`eK=MZ_>yHRe(gd6^U25l(mQw+JE= zJUjUEoxAV0<_fK{Zy)#=VAXnU6oRCz_6EqK9P08+B6?52v>lJ)aW^C|zN^;ybL`}_NqBi#>o2sP~c3<9Sd z2!t`u8N$p`DD!pS<5Jq_m8X=VTlD(Kc(9xJY^3yktyWEThj%gnJjl#sgyrGq&QyhD zJF`amf7L=P^Qiq0fVeNuut_a!Sk*6`&hTbqkAA20E)(%;&_kp5vhZ=+%Y&fv59Q;B zQ+Sldagq9RMAkKkR9`pFAC6jJmyz#{F{4Cgat}?P-aUqfNRwI{LTS*cVXr*)9u>3#QirZO;-(agKQ%w zwiTa8lw}Z6;l)a^>Un-GOE71s!hBL=?U7B+!p9|ZZz;zmQE~%PsiyzbE zn%DnV;r9Gc)euY{8VT@%gpmM8x-~)Aa+mn?Rgo*E&%;tW=-O5%)9BhCCNn3dps7%g zN8f(tGm+8(zdcWM7PIl%jRnFv`3|quO@KdI85PCmw{2OU={TAr0nkC`j}Zqd|H+;~ zs+&j+1yq?`XF(qY-YHOv*>-W8u$+u0G7&AOP^Dq^fpemAvG#kKQH891iObgVU?NV} zVh*A7hz)o~ZQ36f$%7KfO0uwG5y8&eo!aN{R1E@Tk6D&)bRXq`#-C5q<1 zt^~Mnivgo(SGRpoFo;NPgXuT0S7RciJ6Tsf&v@Y?q%+=diDl<@&$

Q^v#wd`5$r zwsRB)OLS*9d%Xh#Yn?S}(~5pLe`JhaTBt5_vjs&~^^AgUwYl+we|P~_%mN4ed-A6azowd#tskAvVNe@t2=dvua{EB^kQj2%q zk;cIrNQsYxa|Qv0d;s=LP<*u2LE}(Y-LF=aE&7)EcYws$69fsl{aVluntF2Je_0HS zLAzpq1Ekz0jz~ePM9xZpm0`VnmHZN=X$qPJOk3{}CPLDp$4*6tZ>Gj6a%XQPY zievF!X1~O>^We;pArZ#1L*+)j9RxEqmOb|YX4^ez)5uuY0*ZcL>j1>{3LdqT6{<}2 zq~L#an$T%k9-;scoy!D(!$-IS_3VvSvSW129Z7*7Fw z)BYW~_D!wxoE|Kw>?Y4@(DcTo%hr-K*j7CS;NvW}ylz=Q2}ldpu@iYNPD8B{>|Gm_Z&rGqStG@fMI23ZBNqZ z@OpcJ&E&NLXR)f~hMO1+FWw~1Pyv+<9(PCQU8Y1O%q9iRkk+J0z^_V|7lBn5pyMUv zwW+rUfb=7n&|pn$nJ7M|(k9N~eOJiXdpwQ!S7SfThPX_x%m6(RnagtZ#UuNAs?b4q zFoeE5jO>Hl%QtMDCPJbxytC4NRX)$CGy0gtyYEZJ&oz{DoZ%-F~Ul`}^h^|oKSPemuZ_l`>m zV*uWiAJ1@xjaz>^?wow`fGOjaT_u-9e4YQHJZGu){P2KtcFXX<9OI=Nkuzt`&)vHt ztmxqW9|S}v1%3IMJBwuA)Y6Qb|K^fOrOna9z-KQv))lZcU=OxyOHC=Nbfb-v19UtM z?q8#M9&Q}S(?90iZokRg-x#lG?00c<<`!=!eZlVKi%Y!-ygF7BJz_#aLi~YZ;gEQK z?euvG-aWZGn&**RZqChR>RlD-4k}z0+96z8o&5avd3zh{d>gyin>%xwp$q@vU8=Go zq!nBuo!&Fw+|SYObhR7uCuQChi;Rjc+O$_GCn`&Y#1Y& zZHIxz9-hDxaD{le$nb}vaI*oG4*KIzOowI!p*7yjc zGbcm&C+-NkUCFh*@upz=kVdLq`8u0VvL#wG?=En*5g|mMMS*GPaWir3 z)*Zq}vq=Z^qKhA#Xu){ zoeJ-Ac8hx25v@y!uD!CV(yO@o~mCMx_l z>6ZxSnihxW-K>8s&K?T5IZQc}{wEOxAhDIf@=xWZ!yo&`fO6H4kKgN=BV2exUuzw2 z=(6jxxeB||&wZh#t>dtbd6rAtiGRpTU#v1^Y*#N*AV{xrQGBqKj&wP>@e90UJDJO| zu3~6`&=t*aRPWQhW@1WEQi?~~SWeq_ZPbNsynciwbV6{mtT)n~xJdtx$oGDUQr|34L_BJBVG literal 0 HcmV?d00001 diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index a1499cc0b0..96267d09da 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -53,7 +53,7 @@ const sidebars = { { type: "category", label: "Architecture", - items: ["proxy/architecture", "proxy/db_info", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch"], + items: ["proxy/architecture", "proxy/db_info", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch", "proxy/image_handling"], }, { type: "link", From 0b021b8334202ad810c7cf2407d594e16378834d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: 
Fri, 21 Mar 2025 14:36:32 -0700 Subject: [PATCH 037/119] call_openai_tool on MCP client --- litellm/mcp_client/tools.py | 51 ++++++++++++++++++++++ tests/mcp_tests/test_mcp_litellm_client.py | 41 ++++++++++++++++- 2 files changed, 90 insertions(+), 2 deletions(-) diff --git a/litellm/mcp_client/tools.py b/litellm/mcp_client/tools.py index e6b403f975..bd803b995d 100644 --- a/litellm/mcp_client/tools.py +++ b/litellm/mcp_client/tools.py @@ -1,6 +1,8 @@ +import json from typing import List, Literal, Union from mcp import ClientSession +from mcp.types import CallToolResult from mcp.types import Tool as MCPTool from openai.types.chat import ChatCompletionToolParam from openai.types.shared_params.function_definition import FunctionDefinition @@ -19,6 +21,27 @@ def transform_mcp_tool_to_openai_tool(mcp_tool: MCPTool) -> ChatCompletionToolPa ) +def _get_function_arguments(function: FunctionDefinition) -> dict: + """Helper to safely get and parse function arguments.""" + arguments = function.get("arguments", {}) + if isinstance(arguments, str): + try: + arguments = json.loads(arguments) + except json.JSONDecodeError: + arguments = {} + return arguments if isinstance(arguments, dict) else {} + + +def transform_openai_tool_to_mcp_tool(openai_tool: ChatCompletionToolParam) -> MCPTool: + """Convert an OpenAI tool to an MCP tool.""" + function = openai_tool["function"] + return MCPTool( + name=function["name"], + description=function.get("description", ""), + inputSchema=_get_function_arguments(function), + ) + + async def load_mcp_tools( session: ClientSession, format: Literal["mcp", "openai"] = "mcp" ) -> Union[List[MCPTool], List[ChatCompletionToolParam]]: @@ -38,3 +61,31 @@ async def load_mcp_tools( transform_mcp_tool_to_openai_tool(mcp_tool=tool) for tool in tools.tools ] return tools.tools + + +async def call_mcp_tool( + session: ClientSession, + name: str, + arguments: dict, +) -> CallToolResult: + """Call an MCP tool.""" + tool_result = await session.call_tool( + 
name=name, + arguments=arguments, + ) + return tool_result + + +async def call_openai_tool( + session: ClientSession, + openai_tool: ChatCompletionToolParam, +) -> CallToolResult: + """Call an OpenAI tool using MCP client.""" + mcp_tool = transform_openai_tool_to_mcp_tool( + openai_tool=openai_tool, + ) + return await call_mcp_tool( + session=session, + name=mcp_tool.name, + arguments=mcp_tool.inputSchema, + ) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index a4ca90eb1f..cb38614e58 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -10,7 +10,11 @@ sys.path.insert( from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from litellm.mcp_client.tools import load_mcp_tools +from litellm.mcp_client.tools import ( + load_mcp_tools, + transform_openai_tool_to_mcp_tool, + call_openai_tool, +) import litellm import pytest import json @@ -34,11 +38,12 @@ async def test_mcp_agent(): print("MCP TOOLS: ", tools) # Create and run the agent + messages = [{"role": "user", "content": "what's (3 + 5)"}] print(os.getenv("OPENAI_API_KEY")) llm_response = await litellm.acompletion( model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"), - messages=[{"role": "user", "content": "what's (3 + 5) x 12?"}], + messages=messages, tools=tools, ) print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) @@ -51,3 +56,35 @@ async def test_mcp_agent(): ] == "add" ) + openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] + + # Convert the OpenAI tool to an MCP tool + mcp_tool = transform_openai_tool_to_mcp_tool(openai_tool) + print("MCP TOOL: ", mcp_tool) + + # Call the tool using MCP client + call_result = await call_openai_tool( + session=session, + openai_tool=openai_tool, + ) + print("CALL RESULT: ", call_result) + + # send the tool result to the LLM + messages.append(llm_response["choices"][0]["message"]) 
+ messages.append( + { + "role": "tool", + "content": str(call_result.content[0].text), + "tool_call_id": openai_tool["id"], + } + ) + print("final messages: ", messages) + llm_response = await litellm.acompletion( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY"), + messages=messages, + tools=tools, + ) + print( + "FINAL LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str) + ) From 4eb63832aa7543f6bac78c39a97388a468654c42 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 14:39:05 -0700 Subject: [PATCH 038/119] clean up --- tests/mcp_tests/test_mcp_litellm_client.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index cb38614e58..c1dd30c8f9 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -58,10 +58,6 @@ async def test_mcp_agent(): ) openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] - # Convert the OpenAI tool to an MCP tool - mcp_tool = transform_openai_tool_to_mcp_tool(openai_tool) - print("MCP TOOL: ", mcp_tool) - # Call the tool using MCP client call_result = await call_openai_tool( session=session, From 93d5c8537d28830c0d1eaa69fadeeed2314c170d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 14:51:12 -0700 Subject: [PATCH 039/119] fix(in_memory_cache.py): add max value limits to in-memory cache. 
Prevents OOM errors in prod --- litellm/caching/in_memory_cache.py | 33 +++++++++++++++++- tests/litellm/caching/test_in_memory_cache.py | 34 +++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 tests/litellm/caching/test_in_memory_cache.py diff --git a/litellm/caching/in_memory_cache.py b/litellm/caching/in_memory_cache.py index 9fca969226..a28fd6e415 100644 --- a/litellm/caching/in_memory_cache.py +++ b/litellm/caching/in_memory_cache.py @@ -10,7 +10,7 @@ Has 4 methods: import json import time -from typing import List, Optional +from typing import Any, List, Optional from .base_cache import BaseCache @@ -30,11 +30,40 @@ class InMemoryCache(BaseCache): max_size_in_memory or 200 ) # set an upper bound of 200 items in-memory self.default_ttl = default_ttl or 600 + self.max_size_per_item = 1024 # 1MB = 1024KB # in-memory cache self.cache_dict: dict = {} self.ttl_dict: dict = {} + def check_value_size(self, value: Any): + """ + Check if value size exceeds max_size_per_item (1MB) + Returns True if value size is acceptable, False otherwise + """ + try: + # Handle special types + if hasattr(value, "model_dump"): # Pydantic v2 + value = value.model_dump() + elif hasattr(value, "dict"): # Pydantic v1 + value = value.dict() + elif hasattr(value, "isoformat"): # datetime objects + value = value.isoformat() + + # Convert value to JSON string to get a consistent size measurement + if not isinstance(value, (str, bytes)): + value = json.dumps( + value, default=str + ) # default=str handles any remaining datetime objects + + # Get size in KB (1KB = 1024 bytes) + value_size = len(str(value).encode("utf-8")) / 1024 + + return value_size <= self.max_size_per_item + except Exception: + # If we can't measure the size, assume it's too large + return False + def evict_cache(self): """ Eviction policy: @@ -61,6 +90,8 @@ class InMemoryCache(BaseCache): if len(self.cache_dict) >= self.max_size_in_memory: # only evict when cache is full self.evict_cache() 
+ if not self.check_value_size(value): + return self.cache_dict[key] = value if "ttl" in kwargs and kwargs["ttl"] is not None: diff --git a/tests/litellm/caching/test_in_memory_cache.py b/tests/litellm/caching/test_in_memory_cache.py new file mode 100644 index 0000000000..de24bdf11a --- /dev/null +++ b/tests/litellm/caching/test_in_memory_cache.py @@ -0,0 +1,34 @@ +import asyncio +import json +import os +import sys +import time +from unittest.mock import MagicMock, patch + +import httpx +import pytest +import respx +from fastapi.testclient import TestClient + +sys.path.insert( + 0, os.path.abspath("../../..") +) # Adds the parent directory to the system path +from unittest.mock import AsyncMock + +from litellm.caching.in_memory_cache import InMemoryCache + + +def test_in_memory_openai_obj_cache(): + from openai import OpenAI + + openai_obj = OpenAI(api_key="my-fake-key") + + in_memory_cache = InMemoryCache() + + in_memory_cache.set_cache(key="my-fake-key", value=openai_obj) + + cached_obj = in_memory_cache.get_cache(key="my-fake-key") + + assert cached_obj is not None + + assert cached_obj == openai_obj From 1e24bafcfdfbd28e9813171d8ffd695339536047 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 15:01:19 -0700 Subject: [PATCH 040/119] test: add unit testing --- litellm/caching/in_memory_cache.py | 46 +++++++++++++------ litellm/constants.py | 1 + tests/litellm/caching/test_in_memory_cache.py | 11 +++++ 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/litellm/caching/in_memory_cache.py b/litellm/caching/in_memory_cache.py index a28fd6e415..5e09fe845f 100644 --- a/litellm/caching/in_memory_cache.py +++ b/litellm/caching/in_memory_cache.py @@ -9,9 +9,13 @@ Has 4 methods: """ import json +import sys import time from typing import Any, List, Optional +from pydantic import BaseModel + +from ..constants import MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB from .base_cache import BaseCache @@ -22,6 +26,7 @@ class InMemoryCache(BaseCache): 
default_ttl: Optional[ int ] = 600, # default ttl is 10 minutes. At maximum litellm rate limiting logic requires objects to be in memory for 1 minute + max_size_per_item: Optional[int] = 1024, # 1MB = 1024KB ): """ max_size_in_memory [int]: Maximum number of items in cache. done to prevent memory leaks. Use 200 items as a default @@ -30,7 +35,9 @@ class InMemoryCache(BaseCache): max_size_in_memory or 200 ) # set an upper bound of 200 items in-memory self.default_ttl = default_ttl or 600 - self.max_size_per_item = 1024 # 1MB = 1024KB + self.max_size_per_item = ( + max_size_per_item or MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB + ) # 1MB = 1024KB # in-memory cache self.cache_dict: dict = {} @@ -42,26 +49,37 @@ class InMemoryCache(BaseCache): Returns True if value size is acceptable, False otherwise """ try: - # Handle special types - if hasattr(value, "model_dump"): # Pydantic v2 + # Fast path for common primitive types that are typically small + if ( + isinstance(value, (bool, int, float, str)) + and len(str(value)) < self.max_size_per_item * 512 + ): # Conservative estimate + return True + + # Direct size check for bytes objects + if isinstance(value, bytes): + return sys.getsizeof(value) / 1024 <= self.max_size_per_item + + # Handle special types without full conversion when possible + if hasattr(value, "__sizeof__"): # Use __sizeof__ if available + size = value.__sizeof__() / 1024 + return size <= self.max_size_per_item + + # Fallback for complex types + if isinstance(value, BaseModel) and hasattr( + value, "model_dump" + ): # Pydantic v2 value = value.model_dump() - elif hasattr(value, "dict"): # Pydantic v1 - value = value.dict() elif hasattr(value, "isoformat"): # datetime objects - value = value.isoformat() + return True # datetime strings are always small - # Convert value to JSON string to get a consistent size measurement + # Only convert to JSON if absolutely necessary if not isinstance(value, (str, bytes)): - value = json.dumps( - value, default=str - ) # 
default=str handles any remaining datetime objects + value = json.dumps(value, default=str) - # Get size in KB (1KB = 1024 bytes) - value_size = len(str(value).encode("utf-8")) / 1024 + return sys.getsizeof(value) / 1024 <= self.max_size_per_item - return value_size <= self.max_size_per_item except Exception: - # If we can't measure the size, assume it's too large return False def evict_cache(self): diff --git a/litellm/constants.py b/litellm/constants.py index eb59858d43..da66f897c9 100644 --- a/litellm/constants.py +++ b/litellm/constants.py @@ -14,6 +14,7 @@ DEFAULT_REPLICATE_POLLING_DELAY_SECONDS = 1 DEFAULT_IMAGE_TOKEN_COUNT = 250 DEFAULT_IMAGE_WIDTH = 300 DEFAULT_IMAGE_HEIGHT = 300 +MAX_SIZE_PER_ITEM_IN_MEMORY_CACHE_IN_KB = 1024 # 1MB = 1024KB SINGLE_DEPLOYMENT_TRAFFIC_FAILURE_THRESHOLD = 1000 # Minimum number of requests to consider "reasonable traffic". Used for single-deployment cooldown logic. #### RELIABILITY #### REPEATED_STREAMING_CHUNK_LIMIT = 100 # catch if model starts looping the same chunk while streaming. Uses high default to prevent false positives. 
diff --git a/tests/litellm/caching/test_in_memory_cache.py b/tests/litellm/caching/test_in_memory_cache.py index de24bdf11a..d69899fec1 100644 --- a/tests/litellm/caching/test_in_memory_cache.py +++ b/tests/litellm/caching/test_in_memory_cache.py @@ -32,3 +32,14 @@ def test_in_memory_openai_obj_cache(): assert cached_obj is not None assert cached_obj == openai_obj + + +def test_in_memory_cache_max_size_per_item(): + """ + Test that the cache will not store items larger than the max size per item + """ + in_memory_cache = InMemoryCache(max_size_per_item=100) + + result = in_memory_cache.check_value_size("a" * 100000000) + + assert result is False From ec1e737feae747311814f5ac98bcaa7a44f9550e Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 15:10:30 -0700 Subject: [PATCH 041/119] test: update tests --- tests/llm_translation/test_anthropic_completion.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/llm_translation/test_anthropic_completion.py b/tests/llm_translation/test_anthropic_completion.py index 8f8f4084bb..a83d1d69e9 100644 --- a/tests/llm_translation/test_anthropic_completion.py +++ b/tests/llm_translation/test_anthropic_completion.py @@ -455,7 +455,8 @@ def test_create_json_tool_call_for_response_format(): _input_schema = tool.get("input_schema") assert _input_schema is not None assert _input_schema.get("type") == "object" - assert _input_schema.get("properties") == {"values": custom_schema} + assert _input_schema.get("name") == custom_schema["name"] + assert _input_schema.get("age") == custom_schema["age"] assert "additionalProperties" not in _input_schema From f0ede6334afd13c84ce976a613b7084da44dd106 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 16:18:02 -0700 Subject: [PATCH 042/119] test: mock sagemaker tests --- litellm/llms/sagemaker/chat/handler.py | 3 ++ litellm/main.py | 1 + tests/local_testing/test_sagemaker.py | 73 ++++++++++++++++++-------- 3 files changed, 56 insertions(+), 
21 deletions(-) diff --git a/litellm/llms/sagemaker/chat/handler.py b/litellm/llms/sagemaker/chat/handler.py index 3a90a15093..c827a8a5f7 100644 --- a/litellm/llms/sagemaker/chat/handler.py +++ b/litellm/llms/sagemaker/chat/handler.py @@ -5,6 +5,7 @@ from typing import Callable, Optional, Union import httpx from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.utils import ModelResponse, get_secret from ..common_utils import AWSEventStreamDecoder @@ -125,6 +126,7 @@ class SagemakerChatHandler(BaseAWSLLM): logger_fn=None, acompletion: bool = False, headers: dict = {}, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, ): # pop streaming if it's in the optional params as 'stream' raises an error with sagemaker @@ -173,4 +175,5 @@ class SagemakerChatHandler(BaseAWSLLM): custom_endpoint=True, custom_llm_provider="sagemaker_chat", streaming_decoder=custom_stream_decoder, # type: ignore + client=client, ) diff --git a/litellm/main.py b/litellm/main.py index 6cc1057bb4..1826f2df78 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -2604,6 +2604,7 @@ def completion( # type: ignore # noqa: PLR0915 encoding=encoding, logging_obj=logging, acompletion=acompletion, + client=client, ) ## RESPONSE OBJECT diff --git a/tests/local_testing/test_sagemaker.py b/tests/local_testing/test_sagemaker.py index ba1ab11596..9c7161e4ae 100644 --- a/tests/local_testing/test_sagemaker.py +++ b/tests/local_testing/test_sagemaker.py @@ -8,7 +8,7 @@ from dotenv import load_dotenv load_dotenv() import io import os - +import litellm from test_streaming import streaming_format_tests sys.path.insert( @@ -96,26 +96,57 @@ async def test_completion_sagemaker_messages_api(sync_mode): litellm.set_verbose = True verbose_logger.setLevel(logging.DEBUG) print("testing sagemaker") + from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler + if sync_mode is True: - resp = 
litellm.completion( - model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", - messages=[ - {"role": "user", "content": "hi"}, - ], - temperature=0.2, - max_tokens=80, - ) - print(resp) + client = HTTPHandler() + with patch.object(client, "post") as mock_post: + try: + resp = litellm.completion( + model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + messages=[ + {"role": "user", "content": "hi"}, + ], + temperature=0.2, + max_tokens=80, + client=client, + ) + except Exception as e: + print(e) + mock_post.assert_called_once() + json_data = json.loads(mock_post.call_args.kwargs["data"]) + assert ( + json_data["model"] + == "huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245" + ) + assert json_data["messages"] == [{"role": "user", "content": "hi"}] + assert json_data["temperature"] == 0.2 + assert json_data["max_tokens"] == 80 + else: - resp = await litellm.acompletion( - model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", - messages=[ - {"role": "user", "content": "hi"}, - ], - temperature=0.2, - max_tokens=80, - ) - print(resp) + client = AsyncHTTPHandler() + with patch.object(client, "post") as mock_post: + try: + resp = await litellm.acompletion( + model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + messages=[ + {"role": "user", "content": "hi"}, + ], + temperature=0.2, + max_tokens=80, + client=client, + ) + except Exception as e: + print(e) + mock_post.assert_called_once() + json_data = json.loads(mock_post.call_args.kwargs["data"]) + assert ( + json_data["model"] + == "huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245" + ) + assert json_data["messages"] == [{"role": "user", "content": "hi"}] + assert json_data["temperature"] == 0.2 + assert json_data["max_tokens"] == 80 except Exception as e: pytest.fail(f"Error occurred: {e}") @@ -125,7 +156,7 @@ async def test_completion_sagemaker_messages_api(sync_mode): 
@pytest.mark.parametrize( "model", [ - "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + # "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", "sagemaker/jumpstart-dft-hf-textgeneration1-mp-20240815-185614", ], ) @@ -185,7 +216,7 @@ async def test_completion_sagemaker_stream(sync_mode, model): @pytest.mark.parametrize( "model", [ - "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + # "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", "sagemaker/jumpstart-dft-hf-textgeneration1-mp-20240815-185614", ], ) From f780b5c2bd848f6e5f661839673876e1dca208c1 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 16:20:28 -0700 Subject: [PATCH 043/119] build: update pull_request_template.md --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index d50aefa8bb..6c887178d5 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -10,7 +10,7 @@ **Please complete all items before asking a LiteLLM maintainer to review your PR** -- [ ] I have Added testing in the `tests/litellm/` directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code) +- [ ] I have Added testing in the [`tests/litellm/`](https://github.com/BerriAI/litellm/tree/main/tests/litellm) directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code) - [ ] I have added a screenshot of my new test passing locally - [ ] My PR passes all unit tests on (`make test-unit`)[https://docs.litellm.ai/docs/extras/contributing_code] - [ ] My PR's scope is as isolated as possible, it only solves 1 specific problem From b6b6d668823ce3aeac8fcf738b70cad7864485e3 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 
16:18:02 -0700 Subject: [PATCH 044/119] test: mock sagemaker tests --- litellm/llms/sagemaker/chat/handler.py | 3 ++ litellm/main.py | 1 + tests/local_testing/test_sagemaker.py | 73 ++++++++++++++++++-------- 3 files changed, 56 insertions(+), 21 deletions(-) diff --git a/litellm/llms/sagemaker/chat/handler.py b/litellm/llms/sagemaker/chat/handler.py index 3a90a15093..c827a8a5f7 100644 --- a/litellm/llms/sagemaker/chat/handler.py +++ b/litellm/llms/sagemaker/chat/handler.py @@ -5,6 +5,7 @@ from typing import Callable, Optional, Union import httpx from litellm.llms.bedrock.base_aws_llm import BaseAWSLLM +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.utils import ModelResponse, get_secret from ..common_utils import AWSEventStreamDecoder @@ -125,6 +126,7 @@ class SagemakerChatHandler(BaseAWSLLM): logger_fn=None, acompletion: bool = False, headers: dict = {}, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, ): # pop streaming if it's in the optional params as 'stream' raises an error with sagemaker @@ -173,4 +175,5 @@ class SagemakerChatHandler(BaseAWSLLM): custom_endpoint=True, custom_llm_provider="sagemaker_chat", streaming_decoder=custom_stream_decoder, # type: ignore + client=client, ) diff --git a/litellm/main.py b/litellm/main.py index 6cc1057bb4..1826f2df78 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -2604,6 +2604,7 @@ def completion( # type: ignore # noqa: PLR0915 encoding=encoding, logging_obj=logging, acompletion=acompletion, + client=client, ) ## RESPONSE OBJECT diff --git a/tests/local_testing/test_sagemaker.py b/tests/local_testing/test_sagemaker.py index ba1ab11596..9c7161e4ae 100644 --- a/tests/local_testing/test_sagemaker.py +++ b/tests/local_testing/test_sagemaker.py @@ -8,7 +8,7 @@ from dotenv import load_dotenv load_dotenv() import io import os - +import litellm from test_streaming import streaming_format_tests sys.path.insert( @@ -96,26 +96,57 @@ async def 
test_completion_sagemaker_messages_api(sync_mode): litellm.set_verbose = True verbose_logger.setLevel(logging.DEBUG) print("testing sagemaker") + from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler + if sync_mode is True: - resp = litellm.completion( - model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", - messages=[ - {"role": "user", "content": "hi"}, - ], - temperature=0.2, - max_tokens=80, - ) - print(resp) + client = HTTPHandler() + with patch.object(client, "post") as mock_post: + try: + resp = litellm.completion( + model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + messages=[ + {"role": "user", "content": "hi"}, + ], + temperature=0.2, + max_tokens=80, + client=client, + ) + except Exception as e: + print(e) + mock_post.assert_called_once() + json_data = json.loads(mock_post.call_args.kwargs["data"]) + assert ( + json_data["model"] + == "huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245" + ) + assert json_data["messages"] == [{"role": "user", "content": "hi"}] + assert json_data["temperature"] == 0.2 + assert json_data["max_tokens"] == 80 + else: - resp = await litellm.acompletion( - model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", - messages=[ - {"role": "user", "content": "hi"}, - ], - temperature=0.2, - max_tokens=80, - ) - print(resp) + client = AsyncHTTPHandler() + with patch.object(client, "post") as mock_post: + try: + resp = await litellm.acompletion( + model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + messages=[ + {"role": "user", "content": "hi"}, + ], + temperature=0.2, + max_tokens=80, + client=client, + ) + except Exception as e: + print(e) + mock_post.assert_called_once() + json_data = json.loads(mock_post.call_args.kwargs["data"]) + assert ( + json_data["model"] + == "huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245" + ) + assert json_data["messages"] == [{"role": "user", 
"content": "hi"}] + assert json_data["temperature"] == 0.2 + assert json_data["max_tokens"] == 80 except Exception as e: pytest.fail(f"Error occurred: {e}") @@ -125,7 +156,7 @@ async def test_completion_sagemaker_messages_api(sync_mode): @pytest.mark.parametrize( "model", [ - "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + # "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", "sagemaker/jumpstart-dft-hf-textgeneration1-mp-20240815-185614", ], ) @@ -185,7 +216,7 @@ async def test_completion_sagemaker_stream(sync_mode, model): @pytest.mark.parametrize( "model", [ - "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", + # "sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245", "sagemaker/jumpstart-dft-hf-textgeneration1-mp-20240815-185614", ], ) From 04fd43bad6c91e3436ca95c63dadc33a725ca54e Mon Sep 17 00:00:00 2001 From: Hammad Saeed Date: Fri, 21 Mar 2025 16:30:46 -0700 Subject: [PATCH 045/119] fix(model_param_helper.py): change TranscriptionCreateParams.__annotations__ to TranscriptionCreateParams.__dict__ to clean logging error // handle typeddict --- litellm/litellm_core_utils/model_param_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py index 09a2c15a77..ebe3ce93fc 100644 --- a/litellm/litellm_core_utils/model_param_helper.py +++ b/litellm/litellm_core_utils/model_param_helper.py @@ -123,7 +123,7 @@ class ModelParamHelper: This follows the OpenAI API Spec """ - return set(TranscriptionCreateParams.__annotations__.keys()) + return set(TranscriptionCreateParams.__dict__.keys()) @staticmethod def _get_exclude_kwargs() -> Set[str]: From a23a7e14860c2ab0146de06725545434b9f774b0 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 16:40:18 -0700 Subject: [PATCH 046/119] test: initial e2e testing to ensure non admin jwt token cannot 
create new teams --- litellm/proxy/_new_secret_config.yaml | 5 ++- .../test_user_api_key_auth.py | 42 +++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 86172ae269..afb1b7b3aa 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -15,4 +15,7 @@ router_settings: redis_password: os.environ/REDIS_PASSWORD redis_port: os.environ/REDIS_PORT - +general_settings: + enable_jwt_auth: True + litellm_jwtauth: + admin_jwt_scope: "ai.admin" \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_user_api_key_auth.py b/tests/proxy_unit_tests/test_user_api_key_auth.py index e956a22282..5e86a2d688 100644 --- a/tests/proxy_unit_tests/test_user_api_key_auth.py +++ b/tests/proxy_unit_tests/test_user_api_key_auth.py @@ -947,3 +947,45 @@ def test_get_model_from_request(route, request_data, expected_model): from litellm.proxy.auth.user_api_key_auth import get_model_from_request assert get_model_from_request(request_data, route) == expected_model + + +@pytest.mark.asyncio +async def test_jwt_non_admin_team_route_access(): + """ + Test that a non-admin JWT user cannot access team management routes + """ + from fastapi import Request, HTTPException + from starlette.datastructures import URL + from unittest.mock import patch + from litellm.proxy.auth.user_api_key_auth import user_api_key_auth + + mock_jwt_response = { + "is_proxy_admin": False, + "team_id": None, + "team_object": None, + "user_id": None, + "user_object": None, + "org_id": None, + "org_object": None, + "end_user_id": None, + "end_user_object": None, + "token": 
"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJmR09YQTNhbHFObjByRzJ6OHJQT1FLZVVMSWxCNDFnVWl4VDJ5WE1QVG1ZIn0.eyJleHAiOjE3NDI2MDAzODIsImlhdCI6MTc0MjYwMDA4MiwianRpIjoiODRhNjZmZjAtMTE5OC00YmRkLTk1NzAtNWZhMjNhZjYxMmQyIiwiaXNzIjoiaHR0cDovL2xvY2FsaG9zdDo4MDgwL3JlYWxtcy9saXRlbGxtLXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6ImZmMGZjOGNiLWUyMjktNDkyYy05NzYwLWNlYzVhMDYxNmI2MyIsInR5cCI6IkJlYXJlciIsImF6cCI6ImxpdGVsbG0tdGVzdC1jbGllbnQtaWQiLCJzaWQiOiI4MTYwNjIxOC0yNmZmLTQwMjAtOWQxNy05Zjc0YmFlNTBkODUiLCJhY3IiOiIxIiwiYWxsb3dlZC1vcmlnaW5zIjpbImh0dHA6Ly9sb2NhbGhvc3Q6NDAwMC8qIl0sInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJvZmZsaW5lX2FjY2VzcyIsImRlZmF1bHQtcm9sZXMtbGl0ZWxsbS1yZWFsbSIsInVtYV9hdXRob3JpemF0aW9uIl19LCJyZXNvdXJjZV9hY2Nlc3MiOnsiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJwcm9maWxlIGdyb3Vwcy1zY29wZSBlbWFpbCBsaXRlbGxtLmFwaS5jb25zdW1lciIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJuYW1lIjoiS3Jpc2ggRGhvbGFraWEiLCJncm91cHMiOlsiL28zX21pbmlfYWNjZXNzIl0sInByZWZlcnJlZF91c2VybmFtZSI6ImtycmlzaGRoMiIsImdpdmVuX25hbWUiOiJLcmlzaCIsImZhbWlseV9uYW1lIjoiRGhvbGFraWEiLCJlbWFpbCI6ImtycmlzaGRob2xha2lhMkBnbWFpbC5jb20ifQ.Fu2ErZhnfez-bhn_XmjkywcFdZHcFUSvzIzfdNiEowdA0soLmCyqf9731amP6m68shd9qk11e0mQhxFIAIxZPojViC1Csc9TBXLRRQ8ESMd6gPIj-DBkKVkQSZLJ1uibsh4Oo2RViGtqWVcEt32T8U_xhGdtdzNkJ8qy_e0fdNDsUnhmSaTQvmZJYarW0roIrkC-zYZrX3fftzbQfavSu9eqdfPf6wUttIrkaWThWUuORy-xaeZfSmvsGbEg027hh6QwlChiZTSF8R6bRxoqfPN3ZaGFFgbBXNRYZA_eYi2IevhIwJHi_r4o1UvtKAJyfPefm-M6hCfkN_6da4zsog", + } + + # Create request + request = Request(scope={"type": "http"}) + request._url = URL(url="/team/new") + + # Mock JWTAuthManager.auth_builder + with patch( + "litellm.proxy.auth.handle_jwt.JWTAuthManager.auth_builder", + return_value=mock_jwt_response, + ): + try: + await user_api_key_auth(request=request, api_key="Bearer fake.jwt.token") + pytest.fail( + "Expected this call to fail. Non-admin user should not access team routes." 
+ ) + except HTTPException as e: + assert e.status_code == 403 + assert "Unauthorized" in str(e.detail) From fa94be9598e56396132f83e5210e4fafe22c976d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 16:50:30 -0700 Subject: [PATCH 047/119] docs litellm mcp bridge --- docs/my-website/docs/mcp.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 3f7d6fbc93..4fc66589d0 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -1,14 +1,16 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -# /mcp Model Context Protocol [BETA] +# /mcp [BETA] - Model Context Protocol Use Model Context Protocol with LiteLLM. ## Overview +LiteLLM acts as a MCP bridge to utilize **MCP tools** with **all LiteLLM supported models**. LiteLLM offers the following features for using MCP -LiteLLM acts as a MCP bridge to utilize **MCP tools** with **all LiteLLM supported models**. LiteLLM offers a client that exposes a tools method for retrieving tools from a MCP server. 
+- List Available MCP Tools: OpenAI clients can view all available MCP tools +- Call MCP Tools: OpenAI clients can call MCP tools ## Usage From 0c8415e6181b6915f92cede3e2a4ecfcc18bfd43 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 16:54:55 -0700 Subject: [PATCH 048/119] add diagram for litellm mcp integration --- docs/my-website/docs/mcp.md | 20 ++++++++++++++++---- docs/my-website/img/litellm_mcp.png | Bin 0 -> 113591 bytes 2 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 docs/my-website/img/litellm_mcp.png diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 4fc66589d0..75e582ace6 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -1,16 +1,28 @@ import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; +import Image from '@theme/IdealImage'; # /mcp [BETA] - Model Context Protocol -Use Model Context Protocol with LiteLLM. +Use Model Context Protocol with LiteLLM + + + +

+ LiteLLM MCP Architecture: Enabling MCP tool usage across all LiteLLM supported models +

+ ## Overview -LiteLLM acts as a MCP bridge to utilize **MCP tools** with **all LiteLLM supported models**. LiteLLM offers the following features for using MCP +LiteLLM acts as a MCP bridge to utilize MCP tools with all LiteLLM supported models. LiteLLM offers the following features for using MCP + +- **List** Available MCP Tools: OpenAI clients can view all available MCP tools +- **Call** MCP Tools: OpenAI clients can call MCP tools -- List Available MCP Tools: OpenAI clients can view all available MCP tools -- Call MCP Tools: OpenAI clients can call MCP tools ## Usage diff --git a/docs/my-website/img/litellm_mcp.png b/docs/my-website/img/litellm_mcp.png new file mode 100644 index 0000000000000000000000000000000000000000..2d18071aae4c3997cf30b9b3ed0f6170e7aaaf1f GIT binary patch literal 113591 zcmeFZ`8(9_`#w$!EtX2MlT^wcWgAN+^ej=?Bl{rxzQmYTvQC?QnNV5AE@K^IpE4NI z*oF}kQ<%|U3}%e)L%rV5&(Vq9}EDaK`=&=dn9(*4OT| z>ni!$|;S(_mh znbHQH9pHaE9v(Zc15g%83L_Cdd1v1N&POHZ_dX}Xp)s`8;=+_e!cYF~nx4wbA8~+g z)Au~%P~lB&?`Ub~__tN^R4{lZah1}&C4O2 z=}SO%6H!r#>XE>!b(8A6M^7+D%k4DEBHip*#m2g&2L!#+OOE_sll89B{pvFjVI9h+ z;n}z5rt#)xx|dQkXJ2q|oV7xZhEGGn%N$VJ)TYG>=jVsjZf|@l(!Evu^7X&Z8^@z@ zEu+1?c0gNxK#%FexsM~`NsX6t>|r!>YMD<|p)Zjm@Aos=u3u7ZhZfDVa=H^JMYW|Kli!N|J zxwCt2Lao1_$ExppbmBFJ%i2EBuv6}N1y&beo9~Bb3^z7y% zTAE{?N0mzD0K;%=qZY=U;qd8=xnt};y|sLPLt|rO^r%wfkJk+wvx$2-Ja5_NSKUeN z(5YZ_lT(>EdpCAqU!Y8F+g|z0_=S3uii)TDhE~U z8{v&iYBR}0pJ_|09a7P!KReTtvb%3ZxjA_?zWwS*(bIj%wK-*IBl0-M&s0pfwId`n zu*F&#CX~0DLq3iP7f<^WXper)0YNVoQjAr#4;r!c5X^PqaCN3H;Aw?Y+n$4M*Qn5c z1KY`MiNyI)mh$5paI828HF!G@C!HMZ>B{lQh)Z;RV-p8$y*xAX&)>(F4AQJvVu^ZQ zuj5H|YdCFiR?drqbiVW0rs39Q=lHUwH70t8S*m=gT~G4+@BG}Hhg_l)8zQ&}qrkl! 
z!X|~6^QB?@c7IY!+qZDwJTd8L)?935xqV|z@%#L%f+|C?hi?7|NGVn3?ce=~_qjQu zK}_^AWLe6D|lA<0Ui;`NI)J2FAZh19> zI>Qf*P@zEX{5{0GK@Hg5s|nE-9_~$*C#_(Wm19N_{NGX^w``G{nw!O9OgHluqQv&E ztu$_ZccM2w{&;>`e=uqhF6EgSv}J_7bi1nhPTQTfBWF#ig+T6HY5*da`D`u-9)?w{ z?;0|elE9`b!>sU2dND1$8a{f!v36KHO38h5m=1d&@aW4$3s7XoLD}Nlnp}QOzdmlP zq@yRvjXc46%aORml6T|l^$&>`zeRCJ>AYs}gg_gxgP+}y7z4%W0OYu*UU=2&Pa_n@G>5tL*+5)A5`v?RAbKeO>r6n0;QPk~( zy**94vy-P9py@qQmZ1-g>C4urJfHyHpRoeb{>N$dK7@{^p_U<))2p@+WrT8Fmis!5 z1b(N|=H%SsPKaFyPM%jA)IrSrcE9=vud;c0xe_VG@x~COrB`FFPaveEq!7uf?=thZ zzwmjD)YozIiT~6M$J#}jnz#B_sh6_pUSbZ@?6ls^5wkbAOWMk1=nU_R6li3g4b(x? zwI|7@%IQjAYZ^m#%f6f)@g9MY4_hWm`oZJ#dg^hYu)xos!ZQEVOO-P}s?x`Q zS(aPjJnnSKpJSXqUkE8mL1` zClv3!f3xrGm%330yUB&r&dvE4gyqFrm=r%wk)9>cPeBarpN5`_KjJr!kS7 zHhhU0oJgH;dH?d0xY6~cadYvf3br+;Q*_tIh{1I8$NR09|I{TIhCOog zq<$JUmB0FItv-;{Q&8wPUIR8Pt}2Vk2i|n#@|Ae2#(Pjr<_7KVbl9hd94MX~k1-tr zjweS7%Ay%QKb?QoNww}FNGnbCT84ASLjl1_sq3~9N}b*a>q@1GtnNf1%ms|NfSSiZ zlGR(^AY`|exEs419@KlDbj%Zh&M1gslF^7Vx9L)XJ`iYV^UtJjladIlayhweS&RWk z|CbxXwt-D~H-?8Nz-q*mwE7ms-Q0BcA2sPWH3C0`&>Mz*dAcA|{q41anq$deddbr1 zUf*nqGq#b^d!PTqeb6Kaz*BV_nGy{jmaeq|=Qzt+#p#!Uf@I~d+wi{g@PJsEd>%+* zLEK)dfR^n_1a#wU`a%czR#WAzPvGXJ-NHU9+c#82px!5QoV_bw|HhS4il|sH57c06 zraL@*V3MYKz2U*|$5}>pYTzm(7r(JU$hCU2m`C;+mn2VbWnJ=qD1V@+n(z3$q5CeF z3-rq%peLyH1(TIO&@dh20TRCD`)Pv;I#UdI;m%+npB$%{YzJV*kyhEmg5|vqI+P~hXEpv#ZOd5^<^V%TNJ;VLi*AFPdE%E`A9HT4o zOG`Lt`sRM-Olc;BWm8hw@#cya=UPv1TcmCEg}rIxLxoQiRYdO9<%-`Ny&r!6Xj>Vh zX%tmad<`_TK$ry}f}oIkE@XYYJ5n41u2QbY-VdL$)EP)a;nJ5;45EvemgF8!2T3gWdB0A)DN0v91 z*X*&;I{hJRPXZj9X5ft4eXMO=UvDitYqTH~1)%~hJ z^uHn-a+IDnYW;Xm(T@h28vCPiWtF9rhyzX=E3@re>awu?6=?RB)7D=|aJD-RfR%c% z+RnwVHQM`r=x9dp3lRA6u^&mK4$wZtlC?h$w`v-UEKRiOv6v% z$%3t5?#!2yc5LE8XKnwdZaP69_S9!*e$MdRW_`Bh9SLsEi?Rp-JzBik(&{9y#^fOR z3HL|tzo@ojZZLgwE*fPd;V_rZXbxXiDpeDYTB>gU`9pmdt9&iIENzl9ISRgA;1DKC zZlwjNOhQ-@Um*9a&K$q&Z|a9AbFi?I-It*d7X{K9DZSG1rR;`{cEI<`M?W81@OOwE zAE|Vlw#yAT-MKU$Fsj8*Ku1REye0Q}k5*Wgh&b)Dkk!%o8?^#*@*Y1dOXxMVh>01? 
zmqV`M1r_87^=N#K;n7w1|pFkTIh2QX-)GlV4Z5oM6V zzQOEte4=WbBl0N68}PfZyWniLsO6a)lku1zw6-Vv51$1`Ol=+hsP0b`ub&FDqt&lc z{Gp&$pndD<*O<1q54k-(aO;D*a;J~&`1yKg+rY?%Ui#-7op0*4`MD#n+;Di5p5ON) z133Es=dl;Er&;cB<(I0bH7(+-xx~_x?u#ZRi|Zlu&J_gjJ8Mj3mRc{7^@6m;zUaj$ zSj1V{*SP&-VhBEY60!e&%;sFR&v=cezrXU;sYoodaVhJinPb1ovNRuXKA^)hZf*Z2o)UY7_@l2-lF^^!(;Br%dTso?%7DR-FMYa!$j~ zg2dSpGl-YYuYb%EfK-e=kl3H8R@AWbTsb=5^J(yw`;#crdF2IR8w^zo_}(8@!;X<$!S6f>TT++___kC(8$He3g$tZ=0y2%`Fp! zN5A<=HLh=MwAE?!2u`!^v@AF4C(5JQ+Z*-%(HgD4Da#mMjp?uc4Y)7Vm!Ewbp*!r1 zL)bm#3ZV1w%|4IyNG0Gf>$GRCuhU!=J2`CsahKy7R6E|_fy>jRe=~ROP7tKE^T$*j zu(ObtnG0i$Vf3t8mVuI*m1)E3+=XVcrE2KMO|cpkj%qt z@@>-&`D&aE{dLn(PEQS{TeIh`m7Jtk6P)!!mexYnt<2CP5P9fkAurIcd=eKspLJTY z`Boxa<#P1RJm!|){W)^3Qq#|C(tnFYhF5nvPX>?r8hBjfwS=y%gi`@N{Ka+RWLaOT_DNfOl_5>Tqqzow?At_>Kl`BNB%9E5Zh97G_QT`%D(4(tKKuUliwNt$D zfWpR85=p&|elO(Fw5JX1k9H41XKGfu-9m2sz_*XLYgopuKe2jkz2s3Au0mIX2sFddo_|zCncwlm;T1Yl4x6BS!^;8~~&#V5NAT@Ip0!Pi361C@iQ9 zTpZM&bqSJ}T_&BfNT~IxJS*2{6-!Zz-myc|?NInvi-a+rLnHE1qwvN4j0-Xjv;^zp z_{dt`oTbaodCd9!KK*%xse&h-A2e2m#jPwOSfCQN1gAl8G!NkiSc8}Dwm1~u`Dsduq9S4l{MU-ma?7*@quX*ysa z5j%$B0(=qQ3`;6Fcw-3$mGumrwbpz3 zmS&@m$EstDbt1?g$3|Muv4d$}Xs_^#vzYp6+p@)Le6F@r723v5D;Kw3Z*MJD!hX5c zAdZ=aJaTTRQQPU1Vh6M=ftBU2J_eIxtS;XGfMNmz=vZjLPpW>}G|nc-C%iL|+xtrE zmML?$hYw^-g_}jxotF67<_7w^cQg+{%$K{dD`yv2inwf5lgQb4*%K77?Uf22P`aYs ziJf)-ofErozO-XH5F=q|m#x3L=Y{y5&BKlxS91UovYUww(;`gr$m*@Uf+a}-q?F-x z+AhDuAN_rA;9RP84}eO$PkGu%xl2@jcZeLajf9?4IxZGfdTdY9fUQR^o*anmG~qWV zub2Rc;(HbHA$>s)7H3}VRUmKYCi~TLC(t}Zf1UNyCdA$cA9ioK1aO>wA`|qz|nQBc=__E5yuU|8A=P1>G-W|-cKban(C!9HoM(`7IUeVbklL`?lLK; zm}I3JlWl78hwK40YuEHNuULd3Kh!+kmto&% zR7DIq#^Gc0Y?OUZjp*G%b!A6+`D1A_ zD?UQnAQ0&dkj$Sw-`9IGAHa7$!;QJ({N{l#NJb3IJ&_7rZ|i2Q-t~yD|HumZ&b`T7 zd++>w2D`p-0TyX;;XZ_rwJLFgy^SJCEWdbuSg5CRd!kZsRZiJQSiVzNxx#Uj#iHrJ z2im|y06I)CBTl8l>z#+w^k5hR$P4qAN8Uw7`qcbEpq9$l+`RsBrakxhrfmF+OGWVc z)jf-e%%zCWrp0E&joJ-rjb-2t1mwE2J~cFD6$4hyn~Bex5K9!zF6Tv@QgiqO6P#JvV zt3xP3vlr17x$?fhP#bcDzF{+cRPjQ#uY1gTK+680t>|feHc5Y->sa<3=p+q*P0hvb 
z83yv?=pMQ6BPQ2_Ps#n9V~zSviXxibdvtwo&dxLIO>GRf@5T`2^=tJVHlKf>f#3;a zD36_n9JS_*Jx)FrXsKd{BkkW#|4DsG%?mWk{|(TcIEe6`lVA1fQga`aF0A+It#KjD4h zzO#;eoEFORM7OyQB^Bck)=cRntaY2>532{bH%K0K9iZ2=x{q|zuXF_Mv^saj5}mF? zHUem~-L$nLeUw>Uet)(Gj5%!xdDo$WX|uR+pRkK8qKd9&ROx+Imn18LyNMd;&3+W+ zE;3c*ENdJWqwaPa9iw+%Io9Tr=baX6Xyqa=EgkDP%{L!qszd84@cRAw%*}`J%1WXg zUm)vsddulN<|KKYFH=Xf{}UR2WF{g{pE#|h?}z@NCTTM)*i1NqnV@I8o`TSOqGnr} z4_$Ecy4Ak)iilMYB+M?NPP^{{M?XTc@PL3Rf5)!ttgWF}Ier1O8T2ACU%R<9SK3v= z_4;^w{z_H<1gvVo?q(ENB}yED-42WamJxpSo9J^3HScgh6gI=(xUN0)xWq{t`G)kNiTK7+u;JoS^07@3 z+s%3%jb6-n1u+P#*iFiR@(c`K-)&AaJ@Q0^93q^T#%qf-crsS8m!0%tR^GtHH6{ZL zmZs7ANdIX`#$>SB(Jf{9Q^e@ienV#Na{q&b)uy<48o;EV*`=+IW6iD1#N&e^7e}ED zJ*$jrC^Wpw03*lUJaJZMu-V5ss!wNja9O*4V8xIm5T5bk)iI8HSLnhYQkA(fP5%O5 zpb4kcGa_|zQYu!kTdHj;X;+%|a`PS6*GJZ=sh3ZSaeKp}dQZ;mlkboXl}@G-`4W+y zm5N`tnl&&e4E)3nn*{1XU6oe3WP#YSWaOLo?*iTO7^yU7Z#-XX>XnW%pS6l`6oS$C z_6pFboH*Y8eCs42Ed|*s1NFqkv+FM3cxLCtw4hdxoIoQqF8X&nw`+l?%%c*wrn)zz z{Ch(lbG@mCo)Fbk-x(Y3!Nt9yk&@}o9d~0KkcIrvjZW@z6YD&)`6#94rJC7}I7jwM zI*PN+BKCk3a|~kyh#=9c+0#iFU25QksP(0oRqSR?I5Cn|R|zRE)?~ex)h)?TCv2?L zvRCzY)MA+R(B|+~8Ov!d4K;YY&WWs_q-Tc*w54Mx?^A3(1v%)j-oL0veIE-Hn-_jG zgmr!E6SWAab&44i|JzSX%2Kb~5rYydPGN-dO6^`R094UR%DG)Sn;=8OQ)wSK3Q-#^ zUw$bFFPRg(7g5xTTh+85aowRBQ|j{)qV)V*PF@*)1{}$5Qu-skkTExlLHSMr=$bL~ z_|67wXUs#S*EylK@I4@b1YF@d5%c@0)S4Jq^@KFBxb#k8K&MTqBdlgipv}UmxaFsH ze^JcOsV^3J^~v2YYr!v6aMSFq${lThk~|SS-*;Yae;UP&%^Hl;yXZ-9v^vb{lGVGT zFpAM<(`G%8D7xkT;BzFNkM5T`S`NIkhqsSh0u%DuJE1!asPcR!IqSo&P7+ARTf@ey zzXT9wJ5l`u)6B)Z+Ka8fS6)D4hMu@I!!RlseD6z+BzuRA+m)8`H_c@Kcby+xT(eQL zMD(h3+C<00b2-NwSBD(+g!#*+r6^O}Qlb+PUg5~KZ`uv&6#zUQ&;?PU0r$~^U#_R7 z8ayfiYx8nmoB*K6c~C#5P$p6Z{PMS8WrU%9#(zS5~@evbMfYn9pF& zunb^9aU?-`t1gS%QdFtrnuPCw_>$#fvxweEM85z?-V>Fn6n7+gb-q8*SZDSqZ)Y(0 zh`#(0rTa}6cXcr0jb-rl)@ar%SoHgholeR1DwixDSRBG<8oE6SMR|^@u`-6q(W_*C z_Pl>e!8MBxzhNoT*rsAW_=kS&+tp(D0|63!7pgfg0^r$D~j-Z&_Gm81sWH1Vf*4pq2nK_8X4Xam>&!vxz3 z^i$RVDOkuGmP#4em40tv#?w+Cqm7m!eXf#f-*tjEW;}FAB7vl2jdi|D_OXLOd67FG 
zlpznImt3V5d6~)cEID!{wUKPT1eYHmvXSd& zOt}vEPl>`Eno&QUL=xLfwskCQAC(l|3q23Q7Zp^GVPL88JiDn@wZWSASYE_z{9dku zwF^#=msLEMSUvtMqYKwuARH&BgLk$WLIz5FNOT31wM&*9xCJyEQs*Si+|HbS6t8+8 zXbn{b$u-8%Qt%xsm{S2!iQrnDgl0X0phLghAB{)4hPgBBm&t6qO;V4%kw$4OQd3^;71%e;^*8 zJ)ayIe-(mf#2gbFT`mnZaw{@0GqKRW98Sqj7n#x>M+h>#VGN0-l!|+pZ>+q%fwM<- zX`eTzXm7ijvsyRTd(L3jyAG!XYAU|+0Ptcd0pLw{Hb-OhE@ zmV8{o0{kKA)j0!&wr}PVauKBtb^2KV2P}e1A*vpE*?ANS@Yqz|L+^P&KVBx)Psc>T zrDL(Tox<{Tez)xfYK(H^mzxQ4Cdp5pZE8YCxP=xB_*coeW!{(HxKH&CEPGd?)OY;| zNuI@>TMKsqUGd|SUjRLyF5Xe^5E^9?TQhYIHsPY+AU8|4T z-r9ifEbj=mx6bq-c<8CCpVevp0JGpaT5;bH-=^whQ|SX*-K4<&lNL4e@0PVEo;TIyOLf@}QwBciuNqB^2`GNO+r5%^0D zw#)k6dUbrx7avpWyr%u30ZQ!r#B9uBp znZfWUdRLi<@wcsO^Tu{}#;Zm^cbSpzkb`PK|D7aKy+x(aW>AZ>c$lu5QE6Gz1&KYU z0QMyb5E;4w#OVUSN6xtBh3QQTlo#|pphT+X<+gy^zCSRFx)%);B8t#yon21i4c_{+&&U3AgkFr^dNs<| zCXz1$f1oyK`7)kEEEi*;4^rm%9O;&wMe!Xs8kHwd|JaE7bXw0FB+o5sP45zF)t*BI z8~!^KbT^<+x9W8s8pV>Q*OB#t@cT;SsqpEk=88^BN6YFv95qcunQe)rYkqG4Cb@gI z+j#w;)x7GwHw@$)NNeyIUpdM<%JM)rvhp9KP=a$VzUrO!-JTxzG`?li9OethXo-gt zjbw1Ct5*IB1DRs|;R54TAiWm5!N-OpDYI5$lY)^ z3-8Mtmo)CPJEBJJOKhxbEwe_m!o4L<=NhuBn#s3~`^qxa3Yg^k-3k+GTQ*GS$jr}y z1d+Ow@=R{Nu9je2Qi=q~W$4L%nFaeV4;G5Vd@C!^J^yx&4{9H zjiU8FHhCF$+l025Sa4g2Vh0k8auu}=xU9-mEO;rm1ZAc$uymT}Y~@l{%-1)07CbN; zDX-7BFI#Cf0lqy^u7l|cws%>~Qm%u+!n4qgIOxyeD-N+If+X=Loh!__gxEh}>|)1D z@1uA8w@3U1ar%Hll_cihQjJq>UHhP%yvy$0q5y?WjQS~=T@DM!ta{GFeNBkHa|4@h z&-*9!E#rp8ztr%$tFAw8jBacd8LV_9+m8mJoV7EQC~orS5u79Xk|{p&a9Wbb8lSJd zX*$I!b|yG)@BZJhTS;ZasQyc%Yq6YS&)GG+1HDgSfN#+i;Kos@kArQ4b~2d#sx}$* z(G>X3CY-dr422&3f&TIO=GOD66}F+!{cq&j8i}cfKv#3-_)16RmR$SZ^5DAHzaw%P z^U#3h=WQj(~b#7GxprKXm38@G`@Q%5o4sP~$AUr1SZBa_zJn#!xa40lF@ zqvjXGCs9fRnxWnApC6PdnC=cQ)CJ52=?_J;0W1y|E2QNvM;B$7p<3F&pNOk@jIMNG zpY(8Z==fds@4|_iJ)TxOW$8P56|Gwf!pe<~C6b7gHRf}BHjl7|46J@vypO;m94`H6 zJ(*s+#ds|w4NkRq6#tidPRjPeIB1EJZuj;+o7;XB{Q&N`6-Dc}TAU zFH-Z=**&B#REV=JlEOV(oLOgDtQ+M*(q;5ac{2<#tXipi6%*Q_^~MAu^=HER1~r=~ z29Fu_cEKW33=C}XL=}W?`X%{juqtivHpZ~Lljp75<@XSyGIXtsUTtV7tKED6_DT-q 
z)#LS>#Arir1C?!KMt#TwW5dO3fR`yZaO7aH$CiT*Ozue@Wt2w0MqhdEbvo0D$e zi@j~`_~z+ts*4@Wihb*17JA_2V@pOVeuk$g;ngmyc$-0fRGH3Gs#D>6RdQ|9yE`WmQF3__(+d%SkiMML*!fOc5A?qoW^lcV`X z*;rHJ4xh&$SD1_LiF0vl;M>~BsU7k#9R4C+CZ;QzO6Up*q zFhOndq#g1OzutyZG)Zjwr>}RAqzLl-(`U(Y2l=cc^=xyEMUG%c9XC3JdUhrMSzWMQ z7Ss_S{YDeKx}>%S;}X7`wdPggacT(RihFgX#hw|=m94q{goW{s%hPByBY4+XaP&D6 zJChaX1&b5V7BA0PhU6Z0At~k#S=oIAV-ym(NBoj3<@xs?3TFCH#za6TBn1~4iYfhK zd|kGfHQqyEBWxo!TBeFDxZ0Fi1TdXiQQ>TVGaB{z-06J@&ZV}XhP)mFpFunoWkoJMt&+Rr8Rx8$hU>H@__dsn$qRuK@$#g#H`d)EMP&)bIS;Sq zL@fjcQqAE){>&mACg+|MHYHc0+I18;)`%#~@V>xoGE_zy4t)t#X=-YAA?}AI=tiw4 zHLY}6NG@n1WO>P2Jq}zNn5$ufgE_zM20qxWsTQ$&d#;Kc`ph@%^-=|n`)w_9S^<=; zwlCvK2hY(T>_*YkEP!8G-N*(MEWJ*_`RG;k+b>lRa^MDp9?u>0w9;sO^bR0ZPkPv{ z0&>i9RJPYm1pcLwO#S_^oRse9ERnkva-g;y>N30H0BerzTP*iv(^jMn%i+ov=LZ)> zbLp-I_dJAVF1}pMuAi~Molid_Ka=Ec*BN1+8Q@jQ#|?VxQ@`7b&w7k2xi?1w_Ppbi zvD;TVUVXX)PVFdD%J8L<0apwWryM6caF}!DNmeu~!Mdd&f`Vp`?o1R>ln4wRZz(~@MY@sddsnqouEjWQN!)6)PS?+PgGJ5d7wZKpG3V=XzCh%Z>r;~Y#hP6y zND+8bU8Wu9t*sO-E92bCY`-jsb zA^7T>1&{%QqTV`kI&%31?KL6r;^UFdAscZ42*CU##j6iqu!>YrA5y(N_VbkZqmrwC zosB^Ypu(On>Pm~d$N9=a9mRdTVFP(jtAiY(%1Y^_ldGKsQj@p;3xbut z@L?gPgjzHG3zD0^1ri6pZ^AUsDCsa}0%@BbJ%zX!o1DO}y*5pr4t-;2-F1n`Aba|p zlwnA?kw>{&$aEg0j+(jjZ9(eAdF|Mmr^R7t*H!ig3_d9-bBFhVDgLr^o%UTT9?czA z4_;V}1vpcL)`-u>jATKUMuh6uxia{fYNs+w&;+JH)J()hlbjR$(17U)?SAK< z5`gV;x*BU=bL`IcV9$9(J$ufP4Th6fbT^4PAMd>UJ>gxcsE25k0Tq3@VXF0>whG^2 z&7$bTb77wYj3DQrfXC5L)1{%*@}P0^EsTr4N*3fHHBq%pLKGmdD9n%I6;0cNHA*(Zfj0BlWI7-}-KltBMqwxA4d_+EqqcfCD)|YLi79y!9+GGfas9PG z&(hV`M!o1)CDEM3YodvX*UrkcXpDKJbQ^})I;$Xft^DVT4<*YFMjT3 zTFir09sT{rD|6Q&2P(r#^wZ%%`B*Ni=6tLf3&s`6_e$I zsS%wKBmRyhl6xPhHdFyPdUh4C^^`r#=sY(X#~+lH?bmevPveBsD;-I(KuDnGFIC3F zAWN=*rZOXl(^>5kPN^|(ZC#DN=-clZ_fqVZN@SIjy;ffUaU~{f-km$LkdHhmHNV); zQ<5X`d2gs>I@~hVxg%$u5wpy2xhPs5JfM+YVn1E>F4MBESDI0Z%~;(4IM)l(20NVP zvV>X-q}uqp+way^h{R#7o%_vUg3>HIe^lLpJ@difHkD-$I}ku($CC5F0l$A1!cqwS zXw(5d_d#?_W3WSGU*u35XnJ#?;6^~5i@WJ`($;P0W<(WZ#L@nOL+5H5f%-%+F6Gw4 
z6rEDsVxwYS7_oN)$3%8N&JYU*F}l9UR{n&b4*2PXTKa7D^h7w3J2aOwrf6eoFo`ua zv2waRy!y0*r4I~TD9IPqhAa1BZuP>-m0$6*%=VJ2D}LBCUIrKg(yI07PU7KrzfN)r4*?rnQPi}Xn+lr|^78{)r`)NT zN8`LyE~Z?auT}Gw*YN26>T<5X%nNUVF`}c)8_m0f7TRN;ipIhnT4nt$>kCD;?ahNRx%G$Uf!zz8V}}AFQP)01jP})F1x{ZCAlLu%mJ4V%p=nMCI0o1BY67CFNbc5hcjC z>t>(<(?bYG!@F$?5iK+qt>j&wYW`pp2ne%v!SFIc%o6kUy6- z?6JA(;a4=uo*3ds#atVn8LBz9hFfmZH`NctHfiYfP9{?^XI($VGyHQ;R1JNLY`~BUpQ&*BmFLpwS^xO9^>*_Me}LvvF?j;yzhl%l9eq=W*%wLs zcoQzVyd{Me@@E$c_0+<{oONd3l3ZsBWMme3-8}=K)@Ht?A{U~Fvl(d5NCle)ofPeb z44bBC!#@3$;(J9m_%bhjey#h-H*87shz2ptc=!q|sNtjA*jcW-T#t-gR7*_`&r2+J~ znU@m|04(tBYA<3-c`HMmPB zChMnk%BmUy^G;*|Ylj*VFH$em=_%8=UFY@a%U|)`M2nt!IO~_@`Ep0J^t^a_=b1Tc zsebEA3W#G!yxzJKg-Wcbu$c@#85550YnTz6j%Pp@5P{!jBLh2@Bxuh**v+marX{E83|a$L8Kt$-xs*5NH{ZC@Awv!W}?wGo2Z9-YeXR4fELFBi1**zzec!%=dFuIZ0n)hi?Dnh#T6*UTrL^yDH zWZ9-N+8)wNM~CYhI~)n|#^8koIPtFDVV=C1U|tpu4- z*KmsXW~HAG49cce)2yeLrSx=dat`YsFDRR8;#=2+qqmw7uAREvE{>SivDSyytT{Xr zYP=W)L`BGh@Ut5u}75VB0&pr_Wlj8`~znN{ut>M(QENYBk8 zt|{cyYo1WBKf>*mD>kJ|srFuGXXr9tWT=I=8(5aQOCEZ8yi=y$!|d88T#f{|uJdBJ zL%0rWhCD(A8zN?t1An)Td{%L0CW*w!*{}Uj!S1c>YOmN^?ZKWEj<50Rw%|MNE^Elw zY;&*L#qiGkl91hbA7W+9=1w@25e|8B8vj7-d|Xu-U({R2b&o+AkVL5^4_`Q#qgXUV zq|ePF_tC}I91lfK_a$5*)NU8=JGk?ke@6wN92tP5>R$Yg=cs#eWtpBuObpc+5Z8p( z4j*}CWTNx#rV~~bo1-=r(crg=G|`lHC*YqtK?5yU_8)T9!0b17t#Q@bi+3PEFs@L zuEpD8{H(#N&eTQi*UrhoYIG{a7%f?#sYmsG?I?9AZ^FwNUFi!hJgkt*6-swp}@A`Rl9~h*WFk7mK zevz)rm?Zu(vwx^E;V~K$&h#c!qGU~VtCy#{AIQ)5EIp@GL+yO;6WqJL+;p*T+@QW> zg};POgPSkD4(?jmbe8nDP-Y2x<4K=`A_417BORmA7$c*_0(G)!su1 zT1i2WpVD#0em&Rab*#XcV2B{P=1o`wXQNU3k4ZR>tdk9|z`UN%d*|Qw^VaAsJTTTW z^E2znfUw^THa~#f@hs8u;FoPAyDv z?85r;w3O?4!rCs%FbtiknNsY8(tE)WK>odF^2V9A;_}fx9Tz>X)~$C&o)4H*RrZhI zvyA=y$GxzT+5Xul5$hV;vO8pw7)5RL7z0``GT;Z<`aTmt)<4!rHPZ=onEd4D_(|dp zXooU({HCex1X-7xKSbzC$2avt#fME>lG(Wp8<)$*9Y7T&5<&f$( z(0I(K)aa$)6r1vk_hUDE%nM1Mw_IPgI6?M6PpzMTEXV5jd^=nFSUm$VfMRRnSu*Q&=I zN$>ZfJ7=pNe<}TH#_B6pd0Xw=x8eo-%&EPX=?jS4JQ$~3b(ImL;gaktv2)mTOrCDk 
z*1m(o*~GPvr>@&rox1_v0HCCE>}1f+Z7HkI4{B!b03cGoi07Dl)jNPh*>!Sso^&>S zQ*U&oV^^*?aO)fP0if%{)(2cQdvAAn65`7CSWbV*VZL%-&rN>vCHZQ&Wus$d*x>vp znN;UGo>%y3)fV-%EM8u|_e1LT zUXQ?MIsw9ECww`$(P7+=?7gd--s#>*v-3OefXIW+t-KZD@tJI>)@(heuR#u+hA_H~ z0jITag}Z?dT`(TA$f6F?u7vBj<69b>#*;J`-+s6y5jPiv`hFgR(ZVdh0Lyz|T+@7$ z4iEga)6#-+Luq!#7$FBOH6q(FeT`|?i;HUSzB#)y;;^$QB&kak`i%Drh(#SGT8I#+kdy*J5=XLL>h|+ zUNhOBhS2)=%iOoIS9uA`VOX0h9d$w>@hP`7r zx&1TsLhZ=;PXRM`x{+Vz|NnDTa+s9q9Co^H?RAxRT5d*I@XygH7M6sY1_a}hx6dnj zB)jmzTsfz&HzGL2VC8L6&b(V{^YULZf<9sXGMqL6idEbA z5@nnluyW^0k^^?N)NW~N$stL;82v!LnmVC*LQ`PyUP53s0+{Gf$Hz~MiwJJ}WU6-B z-gVje6L`n0fd%NUxtc33rqMme{TN<{5r3<5({}nPCK#C~x+olRP))hbI)tzfdI*d#aqxOV%eYPn?v-WH&e?vCX6pX9ZKrps+wJ{qFZ4X57K}}j^I9J5;ILrSFi1qe zJIK3kaU+)DVM&`!q=5x&BB(`?pHezT=*UIchCV`_=TWf7NXDZ?waqBUo*7GO13}ef zUP(uHlvHaD_YSp^p66J_m>Otf@ExrxWz+A@1U~>WfW5IrrH#KNl&N|f2^;`r zsYd@0S|;_A3nu-fQUCk62#c9E7N`Q1axjOtx7X*~?c9u}7rG4#jbh}(z@4{3%1giH z;1i+ad=oXhV(6 zT+)mOU4aK>{>D~FjXz+qX_jpbckb<~V&n>9Q&Z(zo8?nRZv5jGkI%i~$BEOydmA;a z2k_P1Jr63{x=_0F8vSKae=(U+)ktyMKQ~d~A8uNq`}$^;bw16d7hl#nILX;{ySq9T z#J;n4HEiVLu2d)1FDh&|FA*B0cOq65mlC}r9P(D5Z)5$Zkbt=NF!tQwuX(CKn9AKC z(w62N|)Cx&cLEuYm?11}KGJ6@bo=pe>G@_s&ORbcxwJRKaz~M13yC<#U~% zedmOyM!1xk2=lGu60X<6$BomO*~HDo*XbrZ#IsSD$G^xt3aXN+m#Y@%g`vaU$Tkf?mWpOcQk1ASWQT_T%-dybx{d=ty^UJaxYFVJIc)GmklbhxGe+M)0AH%?wr-IzhwISh=d!VyxJ=rZ9k0-&w zz2OUodcNCF(tFxlp4snxR72`+PdADHXc^AUi8&5SxaXTBT$aOeZcZaMX)2|uqs<*q zlPTOd?yIs^^g!cab3rIFgm14<5#I~7f`(TLxqAd`3uh!y z$K&~V|7c$(7s*?lObhIm0{d2&=4vbbZD#$U3nW)!0)e)iulz9J*nA|3uWhLs`arRY zuzYtfN#Jbb^wVHuZ~A2KgkrG9>h=gQXb3*rL09h7Z%J5&68SVemc9K2)chX*#;v$6v&4F}I%m2w$QXd(}DEPaoux8JD}ZNyGY)rUU| z*j`{G9(z>>X5dPdn?&*3m89AFwjeA&@fepFO$@@Msl;jWk0>OF{cRQ`(`f&t#(=gX zANFv06I0U@!h(K~`I(8vsR$Ovwe`=N7qCH?om0^?Zl@!2dRb~*fhDHV)zRqfOyz3{_Er%HNYDeUp7tgWq%98(EtXql1%I!)nBaxX#x z_OVwFV4sZUY%m*0u0x(`hnQEYUnKa(51kliic2xSC;-(5qLLjFf;d0LF z9}r=c_+wv8aByF1-cF)2R?z{6R$LmhMHHfa`&v+wkxd6KY%VQ0f9D?BlA#SL%2L|M zyUYV;_Ufj_;WtI~cvMIkqWN*@?t4#{a`5a64!YTZF&zlMth)zFP5K|XsV6j;$=L8( 
zq)+N!y-FhkOI5;PkObR*g#bX+Xj||%4|S+BuUawt-$xs>ESz}r+pfOpgv0`tT|?aB zDrI*9n&8h*{JJJzqsZS*ajC_3+KwxX@5GB{O`?);mJUQXQ!Tj*Q|m@LU+XmYd8#HT z(6gOVHPsld5#VXbgKhGDhvVDyXkuz4ZWfmNKa1G4MF=3;{UM$qyBxKlOQc4($({+O zatCZN(P6U>(De{z{2NcbEhsNMXHI_Qqi2A2t*Whj!hf7qvLbLt-e_;}g+E4aqiHiF zSWf3jBJ3fPZ0U2zf=}#hcGYKkn$)gOVXOv$QHxm{L}Kj^Pq|h`?>sGG3N1LiSceq`xglQgs85x4ZidzyJUtn;K-e7kAKQ$|VteYS>4 zKaZ|iX9e$rZ2d(|EZSYV@GK#}JKns^r8z%Z9qJMta{}8mVDee1Ge*8aR>CdkZX?V5 zj4CU>_qfI2eG5LVvLW4M_m$-z`<%{u4gypI0Rcc~NYu##Tf@OrvROPd%N$qD%P)KW zP%q*)>(W*|T5Mi%1`dqMBvnn=BUhyIrd^c%rZ0ZJU4GS+(&q*2wm6>_3il7+TMhZX zFv_ZzC4drJ;Ls4j^7#h`_k4(tY_vs#_ZklL-c8ruN|MLF7@Db-G~unUKr1;9xemtM zOP2|2C7S#N**_KPiAMv%vWkq$h|s>YkolCPodbfPK&^1(()i3OzoM+JZHwYO`M2@X z^US(1#&HC_2##KCxGf6!)j;?Jje}>E)IQmR5lra#H9z1Awl3uI0WZjWb zYgJV%w{o7-W6A#kEe*}m+8e>7JDuPA#Tg7EucxDRWCk(3|NCT@wOSQ@xWGBnsu<5Z z?{ga*p9^fWHF&4$#e=-8`Pz=R-&XL{mO?Z<2Y%S1wq5h2#bqEQ@1@@wIoxx-SL22% z=Gqq1Aho~R7v7F2V}I)ls0TMRHKOoaXX`WU>EsdL#?{uk)mt;b9Iy3R>$~h1o!%&T zuO#u!>8Ogo*U;KJtm7xY6qdPf2_0#F`N4YCFrtHVHRKoDdKJV3kgCgtP5*RNhm=@H znv@478O438xX63akV)E6Lv5ZJbt%G0qEO|+hsAVt3~T2>&d%X_KxQe7m2tsnVIf%^ zLff%-Nn-&pszFvDfT0=B1(%4vut1n-diU~C;q^E`H)eT(ci_DmSH!>ah7iD#IZd=0 zI3CKB5(0p57zxdjY6_v+ciJgk8mI-97|jykH|!u1{`Y3y%z8U&xVwFUXa$k^#k&45 zu1J^jss~s6iIi*dpZ9B9>1;I+3h&t38aZwUkQLjqTQ=LP^x42y1L z8)hb^=jN@EXS<%I$vlX#R!DWTi{M?*VH5T5p0>-+QR$8i zEv%O>%+vmp(B;*ZUGVjNGLZwCHj$-TGfnBvWKbMv9L=# z&*CWKIWThsF8t|EX@#nCc%wsLZI3>T;|im6f{&P?iLm%G;g3=*tX0WEo>CFQ`2UVq zQ77r-b{*;eUug<%*+4W4^rs&k;J4pE;*)nwm%kt^;-29)Kb$hG^qxOnxJ=g?+*6TR zAOAU)kM6if`543rpi%BBC^;){6z%QsVc)&9w+v_QNd16>n}y7_Pr+9>p0IVb=@2eSO0rf2TCkBDCuT;uZz=+6?@i2h+|~^X zH7WgUowe}Gn7z*O7E$*z6MEeAxBC;!%ZEV$2{#W76s(Z1-J1XU(qF3Z=%CnL@d5V{ z@?3HWV4hJ))>Fha0JIswHelx!$2;==v5GCzmCi*b?=@U|1+!p6>V}W0`t=x-tG~_vO+6b$6Ke z#eK>dn$muJuI&C*3tdB|LMAwKC{OY=Hxr=>7FIHKW;_uvd@o$M31gPKWQ+_u8XR+- z_A?LKs>pc;s!=ujFj0jCQ@u=}^3-#K`f9*Blu97j0~Xha;}$>^wgU#Z zsU(Gk17)D|B>o1xp5VM|cGF#XIV6Y6H?@~#csGTUr|(({D2))ga?54eyZ!*y)Wgu( zJ2bMGKVi9wb6qQqrPoj} 
zI&hr`tcXsLNFBFjl$o_lGK9NOtx~ltX8_I5s=;a6%lXT0hC|X@VD))Bm$c;~TcKhK zkMRy0%leHbZ=v5kq{w#eCC;zb=ZwfmAA6gJ2Gq#ZWmBkf#FM!N(%igELNK0rx1Oh>K;wJdU2I?PKxvtR9F+UtoEka{8g?QE7f0ZyD_NJkOitPb{ek?XxbVvl0UqB*oKs*=yR_jb*NeLUNJoYv zu5o2br)x#Ek&6{miU9&TIAmCX2Y|Xc)#B*ER+A&zDS=S-wh85bG=@PpfbLj-b4d8< zWp=J^I2TSbd?G(TyYBq*i^=?vs_psfMa;$`39Y(u_RY(`r{M+6_L)puVj|4=Ev0d- z%PhujrE?1ie^cJ9zj|j<=sZAJx@mS?y)!D|s2tb?u_hTeUbH~!|MkVah>q{3_E*BC zu%+qgHpl*tQyt;KJ4;ku6?#)B+eg*l)N!dV@yVNfUN1Q)V8Jr)Use+!=;wWV(kTQI z6p_Tx@efP_-W!U{JRo?$muqGu*Y7Wy(b8{%{422UX44(Nmv8;43>r@|mN-2e)7v=+ zky9H=irbRl-;(kt`9aNt#Prgv`esMWCGQ}kkJums%qM;b6+W1fb5R~a!Y;6M38>Uq z4ftGJjlRGB;ClO^St@Q4V`^*7XxmOj!?h@(24ryZ=ulmy4TI!&)V)5SHz|!YUL7FH zCsU0^RJt$VQqR3iwT+L?QxUmSP$#Ev5v1>e$y-v%_phdNT%aN|l8-hgIg$@fYmIEo z^tnfB+6Cuw9rz;K7KM&`@LZ_gzP8qa65{t}uy@bT96sNnx)iQ8jXm;7yPfyA5dgaing6K3kt=G zI}4Zdtm<#dR4)i9gNeWKz$UdAyOVr?R#H(H0U_lNeqB`57}B@xP-lAybQa&_xX&B^ z@xD2^Q|S%ypy3t!-}-UJ_ot`Ez+E9;0g%d64+o`DH`y8++>@O;xFn7sWS*QJd%crh zAUwTi2?qg-!Z*>@;T}2sb5Ce$){HXb0T|>qEYO=5c5`aX@{vzp_4aY``FNH){P=)L z1#P44tbelJr@!4#staZN!v|I=)$YA|7UtV07^f zvr9DXOK#(w?;92p2C&rkeXXBu*2@4gRRkcM+R1^Mu+6lG`d_9Q8d+hy{|6CA-AN0R zA&$G%+?R$RxT16$Zd2<(W58f!@*?clD>`7Ty|9-Qe3kFhk($D4_x3!E{CU`cJ z4rg5)OAA#hOco3jO}hS6JTy){;CFkHUFT=p!8p&vrb{(4DObWKRtSF<%v+MhmjQZ_ zwb7)!0I)l+PX6G2*v4IX|BJcF*9zu9D?YMMGt?!C?S;?s>En8w`gA02sutkDPQ#a# zsPYkZFwQC6njO%PXuJ!m;kFl9^&Pld(gT19e~Z|jJ~bGXNWbMRUqPfU!~uf>ps-kl zZs%y2hz~6e))&}aJOe~>W^|Xz+5D&img^;4{io(F)_wbCu27uO!8f(6f=Y<>PwK9`u$sq$gF4`Ij=!>u$+ZE8Ej3%OM=`Xcd|oYEObrP zBNY2oNs{RYD}6A#Zi0UgAdLL{26hz6JMT%`-IcK1PE*YDpMPn<1J6JN6nK5}in1JR zB<2Z)1rnvrg?*CUbhSh#T2Ub>?m~mUq+3oLiXRw5|AUlwIk$#$$8(?@T_+@ z7n8qXEIWW9vb^iZtF(pMbBqfQyUG_^0d&SY;WlHZ@JxU)(glc_C|B|b<0ZcVTe2NO zI0o2k5dFU|on_js0t!pzCs{ew{|eqks+w;Aqjm3I(WcoeR2fsat)h`zXIrz+FRJxB zp-ABD7afE@4?sMq=^DZj!=C8c&M!AskH;Bw-(fw5zvTq;!eq>_W_#Z{tbKbfYdl_! 
zR$Pi&V#kxk_DLLhEjK#-H~VJZZnS5#`CIZrx__!1gfEWiCh6EN{NvG<-fQkZ(R-R~ zhFO)$^VxHg7U@;x^TJiU~Q%)fS3EpH4lFN zU@P3zmVr>8S(%CT*Xn)VT5`tq$ zNG`B+Og8>IaVNnV1-joFyyNEFt;T0Z*J^*9l=DQni#N8+W#zJ@!=mpfJR->Rv>&%u zCO*rfA!f-X<$}H&E3>^l@GeF^z?hi-&`vLS=RHbU87vv@?82*joNM-A=gm|*#5Oym zYs`KQ?e(tG!#kN|tt{!Wl3=g7gyX~7V@EpnW+{0cpjRp7fc%Q0SCSqO_g?w^>stXd zTIasMmxy}HaJ$)xbC>zoeOC3jXX;&Dp+h0PPBa>njoB`zfQwZFjAXXg;j6-HG*Jw< zH|{iFvC=&EH@)SRYu6d373S8rZ4oSySZ@e%e7~}4*=TZ;{z%g}5L32ez7e5^37uRG zCwWx5U~_L;$yGW1*cX47kKe|E=QPqm35NB#SLx$YW&viVb?i6Ko2TaeA0oJO$c?p_9aK(8PPWt}H|$&AwT7(b8F=;2wm(6!pZ_ggU>JhaJ$v`)T8jQzG*|fCiB$?^=f$w&Kd%eQlUvJ z#~2|2JQ2=&|Hk_gge|$1%KZ?0o}5P}e$2#|JUeQCi##YKPAavIc`ZDcgM&E<6-wK^ zO9F}P629ssqPew5vSSVtVF+AblG?wcZpw=^aEK!R$gh= zLj@_=d?!dU(~PSw_Xz=t4B%hs`%z~9d)4x%a#?|YF7SB14d0HhmVR#}HGa1|tt-;w zneMrxX#=&@n<~&Fp{*ME`Z!&mM$5?)cEzvtfooxdKZ-C4idb-jMq41GwNY7qy??t- zrqU=g80#3Pljq1%Y3=Pzp+M(!5x)iTxz(f+aEN`%aF!zWHt+rGq1$ImNi~JGt>YFJ zSeuFj$$7 zsLEvoR_$p1N3BtpAnf{uHG$k|DD+QLhXT*HoXYqY!uj}b+lAYYJgeI31{4@*5&l z3*A@l{3u^9Zm&B_f=O~W=ouD*uwX3&hoZK>WTdgfWOXh8x4Wg>G%lcJw28V}KvSHC zbJNgi6jZdS*XsjqA(-F(RutQ7FGBXs)x@8?7G;KMO@mah4BTBt!NUz?|c3ps@bcZ3i5ddpD zV$y=nt`G%fO>?F>m?s%&hX0qx@%<*$tYnH+A#A<^$y}e4tYjy8p2V&vL?3Gt?DMPF zW9cHe@30KN{){rOpup1jsxx2X&Wou@?`SoludNoS>7KnDnX>Y+Vt$4GVx7nScX&7H$CjU?J|SDawb$}+{3uC>^F2cLug4!0o^DJS`J$78lxe4Gk^YrTxTu9u^L3EG-6z+B7sh$A*} z8;3Sj@2Xk2VScMC^5zx`m~@Y-*bKtQALiqa7gcP)BEcui7MfT+50$4yDJyoioUua4 zl!GwcyMso_dqh2B#&YLhVpi;zOk$01$H$n61Xeuc{L8_bQzaQEJKl6U9{f%?QTbp< z3huMm{Rh4&Zv9OgbMZ*9in=83cTop5?8J(iKu=eJO0u75&?Tp=%VA9!3GPfWV#=0u zP#-xI=`x-rZURM3HoE?jNpECl;(C1fUdu>h@O-uw{6+U8#nv|ewax_r@5Y3B7nx&h z+8pZ^M;6P?!5^TNJ-5uQ`Fb>YFC83&^mfL^?TDTU%2uLD$`E}14$~B&q_x5&7~y^( z1oiOtt`$s{Ag*OPYoHYZLOM*IpQxoelO=7g$0SV$3`u-`I#%>_(`AreG8QFQ3%W`3 zvNq~pwmy(rw9+iTWzF`|ud`v&Uym=MD^kj%6Bia#lDc{Q^}^*MU1@lqY<@KMr3v9|U9T#Tkc*a{jOSB5(?FashrD-)t;H z&XhzZ%@nx`+^tn&E?$bca3~Aq6pY~w=(bJ$wx6@B|Le&jov#qPyRY9(k=dI3)2OSj z=?z%6Zw5%i=wrs4O6xwv-i`#46x5B>eo7<)5(RI+ncLgGRf(5eNB~Suhv|w<#9pr! 
zTcLzz$dp~&bg@nrjQ=KMSw|}KRpGS~(v!}Y#zpvBULir0n>F|&fD2@E<-z#oR~)jj zTCZGltvGX;6e-tUf9DtMRP4df7cvR_=SR}Z%pU1gOK^I|GH?>^nS9;i#YxXH}Sk$jx;{r{k%uwPPg?Jn=CB>M8C_3OF}lQ(==~<;F!uJ-G8& z&o+;VS-BI@G>ON}l*dlTqd67I>Y&wNp4K_iz~pVc8T>4R+Oa@`>Z9(zXboa@owYM1 zN1Z{nBNv@Tg+-K>VrweYd+%xpp&j%b*Dk2Hbe)5A?XZBqPMW^x$@%to+%F6`$0|VR z>XFdn2g)4!M!y~jUGBaXrTgI1^Zch{qN`t%V)aBH+kKyUO8>c9_0>n!OSCWD zjRVj99mDnp7J9>)sGIY6p~g<_ib*qoVgE?nk69pLw|#+Fh8jT4C@HO33PDeH;u|kN zQf2BY9nflIzM_CJD~8z{;w19#9HyEi{$Q%Ch}0m8aWIalNZY0=SLTf}BdoxmIgSb= z7jAzxJ>vRo3q7$JE?<$iwJlgq--$%n`d`=K@bgg<+?lR-X{Io$U0lXLDe?m>&RI>* zV?o-Mxt|4&Hd9qhXt>Q!T5iA>GJu$;5ik_AMHrA8kwXH~+q|>g)psIQn%ky`4Z@4L zNYbXQH+3wvuVMxF%mmJEk&@5(QK~?t&=eRL$V3Og$Vo?pqfOiru&DF7V@rX70RF*P zVKH0%J(5F#`>5}mw7GTENKIl^&{PBM*jDf+v)N#?3D4w=Gs1GgLy9@e8LiwpJR%pl z^2Q;3d;L7VC)Z?iho>QlerfAd`k%;1ACyL3r%Q=)3*;l;RNDf>6+AX!TWzb3HF#!= z%P{4%!cn?kU;5nZP9L-R=bx^)iECJ(Ga?`G|TIsN51`f zndptHr{MR;V)b%+FGY)E{$|@?AU%)!Sn;~%=NFIY@*iJWx$GW3L62(jQXF69vc^j6 z8vuek8tcYb4Ph6qWSQM`ahk`*-2hdYtSZCa__^=Osw z5c$`TG>??+ETzyo0O3YT&jGMo{RhZvivaf48<^Fid{S>7TcCLT6969`qTGEs=ludp zx}u|#J%iy0j^PfhVi<*l$XG5)ZsH zBLm9>J)EEo;b;2oJPjg&5Su;>GcGSsO^`%xPWSsJ-wuy0^S>61&^L_AXmMn!2*QY$T{!Z z)Af3A7D!U$o@vBqcMpwtKG2~8lqjs@ZI3B8c-E|4eyY~4UZSGBMJXp0{-v?-*XseU z-&6FfPx6)nc^?L<(hPqkblm&5PjIzZe5(u!c&Yk#LU!d3{&<`JrxnVjP3AizK?w*@Xa}g>dD|*guE+S9$CcaJ zemxHqPfv9UtBMy{6s6g9->p6F&=p%r5x-Zk@w8S*!6@}1QeDR3dbA$FvUa8W!KY-USFertvr|7zi(t zM@qJ({Jiwzu4d`WXn~tb5 z?9O4|1FNl5LK~rb>PvQ-Q{}mfk!$c@|Z7@ZBv>$Jh_z`Wo@iBsn@Fig`s2)Sou=Jyl!UifJVHVh*n1+GKx|IL~3OJ z<8XJ@V?B_~^Lf_Uwfs`mGQv>?+FKG-Q@)bivp-t6VCS zG)cxux1_^&(T0dM)hRtq7Kyk(fAEZ$Aouy9ZNhHc-C9V5rg0+DA#NANN+U60kojL) zxly9-7H+o1{{()H!>~lRk<&|V-mjTe$Dcp+JD$Jg2e6#_V8|l@+pvo-zvuesip0VJ zT?*_q63lh)s=&R8SPsiMg&*D>qQG@cVOCr!Vi>G?n$aHl&jhcCG5gi;qgn~c5q|^) zb%mSXol}ti$B*qJnY2LZc=6;`j*$=2O6EX9L*dbq_<}`@>gQenKjh~n@;>cJ>J-W} zsvK{~BKD8Zj9bYxHS=VDsTa^4+*|@chH)aoZ*@PGM=es}*sXN*<>q?^MC+XFKsKjx zK5hK*QB159d!rgrLAlf5_*n8lWJ5q2EeXjkuC94lJL!YG=sx2{@{uk}w0knwaLq%K 
zSHw}H@Gu2aaC$_v6-^F1M5rKy?c_m0KP7uVXz;sb$$+`ZT`5thOPeLWqyp9OOIIU- z<-L|FiR_XM@U(A+B3kp`kDYU^N;dwrOI1Jn<=I#oUzQe)`D^U;fgC`Gaa3*xtnjr9 zXDhP(9xd1Mi>v?1C$wd#C>M(}gC-|_$FU`qPep7&OTA zVdA94oE!2osRTBQqB2{Ux~ui|Fl&1&K+$KATy2CsivIBQ`9kH!QcTZ?$2~q)Q-|=s zHf|{el^^>uyz}5v;DpVM{ue>-DlEGYQ4iQZBTw)^pvB5fAA^MQtuHB86k5K(uENMh z%*QblgmS|)d}pU-A)O-tssp<^^I`(QKl--1^Xs~QlJbx)1aFW9+Q~35c02*BxUMC6 zkzx0lOcxz0d>0>$(!T<({`83Fwy)j*B*dFzK4^oJ{E5H*eo#Uu5b=W=>$QkwV82%r zFV_0Ku6Py_QPiTM#&N_2BCYEUZcDlM($WLicxS57;$x8FjJQWLimL4mq`&emwu_ql zqN4QpkJ*;s<1Sg;W}t6FUh=v)_glhaS#;z}|KB}t`y)*4@7Nr317rZMZ8g-!{bjTK zNZzaR1LZuFVQ}c5ylD!;*CyY> zysQQqM90jN)G0_66mqf{2k!rPOY8fiWdE*6j(J5Oi0$JSdri;_Keh4=-z8MCp0Km~ z=c@aKS+S}g^#BgEk872Yovqa!eYu%s+q+)MXMP7NSa=86HS^4FT7)y?(3daBYLkC+ zW1~Tt$UOB)D{g8G?b#$=k>`w++2SH(%;SkD=9v?0mrcBnODfXauaq50z8ono=f_=?L|Spt0!xW8DbpxVJYl`^BH0rrU5OQwv| z@nQSXIv+8%I8%E8TP^ZG(dH|<4T#@t)tcS=cJFrqB}wKS06Yz3xS~Mz;}yYps9i!E z-*`BA$Nh4EN3jGS>eA8J9g<_CYa~e`A9m`Lf8TECeY@KO#$90RqhMt{AG4Vkx{_G{ zDF`2-HXE}+##fM?gLBpH15~~3N3zM-QQS;mzACK|n}rTT9eB3c_oOvRZHmylg*E^+Jm!5r|S%H zdQpk)tqu=3&n3_X$~rp<>=^mW%|G>hq)K~nEb8?ozB^N2P#7uAHB~NV679=- zZl5CSEV@rCgEMTquUUFNv77|BYH9-9xoL#&0AcqwlBMk(H0Gq5tFyCwri4h1XS)qj zcz&ePQ7k|+zVzVc9xwpSyFh=`)Nl<;t<>7VK6X98Duj$KjecE)+kU<#X}-7;xcI?3 zNPCkxD_Av-iTMzlpf*fi+$6!b7dbp<^}|t3M>AdL)izJ2N%*OW{8gsPx{0*EvgCu? 
zNu-bb;iugkO$TAd8Y@$DG{P=G^qYJWJ*%_8zk_-j8hvR1YckkE0-&1a9czx?xP*qA z;72UeJzT7~-s&e7KlxqFC4UqcWYT%QSiA4t#n`s{-L3y^+0$J8#hdi+l@~d1=9SrL zMUeIrwKJ#Ga?KMUSnkgSdbSrK!yYyc3l#Ek9FqCy@h{$oX+ZI3E^5ct9Ts=0wgDPg zkH;JXSCxPkPz;>t=66y~0a3OOrp>JC_b-472nHY%{SxtFyQ@*jgf^RFfLmSRYFc{;}6yeCV*pyojWn2Zgm=`iI;xmJDC{x3<^FA}^`A8y8 zOaD6K6{=aLiTXpx{QJr~kkKs~M=iml6wDPog8Uw3uCME)>TA~(#~*g*bkbOGoJ7^= zcvABtQeNb4i`=SM<1fT519HF`f3$M&NlVqww(=e#u-qL0()3b{n8n5;l~*^)tNFJD zj^vbqea|t)3qY>~e+;q>o(n{d@$2OA$KGdv*p&CJu0HkI+&wJ3jV;!}dj5VL6h8dt zNCq;{^JhnhFT6SDM`QmC))Ks)&4(W`)f*hD>5EXg_;P?Adh#$sg7}}Zg61X6ATVy1 zq2Rf&DH~5;nUz9jzVH=q#l<^WaNONZ1<(B-@f{&ix1YUw%g&<>yTZ@A)imv2GUbL= z@|x2SFy8|nSq#HD!G|)hiJCaIdW8%6EyNRG2~u+}@5Go#9C5avO{yf^is7LO35G7v z^Kr@mtdqu8Sm{}gR@C4Rb-e$$ru_SM6qJgW_z3lGUW{EN*Xaus zyeR4||woCc&X zO#cpGlqjidXk@5JgB1z2;cLFpHdmK1UyF^MCl zwKH}jnQ>36do%uf`omytZHlY#*qC>Vu{p4-OyDBcN6^!{QyEgGkmtaMvDj_ChCcz% z=FSI-_7@?Xte#)S%R%6uGGgA@3}znE_i^f;??{# zr?})=UUwwbB9wwEWg!7FFIN_&-IvNhm5&G5Ju|?qe4hA?PUSr#L>WQ|phm~Vij2oI zT!uYg1T5xYDz#TPFFiT8Fo;@xaplHlO;YVMNJ#iC;?PunV*R#`3%^s26bkFzqzebm zxm9ZUh3l{l?aQ5;@QUM|@TxZZRC!#d*!f59ZyI%>N?3ptKSG-OU-}MkUBcMKtTOzG zWa3T}XAz?v+cTl4t9Pt?XXu0eEU{qiPy)_DHK4M;%@8TH(fv+7KHn6Cf~G6_MT1sy zb7b6p_rP*A_x0ZUe7x{Iw&wTqW|ra4quWa&*9S2ns*CzZfO-j?0=52rUBv329?@s# zR%z-AMhX4y-=91EdQgaqx7O5SqX5C%%1VD~o5|0UPxq3cp3oyxs8c+<=u=gC6aaC# zf^S>->=CQRPzCZ=W8U=t1^B8&!vT4!C%|Lvcj)ev|Fg2ZK*lX>QTfxmtA;w>r~AuV zuC?5tzu(Z;Nq4)nXt65da6+9gDy=Jnj$7XbO+!Z-Y?Y-)6`X^n>DavoXKUXR6NV)J zp-IrSFm`kNzq8zU@rMK9iF@sUF}eS}yRrMvw>NYCCZvE=Mn|UhGx#9;i!xOmo4qqH zVT`3IX7a0!0S>#z6qBDnvOd~@efP?UKgm7L`z)dX&Mwlb_4^*WV*X3AG+{VVq? 
z9h$ucZ==BL8pM$biajD5646+S_#Q@^nWCp6Ut+811I#f5K=U(LY-M#dx~@l58U-BTCALk&E9l6s;$Xg-bpF~wFFEff z(a8Ssj}scAF<8!gRhm0`AqlF+-F*-LWy{gf7#UkECjg~w%`g8m_fX8)&-t9MVOqyl z;U2`7I?B5V!`ZUPi$r3D)0z{C>5l%1eQC|lO3pLVY2&Lj-wE%I3F*M7;Y`I*c4blq zsh3OW7MmQ5?WVR3<#-TdyiTJJr5eory=1%7x0bh-M22Y@>iFNAv~9N^jAUO=Y^)Ae zlM~i2#hO1dR;4j##itCLrT)m2^|l$o-Fr<>dI&8dFw+i=yO06UD;!{JGgkY9c5U7z zB{5sp+V{Q*T3L!Up720yTJO}!cC71YSz0ygL#a#Pr_8s~kR54;1MSYFI5zjmv(Pwf zAP2gx+Ulnh^?*|-19L`pMbtbkk8fu=Xcw$5X!Q(b>srSrM4F>TpQ zKon&c$&ivO-gLdrOGpp4p3GVI@E~f3T7bOFVj>=nfZi z4FaS*H$i4UA|e+VY-@_1rQcR#6iE=@XuGYQ@!m*M>CbyLKNY<)LiIL40=M;U3&K6! z7CAgcF%>F>?mM=T37aWyA`jPAbdVx>K(ePX$-v|U+4@|iy5)LK{k>V0x|#W@Z^AS$ zzT&V=SU@i0x6J_k=auJ;f3t9CXkcPsPVE?-eR@PqbuY)sw&U2|9BOJHv>wNXm6;iT zONZNCa+x|Ap%1EA?sbn+D+&1M9us}bSmccK<9k@d{*bQ%GKDc;$!X+jR``&kA~`b< z-{(u*noZAB;1w(DJ&UbIYF+u#X$058mQdwH}ZY2TWN^$yYBYB_&m7_AfX`sIs3CB;n zq|_jRt;k2a_ysb)3ET6Q6*nY>Zcq-#-#qw3@Epck&hN7W5NTr5<<#>cqKp^Q2o{Ia zsa>lMG@Km`*ON#)%KE2lQ)!kf+>0Z?T7(cz()C?g9*$-AoK z;I$xlx~c>`QEWS1q2=tKlPV||m;q^P`9FYKEB*nk&LoE3%P-Iy&Q`}!)ih=HN zK!y6LT$N?>(crvp~HTv$2Wy3FrP-Epd-Z7^#pH zq|aj9KU>7n^9B2@t>vdY^|r?Qwf&hwZeiN;>O%F-2AJpe8gw!zoa6@`yR2A%rG;y5S7Y+GVI|Oi%tUef;?S z$7?5cs=&h`fNeB6H z{GShpslLwL2ACE&7#9e>JC2t)Sy>TS9@}5?zfbc?9Q?*1lJTe9|DMHX{bs>4+6}bN-nhH`7pN=sBvzu$9a{BG8!~_WPV-`zlTAR?5dr5ws@kZ^#w z?PG(1nwX7W|E(Z^sB{a-)8^i8ZEfAJk8;Tkb-tfns{-8!`5na|rkPuU!n})mStTyk z_`F8o&Umdpdm2;_+v4w1r*pFKjE3fEp~Ht=I%Y=SS+oE9MHsvkJ=@nzRsX3QQugk3 zVTu84=!^KFqyo47*NzV5GVSkLm%nrdM1|cmwtP2@GJW1GK1K!uEx^Q`t3Dy?&ura! 
zWW5^V1R~|=?;C}m^r#wj05VK2`9)gTCSW5OdX#e#O{|0=SNQE3rH*QGW_*$eWJ=b>%ifC!_KAm|JPUQ&v?HfS?gIQEBNy$HM>Mlg6N~U22Q}>g^`_E9Z`i?4W1jQ;pu9hwjgcwwKDE zkYaVut~?Y^hH1IG0@JJ~1O}QT33eioB7WZ-h_@Yq9F^*FUaTDF5P8pLlKR-!!kI9; z66INh@3NpH?+yWp^)F7--2e|rKMS|@->FO2Xx^mY9sYU1{=cHT7}&c1 zU+tu>PPqn#d@-U|!~8jNj7B-01FGJP>q${c7M;}iV%=U1n#Ob)&%*A@jiqp-coO6; z&{^)DXT0!Z>8{V0p?&7azjmx|V>dt(C%2uSK4e z^U$4g;oR_wTt)vC?KCJCP~bb%I}bkwSrP3hme@Q2S~QI!}$ zRht8{m=z-)=0^SEasU1e8<;8vvF2;5aNVFez^;mH`v16l>$s@e?p=I9Q5irGh7eRh zkuH^1YEU{xVCWDr2b#d9UfXbB}Hi@q-)@JZ_xL7-uJwp@9&(? z`S0vMU~1oc?>pAL*0t8UR!vi2IliQ1P|a+a79_35O7AY!=*IE-M|Oa!r%55C^6Kj)cUR9t*gNP#C+=;V$a(G$1^ zFE*QZ-Ei6-%Kr3%5J>4n3H!BBy}uV{DRDNo`@2$j#oS$T0--xkkza{jqO z#rO7Hi8Y%joE|j2B{ya`x`0H#WLWBW|T-L~a6t$GFK$Pd-kqktbN_5+m;_B2d2TrUD8>z>4GVQEtd z61HU@0PR`sNQcm03fu41&BJ=={yZz5Pg8o(t^20ZMFq(NCyz}0^;76OOM^eUo`DKm zfC1yE=k`o`xHVuL5!xy=@1ZcI_rI46=KvXPn!H-OfLKK}@|k83cXwBBZ={QM;By_h z`I#4MP=qk(zN3p!@MjjrXmL>u=ryVa9eP~8MT=dH6h&hlN~{{&p}+0cQBD}gR;~0P~hCSE?QT31r%-mV5Bl9ZepHoNs8%nGxer6 z`}v{|2p&jPL=v~X=JYSVBz}opm(b8^-UXy#x3<#TFL&%rwkuF;@LJnEwvL^6bOQF> z3q)qONFKZC`{8yiTtT6{GPR^6N(DjUyd9HU z9f?(*>W_xw{n{;ScDl@DAG|O>^?CSYdg=~-#I;jLUJY~C6?vUYVV8zo9+@PA2s`1M znkUpJ`T*4W%QO!?oKDLd5fb$H8y&Eo^J`9C<^QTb8|ZWe_05El>MGa2dp_vS7wBIa`@E$^ z)?J*jARw}n+ICp%yYo3J^ydTJdMtQNAs-O8T|TX+Pju{ORFfnY7_aY1wa?*CUjoUu4A2 zfPgXl3bBQnW|HFk_B=exX)7XiH=OM#x&L+pGORtbey{4Sx7S5_I3kF|Re(C)gnH+R zqbzGsdyUV9oVl#4L=UiRQ(_LRC!$ui5_l802A(0H$t;(={*jt1^ zIQ{k>l$J!U-lK|TeUdqfmd*ZgWUaQmjcjLlD&aQKN^;(*OK3GY7r!^Uf6dUUymtE1 zK;3Gd&uZ$@!0Jp_IIq98|6a$@ug}j?@N{QRf{}+_eG<+j>FWjNeaop9^0;M4&Q;jA2mBG@wyxP2@LnNuD(7eBTg9`yuWRC z#Lp%do6_ntjj@-W&wukncVyg>h{gfmSKmowT`1otoPt<;2{e?rEK#@JD&C$4Yg$B=O; z_-&W|cy;wBRE_cJ4t?14!b0&qohF&8PB>Zn#WOe`{*@yW7X5Kmq;k?gWp0Baf+k}f zTFXE=sK@RzsZmA(sowWicD{KfjxBrdIr(l(P_#4J>Q||HZjpvqObaC9!?62t`HNJ+ zT{+{_pE@JG2b^Aes#mpTT6a+`vd;g~Wm=##=QDX#yBSI?Tahl{xqtjIq;*+?41Yyq zP$a0;o6_ML=O2yw>-+p?nqHWmED`=bU}YEI;XdE^u#;!>S*#a5!);9ugvWz|8HA0( z7gxB(Re=*o383l_rS{We?F&IY+Nq&DRW&GYQs+2_DkP9gWvR0IxBF7Nz4KWo*At@{ 
z76kDiWYAqVqbbz01cmQ$7`Nz$9VtLm0{e?2|?Sb6=Hw`)P$3+vBC$h>!p4%XfUzS5L==&a{nRX-B&Ka%iMbzklm zv{j7Zm!)&V@O^D1X3J-fm(7h`oOgoxxmx$6MhThQH%AIw5jotXVK6pNkuzxoPJjb^ zDKlpe2Q=8tnjm#KZl_CAMrEM!IVzLA8ebjde3^q1Gk!Ib))RTr&I5zQz-%mmmbgA4 zH(_5##88J=l%X0TUbL@D{ z-Nf}xFS{njZ*LvrCn!AX)T{gFkzEZB`Nwmoc35C+)p$1St1-dqUFVwUKS1A|E)di3 zW=yyy{~5Q{dz6I{SynV1RfN>zw~L}H@k6Sf#HGcp+;nMbIpRDhTDx^w+OAX)GcH&* z{$||#W5$s8t`!wS=0_dX{ybXjC+&bq>rkP!Dx|#2egO={w;I0oI1t3K}g(Z#sMYg@ZGt-Jl0<&HhTG_eoP0hb)l{rUbXjFX3;yzyn^id zdh5To0s^f=U3}Fjo}`1VF2aEsbIv?Nph`RIGKpMHhgNUYwlSKUGRjx_2*$dTba{*z z0UwRRwNq0NiYGknc}Vs>c-CuzA|nY3^7YxFx7&E`O4HkF6-62>EWQK`WB>jdHS3r%>rP z(kfKwCz*4wx?|goKaD=z3mFEn`WF_spyuEmp$a|_w|l91y$b-_FTea(?^)g?%Yqh= zTeQf{Z{xs`sX@vGoSIi)8Ar*^BVGdL5ueu&_mj;hgiOo3A)tesdgl?$N_{mvgxx%8 z3FO+~f$G@@@UDA6U3t&?Z0OOCkPAL+?=7X!(VWT>_x^~*5GDF-ghWSUKGJ0RI#)eH zYG!u!GK?EjCDO+@S&o1!N^H}bny1aFxxG9IuFxQ}-B=gl&|tZi1$k7+5u`BKAl^D0 zQt%t6Tv!!Y#L50xSWXel5>As}-i60;bXAvgKYBm7ef}j4yxj{C*#69wSOMVBar5|F zzCR1<6}>DHLJ6mE`6=Gyyf3`3!MYiV2hPO@mH00Lo7l`U{?0f{P_uhi!)Bu zucpBCrhPS6C-diWzsf#6<4sP9r#a%M>RcltXjm8sR}+Q$o|%Gp#}c*g0cy=~kaWN`$E-3FnaJ^V0C> zJ_4~zr$H5>t0qrTHO%Qm1CXTyidJI<4Dvs<>;9RAlSx9g2r&SpxH!L2uko0X*$|@7 z^_xrp#gwe7~q_cA(YI6c4m4()~g@gp)2 zPWRsW&-^+#Y1bcz0CeP)WLZ0{Kvg&U^<1B$hnJjz6c#3pySDr73H1Q50K_>wu^P!+?lwn$eShNxCCtyLJs%5&GJ#N#4h8`2gRx-e`UMj? 
zdRAfXJ&_Rxv*I5f9#(mZ+F#x}^4MeuT7ck?qcZ{`hjz041{MjXte=r#iT$u6&3)qb zno5ySeD@K*?5;~61aV#|M5fP3AZs)}%M<;qL9zIwCvMXIp5!nq)jb+JI9PQkzOg0C zRRtJxNDy!DjnByW8U*(c2)bU*3o-@)S8Gs_Z4NmJhmmW%H1B~w1}X~q`?z~kS)M-B z)d|6?+u#USBsGsm1*6B?6$$?!X{;-CS!+6>w^ znI3>=}pChWBX0bsnQq%gm2=KFsxbND|g^WSCy_%NyQ|Lzkxo!Os1&s52{ z&FUV3CF$+T07=>|4yF?QpQmhptCwqH{o#IF@xp>Va5iB`KzD(zL638HN^u6QDR$kS14OG)eB&@htjOEN3yuUw*t+jQ*uOIcs`ub5lEc5S8@h{vv1P5+Z zFlU*{?v7Ei8Lts+qjmglRf|nCsV;--R5cz z4vR_u5^zbJ9v&V)K_#*xP;Zr!_LB+izleY1}-`q`e(ZX@`wtA_z6zAGi4myZqm8m>czo3=ui9j|IQ}^ zP$ZC*+#kVc5g#8PN%jwc@keCcrJuX>w_M8dI&$PlBz<=Mfk*?2C9E*y;MSNmwY1un z7(af`?b~@__BSeYh%`4kS9C5|K2A;kjhX}SxzVPAuyo7ZoRx?o`ExC3eyn{u=*jF&39pb#eQ*p+LwV6-$;G&>e(1@SMVL4Oq^7rw3BKQS$b#z?%UbVjNo+bXDclig? zKZM2sqJ1Z~wyKy%Y}Q zX5W^s`}+nX9EM7NQgYNDHZBO7U>jtbWCH*HKKwY9{;8dTFaF0*G%`W4^EIj8=ZjqU zTb()3snKEWrpKW#yU)okk;2$m4ki$IZ-rEg{ zJ5vW*C;v8yV;IVBxbjP0_OMAYG!`^ELdp&JleAb(;(9Le;yjI{$4(ZpKIi{9KP`5 z#NTUShC*7h)6?r)Wp*AIm%C>E7F2Q{-v4PTrV}LhykWy;n^g`RoLPizyS=UMUPC3$ zA-f93@T=TipbbA=l1X+kd+xiynq1w_t71V zM3fA>vp^b*hBZ+3BugHG?G9{4dl`QVFO&i>ceDZm-wU%tKLm&2zpe;ImKmOP$*w>D z^T9V6;(rG3;4SzgBKiN;CswLJf3N9F*NnIM{fO;f{+^JGCXW7)uq~^4@b&V)?_dX_ z-7m=X+b93=^B~gq-B#hV|FYW1={Wl#I_~lEfrvlS?$EEaOhrj)pqGE{2c*_f_+t&HUaIOy@;YpD6g(R63!%<*t zG%);g61H2X$KL}HOobq1^)IvKH&Oj6T=91fI=7o80_Tb}%ipR7Xj!xn)E*pHG6gUuz9&n6<%kfl zH2yFs99$cg@s7B&F*T%z0U`k9OX+eLbql^jS#FGY=79{G5y&i6ixLz)IAIoSqRdU- zzTI*@&BUZ^U*S7U-1`B8IT{%mIp6(uE;QoxQ-Eed-X_z4lbDVoq#=4^5E-X z4O|%NVIcYI(>{;FzOliQ&h4s}%!Y4+jK(LJy#s-M_)vZYAU}FmRi28GjpN!0Si^Zn z_lIaoA`Vl2`2E3OVFnt`>n$N-a??0unQ*WyN9sP&<@=Y?mK~PTlA{R`qx0L-Q+6mQNU6$lkmH%-k_T1FAUXtb7U^7@-i{9WFwotjIvFVM( zAUDoH6qHCN!dPgH51N=miqYi(7tM$V2#(>yhYy|O#5~s@913nlK=jjU z=Ph_3R<|k0c>2Tgn6VT$rsYD}*kQ0#Ct|vwHIWMf3}H5E7RZ1==#nU7NGUnM5*PvD z2>=$r>PPwNa$YyfWwtPimZuMAYGs1?EoYKq34z|;-i@_Wgk2ByD(ve&J!Pc}rAY!3 zEduC{Vn=u83kc?31MG1s>uhWY3GCB!sSny;9&=puXkqdmsK`TX&i_|;feqQl@`2Gf zw@7wD03~y0G<2&Gsa0t22S)VBAn5~fbL%H4;j7tdhrWNH3M7p{;!E;MNFW7mkOdjl z-E8)d%;&&%Sj%J{YyV|v2+V< 
z{Uy|M8LI03uV3Ho0=3mfq2_EBGgkIz0k?97qbxcAJR+|p@XWXallo#`o8;7)SZUAV z-w!SqULGCD^k$5%iEOmPT{IA@fxeOc|n zJpp~?UD4^77~H8eGFtH5KqM#0=weVk{`BX%Yo~DJS*oBDU`-`}#eHnYJKxRvq>(w3~-{oVY7Gy zcEoN*3j$|^ka7lGSyvJ$rp5?PUDmVbA@Kg-u(}Rx)V4AWsO3U`VGBA{3R)ZP+T2`0 zTU*O_i7+H-5Z%frKH0Yu7mSX_fhv+fDFdg+^wJ0)+;1-{P^+U zMg7s-y*SoG@z^e*mt%C-wl})~L%}!zYBX9Nw9w6RKQ2}o49C-^8_AxocNdnQnhIoz+zM)xbuZfQ;75x80Fn<{^}DACT2-bv zVfC~IY$ibPwJ6oYtZUBd#Dm5AjrupNtSJb%Ob#RrILqpuUC4XtkpsABt-CNM+mq;&{K_IlFHQ| zWnS*zAM?l2xr3z9-{-;Bk`^%Kj)`ZNP<4pRREwPUcS|4Ps4wqNR9TAf+*od|pywRt^qrlJ=-*6g`C~ z;xa5rGEiP_jl{xHILG`%BG*FMtYs(i$yRQ~)WPlpn(_O|uHOGCLlvB^nreBJ>;&;s zusz?M6ELPh6OA5j*O6iP+PMUf_YisMcwjDedglg3n z+YR0WenKso4*-$tgAy}VWmn}hN&rSjbzBm1y8`$E>{&KiJy^2m# zf}Q+S@8FfTYnA{{Pk~G{h+%lFEk6)`Q%K+J@x+pwcb+rw8Gt7TYt0>?*RQ>_z)Rqe zXgwp&9gkR^IZ6ph7XWB7!>PArl~J|az(N;xFJ;392?J`y;BeR&;EcWUAumzXHnJu9 zQq4KfpO+5Y*3P=4*Q03+lTNWgt;342G-FiaL${#p3fyWgz-6I{_gcnQ4yQV;kDv__ z2NOE=1SndT#>B)V43qn@!bi)ioY6i42u-5O+DiwLX~sZ&(>w^5R~^Wz;@*vepe`W! za(5Oggve=|fIb-xKFW|K1X3x}2YN}JQUK{hFch4O+}POIWr2e4+z}Xa6!Ni_EYVf_ zHN@Xtz^}8zsTmhdS^F~}@aUn|YIX8pBhFF9fIit%P7;mA&&umw1ath7ZhX6vC^yW? 
zYXFdZf&$1K=2iElm8UbDzXSS@aCbWHKWi+h>I9uwbiQ*KFU8FWuo(Yp|D|9MFP!Z~ z3A4HeAIs#$1_T#j23sn4D{3H{@5`(D(hw3 zCz;D+?7lbx1-E_x(Q%!xB)*?GBJ5s<3k0!@GXMBBcd4f!Hm;K^e zGNwT9mqA3XS2&keu>!SpcVP;%oe4n9atsR2lTS8`?zW6_L@6=~@@0-opOD0}?z}xnlgvDy6X96a?IQaD&=G>QV$>K~4N_Bof|Og$ zfxNaT5bjzcgDp6AKZUQj}Kj{)T15F=KMBF&?3Ph(rD&#NPjq8AvK(^-9 zA3|j0HxgNZTh`}aa2QI9ql5XWxwJB23_=4`Cw$(v_w|W;B?d`>!h87za{h09x1xLt zKzK#72#s*DXrSp5K0$zo?X-ueO>APs_ixk#a-;j_sZP>r%c zb#p~_e`TCw#40Us>1}bvpnZkF{j-6Hgf2Y&GJmRbilzuJ8?$9+ii(QW&B1O^cIwhB zJYoWm)itUMDfjmIY00YATcR^Ze3_-1v6R3|2Z|o#jZc|*%%rLq7v;=u8P!7;fh@60 zwg?I-xa&RH?Fz{bD9BrS=jVsUseVA`&CiABH{E8rtiS4)zvl_lepGvr3;UZ2eAYf0s>n&1pd~MxHR$JJL^=z7r>w# z9CGugI3LiA(bP+IDBJ}%EzB$xxs)Ya)}$BGgegPBsIr_ac^61OqD$rgtOq>`s4T-g z2%lrAiUTwYdUhTy9n2c-nmyf$biF#8vmljFt$56)!xOm}8tS|IBdDO>TTd7B6~ta{ zLzzZgp)Q@InC;!IcWqvr9@Cy{?7kZheZ}A7s?uDDYkiDf_P}R&R>IK_vVjf{H&8Q< z>iqm20E>LG3W>nJGF>aWM}0~PWG`vZQfjA4Q@I(>qRK;N6Fe6RKp{O|-1%573pwBD zf&QsUiu`hmAo_9LK?Fm$aG`YsVwi@&yin4;^$29R#BLiDsi4Wbcm;4a?~5_jyUo>j z19+;KQnD>zcoK zKc)EfSq0_V>B8^v2zT=%;&hZXv5n5VfC8|{*_yjwSUF=NFMp(#(L7H4XZq6Z(qWLI zxdX~Wnf2QU2{+s1$j74<5!PT2GWb^pZ-s;q2 z@Q3pQ5g+eQwk67*M{TYo?hln$RD=)4^a7r~`8PFz&;}Gbt%P8MXscD)Hfo-fw}wvt zegYT>-g@T%lR8b8fH=rE{P6@UJbuxS!Lq|pO{&Myuab~jIsC1R;6&-?-&ZtwB)xoFqBYJZ%kp%<+Viv zUzj>6#=gOFey1hsMIGuaJrxflIF9$WeK=4jb0Z}(mcQcq;J4TwK@?>5;iBQ2)-nlCeE(8t!S#nmc zZ%b@px!Qq;8>5O;@zRmN5Q**rf-wB7~|>HWOD*}R_+!~L=Z=9uW}1{8T)$V;)UaOO9* zdJ7C%hNQMN6R39KtT`Ziw+Tb zeDlUYx%ps$piOd+t(Nh_SFCKSS)>fkP{yx(BTydcbFODu^ExIlaJ$MAV(rVVC6pmP zqkfsq&(wMxCzK)A+DkV8vmKZVa=l@y&83c3T=6K2XiK0QK~q41B}5FL5+uf{Y;cZ@ zTtD03l;}s>f-rpn>&jsNt7NsRYr~5^;#0*)>r~xYof9{kThnoY6P?#GoKt)=Z`6KL z#Ypj~kko_aCS=2+Bd0Z3(5rR{{+t26f>|Ar0KDBw*^AE8MF$J0vUoD9-8PthSM}>d zKExw{6mSh@=Q2{vm&svSMf%nre*AG(Ql3EVJ*}V;<|vY=Ph7w9*Cm_Uyf22_A1PpR zshI=oZ+Udy%t4^nztrP3#k+);6nsYxbS>Q{uP*Jv(dD^I%8@{xvy~|;DJk8sHN-O}`#meq~F(tV2);ouc!t z>E;QIjR*oO+(?Alf=xp{6Btx+?ihuICa||#B#%X!zpSdNdczvaaI3346H#(b$#@>6 
zFdo$0XQyCa;WQXstx^7MIR4?tJ37Jjl01mdS7xKEE##4gU9;d@+Of}F_uZ|Pc_>WNOlh$sUo~|keavgCDf3(Ur_pqoHJ+U>_hrJ5Ixd#}I0=#` z-kW-_^|%wrxn4fUBe%&2@_zwooZeNCNtKh8)m#{`#&iG|fCVB~e2Vjn1mv1aVju`x z^ae}ylKXCu<(1B@?Y$Szns@e>P@3Pa6R*1T{ya9wg^G_jQye6El8=d(DV5z|0xH(y z{5lULp960#4j+|9+uV#{HeC}FSAS5z9#HcsI>5%`)c`AekY4Y3*EKyGrXePP2wezx zwY53-SElZog0J+ew3s-gtC5gQPPmZQ7{!GESEIIpV?=n6QDV6kwm9DuMDCqC#nHW+ zPIV!B%2vu}Q!6h$DS70=#EBq36=+VW1KuxAscYIkve>i#n01l_OF2Iw2 z_HUF!GjcE$2IUyV58ZoAe;Czt;1}aDFrO9!5{CMX^=Zb0hWk&UMMo9l{t`=r=Go{m z(i@DJ1Vw=!x7uN%m&uk8Zy|DwH*@e-9xwDWEE`|ZN0YD)@+ZdD1Pj*}R60LP__CMv z+bk}r8JgjW8G|@PQ;brBj^}lY`GBo&a!9XVYpfFQZgW-lses7_u5b!>VS9cXz#VZ? z^ImhB{xXcJu80P8io22Lvx_$&Y7@X1m7n8LT{eGnCG&Ci1Zn}kGC}c{4+Z%#0zpQO zE2>680nksD5CdaoW$P_{(mJH2D_6i37D3PWYUZRH`W0=4DdH)$nw(NGcmXu z0MU6RF%v9bX6tkQH9Ze9q*~&_K=WPn}KNE;tXMN`|U;ZNk;(pVTxWpdlN(q&VQD9O>%n`m$7OEYhRdrlM0l zPU6y3F~J1>m0o*>%_2223Re;j3<}8&OwGhLp$W6yS!j^YpEY;MyaMq>N0b1GNW&7D zoO)HCo3X{`EP3!#I**FpVv|iLs_@oj?mS0XWYW90WC14|;2ktDcqTjpNe-8*cG8h_ z9H6*f1qy>?{@1|(;{vVQ_1Egn`AdHt+VC17uGv`goY@y<7){sH z0O>k_|Lzu<0@&xmz*=ufmFUzZ+|T0Mwa?!mF%jC06ZFTLbTfs`r}?r~nU0(-9w+%@cD-&%q+5k?`-t5LL z&oBAid4Ls2aVV--{RMf+CA8`_us4LG#(;UFSm|9hVU{V3m3aXL_cm4+Q6hrm-A(WN zYC*wyf5-7bk$S!EkUj$i;83M#E9ikYDL13rsWDKjBRf93GpgKh_jbw6fmB$epijQ0 zf9^*0KC#gaou@NB3$eU`pe@Jix3Y}pdG#e^;Yz$rne1WSV~*u9ZOE10fq|GfUG9{d zz>RRDiZAcuWnOiC-jZ|8rVPxH1B_jD__Y#HJR+qqaRBcyTJ=7(O{wQp(a^5N+als$ zRjdST>5Up8-_06U&D=TZ$?8#D!7J5NA2|hnzUfE6i}7`NqFIf3xs=x5RaSXASm*0# zJs3VH$PCbTspSe87N$(gRO=ap9%k+S1Fi)xf5!|NT~Lpr^(A+e3IH&}4oh99pxQ4=_Elv6YW@|Xx#{4DIAztMBj%}GH!Ykt5TjV)-G zS_u|&S2!CQL_rH-Gb%~DpnPaCFi0z`l2G*5fOm-R9QRQh`NbB9JF#zV5h$iP2?%?D z7yCMU&vs|lY-AZ_AVVQeppo!3s~RXJ#ncjmr32w#?#4Jn|P ze|hW$ylstL)byT}ieZb=GLcs2inKOHbwDyP^M@5a>p}a1jmUkOx7v}RW=h^~aK4Km z%%RKy#9pfGtBgVm8)kXLf18~U-hz6pxD86V8g zRVGYLC+S1W8xJHL@y!HkhPw80l+}zmp!5q*vc=9=d2aXD%S7s4(G_jW3khnLKH~^RhV|F#@Wz?|NI|^berIsnHwVARy_)hZXe7AMV{MiLWzZt0Zp3oE9%iIiZo1e$VE{-=ym`0u z=M3saE#HB1EB9ixdrUCVagc?a8QO){&oPSFuIk;n|F!ULqw`4~j?@i9cALCjt>j}} 
zRe%{D3d#;MX)D=fjvzQ0`EI`sT90SXxr)Z3FHRTK?OQz8`B^zpOQ|R=Q36j#P(<43eONA1fFI{_>>MsDP?dXSz6S%6x?pMYjR9q|z~N;<%LWaRh= zunNG*`^r8MBwf3MnuCLC=G&bC{sjH1<%pUR`siIyl)$zEgz5fd1#~EC+t<|4NVa%V zW~qX-Pr*3ZEfP))&g9W`_OJpIH#LNFw==isYUdYb>%n#@0nwLui<+SKp*NrAqu5aX zR|HrDk^xC5x75+pb>7X)A5(a_+ss4>ImGR3cef7y&2cW-cSl=oPRqEhbUI_KB2~faN)1L2JN>X@$z0VEVo5EULs1JI-G)d%T)u(xRY&_i+ z#I8;R?IVo3otLAYnSB_Vv6g~rD>^UekX2-#+Kxy~2gsurt4ID51>n}Q$9W|o=nM7~@*ND1~XT=5Q7_P5q-}rQUEEmtF z(?Cr-dwsPtRV`fAwLCYxTdKInQsk_DB?`MG;xWp@Ju$sCo40o6X2kWgE{=qXJ-n|v zsJODmXfHmXIJAC=!D^4FT2ZAvVw?)E`Pf2p`#4NDim;TsujiX)S)}co4Ym$ku=Yc$ zKssKap2VD>JD)-mN%{m%f{~ZRWFx$qVuX`Y-FZ<=C;77K$Fo-To`XacKCtDR`#im* z2so-R^sy?Xf~ylwnS`(mf{#ywDCJ$;^1Vz1(+RY#gss34vd5yjj-`9_62qWCVTbi! z3nivuO8`NXr8q_|yYjAW{3r<#W_gKuc@tRfG16P(;Wt>DY3k)Hj*1CKrlQdlTQw}{ zP6|WMvmPYFT?K!p`?@)A19R-5_C!~ueHD8v{q-)_A;5jUYO;yqs-o{k5;5{f^!4s- zeaszGga^eDH)a_bb68#nLdw0h#9N<~NDI4#2Nd-V073Q409#4iC$L22ig=Pz*h~s; z#p8A`7G+*FY<;l-p*}rwm^DJNjl_5ds$m(#LASii#fjFPjL&9^V+m;Q5Q)(+n7LWA zZ!^fBx}18BMYEYvFROb!w~zN?{^?`V^c-h2YLyvJ@QayyR6~&QRF!Fk`-FEwz{3S? 
zoOVDU^KD5=+U>l|@TL2JcxuVpW5~gG#~2W>J8NfJwPjpsKIM35iF=nl2 z*u+vyTMfX?GBpGrP2iemWX}4IXThg*$Rsd#cX1b)716qGnk!?x;m{?XeYo%oW0uulD45z()GtPOOe-zs zp70l%(iMG#I7^VpF=u@4+(y2oz{0m0B{ZEfHRqkezIHv2Wksg_n$2EM&}|Eml6Wb& zP&q1PsmsW(W)v50!V9cM)qN>mj**CC_MMftWOLb)c!({%hc3g`@1xz#TrP!J9^+ZH|)D zPTMcXBX=OkByR0TZ-JQtW2ISX_E2$lb;VlJ#2dX3ZC{WOwG+Ok_rN^9n15G>XQ&lY z&5}+XrscpIX|80c1~@FnoO%lvq*lPXMLb2&2JQtaz(zECqO7a zSup3xBeW1HU9eVy{b-f6B*_jymbmiL!z?cC(KMCQa39^1!;+C*Ht9zaEUpgEFM0o< z-kH_XswV;IhnHg5g@@;O{Z1{2ixL5QPaTvW%fbJ>PO*CI_=swUMq9Qr3l3- zWUb2r)R>zlFwO^|vAF};-lejp8%EpWl!^?V0xkMHHQ5ZYuOgFi`Ivbs2mRZF%AT)E zi>*%AKF|_WpPIa2exg24;?b8&_2Yr*XT*~?j z-7ZDus>bK+w_lNl!V`8Dbu=tRcDC?qyw-DKqcQ|LrETLLv~8$I^Y!Jof5vSv*zj%=g~9#~ygx`E_Z__Ngwra1uq_Q?uU&#bHB zb4fjg?v!A53XM^wJmjY%VlC9j?YHEGxaRLTFe!KA*Nlp?i_^auFlnx!ZBAHFsbt|E z`P`7UvvmRN9hsW>u-fjGUuD9T~sB$ z6ww`%EoybWIx4m}vLjQu-zM>c@)s@^&NW5mEGrBaJ^Dg zFexWI+-h16bIxRqpXl8hCwU_a!Inn857H7n(t(z5^QU0_<)152&pPR_wT?8wslie#r*KB0EIXAq{ zvXpH(HtU&?Ddw=sdS$@#YI*DFZ9b5?<>Hbe(iiNrT6sSFXb9)CAy7S z#dzdqWy}x)aZ;R!LL9f9Pc0wtjA-t>B&{pnOP@?rP! 
z;6e(!a7^Tr;+mHe}r5^ys^UlPsQQGR4zCil?fNSJk)30AJi}S~wrviDxw}ToNv}HKx$9 z(uH)emKP)oANnDxfvHhFUF9ya>aj%#jL6>I*lP0~0U5_1Pxbu5KX7yE;UC_$AqIu6 z4Gz6Tlbn+lWKV{p$EP4hHsQE{GGZBpRnVd2w?SsAv6lO>WiY>1GUCtX*O(J`qgp zwv#rqILj!-Vi(EB&1oym%a@0ayC+PRVNq2KZ$V9uXe7(1tDXL&at5v6mOV;A(> z>x$~By)#(d*%^qj44mC7HIG-PYFGvP(2aDAUaL{z1U-*j9yFD>7t8kJa z+%qJ$>-do(w#?lG5Lnf$AXw5mR!x_9>>4a&O;Vbit=Z4ZQodyn&f)Nl9=cugE@%aK z&Ng_yj0dP#Oh_>*yO3P1ET)jJyQVd$f?Z1KEsBkdEU-;Z!w%YeYM0`}ggF4;#7eNG zo`hQ0M&ja#j(x+o+w7&nC-h#P`=MXl{V`QqD=%*&h*fH(MB4P>;#ptQr~B8c;g(ob z5{u&3%C@3769Ys1t6j(BAJsy`JNkI((pyC=oF9iQ=8NX1-*1#w|3s|2niLsU3+(yy zoBP-sYk~}@cAT9D$Uw`)%1JBkD3Z|@EfCz2?T}r)j*SELK@rEr9Y*5#DfC;~$xD<< zSP?C;dkc}6eet3`>TSr!B$s@c(2s=ete3bGrX|k`t-ut z*^^rOpYv7G$G<3Hg(?-9nmWC~j7#6@Y0LyA;zY>HFs{x*XFLQ#G?~p@T10!l;Q8rW zb)$Q!mm|teZDIxS_VgmFd?C+s)2T$ycCO3BU<*Xe0ei81t9NQ^6cUa>roo5KUV9{5 zW~O=<8NcZoy1@g>m@n;S!%nBn1k0>q$NIbhDiluFC&j~Z3|?*|A=8qA6iFFQ!m^!2OW&ZJ1Df zP%rohXPt^MD0+aWG^v$mg!6Aaw%(mMdvb#}S@I;jY$=k0H%LUbymaiYlFm&bjZ*J% z&rzGLtvP+Wp34tOF3S#0u`X6L`MoL*m)#1NRpDiP9_e7jjoST`pO##|O;RbmIj!GD zx9{dgIU`$QVC9J?c&A53?fdlDk)0T}gy$H`21YMRFCLE>Uh1(ooR+j7>~z)>?!Z*l z7Rmd%^fY&^yX;Bb-n`#36c?NOD@i}&sES>MvfUbqo-di3EKZcWJv#Lj1GpD>q1 z5Nf(PA8q3Ym466Q-vc~bF}hOSH@Lb)@1$!^Z~G(TuM1tD2k{fixqQmgI67M1qfO~e zYOg2)F}sd|=dQ#ETVS8xjetLlz`dQl6Y~&bj^B4eSC}YsCwev4vubw#dU6e3kRgN$ zQ&LEd?5t8PlGG?03|e1=cg{G(aJ7U^s?GV}*)Ng}58;Wt9zIK#1caw_Ev;_Kdc9ru zu%DjoSPH%`h^HKG|0p=Ezp%3Gq)=fKGhkmdv}RILspq^^UW~9Up-8Vi8m~s~HADQ- zEIyCNg$sV1E4An&_Mts%SYK}D%}nwPQKz73s95P*6ImG$`_iPD4f z3LrALA@$V_K=2Hdk~|ryY-T)Y6~rg!vGN0_M_Lw}z5v0+=EJ~u+>kX9U5Ha{K<9NW ztH&6pY2<(?ci0=NN_7Cj6Trw#JV3lW2ar4(v ztu(gR$xUox0olvxmCXWmi&Gez=a?)-HGd{d%Wjin?r6uJrZ>`{EDIIumZ_VcI%KCX z-&@cO?d2Q~egvPkoZ=Y|s_9wX<@LyY9-n4tr95^!VvYAkUb3IQ#JFqOZkQo-~)h^^^#?I`~M^&G7H{(SXA;T@1&FCWUU|BIj%Iw#E+B(8WOzy6>h;p1&(l^uaQ69(Hmq*bWn z#{?-V_@BqbAa*;}kP2TSZ1HyL$u!#P(>DRY)IrmV<1GdxoP$NSQ5p%?B+0F*C0{*%-jelOyR0MC8HEv_zA?5A-n9HY(NkeBUaUg1gVkom~F5ByNrLcoUDU8uQY zp6OmbuRRzVnOv-v-X9j4s(IBew(lBuHfmO?bE!M^IEvs4p-8BCFm8!XfHq4!IS; 
zFbYc4WT^H6R$+lSaahD_vA0$kLpF~?uJ2rAeC0eQP8Nh8?cUXwLcO=pHpPgpi3_Hr z!a^vHZ`VBTO>P17`dRrITTV4>J1Z$W^s0RKJ+%!qEW{Y=I=#v*%WltiFFStD)UFAf zeL?TnIhR3duDtSEkC<&l5RS|nYDa%r5pe)qv^NS(5ZWtzA7;#RVnKDZif@jMJ*aHn z_r*%p1EvL^Oqf2sAn!{hnj_-Zwb&M`Cr>clE@#m;RVHX8 z*&+hz1wssUj-0Bpd?m@5nPAA{VMlh)XJ>HKsLb{9ZXlheFf8Inu%XE6T~P&82!V?KXKunfpAxF>r0bPQk8yrE^jRd!|K4 zvL}gXVMy<>eB@c`(WLHk;@2Gtqk@1)mw*aLj?yV0pwiMEBhoRz0Me~8gnO%iG$J7ll0!z z05S}G*TQ}8XYc!c-!FdO`a@CH%&fWMtm`+cj0mWwou|3r zzkj00%1IW~w4SzvyX%h|(sj?QZ zpuR00s=N>!P~LmkPZ4PN_QD@f$m4Nv4+Y{&*VJ zMJv&FD6jh|Px`wHjjfrf?cEbg<(g*Kchj85B6NGNq;BS~Qs*({G}e3@=rlk48Cg*? z;gmTlRdX{S(=f(Jw8->A`2F|6%DW~tYn9jwl`X7OHJa%K;}bu+-@>fNchY~kvhm6; z>8x+{hbbTl<$fmBC6`4xFy$00xO8A)v=rguMB8?&M77XS3T*oh5o&p`O#doRm zyy+W6g`|%fG9|spt9r;AviTG$?qlEa^Lt1d9TGG*$j{95^B?Z4-4xYR5F>pu{&UM5 z8~$|V^K_#fbHv8WlAj!4h+sk&p%ZFYF2>aV)aTEK#m*Cm*oI!4>FF|A2tIz(0c2a1 zo@+*u-apcXLjV}Gg`MMi!#$rU#OmG8^BNEerJo1?=Y7eC{DB&s)`RxXam zkF9GkyZE|z7Y-TGP5e-JP-K*(k?|)uAGv;hGOR`F@#~AJwz++yO^LEhc18(GC(<6A zRldT|U#5F~w4^W~C`&UzDbPJoBVR_}K1un5lL^#K1RIgWN#slBob+^}SF3>Q_jQp! 
zjAPgtSh3H2G?EIzAUPH$$wpRAnkRY|M);$OS>2q5r3h@wUTJYvb5Ezg_?s~Rgq1Vu zK9*&|zjS4oEO>28A&Gc}6ou96oiIuh)1O zFja_*wz&_1J~MIwDW@!3M?SeK%Cbh-HZ3r||cM`?P1ULTQk> zf4Zpu@n%&jJ^@IITyJTQSZOA2d`+km8aERrtCC)#lkZd>=p{NEWfd7$s9;l4cY(*a z^+w~&;e==Qe9@XVwx}|z+%xCQ%mqGkrvamp20u$dwW`rNTl%GXh6svzp<3Br`mb$w zrz`u_mjWt?TDyTC%$@vXYGln{D|6eB{LrNPlh5PpVjd@E-l!`hhDeO`EiA|7j8BzO z+%T#0-uyu^W(m{(xn|ymSB^>2AbVrh8FUMpOPy24yjo4_yh|Pf8=hGI9()uD0Knbs z1b~H4l0QFjIDxiFPy-aBxcB~$RBPVlNY<#+U$WR@UA*_s5I*;AcJK>0Cd9I)qsa#B z?#-pv9_Fmotj9Xgu#naWra07|WD4+ZC-YwpA=^*7=*1;AlPa6|L!Xb^@3LM|C++C{ zOu?6oF*=>{mYn);G&(JXsPXa_cDY)!D3Taibc+|q^HF8s)cS4ldrwg0kTE`-N2GVBt*DVVV4jYBr5_hVdMp(1;0(bLo zVtO4)ckOC=E-sfEu@pro#$bmJ>|Qg#1_*;r#?ApCS(7^O3bz%OuHyT(=Gmj~YnAH+ z9aLRBTT||2|K|1OB@i(kzG#ox4;pbg2ug99F1x=IGVkkjBrVDifRZark`J~gPLeZ1 zUQ#sn&{x0_9hX0qX_$_h*>LeQ+6`}>3JCnZRR#250M$c>58z;u$GW)4#%3Y352&u{ z45Ui?x;f`d+${huK~??R%C!8{gmhnwM40>-O!GVtzWChRw69M~##(?20|MV4=3+tL zk@2z*jcY3_9{`s0lbQF-nIzf32GEd1=DZx{qNV&R!1SUix+?nKKbcRw;P!jHgxHW< z&fc15E<7OP$4~hd;tLLKFLQmh`Pt#i7nd)_S(NyyIzt{M$@l54U`)hE-4_NOE_NsA z)D&i}(++DbDV%sEs7gH$;TMZH|DaiGny&cqWdbZQiMH>@dO$^kpVN8s5oz;SZPZPo zakms^c|M7pb^>{VN_x6N{ILYwX-C%$`-A&H_lwlLHm*|3w)Rh};^nw{Up_qAN4??6 z)>DTfB@}#wr4U_iT`=NB&6o8GMMg+Lp40qxZX*7zT!FXbG8u@CRe1-b6jU5?> zC@L;!N?f>MKQ-ywlh$6uv$e}p>saQr(a@W^(NMDFK*(q)-wkP$jwJYs>G+KM=Jt9_ z$h1$>Jb0rn=7`Fer~hEiUCGqHz~;9mzcstjaebl#!xl3c&|Fw-RZK7ACA2{exH$l< zxZ?WSKno<7cC%IVUqr% zDfrY5(9*r0Ax&139T|?_*&fi~RA5|rl&?PM3A(|{DMyGYiARQ?XAZwGEw)P`S@1y^ zzY(yPPN$nT9=KvY*3{jBmZ?kzNoBgs&+zjDS1wRV%w>01Y<3n^#wfY~MVuGM@)B=6 z;5IH`9SPpNKv^{A*|g4z5ln`|2IE;jX=`dw>*#3Bf96b6ji8oUEooVWnvnoKBtu`{ zTHU0Z*KX$GL75y2R#fW2y>*B{0Vw;%fV;0QEH59H8%Mi;c9kb~UH#^jhJzCIqzM`R z#f_{ml5?I?mIN1L!>XC66R_wuJR_0ui03@@O%uT3aSQ)QwAYCM@WjpI)GFPP;maOY zV?#x~{}c?@5x%MOMzG*#PR7I)mWf&~m&-U`h;_08xNa>&qyr&-&OUa5BmV2~!^ZUh zue6isJh%K2*_kk^RiMDke2LQdEY+9gdg2e0iLv}FDTnwH!`QKgLifK_Wy@8JA^ ze=fcbD-m}cm`hEr2TnAZjpP6l*o90fdin#QO;)CQw{a z>vNv%%f*5FX+LS!cOCxzXy05nvHto2A4Xch?|`m6^Ji3E3N2Qe-XHQJAV 
zu+ivtBHj{uNA*LN9%8-=Vcw(^?!>J>7iqwg2kVhp`&kTiC`rFV8t>+l!FuU-cDD^Q zdRQA`(Q zX3rr~++qClzDE;u%}dP>E`Pps4f8gM)~ZadjOWi1%CdyhdLyJOkHa17qG}RY`b-V_ zP$oxxA9)aOO`_sctvk(Yr%vyd&qp}l+yXLWb zPt#;~h(}T)#N3~F0%12gjfrwMC16QbsHNJ&V~Av*_@*Rd|!@TM2<&%$hK zrhUz6LS?~sE-9#|8UqS$w2yg!_I0>AEIG0S(6<)#PIRxVF0=4UQXPwohH zilct2yCS^5S`9V-@c8iom%n`a50FC9+>equ4A+iTJNo_{q1fb_!6fBfl8@b2gC6ks zDStnzIjU`gxmaQzsrdAJO-R^9t&zx0{VW9kzN-<^(<|c-XYoexVZT3-YlQlU=5B8t z_UeqIuvp9oZRjKI8~}PzkmPuDF?*Uv)SSiqGT+H5d8M4g%@t1sO|`M|guRS0j0^B2 z;@k{$iH$jvsH~=%{a%{NmrUO#d3gF6I3#rYdKiF(8kGsxqr6{O$3y@B?!q3$3_) z`v$pYK@pKSK$I-?SnAD~C3p{rIx`CWbG2ycr?wS1$TaE;4V3^D7a>;ozL>7Wm7V5a ze~+_LR?%CYwS{T*t$DG9<8}6rQ6tUTD+z31-4i(bU+-EuJfs(c_}9uje(fd!AYlBC zXBmtUbkW&C!~fSMI2$E?OdxuN`nxAN!C~&5|;ABIqLsy?*}+wtrJT%sQ6>W_u1=7k^zAt$%$`8F;MM zQ2)%+Frtu%h=~8Bfik#fycuFo0D*~Ps|}iVXqm_ow+9A5OByQJb_*cC4Yao2O-xLD z0CFWbhLlafwUqSNe7*x@k}S0(k*v+lO=xUFv;X?=JFIG|s_)*ve=j61Zg{Nh zl1}!Y_uaPzstW@n9LG{JPX83M<52&HsGAZLmj3685`(tEUl$$nU#sjt?+bqWU(fqL z69x;Stl|a2CKM8`-|isK1OEIyFcR;{{%L}M9U3$osp>wq8ILg){N!`0%k_mFKQ z9auTdYkjf~)_dviA-Xf2?Dz9C81H-DfbwzeuL-7;0AnDwS~=etp9AQa|Lf-d+Y5E2 z5rrjxKcG)aP}U_Ce=u%ha$!Dfq4J;=gAJmO2t5;}gYF8h_1 zq!Eyh`1?`+>nDc*NYWq>l@6J^+q}h8C09}c_LtAYY~wjDu}JM1ynPK&XR>nyAPK_> z3u@a2QqA3a0YiDZ(AG9E0HxVqQ5(e=TpqOgPI+ehHYic=Aa*xUz|5>}0NTok3b10A zd_;tWbxn3R0OS$ognR^fUcm#F{oWiu%$D-+^_uR&e$N|76=wtGyljY2 zqNb*H{h!+`sNwe(RDJ;o5kNNR@7Ds0_V2a++spxn@EFjh?SPb*9T*EcVCw9^v;a%g zl)n6ztX{7&6&018z5Pu!b#f`s;mzWX3^6vyBJp^T2*E{yv-nH-MKv``4D? 
z{2z%v*#DosNp~M?wt*4w$PiT=Sim>Gw>F0z;H5*R{2oC2-#4ywynp=bOh?1NW(OGM zawlvhG`X;#zeliKeQmN<6bK%&{oV`z`YK};4*Jk@Y@$H<41zmCUlsab zx(7hNii_W}Et1;K(UG?z_}}|Z!+6D>=N~rX?~SZNC7Q~~|W3?w|3 zG&lY0mKm$a68~!ilUDMet_=)A{~X4ise0d>VpcKN^d(K(f4?!buLNJswj7H1eKXJb z_alODJO1S#UI1sB6r%Fm)j*MR(%q5p(I?&^usTvHqb@4Fy!NJGCTKw}g&E+*V3uRde!9 zf4t8C@e5&a1H1hJH-ht7c}#dRAc2!z(b4c{R@=>Y2^`o3Z{0NU_Eh^?)wG%4B#^5l zFN2;h=#Lw!_jtN!D)Y~-C(4HorM-sQL(Z-M{E~Qm)X#>^54c-6*)%?i(JO)l`~WEG z@2IU=YG<6+nOfMcPI&J#jk^^_3U=o=SJ5eN1O|w5MXqe(-=g^+z&FKYcb|)&aL3k0^2uLB1)BGmD$Kh~L=<=f0^dK1&%Mz_Y*0k(%eW147Pi<_@>2fb2!}sMqxVa8*BT!;f&Q#uV?_ z=gWX0nzG)@^;LYd--UhZn@E z$hP@qjX)m^fbI@JoQG}DW1@kCy1ge9sxm>BAW2`AD5mw|*r_|dU>-HCMVt2+IHEGq zW*$9~b5XamPq}=Z#-LXELHokA!v&Ya8*5Ybk|{p8_li~W%zugh1qIZCUPt{v?gqU= z$z=jMnfzsuRz96RMH5^}*GT|~xQ3-<+LZ0n=V{GW2S%$(V;cbglE(C^0;=w}KnbkL zsumoh+W5&r-Wm$r-%&Rl;)`iHo}}#$=G>qe8A#nJe@j5DZh^CU(5>qCHvsjz8;=Cp zoAk_pGD|!dg$b6yq)MS`a2-E=5G^a9&r7W;2Me@nCP&5ymgSPfDo4^BTo>$UMf|2DVaZY7|@eTwR1 z1>J{QKnrTMh>DJ;Mr4PRfGu!*S(Klr>enB-q^Pp|GqKWBKdNU;vD93J*?yZk!d6P=e|*zpthxqqv|y z^dm^}5xFPQ*Df)MX__=gQuBbCj)DED|MEeV!_+8M5KWo8T>OVg&iC5f_=7m~2D08g zf_3U(Kgp7SCzqJ6(7-3O0yP9!gq+`}hYcSYzpR7?Wa|Ihh2?aiQUIs|c%HX=hoC{T z6HJj#-AO@mS+H8M# zppHB)H2hSX1_lTUl0ByVce6lFw@gR{F;*L-Eel2ecBMi?L!C($rDR~W@i(Z0%SDB& z${IKu5S7;D&-3Y+UUVwOjh7-iqWozH+&lWtrh$ixfim^U+2k}nuiaM@aSY#)QSNRa zLVRyD0OBTP+(BaT7!HFUHny1%*sXKATpSLJP)8JY)?0Y(X)+N=$$L>3Yw1FPBSaiF zBEioqt7#bzt{>q3D<}a~upvm&{}6Wvnra&ZGfs>5&vz+j>$5>=fo3Dv5rkSNV4<9< z_ofxS)kN!dxM~$8Zoo*BAgP3S3btmFuR5n5WmWS{y^2(s;5;euA>57zQ)HrrpnsOE z78W{tIk*T4?m@&_cSigLIs7-|i~1KI$6r{TT&u=6eHv4}9^ ze9LB%#r7?`SKY`)^#WQdckXoPei9*@iR*|?Lpd32- ztpjTBllN-LyZnpzJO-i^@zP9{*!{pBmKtxp+cXRz%Nr? 
z`bz0it|*V^IUYs_6hiT}zm?GPHp$gun^!om!?h7Iz}J>vleI z#9lE!f@E}-vjqI@B-X3vB1LvLh54bF4dN1_)BFJujDMW~>_E09Wd%-^q)9LmSqWN` z)16{UnN|mZ%B+uR*bksYwJ=DN5)uZNC|}haF6U#D;2&0g*PMH^cHwnA32i^?rq9G6 zn~1V@hk}7BvKx|qT26E*LLMF)CJqf7`LlUzCn_PQxFohW))P=kcWx(T!_Q`z;*m!W zz&OjifW(|~IAu1UD^|~R1gv&s$M<*G$F|WDHN37E;bUv_8)JMhohq5U#s$fjZ!@I+_ zb)^f27uLhz?PBslpeLr;ejh8b526w;rwNCz=T-RwaFe?1;hb!$+2PN4+RkUz6^7yq zqqzuJ#C>Vk%d8D|_wtt&5`Nm?H9e^=saC2b`lX$y~h30`Uh0Jt!BJ2eMu&5L{_xFxM_X4HB(?Q8Gup3K7 zg_%K_NfuH3hkvmC`eVDM$Dg11$@f1LCpU%dW1Me8t)3&hWht8ofF-%I-4S@uu?#?| z$;j!WU$RFwKA_KO3UHl0<&_GSV;~g+aVIHyucVPh0x(QFli3RQN*4B4iqlGymo&w& zd*1>^!N!f+$mDn^ioeUnSD7O*CAM?lW2b-UB;Bi@_!^d}btR8yr(kr{n@0f>vtVuZ ziU1D;I#d>sUa5rF(Nrid2^=yA$xH||#;%9Zn)@O5HI9~Lv#5FWzIuPXXuj9HJwDP< zS-s(4ww1VjF7U@WMLIYO6~~`xP$>sO#T1gRo~Jx17{v}V02LYM5FW~YbT|1B|J_hh zDy1LAQ)jprTi($*n2=FLj8-}~hn24HnR2e14^t2p?rZowEET9-PSuZ~RoSe@VdfS~ zW88j~egzsdHH%tbt-E%o1CO>L%{HE$mWL#oaWlh!DDB%W&VV&wiU0L%4!Kdk+dUdM z4>w=k993J1*ks!S2xU{Bg}UF=bZ?5WARF3#<}Ln^AF6w$ z$W@XApV38<<}XTv87wJpBk%!@L4k&LUv0>cf|y++fx`an);B=woZ9{)*6$t3*lW3F z0sm!{Whexkdp7s*V0Aa}Xm zamEaxa`2NCOhXe+uD5^gctcTuZJ>Kr2OyT;rh9>}kfXDY15 zY?6^m5+DVk^&oXW-D-thU@&LI3XS|4zMq47wWy(6U{mS(IP4tgKv23QEO5P4AU~sX z)C(!SXc9T{a{pE|q+J}8mA}toT~|yI7$!9>n|%hhppo#RAwObsoW=rubw*2>`88O& zb+Zo7uFgpBPP|OrYvok;(o0s061kL$0Wue^JU?8cB8g=S>?OZw~gce-_X{q=k(TY_O#y9oV7oTsQQW488MH8qsai4xY zve^9$Y%aPLSddc0C;nDXMFakm(Jj|@F!eq#Hapdzko_S-4#7mHbMb zicg!J+x+L?NAryb@yqkAo)D1V$<(5l<%Q!M{6tXrL(k4UhDWa4&7$zfl{cI9@$kZn z8QL!d?Q=&cXUXb7O)Q|DYfgPO0BXssUG+9q-7@2rt*HKsk}DsJNCe5y>T=E5?wMBz z1xduqyKkJ1+RKufpJ{=L-Q*x1o~D-U0z8I+OC9%w&;!X~`bJ+b-#S{OK`G-Gu9ZaU zS$K=b6knWAdKvbrU2da@;tS0Vs2mT<*s{QS<=boW6;<(7)3AXKn<>+88dz8@hzC7K zetl|!`lq*n@L}^dW~81U3dGx-^v(TI`xWMdioqLrM7ks!OvNy)EAMUXjC_p8nqt`F zJh?=Yhl)WiY|6Px9mPHdoN!Bp>iZ&P$UEP)E&fWld)|b4(-Lc*NlilD7 z!W|)v#I8@)GAggNJAL4*Eey9$_Cv^DFR_mrQTIad|yZ)ZmI=&?vHFAWJ8fL7B>kwy28t5z1k$#H% zIA*YDvgoop=hHR+qz?Ej`kp_$W!fb279S^j!6R*6%zT$a6Pd8)sa~alR*jwmT302? 
z^yXuJ{*L5nB7iS4r8jdx*sejsD#+(QEKCCCL<2GxOIu6*OAF_3&0c1OZGb-96B=#wH7Smw};60kxhoD;}>qPf0*JE z9Hx~k_3^4EIaCaS2k6yH4^MVA##6FHTDQ*S%Y8D?3k!vKlP#6Pag&2*1cvp=$J=hP zU(qY99Vo*V*b!z;xT=b8HZ-o5bzWjzV`gj0-eTXespm1F&&_Au8nvOkStv_8>-Em8 zc1^MR!_Dr{?6nKXRb23=3BZAZIL0zp+TsIvFXiv}JA&&`VX7gJXIt2VNda|B3k2LgQBG2sm)gO=C+R+=WrZQ8)?o? z?*`fl)1@4U0*;f~L;f_K%h|>Trw1PA^9<8}EG4<8*S0gns!jgtb;=bRs?Hc9-BBvJ zy{dDJ&!?L^Pfm|WJo(jW9x%EijPgt_yl9<+U|ES-cO7!I;?0r4R5IdUd4Bc6-zU~ZEt=LZ)L=rCqA8mS&XRO4;<(PubW3f0ovq{U=^JI;^wrNn7#)4{ zD_R7HHV?HtYhzRn%-%}^KoV2{`P6jRwlIG4Bl`W6;yf!cGf=45!c=PKJV=3~u%Ox@ zP<~65u45KT!HG1iEK2Dc+G+bb9fY=XFDYgAKBpag9*m9$G~n9}fgO@l@Hw&?Aj#<@ zagT0Rp_`<5(zS)`xfG=>T2u;Z_}Tr0X>JdAO>NuTal1*4zmje{qc`V}2edb@(RLOO z?_X^Eak0&Mv{l@ALxiS>dJ+Ehid^%4^#sZJE)f<_3U=EH0DzdCZp+qc%5rnGrQeHG zN0lu=IIF2YAs^E+oK(yNe$^X~Ub|0JOy#|(p^&1vfDuxtZ zI9$EsmzJM&oM_u&E^YCYGRkwfV=-3jCBk0sOc&(dFYz`YyoI#+|_#m&Va z3^SQ-@Jfd3wKSD#JibWP?p{A-s;w^%v(V8A)L@B_eFj1!3n4mpRddDaBGN^&yi;bJ z2}9t>mqT`RC3nFgz)De#f@fuh<_^?F?;s9ycoP&1oH_+mHRx#9)3B0zInw(z909(AsydP9lhd@n3sBx8qRl^oG~P#N#}kepcrMawLV-zsEyP~?E4!>awDK6VWTOcGT6UPI1d zP8X>Td&!AlL_qJDYkIwcospv0TM-O4Eguegtz@Mt`ldPHnI+GY0}M(+vk1GS*B zI?2z{Kx0w>GocHV#BrJx6zl~x36s0*pY%4aCTxw8uq&Ob_o@0wa`moPz5Uw!J&k~? 
z5^~))B$C=>!n8)NTqA|wtX#8v?cIe|8KdWy@NH`h#yk6Gk*u^5ttOoCUh$*n@Dv(3 z^MgZym#W zM&R_7)nt{h&+Olfq|Iliv#al>l*Z86Ohh24g@?^K+)lp7`(uZj9?7;LqgCi1#rQKd zjtED8EjlNjiRCyR9YG-LwyjIAhc3==Ld2xu3v9afWMp8V-SvRarO`IefO#JCH%RP< zG~()@$owW(vB2Z>65#MUC9zjy%MPu9w0h+xzPk0G>{u`{)(reI#Q zZG8pmA?P1px`d%uV$OMVA;# z-DrMX|BRd`^ES&bkhD>*M$H*)2pJsm#sqq(6htVv)UH*|Ru9)MwUs`fmN|{Og%!kdoF-mD_2X$L{*=PL{}y(5M_-y37&PIg;~JOutsE z9bViI=~RR3?8c|l{yOBBxSXjpHT?Q0&PHU{a*=?&ZN&PSHE10 zqaoo|9T7)CFIR>kd83$KtX)0#N?V~q z#qS!jpaHM|pxo(?9y?L2PnlPHxW6)hHb0o1C6K@l2Q`Q~`#^=x;Gh8e1L24WI}0ko z6YL3lP|>66Xz}PE?L*vv08xyllze3_lH(rTiE?K}3%-+K@aBpu!O!J9EzmwJpaiSP z_*suX5RV&T4)q~!hXa|y5^Nrau;g>%l@4jlgSdqY_1kY*2~ICMZiGD4?Fl^GLSOYj znJe9MuT=kIi<0#~U|6JT)6Zt_E^WCM$>q4Wo%$wub9Xm@#>VmrP@W|5sxYm;ZR)&R zEfOns2|xEI+_h_ zVnv!n7Htfzhf>n;N#+dKUe=QYO_QN3ltcBNxvnIYQc$5e^CpIxt&z5VNpTstVEJ(% zFRx{O8Z>uW$9R0c9`8YYnsj~C5eDCtTcT*)Z&$@%Eq zikMBfyOeu{dDQ1Tl~PVC?%Zgx|C$3@Q;)NB${?^{3mW<;i7)G}5X)BF#^&{&V1>lZ zY6-U*Qq^{!yc1vDenEr|pZ=GruNKC$?VU1A9uGlLShfk=6LS-nSeSaleQk~cQ7F4p z8mks~RJSPY(*14Ok-#T!SK6&kC^SY+u+uS`udF;1G!MHuH`?PRoznV5pM0TRoVJ4v z#}(`A7riAEFdV(VH3Q_wQ(4$puuOys2E793Et3^4WfT3^i&G{Yg-MH4uOO8f`V36( z&Q(i)Rb-gUmtNf}+)SxGZ6mNP%$`Ri4i(a?I>iEJ%2u+df#Bja(={JmS$_Oh<;ziW zyi>uUR?8gg(H^}ldTw&9;UfOWjQr8ZyW)AZ4E#~kRQW5>SR4nER%P%qA$wmcDjiXU zW!^0mh~3D4nu1{M*tonYLEEUJ%Qw^{+QOE)F~5?xCi={g#w+)j4?mI@YvLw^9tZ1RZc4h;`IY7 z5V-}Ib+_q`Co~!P*(jZORzfj%l;L!Cp230%b`-q?VyU$qZzWhE>3fvXW0d49L7hE{ zX7W-}rGPWqpJwqhwZ2K8cCP4-Lzt}1%3Hb>uG&X&a%0X4el>Y1;tHt1;WS3dhq%g^ z{TgkS5z)FCvS_U7t%*$w+#JsH>Jy_L6`-XiR^NSNO~|+6Q4j^1j@$C}z*a zFNiii+3#0*+G0W(7pbXRWmz}6Fed--19zzIWB~{6{nIb|Su7r0wZfMktfH45iH!uH z)0F9iP9}+end@H@Te#(Iz&u~~0%!T&-k{`g#%53|4QA)4Ac~#jzA}2hD)#xE8E{lZ zd!4kizxT?uC|IYU2PW@n7CRSBaFMg3U=GV+-qSa@l|5Nt4c|GpHAGMQCk}r|n4M1a z;jG_>J72aecU~8@Rm1AD5+stuSak7M_2{n_?gJ_D6ig{lN1>nYC{vgD_5Bq`j9gg9 ztXS*DIk=A>e<_e0;pC6-2@=I^WE(#kNLV0={CNt=uNUjI`oY`ueSdI*9D1a8&-6Bb zNFg+|O}PVaUIZH!d>fa4HhvtLrql!gHUAnCIwh_j!RJ`+LCiTcAsGlHF9QOV8zlv= 
ztZP)zBM*+K;(5edv*-NS$UHSR^n>GMY-I~h#~6^v2SH)bC22lh+@n# z$!PTG0#0mFGve1i{ME> zrJX}$l`ZzNcg!TwAF2}AL7gs^IFp}PS-gB$8f)d@%5A2wIPOJBSID{{7VmTZm+lDj ztoY#j0rTXSXmPVAMOPiA$ma}nBstpMzk}oUL=+)&0q`cR+nqKiD58?yFPjvX^*|E~ zOE-ng8IEpJBvvw}KZlpSw3L#Oc|n9q=EE_f&Si8N z%|4vfmitYDGJZu*huBM~gR}SVjga)$FG_E4%iIxYG1=TBvF@TSLla)lkwN_&h$a4m zSM1TK!FW^VU6YM$TFb(462j{fHTwb2H#i2}rk*TPpQuvjaG{l7ed%Ms)+%Frp-0h) z^4e-;i^Vgiyr;W~ajetI(v&2HsT*p+J->3FMz9}fPWF_h8kko&Scg6EJx>*9`H}7- z^IXOXwzs@~GIb-P0B-NV;5+5ft*nMTu&QOdE0)Ki$sNi)=|P>J=#*@KV0@TC?r*zK zaPz-N%@u_o?x0Y<$9*y!=^N&nmyt&z*|x=jv&S{jV$H}2!r@Q8xLSI+6imxH$NC#@ zyc#KAZPOu_ix`a{Nf}zUSLCXPgWv~ zupg(6EA}S@?p7Cdov1CVT)J~bxZ@etp{ORix)8&h^|NwGb&sdK#uIqQ zIe}NWN?xTxJiqJ3F}ug5igX1=frBiQH*79GReZr8K!b2rp-hZy8Z-$m+k3)W%N?d) zR3(E$X=XBU98j~CjkGu+jXjdlDuX7TE>Zje7n%8^Gz~@N8XtOmh)QHnACAsmLsQs= ztt7tc;KC%vp@h%KEOLYd>#CWySq16?4b%Xf^-;%WQX!Saz)pm7&n^YQy3+qPR-84M z$`X*BEEAi#Kb(0f@jOV?W5S_-TY@p=G;NtA1WS%g%>VdqG^YC1N^Fqk>9=_wF__Vq zF$|efJ-)Bd`f4eY0)xv`o5UQxxFOXn)Ow3jxNT=p`E493J~L#+bj`|muA+F2`9kIn zbkz7XrI^wz+Fi0zV8@p!cCM|ITTqWa!(dx^9f6fDIvLIE? 
z7m9mztD43fJCRbN6T=s6}A9_39*(I-9c@-FQY57X44ujHYWUrC%>Xpvr1qH0PNsd1i>!T3`cD$ge zY(3DDT&Me!r^UOkejTLchLji38?<=~_7?oDGHLmz%qMcNlw5YcD|YIK1-$dlY88mi?azr;rHm(`sxc=6Au|Y zGmhj%PD;SlD@b{SpNkGkoH@^tOZBl#y`pW!RXbhwq|E#?Y_^3Vz0rz|e)Ge+?k<`M z-cZ{ii;=7Hv0IcKd@(A->NfZ@Cp6-gakYC9qJO#WX6zFaTxX0CyBO5iCg+!58Jkt&niFnKF|3u3fsgl(RP4XX#h0YLnXG9sZp}VNmd9Q_dGqRO>*-H_Pi1X`M(tRj_-?+}Nt~yStMpge3XyPo_n}ihDcv3WB0HCDS9kneFy^AB zuM*{B)e?6IP#1*L#QrP1k&Owawwvu^*W*ndo_3Z9zp(OU-Jg;niRTs7?qsEt@vy|~ zbc*-atEXfsdC{U&i+MGqI+v12*4kv&JtrKx;oPUfCq2KS8Qj*V?@MVcT==Z_nyo$j z!hGBQoJks=g)itIAUegeva#TIaehnVg{$+Bb3#^|H($8f>tIXBtpsTS{Gz{Z#H!W7 zM#YY-g5r9mYj-wjDN2HT&IZ5kg-49vUww)D*$DIq zyLb{PvDhwfwB|pEl#+?L-|hV^*(YtcZn4Y2Ty$p4S@LdsX(T4UC&M|er=PaxLpbiv z$onMoG4Ud)a~vk4PD}am5>`p<7F5hw_Envj>cYq%1INH9;!;+e8#7$~8h>9_AtuJa zlUFp=*Bxbf&~dcK+Cx&&C2Ur4r5HXmzUX5zAoD_)g z)iu|zBBbY$5?`kfa2?XP*gyehy#8=>Jy*1{Y5E7XEw@tM z@0i)-^~zA^B9dOk!mYxp_(ij@bsC;9$@tOr>TS1fzE<~Z$~k>s>m!}aqvm(qRNYxW z7*r^j(qALpJc1~|+bRcvhQFTP4_!SAA=h=A!$=3MJepV9GGk?o1~12x$krbbHw5Gn zpriLlq^O5a;qB%ppd+-ZM!?UP=x)=^8P~C3j^VKgtxxzrzM?6)qSUVs{TL|$K}4)= zjD=pY+$Swk+1IR19SP(1qRn~L(h}o}`YlX&;@IGc4=-y%o&uy{lV#P=sVL$Vx#2t# zWb3(!n8IB2ZY1Z88D7< zz4>Re`%a)v-}uJuZj*r5AhK#)EPxR_QhL|^$#@=!W`)uBsZGJ#P*bs}Xvg z-C>sSQ?Zm2XnZ{onBYIDLuuitkX}+wmvTN+!6?drPtm5CLKOaQszM5<~i6gDTPbv?*Av?whN&AO!}-8LO^gk5P&O=m>mOhmupG# z$;u=xYYs^SQb287F;@`tbI2jGD45bid?5kR?@^(a-IEPWwa;VaMtjYtY8u;tw)q}#} zW9&=}9i_(j5_73E_mg?<(c?u=C#=RoC3fI${u$p-KXF%bAKA4b*4)PatRNE5=LG2heH*TUo;IyNE%!>IpF@=`2c+YXBh4s)?0<=5m2XR6I zO*6!#@>4Q0n=Y|b_#3|tPUKFxhmulH0b-Qm)0(BTbRQd*H0jLeXb0(J{kp{$kEu)H z9)M;8anBAR;KDLU*l#uiFjI4m^qQTb#ZubgKp=)I<%HyN$38fZkH_T>n<-5{o%)NAQCetR)m_ae;k0o21Z}fpaH}(B{dF)#J+axfCb%QK1i+zNFo%jH zqB*tSpe}K91c(aPQ(tpew9k2+7S|K)EFV(pH0i7?T|k#Pz;*=HRrDK>6MgBB}&PwfsT;YUmy`P7Ej}<;2abD3oE8H=Z?a5V%)6laP|vR zR2WK>{jT1K(^HH+GybZ5)Q>f#(}s(eJB!Xux>dSo#P~~#F`^U`1iQPO`Jwuz?^KMl zd_3F9hM{!VrN>O({#c30Vl(9`w!2{Gg5#<~_Iq?J-rs_^cd8nt>%W~5;8Frya}@}^ zIEp(7c-+?k0z 
z+KbJB)HoD7n&k`&f^4Fm6`l)n28H!)=&)~+pPksfaiG4Luh1sj0A#+)h5UWUVYai~;=6*yw10_5_>n^P|j6ThxcFt=%7U-u7^Y^>8M6#ql!1uE@ehn~d_B zX@T>*ZH-^;1Ac&VI$MIBo$mj?Cv5$PK%81Re8!1pW3yp1+C>$Dqd@5H%dip;2n~x* zH)=c?UOKgOk}eb8=^Qj;PRtK{eYo0l=;$-y+6Zbch)FGlK`#ISeH)Pj&W+ke0#Hr` zF-c~tC`hASrXb1WbgkKbf4nEDw_^HD`6u{qT49m=n5DPn3Ao^j+z;GRy411)Rh*dG zp;Ir24>#E9n3rkZBFwboKa+HIQTL2Y}IK^Y_J(pvKL*5AW4Gn8lBJuVRXOIN~~_ zYZG| ziUMmEVWY8ePo~*Nc0W@#aSC7LMg6fk;Ou#o(r!9CCgIl6Ip}O(vFV5L^uC_YZtb*b zSkXRPNc5&`M``Sr?&s+1%}!9En|wu;_C!H7ALf^$Xw$9JyMXBCx%!r-)2N^*EQ>@5 zK*teE9ksXaP|rQioVs`Q=3c!1O=qV@5HNGI| z2sh$bJFLOkLR*hjqu;WMx&11GSlS(1k9$%3`5&uu=b)TO3sKX9sO>~hjUZBx&Rx`2 zkwo`4YW=SySKEKi)zD+v6hm4MwZ$(es-ItN2&!LRZ*60I~3&CB3?1pPl%vKk;CU@GV0C{bQh9e@jXf zL|qU{z>b3bfLg8u7f=)vwp&pT9FHmUxu{dvdi{M zojkNpJ!=HQ!m&%%1sQhwpk`}1I_7iKFZ-765@IzTIRaYmYx1K)U5Xn?wgJ}%XlFW; zG}V%l)8Gg@p!A!EDwkkEtS1AzSRfSzCeb(pidHsvr+k~#-XPL0`FQA?(2y&BsN3b6auDa2$XBtyk#ra~^SYg(4M4?PU;!Tyu zXq8wFE?!e3Y0hx#uuI%o43G*k{WfCjYDXSGE_$EMl%w*(qM=nDA==m?^x=l>Kpib` z+e7&aFN0La82DiL4%AnbIbYCbAj$D~Qqiq*BSXfw6z2q2g7gr7E^zh6rnQ7jVImH2 zf$8b-vNFC=BI1MGq9t$xR^=_OQfjn{ zn#8vZWPiW1$jI!fc8|m`YIc}%p?A%9rDDl4Z(zP8NiO83m5=&8Sv-E$dY{()p|PMv zhy$H~%@3C8PzbuR-TYdMc>F38v-ofOP5s;^n6YJJU=-_+ZXya}NrW7)gWFsP+o8I! 
zZE%a7T`WR(Tidj;27^uzw#Rs@+4y~nMZoq!4U(ybIaA2++)hp7&iA%yr}N>CXsq16k1vK~qRu1Sg-w0zjy>Ll0i`Ab;wy_%z#}LS;=#)3^3{Q< z9t%~Rlb}&=v&GK@lRNR-#G8?K8n&**zkMjy=$gKHjc2i$ghX?I`M9b56`&0$zgaF_ zcB;(tq;*?Yc_zcUBpDX~dRLiX;?CQOXC9T)FV-P#DwW2IuIWyj1@nky1h(QVb+6PO zw9~lg3Iw%b2pg%|nRA7g-AU*=3cAO!5m>acJVOXK<{?K6`mRYs!*PUw3(~9T@{#v^ zS=%2@y4o2`ui<4o_GJpsewRzhJ9zA3I{L^n!IqG!XU+D>tuaFSn_Bx};klVx_q(nw z@s>P`;C4Kj_O#Kk>F&uuVe1^;ZRca_&nOV2lR8XG_X2nVbi ztWzuoLz!O1U=Of=O{9NHC4KC*07BN*xE1o^(5E@a#s&FD1b1Kz>-Laur7U@dReQI7 zb2+*FJcPZBSG~%br7%;cbHb=q#I=v?bYb!K)=v|kce%s=d$i9sA{N0-^1- z(Lh*HFEEm^)8Po2y>vL4)gU)j_I9W+hyy5Tgd=VnFRPa$DC9AEf8XHg|PZnaHneE(r(^Ja;ehQOgl?j}7;^knI?Jw@BW^!Tvhle* zTyjf)_s)_OVlL?YZG;MTL-Fjr2X@Wrg1s^REu0Hrz4AX;%8yNfzP_U+*|tp+ohjh4 z(?HkN+Iq0Cm&eAWW`#gE+S#vI%rqK(pi@lr=Y1I0*BS3khhXdId=~Zt_Zsh1gV{~D zgX@4d>$8cG;$-AHl&}@ycs&ry_n{(cjglX+g@=5WO8MMlbOsW=gJAhf3#|syto92> z6XQ!N64gM5by2;cWnfX8ho+N3 zM(MwDGDt?9yAMz8V_xY_*P4goA!7TTEggM7Nt?2dDcPaD0qxS|Tn$+G1>GgcB7)B6 zJ9>7RH{4De#sH224}@eK@i+BC%256X1zexf#|PG3fY}-15diXq&&`MGl34P_8565$ zVq5>9`yr0#k}^0)bm(5@cDzHcJnR=jTx#mjdq}{qQ^5{iLNXYf>{p>ss8I_7>$6~2x|0t_ zfNQ_JA{5|Xvw?!<7v?vIY8MbEKg`r z78tj6oN9uR2C0~5cQkmrxZMANT!2hC0vAuf9lMzNFcl`1MQQI-sGT>Q2ZId_R!{@g zKkjCySop~R2VnSB;P17wfjRU7J6*@z;6WJ63vk<;Cw!NV*r~DYQxQA}ql#XPf?&A6 zFD*X+7RUmkba@bdl=G(5fO-HEj3reb%ks=4D1r<;J>2$Df5BA3L%|Ggm00lM2nE$Y z0E>_VCKuEgDL2(t5cx0#96oql&{!O=3u`5R|G>V3K%ic6$WO_|w#fe$oPdM;4m2m@ z=GgfI1&30Rzkdn*l+y2Zz7-^WF-lM9WS8`tB(Ei2c>HS z$XnRkYvv>;6iS)&>%FRHxc@)|*p9)Ye(F_n@`*8iK!B3E2o{_j9t+SvTcB61Bbk8; zER8n~GX2hjk1A7OnYc5S1cr9rp_|YfpcaU`D5O;la_@L`uEpN&H1GJY3BC>}b%%BVeHO5OBx*1yQx)2eOrHE!Sb-<%K83!LmGE+~1o(mBE^|hT zFbEcG4YC?TM> z6$>p6P&MqwhwRi>R>6p)!CQD?`$gL6-n+oi-!ubwgDuBE7Agk#3z_Ht16Bn{LhgT4 zg&=qc$-~8^jqxgC$3;JGxeGQyi)23X=yV0BlP6|oBq-<3Jqd(dd=DC-{PJ-NCyk;q zzGr)*xHiwZ zd$|RG+kf_ZDF^rq7Av6gkp0d)ao701wfAF_$!)6rcHW#BuVaN)4C}F~PoUc4lokT? 
z*hVUW%Tmf5FKGV26xtGm_eo0{72V~K+*}6~;4g�~_`4HMW7RA^YD%ts_|j0JCD{ zrp$R-4&iW0Pf#x>A4MB3bw$f-v8huWsGtI48#yT^!trk@YsUH$n9gu4fRJuhv=zj`rbcK_tXm)?&l~+r`I}#3ym4>;YVBQe%h`nRDK5M;n@%BBYxV=MTzGUm)+u^IFK$UF|D*yJ{#HK3=IN?>?C0-GT|np00&N_XCLj z@(e~rWq%LX>5uAV^Qd!NLtK}n3X@Xy=Ly2wFF0=c!g69o!T@$94?=nr6+zqHk#T$f zKER~W{R$FPdB=P|VEW4h#2#7QK=6KHU|uCnc7z!&YAEo;2-4K!e1-z zvZyOeBy})vo9i7@@E3k#*n@KBcUQ%oKT8iTqgj%5=qS>C@RA~sWp^s?Y5?@|oZD<= zK-A3y`;3+o%|CtPQXvc8T+8p)m!hPrwUvTQgi@*A5}$9}Vjn|=U+D@Rgs#H+$y{V9 zZ1cLpY8hzB$*5~eQ2 zVpV(4fll)f8klD&iPiv=oMCBa>Bq`t4NE!iGVD%#+sz6FKN~>-&c&#ISX&TP1bVE0 z-@&qkU*)z%tpMNMX8@rA=W7S_wDW{+1t_`!eni=wQ3-~Sl~Q?QH!R{7L_PWIDZs3( zhd9cDkQxCq{C<1t{`$AM*K`_L0`@lKUrX?Ra;yG-=4|m^CJYpM*6idOa~`0Uedx zp8yq^3P3=nHi?ChV)XZ+n(G~hUVvBgH@pY@(ShqX&JDUE1_uY%NjnTn$ny%3h5id} z=ZyNJBS-%mn&97&dF1n8VvqXPBpZ)E{b|^mF^D;y(a-JQ`K`Lb-JpmAy_uZfl-7&2 zLt`d(!t4CC|L)pXbuY-9KTpfs;Tr7lv_R#57_>Z4z(cK0ulT(R zJqVL_{C}eHID9~}Pd>M_dkLK{s8rOdLD>zX0&^&IqQGPBHDuiE8IX{W=-EmAppXtw z{ZfF7Rv-WerWW`7-H+cf-OptpHul;S0b{WI|wfL1n)jiu%}kJZ|5Aq9Qwh zpC4c}4!!y>oBlr?q+x5YLa;oP69oXGlj@6}P5T-C|9K|!^YVXJ4*$K}h$oxFU=(%G zD}f$>k_EArijjHzv*GonO`Y`yz)V{ZuOCz=!@GraM#u)>ary{0>B9 z0LX=kg5MZENbU>Wa=h0s?%W=*@f|P|^jz@x-^F1X*aUQ7x>S?_qntUpQC})X`)?v;8f_t2(sKC8oh;07xO5 z7JM@9Pr8XVfVdzNhzgVieZR>=H8EL?#Y!Gh6jo&RyTD8*%Ye_$%e8ZVfQ8bz9*%$p zuj@Qng$i0bivhs2cwcw>J|x#q1LF=)fnuQj6$rLye6P(Eq8(+sa-G<@R>K)PfX{Hm zwE%_+;K?AwwmJhbPmMsG68c7aJ-@${>%UPz11)OVi&4aoNWS)h=-p5j24XG>bXoIW zk$-qbMpXGaDI>x{;h*69j5!v8rHX%sXd#(rx>P^GNu>?If*9+Kg$h7D&Kk5KKS)yr z@G574pR0&ciC0wcmp=4wphu_P;=dNu#J^J)@Q%v=V>a>$&Y=!ct$BbAxQH0O0qz=? 
z*YNYH0mFB9SU}Wo&g!PUR?&ij*v{VP5(_R>9SoO$5z9c2V_~rn{0qomqNn}A?L?jY z0#1=u)}qO|0eAX8&vfCoWneZ+3p1hFs`)G&7NAFMtdAA&HGukhu~qsB_(kygEX+FRSX^iX#B zF76-3+v~>JU`FWId0^AI3=t@xyZ*2H_J3aQL*KGYvy)M@%bP4w*v;no!|;D z;_dT?P*{!}ws>`5z~dnahiwsiXsduobSYwL>vvn}A3ofxwCATRhwTM-G76UV{{r-c zu!=32%a9;I=6lGg9h0s^ir+QVvTxcA8(}lsU-G!~Ez^Lvzv6NPebv(Z`i+xSDt+X@ zxRaxxpyWuN&n%#pTq!o@Zfs)Hu!*y<&6&nBHi2jGYOg?PQoAzH?DqiFXdlU^4R19* zo^nF9vkvIFK|6815 z-j_A;=kJXaL`zG@cW8KMSQkjeJJ$;WYxCq`7NAsZ_iSuEOX2XYg0gBzs|gIv_L(<4 zYfJIxbxh`+K{*U+HSMdih0<#DEBz!p)LrinsP-FU*W8RxE+mLtm7NL#RYuP=ZydNX zU#kOSt;{9%d^^{9FgD0%!Q!4B18Q1fCv1BKXKybSdL⋘+kjT{fLGh$m0nr{SKO) z&qw&G`uvtz&}?CprujoaE3{4~ZqjIR%7{G;XOm5K>Wa4_%_5D z0|dch-u*NU26p=rq(5pq-2T`E4>W0iK<)AOkhv4Wb3y5e;GQUu8oZQW2zp`*ZJ*&$ z+2SW)pPL|>b+i@JAcS+}0rJlDYIe=EHH`b=U7z74kX}iL-Mzp)_KCGym%iQFpBA2$ z8qJv4kTUk%hA(#7g431fE%(>us60e_Z^LitmX}3w6CSxs5+E!_mdeqRb)8k2-9f;8 zYhp1N0JtafkSnV(TTf_{Ue4yE1Wj3w?Y_yh$G63I3bKJCSb+Ou5Fq=hB>QbrLgI~D zls_Umj+BIh7Fd=-KohwKx`NEcpjHeO=5`{YQ@gDYRj*HZS@Jl(IKOX2Oq)Bsey%9_ z6o5(ewD8!g{tzv;+px8LWBKdIB>#hYIAJ<9X}Svk#xCxHqj}^3ybb3GfkU@ovaNx7 z7mA*a(`v_0EnB)A;L?S!f=S;g49pFLdN8PAsR)WmwfKZ?lf&xpNaV;M7&G7;=Ppxz zaOTwRdR%yQ4f+zzMbS3$GWYk{NNpetnA z7ri3heHeM0Anqz2#6ynP=WpK7e)REVvxVSQe%lr?7&l(`b=9m6xrNsd1(jm2ngvX} z?($AC7Lo7#nnnB~>JodwA`TOeq)pbJ$Fvf6hbBVz)xqR!L6IOy7- zo;sX010S}PsN+>M_FYc*HrBmzQhSv7I(#~LiCy@n`%uZTotTKI{)GH+$Gxz#jX8Z< z2AicT5-#%40=jX&KhEcKi@o4@<6nizFp1mg1%}gT`#HK-o(MH zXVdU}DIZWvMu$O*h@+K={^eOlEFmU?GOI@`F_GY%5LhjUSbTj7#!2`PXb|Un%wy%4 zsxNTScHK*ZKQDHkVZ@+BB>VSu`6hqDi2_xDEtJFLF@F}rPTkF{UV0vGI2fji>- z$Jkq+9X~oob}wi)&e@S4um!(G!llbjh6<6<6OJJ-(Q1`RLVBm- zafJ~tt*WiHc4xI{(M}xvQ2+LUYL+OOXFh5q-($*wC1=fn8L=*aW3mRm)iO8$=XxQY zh%ZKb&V{O>3%XLWyLyvbPGNS>C$PIl!XW`LR4SG9 z&tO4VfH)e~1o|CrRO-&10s&>AV3j2W&SasN8Ngq4O8ACgwelUq$x?lp%A>BYFUI*K z=$9gqQYuwb{v71(ygF*$`XpF8i(oy`hxlVxug7)A-zvXOuh4yRI`GXh6k7D6=RB^j zJUe0wt3Jf>Vk#M190rC~D~8-~GnKk8N)GGoKg(AwbkprAd}!3CUAm=`k5-NPn6bR3 z+A%tt8C0NMk&jtaENr_a+dtP+vgcNCe%<*NU%7-3rVGCIoEJm5ypH@N9NlP1qz@(@ 
z7Ok%U8^(CDkh=}}qqADxoQ)#R2Tue@g}NtJ` z{RHB|LYdb;jAke%jW#;+hM=SN(R9UZ(36DtRBT%t{X$HYCqrAmI3JOao7i>edz9=& zH4?jxxu1KSekA%`mQ|?igRIb?$%FQa$Sr?a(VCqbhg@7c>bK8r0rryrrj!n^%tMR5 zUw-LLk%m|=c3Vo-VcX;Q!EfBVWmGnGM9!DzIWOYio}K;7MXHHY9OtwZ%LnS6Y*iJI zqbXGxFS;5A_j?;nr&{*Ogr>h18G}smQ=44EZe8a>6AqdqBTTt~8o>)C4WHO;(&u%PX_vSau(IZi z!!ytMuex4ii)g{44%xO6V~Jn-yvCiF6bUH;xZeCvk{e;2IkSqkPYc#Qv8p9ZHGx>m_8&j->AL;kIuX}B4p|mE;T(`| zOy*(aXu{d$&zQBm9dIZMKsE;$8oO6Q*iRsie9_w43YtS!J0C6*?ySR}BUx2@sHDfv zRIdao(tof-oPBjFj(3#iq|vKoaH|~a)9cQZ{m#|toCD8*xXsytxu2Sk+3iasY?+vx z3E!*+dPPh4Z;`RNv#PU7ch$Lbf7wd76S{|PmWeo)p1G@9ND!|=;>2=z8U}_^$BD^+ z=O(4FyY-es+2R$e!c#ABuKT~QmFCPVSsi-UiXwD1KrQ^1&5@ukFMRKyf_hYi^H z8dhx#yP^vot%PmUdLXPT&;$PgWizcLvprF1KyKL+Mlam<*L@;qr!d10uxneVwSR+H zn+Ono8KVxkxHXo!uCjma)%tdkK2Cma=bmv+en9cmh33U{=Za>vaA@YB6q<*W^IO~z z6_VQ*6`$cPaDypDET$G%Ry|(Vf>k7wR347EYDv{nBiixFs7&?uv%^)d>u+ha$bZ<+ zEIx;7I;0?A;VLkWT9tcrZZI}JLG zp4QzWj+HW^;N}Dmt01UyVs%mM4yWk53T<*BPtC+$oum>Mo_hE(9bRV;ZD~cm%B3Q@ zA&|uF$l@8kp<%9Ow450jRqKG@_?W=ru|@=eaM3Nl`yOZGKe*qJD=Zj`lK(6X8{#^; zc(zKhbCO!gm;M#LyT5~b=5@aK%@HBd)?CG9#K)x;i0#+(LgPkKlOo$VF(Hs^Kzx-K zm*yM#{6RHk*xj2jT&ZNRbv(aa>Z;Ab{oHZ&Fs;lT<>9P88 zEz(=2V&dVub_;%GLSMukI$Ys}ZL63A&#v~$*31{XvRs@$S2o^@FiWVH?s_nY1-g5@ z`fJ{Jn{xv>bGheN(8m!!@&X;>x-DNT8=m)%tpRGSnc&dodvr_Smp+ra{XMfph$D_z zZC*_Gjen6ad*O{%UqO<0h^U?Z3@X@->|Q>4UhQEG&n<~MFx>0;gT^dY;QV7cz$U2t zES0|KoE>!(499^UXnlxbuB*$07vmb3=|s^vc4dLGc;=v|x#1{f_$LG%h+BDG)nguJ zv2=~+L4M`KTPfcpR!gnKR(W?-&vjp4q3~1*&X35FjQ$YSJrI}ySZ~#N54wUF1JxV> z{6hJGY;m)irTDwR;@$wi&|m$mpM8*Y6j5qg{MXDYXIadg&5*@3=O*)*_tc3Ahv#ZSL$m|%smIkT=;SG zJ$4biFK;ZTl^}7m8rz#fa~l=;p~Zn@g)vUsDiz$}i=9AOqBv3i|-mG`j_3mGflyV%~m+5BZjwft9VDTG5Ra zyWx(KJk%kWeF&*fC%N+EXa4f%wq{z92iuV^?uS}ncVS7J`>1UqleF<|#yV)zz4z@> zQvkp};a6u&D!_CkNU6Vi}<0=p2}uMoQD_igxMFz zaT+O#0{Motyedj<1ANyP0(S~cM|kXl91&8v(ci?1&g2;RkP=)GBG!x6PMd}II$Wy` zAauPX8-0qqqVr81du^8(+`yp61qJOrkxv>D8dyCS&QU3w|kL4^pFCGkT`aONq6*K|g%V-bV zl+BKaOvhbt908rD;*RY}jjfnMD@+Ff>vlCMY%{4PriI?mwUE1i%tyQEWb3Adj$W&a!ve_SwUB@Y 
z`1tM6)716%^YVGOL_Eq^UEFzS#Z~#=9~Y057pLXn%mTvN4{MeK6UuccOEbPzU&__+ zsnVqytepFNSY@M8^|A^@mMd<9C*zWF*Ng1hgZ18H<y5R&y&m!-m zLgMGn1zYahaV{F}XVxdup;-;t>ZA|r;u>q>paQVHm`49Jzk*lYR@j~yf8Q=sEk*D5 zq+3|p9wazkN?|^77!eQ)vu8?whqgYa_z5II>939*2UCGA6 z@5R-o$wfTHp85!jxbgb~AFn^v)j4Ny{m_9x{Em0|Plbyg^``@kz*&2{Ga5rY^W>H( zn=nbg_Zr(fo%*bSwo#wXk6Fl~Pk`eR5Ar?` zxvoRu!F)^@CC2%qbAguw(=PlPu8xkI_XK?RA3)(bLgKx->!R*erZ6$><5Cr$%_AZa z9C31^pzd;2fIMe%q^`X=M6CC4LlH3eXTrH;_JVLkvP`y0TQ@u){TcEZJ`f+pD!Ch+ z7^%afwWh!UC(ZGoav}5eAGPl0!udEZ3c;fGl;e5wn!^i%~42e`i&cd;Uq5Oq%+p3G?z_XifLHT8@$md3L@p5w&K zzFf>nh;_eTap{TIUG!VPK4H;r!Fhz7ka$otY==E!iYy9R+kTcS)-}1ZZA7NZTWu7Dk0Nf;>9u6_aC!wiF@7UE~<_X>Im@Aa^CYk%KYJ- z!|-L1dv}(!EcP(nIVJl;_2@O2bAFP@Qj8U{o=r`zhWi%wV1QeF&z@$E{ReNHDUo79 z-mqvdx#@HBZq*lz*KFW>jgYFUpv_;`{9|W-9T8YH^S2G14KuQho^2nu4}X%uz0$K; zs@l1-=l(q)vB)dx1M^7j`rcIk&?&EyGPdWN{4{sdil-cu@n-X|j|u|rE&K%JT?2pR z69b5MT6VphDZhN=q=ZzmE_S7m-bE*;D!4o%IDNy9>$9jEJ@Oje{52~Kk74aUQXV$_ z+_J1(W0|vjd|v%n!W5BU%0Hk5FyY19LmD*a`KKTo*s$tN993i$d&7hEk8@v4RJHIP zuLv#23Ih{_aNzKZtzdb^%Dt}pLSU>!s;3tYzd;;5(|We*uw6}%yr4yayr*#_7t%f| zr&{Q!%xDscn1GF43HRQ@Dz^6GH_wZ1=&4%J&&!t)UkPpS4Y_Vj$xoFPt8H$0SLcW? 
zCuP?sP<;+vugU%y76w`&J({gDW!Vhd69d9D*=5co6|)* zk$HI0`%lHHKbK}VooF#p-5Z=whP#l$M=N@{OLcs zy-pV~`V(8$+%<^FnJo{al?SGjD0+NKBWzsbXhw$vk}(;h77+S9*=>4f`8GLJ8s;==VNkvb#216 zR^8u&PV}*?h7$yi7d=l?cxV{0(nO>HQi^3y*VeZ#v-J0Ws}tjI!~t39j7_QYTm)%EDcW%_wy1%f4>{)bjHX|@y3lOjm~I>OR^ z`-3DJOTW2r#OPw+Vt{hMyHqRCG^kz%rSCHyiGg1|G&Tz%@q{Y`$Y|{N$uC@q_H0TyW}Nb!?bG?eNcLs-bTBV*|u9jZxlvu+NyjF zC;XOY6mGQ@3P@h!-Df-lbuGs25-&Ks8SHw{*wNAPIjP~o&Aa6~ikGKKIVUsVV0w!5 z&*j^sw+2jq_;gDL&b)g4I`5%MC8yOE&3>B@M(<9GA=Zios>Javq_l=k@`R?NYIVzh z1Qk@sEF>uVn79_!Ta8HUXT}rp_k21K$!Pa@oVK9S8{U;O_h`s(VsLI;TGO*iv{NtY z$%R`XE>l~SVnJ^@NWM+fJbJlTaMkYitmtU^a?ISdmk|%;LnAb6;ViBARz>=k#H}wI z=k*MgDBObcYs718P1D5uaoJWEuV8|Yw0r_^Z`2uwQG0NrgA}r3dRvZ~=Y7_zx-m_> z13__v2~E+=(5+*kk%7LgY8fgWsVH>O4aqv&Vx^luP7WEj992)qw^}>Tic3%(`+it% z|G1O#G@yxRQ+ZpEplP`6cdm8Q``6&g`B2DFSEh2JbsCjV92QnBz@Oi&NKgiHMZk(2 z!q|sO3*BB^*j0xTkz=R{V>i_#7angX^0E9{@~ac58JQ1lYDTv3HZQ$1bt#Z-Rn+L$ zw1>c1-N&Iqk-;0so;KZXiALS;YErwkH?_?S#Fcq_+cy@*ctlfC;V1gVpHx`Je2$=y zw;AJXO9njWn*Xd8o0+gT*smOflWKh2R5f{M-xIMfT%+q^U)D2#nGcsJn8@GL!g^5N z@SUQ?zU;CN#wI{n&F3^%gn@El(20>FNyx9J%QM~yxYozBjtRM+e!h==I_1s68Xywi zjyM~KRBDyK80kG)BzJqn!%9(ZLo?M*6-{R^tWy+PbCtdrDPKwiG5z**`hfK!AtKUa zN->U{O_k?x{E?$pl{d2#JDczdl$=)Oo9X&{I+VQ1(0zYk!vlz{uc^S> zz1|`lT8w&Q!p60njdN1tI;njLH+?sjkaED+vb;**tiT(pq6~IPJossoddwltnQ&xn z>K5gybL9Os-=EgL;gZTdONV*UUe{R#b&~VOYGu_G#N-ZZrlYbk6(J)sGP6auYRBMr zTHF^#C$4A@w4e?t)o^t^&AyjN{#e^MLYnQGEUVS-md@MXUjjGRxg)7;d|SSiNI`sT z>uH#AFi(|K(Drim`Z$%0rY&(XzV%T1S9r=F%akhpJTDc=qghB@(n_^fG*${8oWHx@ zwF4Vw%S2wQ>#II7gKL#HOGDi&N*!z+!~kt*AK8$VG)?OlR#+Csn$Rl}Lbe_o2fwo} z6n4EYI6y?jVnIccr`%O=WDL%+Eb+cFtvJv4q^jb+dsS1awm$Uaym8M}t9aT^@*}Ww z5gv>LF;MZ9sX-~v4PHVMbKG|t1KP1NP6)SM1?JF$uqQ;8BP@=)yR5AR3H zV(%5DOn9f5R*V`xRl${(dZ(o>`bUW71{J{fM>#&IhL^U`qy07B94#J=nEWzWR=49) zyYgL0c4N9oS^oZ$gh$63suk~sfgyo01&OeUI;!Qi%eZDEd-|VYG|%m&fJZZs znK^G&D{}w7JyBrNLLFun-}0+}kJ9d_qG}YjJY|eCqvv`mrc~X0SyP**T2qOa$1$s+ z$mwo*CDp8=DQj1P$Tc+wo++ikgO_LO9o2Ho-wAP{$>2b3Sh-$2s=xlhv8S&!d7$a~ 
zb*4PArrV-{qv&G60D)$@m|vA?dA*}!e~zdz)`kt!KTYSOeF)vIuxA}=A>JEY zj;wx+?J9gOOe+@VucFHvDQ#J=a3t%5M1YD7k?#nLXe-@GaWG>eN@PMI3#YGm z<;BXt1MijtE>?`a^eBo52yyd`>A?T#3Re-t%HOU`b)4+UP$bsgNDj=Vs|DF$g(X-R z#eLrG*iMJ!av9;otDfr1tf)H2g;5?PEh7Wz_alx}YX>k+6oBF@p~!s31zN!z{2-;0 zgOkn)iHBN<)yGC&5u=!WIzdv6TC_tdu6rY&JS`aVXYcqLpG2x;B z0inqJ-F*3|;e+dJ-9Oz&R+e682^F0h)l-584o*w}Nqp>G*EBFIO7h8zY*?q*Ag|!S zMsB5$cHVwmfa$n>lTp?3#v4)?Kg%5*ic6u~ut$0U2dMy8tX`4mWa9(q-uG-$ToxK| zqggLINh~-<)4tpK)AL_r1P3ZRuYT<+#eLm&+1=%V6vD2r_3b0 zCMtdh><~*+m_zVX;w~qP2#S&EHj)G8Py>~$l7kT69?Kv_jY4(hB(mf>-f`M9m0gLj zMwrCb8#5(4g%lb^>wUl&%9r}MnTEtpM|hZWH{@73;ii7N$62cOL&CMFl7&Qm|DVXE~U9i%`i?0#Z7c48d`gox0>K8mOJ}Gek zc+N_d?T{KRvTxX^)?Xg9P}SSOhPnyuthCTYcvPzo7tTK!k2GN&ZJ}AcaoA2>*-RDm zqybAo?SauP5H%I{K(>%|Q1DlU%7Bur9JjywuPTK7Qs@DZ&lASZvs!2#P z5y_0Paohl2yIZ{Vn5RDL;R9SgR!Tt}Q?o$k^W*Yaw*%ibpZ?f;vo3{pv8GEqS*iK) zS4z#JBCBN-hiH3KGz;E46&TzWRPNLy>8(l!+O%u7!UdMkOS|`)vUM)$rHY*&-+nuV zA$i#ZZ!G#t3Fyq7PyHlhJtAGti$Cq=GMRDuu0%d^94mY@a^w$L;htjcz;DTosNR5s za?2Z4YIF2`AcS$1x9cGdIEC7_B)8xN2fUeJ0tUg+Y40@_c85Q`0MxbNoX3p9h5YAM z6tmBdC4!sPGNyj8%-hqRUt8{*qVSkEhP$;gF~PTbVX}I#6+&FhgOPgUB=wabtV*uY z6TRzNetW33K%C)cf>@(A2yZnkVe_`%r(pW)lI$qf8nsJ@Oh1qZZvW8itF9Lecs1Su zW^s+)+uXiv`YTMO!)EIeUpbYzG9`#4U)uSA*wEx7fpx&mq;!xsLi{HqCXeV!Z ziZV^-gp~30eH+mt)ED(u_w2hRzls%>C)}6Ic>EdPK=w3fk3)3F7Y{8Z=NTVLA#{*& z`8~Jjfa1d7Eli;jBob06dme+mmPaq}XA_2JG5 zY<9iFGxU@7tFzK+Rc$$Wap1=(^HJJGR!(*5T*f?<^!+%x3E?6?S5{t^mJcvwMkr(2 z1pIC5h|>c^Um?pfZI)l}R(Zh&D_MSr=SXT35`xl>iQp*LTI z0lff0mzsA6?JZWEER(96o->^UE-;lf{HZ|ugT>MjU4qzwm2Jj~<)|6*EADBNy?h9* zXtmoho6vz*P{a^z5bqe66&hy2r)FjOoSipKN;zlA;zlIbud=0H|I@7wRs$_TBf%kl zb%Jl+1(dTs$i%jJtR=TtLF}hOWfB7lwfjU>UL*&o7PR;L(Cxtbe`_gzo|q`mn}^2d zVG;umq7p8S?FYnh4I6)$P_SD6o*hQP^!y2mda;84^Q)FGv-*L2EMrGL z_C-e_-Ew8bc>i|2szo0~Ua)y4-D>%I%nE`%KPbmfSm>$;K-#oX*<@omUD&%QC zQ24Vm9yG~_e1UJfiRLqk3&R!VrL~NXfjZNz)-;So)B$~rJxgBt`vN!%W@GVwo`_@Q zl>{xJLVC^Zn7zY3UJ`*dd1!Zfrad)t9h9godeoBKXo?AiBhs>|X9Bm{laLGR5R1dS zUMA~WS4x);<&IU3Ox4~Oeo6wzThFz6T2W+O+8*R5-wrJ##=a82z%x}0C2N!EA$&Ll 
zO>#fOX?x~LubjLQscKEVi!n34^&00tO^nA4U=`5BSa&+0OH?8i{p%JUv|WpwU9Y- zVLrpYHn7>bVQE$-+cRV>>?G@-1LUuvV-mV8OGLO>a_R6?Yhv-(shY( z%89!>h%J{R}LMPcGwUciS`eT*+M?-(#G2phrh+ho!J}nj6x(0lE=Iu#fgu~L^ zh}VnI((CU-hpP97qvPBw0I_%Dd_1_=zU3@{k;kPPl-sDlKxO*i4VC%gGuerm1@J$b zpSAKUgfSjm_^PPQE3sR_yF?{xCGLr(UozUrQ&hQHha$CG2_)vuN_Q;T_O`4S-*+bi zO}J2%$Fqi?0&mdnamO09zHfShvDS15l(SK`MFBDIMrfQzn7oWqBv0ZWD#|>cn%UMu_!zSaU1Pm&XnQwSlRcBD0=oWy&4zP5};cz*^i7J zL>X$VYiXECR~Aqyx|dGxM$2Qo>rfQggtXd+w)Y<^muy+7)C&3PRjKw7y$u@#1cg9v zugelI+|V0TFxU_2UFTf^iEX2T4hg}q3=AKV8%s*t`kMBBdmZqkz@ZvpylsUFHA(Xy z0jfLn5niJ}>QgPv8GF1#>k_us1P(&ZH=lKZJ=8?N@48mm1Io!B4hFrIj% zJn@2gt*?{XDlbi{?9|4P$F7)>dATZPRwG*hl)&@u^t8uo1i>+zLvU>$L4o! zWT~PP$BWe#FunJf99P?T+WXt|-2#Fs&+L5hFCu`47#6dPX|#Hp&_RAunX;^4;C6%6 zm~FXki?RCisc{ybJB{;uegIWZ{mE^a(3=CeR#!-Jl{&Fw_5DR(b`}ZW_Z!S6J2P4Htw?2g)J|VD9-fqp~Yu?Q3l-8F8g)q0=#K?!*g~&v-O4 zq6cRJYYDcvE{&kz7I!30Yvg#DN~)(-h>H7mXxT9hJqiPCt1i_G$ZMz(EE1YKG88%% z)YX|n4FD2wr?h5y_Q`sxRSO+MzF${<;8g#i{^yC=LCX#D{1(W;yTYx=Y2^E{Bd0Yy zLPi2nX*n9Yp{Whn(L}nCCu!h{fW5pO>B60mnhn%9>IQ3dhaK(@;jq<;_B8zW$8y?M z%TV|3oIlA9jNl!U4fr$hE4bV?Ch&UBjX5W)l%nB|x(+f`Vk@^-lX3I(lj8w}^6iDM zx2)vPktT%21ZI5a`toSButAEzBbli0`Ch!^62Bvrv!ybM^Y0?OwkA!v!vwqp!W4JI zxHjL)IYvO0*NlK?j*ZD}l&C^dUYd&1on*P@z)bj?NXlMM)2!61mPeos`G`OLIs9kK zpL?R@ZSNZ3NYk=ml?V2>T;70V;&4#I*g_C66A^-3;sOZdVf{$v*X&2X3E1CCs4AdG zGk<~~W~?OE5z{{;7TVQP>!-m~@RbqP$m$0rEwypg4_oFPQ|8jmZbapD2ko5E=XYIf zf%C@&syPZgPHqjFCh*Vu7Rokdo!#dUEEdV5zig5$8v~eXlMS9>G~7JFej7z?+v*MO$BVuP9SGaJ|mQWB>?1Td)JLR+hq!(OW5>TtKTEi$#}eT&&9~6x$n{AjhhVvj0&D|qYm(s%dTyY-FevmnrNf4 zHy^{#z~$0MsLOpfZX{;qW5-??scno-j1$_$$7^x(D`OkI^F4P@HHMELI>4A7tw1hK z+HbFzZ3Qk=8~aPDu`7<)8ILpX53R0J&n(of@cKm}dDbvivX?zHbaUwfeclM#U8FWm zGF1;2rs@%zpB5T+iD%l=o~B~njW2R?xyc(7RZ~KxdRM#(kFJCQ>kZ$ep4RxpECsvG zQNJsz0-*H}jR4h#iF&6i_-!h1oXa`hNNMiw$w`%1ejeTP)2Q zl@Bddr6{w^V5^fH%=T&UjJ;s|1jSQWQ4+Ym$pnq0P+!s?F4lcfQBl1F66EKOdkyL2 zTh_qx*EcK2z~$p&LgO4GQ(@uP?DahjvuDS=bLQiBIcU5SpY*KTBJ~<4W`@?Ky?F6r z$7*+n32KnV(x-$O(|`EgL)WKu3&RBJ6w;JZbf-Idu-i8FE8{A?y_4hH(H}m01)Qhb 
zELFvyeL6---%dU6!3Z9)X6|P(l6!GSxI2<7>;G0ZpD3`)Ye@|G272vfP+GgFyxmyN&3{Iu5ka)s(2j_w&M= z7Z#vI$G5)zQlz#~;CL!b@g6JR@i(uwAD{IlI_IT9bvHBUmf)flmk0cq#bCUTjxtM( zUS?5?mn`DcJ@Wp@G=>9wDTBkqSF$nu%k2abVU6U!c|OAE5HmBYI`^}jG1@ZM2!li} z93ywWC)Z#WXKSS%h9}xpy;$C0B>K^)b#f$m*naM`lV=cSDjm!Sg561?6kIL8QJ(>2w<_bo$49rUH9|t&&j|J!r>5i!&E_zx?Gmt~`-&8@ zg`Q`jeH`?>VDjWi(+TEl*uZ#7r}q<+dwz+io*w&;RVMCIVFo$u;UQ>`@*W=bXSX}U za;$+7GyF1i-s>Nj^)NBw^?EkJM=lHl!SuA}xM8N)szRHGH!vI5Wd6EzSZw6(6@2%+ z-?h5&!v5;f<%Dvy$2DLH51QOIdJRTQ?hYS)b8jk4Kc!{cp$+otA5S<_4)olm;|W(0 z`+~=}`12gn9YkPq9z^VcZNkvd`h!5s-2o9tD;paFUteGI94|*l#{rzK$jyxp+fT;U zCp8S54a}MA*%7K?P1D;7YE%XRT<)Mlax{gR-_7e4o~jIzWR=Ch_`tyIyko{8TOBH7 z0Bt%!VhkH7Z)^DHUYNvqj1$cDAH!o*QS;WrZ?igbu^ze3p!oayq$h&QD+OcsT z|Kh%*;6w;yH>~i{mJAe6WT`RIt%0&^q9SCQKkG;1(2$D@{_5ioiO8XfBc{S(*K*k3 zX8Jsm+i~&9&41GxZ~#|L{fW`KFuxQgWEDPngJQqc5XrpaP_CbXt z)I|2BQ!#BA$I?QPRENr#QnKXzJm2Gxeof6>?|Z%1`(9ViA1>eX-Ja+9KF@uB?$3Qc z_p@k>^0^4zo}CtER!%`}`O!HnqQl+qEu7uSN9y?5Ce505z z78U#L%yj8^UElLknfYh&^=_nqjT%a%qI^JTNqcQA&&R~kyXLQo;whskW}2vE5eYR> zJqkt3L)(JN>l8HPS>X*1kJk3&Y&27NYFd9w={5&M1Gf2#jWtG? zrTy{2JudZ=ulV03<3iFcf!hM+ptvNG+0ylLB;xlQ%3IzhMKf(MfP+c`#AVBoo64r! z;HQlH%do>bQZuz;~ET z%SiNmx~;8kL#={`5JM}HInyGtP!`duMh=!o{>&tka@khI%fD!oR};YsjhT>IU0t~o z6sei}G##$cU_$e33c2i;1fB6V^g3fFB?r!)**+bYZCM~OL@1Yv!3Ashi|5f`B#v;n zGJKDin~Df^=t`Y?5Lr5f)=TJee%>Y|j6T5IjZoz)0RPV;Ak~VhM)<2-<>FaW&jNZm{U{54!@A4M5 zJc~i+ynyYj;J^`cX{t!vaDC!p9i4IbDD#GGpCsS)fHWS3FiIaW<2-fh)b;DvPq|%E691tVs?w5~ zm@P7#>E_M<2BxjHC&#MLx@xLkD?E6(hLpY#lqdU#yMQQCT12r4Psb+IlaHFn+g`^RQsQaI1_; zq^2JL^RF&02Mj58Z4J^Z#$E4c! 
zb=i%X=_w9D)g=VZ{;b0D!HW*vH(&keYEL27@@!glsKn6P+FC{yzZ*@@9)Sqmx4W7L z&sH$m%}g-!>AAksT)N=(!RDf6|IvL5wK;*Z(=9>{R==blPX7tFs*VW@`?eunoaW`0 zctSz5m+PcwWn~42>U8@SbSOkjdtbcn&MKx5RdLOIpI4~nd%4Tg7h$~Gr=vx%Hd#NVw*y4^bEyr>mciTyf)&(m$p^q||P!$v6kX$4I z`OLDc4m0Wz^e=;&6}KSgB9E64&D^+gW8(c6Dpbt3$|Wi_sKWUR7miZYcVtQE9=Q0< zXJ?1w203-80jJRp92t8ruZ~?e;hf!Zrc(-i&>F?)6-yNLW}>l&Fvu@I?wCbGPGZs` zq0CC-EpzS!|B0Lc1k%!_PLboUdKK?-s29TWKs3b6NA=8X*C)nga26$Td7Dxr9!TSR z#!IQpQDjExwV8&*ib`EQUQ97Mue!6_HB4paaoS7B(}JoWJz7mdQ49mb#l=a4VR)gu zyXs=3%GWWCe&DfYjS_^;Pzj0=c-4JZyM(Fsb4kQkQtZ}eZC;ha)aJ^Tx7>@R`qtgv zkM^sUUkgMgM@`?7$Beub8eTGA`?=sv8y*HQs2kx8LQZjD=$o`nt4BgP{wFC-a_mYig64(Gj#5{ ziM~e)UjuaNWDeGcp9_5H?>FuM#XdI4T{%T7Pmn&QQxUz|h^ma+2NgmkRk5P{LnxB$5aJO&K|Y!cSu~%S+i!1Z!(mB_)YIyAtQW4U%kXgo5xbkM&gb) zZup8x@di-PRN-~0=of9Jol*}6@h`uf_S5ftC>$1_!7b-AIwH!;7(Y~0l6Coi?c;MRw4dKaJ_^0W7Es_F`B;fq`7c>AA5?W zwUSYr?(nn}a5X6yYafiqgyr{nef zT^x)@wV(;GDUK;32uUj!YqY=$Ef%Kid-S51v|Oy4?Js7nVmMfO))|?}BJArf&`RU{ z{LReF#3=T6veWJ4vn9xb%Zy6&GE61L)?~Rlc4muC$|c76s>k9=6yp^#A!)aDcJED* zRTiOiGG9F{O;G*SF9KsC(b53-|CFN#DReHPLu6Hqk1y7a6LQd89UPN5;^R+x4PY9j|#1CPv=x zv2e?`>e^E`5iuVPc$!FYsfgSkKce>F9yi4xsVi6jq0nRlKzz9U*1^&T2$XF|-KApM3_!c&q{Qxoh`LlDs)f;lUr@>odsoYcHMp-(b3 zOm@ikjE@=yEFitBTlA&mcBNtXQk4@SU0q%yeZFeS>iun47psJRXE#*8*v>_|mMBCZ zORKCsgRyEJ8z3rWlyG_){h+|a(L!}~Z<>rk{rrc?3f;2gynfAE6MxxspcV!#mBzEMQ@VxvaEY#(2v#jB3EY>~cNQ|fjq zz7K;C4^^LP=l`cSry*U7uS~s&GHr;Dlkfb^I)wA=+{hnlRP;uLBMIpEOZSf#DRTVu zi5`s^_vmoDYe)@Q9L|@#cQ?G*oS_k*C>)Rq!s?LOEMdmjMkxnxqnFq_v9#)$(d5OM zJEe2w)41-(m;X4hwFB`@@Vg;BbqeN+oOYi=67RFJ`rFhfx+B0ELLm)_5}R@&XXML~ zV}hEMEai5Ijypj)C5cKKd1#)voRa_3`*Snnwq~^t+B>I6v1mrhSlWHpOBn({g{f43 z@3ejUtb-BVmc1MtKHZb+s?o+I+2z4%eZjkQc#vb|Sf&#KVb4YUGZzX62%)6{s@P)p zpltV+2PumC?K{>`_mp?eDk_cAS6H?ln@|lSJ1N!uh2$(m!0Gt@@WT%TxC3RZ%8z{V z^eOa=g#xnX&EJoWil2{G3(^Rc`1C{^!l>*cynQ~#|oDnb{AjF`-} z({E(d&be*5~*kha69y5$>qUV46 z&R05nA3HWBfSNha`$S{u+4d~larLPstL`9SMM%*U^B2>#&^4F49Qe#UbET+~Nr3!G zKeLxMa)*yr3)N*lKC(kX>ZSG^IhG=6$eWEn|2#=uT|MsZ-MeY$&mW;N7sUpKU8_9N 
ze8$|lw#h9SIc8nf60$~8GXfb75@V`%R{4!J+)Gtd&VF3<6qzR+mrF>TxDh=+D+$8^ zx@{UvC*Oiq>e6Z>3DAf^jlRtiwLU&rym{LlC3j5`_VN)E$jc=*`uOb%&5+zzV>W8q$Th`sVhP^wiqs#62hMM zsR4hnu*q!{F(eBeZ1TNzk=4YtvV$I6_ze)ksx9aJ_D(A_)xumyPpb>oRKjiSVDZae!87RYbjtd2$1>O?_6!A>MG z0x*Ny38TR;LAQ{i3l4M-q>14|&GgluoW_3a8CwioboE9FQw^$p_h9AC-ydiP^B*i1 z791R$mX>xLUCHHe44c}#NIw=WBYwvHX%{XmS9kF8(sR>`su1}|$}USzO8k(3@8(;$ z1y=glvp#`r2b(&Vmh|oULn-Q^A}UsRb^kH-ce1&PsfRmd^FDrF%EmixMH_Yaz7QhV z;;Yj0^YioUBxih^THk9bAy-_`6=--$W*zvnRn-T>1&WK-ULXBw{_`U`H?mb32S{@R?)Z}~f%Hx_2=Thto9Wm<#Tn41a7bg_m9WjmkQJGeD@19lm z)&`^Wd`PqGT4zYpy7uAkdqR2?2C>)CIl!ox;1t`O6IIpKe2W2`mNFi4=pkm;*&c-* zSWZJ5cC9t4>taPQjn zg3oXgctwi6|7clRZcUBNC^Z|PdjMJ(o3+_)Q!$Na^o?@6)RpQ~nFa79SS_ zWr~ewjO%ntbG#svD5j(<$Tut_m}p7Tl~1C1Ldn3oy*e+8U-GejuWpK7&(a0`z7j6#j%9vPvW-rh87U2)Utb z9skC(@_~W}r($mReqYnn>ZH;1)9HkD^nVn)N|3q_$LtedI*3U}b<^ov(`td!anEQg z@LSr>HD{`#y_ynP^_&3IsIawpdyb^duQaO%o)oL)j>4EbI|RCGWQM(h$kc4A+omH1f+>dy^k<6oQQO@1ZEr|1bMJtBm=~S*sWxI+CRFw z~t zZXQJ&9cg)X>@|V1#YBDz3Oz>H`?coI?Jje3EQwsw@bAFuER14%4hWXC^z>jtG1wkj z)lua3@l&{uvj-DWa_hwp*uNVKhJ!$|ijIi*K}$=kXrnA$$z{o)ql@Zk*hhc86IocY zGbkk5^GTQpb?Q7Zm|^!a>M@LHjc`vzf5?{F?BJ4L=T5yUPTl@)pT^_aj;z7#K){5Q+qC^%h3+CuJGVezIXsvG7eI zzd$L$lY|%p#Y@Y5hBf~=WlO75SD8#N=-QT^kI$q50FGzr_#*whhZk8(F};-LZs6Y_ zb$5!BS00v>l!SV*{~$C`Y5UF#mb457U1Qi=N&C|i7le|epKwU@{)GDd4~k^e0Nmds z@D`K6&wY%4)C~Pg=f1T`A8+fGYLoRB<0Z({ zV}M!cpwwL)OM-RqyWBxAqux96xV+OKtqHc_EAX^(y5k{ga8ZyE)T?hwa&jC1aGY~S z1kLo&@55OmwIM@=*iSUc*Y7QS5c=7JLQ%l)7j5KWd}ELnN|ce))~z$Jx^YD;2Yx6K zYI7P0ZU{Xt034K;Lm&LevrDzYt|S%~7R2^nnfx%;HV{Dvx;Pjb!B#eS`w zXJhb-Hb@|XPW#atefA$zlw6lqL(58NthMBY<>fzyM@22e$VjYiCZbzTyH9gCd3x%C zUQD}m39-Ux6R~pzO{4AF0bnStyr!n+jTgi$U7gKDE8?Dj2R-lKZZT2M%FGOwyJH4nzD)_fYTxhrfS_@6>Gpl-@nW8UW~)2>DjWE^ z4C;%$Uq$ z^X540i(wwp5R$*OHnX2Qm$6)|SM|j&PP5ltuSye;z*i#=*$%}hr#*ae zLKXi0+AD8^5c`tVPd_b*#*QVIs-ew(0c0XVpu88Cph98Tg0*qi%?#(-|6?Gi7xzTi z(TIaT(gBKC*j*Mrlpm9$XS-|A6Euy^c>)m`r=*B3gqJQ9VS$438JlQ;wZu1&7~(($ zade~vkrKp%hr+%z0!SY7yWi3j9c@RGGuVFzV?&5)aO9ITR#S$EUExnp`Nlk> 
z3Tf{h^DGrQ)B#T_JaenGG=XHHtyJ(_c*GQ9&uMU4wB(tP5Oph z5%WSomb@x8h<2{YTpl3yf!T}xwBUDG*;3ubp(J0L++mozeOMqS7+&s(CQKQ7)=8}{ zkQeE`fItFA_y;0&KUKe1|8*=2?!XGdbCwP3Q&)wHuT>PqYY%3xoAKYm6Mbm>BF1d5 z(M0>7Ve}13T~+WatR=3D$(*AacDbQ_O+b@kdr1}PvpYdce!_X&OW0FEw}`7PGxyS^ z6Cd1DJk!9SAdmDifaY%>vMPLI zt!ya)f!bRhoU1C0HsCu0`o>Sw_MG(qTlAo^|BxD{GiLFt_78YstHg&b3E0>GwXZ8qThTpS3hBzpa#Fb%Rq25b@-o7{q(8d1@jb6mpCde7_ zDdF(GW9Q6ae2N}K8jl~Z;0CS@pfx+s7ltmrPxME6^54_i0|@Uga@>66ODHsDN^b9e z$S48ViCC5Md1OuG-;JCZ$&VqjD`WpeC<<@}`w#wyB2ZO8;xjfb45fF5vrj9DHYn)s zS^dZN`tKhOL(qf(52Ii9GyTDwECDsz|B?Dw3I0bK$m#Q&7!!c~c!SZ&h^R36ds`+R z+C|OW1A8x_A>};QrD{an0Ufhd1TCd`kyEo3sh}Gl6Nuq`2wDlY7`zsUPFc`rI(*jN z`m+CoG)>@H`4@~Pf8`CoU+_03S^o@w8x{Jee=wHGbzd+0|JetB_GD}vLTtNXelJaY zj*MDFF(UgU8%1sXu3xHMy$-<8TclJ!@H6~By5Zb18V}R0VC(c56BPAmt-yY#sC~!! zq2UDP*Bkmd_^-Y6b!dE@4`1hlKn;8qm0$J4S1tO#3ms;6j(U1zPoiN6nS@IgG8d#6 H+VB1^whH71 literal 0 HcmV?d00001 From f9c8fac854d1d3f146fd358302a69db03f5e9645 Mon Sep 17 00:00:00 2001 From: Hammad Saeed Date: Fri, 21 Mar 2025 16:55:35 -0700 Subject: [PATCH 049/119] fix(model_param_helper.py): update `_get_litellm_supported_transcription_kwargs()` to use proper annotations from `TranscriptionCreateParamsNonStreaming` & ``TranscriptionCreateParamsStreaming` --- litellm/litellm_core_utils/model_param_helper.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py index ebe3ce93fc..3542ec3a94 100644 --- a/litellm/litellm_core_utils/model_param_helper.py +++ b/litellm/litellm_core_utils/model_param_helper.py @@ -1,6 +1,9 @@ from typing import Set -from openai.types.audio.transcription_create_params import TranscriptionCreateParams +from openai.types.audio.transcription_create_params import ( + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, +) from openai.types.chat.completion_create_params import ( CompletionCreateParamsNonStreaming, 
CompletionCreateParamsStreaming, @@ -123,7 +126,10 @@ class ModelParamHelper: This follows the OpenAI API Spec """ - return set(TranscriptionCreateParams.__dict__.keys()) + all_transcription_kwargs = set( + TranscriptionCreateParamsNonStreaming.__annotations__.keys() + ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) + return all_transcription_kwargs @staticmethod def _get_exclude_kwargs() -> Set[str]: From 91cbaa48a9bb77c7c8f9897d7e7cac9524875a61 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:00:56 -0700 Subject: [PATCH 050/119] litellm mcp bridge docs --- docs/my-website/docs/mcp.md | 2 +- docs/my-website/img/litellm_mcp.png | Bin 113591 -> 114814 bytes 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 75e582ace6..1f7e65a4fa 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -12,7 +12,7 @@ Use Model Context Protocol with LiteLLM style={{width: '100%', display: 'block', margin: '2rem auto'}} />

- LiteLLM MCP Architecture: Enabling MCP tool usage across all LiteLLM supported models + LiteLLM MCP Architecture: Use MCP tools with all LiteLLM supported models

diff --git a/docs/my-website/img/litellm_mcp.png b/docs/my-website/img/litellm_mcp.png index 2d18071aae4c3997cf30b9b3ed0f6170e7aaaf1f..cef822eeb2205744213603ca6acc233ac0614b55 100644 GIT binary patch literal 114814 zcmeFZhgTEr)(093V4 zM#RtpLyA$8@oqPX=yG|Bs;RJ-4XZGyI0`dHF=hj0I z=mP9l=mWk^pBgRu!ord{uN*&w(B2qNwEPuVh^&!^c)>VPz%i?#AADcVDbM2t1!Rrs8f9{Ao4%erSBf#MKa5!;6 zWU$4zAakjiKsp?)zE-A4(jVze;i*q@?X4JP|JM` zP7KAxVm*rg3n&dMMMxXeGM~1 z*BkdA0?F|>V%oU{;G_(ku!3cpz&@L>%`WeyFR7fvG5-bRCiv{o^*3lZxq1kAZR~Ih zX;OQPWQzHm006J1@bufpgj>b6_E*8kcP(n-B`2L~Zx(R;cSSE*L>ii0qzKzYg#Fnh8Lga z5I$VRuN8435w_}pFRdOtaw&&=;PGB>fP(I;;9)~iK?SvqH%4K3_TsGHCPwk8Epr#6 zR1Q8r{hq)nw)>sytofDx*gJOQs#JB=w6MQ?=lhVft{6~WFyoz@po41f=TixSSNlsy zuy@*v;2ROZO_Bdo+b7d}HGBKBBnRkcdeu$)e5qH#c9dlc<*tUYeLvr`eS<z|iLBQIWh4P2M&Gs#3lYIC2sz4E6VWPYD4vc*Mb-}4a_%j9;t zJ0pFxrC@h*;PZc$NpxJ@e?WPZTWEieY_2G!0d-3End6~be-5PaZk-9BOa-D6;oJGM z&iVH6<-_;l#MZdJZx?21DmY@(q3+6e(x6My5b9m%70^#fG_f49w>%qGVV5ndNeo6y z4-(V7{&wQs-eiYxUfbW!;L0n&w5Byoo&B}yx3Qg=tSq!4Ug@d5( zr{Av6crq6}o9;ZF{kOa5XRm(P+ePV!QYL-T&que8esR{Jjd%UD0_m6Jwx4j$@;7HM z4XG>Szy#m(kxyBf8ja}cbN2fB`bYxD;eF4CTuxp#^$)rEFH6@?Y11ft$a!7i&=a-0 zpH9tf${`=1zu!uh9pU-~2h{*&_WMUnq$5+pC9P#~%FMaT%q4`NScA8ul z$OfX{kBfu9Xm7mW>&(^)-P6jMNgQoiP~MD`)Rb@<%oH@qt*@u9C}SrkZfijq+w+=R z-s%-?3%0DV9maX9#I|V*q|EoZG*wT#Oy5Cuy<<-pdpnG%6aBP$96U7Br}s*w%TS(+ z|8OG%^!SO3q$Cb#aRT*|KKGD>gTMt-} z@Nu6zy@_FS=NTINhJjW*+bdmh6>b={w_$5!WQ4=Js(Cgn_0e|f-^qkt7$!6+rz3{8 zga$${tUTbknu3+JJd`Az7*5Cz^_YZFrWox$!7VM+g$xHwv00_he6Ney@E68qWqxrn zsqb2rXTmr61A+L7 zZ*AHRW)Pugvk<4;?jqR-ca7kh7~9=~W_Ch2OG6Ca;lqn~X>W(MZ_mxq@r0j(f!xv%}zQ3vYQ zeC6Y|m++NC2IVCc2o2^6lIixHWCfEgk*!TH^3XZ!|1i~lyDi~eM6E!))BtaQd--5V z2)&}b!)xtMzIbqTpHR;>i2puVUB1dh2a8QeNI(!2K7Gt(6mh$nV+VDkdAor2k~gAb z7Y3KcmfW6cC6mc;0T<;x_hjOg177oC%c<5l!&IoJ`u%+WvT)oTsa`$|GqvxEPQ^fu z4zdBlFi?M2j2*{dPK8p|uW9QLpqDWW%%Y9s1!55-F}JQBE{bt9MH7k%8u3hcFYZP! 
zL&teAbGF)t%Dhdu2jZ43;b(S1=z7TO_6cflJ4Bk*o=)0G{2dwwZt6;o0S0E$ckg$3 zS&bXNlW30?4f(ow(5f}W|EgU}!{V7AvgMDA$0cf@*t*%+mPrAWSa{e~tEyXHj}`o= z9ksKbTu$zwE_j!i89366*LKl$=RF-kt;Sp-`Nw7+WcBNsUARhAO@}wkD;>GUUB9{< zc2C{UFG@hc3-P2YW)|^SLJm`tgeUsu&}@u9ew!w;m_(ykaeHQe?Dr}k*Sky7cPw%H zeoal;`aO4_YEm9l!^fSwin^Ym0+rHOuDWX7^;jTWshvoB0`$HyiYo#htaUNjO1a9$8{O&AP%8 zRRkpwiuzKnNe{m1Wz2*f`lN34Ohejm)Z|&nq>Vft>KTd!v&G5B=^tfow13DF*I{lZ z#tx<^qBX0(mfmu(g1G_@H4GzJHT+e0`X8>t2HC@g;IiG1B8y*EZy^AXfq!{aMy(j^ zQ>gn^jCHpf`bYN_17Tkorg#w6B@9j06j5#7$x;;7P-Xmj=P`G&?SuHwr5Xq=| z>2;QP-QF%TaMU(%oaOHmPCzDdpID%}+IhW~7~R9i`<{FLPJOeJKQ;gXZvfV4Nz=n7 z?8aYlH7%v8Ne|v==P4y+^R(~@=V8)4R+5o+<2?BK?Q<04^$OwJSEV6yUXS*lm!qEL zxL8%K!?IlvSk*5na?vR^3sQ(X_gwquOOB&44{lgh${$D>AIgV<3T~FvCE=IY*k__8 z#51TvD5f_ssSIUR?{U|=wrrMrtqzyw{SOE{f=$i;urDqm-{d}?QUUPZ&I-iJqN z5L+qJ=N_b;V%!^y1>L@co(CYy6*PY9Oa>-UkpC}ue7x6VSVRmXfEn4Lzml>~qG9X4 z4hkPSo_wLLH&+~reERn7!qYn!q))DoR~I(uOzXPr>*$jmr4Vrt(;jKZE=1|+REsS! zXh9R>2WU?ua8lyAcI4joV9q1eincVMF`K&qWoN#>7ZhFx`@+4R1I!-z@#P42K}$5w znR9%La>qfi?#FfesAr`1XnMc3Yu$a|GPvg^SRH-nAC2g$_anW@(y_i52)97zE$mtz zgxlZYoRDo=Jcl`qS~!a60qP7?kPM9Ly;0;eu~9f?sj5#VwkPFA!y2}J<&JPf%ULyw zeSj2(PlU%HTN7Sx*V=^hh0tdOVr9n@FPUKAzXLxx6N1etLmu3+6l3$-z1Qtwuf-l$ z)!qe4scN8%*E4M9l|Z-Yj}0>`5w!yxg*oshaqOWvZ4G>!V$96z=H_Posne-6orp$t zSz7p9f(~={+;6LFzKiU0P8HxAsaFs7@L}wXaygv;x8+HT(f@GKv<3}}*adp&o}kO( z($bZi5`qC<$}5ZW2=GiKe~f*(PtK9J@4tiQr6KB{OjJt_i`*~BV|*oU4}e0L6fnKAxb`VsDdE=!n8!M0l1Fv&C-Eg$`oDgH zfsneUBd7fGeZ4|c_zPqpaXPQ69q4^Rarf6dgqPJi&S~gBcAfK9ia4=THTzIBFp<+_ z9R49|QT_AmM6Dag&y2S;Dd}@XhKCGl^INloJu%`lrWO|sqIpGPu36We6i^t-s0#X!%2L z@e!-s7n_wpUqQ&_0?qNP`f3oo$ByXI@FZNLMvO*Yiya&^+_!D~W7$|x4ehE4GFftg zN=n|$2No(9Q5*(`TYr1p_JO;?8ZzVwa3gOva|#ColYk|Pqc5o9X*d?#xTuw>_|tDs z^MidIl|&FAm=1)Qm$2MZhYc}IowIXZs9^06xdx#0{Qu8+R)jNF7>K!^<NyInaC!VV5G?vjSJ~&@9&$@jB!qLKmCUN?{slnevTJX?M}9^6O=YD zeGLrky!R&4Tg)oQzS$V*^4I<=Bd3UK>+1yuZ!cnuo!D}i+tyii)fnYp&E zPRQgp`QonXlU1da(LbL8bvOUkf2@NIIp%wly{Cf-T^CbrP8fyn4dbsE1>d#tYU9w` 
znN{~)I7!iAQPBcWFGI+nL-9Pv@tvH}bZBh7Cr-PF#&R*?)ac7i$U3@DsnFQJeT#H$ zKyjMg?&apIVs7LEm#Nj_ErqN>OPP!dDq(zJRryzXpJYl7!?yFq z;@Pm&d*WGPS0Iu$mB*7=Dv?p$+H0TJSLZ5)mHt#K0$rQC^|h{f++z-&;W|Eje}qt9 zDw+y?%FJq6b(D>54W*E4D?9E}BE;z8cDwBYrxSJPGkgN}O!o@s{ES|Ik{&`RW7M`+Tiyss8wTMfQbTALp|ZN!L3e^{8GeqfmY%(7)5Js&?aYKCjr(7!TCmr zMD!6zt&L)e;__CQ_f_#fj{R^ck=Pekm|w)MHf{9L$mRPx$54*1zV|epDHV<)Zx0(K z=CG(a#F7LXT(|jw$Mg6-^i4t0FieTvDK-bzmb@Qe_NSP>iX9&p9FU@fb4tBBjJkx5 z-XgkvL!Jjkr3k3~?(;pZE0UP-m)6OB;Vh9-KD7+~d z<8}f)<3*aW`S>yYPCknqK?}6a39;SjrrjZ1$~I4t@;Fw_?8fcKp9EU|a4se9%=#sfNYX6eu8N;i^xjckql}qRnqfzNW||mj)iz*g z(-ee&lF`3_M_RM1w5&9eH(1C_IpH&3j$A0WEn4)DP+|wMWXEJDKqSUiD6mU6(EW|bS}Y!!ntejdu3iEW zh!xS(H@C7n;BqY<@f69X+;Dp~|fnd|{ssX?#{V_HlTwhE4DB>&?XM8rOg3n~I+t1x5rw^ABN;g`0b4 z@?Odx_<_DJSfIJ|IcblB*jwk@6?1CSh_!803DA)|Df(JH&aS4Gswn&X$B~vC7M*|x zG^Z=mkh;EcvZLDOP$Jwf4A82s-HaKv@x4!a+t6367V zI8}j|c)~F1?q(i5lL0`LLLA;b`q}h$qv;9YQFZwT7z`_vAgPiQDVN;Ta=D$KO8!Ez zNCk_*bdI6Q-Q6)bZDG;FGw~yIO;71tVV%@ktNP?`uN5V_24!TrikiDNGz>o9G?IO~ zzSLSQ^;)*c%I+iWC9_(eaG|=x1?IrS3g<@5LVZal0(dq~ad0t^8v!Urv;GyKeUsz&_q*pMe3KSSQlXC0SjsHu|=aMKR=)7U1ECi zs15l*V(d<3zXu*W$(R|Nur`fS6>5+nc{!v{*PbM9p*=3zW+{F^_+eV znr3S|`Y)G$!wq)cw#-)n>Rq5fa?EmxqT^*bPr*aN<@=#^_o>+c0^wb}0N@vhj=e3?_geL@Iz((m)qp0cUoo~AG4A9f zr2ZbQq7Fk4E11g_d;8xET&`CvWKhCcl)Q9%-m*-RVB&MEC3)sn?UXuhC!-$KRQ}Q1 z6I->*Jb;y~&4c&Ow=O7h;gxeBqR8E%*R3>O!$s$XJ+A_EKWLx?uJfQ({N>EQBAZFU zp5vgY`NdK(JMTP=o4w+w=ygl6j6|@q3O&fT-#te#S^&J+_U6eLKrRNpPGt^IvB*?ZA4eSw z{42@<`ckMOAaXo;J-a7v9~_3Od1eBU`>Y+-;J5UvVf)uT+C*m<6zCX9%)s1uP(j4` zT6RmT3oP!4+>t1GQ+hXz1n;w+{#D=NF0E*=n=i(28T$lhggvz5t_<3o1sLD-Pk~t@ z%wu~5;d7FqR57QlbzLcus_--g`c%Dhvk$`D+HK6CH6kRE5>*;o`TS&FV?Ea6xQI(J ztWlAT^xuWr${r0fQEEXOk5?xf71!C^cpGAVi_WAVAvHMabY{V>4P`?UvHeB1t4jc4 zQ)*|Ly0};*^uD^V zB0uligj-1sFqFMX$i-BW_AUBR8f2!hol#UDFdFoqw{ASc8yA_SrUw64Jpw1g61fgORGv3~1%6tW# z5erUu5A|#n0S6pB{~zM{#x>g1+%?U0N8};?BKQ+6(koP4Y`9J4IV89s6qkX>?O0rfZJH}shz?OXCoPikm* z2Pp+bfCz5Z%i3D5Qr?_hP{Xj8nF9Ob4DEIwRG3NMOHBC}9WR~b;ZwmRA6|(z7p{<9 
ztsa&D2WtBebT|KWM+{95Gd6y94>#k#)Q_~)22YKQc{3X~zv~KdMjGCBJ6Eg32DSL)b?2T#h&IbthvKXE z>dT(vB%HQ(AO!|^PbV?S@Ewcr+C%cwAuDBFq+oZk3VbNcx>N(O0nbRu{xuv#E9_b| z3LY_DJ$7Kw&CQR~2XOR(CzdPqh@G-Z7>ztzTQ$``-=CwSBTD_Dui~EHD-Jgow?Tvj z+rX*wu;l4JiT8adG3G{eEH%fwTd+%MTr|;i9Kl*eGu?;Eop5Pr z%7Z@B{EH8GQo+60y?CHc%>Mul804(OynKrGaS%|o%LwzSwklMK3fO;9eFy1A2pUyX8c{;!h=4{ zPj}RJcwU~)=@1_c$(v0O`wcfrwBpzCeeBsEpA%pcZw}kxlsKNI-7E2Y8EJ#24NF{p zplk~)x>`YUU+~gB$k`m`b|Kfn!@iKoKpcz>S6F~m^9H=$;BfuzhBJJ@dG;@ux_RLyqC1{b4UFqqX!gn8m>b}CvI^<-nvVGD{d3?Hf~o{jo`8ybFarCG0i%DL-MBm*&Q zX3M?5yz3kt|5OZxiUBSmk4B;}{I8QX8HA6A>GGsX)MPqOhj&Ta9S82VKCK<@Kn+VK zX|m{~Se4x_S-3auSTB`cLGP_NnS31Os$nkGKQ%RsHK8=9)~$^RHlLPFyw)o1u+lxK z#%fq7zCs|*IOfDJj^5;9VRumT;KSTv;GFUlG-(^ytXM|gcWo%zn1kD zD#If8KFCE6fbvbs4913rj>l#G%aT8rnAfH8c09!hB}#z=#~S7nnjJ>R30;VIuqf{d z!<6Z!oLa-T(a8PF*K{NJxDNYfg-kg8_Ne8pp4Ec-504TIheY(+Js$nU1;i@hVI-E(0 zl{m2y5lPMF5)Jxv#>e}s1bJBucsob-CVx03Fx)~@6xx-&?DgX1tM7h3pKFr%9h>-k zHPg|aj@Z2-BUtg*W0susUa6PyHa41p?^ZtNmRf(={h%Ose?3vZzXjR-Ag+}_>^G`H-4CmlAF1?on}k;X&IMT zy`uz*H94_vySq$hHEw{th038oakqu+Z5=9ny6q@`Wsn zgsNQ2Enl3#34+(B`H9R?2LGPb2;u4T%D%fDJEoW1|3kr-iY+1x>x)Gl7+c!lmkQrE zJMf0FT=1%*Ln~S~C%oEgDI*#_G0z`+=$gMyx4POq;&=*Z&qG!#Nz1Dqv5Zio`f2}-pU;~5;)~=0l_BqiqBjr%sc_`3?raP32Od0_82g;hckm_-`qF5 zE2Vo}U1<(*btZ2}Ys5d*1`T80{bD~8Y2PMp=$z_4-;y$eDBaD-MOKwvmCfs&d8zmo zQZW>)FZdH3`~m8xoeFhWFZ6`1SbNjf>QTSG+!KmARL$snrU3@yf8CRf2jC0J8zmyZ z?)x#LCA#Y)(h;!`#I^czYDJV zntA!6m=knG<0Nz~denb&b^_Y-(xv-NUvoZeu)kw;Y9LLpP!x--vJbTmT&wNCq`D!e z($qr@4I;OQPG65|fX;lxC+Z+@4#|fz{v#|mEn`0EV~^R7zuVqq?cH#nZYUfPe3!-B zDsKe?Y1Mj$I&E(|8Ql^t)zbbs#cx}iP{h$1J^Q(_b#swJYqRDH*KBYf8RaNV?uxZo zkB3;cO4>EKdiJlZ#bnn_G}SpOcW`IU71!&dy4NaqGD$JK#Z?cxT|Q);zbJBE4rUsC zB)i;V%_xKV;RW@8f9D6)o?zrfQ?~dPOF>x&Em$366Ed-aP3Gbk@ zIn1S;Zoof}DSqMj{aX{l(V9LOh)`TW=ZLYTUb}t=0I+Y4p|n-l2oeF)v0T(1)fuH8 zHVV3lbfi;rSiwqepC#IVJ*~lZHU0w?lGXIMt?Benr-Px@2r)2gg}zO)KzY}qt5G?> zPghes;*&;2g$9DAFBWzV8?LWP-#kJpCq;a(hXZ=u5+$g~u2u)= zkjs_s=QKk4p0_DxZ^rE|s3O(H@9cin*D5}AQ|ZVF(a_aO_7DedDLb0rjWM&}Idme$ 
zo@iC=wDYRF@0p=Cb?)2)w)DJnO(*rE*@^HO0eCKCS1ue*?SmvFhSTwSUkaUWzH+=M znziuV&yU?hzcjU?gnbPRRL?_odh50}6T`zGLgsZh04ya<;Wg*^Hst(q*@v|Md8U3{ z(fD)<)UUN_4YA&|663f)y)Wo`6@sSzO`CPa3ZOjc-JYF*l+Q%Oau@gY6L6aefhYT9 zMoEDBw2mEd0>m}!Ek3ACGVLj4U8~Y>={>xyS9A2oXtvkvuUT(mO!EC4qd7X9#UU-0 zz0d$B{F78fpGqKXkrcgJg%iM@4s1zk*1RGlPPl>!fJI(qmXheobiaar^L-3%{z12! zAb+Fh<2*C88Mp7G@V|8bml^W=$Am27{A9=;1cpi(YxFZ*YwJHueB5InAL>Vk1eQyI-B;U{@x);$K1k@`y{%`JK+{lGfyL zMYn~xMXI-jd@MOmdxV+luNG4EhfgT;$Y5ZW_U@7`I+J7?9 zJwVrb?s|vHWXr{}+2Mc=Bdj_dmEjq=qMcT>cx-T|Z*4#>oS{OfOTqH!zdZjX%iV0?AgZb?Y@|9Hqgvyw z4z05mj6&CWe*;Z>_4O>bvb8|SnQz96ve-YRn!BBn+Vbm-glt6b_ePoEuztJ~kOi|w zkn6)~^_!#Nj8VO}EWlb6I;PS`OSSR&2kGbtZUVxQ#v;yr{$kIpHQ%+hXA<}BW1rxv z%qk1r{@I?aRu>KIE#61$Vbnuuc1j{>QEDy0}wRhV+9UQiBems z!I+%BbTyt(MIwH&i!uyfC%|R8XW1+xhWfkjs7?1Wj2H^RIg=kUGj_XG5y$ZoNd6zo zIXhc86vhG~!hbPQ8sm$BFy|o&IK~t%-f4c%`Fn$4Fv5S~qjR?#gf?NH$kCXML8i7G z>gh`Rnf`Ac@E?n{FzILm&0mg^-F0hNhc-MqeobdWc#Z#&?G><_(`^fDqvN1V9Q<;^ zN2`^{iF#QTTqtC+m3iIosv2cxr`UgEyG9?&%wCKk?=EDp&S^8UadA+v1}|TSS!hQo z*=DqKBe|!uLsvuqfMX7q51d<5e1aDd+5EV+GHa0&a3k(^ZBmgj!IYENj?y%`6Dot8 z6uUPj4(474U);L4r9-RMkw%=!8N9UIM}Mlw&`!^&;5;mg*qxoldBbY+tCGTc-e0T)98AR9 z$H8#}ynVoM(K;Y?VgV1BU)x7dq;x1cDYsWuHyRfPP!Xl+yJ;xROCGH|BZ%Myx8Teo zQMV|C3D-mmyWsifh7+5bDoV=-Jp!22(Ehqb9CHm9#m#cGPNivjo_|I;Tl=x&N<mffxE#q*<(t7;Co(<842hGejWP~AD&KE`A5Qd`tGp+xVZQ>|m&*upZN*Nl;<%IJ zSOND5mEsF(4;^_TM=93~)RV_CDr#V5LIy$&R+Mz;xH_edGJH=!7`KmWyWBTAr5+<%qVXV&OF)OeR6^F|f1gll5E>)%nIwOPvFRMjPcjYC}G)LbluQ2@z~ku5m@6D_Nj&jv^6^P=5bF;(B`19W7ui z{;Vptf9v`su}ERW@GU)$g@XqLx;Hs{YQKK-j#TEa(bd2BY$fKD@3{oel{aIn4GT6pntczi6j& zbzz%v<`HOSQ3FX=4Y2h;tPjCoW{)EN+H=CquB@*#->fMk4 z8zpP4+;^*yf{9eQhVx%$fBCuUT$&9skJ?|=QFHb}KS7ah#(B7pm&v`)>8+gZ!QilR z&cm!fd`xlXoDuUg~PZ=!#a%NZ)OL5tpvaKfgG5bha3yTIJJD$$8)u^eS~~M zNhMnGZZGQl1A;VK%v>KE}M0^vGC#aL^w2X96=zYaqBp3e9P1ymvL^k z^DNcXFkF!9*6lHgn^tRyClTrdk}3BZ!#$?_E_+bzo!zG<`Bwt_UyDtK1R_US3w>Az z29)-Mn4nwvZ0g1CT&l+!h2n!jmd9QT6n#C|npH{4hHau)eiq2lQPKLV69i4VaU+?& zMYfec-n;SV4lCQbD=3qn0fl^P;RTmF{La7A 
zEf@(u4UEk31s;_e?^Y*m$&?u+JHw(k|A$4@VL3x;+iaNkAbY>^+UKotdn1Ut)|UAi z79Gv95$0D^OY042k*%8vsX1f6aW>Vx%aQwEMaQG`&Y2va7IOWV zudY+tdnCdN`YojIv5#Ha@2^|-K+kzcNc}$4CWm6HKs<4mRs%>z>oHo=hDH7lS7W{7 zdog>*n({{4hNjFS52cc8y6TZT9Swn`I?G1);LPCVHyGYn9YCt|!F7GVjIf&VaxropLQ(Ksr(VlHnsY*@e1#JtOD>U?;>_jqk&Z3+eh> zJQXn0N0kRXroM4l9=j~IUANoFiq{qp~N{P<^E*(mHr80HyQT4c_=Pk+q%Sxe!O?cGcFU`!IC! z@KZ1Q&DpN5EdZ32hNN{|9*ySnNzd?Ty7HF-If|LM=@(-X;3U;>MeV(zL~^K1w1p;Z zwc6oA@!Gm2cnGBtX%kSFpz>DDRIXguNzF4@C(%2?%zMBAf|%}q+dKY3(~{ubSX#`@ zzo3Mq_aTKHhFmgy5silPWG9Y+(qc2-MhjaM0N=-xpf&)dlO?@wF8@f*7Y{OI#9atXl;)ljw%ydqnd73Ep=m((%qWE{m1M!f?)P8rISj5^$)E3t zXpEoI8cG(is&(`W*tun7VbPzVsOq$yaXi{hPHCEpy4pX{Jz&+^a^|jL$s^_A8DsMb zsS;ZQr`lKCCsp{nABV+%sXo_nUrJI=B;EoQEO?zq7WY zI3T<{P5TyRW0{x6wuKy+UbxUR|A9(HJZ$szynJd{=~@&oT$S2^$esnjqH z>P+2r5TjWF=^?Q=q8i!}aJ$z7_o^pBC$%a_EE(dJno?rIHr)lp?T7(fT~+9j~; zWo6e4lmqP{gFtvwu94HHF*a^xGZwH~d$POcP~qS4n1j95x!6y z4avd!1=W-uWZ;`cUn?3}gYB`_t(IR}WS^da5pL$I8wn=5Vg`!M>j#fY_k!#hq1_=8 zD6g%_bs#Ex#>+z^g>krsvqPBu#{)Wzkl#G(n)GxeGmmLa*$#mVWNFwKY}|yDA7-JN z@+UWFb+*nD8!V^2!DoTmo8p@7OfpQc8sY~NTa^QFh;~VYlpemq?Q^U+}76UO`Ep?*89n0QN zPcU>U$>H-d7B|FQIrhV{INj-Ouu?Gw^3X~m@ax?-)a)=u62KRpQgE=O*L3N+{tpSE3hrsS9k~k}a%oQ37`Eoem`Eo8PMdtTPj>Jm?GKmE*rTF z6o{(~#VIUWBDEp}i7KITjZ=IcW1ff(+v2kuObreShXf zD5b=s0xIVz^L;wQLP#10j~CFHI-OBZ{P^~|{m#qgH`Dd{@3+pqbDF0PKjrhz>?_TV zaQZ6ixpS#x4<4Lg<@wDO1G@NEV0eCJ%=p)8Dp^R?BSsxX@cM`9-{%t5tZ9?{t!zYc z$hNr|St6Z67L9>C0xNp#PK0+t!k7EJ`+rfrXIE{#H@BeP^B3>ULQ{3r?P%`_or*bO z_L&c?0|GrmSEmETn?fcW*6FV*y*7cNH#BS6u(1JUw!9h|o)Q9dS~tMWNCbAbdBy^6 z?!`JN*0eORyzS23E%LhNnU#QmE=vQyotUKyVE#*?mS2`Gfu(N`zeyUd*+V#lln_^; z=7L8w*BkIL_K=br?Q96Dhsk3jBCh!j6i_8>KlvnPdbwn6E>s0NVan|f==J@y@o{E{ z7*kFZFfsTJov;JWgT3R9eDs8qbBG~3y^5fIwjr~*(>_}6$Cx{qUNC;ha8CZI5-NL$ zH#&9L^uW*Y7a9A|xy2kKeG+tq#}=NYH;}ihOd8M53bFeO#6R58tKg%*pa=ZCZSYpG0CB* zE-{$fW0$Jt-K%sjwW@%X<#kptbJ?>z_C7^4=70#-6GD^LX0(M^=G!%>pUm&OIT_K4 z>7);(-J0XQGr#Wz^QP_l(}Kz9M>hdX)`ZLEkNmjAy+1#mq&Wax=lSlE03|-^^w~Gp 
zz=5|%`;&)gUgF<^p|vsq8<2Ads3GEnbef&aOUFn_yE8kuSVDGSK+D{$es>=Ap2JMu zEAOu(jYGp&(NMBj+;}yiJ2UvGSouR`lGpRkXG9yd?#bPv^^Y5ygAGUvl+N>_-taX$ z^BSq?GgHxZ`ca3m&i!gOGkL1fR-$3&n}BwM;JBA>h2c%@5w2Qd2c(XVTygx7OIOf2 zaMtlo{)BfnWE~PP?#Re4$*Fs%u>CGe1XIFLyzqV|UihKQ=eGCxdYta*BCa7y7|F*9 z@nkh~&coGDeh#|pNZn|M&}t#E6kr1x<7K3v)MedBKwd84ONLsA2HFA1a-ib0w}0w- z18E`Pp9>83KG!p|m1i%|f2AT$vb6xV&G%98!;k9@45db1tF`h+Q>qc~2Z}5Flti~I zZFmNFoNGE}lr@|jAC!;dVdb9F%iv(&X(X)~DTFT)_Nn*)HLtTD;0+%AT6MoJ@O&~> zBi%mUcZx*GFuSk<#``jSsZ`ZWT^dh43kRr0WNcJbmMoUkl1$)IuK`3(p2p zL`XQGFN_rgLQ!2jmWrA3t=Y876IC~wpPz#8s)FI5HW(P8RR z(8}IYi$n?5h#Adqbg*{`uT&tl2Z7uVg4_((QaqSu|DBt|=}UW6?;mLWlzB}S)k;Dd zlLGaEVeQN>4az-HszoDf8E=!t z2=%C!X`{rQP)dJHR)&V#%CH@_LaVZnGOfnSG@*@b5)lZ=5l=L1Oq7dU*M+KQT!7CL z28kKW&+}75+RfxjS-{S1p(v-QVFl`yX8V z56N79dvl<{myt52Na}Q18Q{%vDuAi^4UGuVLY+D{wL)jDB!y$Zp3H}<*|p+>8pPQ& zW#<4OlXOba84U!v#^uA?GSjp|4Tp-9@?lQp?loF1{p;D~i$(sH{{CVyRFVJ+%iM|z z@?x|tuB~6y$PXZfh!4W&HfxuhM9R~KLgfP1ml8tZVMaDvQK`4Fr5|-U4~tD3%LjhA z(NMI^-6Pw(f~=_!OYm(tatAYdmS^6oH{s3R&l_d|zX;cGNm-mpL*8;`XZ@K<{11B= zc5AOc#tn*{c~cpQ?h~JRfAL<-k#NWPTm_enDAktSuLc%nchdSkYJ~={Z8tKF4foDn zFy%Sn^h3PkrJ&|l&cj4t=Zf0jz$Ukk9e@!9X7zwgpU-pSeLn(!^60?L!|D~KpZ>B9 zQJ;-SdHLajR3DW{ts25O=k69ngkO7je@cvL;-f5LScqQ!)L#9w|H^?D**81(Umr6OsxsKkRqq7YFZjF=n<)NHXq?9$hmzkUp)QCCd^!CBUn+c}l zScz zu2^~R0#O)8r0>hU;HTEqYCyv5LUZ4CvbM=qoLd8uo3BHOt%03TSH(3CD_CWS@pMq1 zqb9Y6>OCBFU`VB&Os#jgcNOd(jHLL9Enj&V*%dPIwd|e+%U}K3Qf`B@^IA(mK(Dg% z4eLzUBi6S+zOvdEJtkVz5%vC7naD7UmMwKjawwy8I3d=e6n8qO+2L6x!X=bGeKZle z3l=Q#fn$98&3R5PQ1$9?I|Tm$n+p%ilx(A@mmcPme(d?aZGo_Y1FG61ub^qB46fU~ z|DnT5SixW3U#~Us#yKtGxQPRCx@(HFm9frU99*99VlY;d8ac8W|Jwxph66Aq-s|-} z>DqF@r#YmcX1ih?rdv4lPPKC%3lPyB-+ z8Vb2H?jmWA$6NiOK1uFA8!E;oym%MTO$Y^KiB!?rEQ%w+sMRTIsg##bSujgS?jgp( zN)c7Ztx#&yr~vGi8i4J5avb(-+3Y^2WGq|A_@S@;OEvM`rP)3Zrn#JiKpXBFI%mNiFR*%{lKSt}zh;Y*3F6azTWJ zuQxl5I)4?nFFGq+TCuG9;daMal_OxM$L9`w0nV(X*R^+m-s-$)^7d{14MA6-!74|_ zyvk=v6UG&w=_o{16k9xUq1c;w`P zj?D?UnHkt#ZdvIRpy|=I(6CX;OiuD`#_f?xW=Rzv1&MUAKF&cw%B#Y)a>^ 
zFVauOx6CS^_Ee>5IU61qUtL~K0D6@kDgjyv#iq;>rnN$!5D{A5fBv-nKQx_nSQFge z#`Q5kMUV#(kWx`mX@eReB47|AQX(xW-7wfx6hu;`r4f+sjv+OrMoJ8r-mk z4w$g`{MB>MdTz9JauHCrgGE<|NEM*GO=*~Q|Z9wx-4nDW| zYOz??{t{%aP`}V)vM44lPJQdQz5P53^Gjxez5WK3r@+x#|IVHi<0PK%6X;=x6DJ8! z6GSGn9xDg6#JBm?U1A2yv6p+9MXZV4Wf(@Sx|fu`p9I#cEE*|6yAzD_x9-kbYIOQv zVD1+b`=>wh?K_*g3{Yn<^x|3nSki9$D~pIN4s`cc;OLTH*%U&!Cm3=($DB{64gMz@nx$ax%Qa zWAnkwy$l$S&6x|XM1K4zBC;Z2!L7g&;W%(%am$mg57$_X8Ca~5F6e*8YN+t9&PLvwze$%(!W2+i!UF}ML<7ZD>vVzB9rG<12 zo=~gpiaz?%G@(s9`AD{L_`a;G%#@d1x#{TGHdQ2n@+UW{g{1SX+`vI8Id;R<;Tu=j zg8zdc6Y$g87plo*2bAbK=M|#AY|Xt1i${j6?teK&e4Y89?c0KH=PZUGLsEmg>bUlr z%L-58C(pPBwo~S=qXM4t8Lla;hV4uF;-;R5s=>U6dBGyM#G94Bz}w1e*W2r*cgVWo zHOS46*pFu|Op0DeE6;3UF{R#q5(Qedg}alP%kp&yW{8ikz<2MK_2{%2qVSk+6@SmDWs+WqY!59BxmjN0X?&+* zlHDwXSZGF>96P6aag6E27G7b@x(@SLPK#%X*QvSZj1A3S_2BgB+EmrkaHH;lmvdO8jS)YHY&js7MkDsZ5!?gPSX#<1AYiyK z9CV$Yi)RMlU{XE9?v4Byukg1Nea=a! zMNM$^#EbHg5N}pv25)c(#M4<<(ftY%yf~}qRczOh<)VZGV zMPif>&1={&u#c~o>1wp^Hx8Y}kn9qz7KZzhWMmp#nne!g`rsT`a}VR*^jgt)afrhD zLJPPnDZ72;mhCifdFC#U*WcO2p-me%J_h-t>27!0j!>*oY17r%-4Pe){GT^bd$(WP3MfJxR+ zc4+AIyEI$RFUr$eY`zhDj+M5XOT0xoNEqxHgU0+4^J1i#~H`st9oVIhHjU%?Sn zY1saB`8Il@a-(}Cbj`lB#+}EFyI^kqL5OTg$n$O2l_LO6ld<+0ome_X?_2axmv^!{ zmHg^UnBldyr0!{?mV4{ztS_es zl9UGMvXe^LI2Gs7Mku_&+`~lPXCqSi0Jpxkmzvkr*g_k|>fsXD0^L=e5Z10B>}0Ew`Rd^Fwm6YX6iz#U^cFcKPY~lE_zH{V`lTnpLZE z>e=1x`2l=4B{%NA&<+c7Hwb4f%*&j8o-?*_ZOP%aD zMT6gVG(Wnwqu2P>{04mMy-|g|!8)e0*as~@*>wr;4~EYDIY4#2Ok%95v)>|7p-3Y7--g)mQF&px?e^?JQhy zu~GrY!nt*$kSji!pS4sTzoOvvqU4o9Q&A1XnqRwx59V9uj9RoE7sq^2=>uX(rw-MN zcKU6r4XlQ7-*vrbKRv7KBhphb`>EVvj=VBcPmqx}uIQ5tSu0;i1jE{Cii>MNN%z^A zU-alnjkkkU9E_fRu_u9a?8g>Xg4!l=cEhY%5haaN6(m2QGS<=N=vX`y*jN5#u(l?r zZ|ibBh}&=~X$OV0HJEpL>#st?-JWINHEB5j{<|vtiwl^q!`S*(zXF4&X+KYOHb5G) z^O%`;yXMq3%Kc`k2Wvz1c|N#7lcp9AV-vo{z=sGTbx+0@86t>}4uU|eY0;4RVe{r@ z3^^TsZ^O6I!hmqSHmEL|L?>4gwt;PbaEg`dU-;qy4FGf-y zvdVavqP|z?wB$YS`z}AVFnFo9`utBG$DaeMN`0+Ay;v6qlDjoi!J+Ff?{;A{81PW4lZki#V0w8=Yx)!=t4(Fe=XbA7HbPehqj$RW>$qLGnn 
z5B7ki?JC3h%nAltKvXb!!Lk6u21cf_ssq1y3$km-lY_E5CLy~X5Jm@cT9#FYH;qrT zU#M`wMwsY^#NAX-`ut?}LCA5JZ_oR#B}l#SlM*3DR82eV_)0(fP};yhddRNC6UmZ`24m%WTo)?z_pE*4Tg%b_4?ec0}ojZDXu6I!~FlGKaA> zpooMb`wsmb^UDG`a5yQdW`kFILfY15ma#>r)d-C_mpN2CBCJnTQWW7}_0;tsLFV7= z33~P?M7D5x%Y`Tt{|g5EV;tMNJ{_Ms!^y?STjH{Ew#XXU-8x)i^()UJB_B0}e{bSo zJTGZ8dv|$bWt~LUlaQ4%^Em+GE@Fh=4`cM)dmXt<%vv;TVbbrwp?Xn3Mxy^krOeEJ zMKy;(QiWI5GlHPR-*#JF@pt$Y^{m0pH>##PPn_YQjmtTo6ha#|+fMP$RGjxRcwb`n z&9EdPocMZHvp4B9c(q&EOD0%9)ui=b3YGbcr=qiDc5zdjp}YJarqN@aGc?MjHFZ%J z45!J>ow0J6NxQyiF>~6m-;lL-c$(G6cH7^$X}9e+Mtk4QBl?3Nb8dp_I#spR$bObw zuZ<5qaKhfCr|HD@zs?(#ucoh}K ztmmoa2h{=c@`+Ka5I^CHN)phX;AH4uwB_?LW$$?|Gn8$*M_JCFmTQ934=(+A62jMg zm&A}1_yGs+H0eb*sW<7C@b3D=u;hciX!>5XdnczMkQy${vlvC((Uh9mxF=v&AygfU z%={5DR~)i-)mpYGEapd(ZCv|uR6@lg*BS6&8^P*MHcd>V;{Cx^5MnXjy#At88?8Bh z3ymhTl3CVHbDneZ8Aq(gmeMJ|D8>_IlU;WvnYdoKsUX18!SW7^TBus2kcJS+EBKX89IzO!^BcWuK|ueTH?P#d6SKagFR>J0Hm>$&)Tzm~I)$t1dfsT_i5S1L`5ru5T((m;yIYIL`f_ z?*DG;ph5pv1l*9%D4^~24W`raPbjXoqDHdH0)c;aWk)JSZ2MA7M{ch#&v22HHlM6d zHxCYMiYD}$|7c0aZPRrK!p+1Aa&wIx&c#ig`XP3)!Yqb1Y{j&j# zYot9uC%}_rAr)YoA6M*dj5E%q{&r0q5%VVoz_i&GeMS|WjIDG^RAPuE7QY}jpV>PMTQ6o-i@&r2PI&{ zKE&n{v5k`VZuS4=R_L?W_F-nP6ki%1F6e#Fd6Gv!N>#ge;san zwB84mXn@%luA40S`Lc^rc?XrQ{nlA+@9K-122|9nEWZ7R?-Ve=u#Y-zZj8%0m;3%m z8OKPyNG$t(aRAK$dBuOO6h@+R!MzttI}7x~vwWAR%cfKOBuwM&h6aeBS!mzlW#!a{ z8W^P3StQ_oKm+vSfQ4`I_9Z+jaZo^~$<1cRwod3qssLNGMd0S2IWXpNDGt6I+Po?r za-)AmzhbIg6+*@_$pZr8-pN`?{i#ws` z^J0X_861~qp_Y!%R9K!rthSaHxi|{_^M>hT`Y7=Bea{3lm0}FJcn>I}*IKaMq?hCN z04j4t9<#0Z7i$2Jk3nD-KJg=?phcNVZ>LKs!qOt=65>9}=t$`MC3Z0?B>5!leNf4`N=NI?pqw%D=fH(JqY#_=*;a68N zGHF~3D%Zlq`;FSuC?ylpvi zn8R5t(Gk0eimwbX4c@-|xb;*)b&GQoHCe1yNvGGZi?HtEl=uuH zLJMz?-~^|dg`;|2TW$QpO`>)$$_ImYHHwFKOzk$$)!zPma@mrfzU!Z#F_~d3k+J+@ zHcXF$usYT4Ir3B?O+${+!A5Y+jW_zbV&)n0CCr*|nxB>K{!36ccS_apxyZr|g5Mec zS_EVn3q)&Ri*UlHxWv^XY>R7favuCV7|0-GGA>yv^>>HPb6yrrkV}@NyH#+=shSay zY2>I$!g*GF?=93i$?vjiulBojNddEN9YEsb|OQHIx`gBjln+itegL#umd?)k; 
zZb_O8UM*tvo?6B`-7Gl5f1UHDCD2MRe{~42q&1LN3&Ur_Yn>*ZDkP+rS7vRs0}Ty#c16QXNE)J; zX?A3ZtZl0uu?dbzOByWCtlVP>8k?RjFQlZ^{b@=!@WlO^Bzwa}OigJdA`_zGPR+Ex zX1|!v4D{LziC)vyXkKsMT9XSA80SoHvNLv?_LB@ivvxQ?PUzCARXoF$5_B&@pQ!L4 z#w{aL{NGeI6|5jAI<2%(~5(gS+OWk5e@2X#S)vC9J+t1k;T@KYPP8hYyy}A64YEqFUn&BTs zB9(T_TNZ8fGmA}X&mC@EuR>i=Jsu0_rU$vqr)@S%&!6nI#w{k83jiZG60D&s{}~Qu zQv%YPJuq@ET+R>;eJ7gt%jEc#u3Ha`3S0%d3Y&jg>j>DbwgUrp_2|WAb(8uW-M#uj zH=K~Z0j@CNN8@Z8*vsId(ci7@Tyzdn<1V6}22q|&F9lK?!NOYVIS2bQly_f@t?{P{ z`4M;Tz4lKBsf!AXtxT7_%{X_Ifwe<;&n{4`o6^!yHDc&AhQC+M1PThf4!{eTZ3)j> zMdx@31$gm0l8)61dOyVNvU`X^x`lvbX%({+cf zHwrhMDrMat;Wsov3r!VIAkEUJ*lS%(_$WT>g+WzHeEkYu8IWE6^)SMdgGjB8{ei(O zvVXgnPepBm9A`;Ia)TofCPb^L&hUdGr>A$|;_WITx zq=(u6qVknx=Oq?p9}0h$FRjb)#L&s+bddu<2r0+cvz%vCG#?pX$vA0pg5H2d;e=t`k0(`qQT`1srwNtz%1 z!D)WA9YTEb(^1l^jOcpy=%_&3&)aC1DL9>`7B^i$zOg!SoZhZXRSjI#OaI95TO27k zn;V#dM*#$rT}bNRWv{bOOFUO^;GZLc9~i}_`*xoUa?PAhZW&`WOvYaq=L2&(Gxqrv zH0Hnufn?x#LQ->+PSS{2WgG7WLdco~tijbVb;LX-B1;ThSVG2p#AzE>-Px_;y$b;@^B_yPw+)3ekxlPqy-$FX=u7Q4eT|$DSl!{ORaz`#S$WRsF!- z%GsDe`e!EG2Ss58`G8&AE8Dhq{d89E&Kc9j@PM~+EPD9fdMWEc!eEVuj}zsGGa(;^ z;d*RP6qZzpJ*&z~^0Mh{<7+6Oxn-2q=DGfypP8viRII#Z(AOw)4wcSp0vrHe!~@lx z_6D`egJ{m~WrZ@J(a>daXsWcbtSd{)el>Br7NRZZ?$s{K z@0@yZR6Cr7otcAGoMZ?dRksgHUy$7%u>FG+ zeDolBm5l)gJ4$hDp1{-FB?9-1F8pPS5#r5uXkT^QOS5{>X06^x1-Y zH4RPN8w`mSW}d2Gd>S9=_5DwI_bJ6y8_G?tU&h1i};y&F3B?s<9#re#C1+_7)DDG}XKJW_6Z35r%*!-&oYV;^rhdaTOh3cCg)Pokh!ShR6@KC zO=HS!o=u4Lx4fA=sf!Jd`+(X1DQrdQD(*b<>y`&4+dWU$_OfAG-R-mrkKf;e1vq%l zCNZ~rcN>NZ^s>@F!S?AD3C*J7O`C0g3}Oi5r8}Re68%iOQ)& z)&l-zjw`B6JkNi^If(qD6YjKBd?4@(rzuGVVC`6Be&O?24uIst6G}ipI zsvYDr#pfGvT^(L~Q9Hv22_PMSCSmvX@lwI=`z=k2w9>#u5w+1)0O#5H zO82`af}?f0Qq>WtDxRYAD|S;i?Z-4t6wFll#ze+QBX`2*i}p?3=dOFcLMa9wjQ$qj z9Rpj3>Fs&Q8rwNa*+EKPgG(c6J=_=WYa6(^G)jnCqojL2Tf}?P->4GkLB+x@a0@fO z@#)m+{qQl{KF9ECqy}|x_>#7Q3wXr66;5-_EtF;UPi&=FipJ$?6zD=A$!DyIe0dY6 zP8_=W)-s9_>vxHKj9cBr=0+Y=6JJudU#HR4uf;DV&n6)gdSRwsBnsX;;ogf2lB#qz zV3#Z+8g#5rcv{&Ox4%@cLPi+hnseJNt>vz%YityW7ldo`$$DK#BP`@~w#A89W1>>4 
zCEhru@=r!>-d;RnH?bGd6JkrSuRb?u{?fVXo}ALI;NI}#aOY-s3O?78}9MwSwx zXMMXn>Jz!YX2l@rVxjA(2;!$;(WPfTWV>q_EkYNAPda=ryBm~Kz*4oa;S0n1X6Zi* zeOr{kuR7Xo2D~Cw9w;Ll^yj!p5BoVlOtH9o=Nn%x0nB>^%FGri-Hs~$e2s)IUf$y; zN&&VZ>!a?S2Xz48>$jX4dQ}1FgdWpie0b?B7KhX4$_kFxsf$Fr@?0pyAMKx~*(8`6 z96jwR7b#y?$#4JcQ&|f3rPs%318+ZU?BTH~T;`3wr8S1%LV&PL{*a+ zF(AjMW%g$Rb@mUasV}dT@dfy0#jY8L&wWtgv9w!u+F(kndfIM40zJmU%19~+N@v@)tI3`yX#sA5Fy;L(%3%`l{T>3q z)i#zE_qxFaDBT&xEy{V&H51Hp7WfO+D+=rzj^Yvto**XxmR`?j&1~;)tQxgZIoPjh z)~9=5%<68=b*1tT%6S?)VTMePo@`{T9_x>6usZl$lUSQAq%8;Ae;s~g`v3#C)={Vm zvX#=pwv_B4ookVt35vF@WGocnzZSK(n#);E`_ZhXe-PV#UgG&3nY219ji${LJ>i%Y z06y!+4Bj^&pvs)GJtH$;jyuzTR;Hl(i2SMNdBzO2&#PWC||KW-K82ytPW9$Q_Yq#rn zswJX%Ds9zGqz>Hq`pGs-&-hBrjHVpcDC}})!15zBt9JJQ*WW*0>*-oz#Fv)!JHG<% z>Qt<#wD=2kyiQ{S>Eq%u~ zt`=+ytDeq2eS?MbYD)iTZH8G8bI_$lf;41stk}fx9cM}N?&Ms3#c<1{?WnRiCL!*L zGVs7jgM9-6380d20kGh`$X)3%^z!J5$2Qpg`|ta16)6l_w#6cm>Z8m(nQ@h-Kj-cI z6;OZ1EcAD`NVk5^CI~-wd0f+IlUN*!P9ELzfUXq|0mZd6n}N4531fj8t2MNFQ0av2 z&iq7LpV0GalR3%5!s)`tw&?ag*ctsGMA@lI*%P`ob&-C~!#ti#`K4^d)bG^*dmV=> z*$%)#x$+Gby%ybWW3tak2ie7A1R4GLJVOpa2&^fz;Z2E7-%i1HB-y1IQz3y}x6;@l zzPNS6K1pRZM)eT^3Qpp&-&gO|j4{$dsCYUJ5pENk+2Di%&4fe33~CQjv%P*eNyI3 zq9Q_3fcW7c-LiW-Te-1D2#0BrWyMXKb6sAK;>;8MU=GO>GANbJ9bY08T{SL+2`=qw zcX+7lorYh|tQmYZ#QQksj}vyQspNcwlb1@{eS0t5@AWgt9Ch?(tj;K&|IpPh|0AP$ z7=0&qvrVXcNfjAZ2|5-EqL1iuUZX|y1G8SmgB00Bl@^xG&gHf+#p?ca8kEG@eoDE? zr@BAl=8lY?otA6JDYVDWg+Od8$yTzAH1LCbCpvQ>FdzVG)gPfss05?vsRHeYs&7xeHu-(l(! 
zk8US!$tv<|A$P5BhGdCA1`*`Z(sTMJ0kR0YyAkTh+@cgy`|{W4JruCg%7T7{?ENUg zvY88@klcY*fTaKMMuInR86Z=AXXGs7e9vDWtABwCW4^K=l{;8&*A&{`<8Ra1^dm$@ zTx5t<7@ZwflqHkqVe0v{V8cunAfE8Zt9Ao*yd#Qf1CXSO0ph)?$_03pfkRP^D%7{IM zfaQVAmPa2!1jFV&K(ugZ%-O7|4jG#>>N-{OJ5E@{)lI{@O&RQSgSP) zq?7&v8S)n;^{>`+GF*1{8I$`4f-G`Fi(ChY&4p9equtLno{}=tTO>a^p59PwRs$xb zH^%SFY88)!G*_DxTYs-Jhp&mjc9p2+2c?ZhGdc+q2&mo5wIQ^0HZzB~BFs3Yb|X_t zfB3r#{pC?Nbs@-wkOIf?YllAL|8DHPkU>7I3~;fSv3rKD2FkurPSS?~%R1p_s5J(T z5J2--yBf%LKj~rcGcS#lfzflzSjBK4sPNPtY=Y809E2huC+yOvWk%d@KytS+CdW_Q z`twBavI595AisUbq3@sfiJVF6Msi#{GON$QF4?ntfrPi0ukMKaiTu=+9Jm67WPE;MEW8O5Y`1uegWE6&1Xl*;QBEgW394P z1f7Cl>aFD~k;T&gj_}UDFt{U%{&n-LQONL?=Z|MDO_JMx>jiEn9a z6vH)BET}K(y>)i&C3E#2=Jd%*gU)mXxHfg}3X&5QcNisssS-M%HiladPea2|E%~5( zj4w&zm5_zHQg`*1bS67FiEve7v_nzTgKslN{{;1YH%lCH;x&)M2tPydA1_>BljM2! zSDt*;)ThPU%WFrm|49cU!fv?JD8%6gB>DFgVSXIYB*t4TH4u}pwY-_D;^K~Fe?H3R zM4rjIVTozjVdWI$a{-zjVniBl*=a3uADEFj`jw%Y`X>s}il&943kiFSSr#S4uE-#+ zJ1SURRXhI2r*{Xz>^5cF$&s-eRw27LpD=tAIn2bd&_4{08W-KHCo_ik*g&_m^pVg6bC^GdY^UFmViC> zlD3mc!no4ma{pY+sl}*16eNJ|h3aA&J`h$AoA%lMIg`wFZdd^0j_&tr-J%V*W1!cX z|11ZGi@*1$sTJ41niUMfm3v{>;&W5c8Zf@gnvKIB?QmbksbR$q6EG&twW==tQSEor z%p5R4>whqPZhpus&?Wp#5MS~K!KFKc#gp@%aGB0={%IuIp`l2t6FRptZkAzKfS)F> z1od1R}m-b|8WN1Mj5C z=F0m>*-;{qUc3A>k=)XLcYSZtI2N`irSljC{S)OiRgj%G^66r(Y~WzH*|YVZzw}<8 ze!F}>QB@dZSS;7g-+jkB*I-iXB32>@VU8C!ovg5C-x=zY!K8>Is9S@>j9e!MG+Ym{ zl1C6?I+iPM+Z)SwS%EmBH_U*S0oyZ^kjd~UwSiI3Ncueav~t6P^Jch7WWzTb9GED}A@pe>|eY?k0}Y$q=&$91|} zR?d_`x{5l%alqj6n6Jl5M0XDAOE!>vZYWi%jpHIgo1NHdpnzmm+WT`tnY7?e1iiG~ z;ORY}@Duy}kabRfkw65u>thCW=N_wjJ>NQNeuja=xb)qKlmGqpNP&Ajz>jh9-H~_a1;~6>?a){uf+QT;77uuQu1ywkaLpr1&}ndWpcLyP1kMcHFsWdE^BzukEE}>4Y{f&-!fl(!akMEB_wLw&kUhhYGV zEjekb6uO2-&<{=^I|5UEo96jDR}@Cwt8)WkUV1Pvc-M`nb#GkE_woEL`A-4+^Wsw# z&f{g4y%ku}UAYMES{|fz5uL&GdiZ{}JDv3W{iVw95I79CNXhsv6;J%uPI`!$slTHL z+Z?J|Nn75k^G(Gir3;|z6q~JSLtd0%s6-wm@>2Xs%#CKYj9g=tjYhMyhqSES;eM9DI1Yfz%lk zf8@1$ed6amu#B^0zZ^j?JXH|{)L72Os;oi>41({s;CGo#0zHT6^0b{Kaf!86q4;mv 
zTu$1BX?J3G*Jt8bXC!jygfP>U-)I4?ZPM?0zuBmrh27?8!V&aPDjTkOW+v3>N7M95 zO!m1vI%e|IPS&W7Gq#zk5=>G)G)>rSXN#5-(;ScZuc`+lyXSY7c0kdaL{^h~G9HNz zn5~R{d}<6XET-t^5U^U9=Y&^=uwkzYjAC?ZGltWhwPTw3ko1eyu#+ zxHrzGe7wi9H9ycNe2s>p?<!Q` zcDf)53Xpi;KsNJ}jvJP9*?-VgPS7xmS_6l-SP`2@cZxKPV>!amWsL{KE z-yC*7Nq*dPV36m!J|;0qaiCt)*%&MRQR`PsTM$+@KFSG*GQh8t{*bZQquhMJJfL&j zAtgn8lu)a%=w2GDtfRTRt;Tu^hJ|3RA1t}Nb3aI@{!0JG_(Sxqzq^?*f7rH-71WV(C4U*2g*Vj=QPB%FkgI%Rd%1JPj z4?yej3~xh|10fv6!>~a57r}NeKL5K7_?FP2p`dd)=}Hzs^7a!$bM*9}-pm!_$GFGg z=foZe3%1BLA6#MD?X2pujFC4#veUTACiw2eSHfw=uugJ5>xe4xaf+F*uqgi8bzjui zg7GAF)5~U*BB9|oY|~%j6*gWs`|i#kdW0Bnq!ced+FM97b?DUX8G1;9OB4rov71ik zKQ+sHD>9&5P>LXsr%Lq>5r_9anYTBN_W8))kK{7*n@uEK?aE=6VVFu+3PHv|&_6q0 zRL%?MJ@#I222sRMm6INYHtjWecCc%l)jNufT9?}+jh4cy_n;Ft36W6=8(sO##w~N4 zr4KX&{9gtzGit70uM*5`nzawAZ^_J)d0O?;=H7OUmizW$Z( z6Bi};cvucI$yn7WGLk!`@~l^LVS^hI>(-8izz*O6BNlF$k+^ELx)#hTe~p1<=iPWB=C z%nGqElebxmuc8m-4BvFNyK~oG_{2`ABWPgPJP|9kgltUv;As9T0ssa%;hsM;&AEkM z!|vG6FL*5@`?scJRRabI4Cgj)>Xz&R;imXDD95kRz?MGXd8Mru`y+;+)%YE41_Xvg zhn({OkVAd9KvpW_vh)JLi@bJLNKu`pw^K>?{49e=xWf>|-j?OA^#$o$oSB^~dn$MR z`O=nYeHS8zDH8nJrI6XdDvMulz50qD5A)IqF*|sYsFrG|=tEZsA+rT4ahAHwYO%W7 z@M!qI%YT4lbmhmKVK!|b)lD8z7tQ~$Txc14FnnU={a2hO%CA4u;UMdZ4)J(I|Lby& zSG`I}vgKF}c%y?(a;>nBh1gW1uamF*!g%n|MK$#0fqpOpBtJS(B{T^QvnbAU}c_e)aJ2*GV@&fRApaw!d|e zAk$T!AI0lm6oCttI!!nsoFcgdOODcVhYpszy?pu7OMWiw-n)xBoltY7IuITg@ zqV58aqmTMMH;!gR{=uOPK@1x(Myws9N;F!-Uao$+>IR=GQNq|>!-@$P5Q@kifm>)JWRJAk%87^(voKRzm zT4DXyAW!~yz#7N~>Pj3A9&pYV?26{Q_CeME>%Z~BeB&L{MQJCCB` z9a_)7d{>#f!`-8y=y|nc?}}OIiEX*O=(%8NM*4EP?4Qk8N4t9<*W^FGX{V z{eC1{4^J}TCmU6*b>#gPe?U+9-4q+=H=pc%yiK})*QQ{$Wop9BEVmb>6^D~RR=EosC@;_Y4|+#@1q*6u=T)w=Kk<;`Rcx?oxRirWV!!zdK|QuCG?iuZIvZ-)n# zU)vk<@O2W`bUX+_0PZ3-O=J#?SDCy|6#a+26!30_8FDZ8H*MUZb9dy#hb8z=2J>UV zSz2)SkRQ<^1onv4eltrd>3?VA-y887!=O2zwM2mAy|TpPa&KYm+<{aAGQ4$`F-anb zz`Mff;lqqPu<%23fYY7;GSWynKo|?xVWc{<`!GJ!j}GDHfI6E4WMsK|+HzN%ryHud zOScpGdv-y_`qdyX9Z4xOv5?W+MS3ft4-c@d=E{o0eec@+|LKzO{9R%SS!y!V@lNqg 
z27$%0kr5H!k|gDcgo5m$wRG?M^2ATjVM`JXD(^&(WmP7(-mY?5(KDs(_J$w4<(PVY z^aR&gm9{DimFGUohgTdl@ae4F{*pAL*4nTH9wFI0_x{z85=Hbn&NsK==KTR-xWA0v zwBVavB~m>qQcm%K9(@N?4iGA_@;lxTPBa|;i~l`wR(;@NgNT}8&I(Q&@(E@h+B$XX zC%CTieQei>cC}6o%7!D;uxYz|W*!0W5#4OMU^>)l>NJC-7XXZ1pBc>c9L9yt%q`&F z!LEjFV}z9|f)L!!#jVBdvq(IQ)Huu-5=3dgnUd%>#ahcn&L7$dX6(PLwM&gFuMVc@ z@`Ey9IY046fhbJnIgN8g^Sd?za{KmeYs)rK{A>TNnbb!JuhsCOeWZd|^FldvsGyOq zxKX=5Q?=guex)p%RXem){xW~vTx%6|g()YPP5Bx>cc|<^MDXxOYqp0>f>p9&(+6(y zf0!^@NDO6T=T*`^qs?kPrL0q+e$&&#m1IJyAH!f8w>c-VQJs&Yvo; z`;y#~%Dj#3fvtKdeApsyD=}MmfTOf)9rNoy#6iG~e{4t1!_0z+qxoFj=97%}cHZa0 zZdQDDw8Bjq^}56(uC%SGosjJK97C}HQQ8zvY~@?1i;l!NRCXn_HV=zeq~&BbQ8*u7acxY7EVTb)v%GN{e*jN^U7}L_{iQ9u)uTM_9U;GsbM&W##x+ zbLDaR+|A^PtMWZazoEzMUEu^GgNQenBbh5K?rY;p@)xC>YvxlKg7i+iK|6(co?E{U z?}Oyn#~@Pe zfnv!_Z?6+5?Eifh!*?r5>}K;lw5W;n>eC$P%o-Uk3nQ= zmua4$&wYixKf#?*<(fuE1MH-|O;$(ubW=+d+lHE1GGy3ie;78+ajtyDP;J?temFA= zl1KG4)5k^rW;+~GRepy#>~+PdbdpE5YZ|1g<(OS#+L_xnIXV|0_7<+I7f~xn8ApC3 zu}Ce>?PJr^(?H9^HOz};ea-$YIzLJW46?1{=l7^Qrg8o9YejPb?)6Fks4Q`U75R zrw4ywnJ*J@D;(&sjjb3}Rvy#zL(MnQ;EbO!g=L*GJ!kDw3THl-E(I=91M2I1^Vr4{ zEg;1~8{97NIFQxjn#15!gF>is*gXu#zqInOnx1A%%=EktH**dj#3C%B$EH>AH6w)tA)hjA1{g&~;_m&k z+^6;@(BA$#^Cpr!QhjbYXF^un$E4^>^{cTcxl*p_6NTwwb4Uv*t@RAAlQ`k>{w!5@ zs_dl&!Ex`ceK97%`G)e-eiFCk2uGrgLQ_k84|Nq#q?o__mqfMm-T;yVf(1%$4|nU? 
zbZY1CS*8C9LrVO%^tuvJV(1fb7vE78>3%Id|0Gj0`YHHEHMJ`?oH&so^W_LOR1j8I zQhgWI_Zz%r9RVw$xObUTof2JhjOHWEc8>PAsPUK2{ob)~!Fl4`HYFpx^R}E`F26I(z&lwG+txr}B()jCq6SeUB1Nj8 zf=I_g2c=pth*YJ6iWKRgHx;CV{*Weysvv|S9YU3oC?G`$K_H-n9s|;(!?!m2)^px- z&mG?w_wGL&nC!i?_S$RB`pjn*Wf9D0(*k$aZ7(IyEM#H#DSD2FuZXlr6m!YMcJF|G z#+o@YmyobJ2{s%UG?>?kE_QDbvufA-z{b%Xta7o=tb=NFTCsAIN!A)QAEi%(wv#;q z4v$rUt`AC-2Gz=8*BcAX5o!B<#cLCb36RXY(BQ3F4vK>EwUT|BR^gxTHYySY* z@K*6fz?3l`le!8jZ&>7WpaO`uGX^8xf5qZs;@8wEr(zXu_pxMzzKdJnzr0r9HcyyG>*zc!$AhZVgA+&uJyxZiKW3mqxu3$?QG0eU9Fc7%GIN0@dB zVAnN$03_y1;M&pM40gdh)5kOqzL=a4tPxdWZngKRW-z5|)mB(&u~JCACi!gK!r1Ru zm~%g9iJA)9LMJmS{A_ITS}-C3=e5;?C)O~(GFzDzQ=_%>gUfmw@F3*xvW``k0(;!b zUmbR7CxQ3rH@EYbeS6~HZ|rM=u+JmSH%Wpa^dRgJRL^W0VU^Pdgn~v_WFb8ttzB=y z##?_z8MU*gJ<3{;iO^bBzjeCPyzF7m>m$l)R|T8odi=*Aq3W{nE-U6bkQxt*%|Kkp zA_vSv(%e)eaJiF~GOeef40q1ghuQd8EU^@B*n(UJ%eHLb1y9jagv59*T2Y$huG_N$ z&ARB5DcQ?sof(&#OgIH=s315f9&q)7l$4Z@nfn`gz&Z9s>yx!+FnuZ4!y2GtdiNbw zxWyIQC7^JTRg-OfO@vdq;_N|s!Sk$@hmPZHri^wMtj|*d^SAcZuJLvBX!0r!W>gO+ zcW?3qg>M5~?boE$laeD)U1qq@=6Ee=P50v(SPn3Mf%?PP@xN~1=v128&3=`PNstyh z->ua^4M3RifIdZ^3sf4}ORg*ciWM`iwqD*u&wH(%K~;Y5=El!?cAqsbswJm8w&DHE zc8u%%R(ya(JTL9BI6l!{j+=3fp1A*U*StP@`Yem(b9mdhs!1USf0-`b12ekQW4Bn% z3-M*q3!W9PW)@C0h)ufi$DVKr zB;bBHxw8k^=;$AtC1J?-D%HdGx>~r605C{0L|&Mae0%k)s8VGspWJsG`|%G2?4jgh z^h{@X9c!BYDZLX{y4HU9fQZu@U>Z7@NY^eNZG81HjcTBO1 z+Jq(HUWwkvO+P;W-wC;MY>9+g~c4S`8DW8)Y3XC zQ8@HCW>vpZYkj9pUg>U#>W|}(lg|_P9XqcW@Y4LL+s|6AhhdC`6EL5h-Nlts1DeIH zK*BO z-NBGe>)~i7!{9Am@9`I7uCfE8vv}h>>j(9z0n_@cvbh`Hc?6pRkIIxHw)P%Jg+k_X z{85gvMacu2UZS-A)ujRJ&*yDQtQXWM$|~p{7ReFj)?LFuBb`<2s_?m@x%2iU-((Nv zevCZYDJLG=P9!9VbGGvalFQEonA~37r!XH1lVc-)0vzhR(X!$y!3cpj!oPiyoeJCD zY^y1~BHECQLEDUBmyE65y1sK;(xkS>Ep~z`@~Y|5v$V$$(^$|n_zNgj`eGv4OV;%E zgHNER!NjbWpel9zKRd8!6`2VfJTe&4iYZUncWh#p0Y!Q^lxrqnu6*!#8(H;Yuu}X_ z+RM7QPjeWC1x@F3v?1reh7^i*8?1VQ(%Ew5!}b?P2+;-9Cl1WfQ|Na)>;+pqnSRN$ zYvwFdASxam^C5%7N_eziudiVDdCl=j6f;E`d#1uSR@bx(rTkHN)JA^2ME3a|Hkpn< zHtaqZ*3q+jUP&E#;+Uv=jHGo6*rL|9pJ(_`IT&GJHnvw*j$BxQS-5n$1sA 
zu@}(W$4^*Vobiy4UtFkV&LZ5O)vM7dpH&)|!&0A|LVGlwOY7Vw$i@cVkq>0D zod1Nd`3cVey3>GsK6&z0GLLee{DxbS1aDDIhlAk}A-$&z6AhrVo{X%2f020_lsA=} zD!5yIuI~F0r>p04H%=6A{SfC}ykCl)3#7^o$IpwKEALX>qly>=`$?J?-wezy67DW8 z%w}*b&riyjm^Mr>*Krdk1h*S+TC*s6EZa&7o(lIWHrss7A)% zP)QNFr4VCMHjsl7pMz&(LT8ca5&`+YtUB^;j`{#`mnALXRxZn7-mSJ zE|=Gd8JUr6N5+4_vaK>*q;SCjT)YQCcSw`CLeM5$a_aN=(Z-Tln+jLW8rPg};e{zT zk4rePUAAAjQ|wn$@Nz!qt|n%XqJkncOsKB!V&8I1Mh(Tz1+Rm7kxm?~DZA@Q?Kc!8 ziV_W10}Dnddb~?^q6Iut*i{u2rD0BqhP|k;X4y6eoq$wa=~FX=Y$XXVD?y8$C(_2X zUM^h?)B1nr=p82cN}jL>E$HGBZwmnK)}67oB35l}fQmrq^UGOe)GYF;hkW$i+@W=e zp3dh6Ir7y-;bNnXIfgq!VjZ*{QBe7#|5`II4_g8T`Bf$npOLgE1Ga)}fZo%?>rQbC z8KkOPb+I0aPN-45%u~Ji$ljOZ3xQxr+X}(I>JCO^~ijFPcRm* zR<5TSN_A`?%^;tI-odP1897ShUMO~zeH68q<}HMr2#cQz8`D|k@)f^E19$sw>@Aab zm=mar=O3cfHJ>JxlyJc55eJ|~lycUlhbUTQPpvc28UM- zutr$!&RvBTHi-qyg4ykVdfyoPuO;_wS7r&V$UMjt>Da}W-G{6{kXgSbseeaYt4ufD z_I+k;Z^qmrigaYQ6H{ZhQi8%`5}ovUB}(hJCd6}M0Uf-0<=eb?VMA7Q!Y@GXD>>D* z;&K-E4A^nZbq0yCi8-`l4YcXAvN9{w>+IDvDlTg=sedLz4yo;)JUc3|s#A}AZRj~#AU5Q< z{$Q}yd+Bby=f|A(c7s(j>&z9i?cG`5*WrbRp6ed-_Ol5E1Z$m2Zdi9R_N1kIER-@o zc~!<=DHqE2@5^!ID9gym7;A6)m$@mV0MF(I(aiS|b;Vb5MY7R7l6%3>+#iJqz_{T~ z*t`KgRR2zz%QQZU7gpMP5BDj6oA;op7K)4e5T^IWYMv?|Ov32yetNPdpJc0KFYW?p zp>a4nDenQ$39SojPo$MDFYiNL%qo1)M~K3{Ix&tNylH$QSS9ERTTuuIFH1*sdZV}# zRQz590K4GkS_t;fZ>dWE`7L+BT1cXy#c#o3Wz5QqAPi71HMixZTnU;2x0D?N%Nz!K z5IQY|+Fl{XQv6=1pFN%37jf%jo<8Q-<%Y=1YILU{;G}8!b4gZ zF&?fGr#&pxC0dR@xU3u|{+Z0MBvZzqac|*_k6&`3@0Y$4%CL}Q#t@5DWGXBRxE z8dbCx)ChVQFBSrmSXJ2i**pU^fUdHQR(0oB0C-e7dYjbW(;=O2-k?N+I{#W`3iZG? 
zt_1UqwJ-G|2w71_Cmph&cx^$|5`$np-&Oj9#Gj3^{ zCe&SgtQt~zds$8UcdHDyu4hL$BW_*YDxgHrbPFS%83D)cZk=z6_rFGSuX>Z{|msDon_`X6kAFZgF2Wub|vd7s-=k_zu zr0=RteNSfERkDr$D2f*JPuqkJiNanvOH^3dH>%y~@#3C&$-9@4-EV;Bo z4#UoiXG_F%Tg<6cxsY)ciER8oeD~aPa5ii?7S1vPa-cIfPA3Q1Z5RDu2J5_*EonJ6 zjjMbr&s;cQNw3)xRq6JhQm6vPohkTl{`Mjd~`4Mcuaf*^wE*B%uw; zbSlKvXr^ySKAL7RG_mZUJ%Dry>at-1 zxf}O$#k4_7GR_`)p_luf-SPm%Qh;O(YrDJ*7JVFLF-uQ_WZ1F%ED z@$~xdTR?xqCT0FU8W=2z{P$M~gIUD8+`L|)z_jKPI4eRD(siv1?4|wrcj90cVzS?ZnKfcHpF0C~HhLJl{RnXW zf1QQ~V(~>oL&LosmOlSzq*nIP*OIqzZT3o7QsgCvf#<*wGz$K4%H$|4%H{V~hc1D^ zIv!0WPqP318~ppfei%o~-}6GJV;n~EEvickm`R0Oz=BV z#xMzDzHz14qroyE@Lc^yaM;D)^JMWCG`Pa*$yYsO@WO6Dg_E>F0i~dF!5uVu0BZnV zZw~l4ZTFsbG6M8~0KndAd<8*`W3c5c_${^P z661_OEF})3h^E#^P6iqa1vChMuwMmKBI1meAiC%ja3$}L7WQJop zZ-3%np6<_mSp{G)c8TWZX4|e*MO#S6&#Zls?xbA&w?}zULDaA7{P^+e|9m@WnCw@3 z_P-mXf5-bTzxR*P{rfCsn^S0ELCFtZLH#FwAODMew0GkFdcFSHeE)J$(i;Hf1GYxv zw*_xT81DCG4Z3z0w5h*a;qY~O28wCGrZowT!La=>x6h!rjf{_iQhR|>F+UB=`tuN| zrc8xw6_{4rY#SPF7Ffh>cvhd0EkPw(x+TdzUjv(+4ybYpC32q?lYpi9Gk z3XMhW(%)X|WEflrjsSb=7FnTpul*0$n}{dD;O1q&i}m-04s-jB68Qa}4wt`c003sf zi)uk1H@<;>n$xLPQuI#A=>m|x12MnuG^hfq3<$$A*NN+Cqp0sLr0k_o*=&vfQay-$@#}*+%7gT+kcI7@a!|XTyh)s)x8W1AJzb-s*4h;D6 z|AjE!n}`3aMX+~=e_L*lPP2eL8m+IB2?i$i1qFrI8~@~%|BuUhqg)KA0bA(B9De=c z#fzT*A4Nrwtop&*zZFc-J1c`rju2P$s1|+Jm4X-$kx|nHp;TA`kLO|0cq~ zecUn%8N{kilAN25v@QT^Cp-72%%y+&zV02yXA_Y*9g3C9fMl{(L;rsFQ7E_-!e*8P zv`K_Zw;MMMmr@C&ry>-`2NM5|@DpaxzV-fHV0hdG_9O&44>=3IIV=B1sqUROS+O)J zxc3+Ij|K9-)5^b3tP0t?bJAa*Q>57f6UEI_!ROQ3kT*5>-cl6;PXW{~?vLRj6o%FT z+r-pm3)Dkt3#R$Gg8iG`FbtkCD~$wvVdVqHU{tiQfDXus0$X&yw~gM43~V_CbVhrN zDa7WjF#x#&VMtzga!=9(@%AP&K__AX(&#kM{?LOVP9H`H^hJY#1)YNw7<4?Swv*0= z0vF)Fmy>_*-Y}Rkm!zcr z+AGQc)(m6^y94kX@M!i}&<=dri}|s0a9jtvH}}7&%F?>R0R%903|Zu}t!(-h@Vsf+ zP2_?9#@YY^C%tA*x&yI2wg#-7M1ZS~-%~?DyvEK8t!%r%hz+7SCO^BxR%;1(d0Iz9 z7<@O4|8R{L##oSD*pG9ZHHHv22<+eIP5nL?Pz$Kjf)a(JXdngVGBXhHn*E`|nAR z{a_7rY1>)Qf<}=-j5VI~V#uk6^{vGT*h!$YMv}-@R+^^*350EMd;j}K!% 
zzqs}SA2%z*oMU}hD%<_R!-o%ty#b>5dR_MagT)90+3+=K2avNIzMo&TCr5!$*qzb( z_?q=`G}qyd@0Vdo*CuZHFmg^7hD_|UK63OZ8%4C=FihN^T>D={^S>E(e|u$+c!a^~ zPJ+AYQbpjfZGlh{naQ=v88kp_eVik5r{s;yGUvZY;QwrHF58;|+0+|MpUj-iak9fS z!!_yD&ZO$P6zq9xQe1qFeBlmNP1fWrFx&TzMt3HGd;JkX8jNP{94AGzg-_5N(Y-+{ zaR(j4D`_?%Da`96?MT?qz$SY4g1|S%-xtAd(lbj@)T+inyynNdAzemG(8tI5!euz@ zywE|SwMd)mdkI`AsQy5MKVq#XhW_lZAmrDfj{N7%#}U}1cMO(v@KX<8>^G&4`g{RtesiE)=@Yx?>1v zsrF*=&d6Q1b3l1PM>@T4;t8Yb8@e@2mZ@`+jB-r05}f0OA$+(GTq60$z-dXckb*R> za5$mJjVCKW7X@9hDw5hK=y}hMg+Hp>B9-Gdn+ohfDc7;6nP|@+13GjEF~NuaJj8Tx z-KWD*a8QQCYfm!c%z9_;La8>FBCY=MKur>q(Tpo{jASGcL#@lQ}A*f zTzOn5ngT4tM%}iICU)slMxYWYfXu{w$oj{h(FdIbv4@n0*U9iJz$2hX&|bb4e8}1l zRFGh1V#&4aHf-W%4qRZlK*!~z=Q|g4q?`6Lupa*&3cmq^t>pFc8sZ7;PMW}b6xzF| zMZYnS$Vt5m+p<6&pJw$Jd~yVMO+|p->y}z=5A$idpw97y^Nc96O;k1FoCAR#_%*+N z1OfFo9SDl2ZUX5p90MW83e4){fT=tKM)-*Yr!*bTzHlHYH=Gfqy8yYVjmz0~%bhAM zE^ys2u&$FpI>wyK97qvJo9`)85#S1&y7#El!ybN4wc1drv5h{p%%sbK@58|w`^H@^ z(mqA-Lj z-w~BGCzCxPtnJIif0h0F&h)*%ZK-@zY8k5{qF1mVb-7Jf?9egV#*T<$Y23FE2DTVk zW!x-#DMpv?&+3+_5z&%p&g8T-J;|4SF=ihW!U&PT?$h*=V7y7PyBiJ9JFr;AztsVCSeMN#Lf>fjJ0TuoBdBuMBH|EPjp?Cm^l4 zoCwki(Xv#$1xy0O8_s8fJch2281P6$O&(S}0$!D*#(Zg)Qfrc_00;w1^h0$4J_=8Q zQ!YKa4z3457LABVTeinf4M0iO(!?E_^t4tZZflP%D>vY%W=-J-c*xbf(5i0yRs~{_ zy;u|la;thlO~ecEX}Z>5k+$HG3tiBWbo4Y9@5e7&maAGuEdT|A1m9oYjb!=`DNEq# z{RMUHaR$VxmLH)%Dfwwj3*V%iU```I#}2sY^4761%H&FqQ5y%J^}hE* znUS`Lj3)r|7R$h<2EX#033j0O$$kdh{zJym>oAzxwH3uLfah<|f@n;CF>7_VFXRw} zmpub6CJq1$$0~j&SmdyTmcDFTb9_{Yv^(l|SbX-2ytEUP*dhm`k~O0f-tG zT|Qb^sjAi#o3;O)S~dxiWw|i|CQ;L3#A_Wu2QM8i@B*O-Te(*M$<1gE{(*s`|3P>C zyA87!9BI@7tE+7`TiUG3`Fq(xI4}yCtas)}HIck*_ZCJhfL`nU)%*lFRFX6}zgnXC z^1pi+|3Sa~4lbB=Uy2bD4s1Mx;4lLDLt5Bm3^t|(Z0@9rnHX|~ysT4p?!S2~|MUHT z2D}bN8*}nINa6rT$g^d@d|UF0>8jQ58HH^C>WVl6Hl{FJM(gXUFpIhHRoH@Ay2yyI zmZoRZbg+?r-MrbYe{;nC!;JmW_U|P=oK`n%DoGmb4i$za4UM%EO=~>fl4?9-Q`vf( z^lDgr%)m8`nYLS@TqSVnD+oWs2mKWO5 z@Qefzjg)ghvV+}q_1EMVBU3|_Ue0~6`(TBE#cMqx4TPE;>mhGg$B~I!7PY_dGk)(l 
z83Z;OW*62t$)CY`#G70iHH=>fok`diviGV%F!;HK`x!cz9Lqd{|FRLIf6EN~1g(Vp zgoFj-0VR;FodGCW;T=aPobXbdGmo!f@zKktk6mhg0!-H=0)AG23{p10^ayeTP+qI~ z6(CSwLKgg9xTP0B1FwSL`I{Hw{t&o^T1gTrv>?K8S=e{fo;6gYiA!>S$*qHpKXe&m zHG_VF3MbL*T4j*t=>?$KFJMWyBsn5XAQaOc1g-g~vWedVf4~<5DEgPythvB;$b;lQ zq$NN)bT$vn&}D)*5I-W=V+qYLNwnS%xKyhotou6Mb^fcjcO+HZ1)>UdU>#zTBxgUz zS_pv8G+GKkbo^=q_N$|TfmjH!_4pu9QjEgl$DXeDw9sTpNJEinQJ~UkU}QM+mufr1 zCOAz8$nV1}-V;!Q%RICo-;>5{eTWRegavC-I{<7pi}~i|E?i5Fk&g)S4dyyEzystY zaO39yx)=Bf36T6Y$TxZcv8mBP(d`6R_tV%LFP{c~SjNFF3-Z6$VYF}YjhHsu-}nPG z^62Eh>eYSa;9J0goINkst)U2z-zGh1=k1M^EWXv*DU+m#HN*og&J*%EV93+T(!we;--KtQ=fW4NFC|vG$ovnjCP)!x|mfELVjKtN1|) zHy?N4*1`;w%>&Lar0k?DRR$x_TN8{&immV8!rlZf-uwtNZ}QT|z0l+sKxygE(4)&mCZNU({ z1wY3sK#0~0C8oinos2t_Rp7dsehZz-1WVUl*Q5s|OXI<`7gZ^?7OqowX3iP zL2=u`n62dT4}8%c0W>?=dws2@XB_*Wb{Z}huCnidmOYv9gRPd&^@;jn_csRcN(tES zh5-|X5oj;n>TS4otA{7B_oBTI=kY7BRdVxnA>}l2F*tbGB}7I(ToiX{rHQTHxsCgr zpss3;75CBEU+bNo^`&05rDpY=0Dj7BOUTpbHs3ko# zg%vBVDv+OzNVlES;;dW&oUc-K6QWMZeN{|DOHfjtZr{?;u}pI3?i%lI7Ih_aE-AZu zIGPt)_pnK@7VqXEUAW|Z+y&@h+596nUe+j0*h(GLXLM74nCB=rT>WP7yD0V>o3l8Rs-D0m?g32$WbLwLy$14E~HXcQY9a;y~VBNp4K;xdE;!c6- zgpO?4>}V_+m~OfkjkD!CNyJH?FbQ8!$!|*cG_%)C3pF&OhkZH=<>7ts@*^)GY|Dic zhyxqbOdNvr^_ z;wRJ&FgF+e7I(a!+9HJ`_gq8YFD^`a4G)>heJoEOjCVWpI${Sf@rFP{pa2C8b+ArpF`>}(6avTzgz%RRvvguGG0k26; z=Lj>Sl}*$1tbz1R2f8HwZx`cp?GNhHmGVnmFjR9EQ*lMSdL0EOC-6obi|z+NxnI0< zD&KR&d#e2Krn*-#mZXs`(di3%`=!buR&}Tg;Yw$sNZnW!W}f5ZeW7HivqD zObKJz6h3>;?$xt-9tZ#`>jpsV(G14}K)3cAz(nDyBSE}yb23`&P+7VQ`6?Z}2&7PA z3VDevC-)5Epqz&nH46qZZlfo!0WWj<8QfbhM~dzp;h(&+q)&-c! 
zzix=Hi=?0{aK+AJS%2BbCqmu$OxEJ{^dYL&8gT$Fp>mWbVxjkiV5h))oI zB5A+7tXB;~*on)Qsf#jZn}E+wda@*vO&p018;b0FHL-ANPLL7m%#1?~IvE z?=2H(A=~MDri&Q8KS={~SEaj@*NgPv*qGMrp%;5%(u0cr6q#$6H8gVaCMS0}QXrU( znB3v2aj+gjMp<`8T3Bf54%^}zxPD<~J-Wr4f(e~Lap?Z)#B8^e9}~Y))0`G6b6pLa zx~~~FWW~tkt)=V>DJBH~zQ!B7i{iU~nE~dX*yNv{0fK^Iqi5_WPh2N9F=o;#w{yT|Oj@$y7@Yw~>6i70 zNqegj?jzQA<92I5AObyd?o;c!R8;>R>n8sNcD48U`ZmpP!P*L?PAAIrJyORT3~zlI-61tvgrwkP`R_{F#}m!bo6 z{Ac@fy8?cdS|c^j7Q`X_5h_tALsnCILE!{GIn9Kl=X)gI8)tMnMg)-oRQ~Hj&d4A$ z2mC^n*6Ami)Fwaug<&rMAC{e4NR6tRuLLzx(R1<%m{0zm$Lb?*eFF$sU58Q^zf1}U zC&)hJ!3UD11FW^4|6Bz$EDnRmYGG*k(07w2L_K~=8MZQeE-})jH{fA`6eK}?+ zWcm5$g~6~!_pnv=2}XSvg{D&-jy*5lb2zm1&P^g6Bv~j^pd?iCaWnilZo(B`rXXq` zcG=Kdj+9h!5t|;5I)9dpdxqaRUL7vlNm27YH_g=EeeGm{+=Syh$`(=0EU@>jSR-rf z&4zFxR}4kkN!7dN(~yyOO2>y9ZBXbf+T4nm#8Bi;47oDC)krGF*5R@3m5N^(Lv-9$ z>aDBCssV9(6{#xCt6slG54TiCa*b~uMSCPfH;w{+rTS$^O{v3H0fhn*v=_X|X71Bx35yqo8(u z9l>@<&fVE^+0}3nefeWJ_d)_P&_yo$Vxfmbl!czw2M$n_k;oGIGhExx$^^J))SHQ1 zb{(Xl#S6vEHxVXBqSSAR7Ba7BzZnz~ODHz({f10jwEpX}+G?|aRZ0~8tq4yXREtf1 z;=Uk*zhpFL!5(Y#nk}Ou4=Z6V5=gqxepr;?8JrA(dp%p`04)3ID!W@3)X+yV=TuAIxgfU3IZ)Hy)&+Q{ z`dS`GQd>5kP5fc^wotN_Sghq9e}SyjL(7|{Uy9%%8R z1S2}3#rv)v-<|CI3v*yhVD>iKm1c+%%AfdIOie<#>9Kb*|4$auKI8qZdAgFJ0=>tH zA%u-PS~pCoD(hHco!Cm80a{A%7$4C~qJ)waX}wale8SF&+Xx61dLhZ*HBK(oA;-ai|%n+Wtv^``Y)=SlNOa5DmP% zdxD2YEFd2sO9js0*iqak0Ky_@=9YhK#`(S^CftL#>HATb>+TO?=d zc&FUW($DtmeD87T_!>kiWVBfuv&LFC`0H?H)Rn-CJQkD2JN>DSlu|&0Q$=ydZb7tV zpt9A%Qo894S}TWiZR+DJ#a%k*pd&Fu;^3j2^Il4(SFTMygU4xxMgru;TL?ut?Y@^1 zka+l}g*^*vtpS3CNyriV4pG8SQ+E$}fW?$M#PTc4E?q>9rQ(pkeodb~u{AvmPXQKY zfuh2^cb9PXuL5Kd=MGS@QY$IIgPtXSMWTG>j3yVY&Cy?KK9%)BmD_l?D&{Jpfs{wo!9(ZM0{3PwiNY#y7J!ps z5^;3WlTd-XqPk}d)>-d0Td0DmGb3pm@(4$f^jx}M5}wBD_y$+C<@L7L&)jsnev=!N z;#niL1xzmWjul7ZYj}BUay=$7DU5)?O}pK+Zrkpi6Ti#4>q!@>?>$w~P?t*e%axA4 z;f|p$A87{ODOQlCy9eeZ+tW_eT;z6UJO-6|tEVf-)tEwkCN2~&1ftTek+%SFqzqsc z3jH34ipRNxSA)RKX%G`H*i#5SOIgO#P-tE2D_7Siv%dErMLaeSAX`whCUbN0i(F?r z$14#h@6E##kID$!4iN3xgkhn{*9EHKP4~d3=>#{wOX2s2A^L}@KFd(*vNU$F28brHCXTO 
z>jFodn@L2&Nrwj9jlKPEYHAiC)vwNg(mpGM1ymt7_-xFzNzr{}# ziSnQ(G-Bf8haxyDj?XhQ^41+$2X>kw$acuh<+|NIwgZ*gmH|fa3vrpg=B5&2{kn8N z#+W|fL1RVp+#|!E%VvDg4>=g~3nQ~p`tsZ!=Xi?X$9DjAh&sakk*EBEi7z0QO5TCk zi)SD$Xjo5M!f=nQ8A3cQ?#iFRd{& z{A8GcyxE0723C>jch6jd-p`aQQ)J$NL~eY<&vq!+%oS^)5uD0jh3IN_CZUL+m=*4) zWoU}J80>YFDI;Hod#ZBG#Ua~-2r0lZCk5O>eRg8Dum1#9NUQ8-ygy*JLheN?XCM!+ zJTY=8hOuViq$SX#NwkXnZu-G*#=QHHeVnn{vR)Uj4*7d?SKJK*sbWrkF1A?nt*fWZ z3-j86+KR>W_1hPO-=aPfEWfY3>wc9iPpcMQ|z zTc?6Nl-FQE)mll{#kdr$T3LWnk~l^G}3?PN;N&f;@lVJMhHz}4XKEEN=I ztK+jkfK8O-MUHOG&_7rKS5*(f46ZoI=@AX(b$X#7#4?~4@LI0z3fx)BvTS-+_cOIiohi#6=F}wiRY{#kkd28W+^(#*;utiuUzL+2q+9oPKAULQo?Nj% zN8%=9B?<0a`~w_AHlV&cH=5EE0@eRFEXg;-?SL*QmlUUDyG|Gy%1S84BLifwt^tqT zb(r$tbqq1o>n137Ukk5z+ywE7aVc^?{sdm^uKk|C;@$bq>#^)z;g>AX-rGffj+M(P z>>sPQ!r!G+%G}hYQ*t$x9`###*e&s72PzbqeLG`jj&O}y>9(D|pFR63Utr$tRYaks ztkH2w-ZjmNWHa-HB%D9eebXg*jYe~&c6J2>G{u~vYy#Yw;A5BoraQ*J~JlmB@ zSIq{d548i2NqQhF7LKUhZ4(=1C|~DW&alHKan-1J|LVOmD=IK zOZg@0eH( z)AlK1LRj2dWT|0$sNpT-LXzjs(=tFz0~hC9I?(iPKOLpMk5_eqc9^rE?XXdmv*iXv zI=Bod_I;O?jQ1aEcQp;%!Uw*dtKXcnN|_s%i<1l+@)ys=BnRXQH^~%di5_gaOCZ<- zc-qFgza`1Lx4J&_)HgN08n~3=;>T0Drlw%ey$*zsJb=aDJBx1^zJR~a&lF$7rtz>I zsW1=F!GJcike7@y5rpX2ght#B4|0y>7Ce)J$iChp)h*KVj4hk;j1KmYq_Pjiytli9L1xIl zXB)d%J7a_6It?ap3tw@i2_iM)W~OsEW)j7;3K)P#**=VivwH%kni`9=sBUj{=zdem zb@&yw&`y7J=U3&>PGIWTrSZ;7J&(ndJ^6!=$>)qNXG^n~DlKjI-wfg64`U^M4y{5C zn=$g@>~GMOPuJN!myqMfk$JiQA~Tti^B!ZTX9v#(Nk1H{^K>lt_f(a=3WvoN1D!_) zD+3Ir)&nnSVTYdpy;%R+;NomvgoRcSp`getBIvgJl}K(!Di3~o&TwvxZ8rNpwc=BL z5nL%jUP!~J=&rq1r~(V3WD5ktBm};#d^j2)QMVXhzESKZRqH8q&~Zhm#VKfq@ z@NUSzs3Qc+59i|UP@z-L{t};S-cJO0G#!z4sNzI&8YDo@nq{@2efj%%x|-pN`Z+Ao z64&xWtD>j7<8zQPg(eH0C1V~?=wQrCi;a^^h4kh!RDM0xN}251`O(FCWu2?Gt@oY= z8u7-Fz)~n8*QjokJzRb|TjvPGQ=4$gQ-HxG#bwi_?wzDrV&+o=R+=eLUwVcFTI^*} zPCc$~o;v zCX9%Fl8r(vzXR7;K#Iaqs}t7iS(8Qwsb+$U9dHri|8onCk*sePTGQ(J!4aq)TQA=9op4wecs)$<)M!waFD06#l5dm`gM< zYRj)J<1=z!-@+^NIR&FiG~U}Kg#de)U%JJy42T-+J9f8$aqPOlGYM5WO;_*?U8|ug 
zj0W@_r?_U`kC=L3BG!TpziTzr7JFa80|Gllu9!2<>GR^D;*X9J+@-j*qYLaW<%f_O zBD&RXdcSs78yeO)m4m}o3o#o$EQ=n;+wYJPFcdy9wzLofx_!52OCH9=Z)#7;PxR+s+R1h$QWVI$>O3trRG z4k>CDs&!mlV_0_cSBu?F%KW5W?Z%@j0IhJZ8fzRDWDfjZkHjeFOK~ z{th(fw6{y3B*7&|_qb*{70er@YUxy!?49i`KL}$V~Q5T-Gd4*a}>@n=m>B>eI|m5O0c!v01+43d-$| zbCR};A^uzhn1zT2g2g=yDs5gkL0IJ+b6r>Rw1<_V23v()9ibQP$TU2ffrbx2#mFfTE7iTo4d`Sjw^xBtvHXh4iSxeN94WuZLEBo=uozB{0^ z^r-cc8L4x2oS0WK>vqvDYzd;+cIh>k!dm-=tr8~Q-xnsVZcs<2@D$TvoU7=Bge6^K zTM=eqD+*#7EgR|-^f{c>Syi-HaiUV!Oc|jk<~b9!WMvFmmJdZblQTkjQY+J_NKjItM#n0I-n zTDMTPpzmgiH6d&u{YJXCvq*@ zKNJ|mTFXwy6R1B5JWPO%z4_{TWnFE(9987RhhjUKf_!6L_-^9vqIm@R{zQyVaRJ~x z5f}2X(m4Cx(KtM4Hf?u3Etay(e%CZ7JiuS60wc^*YGgCP{RDA5ikhomB~^i$zJ1#O zJt-H-WU*edkkmR;``M7v*0o*Y1)4*Ih-=+!Es6JZ#rxx3D~5VZNtedz<&({-uiR>+-xztNK+i}Sy-b`SLgG%OYBSVEs{(XKzPboS@)+vue4#NzaBKOY)?-u1Uz%;#|u&A8V{j=J#`v zKDruyFiI<~B@JI1bJ{W=xzyGRpSnA?yR6ZEb`0GSF233FNbRiTY}?Q^Z|`PJ<&B=6 zbi+dV7XKEvbK=HYItn_Z2_~aC?BMmSKnLFHIP*ENJ8XABh}myh)X6Cx*-~zI&x_j>#HXymWj8=Nd2XkWj-lh0-|!STK4V zeOHun{<&mq`2{>Hz9zMG0qWGo?7@-}G0k)Ph~&-F9$|H4?)mAp#eyfsh^@#%$6~iL zDkcb@NVF`%;auoZRg(s>WdpYH2#eeGe!~xfo8^-0Jx9Uk^{nkIYVnOnORAf~X z(B(U#>pKMU^kx8c`}2Io`at2She?&|RYg@=4oid>bC>E*bc$qIM0sMOu-t821ycr* z%j;uTrO~7-I)hL)>>WYwwuNm$OmT)!g85Xg^au$>&QLSmkc>M3AA@$6`b^WbSoXPY zdgRd4lLsHT<}@+oU`*Rx2QuZTHm_saHH`#AJF__iI`}GF71w`db;5TiHb0h62I;%M zv}c30eMoV2#*jeMKnk|1bsbUmY2~FVuO#CX(~YeH24>s}rrpRaX3@R6`i9xZ5BNo& zi1#;4q*WxDL$qB#%N{qXFBrZr8fuyCHgq^2}mM4%fNr7lu=;&aA6Q|dPc5}MU} z?i7lt=`rpj?ANNv9sXLPFW8!o711$XsfC}Pu()0BX3F?xBFJ>ulkv)>YBL8xYE#^T z(bP}`>a~5;P*|_GZ-j)iJmI;CX;qN|w_9-O*speCS68mW{hw-lRd!4c{4U9hWsSuS zC}QMh?39MAyTQn>F|UBRm%Nd;>dj=AlfkykXjF)H4M@fNv{_Ct#p=ioC&i~FuCH@i z`F{K``_3}6+e5yX+QOGgowPxWz*{CvhgZoneHnx|Vt2uDBLqm?uPoQ+M%h-}e=>vAM9>lDY;0gdkW z!?qV%7O=V57%Shjw!VR46R2(+|C%`Cs(v+tup~n~r)`otSXW9Dc*t z%j!1*6j%m~tMQ}rQ=LI~90ZkD-INX8#SRw-P*fggaIwb>g|{c{^C5h*uJ$!MJL*&$ zDq`-L+5JmT-(23>KR&I%SjIl?r7N9LyebiFl-h zjWkFjQXOkRpi9KCk6m6kMPby+=cfUQRW+DuH8>{X;ia(}@4*)VrMRHbB47=3eJ+tb 
zItS1#06b-)mCV%()5vL-Y3W=}Dmm{l4a7(>a~=AfFDSt%6V5ovr)BDH*5-OLKaVRW ztX=Dj)FNXaBr_Poy#<2RO;V9S&~g+lGkD6C6LThu|DV8 zde2uHhkDzMa4o0&Sh@R2w+8nJ=fO!)EKI2+Iu?$aw5y?}obSe^y9qun^4fV>j-_G? z<78`7CTxlAtDGwaffBdtyK1!v#K&01OhUU}Vi6bX;fblMs66rv{6T_#Wib|CDc9;w zvNo>dnbnhjjzcZFWo`tbJ%ppv?Nk4fO|X$FDuTADz}_}Jt)eD*(T;JqjPg-BjNgPM zBXi&@>eHFTvLmwUw}hW~q214f!weXS{$tE(?4C7<@mJwLP5QXX$HhDs)=;IG>-uwM zY?M;RYD~F$C@$%kGd9PM=K|U>v6|t5mgVq_T*u?4D(ZCV*KcxzbM)Hl^@LwRaCPoe6sCCk2`wOmsSX4~GY7N6`~HO{sP< zs+CVf<4Bh>=e*_@0pS=HKJ>CV>Ski@Iqgu+O2fI##F~NDM-?fzy)rv}NXJ&0X^LiF z_5xny=(gJ8YLR&o*b9oYBF4H7h>GsYu0fVnjaI|KaVt1Da~qbsrVQh@gTEX(Cb- zq=<9~0t!k;lwL(Z=_Ls@fQn+F*yvrPN$-%*1S!&`1qcBVLg>ARa%bW9oxS(DXP&8jZNG7ApK)tRZ0y8`Gph~x6RJmvx?-V^dmiCPv>)7!n?}-@ecs~Zj+TC3a+A`e z@A@)MXDc)`_D+ZDv5R=FbgZ?2jRV$xjP7low8w?9rR!i6SB`F3J8fQcw16b2!)NUy zAf^?Yhj4NF+9hZnSa-_r4dpeR_k%oU6tO);MI`WnXn)6pkJEMeP#H*_=j(R{Wx|Hr zK@H=e=+aTj*hS#`n%*2*Ov^0$LbWY@b_Pw$gW8x_Wbd?MYJ&^D&F*tMVsSgBEj_r| zK#er0@f+i6hbNKmN|Q%dd07>RKwaVIvu`Dgd0eb&Fa7@LbyNn8VL#Gekop@ktG&$X z>$h)c&&~5Fi!zr5ezsw-?(Kq7tPo4L7%v6%y>I6tK_g^&8VUK0KFNP*y#XlIVx9oc zA&4Rgs;FWlV%gK=-ltLpCETH^1Kv3F(=v1(TR)HH+*-II|IAt1<_+D;tWQ$Q#=yf#nsmk7@P+y% z%`?W0DPB#(J|~-_6gW=#Wz&#Q*KgaTE@89Ka&H$@%bHDaZO)6n5*sU@&sC(^ifEt- z5sxEX&Q+ynHn`il7@s;u6MKYp8C466xqFI(msZ+l4_bCz)0k(zbW^`X=DW+-Ngaua z8~xKg>FRaHXQ#OXXmCQ;?1y$A5^Pq_lkE8pCNms!UzW%*MTEi}ukU3S;zQQap7ZXH* zl%zPM;Rb-+571uQjiV2-)qRXUWABxMUtVf%J~ZpSG}rKU{mh>CFvDbygIA&Wkcx)G z#=Qa0AIn|6Dtp?V+xg+ikvg;vpA?kK3=Q@Dl@V)sA!ib!r*4GoT&33dU6l01EK8c@ ziE#wiK?L!5@U6!@z4pjwS-l2yxrH=c-}gv+InxOIlUD71QT~dR&Y$)i0@~ZAsL0>Q zY|E>}dKaYdM|^kKBV#EO2&wYvPVhDz^#bQ^*)Ba}M2hyijFUFlxu zjpwo(&zQ|F$ zjqTn#zv6$RP5bjfs(7${X;0$T_Una7WaD~xU4Ff-q6zh)_@~wD-=jkn((+iwQ6J;A zXPgLo8iC+0d{(%x`|mFTE*t8$CZHGY~6? 
z4g~DgP*GgFg=e`g}0@@{Ky7SWhH*PN-otfQFloP5G_cjJog@$y34JcSlqytpUP|saWxvxt)=5l3Ccp8B`pvh z<>+!3<4c3eWxjj8(vx*&a`vgrQfDKi@fPDshk$wW&_fB1h47LBz4<&$eK8Z^o-vV> zUMQtsm8aHVVcdK5_}LPvpZVVtBj3!}z{CnYF|;1i zO-nBIpC*MFH9?fJl8prcKSJbT)3@d-iXL`^nga-rVMvX+iF4-V8(xqF&w)2bPp=gm zrAGjP{Ul?%d;Md-+^us_Z`RqS!&sxkq{(B2X2UyO$TwGlln~3qZ`J*U&#-1TnPZvP z-oTV3q(7!swtRcJQN=v>>esEx-TO7B!dGgzd)iQ02c=M#T0W(61ffourw&F6^w!>& zoO*D(lpC!1A2xR-)$fozM}>fohAUTjds>EZ>#3Tr!Yl+uBhi{yJ(f-QA0-k!s}WLj zFMilXL!ymSHdozP-jdqQzv>m2hNPiVyq&1d{L&M!5mG&MR`#@<$`L)pLHoW5n?Aw8%?QyXZT9-PIFC_f1 zFEJQe`c$tUctBuyp&048m0IZQc>@UjmRudKHW3ex<~=!=7Q81AK%46Nb+kN?L^df3 z)5}?8o~2&Y`(*0a^gGY-+!}v~k|k*tRCy*ajnDSw)tcxi)_uBW`7`|h*wX|~4XBpM z3A&%%(D+fg_5A&BN2wf>vKuUAIpc#c?K52m$qGoGz*_KLwfx~H02LPrrc@~}Xei`W zG$aB7dPa56m7hvokU0djh@fJJ3NkJP`m1j*w$4g5&iZV>FPtTaohBZ`fsSX_up0j5 z{q%0a(bL5))j0GQVD(k@4tMv3^n=GS*);jL&U2@H!vIau+jU3Ofr!$eqXMK{b$`Sq zzk9UWBI$`emd)$^*>lW~U;n!M5oTZ#cCrf>j$1AIn(SG%x#e6op?=1fyC86L&ET36 z-Q1YpT_aii;21ip+o-qIK8df!G?q`B7i5%Yi!%g&V#Yzrc^atb`Lerg`kT5$G?d)( zj5MUrqNc^?WZepn$4Z}7%Mtsz#hX*WC8AnbzNn|eHeHShD@iW5*SJlbRWA&xKV5Vx zq2*G<`R2HIFA~9hY*yi#Bl$h@rl&ipe=>l#J#L}0Ik6H;miO>FzPXp7os5?W7yyEs zpev3vv4iTUfhqKK!H?;tThoaSj9=G@4*E?wqlsG=qEp4&au_xjqb3j;cwnV)*en~n z)WoZV`~Grcu=fPf_CP{N47Y0Ic~Fd0ts1QyA&5QUW8PK#nM0Es=&rD-%shidwz{Wy z1}Q8G5Gn6XYz#z3{F#@F{~QO{cfzv(9dKI)xY3<7pZm(AdWV7G1-xMAT&qToNzo0K z+{aTNpWRwdx7XeE9H=09F_@r%s2S#&^dKTv9Vcim>bt4+WHJ3*YNAMv8tkSF_*+DW z$J!TYBjwVq(z(Rh$@&WFF2+ME>U1Nw#RWipG;lSM{5nzT0!%+ovsZ%ckjpHfDC%|u z#8G;Jy&dpSpr=`fv+QQ_m!|+JDkh#HFe%$QO&k7@N<3y#?HAm)J z-BRs!jXMe^0}q_%^BC+a8ZmvVY3@&Rz%TFXx1Gs}8_VM3YTJ$DtA^{6I)%6;C zipSfz=YYy!>IvC5b)PEHM~qRPPM#%x9L-&OQhPfsmq94VIj-|OENUQcZs~Wf`)qV) z$G|WsVE6j~iO(V!$)KoA8Q|9*HfL7@Hr75@^(2^y@nW32iZ7{(>$^!~g=O*;j-E=$ zaOn^rJqa1jB+5#94HuFVFrC|n*1znG&&tSRf3)$Mt|}d<@mTJh`#HM&mhi^cx!*$; zE3cYlFDfZlzsHv?nFLmT3See?uyL9Er0s;nF=DYZuk>g2F1bxkCYSJHoHvR zPSP|-^4f6*9EIu|wDWdes&uxk>PBy!hg}7hEQZW5KhI@Qg>7yku2^?p(-P90!eCuj zusWf20ty)qske34S;6#^$X-j+X{*Dp_V(Pic*g~8M#rkPSnaq@_mQUX%djK0#qZzW 
zxV|(xKN>DzOJscVbyNY#ZRFBxSr648ak+pZWeaErPT2ml;K-V>S7-7x+g%_S!}WuPY+Wq!3b=`+=PUZvoR0}Vp^4YV4_II%jYzVk0pn~^fjD!SaEmHyWHC&g3i}_ISf>euzJ^>q7-7-^1YUc zlZ(wJVQ9xqlc>yyN_xM3?)rcqZ=M95i(jJym6X*XfdD=Q)e!=$INzU2KZ_V^4ZwLi zl+<@>Zw1qtN4ciFKgT_HP|&^UX_a?*&oFkn5KB8D_LkhPVoPTd(~GcouW4>G^|?PV z|C!Jgj0#Vk#aFuCOO%&81+;49O$e&0n!FU3dNx&_ckfAMNk)b}V%_RPAQ&ymbIY zkrla=MWxPO-<^h)K9p~G<=*_B07^j>Q{Jv0o(vkAQB>8;1QiT*Hq+ffNayru-B2Dx z0pNmUCVu+t@-Jlf0xVI1AXv4`{+a2|GhyA09wuvaA2X5D_yKKY>^ z4nK2H@ax1QuX?s~#G@9O;6#4u5KYJI?CUEbqfWIa6g#g)yM2LL3Fu?&L#~;a0jI5z ziW8|QDWy&%lfm-9CBFQf4Bz)?Bj39R&E_shZmOjSgPB#rbfFy>&UN0fP@t#N_k-=T z;m84nq1}V9kL$p>L7?UoFtr8adLcK9wYR}M8w z{`^UcrvOR-l3ghf=e9s+W-ugnTejliU7^G5V8`%#$QgBMcW2Y`oL39K5Z1fS3&M3_4(Jg0*;pS>DZ(6$0fLyS!h2&V({}1W>hq zgu}SUUsMI~EdFLRfb(L6>e6f#!YbexCpeB@f-xpA9QdInb`4 z!z_$9fXHzlv|0#60>R9HaL1Vp8RxA={ZE*OpaguMg*Nd5f@&RU0m9aSe}aSsEqki{ zZyx2!v;8jp1c)l?5{LgsmFH77{})-%OvkXurYEB7uOf6w5b*yT1p`qcwr{F{tq#vV zeiJ}4{&28vo&$g$sHJRa_#bsmI|)1A{x{6)XX`^y>zI_9p=}Ks8hAtpp~VGwbZ_`e ze?5EnH4}P&pehz|;AYk9wlWS5jymTqN$`!De^E(Nc;p$MF~U{~AS{Qm*KE=4goSQx z4KTdHG)4YUr?wjbIifzs1dDuf5Xj1aTPlmIBo9RyTm z4Jxev0rrgO+2{8Gko+G&6hBZ;-jLqj%X1UZ{Lo8^-SDjuRBb*g{IiPY^G|jgKci3V zTIWQ6vUJCAL`D2kq%D0PdcO79>+HuaB7c659TT5;a}&{x7L>Jcwp6T zAf5Qv`00dP#0JHiFvv#5BV@F`r`%mubkn`-}m zX4(CHG2joGPe1|l;0hjU&wyG4gx;#t$NoA+^@U>-(U2C{#=mR(>Cel77%E{Qkv^%K&A9S*r*ji z2zQy%RDz%?*tkQ-Ebe6b)VIiSxRMbs1{kz3SXk#!g_pHad{oqhNoP>HY3=$N*eSPf zXg=ah&tOBXCT06{_k>`#v~*>srj(3#(*EO@?JoEtP`{rKRQ373d%G@Mld0k0RfajI z+1}s(Xdnc#%57;o%k}68cb1A-2-AVPt4`wG4&njidB_>V^X@4E*K_W!9Ls(u17 z`83Q7V0!ki6F6xh>Ftr{x#a0NRE7NEwa3%lQ|zz1?)Lb%v+U{4@`L(g7?IN6}u zO3)7)`u309@njBjept0x{XQeM`XEX+Fe)kvOs~O_u|MmD!T35RQ{zoU?FYKOF(587 zh99o(d$MQO`5uhJescfKE5-67VAVFozI#4$AY^v7#Y#0pAu;{|{F@{2(*$2}v$?;< zzXjSZ;S>A&mi*4Rt^MN;>Os8>8!u4ZyGj6>K=Yq45(&VIe;u}c&}l<;P5QUJ0O2_d z6c?TO*XIMZXe*=P3NN?+7z6hIuOGZ5d;cFaAA-+95NBwpNR9Kffsoky!L8cnFH|lK zvgd(rC>RPWH$4IGkR+1hq1F!oo(KG#Ap!{31_DGsFtFX)6X|%0Gg>-ssv)zwi!99^#pFaQ?^e^`BeRAoN5sIv_&4(L;b1u>@h! 
zlKCI8?4P>v|NYB9;fDlm_Evu_0HJPD%st7cqVsD25$jTTVG-UQtLbfJo%anGTZ zR(oExe-7|}U&6mc{Xf4PH2{NuCKc4?bduB4)5otn{F_nYKc7T68?d*lUH%&D_wVSR z#$7?Jz#G(l&;JCC>Vtq#{-k~t5S5^*MtuV)h?8WwIQU<8RZeMlKqLy=p{#?h0A_x_ z%>HA{u8s;n32ZcCgc=0jLiU(j!=6ukbG!`Ky`G14?Uqnhb63~($8)2|U)}o9v|q~% z+jB{vy;Ohc1Oz&8=-DH={KL%ItK{UcbE`&kpg>e@j|uVuT6V06nX{z>d5>6 z9yiQnrP-Lnv=E(x5IKH~YR%Xevkky&O{bKYwdQ>HDB;vH6LP|BEQt^b5rk?+d}>O{ zF%VP=ft^}D(5t%uQ@knFqt$)=0RPvYXQ2cjB2cvYyCah9%;l{jz6+Yqvbq!OOF{w1 zIuDQ;Sk%`2)S^X0fh2M;dXEE2&qZU_K{Z&xAdnceR(kc-vg$ynp>YZz4IPK-iuaG) z-)*Af5wF97J>lOt!OU=wgKe{s>((9LNmX~SA1)FOtU5mdKZY{z&1O@FdP`=nnY`9@ z2AbYHI&lTzZRE#MX^$DVG#0nQ}SuejoWuWo`(IBqPrsal>RXe4AM>K zsy6Dl7SP~c;q?4ZXS(J$a`~O=X$DL%KkMxKKk5!k^tOO4J)^L za>4)&DBpYC&DYKRiOla)A&=KkYaVNuqo?;<6*xon1^|~m>|QX~tpNJpW&qtYUHepLFbB(WI- z!Rv0K_wYK#v<&E@b-_27W%YqKTlV`6Z&XeW;QG5jeFVZO!_J@E)Gq?_2x#W{&#_m{ zTAoW;aR$tmN(Md1v80Kl5PrTK=xsy$>1x)E{*ovj@H))00HqJib%v^ZL5cfGSp8ir zdEENG`Pfc^nFH$4+CB$Cj>y|~qx$}RXHV8H#FUXLD=5C1~1~G}9)u{(D zEJ~>B9}CC*C;ItvDH<$VLczTHjE-WBKV@y!R+1h-9RE5^wL5t1tr%1kQck z8Trl7168=l9^elD`!fBUFaD^#_4BZ!bobBtU~1^I&=E*&F3+W|IGw*V<=k=QFA%bx zjkdP7aJFM#ZX#GYWbx+K_t3eQ9nB63U=EP{-RFG3OoRYl)F$X5I%r^`2=U>^J;&72 zAnsNfgvzl5^k>UKOD2eOHa-kuu7;yZH<WzL|ZUn?EoSO}1nUMTys zinLW4(v{rXUXEUm)~RLlJ5;RMWge6{%9%Utp-sPZVl?U5ltY^y!;-gf*Fga9G|+5&K|%p|`m;4vHuvD~(oW%rf? 
z=ehme`Yppl!55Td`p@ku!>sk*2_`8_Gugm3xWxM&;I8u*+TxX28p^-|WYnfJ#G=I3 zq{hDB1-t>}O9}~}+LNfC6=xN9GW)f@3eHOwyShD$9ylmd3|}{#xTBT z?)+>e>OoshznZc!BaXPc88v*WJ~6xQ^FOUuw}Sa_yYUWsN?ervB`{bjo%c0}$Q$R( z)H!53I#eXUM#oLHF-S&~NCS;X>>~SO!in~Q637aQ6yUoN&cT4jHvsfUGRAP-0Yf?Lyx-CrldAjq02gSToU|6a-*GTH+u;fToHyW&d# z^kBF7QFim=5=0)U$Ltp|NskMKT#4Uov92#xFc*_flVXQu$n7duFiGCkG|PUcgSD&x zw!(3FF-V>m$CA%=3&{c)Fr)NmRZQnjEcD($)}7H+9S*~MXfvtB8o>C9wzgs}>Y7fT zrFnZrR@)UUP{znmm~uVEA~~qc*q2=MWPG@mT%E3P z!urBanv0cQPs(^g2Fz<3i-YL7R3`+h zrpI>E*?l(_{HUc9q&L2uaF7(Yif=S$u2C z2bEK%*g_h)=$+ib15I*dynK(-Gyoq6jxx83BF5NHFkXofAuI!_42X+-P_g#P`jU5J zK3R&@>wbTBj?ZdFtKR(8(YPuKu8NYdY=waG4f!CSG>Iy*G(}5Aj>r7i6esL9EjDgO zlZx6$%1!tbTQS#Gr>A%nW#$=<#n`KET$Fe@yX=#Q3Yy-%hj;o^1Y_?AqSrXtF%`K~eq0itYE(3LlgyE?*;$05Tdrus&E7ChOeqW(=}_6zOx3AsMcI~MYeEIbK=^@6>9E_sA|lP^_6 z9+Sy4|ILm$2=KD(I3}BE3an3~_+`>D#V2-bZ(e7sBnvgLdd;l3Kq*Kr)d*Ce_Pc!6 z0Lds1BOLcZ!6AC_=xfO0zNPT5NyvLl_jScGi|9F?+8Prb8s29kRaR5wSxB)3@Yb!&8GViIA5dZ)dnUe{neiDz&v7JnlK$wE1CKsD1A$#iiTNv8`&&g2TL3 ztJc3Lo3t%)t<3)GlYBS2;Su$?>#G9q`qtNS4^OeLU3 zi&W{+vbVvuXW=QanXnlI>i+ucyv@GL; z@=aah%_(Bdtfv)K)#km)Qc8&N+A(jY)0bO3ms6Z(kM$Y5`K+_9x(gI{OY_|kxAVQt zS`gKSOYWq-AAI3*(44LIGk@MwDSWtpUg5ZT^U{MoIfOcP&c&nCpEn^BBm}%$_=Js;h^@h#8GE6uK55LhpdG_JmW?hEM3tgPQ6cgW%?QpOrkbG z7iGAXP;DgtcoMBMJ;(3Wo$WGerui<{!IYAdOC1!X{_6(88>FRA7@@HT(F*DP)@8nw z2r@##VV0~&yBg~?S7wQ@39pr#p4&xv^hO&@JGWU-kcAloWKA*gXjB_&*NV-}Hk-V7 zB{k;ysKf`dcJEJwnCp<`>2~FpA9W4ZT#KvX-1=qvbCQs!?VJyDOJe!ElfnY4XSyjjQRoX}lg`9nX1x0K-4#_GJ@I zP$@0AM*fyC`-U0ka4}y)30L=D=Wkgi`J^c%%mi%{|`4 zh6HtKV=$Mj8aA3*hnNW>(-yl^&Mmv``1csfy8!7gcxTlK5=d4ouUzjOS(a$`P}k=p zE?WTkT}6AI^Shn>Oi4*rul>_x(KKxXdDT&Rgo@x5^Ax1?w6b0%T200Mib(33zrEt5 zfjCc^>v*Jf+d$%#={sg$E86p8w6HHYBSbT^Gljq9QC%kFV~7pNLzYD!e+%lP*DQSE za=If15m@$&nPn=x)pDk_%yhymQ+L0!-npj!V2|F|Ij32thhbeT#!_z_?ks^-LL7=H zk+T#5k}c(%vF2s*;T}UM+qewg$2O|40+x%J?LBwtYGQ#Txz$6v{pU=`_K*lcXx7IG zfnSCxhMKuz9LVc_F}9-%N>nh-9tV8c*t#+?66EZ`-Wga16SuyG9?zleoc+AyxZ7H# zuNfg2g86lbO?4UX{CnmR#(B00Gle8_W~ddR|Nj%_W8|i 
zWdw2q!-!GZGT`{_DS7Ao<~!k$qLPYP`xid+kDHm|K%PfwTsm072Y$Hi>_E$s4+QF~ zw`Y`%8?Dk~FXO8+o^Ib6Y!?ad;Ue3W5cJ}B+bklk9C$WYI_U5JGAO_p4mSW{UB9)x zPAO>1t%k+Z}x2H zH1XFu&w-@wURt@}Bs;_P^C2z>x`9f*3@;;WLwRs?oQk~YY=U*XP*DLR8c~w<$j?rb zyG{$y*qS#HEY+6JC84S(^`f}8x}UqY#@{2`4QD&0-1tzR_L~cg%aF4ENVWt7#`E7J zEPBf?T(%48W#I-`tA-UkG$4QWX91K@JYNPD%JGyMv|Sv5(p5{p&sCL{Puv!W681Y= zrg6{=4(G4$BRGha7%=QSiU+`ycH0`CB<2RHVfwHid6~x4xFaml8{mKGZ3NF$IlL2e zVOEqlD|p4`UG5IMS`EK|`!^aZ+PbVy{!Z81M2*K3;oV=73=|)>ikcKLbu$)C8?I@H z`GvKi3hv%X#e^gmLf#)?1ljcb29|ej#|KWqQJfzgJ*fl zyu4YF;T4=*35d}T?UVx1Axdud=k2hGwQ|JU+-mFIwgDe-M^J$*oEv}kH9y`2X(Yvi zwnY9Hqrdhjd%IPWIrBhT?E*Hi4pY4XZTT=Zt6E?lXJuza04Jkj#eocCBB02ECk&+|_k2U$4iD$AdI z{>;BW2X1K<=u8VAuY9g}k=zHNNqGTr?3-M(2SDl)m`4Y>o0Y|1Mp{kFhmj+fBkzVU zGy(J$TGQzw%O3qBgKP9uDiuZJlJv+9p0VRcMbftA8@ z2d1K0tK$Pz{nj6cj0JM^XGdd#gNTCWHOaGN`8CqR!yiCw0;9sU{p4u%fD1^|J@C4$ zYojtT%QEk-6q$n!M4zjNMhbyZtFn3~?lD20I}vm?a0KnNbhze7f)ExSO%j4fgBF#k zYAwA5COWFQ!VP(ZC%HZX4YEXf!bk@QR9J~@BEP4EDE#zwy)}|uQec;}YlqK^yYB@b zNYbNmRk7%SCmEH^PYx=VJ(8mTK8f}Pvo<@jc(Bu464Qdg^ta3xtT}bZ)%knNJ_`1O z)=i)~W}T7qlE!szyc~Fa67pH+U(Dfpc8v_LZn9kF2%MhQmT8-H_#`oIkNuO4tX2&; z9RU-$FXkXu(-`m+o&w2V)iCf&Uxsk=mAI7$Y|9W3T)&q+fVtB<4eLO3AAcFnp&4;e zUsv7=zS4Sj&K(-}22%-7u02hMvL`P{B0P;@7R&kJ6?SIta=(tW&S9*ka~sTvJElqs zC9R@I7<==-nJy710k_{Jvo5E;zCp z)&-%6pF7xwbjPTzX$N~@m33TSn2cLUtPzx1qp6Yy;x@i>i^@%`RjS@`)k|%B(MY|g zQq(%?URkj9g=XL~FxEkCHYLlbuZ%}K#_f7)YF-`5cE-=D;)hbS0+(NnPOyeoJ}vC8 zzv=e;1!n#B`ewm-xw^TuIp%fOmIU2MSbhl=vCuk0+E{G zY*wr`4pPC;>^E;eUv?@orc9mJx@LhSO2a>sv(n9_9 z?1@~EjR+S_%luN16*;n*Txm6HQRaZQHD*on))V-yG1hN`ugd>oTWY zODxQEE#49R%3MNavFzx^0$=95A8Aq5vD(5#HWrJBcI9tiM|F?J`MSo9dnHXE*q}wh zN$f&{o6Pe+@u90(Hpr&!4*SlP)6!DSh3)l+}#AbTt zev65SpgMLKdM~W!YL`@Y$L!ot&b61o4Yca6mm=E<{f}gkC3!naw8N{$!y=U+ z6zd!s-fzsgCu{sOJ!Z50N3V{)hQRut_LDS61;fJu&qU3SJF&+9ux&tHv7IhIoSw@) zLeQKgJjQaDIJ`sDLvtF@5q~{sDX}o1JdPS$y-9KpYyMq4=9C$qW;wTQ(6BUaO@&WbK+t?3wG)RB+tL zr=E`Yf)=v4k*rGdnz>PeT#^45Y%EPd8_+@)1s2H3Wp~RcmzK&Xql{2e#hlXN`0rW= z?VcCs?3BtKC@Cuybg5fh{&<~3PnSazdTl}CVzSv=l6fsO 
z_rCmtq?e2IeizPuxR7!rLs4I8YVu`Mn2OkhCQc;#)684s^%&bK5(WO8E4N=shCeCT zBKuqI8V|Xyn^vW$_8HN+E8T_--N(_Ny|fQ|U{45O>S3tp3`7*IrEw%yFttn$VSeu4;KIB_ky8w5=M)jB7bk5_ z9dXy*;~`$64tk=l`DwnftPfG#Z|!0Hr0eOQ4YVU&k?&r2u%M)a>9f6tmgDfq@vRr8 z8!x!bA~b)2B3hHUt?pFOb!Z{=fORc5K;*!;t)~`}CsLTqce6t7mSlq^ethKIV{wNg zC_SSYNt}gq*KK=tminaw+xJiyrZ4D<>88dJ|4Tqx*600hkfqDukj~~w%%@oyu#b{j zdvDgkRlhCarml^$duGK<;Gt~e?atRZER`3MIezivx6V3awQ9YaPtDk=G}?9Kr9bV7 z;Z5g#`cg7%3tFx1@_XMz0eaLjAr9Yjw%Bv9fUWX{C{VX)S3XM_G6Uw0_ z!d&nP7|K$3m3P17D+nMcTw~uq)O~}n+rEyAz0jal^ME+pT&ccbQ*71)R(i<2nWR9v z?4=8m_(~PCLa%b1T&`9Tp2dT%wRcj0{KyTb$RaHLrtDd+2kW?Ap%lls#0UPa+G1G| zQ=d%|*i&;=5Ohe9BYQ?;_R}%2+WoD`#cGjT;=v=>JI3%m&hyK)vfH(-oQdDi8$#yc z4tMx%xND^mY>iOCDa-U?Jt{hEDFP|=Ei2{3@#PU$vCOb)or7w`_u25y3V_VCN8HRY z&z43cW|qmL%S>4uH!`ZWGN@;hWmYer96$_HA4Z(}%ydC{Be5dB zATBe9SadzHbJHJgt0!oYD9nn*(LOJB>t2?z2}-x`=v$T}d)qWEHlr0TGmE5sG%!(6 zn!MU8Y8PYU69$z%cj2eE&hlzTtfCuEa)0@B)7(MhXe!HB<-RNJA>ATrhfIqrnAVt1 zkEf5+ART9hWJ|1v(NZK@gSC58FE1uJD3pXy;tB-P4SAQ#mQUbcLyuA1S7tx*E1aq+{saxe>n;KPr~V@GD_9NUMFl(TB0v@n!5X zJmx3^J-qh#vDD)$m(Yi6ul zY@t{3t((9?G&uCSh+;HU-YmJig*R^Zer`$gJ}O^^_&!ZyuB=<@8!;Q*-&?*Gm)k9`(g3fJr7E!N#Sdhyk88wGV~aK&boBgd z6O(eIkHWDQ#D{lhiGuxhay3^Ii_m>nni)w71Zjt2%*PY?IQI7r5e^a+fJGWBa#^?5 zebQvT#Trone?i)M*fU7ia(ivlh-JeiUQ6kbo0Pz~bL}(#2W>$P;^rMkJ2x9$um!_Wz2^Jg#aW6+Pe;MFxj!_wX?igCmlf^jqUXlEr#y{b zF_$oh&p9^>H5QX1j?=icp;WD;a;AjZlW{ZMAAa>g!o;)r#?PN+iDZW7K6>^7Tq%vr z%c8Ddf+CH5p|h+vf+g0oD7Jdv0wrVi1$l=wcs;I3O~@mv~N?j`D@4bzQXcTP^#ji)EP&A#}VtjS4=rU;vfrQvmp zaX1a7eqYQSewOOSRN60x2!SHHV?ApcXoy{;pcddN3D{L*v{rw0Cmoh zp*YQ7JY>V}sKan_@7zoMYjw7Nq@S6b^~?q1v=?Yw+)!$7khmx&{v zuYYyB9|9Cb)<*M37^oR7x0l~5vFg{WO=TM&D~JW-FJDg zx;Fii`kd4+iL~WD8#n{{kS*gG+$WZwij$q9ZAG+9N0NbDaztc((#d~?&@y&mXfy3Y`5+xYSbsA zt8dG)W4e;i9kaX#u-(`WPxA7h%7jC@e5vaXH2HeGtAkJ+2c|_d%A+!WRwZw6lew!h zME~nRBr}mN`z_<{VX%1-ueN&PqfR~(qI?)K*Ew7>7R9^1;^;Vik0k_o#+Ewo2nu6iZyO@7 zE>^R)`A*RMV8H0<^V3J}>fU3~8myQV1Icq`Hu%OWn8|;2X0D=z!88E9Jco zzs8}Hf#g!neNs7+CtXP(<_7l~D??i@QFI?$OyytXc(mF^-82t%WgjU;kZbQzpTNbg 
z>QflJ$JW?u43kcqmAKiega%v$8%|5oE*yW}4Qb_MaFl@RoT4M|9Qnp%lvuVaqa zYRDS*v30&No$wA=S*`NWUz)y9q+pB-b~q`*;jQ%k; zgyYQtzhq9;##?C|FT$}*Cc5%on>68&*#CHJ-qVoH!;-_kogJYl+>c_(#&79jHGA1QZOFE^(@HqE-s z=ZYY5;tups4PX8NEW8Xd7BIz>Y>H&m4gE1w`J+vlh+Y5Ex3VCJiJO^4K z}1AM7Ia6`%gRUl7*G0CZ^ z^TS4A88oK7k^Fv)LHf7@^$-R<49Ev%<5|VZjiz3WW=e^CsRXLgjPwyNu-1<94d=Df zuZ``wm%Uz;gPD~hj?vF-L9P+{hfLu2IX~fPSvl_`+a$=JlDla$liTD*(u=*T95k>v z)S-9H$%Na2ER$!GEQS*JKf+sE&vZ6BdxK3tl=(jMdM+sxe2Lupa3+Jw0jD_Gb{^uYS%btD+0zS?bi{jk!@NCYri#iL zNBIRwSdulN>4H5<(Q0%ay4kiiTIvZPz4z6wfpV567k87*C!K{dPh{5l|C zCPNgz#|Ye=Tuq={K0Ca`8EN*S2W(OVNwjT2Z$1}_RIRK2tE`m1hX2Fac}6w0cI_G! z(Wrn1wn~>RP3a)g1OyAxi-q1qM7s1|6a))`3IwEE00HSjAfcn6R24{Q0WpLKA@qb2 zAm>?e@3(#5d&U_j{~Xe?vetZNx$kQtpj}c1<#&~%W3FRf$~d`x9>PO}bA#qZNEpp% zW_=y>>53@AainUsdO%Bz*k|=qnQnOMDK`1-c`sPo5W3boa!InwvZyrX`h62fhWl-) zl;xeA=UECS>el^en2SFe?-a_i7lp8Nb?b`I7n7$K>h;uhJ6~MQ4L416pc)`@7zy5V z=&&?kJRaK@z-oQv#2G&L)kxLjsVdpvY&xre3C|{%c8J(HXDK7656C*v0V}-h<&pE< z%PV8uXK!y%ta~cqnD?YR84Jhfq2y4$)Y)bQ#xdl7?xvr#h)gpX};U#<0R~TOnNEE9;4OY=W&8dMol}opz#LTWQ^Nu15Uss1(Sb26+spU#w~B-vnanuFrVpgU~WSAY`}q2KT86 zLZudipw$1^D}4tAgejpQCKVtWPEO{cOfTG*huy$zGAggF4*I%40a{5aFF>@Fus=}b zO!6~}o!J^5R8iL0wCM%g8P{rFs5u?2GCR09W(|)B866$JaYIX8Oz-gfhMdtiPDCD& zc)ypt19@J_->R=;a-;+g!eT_ezRb01_3(g&vXPS0I{ez6wF$+f(vavCr^w!#Eg-4p z5Go=4!pPadW~-6NgqUoPmxXG(CV25Cm#<(u;zQ2)&Ot%?`I>WYYD$Pyg$%TEu=CO} zn`@qc4GwI6*6{8-UG(cr_L)L+w*HZxyP92-HBLlslk6t8Og2CbVb-gJkKfHC5ii>T z`qc?BT?*4mqAw5X_kS3AN*1dW8F`NI$uS4H1ZuIdCvtH%@wo2wQYLC zTEm0&QTA-3iEZ_BPSS4tssb~c*y?BVh-j7a?!m0`Y>hXWO?6&1NFyZ*IEFp%qu!X$ z5aDm?R(e%4n(d`Flo3r@Jy-CG^^>gP+(L601*!ixE72}J!B?&EYKB)2yh+?;?+^oX zfC+tA?*e;1`**dk8_{PpQoLWFOs}eNOq2X1wkQCb?=-_XG#@cEZ(2Lxs`Lw59nA3} zUGiND(7@I$=V;Y&$eHbzYpIF(3{SJ3S-k41!`F8I4wuF`JO7+`pF_oS=uhei zg(pYTEMq0BVsX$%EE284r6kV;b$%~$e7Wo^TRzL=p5 z{uj~kzuRyB)=45p?6wE(6q1tQ62K-CwZU)@A*8>w4S`TG5l`Y`MIW8gh@5FmHM|Go z+M-3$7rIP~arB?iW|EBDJ|zfWGtBG;+2VR4U!;xK%H zz~zSRUWN9VS?K(Sq_poQwi+@!0Vb=y-2`wnKuj-PJD_R)=NDGN{TpVlm!*5FWo?_G 
zO1C0+YDPUXRw9BI?2T>2x4GKg{hdZ{xcamLBe!gM>B$78*AlTCEPjpKWG?|ft66SB zLvG$csd$HE>VQ9hRi#!7WsPErQou#rFZMdZd8LuGV3#n04y zR>omK@l(6-wf)Vr&)C|)8VoD5u*@ZyXw+8e>b|G4I{r^b25+9-*z-iwzhu?!9x+kf zvw?!)LK32fQ#PM9bT-aLN}dF#fi{?+tm(eGTL!YXjKLYUD~$_)|L@IrKV-YN~9ZowOPD z3e$}OW`Xf#1Tq;)2%H4vT3rTAFe|Kz{(CsYapc#o*z4L+Gd?X9?YcS3r`^FyrC{H52Cr z>dXGvaBQzec|a{ji{b>8*>}(BhyAO+yu((zN@`40+PG0N0Uwh3SzEob8$0ZrgJXta zXB+oKJ5vi1(N3#05w$=;>Dna z>POR_lpZbax7_)mh;cn3NW6vG|NgogrQW*BM)Ia`ju+&)hQz+Bln%9dZvrI!0F!fj z7Xtl01j$zhi!cu`mCJw@3MbzIVvRMe0GDO_>~K__kbauteYjXXaJ_Yg!55(@N~J1zw7ZvF{*!QA5T2ywAM;L%s4m&PvMRQ;ODkSkRVpag-L z)Ikm?*}O8m!-3Gise;$HTb8g#V7*xrkfMdh)}RHy!M?vffu)=Emj#H@*PhlgK1y>O znCKTIn>B3?HtdLn0e@AM?LH<@Au{34Px!>pfP=$11v(B07%MLilo*ZFe>$bMRZE4V5Nid?Jl4VEnpr6{lr= zk+0pv#C9V!%79P5fucfcSOdU8{AH=avb9T^JtPBz*J)@98Z|yal%2u|CvmC_^LcpvG!&2Db}~pIx>S= z1}-3KP@2QFsH#sL9n2mi2z>7LXH>uq@2agOShF-~&QzWTKMmHQO_*nIO^K#LIYPr_ z0fvcF#&#U^a8n_n7%+YDNiZ`Z5WUc({s^jlCLxZ%t;pIu#dwHH8#iY~%-2Em{Q%tf zsqtOAqr5RY7S{`AnVM@m1)k9djJA`3&r(7j2;Lui2H^!eS}K_APdQC3L6WGiLkD+V zV6rcSK6Y?1CHt{R2%lh|nF%&hIrZlo7`(eXTH^uP{7{gaf7c`ad@?wD+bO`Jh^YQC z9=hNm1vQ8I*=RaOA*K$5w~hmN{Z$)BS*P_BX`ALLbuBccOuT(`Rd` zLs9SoQ&b*sVU>!Qzq=|3dOJZQO_5FXs$I?Y&Sl~rnv!BMG1zv!)~vsO5eTJ~m_N+%vv=^ow}#v>yX!&l z-}C#w-|vNbMF8>300lsHh>XZx_SSzxDF8So1;ShWjgA3M;6Fd?iHQ@42pUFu*{#IF zx)$cq3sP~sKA+>699Y9ZmD}tZ01|*?P+6R@oAB#5AiS^YJh2)P;!&YxrqS z5%9KhyPyl>c}3Epwcet&vldcq0zGqL{=tfgS~}6wmX783wr-1o8Z5vO%QLZlIrz8KmMY6A z#GxyO$dM6rv*mkbr8=qhV;B1!75t20?UrZ#Qv2<0KWb!XeFelp`TZ)d1o31)tT>Nm z!&=SKlzQevAs+Hh3K~Kt06l^QKx&a2)*5>>G z_H#l0=H|OM39bp6XbAN6FF*dcLj8?B57>dJyA$Zk4DPi$K{l$wG2lh(QTGyvI}F)@ z8Ly}mK%e|r=+FwBw<*w^g8=yDAsfKP<~+3o|E#{fMbxkZTWY<|&^7_IHsf0gh}VC8 zFH?IowxmO;W99k zb*0k4@}-fvHST}B8>aTSWKNF401_IJ{~=sgcvQEpD&XP-~D)6MVupbdNwG z&cELq;;P7CPXHeiIQt-tKPzfbDzchmqC6pn_dU+PoaNA{Lo0O~d@}YWFBy7H29k*> zYRP-HumAxS=U_Kmp*x_ExC258gH3BNRtNP>G~c=Nik|CbCBs8Eg->`-f*W6<}JDy>tJg_R-g} z!F9|}G&Oan;)nh!fBv;5YOj{E_a@YZGpxn-fZrrCl6%(*Z8e#Dq~jaXYO>s<-8a0f 
zEMQ+V6}$2cSvtQTuZhpxWnVGXwz0|LHBe@@`PBBmnR@=?Z}|(za};s*zQXI#OJ!== z$ne57(zvZOwAj>_YqgfTHoE2}?e*1B+rns#WP*^#hMQvD*SJ1NlF2gxqRdgB{D2nv zxu=!qKZ}t64qF0mmkWympNBADbV){|W(McOaE9Sk52mKMg{{kw9WZv%FYI>2+ESYz z*eh@r5o8=r9!0GcCuX_L{XVwwf;O1%_ax7Hja!2cHW#cgO}l_}n+!vfS$Sxx*yY&p z$gj&N3Z*b`NF}zUVEZ^YjS-`p6cRg%3=yXP2OQ+(TU3qvu-rBU8s1K~dC&a|DCEZL z(tab22KSKccN2XxiD0J&+n#={D-*@HM8 zbrzzkfOCc!b_Wpd6?(A1t*ooOz1k1vJ5PYxEg3?bq;7RHz-Q25_4}(8t_*E+Gqtz; zrYhwAMOFAa@H73tfy(}qoAbXB(af144a>iv4XT094f-3P*FO+!jDP(%tW$YDVB!V0 zQy4=N`+R=}A_07dJ*z7DLw_f@AP`og6Xfkd#vo7p=g${u22QN+)=W_?ze#9}-|}C6 zCje2EI{<_o05Jf2^*`u9PjdOXuh=`fx)y=#PYGPsu6%=x^c)kA!ihs)=Kp`WF~fg> zV*SS7Q>+yV0Mb}O899_)tPcUc0yNlC2L4+A*}YNGS(5sn4*>{Gv(H~xIQv53i$Nd< zIb$b!1%lO_0c7zv)GOpl~{yY7Pz^h`;o|JyibSIY|NSzz=i7RRu7YOdx@GQs?dX z7YLkl9ccuRVur!n_0@TI*F5d(4kHQ!Hc5L1gvKg1R9}DRwRm|uByZx^_5a|;i4>MH z%fp4@AzlN_1tQ(`{mI*r{m+&kaNibbyqVqX_}ec2n;4|4^cGy?)4uwh`07;pMt{izfM{Xdy||Mo+m&<3)_jDKc>O#dHj!hd_@ z&HEuE5SSFY4};#r%im!OT(l?!cZSCwl;p0B5G(E9SYVj?nEh^G8Um@Bz!(31{g*%G zy*FqlrR{`()QnLv{e><(^5FtElZ>aK#`bC@ux(8f2Z4P8vJ&}~Ku;x8^L@ySLCDYW zJPnGNQvjmq0220${!P!wG#TQ>&3#J(P?D}dtsj~?vIKB0K@}W~OW<&i>z+n9vvfUn3s_KgJ}(0^K59*ae6mHGwE2h0kJr)fyWz=eJ`e z_kdJuanqz9XkJV$F{N8gYvz>R+>@)vO=HHwZg8F zW9AY-rXU7dCpsKMz^?lu#{^=tfw1NIo0FEuNbt)gMr>F(W=El1=D1fK0kI7H39on| z)d!^Q&baHq%GVl(3&KB@FNF#==uriCLXjW?CnZrha%vIkWiSL?o-xo!3JT?&J|N`T zRE=Bn1IrSf^|U@6{VgZxYo}g`mv!Iwr*UgY9p= zPpAPuU8WtPJ50Zjy0OH-Osu|Qmoy4$HnpiCP0j_%~tLz;Nv6#0JPVelC|L8J8W(DFop+O!JcwZ zsy4SCH11t@M^|kq(x)fRIJ5%nmQM@mw^@DgJjA7(7TlE`*g&!&;1(hRInHuTL$mr| zO0gY|xwaiAs=oAS85_BcB>)^~ntulT+Gb(SW}y!#B;NlDVC*(`Ewx>Kk$NVz2HUhl z>7iaLRMmXF@p&l%hz3;fH8Jt(P^Vk>aEX;01*(nXi?UN8wV{gJqLe>ztb2!>a$J2z z$LMtUEk0OwHiVkl08)w9nd0GY#|LwLovRrN>*dF0^*1*|vU4C~h=mRej_CpwFd0I+ zJ{O?92CJ2V6Q&i!CYzw_0D?9VAPr0xeJh7v#S4sMd1VqrXs!$)BsMm&O|~G($gTvC zBIr@=<`#o0{yM-@%ts0{pq=38>Vgs}`|BuZ8dt6(1qF((Q@d)QOJV@DonLK)eqCKa zzqmbW2g?=y900t|XEi_6)tnNncJGr1?dQE-SY7^(;do24#+>k)<0CDfHwIEvX6w$H 
z?Q1%x-tuWo<5jnxo0YqTc1A?;}6>s$weljW0ZnU6jjSyUk&^oZhXqazK*Yn8|SB`MySUFE3QLcaB*KGxbW zA+}<1+#t(S?SugU*tk3~eSF%y0OR>f21&I3EX(W9>up1Y#zlDQd(l+LGQLkug!Hnn z&eTbtVl7vMZU|?DYiOWoXr}?-KWriTy%a*_NH6UHW{~X%Ym&(#>_7yVtY)zBW6nFM z9EbqTvb4s9yj0tf`WcN`R3tQwfw2Xsa^Joc;F%$wImCM8Mp6LpG2MRw5dAB-7{c>@ z|4cjIwPAah>HS&gCXjg@KQtEtb(f*h`80_+&GpyRRFaXDblBvEJJVu?eX?j!>(ft3$(%$SdHl3ZLt$n^0dUj*srCOQ!g#icDL7Ayb0;JN6#4mLZ12 z;cjKp*hnA~kGO0jQ9@0_n=sTyvZ`Pe3Ht#~02#{3p9Ey?GvAxr%RLLloIzDB~a zv87^z`$H343$$+f8>DcY&Ie!Rao9BG(EKl0zy>H>DQI|yH|I*Bc!z%@K2)18X{>|< z5rP(iI#h3uAsm-$kPRhJ80HQ+bdu9fS>kHJmTOh`wsnrE6Y7#QKo5sxh3~exz9qp z9iQI*M;qpESSx7#fLI3m{bX~=Cn<)w$p}rp0DZ*DWVxTnl7PjG;?e?*?U{Xh3sGH` zt~it@$Ty<3?!e$wF7J%PmRfEeGZGllhc`&NVp6FDrG)U=+0Y1ZVl12vA2du{DI`JX z13St{#YvrT%81J)SQ!ILnI1y=qYbGg;6U>!^~({5NwRU5HC#K2_kW%6DBJ7K2k4?^ zTg5x|Y2!-kr6i_FuB@C1aFH;O{)}%~?RlUqN{5h*Tab5c1MJP6;aUqH?y@~gT&@J{ zG3A1KAYgLC*lBZ#eoQ|+3G9}Mhzsm$A6CEN5>nQc&m8bC)U%m2O+^jlmDXV_Kv7-C z=(D7vL@F2;1nOUupC%aK2w{$j_nQ5tien$lvkazm)O3`V#st_z?~Vi+-}q-Ie z_H_w#*O!s9S-PD>qXWT&{|4myiQer5?Wo~U@7j-(2+fQ4iSSQ!qq@;}xjsRnINP-{ z|2#ASM7W8bbz2k@0LHxc>Gqui%p)_y34}+d>}(aCIARB!Ja^_ipMk;ug;yK#XAM8k zFLz0M^$yN0P%8{^lyhWs-$~kRr+Arrs|U`h0E3#_h`-N&iG2QLB)ZHXm~DDUlxbv5 z;Z>19^I#eA1@hnOG0Gl|#XSb=l`PEY<@Fp|U5+>ar9}>nNxy1| zU+AqRo4C#t3AT#{CK~irn7Q4H(n&6h|o=b(nD1IH6ka=#7Qm|ur`4W+h z_ww_oAPfh$uah=n78x-CTh;K!kA;_WKbhy&dw8tg*P`%jDlVb!kV>rhw+CL=X5?Nv z%1bb39(I?4eWa3SUxsHaPM7z_Q_gh?D+pz(<~0925$t&zSLLr=($_KD^cz!v-qddSLSP&OTZYDTBL2tf(v z`<>4yi(!!lswplc>jAyn4KP0H%-6K@4!f@lY<`XF+{3>ZC6@q?a*aF5!K`2I&WST= zg5|N!{!qN4cnHzaqv-b%pR)9se0Rc>SqYuRYlgBS$_&%PFRKxcs{=jiodjC$VGpa` znSf)TO%U6_QxIhPs2IL=DZLNDV1M6U#QEx%C7W}Aruay}FtTo4a z_roDMqsRT2aBXh(@AEbLt8{`E0aH;Dnt80OjPJV&VkO=YY%c4e$v@X%R)vs{8q9l` z8ZSVZBjQS{1#EY9-sq@qzzEO4^Etrca(!9G{B-)j$^1||>IKD**U)Mh6C}(q&+VqF zeQFtpRJxnrvaZg(ZEyY7w;|eXXUt; zXZlF<59fhRECklb78*g61;O0HbGNpFj1?LlK8Yhh%d8^Dz? 
z%(jaP`ysp-2&5ed$oX$z_7M^+IDsiOgR#9azOX%hkL0rNfCi<|C#yHQ1Z7L;%8s^ejyZzdHNR|X%xjRcm&xa_YOxhBa8lZ` zcRoYihO*Up_|UozE_Y8Hns3N?8dsPV+L?{ydg_*wHE>^nUfq5-5e1 za|!GupR|=lNPD*8zdLbwCq=ypKCYL0_ZjmRo9bM-he}-#OjptJBi57kDc7`r zV0=%|lM=)_Kfnot5ZNZs7PEnNrd!qc6~uR z1$s`3LTO8AkKvuV%X&oRXTI-n6gfNTcDFCqiG2ZfK*8L9@-ywqP^lVsP(}M5?D@%D zwg+`;_gbOB90SOxCp`KW`Muea(Yug%#f*nP<{|cmbP4auuLCADs?}(&XBd78x+d|Y zXjFf76L#J-wHagGQaRi$x>!8`57KXz;!{EM(Qghs?*+2pQ*v^qL7X_p9J1#=^_8uK z0AXH*b0cK_G@>RNKl2VX*}{ARZKI2x4z;ari+;japHXtbft8buwxfmNx)0j0mgQhr zu#Cbslv;z$zCBoQSNulCA58vg(BOCBF6@Z#IVymLR&qt2GmCQm?i48;cJ_TgzE<~F zarm4w>BcbMMh08Z!EdZZlBt>fkc?rUt)a@yTahbDcLap4)PBb|qT!`(dcXV!<|Cuq z_agU1;&N~HCYfBQo-rI^rBtg1Boy)7Fc6dN5XBug>vA*Ix-S4^!ay=DZwwMxsT&2C zFg4I<4R!}=XE8M^K~Qd})w&|@3MTE0mKjvBMeT+1RwaB0>V@$6D zVJGpU`MNTNe2bQkp4a))2NFDTXk*0myZyQkejdHB`{vA9EWb`hl%?Vs#E%I z7*lCHT!24wHYkl5)=?tv;HPQ_m8eeFl&N(eQlI7A#F}iO(J&y@9l~`fv|n*B#XY&P}omrZ!iMw{O6bX**7EXa1}jID)ccm zG4hZtc7{aE4)y%Ka3MY#%oFt^vy4IO`1g{R=0Ek`iDxzY1Y^O9iNz&|n;30)@_qb$MoZF|FS}D0Qc`J6a1}R}x_IH0B^lHp6JK zj%D%a$!q4(F#>=;+Xei1zrEt(W%WM2=O@8jtP6BdjtXxT@Cb(cX#)v_Oq_0yOo{-{V0!YUvZNQ7JFHaAXIUr}#P4canGo31(nNt1sM!&Aq=S zTIbnO&HNC7LP*dr0rKb-!B*x1IHF74$|LK*)j%}Os?`a`sZQVM%V{(U6nLBAU>DtL zW8oRdJ8#;xdHY~GAr!Q0zRh^nQCPQ}thCkaj%`|#0!(UOmn& z_5R+_MnIB#;=b&}nBDZY$aQw$h&ZlboJJEiPV?A-3K9f_t=XC~+PSr2nY;keY)%e- zPW6oK(*S>+yT%0vLaCmRv?KO*Cy{u*HpM@8wdZ`Nf~RrQH|7yyz^|E z&!1McV7IfJnwocQj~DrbLkh(UOf*oX)qiTX9+a-XyBQMEm>Du#cwr$Y$|IZoIbOEY z_fJ|C|ER0vK1Xa3bK6Q-yh)-OxNMNm4g~*?a548d7#@o08SGC|M{|wM6c;ht_YJs+ zqlkofa`OO#hyhF{vRzWfj9VU} zR#s*|$TU*;VJRGR*31J+r5|Uu>Am7vB=HQ90?uYi#t$K@?2(8E30L}LyM&1@>c{CI z1so1jbI<5EkRO$wz&_k4(rkqY>EKeH#u|YJ_%ehQ-TDD=xbN?^LU8AojoB+(8UX)n z4Y7EZGeA{z(qGa&QMuIa;4wKEJU}-}TJxQsiQ7Ks@8KrzZhPoFERUUH(fK9Sajs(r z$XqPt9cn%A%JHF{4|4QsQ$143Z$>QOKr`qiCt1cxRqWhj#fBM!o-5-q=Ge}KOmM}@ zxuJexGn|KWXC^igq31fo@(o`8(#u#(@NLZAQGlssH%R>1vqt(gaN3~_>O3A~yYv*W z)k3E{S}lk7!KQ^5u?U&*u)H@8p|^Z6&wX7RX{#x~ViGSitduBZdmz~GgLcFkYh}Q~ zggRS?K;%~Z0)1I?Z2&Q$Iw0HhR);;*a5ek%>DQI@evR#Z 
zP+(U8{nYZorp1E)s3ElCw8pxM*8+bj>47oX~m}C%jIP(2eh9*?*KYzS|_+3 z8a=@imQ9{9`lmHpt0gs&+(O+I>Oggt;oVND6}3()y}PmJA$Pf+SfiIr1h3O52XDOJ zq4)4?>jS`%1|2K*3SXfdxdl>7c=c7rrwAnR6_zEBVN6`9xnX&L5;$4%(H6OOW#VNC2-; zwZC@N8sD~cbWFHb&>N7?1Mi9e0GU^oLP7Yln9v$+&RJu${S&yun>gj)-&n|t@LWQe_u){cWo!%DP@MUGryN6%S3K&%RlW*D7 zNQF3PZ;r?^`BhzI>-srpeBjyWGAeUn?VF9Ekab|^se&^OXb1o49maArDp9tMUf!`< zalFeb@vLXuestxWeBZe$>#(Iy{j}HHRZ>LF*5Kf*p2nV~ceK-qJ>)9MriGmn0EsdJ zOF+01`JBRW9(~&@XlJSm>W^$_)cVTxsedTkw7cT*0B+uNXwAH4EfA41vCDVIS>yI< zP=nD_&@BrxZ3?hRS96JIy)DOT#n9NOK8 z7Wm3CGQo~Ni8MTJeOZ~fsqR_h)3%Nzke-*x3%Ti^Sxso5t{ut3sO7wQ9ie|NiCFvwAtBYplk=A1mrqVu&hKqTG- zy>~jIFM?V{JOtnAUD)b<|0WiKeF7D7VB}h0e;E>3MHMsH*Svr_9I#Qj5=@Wj7ct+0 z#}_(UzF~1fVbOx4)L&!NWmhwFjQ>&iP!Y+XypX1nNdVC69>QMfAWaTkS8S$-j=4l4 zDOMovOE|~SsO+zu? z`0v?7+Z8&1q)onkAF+m8z3XhC_I~VFi=6FITRVQM=(l7_Ui&%%rB))+{Mo54uQ*<_;`xp52>bqza zEmybe&+XC)gtLZIXfqB|<(woRR+e?bEGK)SdLqGRBM10sj= zcA7iBd($Upz@+U_aF$CVTd#U2727c&^*DGgYmq)C^J zR(aU5K52i~A!u{j%~*z3#xtO2FVoc%0S=<1+j9lnVS3uAkj4*fcm3%#0;%DHxF(Nk z#9W8F8~+-3XF)2=84K(OuIE!xWQB__+2V%FEH2kLzH(><3|WhwrkU3-{RfF!Q(+I? 
z9a)J^cKH}+uvZtEtz9?Z!TW*FKe9mFp0&{_a~Uib(hU`G8iqRa8R6EcR%_etv;_zX z=eO^ZwHH?9SAB2zY;cx}Pd6kxo#_}P{?ww{sQ$pRF;CMd=Ss^=3PtIOv#}eoYNagF zacZ8=B*bYa*7OPnEbXY?Hkhy+61dySO&NUz=IHiLkzYyWCa5^!^Vc*Ub{_xcXr@#oQNC2&LD0oF2}^SaMY;a1^a zT@#=6_{d8I7(aAT=&bSA9=)MKtJ1JKsz;>})z|8xTeIr2_4w+n5*_9<*t7RpyzhNc zd|jQhPkD%ZKAi;Pp0@6d(0?ccx>SzS5z(#plv(`igHrI%h<3eko7Q>{W!^lcLy1=| z(K3M&rlqLNzOp$0$i^ZDBFKAo?bXVy!U5^5C?LOO>2m!g=1>~2*0?#u{9q(&(}zALjddHUwDH<) zS!aaiu@;y?nLBML)F)F0WEX-Z=NtJ0tFd-%9=AJ)(r)j6UjLM4= zqDxweR8A|x$7bh?Gkt@{$`_saNTU(f^GQXgem_2eHrF4)Pg|mQ-ic zJz(>L3)N^b3~id%HPLn+ljPA&71kfZsGn6BFcTQH_2kd-eHU&^JZ{IRsL8$_r7J3Z z1rjSGZe>}e)#wHoJOTcl*J+|!`#|8DCZCyghcL3Qj*3SSUHPxtoxBX&RkNqgA`;zh zX_W08epv)a`_KsR`$phC+!%tyE0pKM)1)Fk9O{h|$s>3ycRsFIrv}ZF&|RYjx?hq{PPKINc4=yM z&y70l`dY)>Ijy27+L9aP!OGf~7qH4JVzZZH$SntJK^4nV@(sI95>iS%cXrojYQSLg zjI~cMb>`s^zppiVqeoa^WCH9>+lR|$`T=#No=3+#H~zvP%W?03CVnAIL+IRB-~$I9 zpx_zD0rEW^%>4S{r=E!DE2h4zRhb%I>%!5)ptyC?L(=qtWrnm1wfM})x!Y)S~B*r_V_eOKJueFtt zrS4syg`~R6yfd+fS8Cmzw^5S*1BS23zLIMh9+47j$0xY=8#F{yHA|3u?Q`m$U+Yxn zyg{1AmQ5N0EmMcO$i!VygZ3W+@w=F!L=L4b;&R!-e07xO!RD5)nS4Pk;Z3*}uL|LF z0l=`*SA5cjRlD~OpU%oSkG0$rlcs(S*0AW_B;GEEliA1qq$#~>ooiC}IkYmAcRa6w zdoYTeYx1c|XUH>|Xq;d&L9}c|9Sk=dz?L==JG469iv}UVTpNQYNW|g z*f8|U^{S`(Sw`neOK|?~<`0Wlu(1y&T?6ag1GT)X-F|8b+1qL?>I3%Dd)yJ)oya>X zSdyQD*!dw0zvk%mwaHw34uxIC%%_m%{}mOt*Tk82z83egbW{`F;N=$AlV|Go(1GYp zwJ32mUiXt#U(c58R-Fq9>?3V?Ypo@gAS~4aQ4ZrZA?o?#A>%!3-q9b>(zj{--(<7S ztWd1`fEAxGwSDh&)4?99ssxk%X|UanDK{?02hOY8ESmKLg=<8?`SpVC44k*0&p7hM z|7Dj#NKL?X9{&u6YX&+UPVU85_U%Mc=S9v|uGSx;jk>?ES&hGKF84mI#_6TUsihAo z%*Ys~0Ct}wtEUI;9$u-xbCTsx&8Q!Zi}@)J-dGrXsB{*V+-^A}D%E=zMEMr4DHN}l zu`dsl?}UwunYR0P$W?aO1cq&YmX*w_sl{#UIpz=?xz_C5mtsBXJ`LMHga6PQYhPKH z=zN+aksWFBB79)mTuLC5mMPhU1(UFyU!Q$iuh(g)^5;pgb&wzD4I8bf@%JTKZ+E%2 z^N^QfH+Ax|)2z9Zk~$UV1dvybzg}E(j-)z+Cv+y~F+pc-ZS9HHB-B;<$r_zDcg;{N zELjYBk*m`3&of!iw7EVUa>=^p<(2H}qaHbS?O;G%KS$S}V@XH%?R!Ktm$0INV)596 z4?eITrkp&EKS$okndKA@-W=5_O2p5}g$a^DXPuTwcz|_2rJz-#!?&CM-`yXJeguS4<;HlE$|LR=o%W!;sQx&I3Oc 
zY&b*kp)45jaX-Ujq#B^~6iCpGe@-H1t<9Sv9T^HjAt)=gYBJrn{vUiDu z+~vu|w@xl6jjELbxe4$J0ztscy2di$iDTcJ?1M(j6x}6-;=E5u2jab(4p~|TnTwDO zNI`RWcDR=E^Rh6*j-&mujxf?n!1unPq((1+HJ|OD>JJw_t!I};NFyf9ymLNLvIbZ7 zGXJTNvz*A3ZxeYPQehzK@1RUDcND zae|{C0$I%I)GBr(~&8R0I(`T!Tw(LD#yUMP9NTO+3 z)IJ%F07^}H$*0%tqAv%ZY79_5BB%S*MC_c-xy!PbN-{lzOgCg62LXebI5p9VZT_w4 zO^aNKg+$kvveYsgG@{qC`XkrcsCUx#Yt9=|gwPwp<!T5 zrW^@rV5TNUO};**O}`~t`@8&-^9rs@60x}<9XK+RxYTfUd*>0+aO>X1uoAIj9ez`y z+ZN^MosP5N^wsqJHqODp_9hB-qCikyaerFY|ftY=bY!uAroBr|(n5TCYI9>v&KBVPyTLp)9E}KbAsJ}b6`nHRLShJ&V^Vo^& z4mi!so!R?&ealqeSoM@uGG`a*oagMVv3=3`QtcE3(d?j^6DCQ7nQf)cblln7E_|~; zM?DiW+LY4u{ql5Gx)k@?g^ew-Cn4KKdU83}R?o2$91r)AG%vNf?pjb%=AiTEZCP+6 ze#<3#obz9E^0AF-h8 z0^ws+RW49bo|pW1<`sE6nz?!7k(Pu0`FO35(fioH3k3lq?2|uRphoF2J;{#Se+oK; zE^K3ucrDBqMAGRE8I8+ifdVA|d`_y^p+?&B0_UYxu_Fm1xms2hW2Z#NuU{&hh92gA zuo6BKbm$d`Z*|I^9SqW13o%lKf3dCNm9==`s4Psp6lwLc+h zw1tVoYi45@>Bx<)4#ZWXV#_=4Zy$fVxlj?dVDXTsqhEaRc`==IalCe0kiS6NN!^XG zGE+;Wct7ZK>g7j#}d)7&XUr+S8i zdy>?%v6>;Ef(Bsvi*P+|uZeAZw$>N1i(H}y6iwzI5mety{c#DEwtDntrB!m@>t`GS zZ^CXIy-}2xYpza}NM+)WtyDUOl&ukC-`?t@hnXBI8b{2&ZA_ycThp^>eL@lwP5Tr~ zkSk6s?&iR5K&r*{I_|YdYiEUimsHk-dhcPes!J88!O3u`x3<*vk$9uwxyFu1qufaO z12-xrA~h~iT{VkC=$Ccm4m=@jTS?MCtq@HmYlCz4(97arx_pb6t3)|72EkMlY-*$-Vay!tC<5S zZtYHvo6gE6a@lc9$3Njsk8-`)ulg%gKB9M3gUCdlGBd z`<|TAsyOFRKgr%a{OG3*C+tmp;N82=z#g%P!l0y`(TSaa&0pFY>Fw92LP;ga?~wMz zZ|Uc!pY=XpZY$|X1oM{3rlTAB3t}tIxJWqFHBhx(B3A^yD?K#TPd4e)a^Q3+5`JAN z#`)kw<(+F;SB|D8`WW~>!g(Jh)QNGYIH6~Ss+Mc6>1ms26^+l&dL;Heb(*jP+r?IE zJAd9*{zLY_ok6GjC;LIOYvGpKn)=2UqoWUsW~V;7PRf?bdzziQEt>MWQzuT=pgeNs zXKiBVsfX9S>FtD{Zfg@wf}YwI*TUfh)7GVGL*EJT9Ozr@R(s+Ui*JlyNLjaiOe>~u zz%B4QrwVKnOwR=(kx2F?_SfgkwvqU3EWsn5c=238r)0$Kb4B*Y`s#dMk}8L|AJCU~ z#Gc%ZZdB~*>hMKRFrF9>r{Iue4bo4gBmJ(#$dyRl)NK0LqnuGYV2M$AjNRqcxN&BKKPWMC&*-y(~+9f(3^#pEGAHj3Y@Db_ZvXRAEiBvbT^bx`>rFq|CyOpeviPhY&8GIHh&GH;7}a+%PzXCaB#I}2JD%HHI;)h#7T z;S=6D=?9U)kpB-lk#Gf*xI z;Ui#Yn#TK;$SWoEP+|Tqd}I5Y(yEZqec`=tJhg+jqvwlvi2fTxYnz%V0mf4jxtuEf=u6P{sfe*3GsaTC6hlz 
zjwV=6Z9wyw*LRmd&#G1oj@a9}{6-_QBn@$;+){W7eCXMQoZ30}Aqmqg@9ge_aqbjlgb!Z3@O)G>a#&N3zP`(cLuJ z1Q4X8Y$zi*&;n+tq2|2tC8%!Ma|w~N^LbViVd(Xfu@hUy_~&`xJe#lI>~NvY3ml^{ zE!N(g!4D-L*u}*3IS<+c|M4+=a+4j|KXGo1MbhO^^PQAS^o?d|x8GP1XChzYHq~?UW1sD5-3QShP1~)?wnjfRZLrUH zs|DdF@&?TH?~bdFY)6`|P0}MORy;>6B1L!M!3X6XU$F`qSqd0``NhpRBxi}#%&s3d;x@1+`UF12|$FXVn!2yO3QH?%5MQ5uR& zBNwb;=jN#4`Y9rVdW>)@%~ zz|))$eV`YE-2yeX|30_Rc-|cDUOv|MYE0va-T4loc6_I#wmOowcKvp>7H}9 zEyRcwDqnWSBSIE`Xd0Cc*X>i|7^vy{Rl)t%GUPGbQUUwm;L@1D`1kU*Q)>PbqwPJ^ z(9zoAU@x8r2lp8E@vbNzW3iq-LmOO3YFM3WUr1ZMnomv0SPwp$bfBzJ@s*5jOq&>d zck`Qzg^TnMLZm@towux{gv8qbbe3fa=6RoLAHB3k(2T!sIo=H@M33)v0`e#kt-80~ z`jlXJWOyX$Q5jyzomYw^y+|G2DTlV>)M&nXj9Q}`Vcauy@8yh}zk+I=nuAuREde<2@ zSS?VhN0}Hy2k@U^1S<{xPQS{Vx?_zmIG8e1*?Ct+3l1K;{k>dGK5~;Z7U{Jy7tvwx zdD|hdEDb{w)3BvYe4l4;%($H$a#qck9QY)0GBrEhfSmVW)i>q;wD;wKP_FI&H)$cK zgmB6^r8H5NNs3C*Mlob5Bw0h2>?9R)PL#crWn`(Wp=gTA7Ac%a#SkqNl152ni;UlO z&kW_fb*Wvm|w3;Kg35=+N`$`+bWfa3A~;sg|X*CeQP6@#%!SN8YYXr}n1wJzn6`^Px}z zFVe1}%{KmtY;px8K_rR*1J*V8%+q1@Wx!R3_qv8!I8Xsrh&`{ej z`0m`>^wXIKpWNgd+$BX-RL{#}yoz;FzDm0JvB*hyYw4A-2YYPaI(4U}jCn5^&4jM0 zv3Whs#5(m;c-?+qvFfqlhsp2bl}QCBcZ&p`-e%meJCFF1ad!T(3Y{z^{Ea|0TwT{Y z#yER^D&8KQH7Z+HK?|~M2{(3gv#onox!54V1rOg#!a+pImLXx~!;(b?Rkuhwz^*8$MV}tLr^FAdO%9Bp+@qYUB=~}78ve?aS zSGPnOS7d4h26q=I{^Fl)mnBiYt9WTumFVEnd1F52Z>*}HoES8iS5VOtljl#pKlkTW zmy4UTj>R`i?pWaCy?XTIf)N#$=ROeS~-dyzlT&O^r}p_r&THrBkjmaOJT-JPH|YDsetL zv()BJJz*;TS@NW1LjLQZhN)?i{Q70CWz;gk43Oh_nYtG`CM4G5p2r0$<@LES<9zpi zw=zasui|U`&7d@)88bi>k)Gq9i$9f}lMM17>lkd-d~vvA@TBLxZOMg-$oe8p%JhzW-i8>dD{XDb5DV~0M5c~y6xjT^)5m#%iTfeN5TwL(k zT}NssFBejkW*D{y(+$d9;W<P5qwbGj&F&dV%t3Q^_Gv151C&G^w!XezXSgi$ z+6)JVUNCGP3Oc=?f_P~a2{Fqzl-q~BV6>;zcFQO?>521(swQ=~-*~2JbD@T@i5k~# z{B~s-eQ3X}2AMv${WhP*yo%aE7g23Ie(t4|S9IjBqgJKnw{FiykFv*R3tL+!e0JK~ z`V+&;O_?BAp1er|<}kX$SM~Flx7#hl2VSCE4t=o6dZ8}48l@UVOt*wA2>DtnY1`~G z+#KYObz>)Ho-DZEv#`f{EZZPzgN%lMv`iSD3yY%ErpvtLz5X51(@_LB-4esKgyAt~ za|*z%`q`^$D`G#=oqTfQH@MR+hTk04z;+EG?FBH~6 
zY&}R2`J;Q(8SYHLJih%OXL92c$lgJ{FRNn}Xs=&eQ=Q;0qA8?pC%vX9U+;`K?{&u4 z+L{30ly-hm%kD`0^$}V~;4hzscvHp7=cMA^ZYfM1pV{QY{z^a58fAZHCns4f31jxW z*wEreuF)a93Sh`xxCa%RyWjYRxXcP9Gr|=3o^+sE`o`Ux!O5w++?YFg+ z#H-oRVVXqgsFNYw+HmiX6aD@D?odK*q0Hmo^Ws2NRTVe#IwvPKr9YZ`fs)s$*RNmm zqY~m>``~5p*k(pYU=WYgYB;mJ3I-*6YaLws3+#W>ws+68&U2eYXvZ=%=5QjVz|IBI zTmq+<-3g^R8ZoDSlQF)j?T{kDKOn0%1AR~&PU9aaeh(MI?d4}l0oO|m?v0~PpQj+D zSsF01(6KSQvpv1J$(4X|q!Lf)Om`M5~aGJD- zaXi+3qfy@ovp`Fj*2Cezf&Mt3kJSnS_^^`X5AUT<%I{w*hE!y}bcAg8%ie9En}!Kj zd;>Qj@|kzt-3tzl5)~Ap@mbJBW>@+t>5_ixJSSSG3{&2{6koS)-TJh)+^tgM^E8xr zX%k-U99}xTY~O`r$BuEK4-zXPKiobxeSJs&jt#6;d5~F71oxUq27RNBxO_Z}jSpp{ z`}Lf)@bp~G{+z@Oa6m3YVKj+^pVIj>MO|c1i`ZoE;srY`FI!GT)L?#*(lTyTspl7Mv)4`BOXNqHoRe34b$j4W?84`m*J#gq;VD zoSRaj6San*A&4@{Zd0@xP5EKu_h|N<<7*#Er5KBx&z)=JZR zHw6Lj5`Nitt@*_;-|0xPD&5cA$A|I9p`MDLk_i9oL7Y9og}wADzw^PcRrylJ)Y77& zB8%Zuft!&^AwltNx*zoVFQTHNS{h*u1efNE_b`zBem8xEn4HaRY&sU+wghdIGB*se z3KJF*3L{B#f$ue$?_SW9&@e+-)FBnA9tWRHGzqYm2LT4E)z>97Ea5ce;6Lhk* zg@N1~b$jUBA(1$cK5?NqxZ*Oa9xwxs=SGg(AEv<1n z^8R`59f8=G7|GOG>%Bg_{7`7vN_=oK{w<@o$&cr%O(w&1>J zM+HeB*8o85T+>8%|9o&h>eC_YlSm|q&;l|90wDuX7AzdB3wAtynx^>mLg;{lRAV9h z3MI)ky%rLc?O~Y$0pKgZAxdumOKlmc7lDN2veSzb(3;iCoqs^sPD)!f`{Lj7z`lHo$Bn?;{xR*djvPmyxM z)4jCW9zDALa#mKVzrTN-;|;ycdgvLXZ6znRS$N6sF)__)`hv4O4;~ai#80g^f8Dc- zv^Xv{Rs)VH@iN-8XSqIqr~BHi=~(`UB?aW%bqE2Hexn+q<+=f*0wIWekH4Z9as@yu zfGyY6sr7;!d@y|nbvA0VaC7_U{9*+Rhaqpgw7DWiLSw4V!Vs&I8a*?E3^%p3mIZSo ziqy!+h+4CQ#K}*izFXkI3}Dhj^}+5JvI9xlUNFE2yJSSx#yfhggn;Yo#C?2L$CmkTLKNP^hYwc>aogC~Fbz9gh_*52l-nGuV;lWTVeT~r z5Rru(uy|HjO^_e+MhNwvC;gH4^G*rgCP#&?ckjfJ*^zhe-wRc|@tHV;c5mJ7xXi0Z z_@?@2TqQBBE4e19t@nIl@q`WT>vbe{yieZL!eIF1gWt?|ZrQ|%9vDd3h|*?&Qw;mO zN01?FQb=LnmZDYZ7qUf=Cy6NHUn(IJ0YacH$TL4w^=u54M**T+MTTlPaf5zY&)Oir zuqyUhwCaM=5E4mYp1eO%op;ZkJ!%ADPGqwTov&P|*H`-U^7x&>D7?gIt>8#JRb#b9Knwpqs z8Dtr6M=G*aqAOjkc8jGhf^^uf_Nx^=-7D_#A(1e!a*10Sl|za5A3P9>W#5&S_{1R< zmL3!1Koxp~eGd0wPW}{s2|oR_#gSFNRuorQr*B1GM_fXho;;CM|2*2NXe`7+fPf){ 
zy9HP3Cl>Mv%$P2HLnP${B~e~kIr|kjc`=?59dS|l&_`D>#>cHcHN31bkbiShjReo^ zQN1&XpC5I32Fsa8@MubMk=26RYy|t0B9sHHLfIY^AeuwGdI8|~kXGxEiQLW;Z zLyug5;76Tei~wbZp|X5zH5S#;Lbv;d%hLoOR9FA>c~C;|7OcBcA@gJg^4J+QqfO+H zaX2yTGW8Ob`ObBB*ih1^quKEWmb$)`uyUfdmZVaayhw){mB=%PAjP3=ptC5X*UVv5 z@2EyQwHC)JHjCBg#m2^-NVDYJ8L=9f)frvZr*f$5^;jG44wayQEyJ@+ii`xiILIdR z3`ijo>+f=_ROeKEx(1I zuEYyw0ddq3fA?ZKYPOLR6Icong-}^w8$ot!I`R5Yp<($7!hFsW`M@^QLxsVXtuBd* zA{84eKpQ{0z#QMLn(2IJfcgQT1ERJ*%~H1od@Uzx?$~QZcx*R4sz92h_lQOa~b^{VXLZqs^PIBp7)s{s;!aBbyr619e@fS_>($SI$ zc`Gr<@$%ALmu1%#g(62zp50)WQ6-2G^2>Xdv$NCc>+5r_Tsif$%hN8`2s_%0oMeiT zPDX$v49k-wtcp+FKc_v^b>2>rOo*-ailQrVJadz}+OlgbuA1le&6$sO?m9Wg<$i#r zny6{3VXbdSAgw*(IZet^v?6D%rkJRBSkEW~9JPOmN*+qMLXbSS_}CllMx2dZS4Y9=n)gT3C3K_o0yck|&h;lm zwc{7Jt=lc#+*V4<%BC6rTJZ%Gtqd}Daai`Rx562qxGc@7dJ>GbEBQIEY^(f{b5teh zRTOa=_@V4u!*;`+2E|72OpCl%UA}Zl9EnVqy)|U3DT^rK2F< zb#}S@$ob7R)o0rF?YiOZrZHtgBQ7R_iV+G((Y~F!|54AyEDezu zU4oz5MIjZu!ECAFR$*-01T!qY##{`${rel_^y^oyJaWr_;y_@=$9M&A}Tss zFR5l5j^V+h_np-ozmJ}jx*x2*x($`|kbqzX7cWLPCD_9UVLl3p6T-~#c5%(OHpr3< z`{id1##UQneCY$p9%j>Xw6a?mVIYZu72h&2Y?CjT1#Oz zm*oc>9aCY5pA4>Q&;pMGL0cRd`OEAI5>z;yd>{- zDY3kqoShLomc{txA@Fswx6kZwCoSVb-n${k4(+&jVO!Q|r%F>VxBY|%$_;O@{L zQ84%t_iCW+4p?463lPx`gKQH`9Jer!K2ySa7Ln61p+$0NXsAW2-a9oWPD5t=n0A79 zKXrFs>9VSA_-}M(6Em7-o{a3YzPsTFabpSB)8=NG>qH0zC-OBoi@|0qB@MI zq<8F?jb*hCG1K0y5NF&m{czmXx32h2Bqn<&EP0w^-mvTfQyWp&^^*S*RKoPfAA^K;+4lq`R~}_ zhmbos-ui^%at$mgfdoo1efo4;NCYeXQrs8%QC0Kr;xf1+ zq(aPwS`Uv5_rK*vip~D<@$nGULsOzcsmhWfSgYYgT3cua

8#8fXoc9f2n`rjB+8~)v>G#QtU44 zXEuVet&P_j#xpew@>r^O0}FSUFvy7rEMbc2DF?ta4hwj7q>haCHwd6d8mt~a{tCKF zHtH1b3c3YwS0MC6Z*ctZqA@IEWwtF*2_yPDR@Yc2g9+5}1MuoZ%z)$ZbZ3+78v8KN zjJI#!+Io1bn#eHeW1F#zrR)f$KL0aAlm-+oZp8Ml=^r++vRSZdo1=q6a(la#21_>t zHnZPBJjf5UZNqb?)v>IY2bnz|w*<*FdS4uaF<8u`hY;a6ELXr6hCAM1b>sLRycgBQ zv3~v!H;DCTtF0f>+uPf@CYfEB*wxYR3~ce!#zqNQlW&!0axey;tt>Q2@|tR^>2VEq4`KtC|^MU3IMK?CTK8hj&ipg{2lN#EP^ zD@S$45)CHlTta?({*h0v`7rptg`Sd<5^;x>Nnxd#*p`Qtjr5O!9uu$zhGZwqN-){} zbVk`9I+SO)EVIcmZN7qI@^XW$eS4d~d#R~xj}u@q&;0!srh6dktYkjwN@Zo=SA}_b zdGdCXU+~YU^=r6C!I`eRw(|4nu;NM=#FA-k(YOAF`N0_L&v?&PbTZPy@JCdf{mmZZ ziN$mB@^-&d)9Hkjfj4oI*TVYN2DSFtMV?;6V4tQ`^&@S|bGfXUDN#wp@?gIU&${qO zf5TR;&4N=y4b`vG=*+888tKfM-kbP0210Lf}7m@4-}w zPp*aRBeCWPr*N@M8Bv^REZ_(;eo3am!AB#N>i=#-uM6s7)~7{BM}wuZAE!hSi3%{{ z$a9e80VV+P{&E*RS#>`v@S}>AxD15dzSvmU@tuu-Xng^LBm+bM{}1r7&VlKhk6Yk? zyYKO1eONN$5sTbW>)z{#I#9| z4zMGNFD4q;wRGU-fEi_$ubhXh1oOm_h-c5AD?LA{i#I304*X3%%K~*JkM7EyKKTCm zcE}DL?bJJ`uA!lUqZKf{F)TB_*%jPX;x-ZRPCBsu2DC3y)8se^J|4F=<_9AscXf4H z%>4NAV|va0WS52ijE4m?u7E)*L5no>9rN;S#K5j@_^-hVVnH%;;D2N^?t@-Zh%Bx+ zxv8nC)%|m=!h`v%CR?@%@b$G3hCqzmQUG?-yDPULXmn@;rf-l+_Dsk|OlN)uFN^u} z=hI;#K)@!|>0lBeFv=A+Jwom1gZY~y(0ND9jrNUp`znJ+TEa^3RoG}wc%{^4w} z9B%Xm*D*n}xf=n<668^L999F_;|@~+TPO$lwQs;bmE0Vc<(y|-?){zh7B5)v{L49# zkZ6N;M)jKtRWq(o*36I(9_GM5ixVaRLDn0KoCRMjtg&5&RgVKef_Z0=gKSH?MYi~J zZ;S3}972JK=D?2?vW^wNU?XuwivKv|(xH_oj(0U|kk9_z23gE@b?o}Ib@5uv8;&a} zoU3wKeqJab8Qm{J@f1(}38rgVaZXMI8&~6dBC{YI)8hqRF%cUD^ncx6o8IfBO z{LL_c)_JTvbiVDOLu)ut_ciQ}1%RRc25$Ka?LOFT@{q2Tb_RNRlDOavQrxFcpB8si zS68R#OpdbB!tx-B-e9-pp3m3(%A`0?;-6WbPRd)r6!gl-<+k?r%L_#fK@j zFF_G}R$__?z>+=~hJTTigE8kEa&kF8|Te4vq z;}%AF`Efe*EU3>G7BnZX%*oA-#Cu?1{J$F(XfFr5&K4-yy?zr`YRY=vUw0@SO5PIo=Pim<^vMS0N#W0q^0OxEY5JZl;BNYuWBf`p4YD!OaH;C>UohLuAjs+dcj#;2MKRslumF5n zEF=YtjEvUSTdfcK!o0AZKek&?9eL%iEQdI%SVQabq`~qvwCLi`?K#)4MBT#MuI(O@(!gN8La5T8&+vhK`QobZizO%y>v2x+6OgE?_mC zAS`%mpgQLv-TGP#bN9Hx;;pIAG@wD+p8CQrx0ad7f!{?AjzbR0mLVvNU}_I}dm_7Q 
zi0zLX8fcxJc1DJV+qygpClA}X_3nf_$P;=(A9WUXkC}5<`OQlC1wCvt?qY7#EI@;dAs8h zse5-qevf6w-jACxIP&~!EDfad4uFUQdQuN&8k>9|CPHuCG>qVR*5%1b&ZoPt#i0ScOxTnJ zz7g2IO14P{PKgD(JHcN9l52Aw!~UNIWWGm`K%EbM^(1O;ZoS)D!dy;NgeBLk_3mlX z1mBu+LD7j>9L%Cr@d>~$um&A~ph;YJ`5L3(UqcqN(JddS$Q|2@Ju9@Bv2Mt^arOp4 za{i%@*4|Lo1(I>q>M@oGav-idhPGMAzqApV8DILEWx0M23Z0OsIopJgC2xRg0z>w7 zy#!Oj5GfDf)tylZcpY}BH@+yiJm53?;vD7YeTg+-Hz^nU0dRH}#r1Y604X75(ZJ>j z>~Q@GP=8>7S~Zu!!K%v{Bkx{ULnNh`!1D9FI^aT~h+?hMFHVME=odC4D3S==Ljr9D zltkbsVCx6|``Z}0gb!}Tj@48zV|saTK_(o$kQEBS)8NMrOAv0`unR%)9nLpzu%EE$Uoo8cw+XuQ z#|G|!A06_e513-`KcE;W-EC+I`Q~-PKRATi(7^YfIK9v)}(8<*u>- literal 113591 zcmeFZ`8(9_`#w$!EtX2MlT^wcWgAN+^ej=?Bl{rxzQmYTvQC?QnNV5AE@K^IpE4NI z*oF}kQ<%|U3}%e)L%rV5&(Vq9}EDaK`=&=dn9(*4OT| z>ni!$|;S(_mh znbHQH9pHaE9v(Zc15g%83L_Cdd1v1N&POHZ_dX}Xp)s`8;=+_e!cYF~nx4wbA8~+g z)Au~%P~lB&?`Ub~__tN^R4{lZah1}&C4O2 z=}SO%6H!r#>XE>!b(8A6M^7+D%k4DEBHip*#m2g&2L!#+OOE_sll89B{pvFjVI9h+ z;n}z5rt#)xx|dQkXJ2q|oV7xZhEGGn%N$VJ)TYG>=jVsjZf|@l(!Evu^7X&Z8^@z@ zEu+1?c0gNxK#%FexsM~`NsX6t>|r!>YMD<|p)Zjm@Aos=u3u7ZhZfDVa=H^JMYW|Kli!N|J zxwCt2Lao1_$ExppbmBFJ%i2EBuv6}N1y&beo9~Bb3^z7y% zTAE{?N0mzD0K;%=qZY=U;qd8=xnt};y|sLPLt|rO^r%wfkJk+wvx$2-Ja5_NSKUeN z(5YZ_lT(>EdpCAqU!Y8F+g|z0_=S3uii)TDhE~U z8{v&iYBR}0pJ_|09a7P!KReTtvb%3ZxjA_?zWwS*(bIj%wK-*IBl0-M&s0pfwId`n zu*F&#CX~0DLq3iP7f<^WXper)0YNVoQjAr#4;r!c5X^PqaCN3H;Aw?Y+n$4M*Qn5c z1KY`MiNyI)mh$5paI828HF!G@C!HMZ>B{lQh)Z;RV-p8$y*xAX&)>(F4AQJvVu^ZQ zuj5H|YdCFiR?drqbiVW0rs39Q=lHUwH70t8S*m=gT~G4+@BG}Hhg_l)8zQ&}qrkl! 
z!X|~6^QB?@c7IY!+qZDwJTd8L)?935xqV|z@%#L%f+|C?hi?7|NGVn3?ce=~_qjQu zK}_^AWLe6D|lA<0Ui;`NI)J2FAZh19> zI>Qf*P@zEX{5{0GK@Hg5s|nE-9_~$*C#_(Wm19N_{NGX^w``G{nw!O9OgHluqQv&E ztu$_ZccM2w{&;>`e=uqhF6EgSv}J_7bi1nhPTQTfBWF#ig+T6HY5*da`D`u-9)?w{ z?;0|elE9`b!>sU2dND1$8a{f!v36KHO38h5m=1d&@aW4$3s7XoLD}Nlnp}QOzdmlP zq@yRvjXc46%aORml6T|l^$&>`zeRCJ>AYs}gg_gxgP+}y7z4%W0OYu*UU=2&Pa_n@G>5tL*+5)A5`v?RAbKeO>r6n0;QPk~( zy**94vy-P9py@qQmZ1-g>C4urJfHyHpRoeb{>N$dK7@{^p_U<))2p@+WrT8Fmis!5 z1b(N|=H%SsPKaFyPM%jA)IrSrcE9=vud;c0xe_VG@x~COrB`FFPaveEq!7uf?=thZ zzwmjD)YozIiT~6M$J#}jnz#B_sh6_pUSbZ@?6ls^5wkbAOWMk1=nU_R6li3g4b(x? zwI|7@%IQjAYZ^m#%f6f)@g9MY4_hWm`oZJ#dg^hYu)xos!ZQEVOO-P}s?x`Q zS(aPjJnnSKpJSXqUkE8mL1` zClv3!f3xrGm%330yUB&r&dvE4gyqFrm=r%wk)9>cPeBarpN5`_KjJr!kS7 zHhhU0oJgH;dH?d0xY6~cadYvf3br+;Q*_tIh{1I8$NR09|I{TIhCOog zq<$JUmB0FItv-;{Q&8wPUIR8Pt}2Vk2i|n#@|Ae2#(Pjr<_7KVbl9hd94MX~k1-tr zjweS7%Ay%QKb?QoNww}FNGnbCT84ASLjl1_sq3~9N}b*a>q@1GtnNf1%ms|NfSSiZ zlGR(^AY`|exEs419@KlDbj%Zh&M1gslF^7Vx9L)XJ`iYV^UtJjladIlayhweS&RWk z|CbxXwt-D~H-?8Nz-q*mwE7ms-Q0BcA2sPWH3C0`&>Mz*dAcA|{q41anq$deddbr1 zUf*nqGq#b^d!PTqeb6Kaz*BV_nGy{jmaeq|=Qzt+#p#!Uf@I~d+wi{g@PJsEd>%+* zLEK)dfR^n_1a#wU`a%czR#WAzPvGXJ-NHU9+c#82px!5QoV_bw|HhS4il|sH57c06 zraL@*V3MYKz2U*|$5}>pYTzm(7r(JU$hCU2m`C;+mn2VbWnJ=qD1V@+n(z3$q5CeF z3-rq%peLyH1(TIO&@dh20TRCD`)Pv;I#UdI;m%+npB$%{YzJV*kyhEmg5|vqI+P~hXEpv#ZOd5^<^V%TNJ;VLi*AFPdE%E`A9HT4o zOG`Lt`sRM-Olc;BWm8hw@#cya=UPv1TcmCEg}rIxLxoQiRYdO9<%-`Ny&r!6Xj>Vh zX%tmad<`_TK$ry}f}oIkE@XYYJ5n41u2QbY-VdL$)EP)a;nJ5;45EvemgF8!2T3gWdB0A)DN0v91 z*X*&;I{hJRPXZj9X5ft4eXMO=UvDitYqTH~1)%~hJ z^uHn-a+IDnYW;Xm(T@h28vCPiWtF9rhyzX=E3@re>awu?6=?RB)7D=|aJD-RfR%c% z+RnwVHQM`r=x9dp3lRA6u^&mK4$wZtlC?h$w`v-UEKRiOv6v% z$%3t5?#!2yc5LE8XKnwdZaP69_S9!*e$MdRW_`Bh9SLsEi?Rp-JzBik(&{9y#^fOR z3HL|tzo@ojZZLgwE*fPd;V_rZXbxXiDpeDYTB>gU`9pmdt9&iIENzl9ISRgA;1DKC zZlwjNOhQ-@Um*9a&K$q&Z|a9AbFi?I-It*d7X{K9DZSG1rR;`{cEI<`M?W81@OOwE zAE|Vlw#yAT-MKU$Fsj8*Ku1REye0Q}k5*Wgh&b)Dkk!%o8?^#*@*Y1dOXxMVh>01? 
zmqV`M1r_87^=N#K;n7w1|pFkTIh2QX-)GlV4Z5oM6V zzQOEte4=WbBl0N68}PfZyWniLsO6a)lku1zw6-Vv51$1`Ol=+hsP0b`ub&FDqt&lc z{Gp&$pndD<*O<1q54k-(aO;D*a;J~&`1yKg+rY?%Ui#-7op0*4`MD#n+;Di5p5ON) z133Es=dl;Er&;cB<(I0bH7(+-xx~_x?u#ZRi|Zlu&J_gjJ8Mj3mRc{7^@6m;zUaj$ zSj1V{*SP&-VhBEY60!e&%;sFR&v=cezrXU;sYoodaVhJinPb1ovNRuXKA^)hZf*Z2o)UY7_@l2-lF^^!(;Br%dTso?%7DR-FMYa!$j~ zg2dSpGl-YYuYb%EfK-e=kl3H8R@AWbTsb=5^J(yw`;#crdF2IR8w^zo_}(8@!;X<$!S6f>TT++___kC(8$He3g$tZ=0y2%`Fp! zN5A<=HLh=MwAE?!2u`!^v@AF4C(5JQ+Z*-%(HgD4Da#mMjp?uc4Y)7Vm!Ewbp*!r1 zL)bm#3ZV1w%|4IyNG0Gf>$GRCuhU!=J2`CsahKy7R6E|_fy>jRe=~ROP7tKE^T$*j zu(ObtnG0i$Vf3t8mVuI*m1)E3+=XVcrE2KMO|cpkj%qt z@@>-&`D&aE{dLn(PEQS{TeIh`m7Jtk6P)!!mexYnt<2CP5P9fkAurIcd=eKspLJTY z`Boxa<#P1RJm!|){W)^3Qq#|C(tnFYhF5nvPX>?r8hBjfwS=y%gi`@N{Ka+RWLaOT_DNfOl_5>Tqqzow?At_>Kl`BNB%9E5Zh97G_QT`%D(4(tKKuUliwNt$D zfWpR85=p&|elO(Fw5JX1k9H41XKGfu-9m2sz_*XLYgopuKe2jkz2s3Au0mIX2sFddo_|zCncwlm;T1Yl4x6BS!^;8~~&#V5NAT@Ip0!Pi361C@iQ9 zTpZM&bqSJ}T_&BfNT~IxJS*2{6-!Zz-myc|?NInvi-a+rLnHE1qwvN4j0-Xjv;^zp z_{dt`oTbaodCd9!KK*%xse&h-A2e2m#jPwOSfCQN1gAl8G!NkiSc8}Dwm1~u`Dsduq9S4l{MU-ma?7*@quX*ysa z5j%$B0(=qQ3`;6Fcw-3$mGumrwbpz3 zmS&@m$EstDbt1?g$3|Muv4d$}Xs_^#vzYp6+p@)Le6F@r723v5D;Kw3Z*MJD!hX5c zAdZ=aJaTTRQQPU1Vh6M=ftBU2J_eIxtS;XGfMNmz=vZjLPpW>}G|nc-C%iL|+xtrE zmML?$hYw^-g_}jxotF67<_7w^cQg+{%$K{dD`yv2inwf5lgQb4*%K77?Uf22P`aYs ziJf)-ofErozO-XH5F=q|m#x3L=Y{y5&BKlxS91UovYUww(;`gr$m*@Uf+a}-q?F-x z+AhDuAN_rA;9RP84}eO$PkGu%xl2@jcZeLajf9?4IxZGfdTdY9fUQR^o*anmG~qWV zub2Rc;(HbHA$>s)7H3}VRUmKYCi~TLC(t}Zf1UNyCdA$cA9ioK1aO>wA`|qz|nQBc=__E5yuU|8A=P1>G-W|-cKban(C!9HoM(`7IUeVbklL`?lLK; zm}I3JlWl78hwK40YuEHNuULd3Kh!+kmto&% zR7DIq#^Gc0Y?OUZjp*G%b!A6+`D1A_ zD?UQnAQ0&dkj$Sw-`9IGAHa7$!;QJ({N{l#NJb3IJ&_7rZ|i2Q-t~yD|HumZ&b`T7 zd++>w2D`p-0TyX;;XZ_rwJLFgy^SJCEWdbuSg5CRd!kZsRZiJQSiVzNxx#Uj#iHrJ z2im|y06I)CBTl8l>z#+w^k5hR$P4qAN8Uw7`qcbEpq9$l+`RsBrakxhrfmF+OGWVc z)jf-e%%zCWrp0E&joJ-rjb-2t1mwE2J~cFD6$4hyn~Bex5K9!zF6Tv@QgiqO6P#JvV zt3xP3vlr17x$?fhP#bcDzF{+cRPjQ#uY1gTK+680t>|feHc5Y->sa<3=p+q*P0hvb 
z83yv?=pMQ6BPQ2_Ps#n9V~zSviXxibdvtwo&dxLIO>GRf@5T`2^=tJVHlKf>f#3;a zD36_n9JS_*Jx)FrXsKd{BkkW#|4DsG%?mWk{|(TcIEe6`lVA1fQga`aF0A+It#KjD4h zzO#;eoEFORM7OyQB^Bck)=cRntaY2>532{bH%K0K9iZ2=x{q|zuXF_Mv^saj5}mF? zHUem~-L$nLeUw>Uet)(Gj5%!xdDo$WX|uR+pRkK8qKd9&ROx+Imn18LyNMd;&3+W+ zE;3c*ENdJWqwaPa9iw+%Io9Tr=baX6Xyqa=EgkDP%{L!qszd84@cRAw%*}`J%1WXg zUm)vsddulN<|KKYFH=Xf{}UR2WF{g{pE#|h?}z@NCTTM)*i1NqnV@I8o`TSOqGnr} z4_$Ecy4Ak)iilMYB+M?NPP^{{M?XTc@PL3Rf5)!ttgWF}Ier1O8T2ACU%R<9SK3v= z_4;^w{z_H<1gvVo?q(ENB}yED-42WamJxpSo9J^3HScgh6gI=(xUN0)xWq{t`G)kNiTK7+u;JoS^07@3 z+s%3%jb6-n1u+P#*iFiR@(c`K-)&AaJ@Q0^93q^T#%qf-crsS8m!0%tR^GtHH6{ZL zmZs7ANdIX`#$>SB(Jf{9Q^e@ienV#Na{q&b)uy<48o;EV*`=+IW6iD1#N&e^7e}ED zJ*$jrC^Wpw03*lUJaJZMu-V5ss!wNja9O*4V8xIm5T5bk)iI8HSLnhYQkA(fP5%O5 zpb4kcGa_|zQYu!kTdHj;X;+%|a`PS6*GJZ=sh3ZSaeKp}dQZ;mlkboXl}@G-`4W+y zm5N`tnl&&e4E)3nn*{1XU6oe3WP#YSWaOLo?*iTO7^yU7Z#-XX>XnW%pS6l`6oS$C z_6pFboH*Y8eCs42Ed|*s1NFqkv+FM3cxLCtw4hdxoIoQqF8X&nw`+l?%%c*wrn)zz z{Ch(lbG@mCo)Fbk-x(Y3!Nt9yk&@}o9d~0KkcIrvjZW@z6YD&)`6#94rJC7}I7jwM zI*PN+BKCk3a|~kyh#=9c+0#iFU25QksP(0oRqSR?I5Cn|R|zRE)?~ex)h)?TCv2?L zvRCzY)MA+R(B|+~8Ov!d4K;YY&WWs_q-Tc*w54Mx?^A3(1v%)j-oL0veIE-Hn-_jG zgmr!E6SWAab&44i|JzSX%2Kb~5rYydPGN-dO6^`R094UR%DG)Sn;=8OQ)wSK3Q-#^ zUw$bFFPRg(7g5xTTh+85aowRBQ|j{)qV)V*PF@*)1{}$5Qu-skkTExlLHSMr=$bL~ z_|67wXUs#S*EylK@I4@b1YF@d5%c@0)S4Jq^@KFBxb#k8K&MTqBdlgipv}UmxaFsH ze^JcOsV^3J^~v2YYr!v6aMSFq${lThk~|SS-*;Yae;UP&%^Hl;yXZ-9v^vb{lGVGT zFpAM<(`G%8D7xkT;BzFNkM5T`S`NIkhqsSh0u%DuJE1!asPcR!IqSo&P7+ARTf@ey zzXT9wJ5l`u)6B)Z+Ka8fS6)D4hMu@I!!RlseD6z+BzuRA+m)8`H_c@Kcby+xT(eQL zMD(h3+C<00b2-NwSBD(+g!#*+r6^O}Qlb+PUg5~KZ`uv&6#zUQ&;?PU0r$~^U#_R7 z8ayfiYx8nmoB*K6c~C#5P$p6Z{PMS8WrU%9#(zS5~@evbMfYn9pF& zunb^9aU?-`t1gS%QdFtrnuPCw_>$#fvxweEM85z?-V>Fn6n7+gb-q8*SZDSqZ)Y(0 zh`#(0rTa}6cXcr0jb-rl)@ar%SoHgholeR1DwixDSRBG<8oE6SMR|^@u`-6q(W_*C z_Pl>e!8MBxzhNoT*rsAW_=kS&+tp(D0|63!7pgfg0^r$D~j-Z&_Gm81sWH1Vf*4pq2nK_8X4Xam>&!vxz3 z^i$RVDOkuGmP#4em40tv#?w+Cqm7m!eXf#f-*tjEW;}FAB7vl2jdi|D_OXLOd67FG 
zlpznImt3V5d6~)cEID!{wUKPT1eYHmvXSd& zOt}vEPl>`Eno&QUL=xLfwskCQAC(l|3q23Q7Zp^GVPL88JiDn@wZWSASYE_z{9dku zwF^#=msLEMSUvtMqYKwuARH&BgLk$WLIz5FNOT31wM&*9xCJyEQs*Si+|HbS6t8+8 zXbn{b$u-8%Qt%xsm{S2!iQrnDgl0X0phLghAB{)4hPgBBm&t6qO;V4%kw$4OQd3^;71%e;^*8 zJ)ayIe-(mf#2gbFT`mnZaw{@0GqKRW98Sqj7n#x>M+h>#VGN0-l!|+pZ>+q%fwM<- zX`eTzXm7ijvsyRTd(L3jyAG!XYAU|+0Ptcd0pLw{Hb-OhE@ zmV8{o0{kKA)j0!&wr}PVauKBtb^2KV2P}e1A*vpE*?ANS@Yqz|L+^P&KVBx)Psc>T zrDL(Tox<{Tez)xfYK(H^mzxQ4Cdp5pZE8YCxP=xB_*coeW!{(HxKH&CEPGd?)OY;| zNuI@>TMKsqUGd|SUjRLyF5Xe^5E^9?TQhYIHsPY+AU8|4T z-r9ifEbj=mx6bq-c<8CCpVevp0JGpaT5;bH-=^whQ|SX*-K4<&lNL4e@0PVEo;TIyOLf@}QwBciuNqB^2`GNO+r5%^0D zw#)k6dUbrx7avpWyr%u30ZQ!r#B9uBp znZfWUdRLi<@wcsO^Tu{}#;Zm^cbSpzkb`PK|D7aKy+x(aW>AZ>c$lu5QE6Gz1&KYU z0QMyb5E;4w#OVUSN6xtBh3QQTlo#|pphT+X<+gy^zCSRFx)%);B8t#yon21i4c_{+&&U3AgkFr^dNs<| zCXz1$f1oyK`7)kEEEi*;4^rm%9O;&wMe!Xs8kHwd|JaE7bXw0FB+o5sP45zF)t*BI z8~!^KbT^<+x9W8s8pV>Q*OB#t@cT;SsqpEk=88^BN6YFv95qcunQe)rYkqG4Cb@gI z+j#w;)x7GwHw@$)NNeyIUpdM<%JM)rvhp9KP=a$VzUrO!-JTxzG`?li9OethXo-gt zjbw1Ct5*IB1DRs|;R54TAiWm5!N-OpDYI5$lY)^ z3-8Mtmo)CPJEBJJOKhxbEwe_m!o4L<=NhuBn#s3~`^qxa3Yg^k-3k+GTQ*GS$jr}y z1d+Ow@=R{Nu9je2Qi=q~W$4L%nFaeV4;G5Vd@C!^J^yx&4{9H zjiU8FHhCF$+l025Sa4g2Vh0k8auu}=xU9-mEO;rm1ZAc$uymT}Y~@l{%-1)07CbN; zDX-7BFI#Cf0lqy^u7l|cws%>~Qm%u+!n4qgIOxyeD-N+If+X=Loh!__gxEh}>|)1D z@1uA8w@3U1ar%Hll_cihQjJq>UHhP%yvy$0q5y?WjQS~=T@DM!ta{GFeNBkHa|4@h z&-*9!E#rp8ztr%$tFAw8jBacd8LV_9+m8mJoV7EQC~orS5u79Xk|{p&a9Wbb8lSJd zX*$I!b|yG)@BZJhTS;ZasQyc%Yq6YS&)GG+1HDgSfN#+i;Kos@kArQ4b~2d#sx}$* z(G>X3CY-dr422&3f&TIO=GOD66}F+!{cq&j8i}cfKv#3-_)16RmR$SZ^5DAHzaw%P z^U#3h=WQj(~b#7GxprKXm38@G`@Q%5o4sP~$AUr1SZBa_zJn#!xa40lF@ zqvjXGCs9fRnxWnApC6PdnC=cQ)CJ52=?_J;0W1y|E2QNvM;B$7p<3F&pNOk@jIMNG zpY(8Z==fds@4|_iJ)TxOW$8P56|Gwf!pe<~C6b7gHRf}BHjl7|46J@vypO;m94`H6 zJ(*s+#ds|w4NkRq6#tidPRjPeIB1EJZuj;+o7;XB{Q&N`6-Dc}TAU zFH-Z=**&B#REV=JlEOV(oLOgDtQ+M*(q;5ac{2<#tXipi6%*Q_^~MAu^=HER1~r=~ z29Fu_cEKW33=C}XL=}W?`X%{juqtivHpZ~Lljp75<@XSyGIXtsUTtV7tKED6_DT-q 
z)#LS>#Arir1C?!KMt#TwW5dO3fR`yZaO7aH$CiT*Ozue@Wt2w0MqhdEbvo0D$e zi@j~`_~z+ts*4@Wihb*17JA_2V@pOVeuk$g;ngmyc$-0fRGH3Gs#D>6RdQ|9yE`WmQF3__(+d%SkiMML*!fOc5A?qoW^lcV`X z*;rHJ4xh&$SD1_LiF0vl;M>~BsU7k#9R4C+CZ;QzO6Up*q zFhOndq#g1OzutyZG)Zjwr>}RAqzLl-(`U(Y2l=cc^=xyEMUG%c9XC3JdUhrMSzWMQ z7Ss_S{YDeKx}>%S;}X7`wdPggacT(RihFgX#hw|=m94q{goW{s%hPByBY4+XaP&D6 zJChaX1&b5V7BA0PhU6Z0At~k#S=oIAV-ym(NBoj3<@xs?3TFCH#za6TBn1~4iYfhK zd|kGfHQqyEBWxo!TBeFDxZ0Fi1TdXiQQ>TVGaB{z-06J@&ZV}XhP)mFpFunoWkoJMt&+Rr8Rx8$hU>H@__dsn$qRuK@$#g#H`d)EMP&)bIS;Sq zL@fjcQqAE){>&mACg+|MHYHc0+I18;)`%#~@V>xoGE_zy4t)t#X=-YAA?}AI=tiw4 zHLY}6NG@n1WO>P2Jq}zNn5$ufgE_zM20qxWsTQ$&d#;Kc`ph@%^-=|n`)w_9S^<=; zwlCvK2hY(T>_*YkEP!8G-N*(MEWJ*_`RG;k+b>lRa^MDp9?u>0w9;sO^bR0ZPkPv{ z0&>i9RJPYm1pcLwO#S_^oRse9ERnkva-g;y>N30H0BerzTP*iv(^jMn%i+ov=LZ)> zbLp-I_dJAVF1}pMuAi~Molid_Ka=Ec*BN1+8Q@jQ#|?VxQ@`7b&w7k2xi?1w_Ppbi zvD;TVUVXX)PVFdD%J8L<0apwWryM6caF}!DNmeu~!Mdd&f`Vp`?o1R>ln4wRZz(~@MY@sddsnqouEjWQN!)6)PS?+PgGJ5d7wZKpG3V=XzCh%Z>r;~Y#hP6y zND+8bU8Wu9t*sO-E92bCY`-jsb zA^7T>1&{%QqTV`kI&%31?KL6r;^UFdAscZ42*CU##j6iqu!>YrA5y(N_VbkZqmrwC zosB^Ypu(On>Pm~d$N9=a9mRdTVFP(jtAiY(%1Y^_ldGKsQj@p;3xbut z@L?gPgjzHG3zD0^1ri6pZ^AUsDCsa}0%@BbJ%zX!o1DO}y*5pr4t-;2-F1n`Aba|p zlwnA?kw>{&$aEg0j+(jjZ9(eAdF|Mmr^R7t*H!ig3_d9-bBFhVDgLr^o%UTT9?czA z4_;V}1vpcL)`-u>jATKUMuh6uxia{fYNs+w&;+JH)J()hlbjR$(17U)?SAK< z5`gV;x*BU=bL`IcV9$9(J$ufP4Th6fbT^4PAMd>UJ>gxcsE25k0Tq3@VXF0>whG^2 z&7$bTb77wYj3DQrfXC5L)1{%*@}P0^EsTr4N*3fHHBq%pLKGmdD9n%I6;0cNHA*(Zfj0BlWI7-}-KltBMqwxA4d_+EqqcfCD)|YLi79y!9+GGfas9PG z&(hV`M!o1)CDEM3YodvX*UrkcXpDKJbQ^})I;$Xft^DVT4<*YFMjT3 zTFir09sT{rD|6Q&2P(r#^wZ%%`B*Ni=6tLf3&s`6_e$I zsS%wKBmRyhl6xPhHdFyPdUh4C^^`r#=sY(X#~+lH?bmevPveBsD;-I(KuDnGFIC3F zAWN=*rZOXl(^>5kPN^|(ZC#DN=-clZ_fqVZN@SIjy;ffUaU~{f-km$LkdHhmHNV); zQ<5X`d2gs>I@~hVxg%$u5wpy2xhPs5JfM+YVn1E>F4MBESDI0Z%~;(4IM)l(20NVP zvV>X-q}uqp+way^h{R#7o%_vUg3>HIe^lLpJ@difHkD-$I}ku($CC5F0l$A1!cqwS zXw(5d_d#?_W3WSGU*u35XnJ#?;6^~5i@WJ`($;P0W<(WZ#L@nOL+5H5f%-%+F6Gw4 
z6rEDsVxwYS7_oN)$3%8N&JYU*F}l9UR{n&b4*2PXTKa7D^h7w3J2aOwrf6eoFo`ua zv2waRy!y0*r4I~TD9IPqhAa1BZuP>-m0$6*%=VJ2D}LBCUIrKg(yI07PU7KrzfN)r4*?rnQPi}Xn+lr|^78{)r`)NT zN8`LyE~Z?auT}Gw*YN26>T<5X%nNUVF`}c)8_m0f7TRN;ipIhnT4nt$>kCD;?ahNRx%G$Uf!zz8V}}AFQP)01jP})F1x{ZCAlLu%mJ4V%p=nMCI0o1BY67CFNbc5hcjC z>t>(<(?bYG!@F$?5iK+qt>j&wYW`pp2ne%v!SFIc%o6kUy6- z?6JA(;a4=uo*3ds#atVn8LBz9hFfmZH`NctHfiYfP9{?^XI($VGyHQ;R1JNLY`~BUpQ&*BmFLpwS^xO9^>*_Me}LvvF?j;yzhl%l9eq=W*%wLs zcoQzVyd{Me@@E$c_0+<{oONd3l3ZsBWMme3-8}=K)@Ht?A{U~Fvl(d5NCle)ofPeb z44bBC!#@3$;(J9m_%bhjey#h-H*87shz2ptc=!q|sNtjA*jcW-T#t-gR7*_`&r2+J~ znU@m|04(tBYA<3-c`HMmPB zChMnk%BmUy^G;*|Ylj*VFH$em=_%8=UFY@a%U|)`M2nt!IO~_@`Ep0J^t^a_=b1Tc zsebEA3W#G!yxzJKg-Wcbu$c@#85550YnTz6j%Pp@5P{!jBLh2@Bxuh**v+marX{E83|a$L8Kt$-xs*5NH{ZC@Awv!W}?wGo2Z9-YeXR4fELFBi1**zzec!%=dFuIZ0n)hi?Dnh#T6*UTrL^yDH zWZ9-N+8)wNM~CYhI~)n|#^8koIPtFDVV=C1U|tpu4- z*KmsXW~HAG49cce)2yeLrSx=dat`YsFDRR8;#=2+qqmw7uAREvE{>SivDSyytT{Xr zYP=W)L`BGh@Ut5u}75VB0&pr_Wlj8`~znN{ut>M(QENYBk8 zt|{cyYo1WBKf>*mD>kJ|srFuGXXr9tWT=I=8(5aQOCEZ8yi=y$!|d88T#f{|uJdBJ zL%0rWhCD(A8zN?t1An)Td{%L0CW*w!*{}Uj!S1c>YOmN^?ZKWEj<50Rw%|MNE^Elw zY;&*L#qiGkl91hbA7W+9=1w@25e|8B8vj7-d|Xu-U({R2b&o+AkVL5^4_`Q#qgXUV zq|ePF_tC}I91lfK_a$5*)NU8=JGk?ke@6wN92tP5>R$Yg=cs#eWtpBuObpc+5Z8p( z4j*}CWTNx#rV~~bo1-=r(crg=G|`lHC*YqtK?5yU_8)T9!0b17t#Q@bi+3PEFs@L zuEpD8{H(#N&eTQi*UrhoYIG{a7%f?#sYmsG?I?9AZ^FwNUFi!hJgkt*6-swp}@A`Rl9~h*WFk7mK zevz)rm?Zu(vwx^E;V~K$&h#c!qGU~VtCy#{AIQ)5EIp@GL+yO;6WqJL+;p*T+@QW> zg};POgPSkD4(?jmbe8nDP-Y2x<4K=`A_417BORmA7$c*_0(G)!su1 zT1i2WpVD#0em&Rab*#XcV2B{P=1o`wXQNU3k4ZR>tdk9|z`UN%d*|Qw^VaAsJTTTW z^E2znfUw^THa~#f@hs8u;FoPAyDv z?85r;w3O?4!rCs%FbtiknNsY8(tE)WK>odF^2V9A;_}fx9Tz>X)~$C&o)4H*RrZhI zvyA=y$GxzT+5Xul5$hV;vO8pw7)5RL7z0``GT;Z<`aTmt)<4!rHPZ=onEd4D_(|dp zXooU({HCex1X-7xKSbzC$2avt#fME>lG(Wp8<)$*9Y7T&5<&f$( z(0I(K)aa$)6r1vk_hUDE%nM1Mw_IPgI6?M6PpzMTEXV5jd^=nFSUm$VfMRRnSu*Q&=I zN$>ZfJ7=pNe<}TH#_B6pd0Xw=x8eo-%&EPX=?jS4JQ$~3b(ImL;gaktv2)mTOrCDk 
z*1m(o*~GPvr>@&rox1_v0HCCE>}1f+Z7HkI4{B!b03cGoi07Dl)jNPh*>!Sso^&>S zQ*U&oV^^*?aO)fP0if%{)(2cQdvAAn65`7CSWbV*VZL%-&rN>vCHZQ&Wus$d*x>vp znN;UGo>%y3)fV-%EM8u|_e1LT zUXQ?MIsw9ECww`$(P7+=?7gd--s#>*v-3OefXIW+t-KZD@tJI>)@(heuR#u+hA_H~ z0jITag}Z?dT`(TA$f6F?u7vBj<69b>#*;J`-+s6y5jPiv`hFgR(ZVdh0Lyz|T+@7$ z4iEga)6#-+Luq!#7$FBOH6q(FeT`|?i;HUSzB#)y;;^$QB&kak`i%Drh(#SGT8I#+kdy*J5=XLL>h|+ zUNhOBhS2)=%iOoIS9uA`VOX0h9d$w>@hP`7r zx&1TsLhZ=;PXRM`x{+Vz|NnDTa+s9q9Co^H?RAxRT5d*I@XygH7M6sY1_a}hx6dnj zB)jmzTsfz&HzGL2VC8L6&b(V{^YULZf<9sXGMqL6idEbA z5@nnluyW^0k^^?N)NW~N$stL;82v!LnmVC*LQ`PyUP53s0+{Gf$Hz~MiwJJ}WU6-B z-gVje6L`n0fd%NUxtc33rqMme{TN<{5r3<5({}nPCK#C~x+olRP))hbI)tzfdI*d#aqxOV%eYPn?v-WH&e?vCX6pX9ZKrps+wJ{qFZ4X57K}}j^I9J5;ILrSFi1qe zJIK3kaU+)DVM&`!q=5x&BB(`?pHezT=*UIchCV`_=TWf7NXDZ?waqBUo*7GO13}ef zUP(uHlvHaD_YSp^p66J_m>Otf@ExrxWz+A@1U~>WfW5IrrH#KNl&N|f2^;`r zsYd@0S|;_A3nu-fQUCk62#c9E7N`Q1axjOtx7X*~?c9u}7rG4#jbh}(z@4{3%1giH z;1i+ad=oXhV(6 zT+)mOU4aK>{>D~FjXz+qX_jpbckb<~V&n>9Q&Z(zo8?nRZv5jGkI%i~$BEOydmA;a z2k_P1Jr63{x=_0F8vSKae=(U+)ktyMKQ~d~A8uNq`}$^;bw16d7hl#nILX;{ySq9T z#J;n4HEiVLu2d)1FDh&|FA*B0cOq65mlC}r9P(D5Z)5$Zkbt=NF!tQwuX(CKn9AKC z(w62N|)Cx&cLEuYm?11}KGJ6@bo=pe>G@_s&ORbcxwJRKaz~M13yC<#U~% zedmOyM!1xk2=lGu60X<6$BomO*~HDo*XbrZ#IsSD$G^xt3aXN+m#Y@%g`vaU$Tkf?mWpOcQk1ASWQT_T%-dybx{d=ty^UJaxYFVJIc)GmklbhxGe+M)0AH%?wr-IzhwISh=d!VyxJ=rZ9k0-&w zz2OUodcNCF(tFxlp4snxR72`+PdADHXc^AUi8&5SxaXTBT$aOeZcZaMX)2|uqs<*q zlPTOd?yIs^^g!cab3rIFgm14<5#I~7f`(TLxqAd`3uh!y z$K&~V|7c$(7s*?lObhIm0{d2&=4vbbZD#$U3nW)!0)e)iulz9J*nA|3uWhLs`arRY zuzYtfN#Jbb^wVHuZ~A2KgkrG9>h=gQXb3*rL09h7Z%J5&68SVemc9K2)chX*#;v$6v&4F}I%m2w$QXd(}DEPaoux8JD}ZNyGY)rUU| z*j`{G9(z>>X5dPdn?&*3m89AFwjeA&@fepFO$@@Msl;jWk0>OF{cRQ`(`f&t#(=gX zANFv06I0U@!h(K~`I(8vsR$Ovwe`=N7qCH?om0^?Zl@!2dRb~*fhDHV)zRqfOyz3{_Er%HNYDeUp7tgWq%98(EtXql1%I!)nBaxX#x z_OVwFV4sZUY%m*0u0x(`hnQEYUnKa(51kliic2xSC;-(5qLLjFf;d0LF z9}r=c_+wv8aByF1-cF)2R?z{6R$LmhMHHfa`&v+wkxd6KY%VQ0f9D?BlA#SL%2L|M zyUYV;_Ufj_;WtI~cvMIkqWN*@?t4#{a`5a64!YTZF&zlMth)zFP5K|XsV6j;$=L8( 
zq)+N!y-FhkOI5;PkObR*g#bX+Xj||%4|S+BuUawt-$xs>ESz}r+pfOpgv0`tT|?aB zDrI*9n&8h*{JJJzqsZS*ajC_3+KwxX@5GB{O`?);mJUQXQ!Tj*Q|m@LU+XmYd8#HT z(6gOVHPsld5#VXbgKhGDhvVDyXkuz4ZWfmNKa1G4MF=3;{UM$qyBxKlOQc4($({+O zatCZN(P6U>(De{z{2NcbEhsNMXHI_Qqi2A2t*Whj!hf7qvLbLt-e_;}g+E4aqiHiF zSWf3jBJ3fPZ0U2zf=}#hcGYKkn$)gOVXOv$QHxm{L}Kj^Pq|h`?>sGG3N1LiSceq`xglQgs85x4ZidzyJUtn;K-e7kAKQ$|VteYS>4 zKaZ|iX9e$rZ2d(|EZSYV@GK#}JKns^r8z%Z9qJMta{}8mVDee1Ge*8aR>CdkZX?V5 zj4CU>_qfI2eG5LVvLW4M_m$-z`<%{u4gypI0Rcc~NYu##Tf@OrvROPd%N$qD%P)KW zP%q*)>(W*|T5Mi%1`dqMBvnn=BUhyIrd^c%rZ0ZJU4GS+(&q*2wm6>_3il7+TMhZX zFv_ZzC4drJ;Ls4j^7#h`_k4(tY_vs#_ZklL-c8ruN|MLF7@Db-G~unUKr1;9xemtM zOP2|2C7S#N**_KPiAMv%vWkq$h|s>YkolCPodbfPK&^1(()i3OzoM+JZHwYO`M2@X z^US(1#&HC_2##KCxGf6!)j;?Jje}>E)IQmR5lra#H9z1Awl3uI0WZjWb zYgJV%w{o7-W6A#kEe*}m+8e>7JDuPA#Tg7EucxDRWCk(3|NCT@wOSQ@xWGBnsu<5Z z?{ga*p9^fWHF&4$#e=-8`Pz=R-&XL{mO?Z<2Y%S1wq5h2#bqEQ@1@@wIoxx-SL22% z=Gqq1Aho~R7v7F2V}I)ls0TMRHKOoaXX`WU>EsdL#?{uk)mt;b9Iy3R>$~h1o!%&T zuO#u!>8Ogo*U;KJtm7xY6qdPf2_0#F`N4YCFrtHVHRKoDdKJV3kgCgtP5*RNhm=@H znv@478O438xX63akV)E6Lv5ZJbt%G0qEO|+hsAVt3~T2>&d%X_KxQe7m2tsnVIf%^ zLff%-Nn-&pszFvDfT0=B1(%4vut1n-diU~C;q^E`H)eT(ci_DmSH!>ah7iD#IZd=0 zI3CKB5(0p57zxdjY6_v+ciJgk8mI-97|jykH|!u1{`Y3y%z8U&xVwFUXa$k^#k&45 zu1J^jss~s6iIi*dpZ9B9>1;I+3h&t38aZwUkQLjqTQ=LP^x42y1L z8)hb^=jN@EXS<%I$vlX#R!DWTi{M?*VH5T5p0>-+QR$8i zEv%O>%+vmp(B;*ZUGVjNGLZwCHj$-TGfnBvWKbMv9L=# z&*CWKIWThsF8t|EX@#nCc%wsLZI3>T;|im6f{&P?iLm%G;g3=*tX0WEo>CFQ`2UVq zQ77r-b{*;eUug<%*+4W4^rs&k;J4pE;*)nwm%kt^;-29)Kb$hG^qxOnxJ=g?+*6TR zAOAU)kM6if`543rpi%BBC^;){6z%QsVc)&9w+v_QNd16>n}y7_Pr+9>p0IVb=@2eSO0rf2TCkBDCuT;uZz=+6?@i2h+|~^X zH7WgUowe}Gn7z*O7E$*z6MEeAxBC;!%ZEV$2{#W76s(Z1-J1XU(qF3Z=%CnL@d5V{ z@?3HWV4hJ))>Fha0JIswHelx!$2;==v5GCzmCi*b?=@U|1+!p6>V}W0`t=x-tG~_vO+6b$6Ke z#eK>dn$muJuI&C*3tdB|LMAwKC{OY=Hxr=>7FIHKW;_uvd@o$M31gPKWQ+_u8XR+- z_A?LKs>pc;s!=ujFj0jCQ@u=}^3-#K`f9*Blu97j0~Xha;}$>^wgU#Z zsU(Gk17)D|B>o1xp5VM|cGF#XIV6Y6H?@~#csGTUr|(({D2))ga?54eyZ!*y)Wgu( zJ2bMGKVi9wb6qQqrPoj} 
zI&hr`tcXsLNFBFjl$o_lGK9NOtx~ltX8_I5s=;a6%lXT0hC|X@VD))Bm$c;~TcKhK zkMRy0%leHbZ=v5kq{w#eCC;zb=ZwfmAA6gJ2Gq#ZWmBkf#FM!N(%igELNK0rx1Oh>K;wJdU2I?PKxvtR9F+UtoEka{8g?QE7f0ZyD_NJkOitPb{ek?XxbVvl0UqB*oKs*=yR_jb*NeLUNJoYv zu5o2br)x#Ek&6{miU9&TIAmCX2Y|Xc)#B*ER+A&zDS=S-wh85bG=@PpfbLj-b4d8< zWp=J^I2TSbd?G(TyYBq*i^=?vs_psfMa;$`39Y(u_RY(`r{M+6_L)puVj|4=Ev0d- z%PhujrE?1ie^cJ9zj|j<=sZAJx@mS?y)!D|s2tb?u_hTeUbH~!|MkVah>q{3_E*BC zu%+qgHpl*tQyt;KJ4;ku6?#)B+eg*l)N!dV@yVNfUN1Q)V8Jr)Use+!=;wWV(kTQI z6p_Tx@efP_-W!U{JRo?$muqGu*Y7Wy(b8{%{422UX44(Nmv8;43>r@|mN-2e)7v=+ zky9H=irbRl-;(kt`9aNt#Prgv`esMWCGQ}kkJums%qM;b6+W1fb5R~a!Y;6M38>Uq z4ftGJjlRGB;ClO^St@Q4V`^*7XxmOj!?h@(24ryZ=ulmy4TI!&)V)5SHz|!YUL7FH zCsU0^RJt$VQqR3iwT+L?QxUmSP$#Ev5v1>e$y-v%_phdNT%aN|l8-hgIg$@fYmIEo z^tnfB+6Cuw9rz;K7KM&`@LZ_gzP8qa65{t}uy@bT96sNnx)iQ8jXm;7yPfyA5dgaing6K3kt=G zI}4Zdtm<#dR4)i9gNeWKz$UdAyOVr?R#H(H0U_lNeqB`57}B@xP-lAybQa&_xX&B^ z@xD2^Q|S%ypy3t!-}-UJ_ot`Ez+E9;0g%d64+o`DH`y8++>@O;xFn7sWS*QJd%crh zAUwTi2?qg-!Z*>@;T}2sb5Ce$){HXb0T|>qEYO=5c5`aX@{vzp_4aY``FNH){P=)L z1#P44tbelJr@!4#staZN!v|I=)$YA|7UtV07^f zvr9DXOK#(w?;92p2C&rkeXXBu*2@4gRRkcM+R1^Mu+6lG`d_9Q8d+hy{|6CA-AN0R zA&$G%+?R$RxT16$Zd2<(W58f!@*?clD>`7Ty|9-Qe3kFhk($D4_x3!E{CU`cJ z4rg5)OAA#hOco3jO}hS6JTy){;CFkHUFT=p!8p&vrb{(4DObWKRtSF<%v+MhmjQZ_ zwb7)!0I)l+PX6G2*v4IX|BJcF*9zu9D?YMMGt?!C?S;?s>En8w`gA02sutkDPQ#a# zsPYkZFwQC6njO%PXuJ!m;kFl9^&Pld(gT19e~Z|jJ~bGXNWbMRUqPfU!~uf>ps-kl zZs%y2hz~6e))&}aJOe~>W^|Xz+5D&img^;4{io(F)_wbCu27uO!8f(6f=Y<>PwK9`u$sq$gF4`Ij=!>u$+ZE8Ej3%OM=`Xcd|oYEObrP zBNY2oNs{RYD}6A#Zi0UgAdLL{26hz6JMT%`-IcK1PE*YDpMPn<1J6JN6nK5}in1JR zB<2Z)1rnvrg?*CUbhSh#T2Ub>?m~mUq+3oLiXRw5|AUlwIk$#$$8(?@T_+@ z7n8qXEIWW9vb^iZtF(pMbBqfQyUG_^0d&SY;WlHZ@JxU)(glc_C|B|b<0ZcVTe2NO zI0o2k5dFU|on_js0t!pzCs{ew{|eqks+w;Aqjm3I(WcoeR2fsat)h`zXIrz+FRJxB zp-ABD7afE@4?sMq=^DZj!=C8c&M!AskH;Bw-(fw5zvTq;!eq>_W_#Z{tbKbfYdl_! 
zR$Pi&V#kxk_DLLhEjK#-H~VJZZnS5#`CIZrx__!1gfEWiCh6EN{NvG<-fQkZ(R-R~ zhFO)$^VxHg7U@;x^TJiU~Q%)fS3EpH4lFN zU@P3zmVr>8S(%CT*Xn)VT5`tq$ zNG`B+Og8>IaVNnV1-joFyyNEFt;T0Z*J^*9l=DQni#N8+W#zJ@!=mpfJR->Rv>&%u zCO*rfA!f-X<$}H&E3>^l@GeF^z?hi-&`vLS=RHbU87vv@?82*joNM-A=gm|*#5Oym zYs`KQ?e(tG!#kN|tt{!Wl3=g7gyX~7V@EpnW+{0cpjRp7fc%Q0SCSqO_g?w^>stXd zTIasMmxy}HaJ$)xbC>zoeOC3jXX;&Dp+h0PPBa>njoB`zfQwZFjAXXg;j6-HG*Jw< zH|{iFvC=&EH@)SRYu6d373S8rZ4oSySZ@e%e7~}4*=TZ;{z%g}5L32ez7e5^37uRG zCwWx5U~_L;$yGW1*cX47kKe|E=QPqm35NB#SLx$YW&viVb?i6Ko2TaeA0oJO$c?p_9aK(8PPWt}H|$&AwT7(b8F=;2wm(6!pZ_ggU>JhaJ$v`)T8jQzG*|fCiB$?^=f$w&Kd%eQlUvJ z#~2|2JQ2=&|Hk_gge|$1%KZ?0o}5P}e$2#|JUeQCi##YKPAavIc`ZDcgM&E<6-wK^ zO9F}P629ssqPew5vSSVtVF+AblG?wcZpw=^aEK!R$gh= zLj@_=d?!dU(~PSw_Xz=t4B%hs`%z~9d)4x%a#?|YF7SB14d0HhmVR#}HGa1|tt-;w zneMrxX#=&@n<~&Fp{*ME`Z!&mM$5?)cEzvtfooxdKZ-C4idb-jMq41GwNY7qy??t- zrqU=g80#3Pljq1%Y3=Pzp+M(!5x)iTxz(f+aEN`%aF!zWHt+rGq1$ImNi~JGt>YFJ zSeuFj$$7 zsLEvoR_$p1N3BtpAnf{uHG$k|DD+QLhXT*HoXYqY!uj}b+lAYYJgeI31{4@*5&l z3*A@l{3u^9Zm&B_f=O~W=ouD*uwX3&hoZK>WTdgfWOXh8x4Wg>G%lcJw28V}KvSHC zbJNgi6jZdS*XsjqA(-F(RutQ7FGBXs)x@8?7G;KMO@mah4BTBt!NUz?|c3ps@bcZ3i5ddpD zV$y=nt`G%fO>?F>m?s%&hX0qx@%<*$tYnH+A#A<^$y}e4tYjy8p2V&vL?3Gt?DMPF zW9cHe@30KN{){rOpup1jsxx2X&Wou@?`SoludNoS>7KnDnX>Y+Vt$4GVx7nScX&7H$CjU?J|SDawb$}+{3uC>^F2cLug4!0o^DJS`J$78lxe4Gk^YrTxTu9u^L3EG-6z+B7sh$A*} z8;3Sj@2Xk2VScMC^5zx`m~@Y-*bKtQALiqa7gcP)BEcui7MfT+50$4yDJyoioUua4 zl!GwcyMso_dqh2B#&YLhVpi;zOk$01$H$n61Xeuc{L8_bQzaQEJKl6U9{f%?QTbp< z3huMm{Rh4&Zv9OgbMZ*9in=83cTop5?8J(iKu=eJO0u75&?Tp=%VA9!3GPfWV#=0u zP#-xI=`x-rZURM3HoE?jNpECl;(C1fUdu>h@O-uw{6+U8#nv|ewax_r@5Y3B7nx&h z+8pZ^M;6P?!5^TNJ-5uQ`Fb>YFC83&^mfL^?TDTU%2uLD$`E}14$~B&q_x5&7~y^( z1oiOtt`$s{Ag*OPYoHYZLOM*IpQxoelO=7g$0SV$3`u-`I#%>_(`AreG8QFQ3%W`3 zvNq~pwmy(rw9+iTWzF`|ud`v&Uym=MD^kj%6Bia#lDc{Q^}^*MU1@lqY<@KMr3v9|U9T#Tkc*a{jOSB5(?FashrD-)t;H z&XhzZ%@nx`+^tn&E?$bca3~Aq6pY~w=(bJ$wx6@B|Le&jov#qPyRY9(k=dI3)2OSj z=?z%6Zw5%i=wrs4O6xwv-i`#46x5B>eo7<)5(RI+ncLgGRf(5eNB~Suhv|w<#9pr! 
zTcLzz$dp~&bg@nrjQ=KMSw|}KRpGS~(v!}Y#zpvBULir0n>F|&fD2@E<-z#oR~)jj zTCZGltvGX;6e-tUf9DtMRP4df7cvR_=SR}Z%pU1gOK^I|GH?>^nS9;i#YxXH}Sk$jx;{r{k%uwPPg?Jn=CB>M8C_3OF}lQ(==~<;F!uJ-G8& z&o+;VS-BI@G>ON}l*dlTqd67I>Y&wNp4K_iz~pVc8T>4R+Oa@`>Z9(zXboa@owYM1 zN1Z{nBNv@Tg+-K>VrweYd+%xpp&j%b*Dk2Hbe)5A?XZBqPMW^x$@%to+%F6`$0|VR z>XFdn2g)4!M!y~jUGBaXrTgI1^Zch{qN`t%V)aBH+kKyUO8>c9_0>n!OSCWD zjRVj99mDnp7J9>)sGIY6p~g<_ib*qoVgE?nk69pLw|#+Fh8jT4C@HO33PDeH;u|kN zQf2BY9nflIzM_CJD~8z{;w19#9HyEi{$Q%Ch}0m8aWIalNZY0=SLTf}BdoxmIgSb= z7jAzxJ>vRo3q7$JE?<$iwJlgq--$%n`d`=K@bgg<+?lR-X{Io$U0lXLDe?m>&RI>* zV?o-Mxt|4&Hd9qhXt>Q!T5iA>GJu$;5ik_AMHrA8kwXH~+q|>g)psIQn%ky`4Z@4L zNYbXQH+3wvuVMxF%mmJEk&@5(QK~?t&=eRL$V3Og$Vo?pqfOiru&DF7V@rX70RF*P zVKH0%J(5F#`>5}mw7GTENKIl^&{PBM*jDf+v)N#?3D4w=Gs1GgLy9@e8LiwpJR%pl z^2Q;3d;L7VC)Z?iho>QlerfAd`k%;1ACyL3r%Q=)3*;l;RNDf>6+AX!TWzb3HF#!= z%P{4%!cn?kU;5nZP9L-R=bx^)iECJ(Ga?`G|TIsN51`f zndptHr{MR;V)b%+FGY)E{$|@?AU%)!Sn;~%=NFIY@*iJWx$GW3L62(jQXF69vc^j6 z8vuek8tcYb4Ph6qWSQM`ahk`*-2hdYtSZCa__^=Osw z5c$`TG>??+ETzyo0O3YT&jGMo{RhZvivaf48<^Fid{S>7TcCLT6969`qTGEs=ludp zx}u|#J%iy0j^PfhVi<*l$XG5)ZsH zBLm9>J)EEo;b;2oJPjg&5Su;>GcGSsO^`%xPWSsJ-wuy0^S>61&^L_AXmMn!2*QY$T{!Z z)Af3A7D!U$o@vBqcMpwtKG2~8lqjs@ZI3B8c-E|4eyY~4UZSGBMJXp0{-v?-*XseU z-&6FfPx6)nc^?L<(hPqkblm&5PjIzZe5(u!c&Yk#LU!d3{&<`JrxnVjP3AizK?w*@Xa}g>dD|*guE+S9$CcaJ zemxHqPfv9UtBMy{6s6g9->p6F&=p%r5x-Zk@w8S*!6@}1QeDR3dbA$FvUa8W!KY-USFertvr|7zi(t zM@qJ({Jiwzu4d`WXn~tb5 z?9O4|1FNl5LK~rb>PvQ-Q{}mfk!$c@|Z7@ZBv>$Jh_z`Wo@iBsn@Fig`s2)Sou=Jyl!UifJVHVh*n1+GKx|IL~3OJ z<8XJ@V?B_~^Lf_Uwfs`mGQv>?+FKG-Q@)bivp-t6VCS zG)cxux1_^&(T0dM)hRtq7Kyk(fAEZ$Aouy9ZNhHc-C9V5rg0+DA#NANN+U60kojL) zxly9-7H+o1{{()H!>~lRk<&|V-mjTe$Dcp+JD$Jg2e6#_V8|l@+pvo-zvuesip0VJ zT?*_q63lh)s=&R8SPsiMg&*D>qQG@cVOCr!Vi>G?n$aHl&jhcCG5gi;qgn~c5q|^) zb%mSXol}ti$B*qJnY2LZc=6;`j*$=2O6EX9L*dbq_<}`@>gQenKjh~n@;>cJ>J-W} zsvK{~BKD8Zj9bYxHS=VDsTa^4+*|@chH)aoZ*@PGM=es}*sXN*<>q?^MC+XFKsKjx zK5hK*QB159d!rgrLAlf5_*n8lWJ5q2EeXjkuC94lJL!YG=sx2{@{uk}w0knwaLq%K 
zSHw}H@Gu2aaC$_v6-^F1M5rKy?c_m0KP7uVXz;sb$$+`ZT`5thOPeLWqyp9OOIIU- z<-L|FiR_XM@U(A+B3kp`kDYU^N;dwrOI1Jn<=I#oUzQe)`D^U;fgC`Gaa3*xtnjr9 zXDhP(9xd1Mi>v?1C$wd#C>M(}gC-|_$FU`qPep7&OTA zVdA94oE!2osRTBQqB2{Ux~ui|Fl&1&K+$KATy2CsivIBQ`9kH!QcTZ?$2~q)Q-|=s zHf|{el^^>uyz}5v;DpVM{ue>-DlEGYQ4iQZBTw)^pvB5fAA^MQtuHB86k5K(uENMh z%*QblgmS|)d}pU-A)O-tssp<^^I`(QKl--1^Xs~QlJbx)1aFW9+Q~35c02*BxUMC6 zkzx0lOcxz0d>0>$(!T<({`83Fwy)j*B*dFzK4^oJ{E5H*eo#Uu5b=W=>$QkwV82%r zFV_0Ku6Py_QPiTM#&N_2BCYEUZcDlM($WLicxS57;$x8FjJQWLimL4mq`&emwu_ql zqN4QpkJ*;s<1Sg;W}t6FUh=v)_glhaS#;z}|KB}t`y)*4@7Nr317rZMZ8g-!{bjTK zNZzaR1LZuFVQ}c5ylD!;*CyY> zysQQqM90jN)G0_66mqf{2k!rPOY8fiWdE*6j(J5Oi0$JSdri;_Keh4=-z8MCp0Km~ z=c@aKS+S}g^#BgEk872Yovqa!eYu%s+q+)MXMP7NSa=86HS^4FT7)y?(3daBYLkC+ zW1~Tt$UOB)D{g8G?b#$=k>`w++2SH(%;SkD=9v?0mrcBnODfXauaq50z8ono=f_=?L|Spt0!xW8DbpxVJYl`^BH0rrU5OQwv| z@nQSXIv+8%I8%E8TP^ZG(dH|<4T#@t)tcS=cJFrqB}wKS06Yz3xS~Mz;}yYps9i!E z-*`BA$Nh4EN3jGS>eA8J9g<_CYa~e`A9m`Lf8TECeY@KO#$90RqhMt{AG4Vkx{_G{ zDF`2-HXE}+##fM?gLBpH15~~3N3zM-QQS;mzACK|n}rTT9eB3c_oOvRZHmylg*E^+Jm!5r|S%H zdQpk)tqu=3&n3_X$~rp<>=^mW%|G>hq)K~nEb8?ozB^N2P#7uAHB~NV679=- zZl5CSEV@rCgEMTquUUFNv77|BYH9-9xoL#&0AcqwlBMk(H0Gq5tFyCwri4h1XS)qj zcz&ePQ7k|+zVzVc9xwpSyFh=`)Nl<;t<>7VK6X98Duj$KjecE)+kU<#X}-7;xcI?3 zNPCkxD_Av-iTMzlpf*fi+$6!b7dbp<^}|t3M>AdL)izJ2N%*OW{8gsPx{0*EvgCu? 
zNu-bb;iugkO$TAd8Y@$DG{P=G^qYJWJ*%_8zk_-j8hvR1YckkE0-&1a9czx?xP*qA z;72UeJzT7~-s&e7KlxqFC4UqcWYT%QSiA4t#n`s{-L3y^+0$J8#hdi+l@~d1=9SrL zMUeIrwKJ#Ga?KMUSnkgSdbSrK!yYyc3l#Ek9FqCy@h{$oX+ZI3E^5ct9Ts=0wgDPg zkH;JXSCxPkPz;>t=66y~0a3OOrp>JC_b-472nHY%{SxtFyQ@*jgf^RFfLmSRYFc{;}6yeCV*pyojWn2Zgm=`iI;xmJDC{x3<^FA}^`A8y8 zOaD6K6{=aLiTXpx{QJr~kkKs~M=iml6wDPog8Uw3uCME)>TA~(#~*g*bkbOGoJ7^= zcvABtQeNb4i`=SM<1fT519HF`f3$M&NlVqww(=e#u-qL0()3b{n8n5;l~*^)tNFJD zj^vbqea|t)3qY>~e+;q>o(n{d@$2OA$KGdv*p&CJu0HkI+&wJ3jV;!}dj5VL6h8dt zNCq;{^JhnhFT6SDM`QmC))Ks)&4(W`)f*hD>5EXg_;P?Adh#$sg7}}Zg61X6ATVy1 zq2Rf&DH~5;nUz9jzVH=q#l<^WaNONZ1<(B-@f{&ix1YUw%g&<>yTZ@A)imv2GUbL= z@|x2SFy8|nSq#HD!G|)hiJCaIdW8%6EyNRG2~u+}@5Go#9C5avO{yf^is7LO35G7v z^Kr@mtdqu8Sm{}gR@C4Rb-e$$ru_SM6qJgW_z3lGUW{EN*Xaus zyeR4||woCc&X zO#cpGlqjidXk@5JgB1z2;cLFpHdmK1UyF^MCl zwKH}jnQ>36do%uf`omytZHlY#*qC>Vu{p4-OyDBcN6^!{QyEgGkmtaMvDj_ChCcz% z=FSI-_7@?Xte#)S%R%6uGGgA@3}znE_i^f;??{# zr?})=UUwwbB9wwEWg!7FFIN_&-IvNhm5&G5Ju|?qe4hA?PUSr#L>WQ|phm~Vij2oI zT!uYg1T5xYDz#TPFFiT8Fo;@xaplHlO;YVMNJ#iC;?PunV*R#`3%^s26bkFzqzebm zxm9ZUh3l{l?aQ5;@QUM|@TxZZRC!#d*!f59ZyI%>N?3ptKSG-OU-}MkUBcMKtTOzG zWa3T}XAz?v+cTl4t9Pt?XXu0eEU{qiPy)_DHK4M;%@8TH(fv+7KHn6Cf~G6_MT1sy zb7b6p_rP*A_x0ZUe7x{Iw&wTqW|ra4quWa&*9S2ns*CzZfO-j?0=52rUBv329?@s# zR%z-AMhX4y-=91EdQgaqx7O5SqX5C%%1VD~o5|0UPxq3cp3oyxs8c+<=u=gC6aaC# zf^S>->=CQRPzCZ=W8U=t1^B8&!vT4!C%|Lvcj)ev|Fg2ZK*lX>QTfxmtA;w>r~AuV zuC?5tzu(Z;Nq4)nXt65da6+9gDy=Jnj$7XbO+!Z-Y?Y-)6`X^n>DavoXKUXR6NV)J zp-IrSFm`kNzq8zU@rMK9iF@sUF}eS}yRrMvw>NYCCZvE=Mn|UhGx#9;i!xOmo4qqH zVT`3IX7a0!0S>#z6qBDnvOd~@efP?UKgm7L`z)dX&Mwlb_4^*WV*X3AG+{VVq? 
z9h$ucZ==BL8pM$biajD5646+S_#Q@^nWCp6Ut+811I#f5K=U(LY-M#dx~@l58U-BTCALk&E9l6s;$Xg-bpF~wFFEff z(a8Ssj}scAF<8!gRhm0`AqlF+-F*-LWy{gf7#UkECjg~w%`g8m_fX8)&-t9MVOqyl z;U2`7I?B5V!`ZUPi$r3D)0z{C>5l%1eQC|lO3pLVY2&Lj-wE%I3F*M7;Y`I*c4blq zsh3OW7MmQ5?WVR3<#-TdyiTJJr5eory=1%7x0bh-M22Y@>iFNAv~9N^jAUO=Y^)Ae zlM~i2#hO1dR;4j##itCLrT)m2^|l$o-Fr<>dI&8dFw+i=yO06UD;!{JGgkY9c5U7z zB{5sp+V{Q*T3L!Up720yTJO}!cC71YSz0ygL#a#Pr_8s~kR54;1MSYFI5zjmv(Pwf zAP2gx+Ulnh^?*|-19L`pMbtbkk8fu=Xcw$5X!Q(b>srSrM4F>TpQ zKon&c$&ivO-gLdrOGpp4p3GVI@E~f3T7bOFVj>=nfZi z4FaS*H$i4UA|e+VY-@_1rQcR#6iE=@XuGYQ@!m*M>CbyLKNY<)LiIL40=M;U3&K6! z7CAgcF%>F>?mM=T37aWyA`jPAbdVx>K(ePX$-v|U+4@|iy5)LK{k>V0x|#W@Z^AS$ zzT&V=SU@i0x6J_k=auJ;f3t9CXkcPsPVE?-eR@PqbuY)sw&U2|9BOJHv>wNXm6;iT zONZNCa+x|Ap%1EA?sbn+D+&1M9us}bSmccK<9k@d{*bQ%GKDc;$!X+jR``&kA~`b< z-{(u*noZAB;1w(DJ&UbIYF+u#X$058mQdwH}ZY2TWN^$yYBYB_&m7_AfX`sIs3CB;n zq|_jRt;k2a_ysb)3ET6Q6*nY>Zcq-#-#qw3@Epck&hN7W5NTr5<<#>cqKp^Q2o{Ia zsa>lMG@Km`*ON#)%KE2lQ)!kf+>0Z?T7(cz()C?g9*$-AoK z;I$xlx~c>`QEWS1q2=tKlPV||m;q^P`9FYKEB*nk&LoE3%P-Iy&Q`}!)ih=HN zK!y6LT$N?>(crvp~HTv$2Wy3FrP-Epd-Z7^#pH zq|aj9KU>7n^9B2@t>vdY^|r?Qwf&hwZeiN;>O%F-2AJpe8gw!zoa6@`yR2A%rG;y5S7Y+GVI|Oi%tUef;?S z$7?5cs=&h`fNeB6H z{GShpslLwL2ACE&7#9e>JC2t)Sy>TS9@}5?zfbc?9Q?*1lJTe9|DMHX{bs>4+6}bN-nhH`7pN=sBvzu$9a{BG8!~_WPV-`zlTAR?5dr5ws@kZ^#w z?PG(1nwX7W|E(Z^sB{a-)8^i8ZEfAJk8;Tkb-tfns{-8!`5na|rkPuU!n})mStTyk z_`F8o&Umdpdm2;_+v4w1r*pFKjE3fEp~Ht=I%Y=SS+oE9MHsvkJ=@nzRsX3QQugk3 zVTu84=!^KFqyo47*NzV5GVSkLm%nrdM1|cmwtP2@GJW1GK1K!uEx^Q`t3Dy?&ura! 
zWW5^V1R~|=?;C}m^r#wj05VK2`9)gTCSW5OdX#e#O{|0=SNQE3rH*QGW_*$eWJ=b>%ifC!_KAm|JPUQ&v?HfS?gIQEBNy$HM>Mlg6N~U22Q}>g^`_E9Z`i?4W1jQ;pu9hwjgcwwKDE zkYaVut~?Y^hH1IG0@JJ~1O}QT33eioB7WZ-h_@Yq9F^*FUaTDF5P8pLlKR-!!kI9; z66INh@3NpH?+yWp^)F7--2e|rKMS|@->FO2Xx^mY9sYU1{=cHT7}&c1 zU+tu>PPqn#d@-U|!~8jNj7B-01FGJP>q${c7M;}iV%=U1n#Ob)&%*A@jiqp-coO6; z&{^)DXT0!Z>8{V0p?&7azjmx|V>dt(C%2uSK4e z^U$4g;oR_wTt)vC?KCJCP~bb%I}bkwSrP3hme@Q2S~QI!}$ zRht8{m=z-)=0^SEasU1e8<;8vvF2;5aNVFez^;mH`v16l>$s@e?p=I9Q5irGh7eRh zkuH^1YEU{xVCWDr2b#d9UfXbB}Hi@q-)@JZ_xL7-uJwp@9&(? z`S0vMU~1oc?>pAL*0t8UR!vi2IliQ1P|a+a79_35O7AY!=*IE-M|Oa!r%55C^6Kj)cUR9t*gNP#C+=;V$a(G$1^ zFE*QZ-Ei6-%Kr3%5J>4n3H!BBy}uV{DRDNo`@2$j#oS$T0--xkkza{jqO z#rO7Hi8Y%joE|j2B{ya`x`0H#WLWBW|T-L~a6t$GFK$Pd-kqktbN_5+m;_B2d2TrUD8>z>4GVQEtd z61HU@0PR`sNQcm03fu41&BJ=={yZz5Pg8o(t^20ZMFq(NCyz}0^;76OOM^eUo`DKm zfC1yE=k`o`xHVuL5!xy=@1ZcI_rI46=KvXPn!H-OfLKK}@|k83cXwBBZ={QM;By_h z`I#4MP=qk(zN3p!@MjjrXmL>u=ryVa9eP~8MT=dH6h&hlN~{{&p}+0cQBD}gR;~0P~hCSE?QT31r%-mV5Bl9ZepHoNs8%nGxer6 z`}v{|2p&jPL=v~X=JYSVBz}opm(b8^-UXy#x3<#TFL&%rwkuF;@LJnEwvL^6bOQF> z3q)qONFKZC`{8yiTtT6{GPR^6N(DjUyd9HU z9f?(*>W_xw{n{;ScDl@DAG|O>^?CSYdg=~-#I;jLUJY~C6?vUYVV8zo9+@PA2s`1M znkUpJ`T*4W%QO!?oKDLd5fb$H8y&Eo^J`9C<^QTb8|ZWe_05El>MGa2dp_vS7wBIa`@E$^ z)?J*jARw}n+ICp%yYo3J^ydTJdMtQNAs-O8T|TX+Pju{ORFfnY7_aY1wa?*CUjoUu4A2 zfPgXl3bBQnW|HFk_B=exX)7XiH=OM#x&L+pGORtbey{4Sx7S5_I3kF|Re(C)gnH+R zqbzGsdyUV9oVl#4L=UiRQ(_LRC!$ui5_l802A(0H$t;(={*jt1^ zIQ{k>l$J!U-lK|TeUdqfmd*ZgWUaQmjcjLlD&aQKN^;(*OK3GY7r!^Uf6dUUymtE1 zK;3Gd&uZ$@!0Jp_IIq98|6a$@ug}j?@N{QRf{}+_eG<+j>FWjNeaop9^0;M4&Q;jA2mBG@wyxP2@LnNuD(7eBTg9`yuWRC z#Lp%do6_ntjj@-W&wukncVyg>h{gfmSKmowT`1otoPt<;2{e?rEK#@JD&C$4Yg$B=O; z_-&W|cy;wBRE_cJ4t?14!b0&qohF&8PB>Zn#WOe`{*@yW7X5Kmq;k?gWp0Baf+k}f zTFXE=sK@RzsZmA(sowWicD{KfjxBrdIr(l(P_#4J>Q||HZjpvqObaC9!?62t`HNJ+ zT{+{_pE@JG2b^Aes#mpTT6a+`vd;g~Wm=##=QDX#yBSI?Tahl{xqtjIq;*+?41Yyq zP$a0;o6_ML=O2yw>-+p?nqHWmED`=bU}YEI;XdE^u#;!>S*#a5!);9ugvWz|8HA0( z7gxB(Re=*o383l_rS{We?F&IY+Nq&DRW&GYQs+2_DkP9gWvR0IxBF7Nz4KWo*At@{ 
z76kDiWYAqVqbbz01cmQ$7`Nz$9VtLm0{e?2|?Sb6=Hw`)P$3+vBC$h>!p4%XfUzS5L==&a{nRX-B&Ka%iMbzklm zv{j7Zm!)&V@O^D1X3J-fm(7h`oOgoxxmx$6MhThQH%AIw5jotXVK6pNkuzxoPJjb^ zDKlpe2Q=8tnjm#KZl_CAMrEM!IVzLA8ebjde3^q1Gk!Ib))RTr&I5zQz-%mmmbgA4 zH(_5##88J=l%X0TUbL@D{ z-Nf}xFS{njZ*LvrCn!AX)T{gFkzEZB`Nwmoc35C+)p$1St1-dqUFVwUKS1A|E)di3 zW=yyy{~5Q{dz6I{SynV1RfN>zw~L}H@k6Sf#HGcp+;nMbIpRDhTDx^w+OAX)GcH&* z{$||#W5$s8t`!wS=0_dX{ybXjC+&bq>rkP!Dx|#2egO={w;I0oI1t3K}g(Z#sMYg@ZGt-Jl0<&HhTG_eoP0hb)l{rUbXjFX3;yzyn^id zdh5To0s^f=U3}Fjo}`1VF2aEsbIv?Nph`RIGKpMHhgNUYwlSKUGRjx_2*$dTba{*z z0UwRRwNq0NiYGknc}Vs>c-CuzA|nY3^7YxFx7&E`O4HkF6-62>EWQK`WB>jdHS3r%>rP z(kfKwCz*4wx?|goKaD=z3mFEn`WF_spyuEmp$a|_w|l91y$b-_FTea(?^)g?%Yqh= zTeQf{Z{xs`sX@vGoSIi)8Ar*^BVGdL5ueu&_mj;hgiOo3A)tesdgl?$N_{mvgxx%8 z3FO+~f$G@@@UDA6U3t&?Z0OOCkPAL+?=7X!(VWT>_x^~*5GDF-ghWSUKGJ0RI#)eH zYG!u!GK?EjCDO+@S&o1!N^H}bny1aFxxG9IuFxQ}-B=gl&|tZi1$k7+5u`BKAl^D0 zQt%t6Tv!!Y#L50xSWXel5>As}-i60;bXAvgKYBm7ef}j4yxj{C*#69wSOMVBar5|F zzCR1<6}>DHLJ6mE`6=Gyyf3`3!MYiV2hPO@mH00Lo7l`U{?0f{P_uhi!)Bu zucpBCrhPS6C-diWzsf#6<4sP9r#a%M>RcltXjm8sR}+Q$o|%Gp#}c*g0cy=~kaWN`$E-3FnaJ^V0C> zJ_4~zr$H5>t0qrTHO%Qm1CXTyidJI<4Dvs<>;9RAlSx9g2r&SpxH!L2uko0X*$|@7 z^_xrp#gwe7~q_cA(YI6c4m4()~g@gp)2 zPWRsW&-^+#Y1bcz0CeP)WLZ0{Kvg&U^<1B$hnJjz6c#3pySDr73H1Q50K_>wu^P!+?lwn$eShNxCCtyLJs%5&GJ#N#4h8`2gRx-e`UMj? 
zdRAfXJ&_Rxv*I5f9#(mZ+F#x}^4MeuT7ck?qcZ{`hjz041{MjXte=r#iT$u6&3)qb zno5ySeD@K*?5;~61aV#|M5fP3AZs)}%M<;qL9zIwCvMXIp5!nq)jb+JI9PQkzOg0C zRRtJxNDy!DjnByW8U*(c2)bU*3o-@)S8Gs_Z4NmJhmmW%H1B~w1}X~q`?z~kS)M-B z)d|6?+u#USBsGsm1*6B?6$$?!X{;-CS!+6>w^ znI3>=}pChWBX0bsnQq%gm2=KFsxbND|g^WSCy_%NyQ|Lzkxo!Os1&s52{ z&FUV3CF$+T07=>|4yF?QpQmhptCwqH{o#IF@xp>Va5iB`KzD(zL638HN^u6QDR$kS14OG)eB&@htjOEN3yuUw*t+jQ*uOIcs`ub5lEc5S8@h{vv1P5+Z zFlU*{?v7Ei8Lts+qjmglRf|nCsV;--R5cz z4vR_u5^zbJ9v&V)K_#*xP;Zr!_LB+izleY1}-`q`e(ZX@`wtA_z6zAGi4myZqm8m>czo3=ui9j|IQ}^ zP$ZC*+#kVc5g#8PN%jwc@keCcrJuX>w_M8dI&$PlBz<=Mfk*?2C9E*y;MSNmwY1un z7(af`?b~@__BSeYh%`4kS9C5|K2A;kjhX}SxzVPAuyo7ZoRx?o`ExC3eyn{u=*jF&39pb#eQ*p+LwV6-$;G&>e(1@SMVL4Oq^7rw3BKQS$b#z?%UbVjNo+bXDclig? zKZM2sqJ1Z~wyKy%Y}Q zX5W^s`}+nX9EM7NQgYNDHZBO7U>jtbWCH*HKKwY9{;8dTFaF0*G%`W4^EIj8=ZjqU zTb()3snKEWrpKW#yU)okk;2$m4ki$IZ-rEg{ zJ5vW*C;v8yV;IVBxbjP0_OMAYG!`^ELdp&JleAb(;(9Le;yjI{$4(ZpKIi{9KP`5 z#NTUShC*7h)6?r)Wp*AIm%C>E7F2Q{-v4PTrV}LhykWy;n^g`RoLPizyS=UMUPC3$ zA-f93@T=TipbbA=l1X+kd+xiynq1w_t71V zM3fA>vp^b*hBZ+3BugHG?G9{4dl`QVFO&i>ceDZm-wU%tKLm&2zpe;ImKmOP$*w>D z^T9V6;(rG3;4SzgBKiN;CswLJf3N9F*NnIM{fO;f{+^JGCXW7)uq~^4@b&V)?_dX_ z-7m=X+b93=^B~gq-B#hV|FYW1={Wl#I_~lEfrvlS?$EEaOhrj)pqGE{2c*_f_+t&HUaIOy@;YpD6g(R63!%<*t zG%);g61H2X$KL}HOobq1^)IvKH&Oj6T=91fI=7o80_Tb}%ipR7Xj!xn)E*pHG6gUuz9&n6<%kfl zH2yFs99$cg@s7B&F*T%z0U`k9OX+eLbql^jS#FGY=79{G5y&i6ixLz)IAIoSqRdU- zzTI*@&BUZ^U*S7U-1`B8IT{%mIp6(uE;QoxQ-Eed-X_z4lbDVoq#=4^5E-X z4O|%NVIcYI(>{;FzOliQ&h4s}%!Y4+jK(LJy#s-M_)vZYAU}FmRi28GjpN!0Si^Zn z_lIaoA`Vl2`2E3OVFnt`>n$N-a??0unQ*WyN9sP&<@=Y?mK~PTlA{R`qx0L-Q+6mQNU6$lkmH%-k_T1FAUXtb7U^7@-i{9WFwotjIvFVM( zAUDoH6qHCN!dPgH51N=miqYi(7tM$V2#(>yhYy|O#5~s@913nlK=jjU z=Ph_3R<|k0c>2Tgn6VT$rsYD}*kQ0#Ct|vwHIWMf3}H5E7RZ1==#nU7NGUnM5*PvD z2>=$r>PPwNa$YyfWwtPimZuMAYGs1?EoYKq34z|;-i@_Wgk2ByD(ve&J!Pc}rAY!3 zEduC{Vn=u83kc?31MG1s>uhWY3GCB!sSny;9&=puXkqdmsK`TX&i_|;feqQl@`2Gf zw@7wD03~y0G<2&Gsa0t22S)VBAn5~fbL%H4;j7tdhrWNH3M7p{;!E;MNFW7mkOdjl z-E8)d%;&&%Sj%J{YyV|v2+V< 
z{Uy|M8LI03uV3Ho0=3mfq2_EBGgkIz0k?97qbxcAJR+|p@XWXallo#`o8;7)SZUAV z-w!SqULGCD^k$5%iEOmPT{IA@fxeOc|n zJpp~?UD4^77~H8eGFtH5KqM#0=weVk{`BX%Yo~DJS*oBDU`-`}#eHnYJKxRvq>(w3~-{oVY7Gy zcEoN*3j$|^ka7lGSyvJ$rp5?PUDmVbA@Kg-u(}Rx)V4AWsO3U`VGBA{3R)ZP+T2`0 zTU*O_i7+H-5Z%frKH0Yu7mSX_fhv+fDFdg+^wJ0)+;1-{P^+U zMg7s-y*SoG@z^e*mt%C-wl})~L%}!zYBX9Nw9w6RKQ2}o49C-^8_AxocNdnQnhIoz+zM)xbuZfQ;75x80Fn<{^}DACT2-bv zVfC~IY$ibPwJ6oYtZUBd#Dm5AjrupNtSJb%Ob#RrILqpuUC4XtkpsABt-CNM+mq;&{K_IlFHQ| zWnS*zAM?l2xr3z9-{-;Bk`^%Kj)`ZNP<4pRREwPUcS|4Ps4wqNR9TAf+*od|pywRt^qrlJ=-*6g`C~ z;xa5rGEiP_jl{xHILG`%BG*FMtYs(i$yRQ~)WPlpn(_O|uHOGCLlvB^nreBJ>;&;s zusz?M6ELPh6OA5j*O6iP+PMUf_YisMcwjDedglg3n z+YR0WenKso4*-$tgAy}VWmn}hN&rSjbzBm1y8`$E>{&KiJy^2m# zf}Q+S@8FfTYnA{{Pk~G{h+%lFEk6)`Q%K+J@x+pwcb+rw8Gt7TYt0>?*RQ>_z)Rqe zXgwp&9gkR^IZ6ph7XWB7!>PArl~J|az(N;xFJ;392?J`y;BeR&;EcWUAumzXHnJu9 zQq4KfpO+5Y*3P=4*Q03+lTNWgt;342G-FiaL${#p3fyWgz-6I{_gcnQ4yQV;kDv__ z2NOE=1SndT#>B)V43qn@!bi)ioY6i42u-5O+DiwLX~sZ&(>w^5R~^Wz;@*vepe`W! za(5Oggve=|fIb-xKFW|K1X3x}2YN}JQUK{hFch4O+}POIWr2e4+z}Xa6!Ni_EYVf_ zHN@Xtz^}8zsTmhdS^F~}@aUn|YIX8pBhFF9fIit%P7;mA&&umw1ath7ZhX6vC^yW? 
zYXFdZf&$1K=2iElm8UbDzXSS@aCbWHKWi+h>I9uwbiQ*KFU8FWuo(Yp|D|9MFP!Z~ z3A4HeAIs#$1_T#j23sn4D{3H{@5`(D(hw3 zCz;D+?7lbx1-E_x(Q%!xB)*?GBJ5s<3k0!@GXMBBcd4f!Hm;K^e zGNwT9mqA3XS2&keu>!SpcVP;%oe4n9atsR2lTS8`?zW6_L@6=~@@0-opOD0}?z}xnlgvDy6X96a?IQaD&=G>QV$>K~4N_Bof|Og$ zfxNaT5bjzcgDp6AKZUQj}Kj{)T15F=KMBF&?3Ph(rD&#NPjq8AvK(^-9 zA3|j0HxgNZTh`}aa2QI9ql5XWxwJB23_=4`Cw$(v_w|W;B?d`>!h87za{h09x1xLt zKzK#72#s*DXrSp5K0$zo?X-ueO>APs_ixk#a-;j_sZP>r%c zb#p~_e`TCw#40Us>1}bvpnZkF{j-6Hgf2Y&GJmRbilzuJ8?$9+ii(QW&B1O^cIwhB zJYoWm)itUMDfjmIY00YATcR^Ze3_-1v6R3|2Z|o#jZc|*%%rLq7v;=u8P!7;fh@60 zwg?I-xa&RH?Fz{bD9BrS=jVsUseVA`&CiABH{E8rtiS4)zvl_lepGvr3;UZ2eAYf0s>n&1pd~MxHR$JJL^=z7r>w# z9CGugI3LiA(bP+IDBJ}%EzB$xxs)Ya)}$BGgegPBsIr_ac^61OqD$rgtOq>`s4T-g z2%lrAiUTwYdUhTy9n2c-nmyf$biF#8vmljFt$56)!xOm}8tS|IBdDO>TTd7B6~ta{ zLzzZgp)Q@InC;!IcWqvr9@Cy{?7kZheZ}A7s?uDDYkiDf_P}R&R>IK_vVjf{H&8Q< z>iqm20E>LG3W>nJGF>aWM}0~PWG`vZQfjA4Q@I(>qRK;N6Fe6RKp{O|-1%573pwBD zf&QsUiu`hmAo_9LK?Fm$aG`YsVwi@&yin4;^$29R#BLiDsi4Wbcm;4a?~5_jyUo>j z19+;KQnD>zcoK zKc)EfSq0_V>B8^v2zT=%;&hZXv5n5VfC8|{*_yjwSUF=NFMp(#(L7H4XZq6Z(qWLI zxdX~Wnf2QU2{+s1$j74<5!PT2GWb^pZ-s;q2 z@Q3pQ5g+eQwk67*M{TYo?hln$RD=)4^a7r~`8PFz&;}Gbt%P8MXscD)Hfo-fw}wvt zegYT>-g@T%lR8b8fH=rE{P6@UJbuxS!Lq|pO{&Myuab~jIsC1R;6&-?-&ZtwB)xoFqBYJZ%kp%<+Viv zUzj>6#=gOFey1hsMIGuaJrxflIF9$WeK=4jb0Z}(mcQcq;J4TwK@?>5;iBQ2)-nlCeE(8t!S#nmc zZ%b@px!Qq;8>5O;@zRmN5Q**rf-wB7~|>HWOD*}R_+!~L=Z=9uW}1{8T)$V;)UaOO9* zdJ7C%hNQMN6R39KtT`Ziw+Tb zeDlUYx%ps$piOd+t(Nh_SFCKSS)>fkP{yx(BTydcbFODu^ExIlaJ$MAV(rVVC6pmP zqkfsq&(wMxCzK)A+DkV8vmKZVa=l@y&83c3T=6K2XiK0QK~q41B}5FL5+uf{Y;cZ@ zTtD03l;}s>f-rpn>&jsNt7NsRYr~5^;#0*)>r~xYof9{kThnoY6P?#GoKt)=Z`6KL z#Ypj~kko_aCS=2+Bd0Z3(5rR{{+t26f>|Ar0KDBw*^AE8MF$J0vUoD9-8PthSM}>d zKExw{6mSh@=Q2{vm&svSMf%nre*AG(Ql3EVJ*}V;<|vY=Ph7w9*Cm_Uyf22_A1PpR zshI=oZ+Udy%t4^nztrP3#k+);6nsYxbS>Q{uP*Jv(dD^I%8@{xvy~|;DJk8sHN-O}`#meq~F(tV2);ouc!t z>E;QIjR*oO+(?Alf=xp{6Btx+?ihuICa||#B#%X!zpSdNdczvaaI3346H#(b$#@>6 
zFdo$0XQyCa;WQXstx^7MIR4?tJ37Jjl01mdS7xKEE##4gU9;d@+Of}F_uZ|Pc_>WNOlh$sUo~|keavgCDf3(Ur_pqoHJ+U>_hrJ5Ixd#}I0=#` z-kW-_^|%wrxn4fUBe%&2@_zwooZeNCNtKh8)m#{`#&iG|fCVB~e2Vjn1mv1aVju`x z^ae}ylKXCu<(1B@?Y$Szns@e>P@3Pa6R*1T{ya9wg^G_jQye6El8=d(DV5z|0xH(y z{5lULp960#4j+|9+uV#{HeC}FSAS5z9#HcsI>5%`)c`AekY4Y3*EKyGrXePP2wezx zwY53-SElZog0J+ew3s-gtC5gQPPmZQ7{!GESEIIpV?=n6QDV6kwm9DuMDCqC#nHW+ zPIV!B%2vu}Q!6h$DS70=#EBq36=+VW1KuxAscYIkve>i#n01l_OF2Iw2 z_HUF!GjcE$2IUyV58ZoAe;Czt;1}aDFrO9!5{CMX^=Zb0hWk&UMMo9l{t`=r=Go{m z(i@DJ1Vw=!x7uN%m&uk8Zy|DwH*@e-9xwDWEE`|ZN0YD)@+ZdD1Pj*}R60LP__CMv z+bk}r8JgjW8G|@PQ;brBj^}lY`GBo&a!9XVYpfFQZgW-lses7_u5b!>VS9cXz#VZ? z^ImhB{xXcJu80P8io22Lvx_$&Y7@X1m7n8LT{eGnCG&Ci1Zn}kGC}c{4+Z%#0zpQO zE2>680nksD5CdaoW$P_{(mJH2D_6i37D3PWYUZRH`W0=4DdH)$nw(NGcmXu z0MU6RF%v9bX6tkQH9Ze9q*~&_K=WPn}KNE;tXMN`|U;ZNk;(pVTxWpdlN(q&VQD9O>%n`m$7OEYhRdrlM0l zPU6y3F~J1>m0o*>%_2223Re;j3<}8&OwGhLp$W6yS!j^YpEY;MyaMq>N0b1GNW&7D zoO)HCo3X{`EP3!#I**FpVv|iLs_@oj?mS0XWYW90WC14|;2ktDcqTjpNe-8*cG8h_ z9H6*f1qy>?{@1|(;{vVQ_1Egn`AdHt+VC17uGv`goY@y<7){sH z0O>k_|Lzu<0@&xmz*=ufmFUzZ+|T0Mwa?!mF%jC06ZFTLbTfs`r}?r~nU0(-9w+%@cD-&%q+5k?`-t5LL z&oBAid4Ls2aVV--{RMf+CA8`_us4LG#(;UFSm|9hVU{V3m3aXL_cm4+Q6hrm-A(WN zYC*wyf5-7bk$S!EkUj$i;83M#E9ikYDL13rsWDKjBRf93GpgKh_jbw6fmB$epijQ0 zf9^*0KC#gaou@NB3$eU`pe@Jix3Y}pdG#e^;Yz$rne1WSV~*u9ZOE10fq|GfUG9{d zz>RRDiZAcuWnOiC-jZ|8rVPxH1B_jD__Y#HJR+qqaRBcyTJ=7(O{wQp(a^5N+als$ zRjdST>5Up8-_06U&D=TZ$?8#D!7J5NA2|hnzUfE6i}7`NqFIf3xs=x5RaSXASm*0# zJs3VH$PCbTspSe87N$(gRO=ap9%k+S1Fi)xf5!|NT~Lpr^(A+e3IH&}4oh99pxQ4=_Elv6YW@|Xx#{4DIAztMBj%}GH!Ykt5TjV)-G zS_u|&S2!CQL_rH-Gb%~DpnPaCFi0z`l2G*5fOm-R9QRQh`NbB9JF#zV5h$iP2?%?D z7yCMU&vs|lY-AZ_AVVQeppo!3s~RXJ#ncjmr32w#?#4Jn|P ze|hW$ylstL)byT}ieZb=GLcs2inKOHbwDyP^M@5a>p}a1jmUkOx7v}RW=h^~aK4Km z%%RKy#9pfGtBgVm8)kXLf18~U-hz6pxD86V8g zRVGYLC+S1W8xJHL@y!HkhPw80l+}zmp!5q*vc=9=d2aXD%S7s4(G_jW3khnLKH~^RhV|F#@Wz?|NI|^berIsnHwVARy_)hZXe7AMV{MiLWzZt0Zp3oE9%iIiZo1e$VE{-=ym`0u z=M3saE#HB1EB9ixdrUCVagc?a8QO){&oPSFuIk;n|F!ULqw`4~j?@i9cALCjt>j}} 
zRe%{D3d#;MX)D=fjvzQ0`EI`sT90SXxr)Z3FHRTK?OQz8`B^zpOQ|R=Q36j#P(<43eONA1fFI{_>>MsDP?dXSz6S%6x?pMYjR9q|z~N;<%LWaRh= zunNG*`^r8MBwf3MnuCLC=G&bC{sjH1<%pUR`siIyl)$zEgz5fd1#~EC+t<|4NVa%V zW~qX-Pr*3ZEfP))&g9W`_OJpIH#LNFw==isYUdYb>%n#@0nwLui<+SKp*NrAqu5aX zR|HrDk^xC5x75+pb>7X)A5(a_+ss4>ImGR3cef7y&2cW-cSl=oPRqEhbUI_KB2~faN)1L2JN>X@$z0VEVo5EULs1JI-G)d%T)u(xRY&_i+ z#I8;R?IVo3otLAYnSB_Vv6g~rD>^UekX2-#+Kxy~2gsurt4ID51>n}Q$9W|o=nM7~@*ND1~XT=5Q7_P5q-}rQUEEmtF z(?Cr-dwsPtRV`fAwLCYxTdKInQsk_DB?`MG;xWp@Ju$sCo40o6X2kWgE{=qXJ-n|v zsJODmXfHmXIJAC=!D^4FT2ZAvVw?)E`Pf2p`#4NDim;TsujiX)S)}co4Ym$ku=Yc$ zKssKap2VD>JD)-mN%{m%f{~ZRWFx$qVuX`Y-FZ<=C;77K$Fo-To`XacKCtDR`#im* z2so-R^sy?Xf~ylwnS`(mf{#ywDCJ$;^1Vz1(+RY#gss34vd5yjj-`9_62qWCVTbi! z3nivuO8`NXr8q_|yYjAW{3r<#W_gKuc@tRfG16P(;Wt>DY3k)Hj*1CKrlQdlTQw}{ zP6|WMvmPYFT?K!p`?@)A19R-5_C!~ueHD8v{q-)_A;5jUYO;yqs-o{k5;5{f^!4s- zeaszGga^eDH)a_bb68#nLdw0h#9N<~NDI4#2Nd-V073Q409#4iC$L22ig=Pz*h~s; z#p8A`7G+*FY<;l-p*}rwm^DJNjl_5ds$m(#LASii#fjFPjL&9^V+m;Q5Q)(+n7LWA zZ!^fBx}18BMYEYvFROb!w~zN?{^?`V^c-h2YLyvJ@QayyR6~&QRF!Fk`-FEwz{3S? 
zoOVDU^KD5=+U>l|@TL2JcxuVpW5~gG#~2W>J8NfJwPjpsKIM35iF=nl2 z*u+vyTMfX?GBpGrP2iemWX}4IXThg*$Rsd#cX1b)716qGnk!?x;m{?XeYo%oW0uulD45z()GtPOOe-zs zp70l%(iMG#I7^VpF=u@4+(y2oz{0m0B{ZEfHRqkezIHv2Wksg_n$2EM&}|Eml6Wb& zP&q1PsmsW(W)v50!V9cM)qN>mj**CC_MMftWOLb)c!({%hc3g`@1xz#TrP!J9^+ZH|)D zPTMcXBX=OkByR0TZ-JQtW2ISX_E2$lb;VlJ#2dX3ZC{WOwG+Ok_rN^9n15G>XQ&lY z&5}+XrscpIX|80c1~@FnoO%lvq*lPXMLb2&2JQtaz(zECqO7a zSup3xBeW1HU9eVy{b-f6B*_jymbmiL!z?cC(KMCQa39^1!;+C*Ht9zaEUpgEFM0o< z-kH_XswV;IhnHg5g@@;O{Z1{2ixL5QPaTvW%fbJ>PO*CI_=swUMq9Qr3l3- zWUb2r)R>zlFwO^|vAF};-lejp8%EpWl!^?V0xkMHHQ5ZYuOgFi`Ivbs2mRZF%AT)E zi>*%AKF|_WpPIa2exg24;?b8&_2Yr*XT*~?j z-7ZDus>bK+w_lNl!V`8Dbu=tRcDC?qyw-DKqcQ|LrETLLv~8$I^Y!Jof5vSv*zj%=g~9#~ygx`E_Z__Ngwra1uq_Q?uU&#bHB zb4fjg?v!A53XM^wJmjY%VlC9j?YHEGxaRLTFe!KA*Nlp?i_^auFlnx!ZBAHFsbt|E z`P`7UvvmRN9hsW>u-fjGUuD9T~sB$ z6ww`%EoybWIx4m}vLjQu-zM>c@)s@^&NW5mEGrBaJ^Dg zFexWI+-h16bIxRqpXl8hCwU_a!Inn857H7n(t(z5^QU0_<)152&pPR_wT?8wslie#r*KB0EIXAq{ zvXpH(HtU&?Ddw=sdS$@#YI*DFZ9b5?<>Hbe(iiNrT6sSFXb9)CAy7S z#dzdqWy}x)aZ;R!LL9f9Pc0wtjA-t>B&{pnOP@?rP! 
z;6e(!a7^Tr;+mHe}r5^ys^UlPsQQGR4zCil?fNSJk)30AJi}S~wrviDxw}ToNv}HKx$9 z(uH)emKP)oANnDxfvHhFUF9ya>aj%#jL6>I*lP0~0U5_1Pxbu5KX7yE;UC_$AqIu6 z4Gz6Tlbn+lWKV{p$EP4hHsQE{GGZBpRnVd2w?SsAv6lO>WiY>1GUCtX*O(J`qgp zwv#rqILj!-Vi(EB&1oym%a@0ayC+PRVNq2KZ$V9uXe7(1tDXL&at5v6mOV;A(> z>x$~By)#(d*%^qj44mC7HIG-PYFGvP(2aDAUaL{z1U-*j9yFD>7t8kJa z+%qJ$>-do(w#?lG5Lnf$AXw5mR!x_9>>4a&O;Vbit=Z4ZQodyn&f)Nl9=cugE@%aK z&Ng_yj0dP#Oh_>*yO3P1ET)jJyQVd$f?Z1KEsBkdEU-;Z!w%YeYM0`}ggF4;#7eNG zo`hQ0M&ja#j(x+o+w7&nC-h#P`=MXl{V`QqD=%*&h*fH(MB4P>;#ptQr~B8c;g(ob z5{u&3%C@3769Ys1t6j(BAJsy`JNkI((pyC=oF9iQ=8NX1-*1#w|3s|2niLsU3+(yy zoBP-sYk~}@cAT9D$Uw`)%1JBkD3Z|@EfCz2?T}r)j*SELK@rEr9Y*5#DfC;~$xD<< zSP?C;dkc}6eet3`>TSr!B$s@c(2s=ete3bGrX|k`t-ut z*^^rOpYv7G$G<3Hg(?-9nmWC~j7#6@Y0LyA;zY>HFs{x*XFLQ#G?~p@T10!l;Q8rW zb)$Q!mm|teZDIxS_VgmFd?C+s)2T$ycCO3BU<*Xe0ei81t9NQ^6cUa>roo5KUV9{5 zW~O=<8NcZoy1@g>m@n;S!%nBn1k0>q$NIbhDiluFC&j~Z3|?*|A=8qA6iFFQ!m^!2OW&ZJ1Df zP%rohXPt^MD0+aWG^v$mg!6Aaw%(mMdvb#}S@I;jY$=k0H%LUbymaiYlFm&bjZ*J% z&rzGLtvP+Wp34tOF3S#0u`X6L`MoL*m)#1NRpDiP9_e7jjoST`pO##|O;RbmIj!GD zx9{dgIU`$QVC9J?c&A53?fdlDk)0T}gy$H`21YMRFCLE>Uh1(ooR+j7>~z)>?!Z*l z7Rmd%^fY&^yX;Bb-n`#36c?NOD@i}&sES>MvfUbqo-di3EKZcWJv#Lj1GpD>q1 z5Nf(PA8q3Ym466Q-vc~bF}hOSH@Lb)@1$!^Z~G(TuM1tD2k{fixqQmgI67M1qfO~e zYOg2)F}sd|=dQ#ETVS8xjetLlz`dQl6Y~&bj^B4eSC}YsCwev4vubw#dU6e3kRgN$ zQ&LEd?5t8PlGG?03|e1=cg{G(aJ7U^s?GV}*)Ng}58;Wt9zIK#1caw_Ev;_Kdc9ru zu%DjoSPH%`h^HKG|0p=Ezp%3Gq)=fKGhkmdv}RILspq^^UW~9Up-8Vi8m~s~HADQ- zEIyCNg$sV1E4An&_Mts%SYK}D%}nwPQKz73s95P*6ImG$`_iPD4f z3LrALA@$V_K=2Hdk~|ryY-T)Y6~rg!vGN0_M_Lw}z5v0+=EJ~u+>kX9U5Ha{K<9NW ztH&6pY2<(?ci0=NN_7Cj6Trw#JV3lW2ar4(v ztu(gR$xUox0olvxmCXWmi&Gez=a?)-HGd{d%Wjin?r6uJrZ>`{EDIIumZ_VcI%KCX z-&@cO?d2Q~egvPkoZ=Y|s_9wX<@LyY9-n4tr95^!VvYAkUb3IQ#JFqOZkQo-~)h^^^#?I`~M^&G7H{(SXA;T@1&FCWUU|BIj%Iw#E+B(8WOzy6>h;p1&(l^uaQ69(Hmq*bWn z#{?-V_@BqbAa*;}kP2TSZ1HyL$u!#P(>DRY)IrmV<1GdxoP$NSQ5p%?B+0F*C0{*%-jelOyR0MC8HEv_zA?5A-n9HY(NkeBUaUg1gVkom~F5ByNrLcoUDU8uQY zp6OmbuRRzVnOv-v-X9j4s(IBew(lBuHfmO?bE!M^IEvs4p-8BCFm8!XfHq4!IS; 
zFbYc4WT^H6R$+lSaahD_vA0$kLpF~?uJ2rAeC0eQP8Nh8?cUXwLcO=pHpPgpi3_Hr z!a^vHZ`VBTO>P17`dRrITTV4>J1Z$W^s0RKJ+%!qEW{Y=I=#v*%WltiFFStD)UFAf zeL?TnIhR3duDtSEkC<&l5RS|nYDa%r5pe)qv^NS(5ZWtzA7;#RVnKDZif@jMJ*aHn z_r*%p1EvL^Oqf2sAn!{hnj_-Zwb&M`Cr>clE@#m;RVHX8 z*&+hz1wssUj-0Bpd?m@5nPAA{VMlh)XJ>HKsLb{9ZXlheFf8Inu%XE6T~P&82!V?KXKunfpAxF>r0bPQk8yrE^jRd!|K4 zvL}gXVMy<>eB@c`(WLHk;@2Gtqk@1)mw*aLj?yV0pwiMEBhoRz0Me~8gnO%iG$J7ll0!z z05S}G*TQ}8XYc!c-!FdO`a@CH%&fWMtm`+cj0mWwou|3r zzkj00%1IW~w4SzvyX%h|(sj?QZ zpuR00s=N>!P~LmkPZ4PN_QD@f$m4Nv4+Y{&*VJ zMJv&FD6jh|Px`wHjjfrf?cEbg<(g*Kchj85B6NGNq;BS~Qs*({G}e3@=rlk48Cg*? z;gmTlRdX{S(=f(Jw8->A`2F|6%DW~tYn9jwl`X7OHJa%K;}bu+-@>fNchY~kvhm6; z>8x+{hbbTl<$fmBC6`4xFy$00xO8A)v=rguMB8?&M77XS3T*oh5o&p`O#doRm zyy+W6g`|%fG9|spt9r;AviTG$?qlEa^Lt1d9TGG*$j{95^B?Z4-4xYR5F>pu{&UM5 z8~$|V^K_#fbHv8WlAj!4h+sk&p%ZFYF2>aV)aTEK#m*Cm*oI!4>FF|A2tIz(0c2a1 zo@+*u-apcXLjV}Gg`MMi!#$rU#OmG8^BNEerJo1?=Y7eC{DB&s)`RxXam zkF9GkyZE|z7Y-TGP5e-JP-K*(k?|)uAGv;hGOR`F@#~AJwz++yO^LEhc18(GC(<6A zRldT|U#5F~w4^W~C`&UzDbPJoBVR_}K1un5lL^#K1RIgWN#slBob+^}SF3>Q_jQp! 
zjAPgtSh3H2G?EIzAUPH$$wpRAnkRY|M);$OS>2q5r3h@wUTJYvb5Ezg_?s~Rgq1Vu zK9*&|zjS4oEO>28A&Gc}6ou96oiIuh)1O zFja_*wz&_1J~MIwDW@!3M?SeK%Cbh-HZ3r||cM`?P1ULTQk> zf4Zpu@n%&jJ^@IITyJTQSZOA2d`+km8aERrtCC)#lkZd>=p{NEWfd7$s9;l4cY(*a z^+w~&;e==Qe9@XVwx}|z+%xCQ%mqGkrvamp20u$dwW`rNTl%GXh6svzp<3Br`mb$w zrz`u_mjWt?TDyTC%$@vXYGln{D|6eB{LrNPlh5PpVjd@E-l!`hhDeO`EiA|7j8BzO z+%T#0-uyu^W(m{(xn|ymSB^>2AbVrh8FUMpOPy24yjo4_yh|Pf8=hGI9()uD0Knbs z1b~H4l0QFjIDxiFPy-aBxcB~$RBPVlNY<#+U$WR@UA*_s5I*;AcJK>0Cd9I)qsa#B z?#-pv9_Fmotj9Xgu#naWra07|WD4+ZC-YwpA=^*7=*1;AlPa6|L!Xb^@3LM|C++C{ zOu?6oF*=>{mYn);G&(JXsPXa_cDY)!D3Taibc+|q^HF8s)cS4ldrwg0kTE`-N2GVBt*DVVV4jYBr5_hVdMp(1;0(bLo zVtO4)ckOC=E-sfEu@pro#$bmJ>|Qg#1_*;r#?ApCS(7^O3bz%OuHyT(=Gmj~YnAH+ z9aLRBTT||2|K|1OB@i(kzG#ox4;pbg2ug99F1x=IGVkkjBrVDifRZark`J~gPLeZ1 zUQ#sn&{x0_9hX0qX_$_h*>LeQ+6`}>3JCnZRR#250M$c>58z;u$GW)4#%3Y352&u{ z45Ui?x;f`d+${huK~??R%C!8{gmhnwM40>-O!GVtzWChRw69M~##(?20|MV4=3+tL zk@2z*jcY3_9{`s0lbQF-nIzf32GEd1=DZx{qNV&R!1SUix+?nKKbcRw;P!jHgxHW< z&fc15E<7OP$4~hd;tLLKFLQmh`Pt#i7nd)_S(NyyIzt{M$@l54U`)hE-4_NOE_NsA z)D&i}(++DbDV%sEs7gH$;TMZH|DaiGny&cqWdbZQiMH>@dO$^kpVN8s5oz;SZPZPo zakms^c|M7pb^>{VN_x6N{ILYwX-C%$`-A&H_lwlLHm*|3w)Rh};^nw{Up_qAN4??6 z)>DTfB@}#wr4U_iT`=NB&6o8GMMg+Lp40qxZX*7zT!FXbG8u@CRe1-b6jU5?> zC@L;!N?f>MKQ-ywlh$6uv$e}p>saQr(a@W^(NMDFK*(q)-wkP$jwJYs>G+KM=Jt9_ z$h1$>Jb0rn=7`Fer~hEiUCGqHz~;9mzcstjaebl#!xl3c&|Fw-RZK7ACA2{exH$l< zxZ?WSKno<7cC%IVUqr% zDfrY5(9*r0Ax&139T|?_*&fi~RA5|rl&?PM3A(|{DMyGYiARQ?XAZwGEw)P`S@1y^ zzY(yPPN$nT9=KvY*3{jBmZ?kzNoBgs&+zjDS1wRV%w>01Y<3n^#wfY~MVuGM@)B=6 z;5IH`9SPpNKv^{A*|g4z5ln`|2IE;jX=`dw>*#3Bf96b6ji8oUEooVWnvnoKBtu`{ zTHU0Z*KX$GL75y2R#fW2y>*B{0Vw;%fV;0QEH59H8%Mi;c9kb~UH#^jhJzCIqzM`R z#f_{ml5?I?mIN1L!>XC66R_wuJR_0ui03@@O%uT3aSQ)QwAYCM@WjpI)GFPP;maOY zV?#x~{}c?@5x%MOMzG*#PR7I)mWf&~m&-U`h;_08xNa>&qyr&-&OUa5BmV2~!^ZUh zue6isJh%K2*_kk^RiMDke2LQdEY+9gdg2e0iLv}FDTnwH!`QKgLifK_Wy@8JA^ ze=fcbD-m}cm`hEr2TnAZjpP6l*o90fdin#QO;)CQw{a z>vNv%%f*5FX+LS!cOCxzXy05nvHto2A4Xch?|`m6^Ji3E3N2Qe-XHQJAV 
zu+ivtBHj{uNA*LN9%8-=Vcw(^?!>J>7iqwg2kVhp`&kTiC`rFV8t>+l!FuU-cDD^Q zdRQA`(Q zX3rr~++qClzDE;u%}dP>E`Pps4f8gM)~ZadjOWi1%CdyhdLyJOkHa17qG}RY`b-V_ zP$oxxA9)aOO`_sctvk(Yr%vyd&qp}l+yXLWb zPt#;~h(}T)#N3~F0%12gjfrwMC16QbsHNJ&V~Av*_@*Rd|!@TM2<&%$hK zrhUz6LS?~sE-9#|8UqS$w2yg!_I0>AEIG0S(6<)#PIRxVF0=4UQXPwohH zilct2yCS^5S`9V-@c8iom%n`a50FC9+>equ4A+iTJNo_{q1fb_!6fBfl8@b2gC6ks zDStnzIjU`gxmaQzsrdAJO-R^9t&zx0{VW9kzN-<^(<|c-XYoexVZT3-YlQlU=5B8t z_UeqIuvp9oZRjKI8~}PzkmPuDF?*Uv)SSiqGT+H5d8M4g%@t1sO|`M|guRS0j0^B2 z;@k{$iH$jvsH~=%{a%{NmrUO#d3gF6I3#rYdKiF(8kGsxqr6{O$3y@B?!q3$3_) z`v$pYK@pKSK$I-?SnAD~C3p{rIx`CWbG2ycr?wS1$TaE;4V3^D7a>;ozL>7Wm7V5a ze~+_LR?%CYwS{T*t$DG9<8}6rQ6tUTD+z31-4i(bU+-EuJfs(c_}9uje(fd!AYlBC zXBmtUbkW&C!~fSMI2$E?OdxuN`nxAN!C~&5|;ABIqLsy?*}+wtrJT%sQ6>W_u1=7k^zAt$%$`8F;MM zQ2)%+Frtu%h=~8Bfik#fycuFo0D*~Ps|}iVXqm_ow+9A5OByQJb_*cC4Yao2O-xLD z0CFWbhLlafwUqSNe7*x@k}S0(k*v+lO=xUFv;X?=JFIG|s_)*ve=j61Zg{Nh zl1}!Y_uaPzstW@n9LG{JPX83M<52&HsGAZLmj3685`(tEUl$$nU#sjt?+bqWU(fqL z69x;Stl|a2CKM8`-|isK1OEIyFcR;{{%L}M9U3$osp>wq8ILg){N!`0%k_mFKQ z9auTdYkjf~)_dviA-Xf2?Dz9C81H-DfbwzeuL-7;0AnDwS~=etp9AQa|Lf-d+Y5E2 z5rrjxKcG)aP}U_Ce=u%ha$!Dfq4J;=gAJmO2t5;}gYF8h_1 zq!Eyh`1?`+>nDc*NYWq>l@6J^+q}h8C09}c_LtAYY~wjDu}JM1ynPK&XR>nyAPK_> z3u@a2QqA3a0YiDZ(AG9E0HxVqQ5(e=TpqOgPI+ehHYic=Aa*xUz|5>}0NTok3b10A zd_;tWbxn3R0OS$ognR^fUcm#F{oWiu%$D-+^_uR&e$N|76=wtGyljY2 zqNb*H{h!+`sNwe(RDJ;o5kNNR@7Ds0_V2a++spxn@EFjh?SPb*9T*EcVCw9^v;a%g zl)n6ztX{7&6&018z5Pu!b#f`s;mzWX3^6vyBJp^T2*E{yv-nH-MKv``4D? 
z{2z%v*#DosNp~M?wt*4w$PiT=Sim>Gw>F0z;H5*R{2oC2-#4ywynp=bOh?1NW(OGM zawlvhG`X;#zeliKeQmN<6bK%&{oV`z`YK};4*Jk@Y@$H<41zmCUlsab zx(7hNii_W}Et1;K(UG?z_}}|Z!+6D>=N~rX?~SZNC7Q~~|W3?w|3 zG&lY0mKm$a68~!ilUDMet_=)A{~X4ise0d>VpcKN^d(K(f4?!buLNJswj7H1eKXJb z_alODJO1S#UI1sB6r%Fm)j*MR(%q5p(I?&^usTvHqb@4Fy!NJGCTKw}g&E+*V3uRde!9 zf4t8C@e5&a1H1hJH-ht7c}#dRAc2!z(b4c{R@=>Y2^`o3Z{0NU_Eh^?)wG%4B#^5l zFN2;h=#Lw!_jtN!D)Y~-C(4HorM-sQL(Z-M{E~Qm)X#>^54c-6*)%?i(JO)l`~WEG z@2IU=YG<6+nOfMcPI&J#jk^^_3U=o=SJ5eN1O|w5MXqe(-=g^+z&FKYcb|)&aL3k0^2uLB1)BGmD$Kh~L=<=f0^dK1&%Mz_Y*0k(%eW147Pi<_@>2fb2!}sMqxVa8*BT!;f&Q#uV?_ z=gWX0nzG)@^;LYd--UhZn@E z$hP@qjX)m^fbI@JoQG}DW1@kCy1ge9sxm>BAW2`AD5mw|*r_|dU>-HCMVt2+IHEGq zW*$9~b5XamPq}=Z#-LXELHokA!v&Ya8*5Ybk|{p8_li~W%zugh1qIZCUPt{v?gqU= z$z=jMnfzsuRz96RMH5^}*GT|~xQ3-<+LZ0n=V{GW2S%$(V;cbglE(C^0;=w}KnbkL zsumoh+W5&r-Wm$r-%&Rl;)`iHo}}#$=G>qe8A#nJe@j5DZh^CU(5>qCHvsjz8;=Cp zoAk_pGD|!dg$b6yq)MS`a2-E=5G^a9&r7W;2Me@nCP&5ymgSPfDo4^BTo>$UMf|2DVaZY7|@eTwR1 z1>J{QKnrTMh>DJ;Mr4PRfGu!*S(Klr>enB-q^Pp|GqKWBKdNU;vD93J*?yZk!d6P=e|*zpthxqqv|y z^dm^}5xFPQ*Df)MX__=gQuBbCj)DED|MEeV!_+8M5KWo8T>OVg&iC5f_=7m~2D08g zf_3U(Kgp7SCzqJ6(7-3O0yP9!gq+`}hYcSYzpR7?Wa|Ihh2?aiQUIs|c%HX=hoC{T z6HJj#-AO@mS+H8M# zppHB)H2hSX1_lTUl0ByVce6lFw@gR{F;*L-Eel2ecBMi?L!C($rDR~W@i(Z0%SDB& z${IKu5S7;D&-3Y+UUVwOjh7-iqWozH+&lWtrh$ixfim^U+2k}nuiaM@aSY#)QSNRa zLVRyD0OBTP+(BaT7!HFUHny1%*sXKATpSLJP)8JY)?0Y(X)+N=$$L>3Yw1FPBSaiF zBEioqt7#bzt{>q3D<}a~upvm&{}6Wvnra&ZGfs>5&vz+j>$5>=fo3Dv5rkSNV4<9< z_ofxS)kN!dxM~$8Zoo*BAgP3S3btmFuR5n5WmWS{y^2(s;5;euA>57zQ)HrrpnsOE z78W{tIk*T4?m@&_cSigLIs7-|i~1KI$6r{TT&u=6eHv4}9^ ze9LB%#r7?`SKY`)^#WQdckXoPei9*@iR*|?Lpd32- ztpjTBllN-LyZnpzJO-i^@zP9{*!{pBmKtxp+cXRz%Nr? 
z`bz0it|*V^IUYs_6hiT}zm?GPHp$gun^!om!?h7Iz}J>vleI z#9lE!f@E}-vjqI@B-X3vB1LvLh54bF4dN1_)BFJujDMW~>_E09Wd%-^q)9LmSqWN` z)16{UnN|mZ%B+uR*bksYwJ=DN5)uZNC|}haF6U#D;2&0g*PMH^cHwnA32i^?rq9G6 zn~1V@hk}7BvKx|qT26E*LLMF)CJqf7`LlUzCn_PQxFohW))P=kcWx(T!_Q`z;*m!W zz&OjifW(|~IAu1UD^|~R1gv&s$M<*G$F|WDHN37E;bUv_8)JMhohq5U#s$fjZ!@I+_ zb)^f27uLhz?PBslpeLr;ejh8b526w;rwNCz=T-RwaFe?1;hb!$+2PN4+RkUz6^7yq zqqzuJ#C>Vk%d8D|_wtt&5`Nm?H9e^=saC2b`lX$y~h30`Uh0Jt!BJ2eMu&5L{_xFxM_X4HB(?Q8Gup3K7 zg_%K_NfuH3hkvmC`eVDM$Dg11$@f1LCpU%dW1Me8t)3&hWht8ofF-%I-4S@uu?#?| z$;j!WU$RFwKA_KO3UHl0<&_GSV;~g+aVIHyucVPh0x(QFli3RQN*4B4iqlGymo&w& zd*1>^!N!f+$mDn^ioeUnSD7O*CAM?lW2b-UB;Bi@_!^d}btR8yr(kr{n@0f>vtVuZ ziU1D;I#d>sUa5rF(Nrid2^=yA$xH||#;%9Zn)@O5HI9~Lv#5FWzIuPXXuj9HJwDP< zS-s(4ww1VjF7U@WMLIYO6~~`xP$>sO#T1gRo~Jx17{v}V02LYM5FW~YbT|1B|J_hh zDy1LAQ)jprTi($*n2=FLj8-}~hn24HnR2e14^t2p?rZowEET9-PSuZ~RoSe@VdfS~ zW88j~egzsdHH%tbt-E%o1CO>L%{HE$mWL#oaWlh!DDB%W&VV&wiU0L%4!Kdk+dUdM z4>w=k993J1*ks!S2xU{Bg}UF=bZ?5WARF3#<}Ln^AF6w$ z$W@XApV38<<}XTv87wJpBk%!@L4k&LUv0>cf|y++fx`an);B=woZ9{)*6$t3*lW3F z0sm!{Whexkdp7s*V0Aa}Xm zamEaxa`2NCOhXe+uD5^gctcTuZJ>Kr2OyT;rh9>}kfXDY15 zY?6^m5+DVk^&oXW-D-thU@&LI3XS|4zMq47wWy(6U{mS(IP4tgKv23QEO5P4AU~sX z)C(!SXc9T{a{pE|q+J}8mA}toT~|yI7$!9>n|%hhppo#RAwObsoW=rubw*2>`88O& zb+Zo7uFgpBPP|OrYvok;(o0s061kL$0Wue^JU?8cB8g=S>?OZw~gce-_X{q=k(TY_O#y9oV7oTsQQW488MH8qsai4xY zve^9$Y%aPLSddc0C;nDXMFakm(Jj|@F!eq#Hapdzko_S-4#7mHbMb zicg!J+x+L?NAryb@yqkAo)D1V$<(5l<%Q!M{6tXrL(k4UhDWa4&7$zfl{cI9@$kZn z8QL!d?Q=&cXUXb7O)Q|DYfgPO0BXssUG+9q-7@2rt*HKsk}DsJNCe5y>T=E5?wMBz z1xduqyKkJ1+RKufpJ{=L-Q*x1o~D-U0z8I+OC9%w&;!X~`bJ+b-#S{OK`G-Gu9ZaU zS$K=b6knWAdKvbrU2da@;tS0Vs2mT<*s{QS<=boW6;<(7)3AXKn<>+88dz8@hzC7K zetl|!`lq*n@L}^dW~81U3dGx-^v(TI`xWMdioqLrM7ks!OvNy)EAMUXjC_p8nqt`F zJh?=Yhl)WiY|6Px9mPHdoN!Bp>iZ&P$UEP)E&fWld)|b4(-Lc*NlilD7 z!W|)v#I8@)GAggNJAL4*Eey9$_Cv^DFR_mrQTIad|yZ)ZmI=&?vHFAWJ8fL7B>kwy28t5z1k$#H% zIA*YDvgoop=hHR+qz?Ej`kp_$W!fb279S^j!6R*6%zT$a6Pd8)sa~alR*jwmT302? 
z^yXuJ{*L5nB7iS4r8jdx*sejsD#+(QEKCCCL<2GxOIu6*OAF_3&0c1OZGb-96B=#wH7Smw};60kxhoD;}>qPf0*JE z9Hx~k_3^4EIaCaS2k6yH4^MVA##6FHTDQ*S%Y8D?3k!vKlP#6Pag&2*1cvp=$J=hP zU(qY99Vo*V*b!z;xT=b8HZ-o5bzWjzV`gj0-eTXespm1F&&_Au8nvOkStv_8>-Em8 zc1^MR!_Dr{?6nKXRb23=3BZAZIL0zp+TsIvFXiv}JA&&`VX7gJXIt2VNda|B3k2LgQBG2sm)gO=C+R+=WrZQ8)?o? z?*`fl)1@4U0*;f~L;f_K%h|>Trw1PA^9<8}EG4<8*S0gns!jgtb;=bRs?Hc9-BBvJ zy{dDJ&!?L^Pfm|WJo(jW9x%EijPgt_yl9<+U|ES-cO7!I;?0r4R5IdUd4Bc6-zU~ZEt=LZ)L=rCqA8mS&XRO4;<(PubW3f0ovq{U=^JI;^wrNn7#)4{ zD_R7HHV?HtYhzRn%-%}^KoV2{`P6jRwlIG4Bl`W6;yf!cGf=45!c=PKJV=3~u%Ox@ zP<~65u45KT!HG1iEK2Dc+G+bb9fY=XFDYgAKBpag9*m9$G~n9}fgO@l@Hw&?Aj#<@ zagT0Rp_`<5(zS)`xfG=>T2u;Z_}Tr0X>JdAO>NuTal1*4zmje{qc`V}2edb@(RLOO z?_X^Eak0&Mv{l@ALxiS>dJ+Ehid^%4^#sZJE)f<_3U=EH0DzdCZp+qc%5rnGrQeHG zN0lu=IIF2YAs^E+oK(yNe$^X~Ub|0JOy#|(p^&1vfDuxtZ zI9$EsmzJM&oM_u&E^YCYGRkwfV=-3jCBk0sOc&(dFYz`YyoI#+|_#m&Va z3^SQ-@Jfd3wKSD#JibWP?p{A-s;w^%v(V8A)L@B_eFj1!3n4mpRddDaBGN^&yi;bJ z2}9t>mqT`RC3nFgz)De#f@fuh<_^?F?;s9ycoP&1oH_+mHRx#9)3B0zInw(z909(AsydP9lhd@n3sBx8qRl^oG~P#N#}kepcrMawLV-zsEyP~?E4!>awDK6VWTOcGT6UPI1d zP8X>Td&!AlL_qJDYkIwcospv0TM-O4Eguegtz@Mt`ldPHnI+GY0}M(+vk1GS*B zI?2z{Kx0w>GocHV#BrJx6zl~x36s0*pY%4aCTxw8uq&Ob_o@0wa`moPz5Uw!J&k~? 
z5^~))B$C=>!n8)NTqA|wtX#8v?cIe|8KdWy@NH`h#yk6Gk*u^5ttOoCUh$*n@Dv(3 z^MgZym#W zM&R_7)nt{h&+Olfq|Iliv#al>l*Z86Ohh24g@?^K+)lp7`(uZj9?7;LqgCi1#rQKd zjtED8EjlNjiRCyR9YG-LwyjIAhc3==Ld2xu3v9afWMp8V-SvRarO`IefO#JCH%RP< zG~()@$owW(vB2Z>65#MUC9zjy%MPu9w0h+xzPk0G>{u`{)(reI#Q zZG8pmA?P1px`d%uV$OMVA;# z-DrMX|BRd`^ES&bkhD>*M$H*)2pJsm#sqq(6htVv)UH*|Ru9)MwUs`fmN|{Og%!kdoF-mD_2X$L{*=PL{}y(5M_-y37&PIg;~JOutsE z9bViI=~RR3?8c|l{yOBBxSXjpHT?Q0&PHU{a*=?&ZN&PSHE10 zqaoo|9T7)CFIR>kd83$KtX)0#N?V~q z#qS!jpaHM|pxo(?9y?L2PnlPHxW6)hHb0o1C6K@l2Q`Q~`#^=x;Gh8e1L24WI}0ko z6YL3lP|>66Xz}PE?L*vv08xyllze3_lH(rTiE?K}3%-+K@aBpu!O!J9EzmwJpaiSP z_*suX5RV&T4)q~!hXa|y5^Nrau;g>%l@4jlgSdqY_1kY*2~ICMZiGD4?Fl^GLSOYj znJe9MuT=kIi<0#~U|6JT)6Zt_E^WCM$>q4Wo%$wub9Xm@#>VmrP@W|5sxYm;ZR)&R zEfOns2|xEI+_h_ zVnv!n7Htfzhf>n;N#+dKUe=QYO_QN3ltcBNxvnIYQc$5e^CpIxt&z5VNpTstVEJ(% zFRx{O8Z>uW$9R0c9`8YYnsj~C5eDCtTcT*)Z&$@%Eq zikMBfyOeu{dDQ1Tl~PVC?%Zgx|C$3@Q;)NB${?^{3mW<;i7)G}5X)BF#^&{&V1>lZ zY6-U*Qq^{!yc1vDenEr|pZ=GruNKC$?VU1A9uGlLShfk=6LS-nSeSaleQk~cQ7F4p z8mks~RJSPY(*14Ok-#T!SK6&kC^SY+u+uS`udF;1G!MHuH`?PRoznV5pM0TRoVJ4v z#}(`A7riAEFdV(VH3Q_wQ(4$puuOys2E793Et3^4WfT3^i&G{Yg-MH4uOO8f`V36( z&Q(i)Rb-gUmtNf}+)SxGZ6mNP%$`Ri4i(a?I>iEJ%2u+df#Bja(={JmS$_Oh<;ziW zyi>uUR?8gg(H^}ldTw&9;UfOWjQr8ZyW)AZ4E#~kRQW5>SR4nER%P%qA$wmcDjiXU zW!^0mh~3D4nu1{M*tonYLEEUJ%Qw^{+QOE)F~5?xCi={g#w+)j4?mI@YvLw^9tZ1RZc4h;`IY7 z5V-}Ib+_q`Co~!P*(jZORzfj%l;L!Cp230%b`-q?VyU$qZzWhE>3fvXW0d49L7hE{ zX7W-}rGPWqpJwqhwZ2K8cCP4-Lzt}1%3Hb>uG&X&a%0X4el>Y1;tHt1;WS3dhq%g^ z{TgkS5z)FCvS_U7t%*$w+#JsH>Jy_L6`-XiR^NSNO~|+6Q4j^1j@$C}z*a zFNiii+3#0*+G0W(7pbXRWmz}6Fed--19zzIWB~{6{nIb|Su7r0wZfMktfH45iH!uH z)0F9iP9}+end@H@Te#(Iz&u~~0%!T&-k{`g#%53|4QA)4Ac~#jzA}2hD)#xE8E{lZ zd!4kizxT?uC|IYU2PW@n7CRSBaFMg3U=GV+-qSa@l|5Nt4c|GpHAGMQCk}r|n4M1a z;jG_>J72aecU~8@Rm1AD5+stuSak7M_2{n_?gJ_D6ig{lN1>nYC{vgD_5Bq`j9gg9 ztXS*DIk=A>e<_e0;pC6-2@=I^WE(#kNLV0={CNt=uNUjI`oY`ueSdI*9D1a8&-6Bb zNFg+|O}PVaUIZH!d>fa4HhvtLrql!gHUAnCIwh_j!RJ`+LCiTcAsGlHF9QOV8zlv= 
ztZP)zBM*+K;(5edv*-NS$UHSR^n>GMY-I~h#~6^v2SH)bC22lh+@n# z$!PTG0#0mFGve1i{ME> zrJX}$l`ZzNcg!TwAF2}AL7gs^IFp}PS-gB$8f)d@%5A2wIPOJBSID{{7VmTZm+lDj ztoY#j0rTXSXmPVAMOPiA$ma}nBstpMzk}oUL=+)&0q`cR+nqKiD58?yFPjvX^*|E~ zOE-ng8IEpJBvvw}KZlpSw3L#Oc|n9q=EE_f&Si8N z%|4vfmitYDGJZu*huBM~gR}SVjga)$FG_E4%iIxYG1=TBvF@TSLla)lkwN_&h$a4m zSM1TK!FW^VU6YM$TFb(462j{fHTwb2H#i2}rk*TPpQuvjaG{l7ed%Ms)+%Frp-0h) z^4e-;i^Vgiyr;W~ajetI(v&2HsT*p+J->3FMz9}fPWF_h8kko&Scg6EJx>*9`H}7- z^IXOXwzs@~GIb-P0B-NV;5+5ft*nMTu&QOdE0)Ki$sNi)=|P>J=#*@KV0@TC?r*zK zaPz-N%@u_o?x0Y<$9*y!=^N&nmyt&z*|x=jv&S{jV$H}2!r@Q8xLSI+6imxH$NC#@ zyc#KAZPOu_ix`a{Nf}zUSLCXPgWv~ zupg(6EA}S@?p7Cdov1CVT)J~bxZ@etp{ORix)8&h^|NwGb&sdK#uIqQ zIe}NWN?xTxJiqJ3F}ug5igX1=frBiQH*79GReZr8K!b2rp-hZy8Z-$m+k3)W%N?d) zR3(E$X=XBU98j~CjkGu+jXjdlDuX7TE>Zje7n%8^Gz~@N8XtOmh)QHnACAsmLsQs= ztt7tc;KC%vp@h%KEOLYd>#CWySq16?4b%Xf^-;%WQX!Saz)pm7&n^YQy3+qPR-84M z$`X*BEEAi#Kb(0f@jOV?W5S_-TY@p=G;NtA1WS%g%>VdqG^YC1N^Fqk>9=_wF__Vq zF$|efJ-)Bd`f4eY0)xv`o5UQxxFOXn)Ow3jxNT=p`E493J~L#+bj`|muA+F2`9kIn zbkz7XrI^wz+Fi0zV8@p!cCM|ITTqWa!(dx^9f6fDIvLIE? 
z7m9mztD43fJCRbN6T=s6}A9_39*(I-9c@-FQY57X44ujHYWUrC%>Xpvr1qH0PNsd1i>!T3`cD$ge zY(3DDT&Me!r^UOkejTLchLji38?<=~_7?oDGHLmz%qMcNlw5YcD|YIK1-$dlY88mi?azr;rHm(`sxc=6Au|Y zGmhj%PD;SlD@b{SpNkGkoH@^tOZBl#y`pW!RXbhwq|E#?Y_^3Vz0rz|e)Ge+?k<`M z-cZ{ii;=7Hv0IcKd@(A->NfZ@Cp6-gakYC9qJO#WX6zFaTxX0CyBO5iCg+!58Jkt&niFnKF|3u3fsgl(RP4XX#h0YLnXG9sZp}VNmd9Q_dGqRO>*-H_Pi1X`M(tRj_-?+}Nt~yStMpge3XyPo_n}ihDcv3WB0HCDS9kneFy^AB zuM*{B)e?6IP#1*L#QrP1k&Owawwvu^*W*ndo_3Z9zp(OU-Jg;niRTs7?qsEt@vy|~ zbc*-atEXfsdC{U&i+MGqI+v12*4kv&JtrKx;oPUfCq2KS8Qj*V?@MVcT==Z_nyo$j z!hGBQoJks=g)itIAUegeva#TIaehnVg{$+Bb3#^|H($8f>tIXBtpsTS{Gz{Z#H!W7 zM#YY-g5r9mYj-wjDN2HT&IZ5kg-49vUww)D*$DIq zyLb{PvDhwfwB|pEl#+?L-|hV^*(YtcZn4Y2Ty$p4S@LdsX(T4UC&M|er=PaxLpbiv z$onMoG4Ud)a~vk4PD}am5>`p<7F5hw_Envj>cYq%1INH9;!;+e8#7$~8h>9_AtuJa zlUFp=*Bxbf&~dcK+Cx&&C2Ur4r5HXmzUX5zAoD_)g z)iu|zBBbY$5?`kfa2?XP*gyehy#8=>Jy*1{Y5E7XEw@tM z@0i)-^~zA^B9dOk!mYxp_(ij@bsC;9$@tOr>TS1fzE<~Z$~k>s>m!}aqvm(qRNYxW z7*r^j(qALpJc1~|+bRcvhQFTP4_!SAA=h=A!$=3MJepV9GGk?o1~12x$krbbHw5Gn zpriLlq^O5a;qB%ppd+-ZM!?UP=x)=^8P~C3j^VKgtxxzrzM?6)qSUVs{TL|$K}4)= zjD=pY+$Swk+1IR19SP(1qRn~L(h}o}`YlX&;@IGc4=-y%o&uy{lV#P=sVL$Vx#2t# zWb3(!n8IB2ZY1Z88D7< zz4>Re`%a)v-}uJuZj*r5AhK#)EPxR_QhL|^$#@=!W`)uBsZGJ#P*bs}Xvg z-C>sSQ?Zm2XnZ{onBYIDLuuitkX}+wmvTN+!6?drPtm5CLKOaQszM5<~i6gDTPbv?*Av?whN&AO!}-8LO^gk5P&O=m>mOhmupG# z$;u=xYYs^SQb287F;@`tbI2jGD45bid?5kR?@^(a-IEPWwa;VaMtjYtY8u;tw)q}#} zW9&=}9i_(j5_73E_mg?<(c?u=C#=RoC3fI${u$p-KXF%bAKA4b*4)PatRNE5=LG2heH*TUo;IyNE%!>IpF@=`2c+YXBh4s)?0<=5m2XR6I zO*6!#@>4Q0n=Y|b_#3|tPUKFxhmulH0b-Qm)0(BTbRQd*H0jLeXb0(J{kp{$kEu)H z9)M;8anBAR;KDLU*l#uiFjI4m^qQTb#ZubgKp=)I<%HyN$38fZkH_T>n<-5{o%)NAQCetR)m_ae;k0o21Z}fpaH}(B{dF)#J+axfCb%QK1i+zNFo%jH zqB*tSpe}K91c(aPQ(tpew9k2+7S|K)EFV(pH0i7?T|k#Pz;*=HRrDK>6MgBB}&PwfsT;YUmy`P7Ej}<;2abD3oE8H=Z?a5V%)6laP|vR zR2WK>{jT1K(^HH+GybZ5)Q>f#(}s(eJB!Xux>dSo#P~~#F`^U`1iQPO`Jwuz?^KMl zd_3F9hM{!VrN>O({#c30Vl(9`w!2{Gg5#<~_Iq?J-rs_^cd8nt>%W~5;8Frya}@}^ zIEp(7c-+?k0z 
z+KbJB)HoD7n&k`&f^4Fm6`l)n28H!)=&)~+pPksfaiG4Luh1sj0A#+)h5UWUVYai~;=6*yw10_5_>n^P|j6ThxcFt=%7U-u7^Y^>8M6#ql!1uE@ehn~d_B zX@T>*ZH-^;1Ac&VI$MIBo$mj?Cv5$PK%81Re8!1pW3yp1+C>$Dqd@5H%dip;2n~x* zH)=c?UOKgOk}eb8=^Qj;PRtK{eYo0l=;$-y+6Zbch)FGlK`#ISeH)Pj&W+ke0#Hr` zF-c~tC`hASrXb1WbgkKbf4nEDw_^HD`6u{qT49m=n5DPn3Ao^j+z;GRy411)Rh*dG zp;Ir24>#E9n3rkZBFwboKa+HIQTL2Y}IK^Y_J(pvKL*5AW4Gn8lBJuVRXOIN~~_ zYZG| ziUMmEVWY8ePo~*Nc0W@#aSC7LMg6fk;Ou#o(r!9CCgIl6Ip}O(vFV5L^uC_YZtb*b zSkXRPNc5&`M``Sr?&s+1%}!9En|wu;_C!H7ALf^$Xw$9JyMXBCx%!r-)2N^*EQ>@5 zK*teE9ksXaP|rQioVs`Q=3c!1O=qV@5HNGI| z2sh$bJFLOkLR*hjqu;WMx&11GSlS(1k9$%3`5&uu=b)TO3sKX9sO>~hjUZBx&Rx`2 zkwo`4YW=SySKEKi)zD+v6hm4MwZ$(es-ItN2&!LRZ*60I~3&CB3?1pPl%vKk;CU@GV0C{bQh9e@jXf zL|qU{z>b3bfLg8u7f=)vwp&pT9FHmUxu{dvdi{M zojkNpJ!=HQ!m&%%1sQhwpk`}1I_7iKFZ-765@IzTIRaYmYx1K)U5Xn?wgJ}%XlFW; zG}V%l)8Gg@p!A!EDwkkEtS1AzSRfSzCeb(pidHsvr+k~#-XPL0`FQA?(2y&BsN3b6auDa2$XBtyk#ra~^SYg(4M4?PU;!Tyu zXq8wFE?!e3Y0hx#uuI%o43G*k{WfCjYDXSGE_$EMl%w*(qM=nDA==m?^x=l>Kpib` z+e7&aFN0La82DiL4%AnbIbYCbAj$D~Qqiq*BSXfw6z2q2g7gr7E^zh6rnQ7jVImH2 zf$8b-vNFC=BI1MGq9t$xR^=_OQfjn{ zn#8vZWPiW1$jI!fc8|m`YIc}%p?A%9rDDl4Z(zP8NiO83m5=&8Sv-E$dY{()p|PMv zhy$H~%@3C8PzbuR-TYdMc>F38v-ofOP5s;^n6YJJU=-_+ZXya}NrW7)gWFsP+o8I! 
zZE%a7T`WR(Tidj;27^uzw#Rs@+4y~nMZoq!4U(ybIaA2++)hp7&iA%yr}N>CXsq16k1vK~qRu1Sg-w0zjy>Ll0i`Ab;wy_%z#}LS;=#)3^3{Q< z9t%~Rlb}&=v&GK@lRNR-#G8?K8n&**zkMjy=$gKHjc2i$ghX?I`M9b56`&0$zgaF_ zcB;(tq;*?Yc_zcUBpDX~dRLiX;?CQOXC9T)FV-P#DwW2IuIWyj1@nky1h(QVb+6PO zw9~lg3Iw%b2pg%|nRA7g-AU*=3cAO!5m>acJVOXK<{?K6`mRYs!*PUw3(~9T@{#v^ zS=%2@y4o2`ui<4o_GJpsewRzhJ9zA3I{L^n!IqG!XU+D>tuaFSn_Bx};klVx_q(nw z@s>P`;C4Kj_O#Kk>F&uuVe1^;ZRca_&nOV2lR8XG_X2nVbi ztWzuoLz!O1U=Of=O{9NHC4KC*07BN*xE1o^(5E@a#s&FD1b1Kz>-Laur7U@dReQI7 zb2+*FJcPZBSG~%br7%;cbHb=q#I=v?bYb!K)=v|kce%s=d$i9sA{N0-^1- z(Lh*HFEEm^)8Po2y>vL4)gU)j_I9W+hyy5Tgd=VnFRPa$DC9AEf8XHg|PZnaHneE(r(^Ja;ehQOgl?j}7;^knI?Jw@BW^!Tvhle* zTyjf)_s)_OVlL?YZG;MTL-Fjr2X@Wrg1s^REu0Hrz4AX;%8yNfzP_U+*|tp+ohjh4 z(?HkN+Iq0Cm&eAWW`#gE+S#vI%rqK(pi@lr=Y1I0*BS3khhXdId=~Zt_Zsh1gV{~D zgX@4d>$8cG;$-AHl&}@ycs&ry_n{(cjglX+g@=5WO8MMlbOsW=gJAhf3#|syto92> z6XQ!N64gM5by2;cWnfX8ho+N3 zM(MwDGDt?9yAMz8V_xY_*P4goA!7TTEggM7Nt?2dDcPaD0qxS|Tn$+G1>GgcB7)B6 zJ9>7RH{4De#sH224}@eK@i+BC%256X1zexf#|PG3fY}-15diXq&&`MGl34P_8565$ zVq5>9`yr0#k}^0)bm(5@cDzHcJnR=jTx#mjdq}{qQ^5{iLNXYf>{p>ss8I_7>$6~2x|0t_ zfNQ_JA{5|Xvw?!<7v?vIY8MbEKg`r z78tj6oN9uR2C0~5cQkmrxZMANT!2hC0vAuf9lMzNFcl`1MQQI-sGT>Q2ZId_R!{@g zKkjCySop~R2VnSB;P17wfjRU7J6*@z;6WJ63vk<;Cw!NV*r~DYQxQA}ql#XPf?&A6 zFD*X+7RUmkba@bdl=G(5fO-HEj3reb%ks=4D1r<;J>2$Df5BA3L%|Ggm00lM2nE$Y z0E>_VCKuEgDL2(t5cx0#96oql&{!O=3u`5R|G>V3K%ic6$WO_|w#fe$oPdM;4m2m@ z=GgfI1&30Rzkdn*l+y2Zz7-^WF-lM9WS8`tB(Ei2c>HS z$XnRkYvv>;6iS)&>%FRHxc@)|*p9)Ye(F_n@`*8iK!B3E2o{_j9t+SvTcB61Bbk8; zER8n~GX2hjk1A7OnYc5S1cr9rp_|YfpcaU`D5O;la_@L`uEpN&H1GJY3BC>}b%%BVeHO5OBx*1yQx)2eOrHE!Sb-<%K83!LmGE+~1o(mBE^|hT zFbEcG4YC?TM> z6$>p6P&MqwhwRi>R>6p)!CQD?`$gL6-n+oi-!ubwgDuBE7Agk#3z_Ht16Bn{LhgT4 zg&=qc$-~8^jqxgC$3;JGxeGQyi)23X=yV0BlP6|oBq-<3Jqd(dd=DC-{PJ-NCyk;q zzGr)*xHiwZ zd$|RG+kf_ZDF^rq7Av6gkp0d)ao701wfAF_$!)6rcHW#BuVaN)4C}F~PoUc4lokT? 
z*hVUW%Tmf5FKGV26xtGm_eo0{72V~K+*}6~;4g�~_`4HMW7RA^YD%ts_|j0JCD{ zrp$R-4&iW0Pf#x>A4MB3bw$f-v8huWsGtI48#yT^!trk@YsUH$n9gu4fRJuhv=zj`rbcK_tXm)?&l~+r`I}#3ym4>;YVBQe%h`nRDK5M;n@%BBYxV=MTzGUm)+u^IFK$UF|D*yJ{#HK3=IN?>?C0-GT|np00&N_XCLj z@(e~rWq%LX>5uAV^Qd!NLtK}n3X@Xy=Ly2wFF0=c!g69o!T@$94?=nr6+zqHk#T$f zKER~W{R$FPdB=P|VEW4h#2#7QK=6KHU|uCnc7z!&YAEo;2-4K!e1-z zvZyOeBy})vo9i7@@E3k#*n@KBcUQ%oKT8iTqgj%5=qS>C@RA~sWp^s?Y5?@|oZD<= zK-A3y`;3+o%|CtPQXvc8T+8p)m!hPrwUvTQgi@*A5}$9}Vjn|=U+D@Rgs#H+$y{V9 zZ1cLpY8hzB$*5~eQ2 zVpV(4fll)f8klD&iPiv=oMCBa>Bq`t4NE!iGVD%#+sz6FKN~>-&c&#ISX&TP1bVE0 z-@&qkU*)z%tpMNMX8@rA=W7S_wDW{+1t_`!eni=wQ3-~Sl~Q?QH!R{7L_PWIDZs3( zhd9cDkQxCq{C<1t{`$AM*K`_L0`@lKUrX?Ra;yG-=4|m^CJYpM*6idOa~`0Uedx zp8yq^3P3=nHi?ChV)XZ+n(G~hUVvBgH@pY@(ShqX&JDUE1_uY%NjnTn$ny%3h5id} z=ZyNJBS-%mn&97&dF1n8VvqXPBpZ)E{b|^mF^D;y(a-JQ`K`Lb-JpmAy_uZfl-7&2 zLt`d(!t4CC|L)pXbuY-9KTpfs;Tr7lv_R#57_>Z4z(cK0ulT(R zJqVL_{C}eHID9~}Pd>M_dkLK{s8rOdLD>zX0&^&IqQGPBHDuiE8IX{W=-EmAppXtw z{ZfF7Rv-WerWW`7-H+cf-OptpHul;S0b{WI|wfL1n)jiu%}kJZ|5Aq9Qwh zpC4c}4!!y>oBlr?q+x5YLa;oP69oXGlj@6}P5T-C|9K|!^YVXJ4*$K}h$oxFU=(%G zD}f$>k_EArijjHzv*GonO`Y`yz)V{ZuOCz=!@GraM#u)>ary{0>B9 z0LX=kg5MZENbU>Wa=h0s?%W=*@f|P|^jz@x-^F1X*aUQ7x>S?_qntUpQC})X`)?v;8f_t2(sKC8oh;07xO5 z7JM@9Pr8XVfVdzNhzgVieZR>=H8EL?#Y!Gh6jo&RyTD8*%Ye_$%e8ZVfQ8bz9*%$p zuj@Qng$i0bivhs2cwcw>J|x#q1LF=)fnuQj6$rLye6P(Eq8(+sa-G<@R>K)PfX{Hm zwE%_+;K?AwwmJhbPmMsG68c7aJ-@${>%UPz11)OVi&4aoNWS)h=-p5j24XG>bXoIW zk$-qbMpXGaDI>x{;h*69j5!v8rHX%sXd#(rx>P^GNu>?If*9+Kg$h7D&Kk5KKS)yr z@G574pR0&ciC0wcmp=4wphu_P;=dNu#J^J)@Q%v=V>a>$&Y=!ct$BbAxQH0O0qz=? 
z*YNYH0mFB9SU}Wo&g!PUR?&ij*v{VP5(_R>9SoO$5z9c2V_~rn{0qomqNn}A?L?jY z0#1=u)}qO|0eAX8&vfCoWneZ+3p1hFs`)G&7NAFMtdAA&HGukhu~qsB_(kygEX+FRSX^iX#B zF76-3+v~>JU`FWId0^AI3=t@xyZ*2H_J3aQL*KGYvy)M@%bP4w*v;no!|;D z;_dT?P*{!}ws>`5z~dnahiwsiXsduobSYwL>vvn}A3ofxwCATRhwTM-G76UV{{r-c zu!=32%a9;I=6lGg9h0s^ir+QVvTxcA8(}lsU-G!~Ez^Lvzv6NPebv(Z`i+xSDt+X@ zxRaxxpyWuN&n%#pTq!o@Zfs)Hu!*y<&6&nBHi2jGYOg?PQoAzH?DqiFXdlU^4R19* zo^nF9vkvIFK|6815 z-j_A;=kJXaL`zG@cW8KMSQkjeJJ$;WYxCq`7NAsZ_iSuEOX2XYg0gBzs|gIv_L(<4 zYfJIxbxh`+K{*U+HSMdih0<#DEBz!p)LrinsP-FU*W8RxE+mLtm7NL#RYuP=ZydNX zU#kOSt;{9%d^^{9FgD0%!Q!4B18Q1fCv1BKXKybSdL⋘+kjT{fLGh$m0nr{SKO) z&qw&G`uvtz&}?CprujoaE3{4~ZqjIR%7{G;XOm5K>Wa4_%_5D z0|dch-u*NU26p=rq(5pq-2T`E4>W0iK<)AOkhv4Wb3y5e;GQUu8oZQW2zp`*ZJ*&$ z+2SW)pPL|>b+i@JAcS+}0rJlDYIe=EHH`b=U7z74kX}iL-Mzp)_KCGym%iQFpBA2$ z8qJv4kTUk%hA(#7g431fE%(>us60e_Z^LitmX}3w6CSxs5+E!_mdeqRb)8k2-9f;8 zYhp1N0JtafkSnV(TTf_{Ue4yE1Wj3w?Y_yh$G63I3bKJCSb+Ou5Fq=hB>QbrLgI~D zls_Umj+BIh7Fd=-KohwKx`NEcpjHeO=5`{YQ@gDYRj*HZS@Jl(IKOX2Oq)Bsey%9_ z6o5(ewD8!g{tzv;+px8LWBKdIB>#hYIAJ<9X}Svk#xCxHqj}^3ybb3GfkU@ovaNx7 z7mA*a(`v_0EnB)A;L?S!f=S;g49pFLdN8PAsR)WmwfKZ?lf&xpNaV;M7&G7;=Ppxz zaOTwRdR%yQ4f+zzMbS3$GWYk{NNpetnA z7ri3heHeM0Anqz2#6ynP=WpK7e)REVvxVSQe%lr?7&l(`b=9m6xrNsd1(jm2ngvX} z?($AC7Lo7#nnnB~>JodwA`TOeq)pbJ$Fvf6hbBVz)xqR!L6IOy7- zo;sX010S}PsN+>M_FYc*HrBmzQhSv7I(#~LiCy@n`%uZTotTKI{)GH+$Gxz#jX8Z< z2AicT5-#%40=jX&KhEcKi@o4@<6nizFp1mg1%}gT`#HK-o(MH zXVdU}DIZWvMu$O*h@+K={^eOlEFmU?GOI@`F_GY%5LhjUSbTj7#!2`PXb|Un%wy%4 zsxNTScHK*ZKQDHkVZ@+BB>VSu`6hqDi2_xDEtJFLF@F}rPTkF{UV0vGI2fji>- z$Jkq+9X~oob}wi)&e@S4um!(G!llbjh6<6<6OJJ-(Q1`RLVBm- zafJ~tt*WiHc4xI{(M}xvQ2+LUYL+OOXFh5q-($*wC1=fn8L=*aW3mRm)iO8$=XxQY zh%ZKb&V{O>3%XLWyLyvbPGNS>C$PIl!XW`LR4SG9 z&tO4VfH)e~1o|CrRO-&10s&>AV3j2W&SasN8Ngq4O8ACgwelUq$x?lp%A>BYFUI*K z=$9gqQYuwb{v71(ygF*$`XpF8i(oy`hxlVxug7)A-zvXOuh4yRI`GXh6k7D6=RB^j zJUe0wt3Jf>Vk#M190rC~D~8-~GnKk8N)GGoKg(AwbkprAd}!3CUAm=`k5-NPn6bR3 z+A%tt8C0NMk&jtaENr_a+dtP+vgcNCe%<*NU%7-3rVGCIoEJm5ypH@N9NlP1qz@(@ 
z7Ok%U8^(CDkh=}}qqADxoQ)#R2Tue@g}NtJ` z{RHB|LYdb;jAke%jW#;+hM=SN(R9UZ(36DtRBT%t{X$HYCqrAmI3JOao7i>edz9=& zH4?jxxu1KSekA%`mQ|?igRIb?$%FQa$Sr?a(VCqbhg@7c>bK8r0rryrrj!n^%tMR5 zUw-LLk%m|=c3Vo-VcX;Q!EfBVWmGnGM9!DzIWOYio}K;7MXHHY9OtwZ%LnS6Y*iJI zqbXGxFS;5A_j?;nr&{*Ogr>h18G}smQ=44EZe8a>6AqdqBTTt~8o>)C4WHO;(&u%PX_vSau(IZi z!!ytMuex4ii)g{44%xO6V~Jn-yvCiF6bUH;xZeCvk{e;2IkSqkPYc#Qv8p9ZHGx>m_8&j->AL;kIuX}B4p|mE;T(`| zOy*(aXu{d$&zQBm9dIZMKsE;$8oO6Q*iRsie9_w43YtS!J0C6*?ySR}BUx2@sHDfv zRIdao(tof-oPBjFj(3#iq|vKoaH|~a)9cQZ{m#|toCD8*xXsytxu2Sk+3iasY?+vx z3E!*+dPPh4Z;`RNv#PU7ch$Lbf7wd76S{|PmWeo)p1G@9ND!|=;>2=z8U}_^$BD^+ z=O(4FyY-es+2R$e!c#ABuKT~QmFCPVSsi-UiXwD1KrQ^1&5@ukFMRKyf_hYi^H z8dhx#yP^vot%PmUdLXPT&;$PgWizcLvprF1KyKL+Mlam<*L@;qr!d10uxneVwSR+H zn+Ono8KVxkxHXo!uCjma)%tdkK2Cma=bmv+en9cmh33U{=Za>vaA@YB6q<*W^IO~z z6_VQ*6`$cPaDypDET$G%Ry|(Vf>k7wR347EYDv{nBiixFs7&?uv%^)d>u+ha$bZ<+ zEIx;7I;0?A;VLkWT9tcrZZI}JLG zp4QzWj+HW^;N}Dmt01UyVs%mM4yWk53T<*BPtC+$oum>Mo_hE(9bRV;ZD~cm%B3Q@ zA&|uF$l@8kp<%9Ow450jRqKG@_?W=ru|@=eaM3Nl`yOZGKe*qJD=Zj`lK(6X8{#^; zc(zKhbCO!gm;M#LyT5~b=5@aK%@HBd)?CG9#K)x;i0#+(LgPkKlOo$VF(Hs^Kzx-K zm*yM#{6RHk*xj2jT&ZNRbv(aa>Z;Ab{oHZ&Fs;lT<>9P88 zEz(=2V&dVub_;%GLSMukI$Ys}ZL63A&#v~$*31{XvRs@$S2o^@FiWVH?s_nY1-g5@ z`fJ{Jn{xv>bGheN(8m!!@&X;>x-DNT8=m)%tpRGSnc&dodvr_Smp+ra{XMfph$D_z zZC*_Gjen6ad*O{%UqO<0h^U?Z3@X@->|Q>4UhQEG&n<~MFx>0;gT^dY;QV7cz$U2t zES0|KoE>!(499^UXnlxbuB*$07vmb3=|s^vc4dLGc;=v|x#1{f_$LG%h+BDG)nguJ zv2=~+L4M`KTPfcpR!gnKR(W?-&vjp4q3~1*&X35FjQ$YSJrI}ySZ~#N54wUF1JxV> z{6hJGY;m)irTDwR;@$wi&|m$mpM8*Y6j5qg{MXDYXIadg&5*@3=O*)*_tc3Ahv#ZSL$m|%smIkT=;SG zJ$4biFK;ZTl^}7m8rz#fa~l=;p~Zn@g)vUsDiz$}i=9AOqBv3i|-mG`j_3mGflyV%~m+5BZjwft9VDTG5Ra zyWx(KJk%kWeF&*fC%N+EXa4f%wq{z92iuV^?uS}ncVS7J`>1UqleF<|#yV)zz4z@> zQvkp};a6u&D!_CkNU6Vi}<0=p2}uMoQD_igxMFz zaT+O#0{Motyedj<1ANyP0(S~cM|kXl91&8v(ci?1&g2;RkP=)GBG!x6PMd}II$Wy` zAauPX8-0qqqVr81du^8(+`yp61qJOrkxv>D8dyCS&QU3w|kL4^pFCGkT`aONq6*K|g%V-bV zl+BKaOvhbt908rD;*RY}jjfnMD@+Ff>vlCMY%{4PriI?mwUE1i%tyQEWb3Adj$W&a!ve_SwUB@Y 
z`1tM6)716%^YVGOL_Eq^UEFzS#Z~#=9~Y057pLXn%mTvN4{MeK6UuccOEbPzU&__+ zsnVqytepFNSY@M8^|A^@mMd<9C*zWF*Ng1hgZ18H<y5R&y&m!-m zLgMGn1zYahaV{F}XVxdup;-;t>ZA|r;u>q>paQVHm`49Jzk*lYR@j~yf8Q=sEk*D5 zq+3|p9wazkN?|^77!eQ)vu8?whqgYa_z5II>939*2UCGA6 z@5R-o$wfTHp85!jxbgb~AFn^v)j4Ny{m_9x{Em0|Plbyg^``@kz*&2{Ga5rY^W>H( zn=nbg_Zr(fo%*bSwo#wXk6Fl~Pk`eR5Ar?` zxvoRu!F)^@CC2%qbAguw(=PlPu8xkI_XK?RA3)(bLgKx->!R*erZ6$><5Cr$%_AZa z9C31^pzd;2fIMe%q^`X=M6CC4LlH3eXTrH;_JVLkvP`y0TQ@u){TcEZJ`f+pD!Ch+ z7^%afwWh!UC(ZGoav}5eAGPl0!udEZ3c;fGl;e5wn!^i%~42e`i&cd;Uq5Oq%+p3G?z_XifLHT8@$md3L@p5w&K zzFf>nh;_eTap{TIUG!VPK4H;r!Fhz7ka$otY==E!iYy9R+kTcS)-}1ZZA7NZTWu7Dk0Nf;>9u6_aC!wiF@7UE~<_X>Im@Aa^CYk%KYJ- z!|-L1dv}(!EcP(nIVJl;_2@O2bAFP@Qj8U{o=r`zhWi%wV1QeF&z@$E{ReNHDUo79 z-mqvdx#@HBZq*lz*KFW>jgYFUpv_;`{9|W-9T8YH^S2G14KuQho^2nu4}X%uz0$K; zs@l1-=l(q)vB)dx1M^7j`rcIk&?&EyGPdWN{4{sdil-cu@n-X|j|u|rE&K%JT?2pR z69b5MT6VphDZhN=q=ZzmE_S7m-bE*;D!4o%IDNy9>$9jEJ@Oje{52~Kk74aUQXV$_ z+_J1(W0|vjd|v%n!W5BU%0Hk5FyY19LmD*a`KKTo*s$tN993i$d&7hEk8@v4RJHIP zuLv#23Ih{_aNzKZtzdb^%Dt}pLSU>!s;3tYzd;;5(|We*uw6}%yr4yayr*#_7t%f| zr&{Q!%xDscn1GF43HRQ@Dz^6GH_wZ1=&4%J&&!t)UkPpS4Y_Vj$xoFPt8H$0SLcW? 
zCuP?sP<;+vugU%y76w`&J({gDW!Vhd69d9D*=5co6|)* zk$HI0`%lHHKbK}VooF#p-5Z=whP#l$M=N@{OLcs zy-pV~`V(8$+%<^FnJo{al?SGjD0+NKBWzsbXhw$vk}(;h77+S9*=>4f`8GLJ8s;==VNkvb#216 zR^8u&PV}*?h7$yi7d=l?cxV{0(nO>HQi^3y*VeZ#v-J0Ws}tjI!~t39j7_QYTm)%EDcW%_wy1%f4>{)bjHX|@y3lOjm~I>OR^ z`-3DJOTW2r#OPw+Vt{hMyHqRCG^kz%rSCHyiGg1|G&Tz%@q{Y`$Y|{N$uC@q_H0TyW}Nb!?bG?eNcLs-bTBV*|u9jZxlvu+NyjF zC;XOY6mGQ@3P@h!-Df-lbuGs25-&Ks8SHw{*wNAPIjP~o&Aa6~ikGKKIVUsVV0w!5 z&*j^sw+2jq_;gDL&b)g4I`5%MC8yOE&3>B@M(<9GA=Zios>Javq_l=k@`R?NYIVzh z1Qk@sEF>uVn79_!Ta8HUXT}rp_k21K$!Pa@oVK9S8{U;O_h`s(VsLI;TGO*iv{NtY z$%R`XE>l~SVnJ^@NWM+fJbJlTaMkYitmtU^a?ISdmk|%;LnAb6;ViBARz>=k#H}wI z=k*MgDBObcYs718P1D5uaoJWEuV8|Yw0r_^Z`2uwQG0NrgA}r3dRvZ~=Y7_zx-m_> z13__v2~E+=(5+*kk%7LgY8fgWsVH>O4aqv&Vx^luP7WEj992)qw^}>Tic3%(`+it% z|G1O#G@yxRQ+ZpEplP`6cdm8Q``6&g`B2DFSEh2JbsCjV92QnBz@Oi&NKgiHMZk(2 z!q|sO3*BB^*j0xTkz=R{V>i_#7angX^0E9{@~ac58JQ1lYDTv3HZQ$1bt#Z-Rn+L$ zw1>c1-N&Iqk-;0so;KZXiALS;YErwkH?_?S#Fcq_+cy@*ctlfC;V1gVpHx`Je2$=y zw;AJXO9njWn*Xd8o0+gT*smOflWKh2R5f{M-xIMfT%+q^U)D2#nGcsJn8@GL!g^5N z@SUQ?zU;CN#wI{n&F3^%gn@El(20>FNyx9J%QM~yxYozBjtRM+e!h==I_1s68Xywi zjyM~KRBDyK80kG)BzJqn!%9(ZLo?M*6-{R^tWy+PbCtdrDPKwiG5z**`hfK!AtKUa zN->U{O_k?x{E?$pl{d2#JDczdl$=)Oo9X&{I+VQ1(0zYk!vlz{uc^S> zz1|`lT8w&Q!p60njdN1tI;njLH+?sjkaED+vb;**tiT(pq6~IPJossoddwltnQ&xn z>K5gybL9Os-=EgL;gZTdONV*UUe{R#b&~VOYGu_G#N-ZZrlYbk6(J)sGP6auYRBMr zTHF^#C$4A@w4e?t)o^t^&AyjN{#e^MLYnQGEUVS-md@MXUjjGRxg)7;d|SSiNI`sT z>uH#AFi(|K(Drim`Z$%0rY&(XzV%T1S9r=F%akhpJTDc=qghB@(n_^fG*${8oWHx@ zwF4Vw%S2wQ>#II7gKL#HOGDi&N*!z+!~kt*AK8$VG)?OlR#+Csn$Rl}Lbe_o2fwo} z6n4EYI6y?jVnIccr`%O=WDL%+Eb+cFtvJv4q^jb+dsS1awm$Uaym8M}t9aT^@*}Ww z5gv>LF;MZ9sX-~v4PHVMbKG|t1KP1NP6)SM1?JF$uqQ;8BP@=)yR5AR3H zV(%5DOn9f5R*V`xRl${(dZ(o>`bUW71{J{fM>#&IhL^U`qy07B94#J=nEWzWR=49) zyYgL0c4N9oS^oZ$gh$63suk~sfgyo01&OeUI;!Qi%eZDEd-|VYG|%m&fJZZs znK^G&D{}w7JyBrNLLFun-}0+}kJ9d_qG}YjJY|eCqvv`mrc~X0SyP**T2qOa$1$s+ z$mwo*CDp8=DQj1P$Tc+wo++ikgO_LO9o2Ho-wAP{$>2b3Sh-$2s=xlhv8S&!d7$a~ 
zb*4PArrV-{qv&G60D)$@m|vA?dA*}!e~zdz)`kt!KTYSOeF)vIuxA}=A>JEY zj;wx+?J9gOOe+@VucFHvDQ#J=a3t%5M1YD7k?#nLXe-@GaWG>eN@PMI3#YGm z<;BXt1MijtE>?`a^eBo52yyd`>A?T#3Re-t%HOU`b)4+UP$bsgNDj=Vs|DF$g(X-R z#eLrG*iMJ!av9;otDfr1tf)H2g;5?PEh7Wz_alx}YX>k+6oBF@p~!s31zN!z{2-;0 zgOkn)iHBN<)yGC&5u=!WIzdv6TC_tdu6rY&JS`aVXYcqLpG2x;B z0inqJ-F*3|;e+dJ-9Oz&R+e682^F0h)l-584o*w}Nqp>G*EBFIO7h8zY*?q*Ag|!S zMsB5$cHVwmfa$n>lTp?3#v4)?Kg%5*ic6u~ut$0U2dMy8tX`4mWa9(q-uG-$ToxK| zqggLINh~-<)4tpK)AL_r1P3ZRuYT<+#eLm&+1=%V6vD2r_3b0 zCMtdh><~*+m_zVX;w~qP2#S&EHj)G8Py>~$l7kT69?Kv_jY4(hB(mf>-f`M9m0gLj zMwrCb8#5(4g%lb^>wUl&%9r}MnTEtpM|hZWH{@73;ii7N$62cOL&CMFl7&Qm|DVXE~U9i%`i?0#Z7c48d`gox0>K8mOJ}Gek zc+N_d?T{KRvTxX^)?Xg9P}SSOhPnyuthCTYcvPzo7tTK!k2GN&ZJ}AcaoA2>*-RDm zqybAo?SauP5H%I{K(>%|Q1DlU%7Bur9JjywuPTK7Qs@DZ&lASZvs!2#P z5y_0Paohl2yIZ{Vn5RDL;R9SgR!Tt}Q?o$k^W*Yaw*%ibpZ?f;vo3{pv8GEqS*iK) zS4z#JBCBN-hiH3KGz;E46&TzWRPNLy>8(l!+O%u7!UdMkOS|`)vUM)$rHY*&-+nuV zA$i#ZZ!G#t3Fyq7PyHlhJtAGti$Cq=GMRDuu0%d^94mY@a^w$L;htjcz;DTosNR5s za?2Z4YIF2`AcS$1x9cGdIEC7_B)8xN2fUeJ0tUg+Y40@_c85Q`0MxbNoX3p9h5YAM z6tmBdC4!sPGNyj8%-hqRUt8{*qVSkEhP$;gF~PTbVX}I#6+&FhgOPgUB=wabtV*uY z6TRzNetW33K%C)cf>@(A2yZnkVe_`%r(pW)lI$qf8nsJ@Oh1qZZvW8itF9Lecs1Su zW^s+)+uXiv`YTMO!)EIeUpbYzG9`#4U)uSA*wEx7fpx&mq;!xsLi{HqCXeV!Z ziZV^-gp~30eH+mt)ED(u_w2hRzls%>C)}6Ic>EdPK=w3fk3)3F7Y{8Z=NTVLA#{*& z`8~Jjfa1d7Eli;jBob06dme+mmPaq}XA_2JG5 zY<9iFGxU@7tFzK+Rc$$Wap1=(^HJJGR!(*5T*f?<^!+%x3E?6?S5{t^mJcvwMkr(2 z1pIC5h|>c^Um?pfZI)l}R(Zh&D_MSr=SXT35`xl>iQp*LTI z0lff0mzsA6?JZWEER(96o->^UE-;lf{HZ|ugT>MjU4qzwm2Jj~<)|6*EADBNy?h9* zXtmoho6vz*P{a^z5bqe66&hy2r)FjOoSipKN;zlA;zlIbud=0H|I@7wRs$_TBf%kl zb%Jl+1(dTs$i%jJtR=TtLF}hOWfB7lwfjU>UL*&o7PR;L(Cxtbe`_gzo|q`mn}^2d zVG;umq7p8S?FYnh4I6)$P_SD6o*hQP^!y2mda;84^Q)FGv-*L2EMrGL z_C-e_-Ew8bc>i|2szo0~Ua)y4-D>%I%nE`%KPbmfSm>$;K-#oX*<@omUD&%QC zQ24Vm9yG~_e1UJfiRLqk3&R!VrL~NXfjZNz)-;So)B$~rJxgBt`vN!%W@GVwo`_@Q zl>{xJLVC^Zn7zY3UJ`*dd1!Zfrad)t9h9godeoBKXo?AiBhs>|X9Bm{laLGR5R1dS zUMA~WS4x);<&IU3Ox4~Oeo6wzThFz6T2W+O+8*R5-wrJ##=a82z%x}0C2N!EA$&Ll 
zO>#fOX?x~LubjLQscKEVi!n34^&00tO^nA4U=`5BSa&+0OH?8i{p%JUv|WpwU9Y- zVLrpYHn7>bVQE$-+cRV>>?G@-1LUuvV-mV8OGLO>a_R6?Yhv-(shY( z%89!>h%J{R}LMPcGwUciS`eT*+M?-(#G2phrh+ho!J}nj6x(0lE=Iu#fgu~L^ zh}VnI((CU-hpP97qvPBw0I_%Dd_1_=zU3@{k;kPPl-sDlKxO*i4VC%gGuerm1@J$b zpSAKUgfSjm_^PPQE3sR_yF?{xCGLr(UozUrQ&hQHha$CG2_)vuN_Q;T_O`4S-*+bi zO}J2%$Fqi?0&mdnamO09zHfShvDS15l(SK`MFBDIMrfQzn7oWqBv0ZWD#|>cn%UMu_!zSaU1Pm&XnQwSlRcBD0=oWy&4zP5};cz*^i7J zL>X$VYiXECR~Aqyx|dGxM$2Qo>rfQggtXd+w)Y<^muy+7)C&3PRjKw7y$u@#1cg9v zugelI+|V0TFxU_2UFTf^iEX2T4hg}q3=AKV8%s*t`kMBBdmZqkz@ZvpylsUFHA(Xy z0jfLn5niJ}>QgPv8GF1#>k_us1P(&ZH=lKZJ=8?N@48mm1Io!B4hFrIj% zJn@2gt*?{XDlbi{?9|4P$F7)>dATZPRwG*hl)&@u^t8uo1i>+zLvU>$L4o! zWT~PP$BWe#FunJf99P?T+WXt|-2#Fs&+L5hFCu`47#6dPX|#Hp&_RAunX;^4;C6%6 zm~FXki?RCisc{ybJB{;uegIWZ{mE^a(3=CeR#!-Jl{&Fw_5DR(b`}ZW_Z!S6J2P4Htw?2g)J|VD9-fqp~Yu?Q3l-8F8g)q0=#K?!*g~&v-O4 zq6cRJYYDcvE{&kz7I!30Yvg#DN~)(-h>H7mXxT9hJqiPCt1i_G$ZMz(EE1YKG88%% z)YX|n4FD2wr?h5y_Q`sxRSO+MzF${<;8g#i{^yC=LCX#D{1(W;yTYx=Y2^E{Bd0Yy zLPi2nX*n9Yp{Whn(L}nCCu!h{fW5pO>B60mnhn%9>IQ3dhaK(@;jq<;_B8zW$8y?M z%TV|3oIlA9jNl!U4fr$hE4bV?Ch&UBjX5W)l%nB|x(+f`Vk@^-lX3I(lj8w}^6iDM zx2)vPktT%21ZI5a`toSButAEzBbli0`Ch!^62Bvrv!ybM^Y0?OwkA!v!vwqp!W4JI zxHjL)IYvO0*NlK?j*ZD}l&C^dUYd&1on*P@z)bj?NXlMM)2!61mPeos`G`OLIs9kK zpL?R@ZSNZ3NYk=ml?V2>T;70V;&4#I*g_C66A^-3;sOZdVf{$v*X&2X3E1CCs4AdG zGk<~~W~?OE5z{{;7TVQP>!-m~@RbqP$m$0rEwypg4_oFPQ|8jmZbapD2ko5E=XYIf zf%C@&syPZgPHqjFCh*Vu7Rokdo!#dUEEdV5zig5$8v~eXlMS9>G~7JFej7z?+v*MO$BVuP9SGaJ|mQWB>?1Td)JLR+hq!(OW5>TtKTEi$#}eT&&9~6x$n{AjhhVvj0&D|qYm(s%dTyY-FevmnrNf4 zHy^{#z~$0MsLOpfZX{;qW5-??scno-j1$_$$7^x(D`OkI^F4P@HHMELI>4A7tw1hK z+HbFzZ3Qk=8~aPDu`7<)8ILpX53R0J&n(of@cKm}dDbvivX?zHbaUwfeclM#U8FWm zGF1;2rs@%zpB5T+iD%l=o~B~njW2R?xyc(7RZ~KxdRM#(kFJCQ>kZ$ep4RxpECsvG zQNJsz0-*H}jR4h#iF&6i_-!h1oXa`hNNMiw$w`%1ejeTP)2Q zl@Bddr6{w^V5^fH%=T&UjJ;s|1jSQWQ4+Ym$pnq0P+!s?F4lcfQBl1F66EKOdkyL2 zTh_qx*EcK2z~$p&LgO4GQ(@uP?DahjvuDS=bLQiBIcU5SpY*KTBJ~<4W`@?Ky?F6r z$7*+n32KnV(x-$O(|`EgL)WKu3&RBJ6w;JZbf-Idu-i8FE8{A?y_4hH(H}m01)Qhb 
zELFvyeL6---%dU6!3Z9)X6|P(l6!GSxI2<7>;G0ZpD3`)Ye@|G272vfP+GgFyxmyN&3{Iu5ka)s(2j_w&M= z7Z#vI$G5)zQlz#~;CL!b@g6JR@i(uwAD{IlI_IT9bvHBUmf)flmk0cq#bCUTjxtM( zUS?5?mn`DcJ@Wp@G=>9wDTBkqSF$nu%k2abVU6U!c|OAE5HmBYI`^}jG1@ZM2!li} z93ywWC)Z#WXKSS%h9}xpy;$C0B>K^)b#f$m*naM`lV=cSDjm!Sg561?6kIL8QJ(>2w<_bo$49rUH9|t&&j|J!r>5i!&E_zx?Gmt~`-&8@ zg`Q`jeH`?>VDjWi(+TEl*uZ#7r}q<+dwz+io*w&;RVMCIVFo$u;UQ>`@*W=bXSX}U za;$+7GyF1i-s>Nj^)NBw^?EkJM=lHl!SuA}xM8N)szRHGH!vI5Wd6EzSZw6(6@2%+ z-?h5&!v5;f<%Dvy$2DLH51QOIdJRTQ?hYS)b8jk4Kc!{cp$+otA5S<_4)olm;|W(0 z`+~=}`12gn9YkPq9z^VcZNkvd`h!5s-2o9tD;paFUteGI94|*l#{rzK$jyxp+fT;U zCp8S54a}MA*%7K?P1D;7YE%XRT<)Mlax{gR-_7e4o~jIzWR=Ch_`tyIyko{8TOBH7 z0Bt%!VhkH7Z)^DHUYNvqj1$cDAH!o*QS;WrZ?igbu^ze3p!oayq$h&QD+OcsT z|Kh%*;6w;yH>~i{mJAe6WT`RIt%0&^q9SCQKkG;1(2$D@{_5ioiO8XfBc{S(*K*k3 zX8Jsm+i~&9&41GxZ~#|L{fW`KFuxQgWEDPngJQqc5XrpaP_CbXt z)I|2BQ!#BA$I?QPRENr#QnKXzJm2Gxeof6>?|Z%1`(9ViA1>eX-Ja+9KF@uB?$3Qc z_p@k>^0^4zo}CtER!%`}`O!HnqQl+qEu7uSN9y?5Ce505z z78U#L%yj8^UElLknfYh&^=_nqjT%a%qI^JTNqcQA&&R~kyXLQo;whskW}2vE5eYR> zJqkt3L)(JN>l8HPS>X*1kJk3&Y&27NYFd9w={5&M1Gf2#jWtG? zrTy{2JudZ=ulV03<3iFcf!hM+ptvNG+0ylLB;xlQ%3IzhMKf(MfP+c`#AVBoo64r! z;HQlH%do>bQZuz;~ET z%SiNmx~;8kL#={`5JM}HInyGtP!`duMh=!o{>&tka@khI%fD!oR};YsjhT>IU0t~o z6sei}G##$cU_$e33c2i;1fB6V^g3fFB?r!)**+bYZCM~OL@1Yv!3Ashi|5f`B#v;n zGJKDin~Df^=t`Y?5Lr5f)=TJee%>Y|j6T5IjZoz)0RPV;Ak~VhM)<2-<>FaW&jNZm{U{54!@A4M5 zJc~i+ynyYj;J^`cX{t!vaDC!p9i4IbDD#GGpCsS)fHWS3FiIaW<2-fh)b;DvPq|%E691tVs?w5~ zm@P7#>E_M<2BxjHC&#MLx@xLkD?E6(hLpY#lqdU#yMQQCT12r4Psb+IlaHFn+g`^RQsQaI1_; zq^2JL^RF&02Mj58Z4J^Z#$E4c! 
zb=i%X=_w9D)g=VZ{;b0D!HW*vH(&keYEL27@@!glsKn6P+FC{yzZ*@@9)Sqmx4W7L z&sH$m%}g-!>AAksT)N=(!RDf6|IvL5wK;*Z(=9>{R==blPX7tFs*VW@`?eunoaW`0 zctSz5m+PcwWn~42>U8@SbSOkjdtbcn&MKx5RdLOIpI4~nd%4Tg7h$~Gr=vx%Hd#NVw*y4^bEyr>mciTyf)&(m$p^q||P!$v6kX$4I z`OLDc4m0Wz^e=;&6}KSgB9E64&D^+gW8(c6Dpbt3$|Wi_sKWUR7miZYcVtQE9=Q0< zXJ?1w203-80jJRp92t8ruZ~?e;hf!Zrc(-i&>F?)6-yNLW}>l&Fvu@I?wCbGPGZs` zq0CC-EpzS!|B0Lc1k%!_PLboUdKK?-s29TWKs3b6NA=8X*C)nga26$Td7Dxr9!TSR z#!IQpQDjExwV8&*ib`EQUQ97Mue!6_HB4paaoS7B(}JoWJz7mdQ49mb#l=a4VR)gu zyXs=3%GWWCe&DfYjS_^;Pzj0=c-4JZyM(Fsb4kQkQtZ}eZC;ha)aJ^Tx7>@R`qtgv zkM^sUUkgMgM@`?7$Beub8eTGA`?=sv8y*HQs2kx8LQZjD=$o`nt4BgP{wFC-a_mYig64(Gj#5{ ziM~e)UjuaNWDeGcp9_5H?>FuM#XdI4T{%T7Pmn&QQxUz|h^ma+2NgmkRk5P{LnxB$5aJO&K|Y!cSu~%S+i!1Z!(mB_)YIyAtQW4U%kXgo5xbkM&gb) zZup8x@di-PRN-~0=of9Jol*}6@h`uf_S5ftC>$1_!7b-AIwH!;7(Y~0l6Coi?c;MRw4dKaJ_^0W7Es_F`B;fq`7c>AA5?W zwUSYr?(nn}a5X6yYafiqgyr{nef zT^x)@wV(;GDUK;32uUj!YqY=$Ef%Kid-S51v|Oy4?Js7nVmMfO))|?}BJArf&`RU{ z{LReF#3=T6veWJ4vn9xb%Zy6&GE61L)?~Rlc4muC$|c76s>k9=6yp^#A!)aDcJED* zRTiOiGG9F{O;G*SF9KsC(b53-|CFN#DReHPLu6Hqk1y7a6LQd89UPN5;^R+x4PY9j|#1CPv=x zv2e?`>e^E`5iuVPc$!FYsfgSkKce>F9yi4xsVi6jq0nRlKzz9U*1^&T2$XF|-KApM3_!c&q{Qxoh`LlDs)f;lUr@>odsoYcHMp-(b3 zOm@ikjE@=yEFitBTlA&mcBNtXQk4@SU0q%yeZFeS>iun47psJRXE#*8*v>_|mMBCZ zORKCsgRyEJ8z3rWlyG_){h+|a(L!}~Z<>rk{rrc?3f;2gynfAE6MxxspcV!#mBzEMQ@VxvaEY#(2v#jB3EY>~cNQ|fjq zz7K;C4^^LP=l`cSry*U7uS~s&GHr;Dlkfb^I)wA=+{hnlRP;uLBMIpEOZSf#DRTVu zi5`s^_vmoDYe)@Q9L|@#cQ?G*oS_k*C>)Rq!s?LOEMdmjMkxnxqnFq_v9#)$(d5OM zJEe2w)41-(m;X4hwFB`@@Vg;BbqeN+oOYi=67RFJ`rFhfx+B0ELLm)_5}R@&XXML~ zV}hEMEai5Ijypj)C5cKKd1#)voRa_3`*Snnwq~^t+B>I6v1mrhSlWHpOBn({g{f43 z@3ejUtb-BVmc1MtKHZb+s?o+I+2z4%eZjkQc#vb|Sf&#KVb4YUGZzX62%)6{s@P)p zpltV+2PumC?K{>`_mp?eDk_cAS6H?ln@|lSJ1N!uh2$(m!0Gt@@WT%TxC3RZ%8z{V z^eOa=g#xnX&EJoWil2{G3(^Rc`1C{^!l>*cynQ~#|oDnb{AjF`-} z({E(d&be*5~*kha69y5$>qUV46 z&R05nA3HWBfSNha`$S{u+4d~larLPstL`9SMM%*U^B2>#&^4F49Qe#UbET+~Nr3!G zKeLxMa)*yr3)N*lKC(kX>ZSG^IhG=6$eWEn|2#=uT|MsZ-MeY$&mW;N7sUpKU8_9N 
ze8$|lw#h9SIc8nf60$~8GXfb75@V`%R{4!J+)Gtd&VF3<6qzR+mrF>TxDh=+D+$8^ zx@{UvC*Oiq>e6Z>3DAf^jlRtiwLU&rym{LlC3j5`_VN)E$jc=*`uOb%&5+zzV>W8q$Th`sVhP^wiqs#62hMM zsR4hnu*q!{F(eBeZ1TNzk=4YtvV$I6_ze)ksx9aJ_D(A_)xumyPpb>oRKjiSVDZae!87RYbjtd2$1>O?_6!A>MG z0x*Ny38TR;LAQ{i3l4M-q>14|&GgluoW_3a8CwioboE9FQw^$p_h9AC-ydiP^B*i1 z791R$mX>xLUCHHe44c}#NIw=WBYwvHX%{XmS9kF8(sR>`su1}|$}USzO8k(3@8(;$ z1y=glvp#`r2b(&Vmh|oULn-Q^A}UsRb^kH-ce1&PsfRmd^FDrF%EmixMH_Yaz7QhV z;;Yj0^YioUBxih^THk9bAy-_`6=--$W*zvnRn-T>1&WK-ULXBw{_`U`H?mb32S{@R?)Z}~f%Hx_2=Thto9Wm<#Tn41a7bg_m9WjmkQJGeD@19lm z)&`^Wd`PqGT4zYpy7uAkdqR2?2C>)CIl!ox;1t`O6IIpKe2W2`mNFi4=pkm;*&c-* zSWZJ5cC9t4>taPQjn zg3oXgctwi6|7clRZcUBNC^Z|PdjMJ(o3+_)Q!$Na^o?@6)RpQ~nFa79SS_ zWr~ewjO%ntbG#svD5j(<$Tut_m}p7Tl~1C1Ldn3oy*e+8U-GejuWpK7&(a0`z7j6#j%9vPvW-rh87U2)Utb z9skC(@_~W}r($mReqYnn>ZH;1)9HkD^nVn)N|3q_$LtedI*3U}b<^ov(`td!anEQg z@LSr>HD{`#y_ynP^_&3IsIawpdyb^duQaO%o)oL)j>4EbI|RCGWQM(h$kc4A+omH1f+>dy^k<6oQQO@1ZEr|1bMJtBm=~S*sWxI+CRFw z~t zZXQJ&9cg)X>@|V1#YBDz3Oz>H`?coI?Jje3EQwsw@bAFuER14%4hWXC^z>jtG1wkj z)lua3@l&{uvj-DWa_hwp*uNVKhJ!$|ijIi*K}$=kXrnA$$z{o)ql@Zk*hhc86IocY zGbkk5^GTQpb?Q7Zm|^!a>M@LHjc`vzf5?{F?BJ4L=T5yUPTl@)pT^_aj;z7#K){5Q+qC^%h3+CuJGVezIXsvG7eI zzd$L$lY|%p#Y@Y5hBf~=WlO75SD8#N=-QT^kI$q50FGzr_#*whhZk8(F};-LZs6Y_ zb$5!BS00v>l!SV*{~$C`Y5UF#mb457U1Qi=N&C|i7le|epKwU@{)GDd4~k^e0Nmds z@D`K6&wY%4)C~Pg=f1T`A8+fGYLoRB<0Z({ zV}M!cpwwL)OM-RqyWBxAqux96xV+OKtqHc_EAX^(y5k{ga8ZyE)T?hwa&jC1aGY~S z1kLo&@55OmwIM@=*iSUc*Y7QS5c=7JLQ%l)7j5KWd}ELnN|ce))~z$Jx^YD;2Yx6K zYI7P0ZU{Xt034K;Lm&LevrDzYt|S%~7R2^nnfx%;HV{Dvx;Pjb!B#eS`w zXJhb-Hb@|XPW#atefA$zlw6lqL(58NthMBY<>fzyM@22e$VjYiCZbzTyH9gCd3x%C zUQD}m39-Ux6R~pzO{4AF0bnStyr!n+jTgi$U7gKDE8?Dj2R-lKZZT2M%FGOwyJH4nzD)_fYTxhrfS_@6>Gpl-@nW8UW~)2>DjWE^ z4C;%$Uq$ z^X540i(wwp5R$*OHnX2Qm$6)|SM|j&PP5ltuSye;z*i#=*$%}hr#*ae zLKXi0+AD8^5c`tVPd_b*#*QVIs-ew(0c0XVpu88Cph98Tg0*qi%?#(-|6?Gi7xzTi z(TIaT(gBKC*j*Mrlpm9$XS-|A6Euy^c>)m`r=*B3gqJQ9VS$438JlQ;wZu1&7~(($ zade~vkrKp%hr+%z0!SY7yWi3j9c@RGGuVFzV?&5)aO9ITR#S$EUExnp`Nlk> 
z3Tf{h^DGrQ)B#T_JaenGG=XHHtyJ(_c*GQ9&uMU4wB(tP5Oph z5%WSomb@x8h<2{YTpl3yf!T}xwBUDG*;3ubp(J0L++mozeOMqS7+&s(CQKQ7)=8}{ zkQeE`fItFA_y;0&KUKe1|8*=2?!XGdbCwP3Q&)wHuT>PqYY%3xoAKYm6Mbm>BF1d5 z(M0>7Ve}13T~+WatR=3D$(*AacDbQ_O+b@kdr1}PvpYdce!_X&OW0FEw}`7PGxyS^ z6Cd1DJk!9SAdmDifaY%>vMPLI zt!ya)f!bRhoU1C0HsCu0`o>Sw_MG(qTlAo^|BxD{GiLFt_78YstHg&b3E0>GwXZ8qThTpS3hBzpa#Fb%Rq25b@-o7{q(8d1@jb6mpCde7_ zDdF(GW9Q6ae2N}K8jl~Z;0CS@pfx+s7ltmrPxME6^54_i0|@Uga@>66ODHsDN^b9e z$S48ViCC5Md1OuG-;JCZ$&VqjD`WpeC<<@}`w#wyB2ZO8;xjfb45fF5vrj9DHYn)s zS^dZN`tKhOL(qf(52Ii9GyTDwECDsz|B?Dw3I0bK$m#Q&7!!c~c!SZ&h^R36ds`+R z+C|OW1A8x_A>};QrD{an0Ufhd1TCd`kyEo3sh}Gl6Nuq`2wDlY7`zsUPFc`rI(*jN z`m+CoG)>@H`4@~Pf8`CoU+_03S^o@w8x{Jee=wHGbzd+0|JetB_GD}vLTtNXelJaY zj*MDFF(UgU8%1sXu3xHMy$-<8TclJ!@H6~By5Zb18V}R0VC(c56BPAmt-yY#sC~!! zq2UDP*Bkmd_^-Y6b!dE@4`1hlKn;8qm0$J4S1tO#3ms;6j(U1zPoiN6nS@IgG8d#6 H+VB1^whH71 From 38a350c2e2ae816230c4fbd58d5723f1ece67e7d Mon Sep 17 00:00:00 2001 From: Emerson Gomes Date: Fri, 21 Mar 2025 19:02:38 -0500 Subject: [PATCH 051/119] Add Azure Mistral Small 3.1 --- model_prices_and_context_window.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index f2ca9156ad..8a55aab98a 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -2091,6 +2091,18 @@ "mode": "chat", "supports_tool_choice": true }, + "azure_ai/mistral-small-2503": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000003, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "supports_tool_choice": true + }, "azure_ai/mistral-large-2407": { "max_tokens": 4096, "max_input_tokens": 128000, From ea8c8d380aac05077a4c08956dc0bacf25a66e20 Mon Sep 17 00:00:00 2001 From: Emerson Gomes Date: Fri, 21 Mar 2025 19:02:55 -0500 Subject: [PATCH 
052/119] Add Azure GPT-4.5-Preview --- model_prices_and_context_window.json | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 8a55aab98a..1d7b8794b5 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1426,6 +1426,25 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/gpt-4.5-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000075, + "output_cost_per_token": 0.00015, + "input_cost_per_token_batches": 0.0000375, + "output_cost_per_token_batches": 0.000075, + "cache_read_input_token_cost": 0.0000375, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, "azure/gpt-4o": { "max_tokens": 16384, "max_input_tokens": 128000, From 4be6cccdd347e595dd3aa8fca5b11fcc9f3ee341 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:06:47 -0700 Subject: [PATCH 053/119] doc mcp example --- docs/my-website/docs/mcp.md | 137 ++++++++++++++++++++++++++++-------- 1 file changed, 107 insertions(+), 30 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 1f7e65a4fa..be5f1c517c 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -26,50 +26,127 @@ LiteLLM acts as a MCP bridge to utilize MCP tools with all LiteLLM supported mod ## Usage +### 1. 
List Available MCP Tools + -```python -import asyncio +```python title="MCP Client Example" showLineNumbers +# Create server parameters for stdio connection +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client +import os +from litellm.mcp_client.tools import ( + load_mcp_tools, + transform_openai_tool_to_mcp_tool, + call_openai_tool, +) import litellm -from litellm import experimental_create_mcp_client -from litellm.mcp_stdio import experimental_stdio_mcp_transport -async def main(): - client_one = None - try: - # Initialize an MCP client to connect to a `stdio` MCP server: - transport = experimental_stdio_mcp_transport( - command='node', - args=['src/stdio/dist/server.js'] - ) - client_one = await experimental_create_mcp_client( - transport=transport - ) +server_params = StdioServerParameters( + command="python3", + # Make sure to update to the full absolute path to your math_server.py file + args=["./mcp_server.py"], +) - tools = await client_one.list_tools(format="openai") - response = await litellm.completion( +async with stdio_client(server_params) as (read, write): + async with ClientSession(read, write) as session: + # Initialize the connection + await session.initialize() + + # Get tools + tools = await load_mcp_tools(session=session, format="openai") + print("MCP TOOLS: ", tools) + + # Create and run the agent + messages = [{"role": "user", "content": "what's (3 + 5)"}] + print(os.getenv("OPENAI_API_KEY")) + llm_response = await litellm.acompletion( model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY"), + messages=messages, tools=tools, - messages=[ - { - "role": "user", - "content": "Find products under $100" - } + ) + print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) +``` + + +### 2. 
List and Call MCP Tools +```python title="MCP Client Example" showLineNumbers +# Create server parameters for stdio connection +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client +import os +from litellm.mcp_client.tools import ( + load_mcp_tools, + transform_openai_tool_to_mcp_tool, + call_openai_tool, +) +import litellm + + +server_params = StdioServerParameters( + command="python3", + # Make sure to update to the full absolute path to your math_server.py file + args=["./mcp_server.py"], +) + +async with stdio_client(server_params) as (read, write): + async with ClientSession(read, write) as session: + # Initialize the connection + await session.initialize() + + # Get tools + tools = await load_mcp_tools(session=session, format="openai") + print("MCP TOOLS: ", tools) + + # Create and run the agent + messages = [{"role": "user", "content": "what's (3 + 5)"}] + print(os.getenv("OPENAI_API_KEY")) + llm_response = await litellm.acompletion( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY"), + messages=messages, + tools=tools, + ) + print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) +# Add assertions to verify the response + assert llm_response["choices"][0]["message"]["tool_calls"] is not None + assert ( + llm_response["choices"][0]["message"]["tool_calls"][0]["function"][ + "name" ] + == "add" ) + openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] - print(response.text) - except Exception as error: - print(error) - finally: - await asyncio.gather( - client_one.close() if client_one else asyncio.sleep(0), + # Call the tool using MCP client + call_result = await call_openai_tool( + session=session, + openai_tool=openai_tool, ) + print("CALL RESULT: ", call_result) -if __name__ == "__main__": - asyncio.run(main()) + # send the tool result to the LLM + messages.append(llm_response["choices"][0]["message"]) + messages.append( + { + "role": "tool", + "content": 
str(call_result.content[0].text), + "tool_call_id": openai_tool["id"], + } + ) + print("final messages: ", messages) + llm_response = await litellm.acompletion( + model="gpt-4o", + api_key=os.getenv("OPENAI_API_KEY"), + messages=messages, + tools=tools, + ) + print( + "FINAL LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str) + ) ``` From b006e325ccb293bfa9dce9d9af007023a5dd8e7c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:07:15 -0700 Subject: [PATCH 054/119] add experimental mcp client --- litellm/{mcp_client => experimental_mcp_client}/Readme.md | 0 litellm/{mcp_client => experimental_mcp_client}/__init__.py | 0 litellm/{mcp_client => experimental_mcp_client}/client.py | 0 litellm/{mcp_client => experimental_mcp_client}/tools.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename litellm/{mcp_client => experimental_mcp_client}/Readme.md (100%) rename litellm/{mcp_client => experimental_mcp_client}/__init__.py (100%) rename litellm/{mcp_client => experimental_mcp_client}/client.py (100%) rename litellm/{mcp_client => experimental_mcp_client}/tools.py (100%) diff --git a/litellm/mcp_client/Readme.md b/litellm/experimental_mcp_client/Readme.md similarity index 100% rename from litellm/mcp_client/Readme.md rename to litellm/experimental_mcp_client/Readme.md diff --git a/litellm/mcp_client/__init__.py b/litellm/experimental_mcp_client/__init__.py similarity index 100% rename from litellm/mcp_client/__init__.py rename to litellm/experimental_mcp_client/__init__.py diff --git a/litellm/mcp_client/client.py b/litellm/experimental_mcp_client/client.py similarity index 100% rename from litellm/mcp_client/client.py rename to litellm/experimental_mcp_client/client.py diff --git a/litellm/mcp_client/tools.py b/litellm/experimental_mcp_client/tools.py similarity index 100% rename from litellm/mcp_client/tools.py rename to litellm/experimental_mcp_client/tools.py From c7f42747bf2af69f6860c2e58718b4cb096d9833 Mon Sep 17 00:00:00 2001 
From: Krrish Dholakia Date: Fri, 21 Mar 2025 17:21:07 -0700 Subject: [PATCH 055/119] refactor(user_api_key_auth.py): move is_route_allowed to inside common_checks ensures consistent behaviour inside api key + jwt routes --- litellm/proxy/auth/auth_checks.py | 128 ++++++++++++- litellm/proxy/auth/auth_utils.py | 1 + litellm/proxy/auth/route_checks.py | 1 - litellm/proxy/auth/user_api_key_auth.py | 171 +++--------------- .../test_user_api_key_auth.py | 21 ++- 5 files changed, 167 insertions(+), 155 deletions(-) diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index f029511dd2..80cfb03de4 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -14,7 +14,7 @@ import time import traceback from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, cast -from fastapi import status +from fastapi import Request, status from pydantic import BaseModel import litellm @@ -74,6 +74,7 @@ async def common_checks( llm_router: Optional[Router], proxy_logging_obj: ProxyLogging, valid_token: Optional[UserAPIKeyAuth], + request: Request, ) -> bool: """ Common checks across jwt + key-based auth. 
@@ -198,9 +199,134 @@ async def common_checks( user_object=user_object, route=route, request_body=request_body ) + token_team = getattr(valid_token, "team_id", None) + token_type: Literal["ui", "api"] = ( + "ui" if token_team is not None and token_team == "litellm-dashboard" else "api" + ) + _is_route_allowed = _is_allowed_route( + route=route, + token_type=token_type, + user_obj=user_object, + request=request, + request_data=request_body, + valid_token=valid_token, + ) + return True +def _is_ui_route( + route: str, + user_obj: Optional[LiteLLM_UserTable] = None, +) -> bool: + """ + - Check if the route is a UI used route + """ + # this token is only used for managing the ui + allowed_routes = LiteLLMRoutes.ui_routes.value + # check if the current route startswith any of the allowed routes + if ( + route is not None + and isinstance(route, str) + and any(route.startswith(allowed_route) for allowed_route in allowed_routes) + ): + # Do something if the current route starts with any of the allowed routes + return True + elif any( + RouteChecks._route_matches_pattern(route=route, pattern=allowed_route) + for allowed_route in allowed_routes + ): + return True + return False + + +def _get_user_role( + user_obj: Optional[LiteLLM_UserTable], +) -> Optional[LitellmUserRoles]: + if user_obj is None: + return None + + _user = user_obj + + _user_role = _user.user_role + try: + role = LitellmUserRoles(_user_role) + except ValueError: + return LitellmUserRoles.INTERNAL_USER + + return role + + +def _is_api_route_allowed( + route: str, + request: Request, + request_data: dict, + valid_token: Optional[UserAPIKeyAuth], + user_obj: Optional[LiteLLM_UserTable] = None, +) -> bool: + """ + - Route b/w api token check and normal token check + """ + _user_role = _get_user_role(user_obj=user_obj) + + if valid_token is None: + raise Exception("Invalid proxy server token passed. 
valid_token=None.") + + if not _is_user_proxy_admin(user_obj=user_obj): # if non-admin + RouteChecks.non_proxy_admin_allowed_routes_check( + user_obj=user_obj, + _user_role=_user_role, + route=route, + request=request, + request_data=request_data, + valid_token=valid_token, + ) + return True + + +def _is_user_proxy_admin(user_obj: Optional[LiteLLM_UserTable]): + if user_obj is None: + return False + + if ( + user_obj.user_role is not None + and user_obj.user_role == LitellmUserRoles.PROXY_ADMIN.value + ): + return True + + if ( + user_obj.user_role is not None + and user_obj.user_role == LitellmUserRoles.PROXY_ADMIN.value + ): + return True + + return False + + +def _is_allowed_route( + route: str, + token_type: Literal["ui", "api"], + request: Request, + request_data: dict, + valid_token: Optional[UserAPIKeyAuth], + user_obj: Optional[LiteLLM_UserTable] = None, +) -> bool: + """ + - Route b/w ui token check and normal token check + """ + + if token_type == "ui" and _is_ui_route(route=route, user_obj=user_obj): + return True + else: + return _is_api_route_allowed( + route=route, + request=request, + request_data=request_data, + valid_token=valid_token, + user_obj=user_obj, + ) + + def _allowed_routes_check(user_route: str, allowed_routes: list) -> bool: """ Return if a user is allowed to access route. Helper function for `allowed_routes_check`. 
diff --git a/litellm/proxy/auth/auth_utils.py b/litellm/proxy/auth/auth_utils.py index 91fcaf7e11..2c4b122d3a 100644 --- a/litellm/proxy/auth/auth_utils.py +++ b/litellm/proxy/auth/auth_utils.py @@ -321,6 +321,7 @@ async def check_if_request_size_is_safe(request: Request) -> bool: from litellm.proxy.proxy_server import general_settings, premium_user max_request_size_mb = general_settings.get("max_request_size_mb", None) + if max_request_size_mb is not None: # Check if premium user if premium_user is not True: diff --git a/litellm/proxy/auth/route_checks.py b/litellm/proxy/auth/route_checks.py index a18a7ab5e1..8f956abb72 100644 --- a/litellm/proxy/auth/route_checks.py +++ b/litellm/proxy/auth/route_checks.py @@ -24,7 +24,6 @@ class RouteChecks: route: str, request: Request, valid_token: UserAPIKeyAuth, - api_key: str, request_data: dict, ): """ diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index ace0bf4948..c9bc5c994c 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -25,7 +25,10 @@ from litellm.litellm_core_utils.dd_tracing import tracer from litellm.proxy._types import * from litellm.proxy.auth.auth_checks import ( _cache_key_object, + _get_user_role, _handle_failed_db_connection_for_get_key_object, + _is_allowed_route, + _is_user_proxy_admin, _virtual_key_max_budget_check, _virtual_key_soft_budget_check, can_key_call_model, @@ -98,86 +101,6 @@ def _get_bearer_token( return api_key -def _is_ui_route( - route: str, - user_obj: Optional[LiteLLM_UserTable] = None, -) -> bool: - """ - - Check if the route is a UI used route - """ - # this token is only used for managing the ui - allowed_routes = LiteLLMRoutes.ui_routes.value - # check if the current route startswith any of the allowed routes - if ( - route is not None - and isinstance(route, str) - and any(route.startswith(allowed_route) for allowed_route in allowed_routes) - ): - # Do something if the current route 
starts with any of the allowed routes - return True - elif any( - RouteChecks._route_matches_pattern(route=route, pattern=allowed_route) - for allowed_route in allowed_routes - ): - return True - return False - - -def _is_api_route_allowed( - route: str, - request: Request, - request_data: dict, - api_key: str, - valid_token: Optional[UserAPIKeyAuth], - user_obj: Optional[LiteLLM_UserTable] = None, -) -> bool: - """ - - Route b/w api token check and normal token check - """ - _user_role = _get_user_role(user_obj=user_obj) - - if valid_token is None: - raise Exception("Invalid proxy server token passed. valid_token=None.") - - if not _is_user_proxy_admin(user_obj=user_obj): # if non-admin - RouteChecks.non_proxy_admin_allowed_routes_check( - user_obj=user_obj, - _user_role=_user_role, - route=route, - request=request, - request_data=request_data, - api_key=api_key, - valid_token=valid_token, - ) - return True - - -def _is_allowed_route( - route: str, - token_type: Literal["ui", "api"], - request: Request, - request_data: dict, - api_key: str, - valid_token: Optional[UserAPIKeyAuth], - user_obj: Optional[LiteLLM_UserTable] = None, -) -> bool: - """ - - Route b/w ui token check and normal token check - """ - - if token_type == "ui" and _is_ui_route(route=route, user_obj=user_obj): - return True - else: - return _is_api_route_allowed( - route=route, - request=request, - request_data=request_data, - api_key=api_key, - valid_token=valid_token, - user_obj=user_obj, - ) - - async def user_api_key_auth_websocket(websocket: WebSocket): # Accept the WebSocket connection @@ -328,6 +251,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 parent_otel_span: Optional[Span] = None start_time = datetime.now() route: str = get_request_route(request=request) + try: # get the request body @@ -470,22 +394,8 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 user_role=LitellmUserRoles.PROXY_ADMIN, parent_otel_span=parent_otel_span, ) - # run through common checks - _ = await 
common_checks( - request_body=request_data, - team_object=team_object, - user_object=user_object, - end_user_object=end_user_object, - general_settings=general_settings, - global_proxy_spend=global_proxy_spend, - route=route, - llm_router=llm_router, - proxy_logging_obj=proxy_logging_obj, - valid_token=None, - ) - # return UserAPIKeyAuth object - return UserAPIKeyAuth( + valid_token = UserAPIKeyAuth( api_key=None, team_id=team_id, team_tpm_limit=( @@ -501,6 +411,23 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 parent_otel_span=parent_otel_span, end_user_id=end_user_id, ) + # run through common checks + _ = await common_checks( + request=request, + request_body=request_data, + team_object=team_object, + user_object=user_object, + end_user_object=end_user_object, + general_settings=general_settings, + global_proxy_spend=global_proxy_spend, + route=route, + llm_router=llm_router, + proxy_logging_obj=proxy_logging_obj, + valid_token=valid_token, + ) + + # return UserAPIKeyAuth object + return valid_token #### ELSE #### ## CHECK PASS-THROUGH ENDPOINTS ## @@ -1038,6 +965,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 ) ) _ = await common_checks( + request=request, request_body=request_data, team_object=_team_obj, user_object=user_obj, @@ -1075,23 +1003,6 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 # check if token is from litellm-ui, litellm ui makes keys to allow users to login with sso. 
These keys can only be used for LiteLLM UI functions # sso/login, ui/login, /key functions and /user functions # this will never be allowed to call /chat/completions - token_team = getattr(valid_token, "team_id", None) - token_type: Literal["ui", "api"] = ( - "ui" - if token_team is not None and token_team == "litellm-dashboard" - else "api" - ) - _is_route_allowed = _is_allowed_route( - route=route, - token_type=token_type, - user_obj=user_obj, - request=request, - request_data=request_data, - api_key=api_key, - valid_token=valid_token, - ) - if not _is_route_allowed: - raise HTTPException(401, detail="Invalid route for UI token") if valid_token is None: # No token was found when looking up in the DB @@ -1242,42 +1153,6 @@ async def _return_user_api_key_auth_obj( return UserAPIKeyAuth(**user_api_key_kwargs) -def _is_user_proxy_admin(user_obj: Optional[LiteLLM_UserTable]): - if user_obj is None: - return False - - if ( - user_obj.user_role is not None - and user_obj.user_role == LitellmUserRoles.PROXY_ADMIN.value - ): - return True - - if ( - user_obj.user_role is not None - and user_obj.user_role == LitellmUserRoles.PROXY_ADMIN.value - ): - return True - - return False - - -def _get_user_role( - user_obj: Optional[LiteLLM_UserTable], -) -> Optional[LitellmUserRoles]: - if user_obj is None: - return None - - _user = user_obj - - _user_role = _user.user_role - try: - role = LitellmUserRoles(_user_role) - except ValueError: - return LitellmUserRoles.INTERNAL_USER - - return role - - def get_api_key_from_custom_header( request: Request, custom_litellm_key_header_name: str ) -> str: diff --git a/tests/proxy_unit_tests/test_user_api_key_auth.py b/tests/proxy_unit_tests/test_user_api_key_auth.py index 5e86a2d688..119457f0c2 100644 --- a/tests/proxy_unit_tests/test_user_api_key_auth.py +++ b/tests/proxy_unit_tests/test_user_api_key_auth.py @@ -4,6 +4,9 @@ import os import sys +import litellm.proxy +import litellm.proxy.proxy_server + sys.path.insert( 0, 
os.path.abspath("../..") ) # Adds the parent directory to the system path @@ -950,7 +953,7 @@ def test_get_model_from_request(route, request_data, expected_model): @pytest.mark.asyncio -async def test_jwt_non_admin_team_route_access(): +async def test_jwt_non_admin_team_route_access(monkeypatch): """ Test that a non-admin JWT user cannot access team management routes """ @@ -958,6 +961,8 @@ async def test_jwt_non_admin_team_route_access(): from starlette.datastructures import URL from unittest.mock import patch from litellm.proxy.auth.user_api_key_auth import user_api_key_auth + import json + from litellm.proxy._types import ProxyException mock_jwt_response = { "is_proxy_admin": False, @@ -973,9 +978,15 @@ async def test_jwt_non_admin_team_route_access(): } # Create request - request = Request(scope={"type": "http"}) + request = Request( + scope={"type": "http", "headers": [("Authorization", "Bearer fake.jwt.token")]} + ) request._url = URL(url="/team/new") + monkeypatch.setattr( + litellm.proxy.proxy_server, "general_settings", {"enable_jwt_auth": True} + ) + # Mock JWTAuthManager.auth_builder with patch( "litellm.proxy.auth.handle_jwt.JWTAuthManager.auth_builder", @@ -986,6 +997,6 @@ async def test_jwt_non_admin_team_route_access(): pytest.fail( "Expected this call to fail. Non-admin user should not access team routes." 
) - except HTTPException as e: - assert e.status_code == 403 - assert "Unauthorized" in str(e.detail) + except ProxyException as e: + print("e", e) + assert "Only proxy admin can be used to generate" in str(e.message) From a8cf71973d19e1f29e96bf5856c67beb7e9b3e5c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:21:40 -0700 Subject: [PATCH 056/119] docs mcp docs update --- docs/my-website/docs/mcp.md | 19 +++++++++++-------- litellm/experimental_mcp_client/__init__.py | 3 +++ litellm/experimental_mcp_client/tools.py | 12 ++++++++++-- tests/mcp_tests/test_mcp_litellm_client.py | 13 +++++-------- 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index be5f1c517c..10821bfbf8 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -21,13 +21,22 @@ Use Model Context Protocol with LiteLLM LiteLLM acts as a MCP bridge to utilize MCP tools with all LiteLLM supported models. LiteLLM offers the following features for using MCP - **List** Available MCP Tools: OpenAI clients can view all available MCP tools + - `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools - **Call** MCP Tools: OpenAI clients can call MCP tools + - `litellm.experimental_mcp_client.call_openai_tool` to call an OpenAI tool on an MCP server ## Usage ### 1. List Available MCP Tools +In this example we'll use `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools on any MCP server. This method can be used in two ways: + +- `format="mcp"` - (default) Return MCP tools + - Returns: `mcp.types.Tool` +- `format="openai"` - Return MCP tools converted to OpenAI API compatible tools. Allows using with OpenAI endpoints. 
+ - Returns: `openai.types.chat.ChatCompletionToolParam` + @@ -36,12 +45,8 @@ LiteLLM acts as a MCP bridge to utilize MCP tools with all LiteLLM supported mod from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from litellm.mcp_client.tools import ( - load_mcp_tools, - transform_openai_tool_to_mcp_tool, - call_openai_tool, -) import litellm +from litellm import experimental_mcp_client server_params = StdioServerParameters( @@ -56,12 +61,10 @@ async with stdio_client(server_params) as (read, write): await session.initialize() # Get tools - tools = await load_mcp_tools(session=session, format="openai") + tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai") print("MCP TOOLS: ", tools) - # Create and run the agent messages = [{"role": "user", "content": "what's (3 + 5)"}] - print(os.getenv("OPENAI_API_KEY")) llm_response = await litellm.acompletion( model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"), diff --git a/litellm/experimental_mcp_client/__init__.py b/litellm/experimental_mcp_client/__init__.py index e69de29bb2..7110d5375e 100644 --- a/litellm/experimental_mcp_client/__init__.py +++ b/litellm/experimental_mcp_client/__init__.py @@ -0,0 +1,3 @@ +from .tools import call_openai_tool, load_mcp_tools + +__all__ = ["load_mcp_tools", "call_openai_tool"] diff --git a/litellm/experimental_mcp_client/tools.py b/litellm/experimental_mcp_client/tools.py index bd803b995d..aa4d02184a 100644 --- a/litellm/experimental_mcp_client/tools.py +++ b/litellm/experimental_mcp_client/tools.py @@ -53,7 +53,7 @@ async def load_mcp_tools( format: The format to convert the tools to By default, the tools are returned in MCP format. - If format is set to "openai", the tools are converted to OpenAI tools. + If format is set to "openai", the tools are converted to OpenAI API compatible tools. 
""" tools = await session.list_tools() if format == "openai": @@ -80,7 +80,15 @@ async def call_openai_tool( session: ClientSession, openai_tool: ChatCompletionToolParam, ) -> CallToolResult: - """Call an OpenAI tool using MCP client.""" + """ + Call an OpenAI tool using MCP client. + + Args: + session: The MCP session to use + openai_tool: The OpenAI tool to call. You can get this from the `choices[0].message.tool_calls[0]` of the response from the OpenAI API. + Returns: + The result of the MCP tool call. + """ mcp_tool = transform_openai_tool_to_mcp_tool( openai_tool=openai_tool, ) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index c1dd30c8f9..448f0c3ce5 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -10,11 +10,7 @@ sys.path.insert( from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from litellm.mcp_client.tools import ( - load_mcp_tools, - transform_openai_tool_to_mcp_tool, - call_openai_tool, -) +from litellm import experimental_mcp_client import litellm import pytest import json @@ -34,12 +30,13 @@ async def test_mcp_agent(): await session.initialize() # Get tools - tools = await load_mcp_tools(session=session, format="openai") + tools = await experimental_mcp_client.load_mcp_tools( + session=session, format="openai" + ) print("MCP TOOLS: ", tools) # Create and run the agent messages = [{"role": "user", "content": "what's (3 + 5)"}] - print(os.getenv("OPENAI_API_KEY")) llm_response = await litellm.acompletion( model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"), @@ -59,7 +56,7 @@ async def test_mcp_agent(): openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] # Call the tool using MCP client - call_result = await call_openai_tool( + call_result = await experimental_mcp_client.call_openai_tool( session=session, openai_tool=openai_tool, ) From 
92d6e2fecbba3994fc4a8081ddc163448761e692 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 17:22:58 -0700 Subject: [PATCH 057/119] fix: remove unused import --- litellm/proxy/auth/user_api_key_auth.py | 1 - 1 file changed, 1 deletion(-) diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index c9bc5c994c..3437d019a5 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -27,7 +27,6 @@ from litellm.proxy.auth.auth_checks import ( _cache_key_object, _get_user_role, _handle_failed_db_connection_for_get_key_object, - _is_allowed_route, _is_user_proxy_admin, _virtual_key_max_budget_check, _virtual_key_soft_budget_check, From 1dd9a433e36578b4ef7384ed06564b5f27988d74 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 17:25:57 -0700 Subject: [PATCH 058/119] fix: remove unused import --- litellm/proxy/auth/user_api_key_auth.py | 1 - 1 file changed, 1 deletion(-) diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index 3437d019a5..2be359cc2c 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -50,7 +50,6 @@ from litellm.proxy.auth.auth_utils import ( from litellm.proxy.auth.handle_jwt import JWTAuthManager, JWTHandler from litellm.proxy.auth.oauth2_check import check_oauth2_token from litellm.proxy.auth.oauth2_proxy_hook import handle_oauth2_proxy_request -from litellm.proxy.auth.route_checks import RouteChecks from litellm.proxy.auth.service_account_checks import service_account_checks from litellm.proxy.common_utils.http_parsing_utils import _read_request_body from litellm.proxy.utils import PrismaClient, ProxyLogging From 7ed7dead0e5246b6e9590c0a9bca5a71dc30e14d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:30:23 -0700 Subject: [PATCH 059/119] litellm mcp example --- docs/my-website/docs/mcp.md | 45 
+++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 10821bfbf8..2669146492 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -40,7 +40,7 @@ In this example we'll use `litellm.experimental_mcp_client.load_mcp_tools` to li -```python title="MCP Client Example" showLineNumbers +```python title="MCP Client List Tools" showLineNumbers # Create server parameters for stdio connection from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client @@ -76,17 +76,28 @@ async with stdio_client(server_params) as (read, write): ### 2. List and Call MCP Tools -```python title="MCP Client Example" showLineNumbers + +In this example we'll use +- `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools on any MCP server +- `litellm.experimental_mcp_client.call_openai_tool` to call an OpenAI tool on an MCP server + +The first llm response returns a list of OpenAI tools. We take the first tool call from the LLM response and pass it to `litellm.experimental_mcp_client.call_openai_tool` to call the tool on the MCP server. 
+ +#### How `litellm.experimental_mcp_client.call_openai_tool` works + +- Accepts an OpenAI Tool Call from the LLM response +- Converts the OpenAI Tool Call to an MCP Tool +- Calls the MCP Tool on the MCP server +- Returns the result of the MCP Tool call + + +```python title="MCP Client List and Call Tools" showLineNumbers # Create server parameters for stdio connection from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client import os -from litellm.mcp_client.tools import ( - load_mcp_tools, - transform_openai_tool_to_mcp_tool, - call_openai_tool, -) import litellm +from litellm import experimental_mcp_client server_params = StdioServerParameters( @@ -101,12 +112,10 @@ async with stdio_client(server_params) as (read, write): await session.initialize() # Get tools - tools = await load_mcp_tools(session=session, format="openai") + tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai") print("MCP TOOLS: ", tools) - # Create and run the agent messages = [{"role": "user", "content": "what's (3 + 5)"}] - print(os.getenv("OPENAI_API_KEY")) llm_response = await litellm.acompletion( model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"), @@ -114,22 +123,14 @@ async with stdio_client(server_params) as (read, write): tools=tools, ) print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) -# Add assertions to verify the response - assert llm_response["choices"][0]["message"]["tool_calls"] is not None - assert ( - llm_response["choices"][0]["message"]["tool_calls"][0]["function"][ - "name" - ] - == "add" - ) - openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] + openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] # Call the tool using MCP client - call_result = await call_openai_tool( + call_result = await experimental_mcp_client.call_openai_tool( session=session, openai_tool=openai_tool, ) - print("CALL RESULT: ", call_result) + print("MCP TOOL CALL RESULT: ", 
call_result) # send the tool result to the LLM messages.append(llm_response["choices"][0]["message"]) @@ -140,7 +141,7 @@ async with stdio_client(server_params) as (read, write): "tool_call_id": openai_tool["id"], } ) - print("final messages: ", messages) + print("final messages with tool result: ", messages) llm_response = await litellm.acompletion( model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"), From cc9d3a8e7227343555ef85ea7180779f26f468c3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:43:10 -0700 Subject: [PATCH 060/119] litellm mcp interface --- docs/my-website/docs/mcp.md | 143 ++++++++++++++++++++++++++---------- 1 file changed, 103 insertions(+), 40 deletions(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 2669146492..6ebbd7a1fa 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -51,7 +51,7 @@ from litellm import experimental_mcp_client server_params = StdioServerParameters( command="python3", - # Make sure to update to the full absolute path to your math_server.py file + # Make sure to update to the full absolute path to your mcp_server.py file args=["./mcp_server.py"], ) @@ -74,6 +74,52 @@ async with stdio_client(server_params) as (read, write): print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) ``` + + + + +In this example we'll walk through how you can use the OpenAI SDK pointed to the LiteLLM proxy to call MCP tools. 
The key difference here is we use the OpenAI SDK to make the LLM API request + +```python title="MCP Client List Tools" showLineNumbers +# Create server parameters for stdio connection +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client +import os +from openai import OpenAI +from litellm import experimental_mcp_client + +server_params = StdioServerParameters( + command="python3", + # Make sure to update to the full absolute path to your mcp_server.py file + args=["./mcp_server.py"], +) + +async with stdio_client(server_params) as (read, write): + async with ClientSession(read, write) as session: + # Initialize the connection + await session.initialize() + + # Get tools using litellm mcp client + tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai") + print("MCP TOOLS: ", tools) + + # Use OpenAI SDK pointed to LiteLLM proxy + client = OpenAI( + api_key="your-api-key", # Your LiteLLM proxy API key + base_url="http://localhost:4000" # Your LiteLLM proxy URL + ) + + messages = [{"role": "user", "content": "what's (3 + 5)"}] + llm_response = client.chat.completions.create( + model="gpt-4", + messages=messages, + tools=tools + ) + print("LLM RESPONSE: ", llm_response) +``` + + + ### 2. List and Call MCP Tools @@ -90,6 +136,8 @@ The first llm response returns a list of OpenAI tools. 
We take the first tool ca - Calls the MCP Tool on the MCP server - Returns the result of the MCP Tool call + + ```python title="MCP Client List and Call Tools" showLineNumbers # Create server parameters for stdio connection @@ -102,7 +150,7 @@ from litellm import experimental_mcp_client server_params = StdioServerParameters( command="python3", - # Make sure to update to the full absolute path to your math_server.py file + # Make sure to update to the full absolute path to your mcp_server.py file args=["./mcp_server.py"], ) @@ -154,56 +202,71 @@ async with stdio_client(server_params) as (read, write): ``` - + -```python -import asyncio +In this example we'll walk through how you can use the OpenAI SDK pointed to the LiteLLM proxy to call MCP tools. The key difference here is we use the OpenAI SDK to make the LLM API request + +```python title="MCP Client with OpenAI SDK" showLineNumbers +# Create server parameters for stdio connection +from mcp import ClientSession, StdioServerParameters +from mcp.client.stdio import stdio_client +import os from openai import OpenAI -from litellm import experimental_create_mcp_client -from litellm.mcp_stdio import experimental_stdio_mcp_transport +from litellm import experimental_mcp_client -async def main(): - client_one = None +server_params = StdioServerParameters( + command="python3", + # Make sure to update to the full absolute path to your mcp_server.py file + args=["./mcp_server.py"], +) - try: - # Initialize an MCP client to connect to a `stdio` MCP server: - transport = experimental_stdio_mcp_transport( - command='node', - args=['src/stdio/dist/server.js'] - ) - client_one = await experimental_create_mcp_client( - transport=transport - ) +async with stdio_client(server_params) as (read, write): + async with ClientSession(read, write) as session: + # Initialize the connection + await session.initialize() - # Get tools from MCP client - tools = await client_one.list_tools(format="openai") - - # Use OpenAI client connected to 
LiteLLM Proxy Server + # Get tools using litellm mcp client + tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai") + print("MCP TOOLS: ", tools) + + # Use OpenAI SDK pointed to LiteLLM proxy client = OpenAI( - api_key="sk-1234", - base_url="http://0.0.0.0:4000" + api_key="your-api-key", # Your LiteLLM proxy API key + base_url="http://localhost:8000" # Your LiteLLM proxy URL ) - response = client.chat.completions.create( + + messages = [{"role": "user", "content": "what's (3 + 5)"}] + llm_response = client.chat.completions.create( model="gpt-4", - tools=tools, - messages=[ - { - "role": "user", - "content": "Find products under $100" - } - ] + messages=messages, + tools=tools ) + print("LLM RESPONSE: ", llm_response) - print(response.choices[0].message.content) - except Exception as error: - print(error) - finally: - await asyncio.gather( - client_one.close() if client_one else asyncio.sleep(0), + # Get the first tool call + tool_call = llm_response.choices[0].message.tool_calls[0] + + # Call the tool using MCP client + call_result = await experimental_mcp_client.call_openai_tool( + session=session, + openai_tool=tool_call.model_dump(), ) + print("MCP TOOL CALL RESULT: ", call_result) -if __name__ == "__main__": - asyncio.run(main()) + # Send the tool result back to the LLM + messages.append(llm_response.choices[0].message.model_dump()) + messages.append({ + "role": "tool", + "content": str(call_result.content[0].text), + "tool_call_id": tool_call.id, + }) + + final_response = client.chat.completions.create( + model="gpt-4", + messages=messages, + tools=tools + ) + print("FINAL RESPONSE: ", final_response) ``` From 02b7299337eda92c8010fb1f5af1c665ff729071 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 17:46:18 -0700 Subject: [PATCH 061/119] fix(proxy/_types.py): handle user_email=None --- litellm/proxy/_new_secret_config.yaml | 7 ++++++- litellm/proxy/_types.py | 2 +- 
.../proxy/management_endpoints/internal_user_endpoints.py | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index afb1b7b3aa..cd49647464 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -18,4 +18,9 @@ router_settings: general_settings: enable_jwt_auth: True litellm_jwtauth: - admin_jwt_scope: "ai.admin" \ No newline at end of file + admin_jwt_scope: "ai.admin" + # team_id_jwt_field: "client_id" # 👈 CAN BE ANY FIELD + user_id_jwt_field: "sub" # 👈 CAN BE ANY FIELD + org_id_jwt_field: "org_id" # 👈 CAN BE ANY FIELD + end_user_id_jwt_field: "customer_id" # 👈 CAN BE ANY FIELD + user_id_upsert: True \ No newline at end of file diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index 255e37186d..220a0d5ddb 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -1631,7 +1631,7 @@ class LiteLLM_UserTable(LiteLLMPydanticObjectBase): class LiteLLM_UserTableFiltered(BaseModel): # done to avoid exposing sensitive data user_id: str - user_email: str + user_email: Optional[str] = None class LiteLLM_UserTableWithKeyCount(LiteLLM_UserTable): diff --git a/litellm/proxy/management_endpoints/internal_user_endpoints.py b/litellm/proxy/management_endpoints/internal_user_endpoints.py index 43d8273dee..e9be169cdc 100644 --- a/litellm/proxy/management_endpoints/internal_user_endpoints.py +++ b/litellm/proxy/management_endpoints/internal_user_endpoints.py @@ -1240,4 +1240,5 @@ async def ui_view_users( return [LiteLLM_UserTableFiltered(**user.model_dump()) for user in users] except Exception as e: + verbose_proxy_logger.exception(f"Error searching users: {str(e)}") raise HTTPException(status_code=500, detail=f"Error searching users: {str(e)}") From 60c3104da8c38d5f7db2a6c07fbfec1b00a44a6c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:48:16 -0700 Subject: [PATCH 062/119] test mcp agent --- 
tests/mcp_tests/test_mcp_litellm_client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index 448f0c3ce5..e69aec8553 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -42,11 +42,12 @@ async def test_mcp_agent(): api_key=os.getenv("OPENAI_API_KEY"), messages=messages, tools=tools, + tool_choice="required", ) print("LLM RESPONSE: ", json.dumps(llm_response, indent=4, default=str)) - # Add assertions to verify the response assert llm_response["choices"][0]["message"]["tool_calls"] is not None + assert ( llm_response["choices"][0]["message"]["tool_calls"][0]["function"][ "name" From 527c6096837a6f3bf0672abce1ca50f90da7ebb1 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 17:51:37 -0700 Subject: [PATCH 063/119] test mcp on ci/cd --- .circleci/config.yml | 55 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a1a7e69c40..33559dfdff 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -680,6 +680,49 @@ jobs: paths: - llm_translation_coverage.xml - llm_translation_coverage + mcp_testing: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-cov==5.0.0" + pip install "pytest-asyncio==0.21.1" + pip install "respx==0.21.1" + pip install "mcp==1.4.1" + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/mcp_tests --cov=litellm --cov-report=xml -x -s -v 
--junitxml=test-results/junit.xml --durations=5 + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml mcp_coverage.xml + mv .coverage mcp_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . + paths: + - mcp_coverage.xml + - mcp_coverage llm_responses_api_testing: docker: - image: cimg/python:3.11 @@ -1353,7 +1396,7 @@ jobs: command: | pwd ls - python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests + python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/llm_responses_api_testing --ignore=tests/mcp_tests --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests no_output_timeout: 120m # Store test results @@ -2112,7 +2155,7 @@ jobs: python -m venv venv . 
venv/bin/activate pip install coverage - coverage combine llm_translation_coverage llm_responses_api_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage + coverage combine llm_translation_coverage llm_responses_api_coverage mcp_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage batches_coverage litellm_proxy_security_tests_coverage coverage xml - codecov/upload: file: ./coverage.xml @@ -2473,6 +2516,12 @@ workflows: only: - main - /litellm_.*/ + - mcp_testing: + filters: + branches: + only: + - main + - /litellm_.*/ - llm_responses_api_testing: filters: branches: @@ -2518,6 +2567,7 @@ workflows: - upload-coverage: requires: - llm_translation_testing + - mcp_testing - llm_responses_api_testing - litellm_mapped_tests - batches_testing @@ -2577,6 +2627,7 @@ workflows: - load_testing - test_bad_database_url - llm_translation_testing + - mcp_testing - llm_responses_api_testing - litellm_mapped_tests - batches_testing From 011745327491c8096e60c6c53a6835f1de8bb43e Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 18:06:20 -0700 Subject: [PATCH 064/119] test(test_internal_user_endpoints.py): add unit testing to handle user_email=None --- .../test_internal_user_endpoints.py | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 tests/litellm/proxy/management_endpoints/test_internal_user_endpoints.py diff --git a/tests/litellm/proxy/management_endpoints/test_internal_user_endpoints.py b/tests/litellm/proxy/management_endpoints/test_internal_user_endpoints.py new file mode 100644 
index 0000000000..697be8b3c9 --- /dev/null +++ b/tests/litellm/proxy/management_endpoints/test_internal_user_endpoints.py @@ -0,0 +1,57 @@ +import json +import os +import sys + +import pytest +from fastapi.testclient import TestClient + +sys.path.insert( + 0, os.path.abspath("../../../..") +) # Adds the parent directory to the system path + +from litellm.proxy._types import LiteLLM_UserTableFiltered, UserAPIKeyAuth +from litellm.proxy.management_endpoints.internal_user_endpoints import ui_view_users +from litellm.proxy.proxy_server import app + +client = TestClient(app) + + +@pytest.mark.asyncio +async def test_ui_view_users_with_null_email(mocker, caplog): + """ + Test that /user/filter/ui endpoint returns users even when they have null email fields + """ + # Mock the prisma client + mock_prisma_client = mocker.MagicMock() + + # Create mock user data with null email + mock_user = mocker.MagicMock() + mock_user.model_dump.return_value = { + "user_id": "test-user-null-email", + "user_email": None, + "user_role": "proxy_admin", + "created_at": "2024-01-01T00:00:00Z", + } + + # Setup the mock find_many response + # Setup the mock find_many response as an async function + async def mock_find_many(*args, **kwargs): + return [mock_user] + + mock_prisma_client.db.litellm_usertable.find_many = mock_find_many + + # Patch the prisma client import in the endpoint + mocker.patch("litellm.proxy.proxy_server.prisma_client", mock_prisma_client) + + # Call ui_view_users function directly + response = await ui_view_users( + user_api_key_dict=UserAPIKeyAuth(user_id="test_user"), + user_id="test_user", + user_email=None, + page=1, + page_size=50, + ) + + assert response == [ + LiteLLM_UserTableFiltered(user_id="test-user-null-email", user_email=None) + ] From 4a88ba1bc8ca158ea09e650996af2b07bc6b3578 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 18:08:00 -0700 Subject: [PATCH 065/119] fix: fix linting error --- litellm/proxy/auth/user_api_key_auth.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index 2be359cc2c..3944fe74e5 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -393,7 +393,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 parent_otel_span=parent_otel_span, ) - valid_token = UserAPIKeyAuth( + valid_token: Optional[UserAPIKeyAuth] = UserAPIKeyAuth( api_key=None, team_id=team_id, team_tpm_limit=( From 4c265a7eea73a4b67546946a06c7c09603ea4b55 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 18:12:35 -0700 Subject: [PATCH 066/119] test: update tests --- tests/proxy_admin_ui_tests/test_route_check_unit_tests.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py index 718f707755..7f5ed297ca 100644 --- a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py +++ b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py @@ -165,7 +165,6 @@ def test_llm_api_route(route_checks): route="/v1/chat/completions", request=MockRequest(), valid_token=UserAPIKeyAuth(api_key="test_key"), - api_key="test_key", request_data={}, ) is None @@ -183,7 +182,6 @@ def test_key_info_route_allowed(route_checks): route="/key/info", request=MockRequest(query_params={"key": "test_key"}), valid_token=UserAPIKeyAuth(api_key="test_key"), - api_key="test_key", request_data={}, ) is None @@ -201,7 +199,6 @@ def test_user_info_route_allowed(route_checks): route="/user/info", request=MockRequest(query_params={"user_id": "test_user"}), valid_token=UserAPIKeyAuth(api_key="test_key", user_id="test_user"), - api_key="test_key", request_data={}, ) is None @@ -219,7 +216,6 @@ def test_user_info_route_forbidden(route_checks): route="/user/info", request=MockRequest(query_params={"user_id": "wrong_user"}), valid_token=UserAPIKeyAuth(api_key="test_key", 
user_id="test_user"), - api_key="test_key", request_data={}, ) assert exc_info.value.status_code == 403 From a0ad3c24843ef2e50bba4f5c68a4d494549c0149 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 18:18:23 -0700 Subject: [PATCH 067/119] fix mcp client --- litellm/experimental_mcp_client/tools.py | 74 ++++++++++++++---------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/litellm/experimental_mcp_client/tools.py b/litellm/experimental_mcp_client/tools.py index aa4d02184a..f4ebbf4af4 100644 --- a/litellm/experimental_mcp_client/tools.py +++ b/litellm/experimental_mcp_client/tools.py @@ -2,12 +2,18 @@ import json from typing import List, Literal, Union from mcp import ClientSession -from mcp.types import CallToolResult +from mcp.types import CallToolRequestParams as MCPCallToolRequestParams +from mcp.types import CallToolResult as MCPCallToolResult from mcp.types import Tool as MCPTool from openai.types.chat import ChatCompletionToolParam from openai.types.shared_params.function_definition import FunctionDefinition +from litellm.types.utils import ChatCompletionMessageToolCall + +######################################################## +# List MCP Tool functions +######################################################## def transform_mcp_tool_to_openai_tool(mcp_tool: MCPTool) -> ChatCompletionToolParam: """Convert an MCP tool to an OpenAI tool.""" return ChatCompletionToolParam( @@ -21,27 +27,6 @@ def transform_mcp_tool_to_openai_tool(mcp_tool: MCPTool) -> ChatCompletionToolPa ) -def _get_function_arguments(function: FunctionDefinition) -> dict: - """Helper to safely get and parse function arguments.""" - arguments = function.get("arguments", {}) - if isinstance(arguments, str): - try: - arguments = json.loads(arguments) - except json.JSONDecodeError: - arguments = {} - return arguments if isinstance(arguments, dict) else {} - - -def transform_openai_tool_to_mcp_tool(openai_tool: ChatCompletionToolParam) -> MCPTool: - """Convert an 
OpenAI tool to an MCP tool.""" - function = openai_tool["function"] - return MCPTool( - name=function["name"], - description=function.get("description", ""), - inputSchema=_get_function_arguments(function), - ) - - async def load_mcp_tools( session: ClientSession, format: Literal["mcp", "openai"] = "mcp" ) -> Union[List[MCPTool], List[ChatCompletionToolParam]]: @@ -63,23 +48,49 @@ async def load_mcp_tools( return tools.tools +######################################################## +# Call MCP Tool functions +######################################################## + + async def call_mcp_tool( session: ClientSession, - name: str, - arguments: dict, -) -> CallToolResult: + call_tool_request_params: MCPCallToolRequestParams, +) -> MCPCallToolResult: """Call an MCP tool.""" tool_result = await session.call_tool( - name=name, - arguments=arguments, + name=call_tool_request_params.name, + arguments=call_tool_request_params.arguments, ) return tool_result +def _get_function_arguments(function: FunctionDefinition) -> dict: + """Helper to safely get and parse function arguments.""" + arguments = function.get("arguments", {}) + if isinstance(arguments, str): + try: + arguments = json.loads(arguments) + except json.JSONDecodeError: + arguments = {} + return arguments if isinstance(arguments, dict) else {} + + +def _transform_openai_tool_call_to_mcp_tool_call_request( + openai_tool: ChatCompletionMessageToolCall, +) -> MCPCallToolRequestParams: + """Convert an OpenAI ChatCompletionMessageToolCall to an MCP CallToolRequestParams.""" + function = openai_tool["function"] + return MCPCallToolRequestParams( + name=function["name"], + arguments=_get_function_arguments(function), + ) + + async def call_openai_tool( session: ClientSession, - openai_tool: ChatCompletionToolParam, -) -> CallToolResult: + openai_tool: ChatCompletionMessageToolCall, +) -> MCPCallToolResult: """ Call an OpenAI tool using MCP client. 
@@ -89,11 +100,10 @@ async def call_openai_tool( Returns: The result of the MCP tool call. """ - mcp_tool = transform_openai_tool_to_mcp_tool( + mcp_tool_call_request_params = _transform_openai_tool_call_to_mcp_tool_call_request( openai_tool=openai_tool, ) return await call_mcp_tool( session=session, - name=mcp_tool.name, - arguments=mcp_tool.inputSchema, + call_tool_request_params=mcp_tool_call_request_params, ) From 12046a1a2ee7543965ba29ba2644912de3c1e35b Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 18:23:58 -0700 Subject: [PATCH 068/119] fix: fix linting error --- litellm/proxy/auth/user_api_key_auth.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index 3944fe74e5..b78619ae65 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -249,6 +249,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 parent_otel_span: Optional[Span] = None start_time = datetime.now() route: str = get_request_route(request=request) + valid_token: Optional[UserAPIKeyAuth] = None try: @@ -393,7 +394,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 parent_otel_span=parent_otel_span, ) - valid_token: Optional[UserAPIKeyAuth] = UserAPIKeyAuth( + valid_token = UserAPIKeyAuth( api_key=None, team_id=team_id, team_tpm_limit=( @@ -425,7 +426,7 @@ async def _user_api_key_auth_builder( # noqa: PLR0915 ) # return UserAPIKeyAuth object - return valid_token + return cast(UserAPIKeyAuth, valid_token) #### ELSE #### ## CHECK PASS-THROUGH ENDPOINTS ## From 9039b500339f46fa490d96ac081378186326e794 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 18:24:43 -0700 Subject: [PATCH 069/119] test_transform_openai_tool_call_to_mcp_tool_call_request tests --- .../experimental_mcp_client/test_tools.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 
tests/litellm/experimental_mcp_client/test_tools.py diff --git a/tests/litellm/experimental_mcp_client/test_tools.py b/tests/litellm/experimental_mcp_client/test_tools.py new file mode 100644 index 0000000000..799078f915 --- /dev/null +++ b/tests/litellm/experimental_mcp_client/test_tools.py @@ -0,0 +1,60 @@ +import json +import os +import sys +from unittest.mock import AsyncMock, MagicMock + +import pytest + +sys.path.insert( + 0, os.path.abspath("../../..") +) # Adds the parent directory to the system path + +from mcp.types import CallToolRequestParams, CallToolResult, ListToolsResult +from mcp.types import Tool as MCPTool + +from litellm.experimental_mcp_client.tools import ( + _transform_openai_tool_call_to_mcp_tool_call_request, + call_mcp_tool, + call_openai_tool, + load_mcp_tools, + transform_mcp_tool_to_openai_tool, +) + + +@pytest.fixture +def mock_mcp_tool(): + return MCPTool( + name="test_tool", + description="A test tool", + inputSchema={"type": "object", "properties": {"test": {"type": "string"}}}, + ) + + +@pytest.fixture +def mock_session(): + session = MagicMock() + session.list_tools = AsyncMock() + session.call_tool = AsyncMock() + return session + + +def test_transform_mcp_tool_to_openai_tool(mock_mcp_tool): + openai_tool = transform_mcp_tool_to_openai_tool(mock_mcp_tool) + assert openai_tool["type"] == "function" + assert openai_tool["function"]["name"] == "test_tool" + assert openai_tool["function"]["description"] == "A test tool" + assert openai_tool["function"]["parameters"] == { + "type": "object", + "properties": {"test": {"type": "string"}}, + } + + +def test_transform_openai_tool_call_to_mcp_tool_call_request(mock_mcp_tool): + openai_tool = { + "function": {"name": "test_tool", "arguments": json.dumps({"test": "value"})} + } + mcp_tool_call_request = _transform_openai_tool_call_to_mcp_tool_call_request( + openai_tool + ) + assert mcp_tool_call_request.name == "test_tool" + assert mcp_tool_call_request.arguments == {"test": "value"} From 
826c0d258fa5c65091ccc4bb042536bfd34b4a85 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 18:38:24 -0700 Subject: [PATCH 070/119] test_tools.py --- .../experimental_mcp_client/test_tools.py | 99 ++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/tests/litellm/experimental_mcp_client/test_tools.py b/tests/litellm/experimental_mcp_client/test_tools.py index 799078f915..7089d83217 100644 --- a/tests/litellm/experimental_mcp_client/test_tools.py +++ b/tests/litellm/experimental_mcp_client/test_tools.py @@ -9,10 +9,16 @@ sys.path.insert( 0, os.path.abspath("../../..") ) # Adds the parent directory to the system path -from mcp.types import CallToolRequestParams, CallToolResult, ListToolsResult +from mcp.types import ( + CallToolRequestParams, + CallToolResult, + ListToolsResult, + TextContent, +) from mcp.types import Tool as MCPTool from litellm.experimental_mcp_client.tools import ( + _get_function_arguments, _transform_openai_tool_call_to_mcp_tool_call_request, call_mcp_tool, call_openai_tool, @@ -38,6 +44,27 @@ def mock_session(): return session +@pytest.fixture +def mock_list_tools_result(): + return ListToolsResult( + tools=[ + MCPTool( + name="test_tool", + description="A test tool", + inputSchema={ + "type": "object", + "properties": {"test": {"type": "string"}}, + }, + ) + ] + ) + + +@pytest.fixture +def mock_mcp_tool_call_result(): + return CallToolResult(content=[TextContent(type="text", text="test_output")]) + + def test_transform_mcp_tool_to_openai_tool(mock_mcp_tool): openai_tool = transform_mcp_tool_to_openai_tool(mock_mcp_tool) assert openai_tool["type"] == "function" @@ -58,3 +85,73 @@ def test_transform_openai_tool_call_to_mcp_tool_call_request(mock_mcp_tool): ) assert mcp_tool_call_request.name == "test_tool" assert mcp_tool_call_request.arguments == {"test": "value"} + + +@pytest.mark.asyncio() +async def test_load_mcp_tools_mcp_format(mock_session, mock_list_tools_result): + 
mock_session.list_tools.return_value = mock_list_tools_result + result = await load_mcp_tools(mock_session, format="mcp") + assert len(result) == 1 + assert isinstance(result[0], MCPTool) + assert result[0].name == "test_tool" + mock_session.list_tools.assert_called_once() + + +@pytest.mark.asyncio() +async def test_load_mcp_tools_openai_format(mock_session, mock_list_tools_result): + mock_session.list_tools.return_value = mock_list_tools_result + result = await load_mcp_tools(mock_session, format="openai") + assert len(result) == 1 + assert result[0]["type"] == "function" + assert result[0]["function"]["name"] == "test_tool" + mock_session.list_tools.assert_called_once() + + +def test_get_function_arguments(): + # Test with string arguments + function = {"arguments": '{"test": "value"}'} + result = _get_function_arguments(function) + assert result == {"test": "value"} + + # Test with dict arguments + function = {"arguments": {"test": "value"}} + result = _get_function_arguments(function) + assert result == {"test": "value"} + + # Test with invalid JSON string + function = {"arguments": "invalid json"} + result = _get_function_arguments(function) + assert result == {} + + # Test with no arguments + function = {} + result = _get_function_arguments(function) + assert result == {} + + +@pytest.mark.asyncio() +async def test_call_openai_tool(mock_session, mock_mcp_tool_call_result): + mock_session.call_tool.return_value = mock_mcp_tool_call_result + openai_tool = { + "function": {"name": "test_tool", "arguments": json.dumps({"test": "value"})} + } + result = await call_openai_tool(mock_session, openai_tool) + print("result of call_openai_tool", result) + assert result.content[0].text == "test_output" + mock_session.call_tool.assert_called_once_with( + name="test_tool", arguments={"test": "value"} + ) + + +@pytest.mark.asyncio() +async def test_call_mcp_tool(mock_session, mock_mcp_tool_call_result): + mock_session.call_tool.return_value = mock_mcp_tool_call_result + 
request_params = CallToolRequestParams( + name="test_tool", arguments={"test": "value"} + ) + result = await call_mcp_tool(mock_session, request_params) + print("call_mcp_tool result", result) + assert result.content[0].text == "test_output" + mock_session.call_tool.assert_called_once_with( + name="test_tool", arguments={"test": "value"} + ) From 221f3b9241264caa954caff5928ad732ffcf52c5 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 19:03:31 -0700 Subject: [PATCH 071/119] pip install "pydantic==2.7.2" --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 33559dfdff..afcf10d3e6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -700,6 +700,7 @@ jobs: pip install "pytest-cov==5.0.0" pip install "pytest-asyncio==0.21.1" pip install "respx==0.21.1" + pip install "pydantic==2.7.2" pip install "mcp==1.4.1" # Run pytest and generate JUnit XML report - run: From eb47fd1c9baecc8a13043d24506db70f75d241ea Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 19:04:53 -0700 Subject: [PATCH 072/119] pip install "mcp==1.4.1" --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index afcf10d3e6..aa18f0c5ad 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -788,6 +788,8 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "respx==0.21.1" pip install "hypercorn==0.17.3" + pip install "pydantic==2.7.2" + pip install "mcp==1.4.1" # Run pytest and generate JUnit XML report - run: name: Run tests From ebcd0d798220dc8f5949006f0f45ee67abb8dae7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 19:06:29 -0700 Subject: [PATCH 073/119] ci_cd_server_path --- tests/mcp_tests/test_mcp_litellm_client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/mcp_tests/test_mcp_litellm_client.py b/tests/mcp_tests/test_mcp_litellm_client.py index 
e69aec8553..0f8fb7994a 100644 --- a/tests/mcp_tests/test_mcp_litellm_client.py +++ b/tests/mcp_tests/test_mcp_litellm_client.py @@ -18,10 +18,12 @@ import json @pytest.mark.asyncio async def test_mcp_agent(): + local_server_path = "./mcp_server.py" + ci_cd_server_path = "tests/mcp_tests/mcp_server.py" server_params = StdioServerParameters( command="python3", # Make sure to update to the full absolute path to your math_server.py file - args=["./mcp_server.py"], + args=[ci_cd_server_path], ) async with stdio_client(server_params) as (read, write): From 32474d6862664a60425ab7fd987aaa7a36ab7ace Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 19:20:13 -0700 Subject: [PATCH 074/119] _get_litellm_supported_transcription_kwargs --- .../litellm_core_utils/model_param_helper.py | 22 ++++++++----- ...odel_prices_and_context_window_backup.json | 31 +++++++++++++++++++ 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py index 3542ec3a94..5316ab5d84 100644 --- a/litellm/litellm_core_utils/model_param_helper.py +++ b/litellm/litellm_core_utils/model_param_helper.py @@ -1,9 +1,5 @@ from typing import Set -from openai.types.audio.transcription_create_params import ( - TranscriptionCreateParamsNonStreaming, - TranscriptionCreateParamsStreaming, -) from openai.types.chat.completion_create_params import ( CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming, @@ -16,6 +12,7 @@ from openai.types.completion_create_params import ( ) from openai.types.embedding_create_params import EmbeddingCreateParams +from litellm._logging import verbose_logger from litellm.types.rerank import RerankRequest @@ -126,10 +123,19 @@ class ModelParamHelper: This follows the OpenAI API Spec """ - all_transcription_kwargs = set( - TranscriptionCreateParamsNonStreaming.__annotations__.keys() - ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) - 
return all_transcription_kwargs + try: + from openai.types.audio.transcription_create_params import ( + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, + ) + + all_transcription_kwargs = set( + TranscriptionCreateParamsNonStreaming.__annotations__.keys() + ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) + return all_transcription_kwargs + except Exception as e: + verbose_logger.warning("Error getting transcription kwargs %s", str(e)) + return set() @staticmethod def _get_exclude_kwargs() -> Set[str]: diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index f2ca9156ad..1d7b8794b5 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1426,6 +1426,25 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/gpt-4.5-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000075, + "output_cost_per_token": 0.00015, + "input_cost_per_token_batches": 0.0000375, + "output_cost_per_token_batches": 0.000075, + "cache_read_input_token_cost": 0.0000375, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, "azure/gpt-4o": { "max_tokens": 16384, "max_input_tokens": 128000, @@ -2091,6 +2110,18 @@ "mode": "chat", "supports_tool_choice": true }, + "azure_ai/mistral-small-2503": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000003, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + 
"supports_tool_choice": true + }, "azure_ai/mistral-large-2407": { "max_tokens": 4096, "max_input_tokens": 128000, From cd45d9069b07d3139d0ea1c82b13dce2313ad512 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 20:50:13 -0700 Subject: [PATCH 075/119] bug fix azure/gpt-4.5-preview was added as litellm_provider=openai, should be azure --- ...odel_prices_and_context_window_backup.json | 31 +++++++++++++++++++ model_prices_and_context_window.json | 2 +- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index f2ca9156ad..c5e0329013 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1426,6 +1426,25 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/gpt-4.5-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000075, + "output_cost_per_token": 0.00015, + "input_cost_per_token_batches": 0.0000375, + "output_cost_per_token_batches": 0.000075, + "cache_read_input_token_cost": 0.0000375, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, "azure/gpt-4o": { "max_tokens": 16384, "max_input_tokens": 128000, @@ -2091,6 +2110,18 @@ "mode": "chat", "supports_tool_choice": true }, + "azure_ai/mistral-small-2503": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000003, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "supports_tool_choice": true + }, 
"azure_ai/mistral-large-2407": { "max_tokens": 4096, "max_input_tokens": 128000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 1d7b8794b5..c5e0329013 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1435,7 +1435,7 @@ "input_cost_per_token_batches": 0.0000375, "output_cost_per_token_batches": 0.000075, "cache_read_input_token_cost": 0.0000375, - "litellm_provider": "openai", + "litellm_provider": "azure", "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, From 2c09c31c7e24cd4ae98d7c030645c9eabcc8a979 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:03:20 -0700 Subject: [PATCH 076/119] =?UTF-8?q?bump:=20version=201.63.12=20=E2=86=92?= =?UTF-8?q?=201.63.13?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 38d5687800..66266d845e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.63.12" +version = "1.63.13" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -100,7 +100,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.63.12" +version = "1.63.13" version_files = [ "pyproject.toml:^version" ] From 581f2e6c2f9c080a34a175539a3d5812b5edacb4 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:03:42 -0700 Subject: [PATCH 077/119] bump version --- litellm/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/__init__.py b/litellm/__init__.py index 762a058c7e..25da650440 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -2,7 +2,7 @@ import warnings warnings.filterwarnings("ignore", message=".*conflict with protected 
namespace.*") -### INIT VARIABLES ######### +### INIT VARIABLES ########## import threading import os from typing import Callable, List, Optional, Dict, Union, Any, Literal, get_args From a45077ee9b68990467908868f6fc1cca18d1821e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:03:55 -0700 Subject: [PATCH 078/119] =?UTF-8?q?bump:=20version=201.63.13=20=E2=86=92?= =?UTF-8?q?=201.63.14?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 66266d845e..000adb98f6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.63.13" +version = "1.63.14" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -100,7 +100,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.63.13" +version = "1.63.14" version_files = [ "pyproject.toml:^version" ] From ce32fe355c07f9a8efd4fc3cee3fc7c4807dbc09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Mar 2025 04:04:46 +0000 Subject: [PATCH 079/119] build(deps): bump next from 14.2.21 to 14.2.25 in /ui/litellm-dashboard Bumps [next](https://github.com/vercel/next.js) from 14.2.21 to 14.2.25. - [Release notes](https://github.com/vercel/next.js/releases) - [Changelog](https://github.com/vercel/next.js/blob/canary/release.js) - [Commits](https://github.com/vercel/next.js/compare/v14.2.21...v14.2.25) --- updated-dependencies: - dependency-name: next dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] --- ui/litellm-dashboard/package-lock.json | 99 ++++++++++++++------------ ui/litellm-dashboard/package.json | 2 +- 2 files changed, 56 insertions(+), 45 deletions(-) diff --git a/ui/litellm-dashboard/package-lock.json b/ui/litellm-dashboard/package-lock.json index 307e95217f..39ab75d8c7 100644 --- a/ui/litellm-dashboard/package-lock.json +++ b/ui/litellm-dashboard/package-lock.json @@ -21,7 +21,7 @@ "jsonwebtoken": "^9.0.2", "jwt-decode": "^4.0.0", "moment": "^2.30.1", - "next": "^14.2.15", + "next": "^14.2.25", "openai": "^4.28.0", "papaparse": "^5.5.2", "react": "^18", @@ -418,9 +418,10 @@ } }, "node_modules/@next/env": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.21.tgz", - "integrity": "sha512-lXcwcJd5oR01tggjWJ6SrNNYFGuOOMB9c251wUNkjCpkoXOPkDeF/15c3mnVlBqrW4JJXb2kVxDFhC4GduJt2A==" + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz", + "integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==", + "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { "version": "14.1.0", @@ -432,12 +433,13 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.21.tgz", - "integrity": "sha512-HwEjcKsXtvszXz5q5Z7wCtrHeTTDSTgAbocz45PHMUjU3fBYInfvhR+ZhavDRUYLonm53aHZbB09QtJVJj8T7g==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz", + "integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -447,12 +449,13 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.21.tgz", - "integrity": 
"sha512-TSAA2ROgNzm4FhKbTbyJOBrsREOMVdDIltZ6aZiKvCi/v0UwFmwigBGeqXDA97TFMpR3LNNpw52CbVelkoQBxA==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz", + "integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "darwin" @@ -462,12 +465,13 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.21.tgz", - "integrity": "sha512-0Dqjn0pEUz3JG+AImpnMMW/m8hRtl1GQCNbO66V1yp6RswSTiKmnHf3pTX6xMdJYSemf3O4Q9ykiL0jymu0TuA==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz", + "integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -477,12 +481,13 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.21.tgz", - "integrity": "sha512-Ggfw5qnMXldscVntwnjfaQs5GbBbjioV4B4loP+bjqNEb42fzZlAaK+ldL0jm2CTJga9LynBMhekNfV8W4+HBw==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz", + "integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -492,12 +497,13 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.21.tgz", - "integrity": "sha512-uokj0lubN1WoSa5KKdThVPRffGyiWlm/vCc/cMkWOQHw69Qt0X1o3b2PyLLx8ANqlefILZh1EdfLRz9gVpG6tg==", + "version": "14.2.25", + 
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz", + "integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -507,12 +513,13 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.21.tgz", - "integrity": "sha512-iAEBPzWNbciah4+0yI4s7Pce6BIoxTQ0AGCkxn/UBuzJFkYyJt71MadYQkjPqCQCJAFQ26sYh7MOKdU+VQFgPg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz", + "integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "linux" @@ -522,12 +529,13 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.21.tgz", - "integrity": "sha512-plykgB3vL2hB4Z32W3ktsfqyuyGAPxqwiyrAi2Mr8LlEUhNn9VgkiAl5hODSBpzIfWweX3er1f5uNpGDygfQVQ==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz", + "integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==", "cpu": [ "arm64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -537,12 +545,13 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.21.tgz", - "integrity": "sha512-w5bacz4Vxqrh06BjWgua3Yf7EMDb8iMcVhNrNx8KnJXt8t+Uu0Zg4JHLDL/T7DkTCEEfKXO/Er1fcfWxn2xfPA==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz", + "integrity": 
"sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==", "cpu": [ "ia32" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -552,12 +561,13 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.21.tgz", - "integrity": "sha512-sT6+llIkzpsexGYZq8cjjthRyRGe5cJVhqh12FmlbxHqna6zsDDK8UNaV7g41T6atFHCJUPeLb3uyAwrBwy0NA==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz", + "integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==", "cpu": [ "x64" ], + "license": "MIT", "optional": true, "os": [ "win32" @@ -5001,11 +5011,12 @@ "dev": true }, "node_modules/next": { - "version": "14.2.21", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.21.tgz", - "integrity": "sha512-rZmLwucLHr3/zfDMYbJXbw0ZeoBpirxkXuvsJbk7UPorvPYZhP7vq7aHbKnU7dQNCYIimRrbB2pp3xmf+wsYUg==", + "version": "14.2.25", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz", + "integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==", + "license": "MIT", "dependencies": { - "@next/env": "14.2.21", + "@next/env": "14.2.25", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -5020,15 +5031,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.21", - "@next/swc-darwin-x64": "14.2.21", - "@next/swc-linux-arm64-gnu": "14.2.21", - "@next/swc-linux-arm64-musl": "14.2.21", - "@next/swc-linux-x64-gnu": "14.2.21", - "@next/swc-linux-x64-musl": "14.2.21", - "@next/swc-win32-arm64-msvc": "14.2.21", - "@next/swc-win32-ia32-msvc": "14.2.21", - "@next/swc-win32-x64-msvc": "14.2.21" + "@next/swc-darwin-arm64": "14.2.25", + "@next/swc-darwin-x64": "14.2.25", + "@next/swc-linux-arm64-gnu": "14.2.25", + 
"@next/swc-linux-arm64-musl": "14.2.25", + "@next/swc-linux-x64-gnu": "14.2.25", + "@next/swc-linux-x64-musl": "14.2.25", + "@next/swc-win32-arm64-msvc": "14.2.25", + "@next/swc-win32-ia32-msvc": "14.2.25", + "@next/swc-win32-x64-msvc": "14.2.25" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", diff --git a/ui/litellm-dashboard/package.json b/ui/litellm-dashboard/package.json index 79f096106d..895e2576cc 100644 --- a/ui/litellm-dashboard/package.json +++ b/ui/litellm-dashboard/package.json @@ -22,7 +22,7 @@ "jsonwebtoken": "^9.0.2", "jwt-decode": "^4.0.0", "moment": "^2.30.1", - "next": "^14.2.15", + "next": "^14.2.25", "openai": "^4.28.0", "papaparse": "^5.5.2", "react": "^18", From 2aceeaccd0610d26c322793b9d857ee251a8339d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:28:11 -0700 Subject: [PATCH 080/119] fix ModelParamHelper --- .../litellm_core_utils/model_param_helper.py | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py index 5316ab5d84..d792ede282 100644 --- a/litellm/litellm_core_utils/model_param_helper.py +++ b/litellm/litellm_core_utils/model_param_helper.py @@ -84,8 +84,10 @@ class ModelParamHelper: This follows the OpenAI API Spec """ all_chat_completion_kwargs = set( - CompletionCreateParamsNonStreaming.__annotations__.keys() - ).union(set(CompletionCreateParamsStreaming.__annotations__.keys())) + getattr(CompletionCreateParamsNonStreaming, "__annotations__", {}).keys() + ).union( + set(getattr(CompletionCreateParamsStreaming, "__annotations__", {}).keys()) + ) return all_chat_completion_kwargs @staticmethod @@ -96,8 +98,16 @@ class ModelParamHelper: This follows the OpenAI API Spec """ all_text_completion_kwargs = set( - TextCompletionCreateParamsNonStreaming.__annotations__.keys() - ).union(set(TextCompletionCreateParamsStreaming.__annotations__.keys())) + getattr( + 
TextCompletionCreateParamsNonStreaming, "__annotations__", {} + ).keys() + ).union( + set( + getattr( + TextCompletionCreateParamsStreaming, "__annotations__", {} + ).keys() + ) + ) return all_text_completion_kwargs @staticmethod @@ -114,7 +124,7 @@ class ModelParamHelper: This follows the OpenAI API Spec """ - return set(EmbeddingCreateParams.__annotations__.keys()) + return set(getattr(EmbeddingCreateParams, "__annotations__", {}).keys()) @staticmethod def _get_litellm_supported_transcription_kwargs() -> Set[str]: @@ -128,10 +138,10 @@ class ModelParamHelper: TranscriptionCreateParamsNonStreaming, TranscriptionCreateParamsStreaming, ) + non_streaming_kwargs = set(getattr(TranscriptionCreateParamsNonStreaming, "__annotations__", {}).keys()) + streaming_kwargs = set(getattr(TranscriptionCreateParamsStreaming, "__annotations__", {}).keys()) - all_transcription_kwargs = set( - TranscriptionCreateParamsNonStreaming.__annotations__.keys() - ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) + all_transcription_kwargs = non_streaming_kwargs.union(streaming_kwargs) return all_transcription_kwargs except Exception as e: verbose_logger.warning("Error getting transcription kwargs %s", str(e)) From 5775866b2b46e09d64d62fe57bdfdb81e4224a46 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:49:54 -0700 Subject: [PATCH 081/119] bump to pip install "openai==1.68.2" --- .circleci/config.yml | 18 +++++++++--------- .circleci/requirements.txt | 2 +- requirements.txt | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index aa18f0c5ad..886e121f35 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.67.0 + pip install openai==1.68.2 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install 
"httpx==0.24.1" @@ -168,7 +168,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.67.0 + pip install openai==1.68.2 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -268,7 +268,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.67.0 + pip install openai==1.68.2 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -513,7 +513,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.67.0 + pip install openai==1.68.2 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -1324,7 +1324,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.67.0" + pip install "openai==1.68.2" - run: name: Install Grype command: | @@ -1460,7 +1460,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.67.0" + pip install "openai==1.68.2" # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -1582,7 +1582,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.67.0" + pip install "openai==1.68.2" - run: name: Build Docker image command: docker build -t my-app:latest -f ./docker/Dockerfile.database . 
@@ -2011,7 +2011,7 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "google-cloud-aiplatform==1.43.0" pip install aiohttp - pip install "openai==1.67.0" + pip install "openai==1.68.2" pip install "assemblyai==0.37.0" python -m pip install --upgrade pip pip install "pydantic==2.7.1" @@ -2287,7 +2287,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-asyncio==0.21.1" pip install aiohttp - pip install "openai==1.67.0" + pip install "openai==1.68.2" python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index fc0bbb2ee2..356a9840f5 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -1,5 +1,5 @@ # used by CI/CD testing -openai==1.67.0 +openai==1.68.2 python-dotenv tiktoken importlib_metadata diff --git a/requirements.txt b/requirements.txt index 0683d105e4..2891171cc3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ # LITELLM PROXY DEPENDENCIES # anyio==4.4.0 # openai + http req. httpx==0.27.0 # Pin Httpx dependency -openai==1.67.0 # openai req. +openai==1.68.2 # openai req. 
fastapi==0.115.5 # server dep backoff==2.2.1 # server dep pyyaml==6.0.2 # server dep From 7652352934bfcbfc596ca36631d88138a623e0d9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:51:57 -0700 Subject: [PATCH 082/119] mcp servers.json --- mcp_servers.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 mcp_servers.json diff --git a/mcp_servers.json b/mcp_servers.json new file mode 100644 index 0000000000..c196815747 --- /dev/null +++ b/mcp_servers.json @@ -0,0 +1,16 @@ +{ + "brave-search": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "BRAVE_API_KEY", + "mcp/brave-search" + ], + "env": { + "BRAVE_API_KEY": "YOUR_API_KEY_HERE" + } + } +} \ No newline at end of file From f1d191f5a2900129c858033defad4fdd87f445af Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:56:56 -0700 Subject: [PATCH 083/119] security fix - bump gunicorn==23.0.0 # server dep --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2891171cc3..93a23e0f2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ fastapi==0.115.5 # server dep backoff==2.2.1 # server dep pyyaml==6.0.2 # server dep uvicorn==0.29.0 # server dep -gunicorn==22.0.0 # server dep +gunicorn==23.0.0 # server dep uvloop==0.21.0 # uvicorn dep, gives us much better performance under load boto3==1.34.34 # aws bedrock/sagemaker calls redis==5.0.0 # caching From e84ae46d27953c2be6ed18d1afc648ebf6656c45 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 21:58:43 -0700 Subject: [PATCH 084/119] bump gunicorn - fix security issue on gunicorn --- poetry.lock | 269 +++++++++++++++++++++++++++++++++---------------- pyproject.toml | 2 +- 2 files changed, 185 insertions(+), 86 deletions(-) diff --git a/poetry.lock b/poetry.lock index d270aa2d79..5834dd2e70 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated 
by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -6,6 +6,7 @@ version = "2.4.4" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, @@ -17,6 +18,7 @@ version = "3.10.11" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, @@ -129,6 +131,7 @@ version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, @@ -143,6 +146,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -157,6 +161,7 @@ version = "4.5.2" description = "High level compatibility layer 
for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, @@ -179,6 +184,8 @@ version = "3.11.0" description = "In-process task scheduler with Cron-like capabilities" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da"}, {file = "apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133"}, @@ -207,6 +214,8 @@ version = "5.0.1" description = "Timeout context manager for asyncio programs" optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "python_full_version < \"3.11.3\" and extra == \"proxy\" or python_version < \"3.11\"" files = [ {file = "async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c"}, {file = "async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3"}, @@ -218,6 +227,7 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, @@ -237,6 +247,8 @@ version = "1.32.0" description = "Microsoft Azure Core Library for Python" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "azure_core-1.32.0-py3-none-any.whl", hash = 
"sha256:eac191a0efb23bfa83fddf321b27b122b4ec847befa3091fa736a5c32c50d7b4"}, {file = "azure_core-1.32.0.tar.gz", hash = "sha256:22b3c35d6b2dae14990f6c1be2912bf23ffe50b220e708a28ab1bb92b1c730e5"}, @@ -256,6 +268,8 @@ version = "1.21.0" description = "Microsoft Azure Identity Library for Python" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "azure_identity-1.21.0-py3-none-any.whl", hash = "sha256:258ea6325537352440f71b35c3dffe9d240eae4a5126c1b7ce5efd5766bd9fd9"}, {file = "azure_identity-1.21.0.tar.gz", hash = "sha256:ea22ce6e6b0f429bc1b8d9212d5b9f9877bd4c82f1724bfa910760612c07a9a6"}, @@ -274,6 +288,8 @@ version = "4.9.0" description = "Microsoft Azure Key Vault Secrets Client Library for Python" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "azure_keyvault_secrets-4.9.0-py3-none-any.whl", hash = "sha256:33c7e2aca2cc2092cebc8c6e96eca36a5cc30c767e16ea429c5fa21270e9fba6"}, {file = "azure_keyvault_secrets-4.9.0.tar.gz", hash = "sha256:2a03bb2ffd9a0d6c8ad1c330d9d0310113985a9de06607ece378fd72a5889fe1"}, @@ -290,6 +306,8 @@ version = "2.2.1" description = "Function decoration for backoff and retry" optional = true python-versions = ">=3.7,<4.0" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, @@ -301,6 +319,8 @@ version = "0.2.1" description = "Backport of the standard library zoneinfo module" optional = true python-versions = ">=3.6" +groups = ["main"] +markers = "extra == \"proxy\" and python_version < \"3.9\"" files = [ {file = "backports.zoneinfo-0.2.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc"}, {file = 
"backports.zoneinfo-0.2.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722"}, @@ -329,6 +349,7 @@ version = "23.12.1" description = "The uncompromising code formatter." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"}, {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"}, @@ -375,6 +396,8 @@ version = "1.34.34" description = "The AWS SDK for Python" optional = true python-versions = ">= 3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "boto3-1.34.34-py3-none-any.whl", hash = "sha256:33a8b6d9136fa7427160edb92d2e50f2035f04e9d63a2d1027349053e12626aa"}, {file = "boto3-1.34.34.tar.gz", hash = "sha256:b2f321e20966f021ec800b7f2c01287a3dd04fc5965acdfbaa9c505a24ca45d1"}, @@ -394,6 +417,8 @@ version = "1.34.162" description = "Low-level, data-driven core of boto 3." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "botocore-1.34.162-py3-none-any.whl", hash = "sha256:2d918b02db88d27a75b48275e6fb2506e9adaaddbec1ffa6a8a0898b34e769be"}, {file = "botocore-1.34.162.tar.gz", hash = "sha256:adc23be4fb99ad31961236342b7cbf3c0bfc62532cd02852196032e8c0d682f3"}, @@ -416,6 +441,8 @@ version = "5.5.2" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a"}, {file = "cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4"}, @@ -427,6 +454,7 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -438,6 +466,8 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\" or extra == \"extra-proxy\" and platform_python_implementation != \"PyPy\"" files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -517,6 +547,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -618,6 +649,7 @@ version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -632,10 +664,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} [[package]] name = "cryptography" @@ -643,6 +677,8 @@ version = "43.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"proxy\" or extra == \"extra-proxy\"" files = [ {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, @@ -692,6 +728,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -703,6 +740,8 @@ version = "2.6.1" description = "DNS toolkit" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, @@ -723,6 +762,8 @@ version = "2.2.0" description = "A robust email address syntax and deliverability validation library." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, @@ -738,6 +779,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -752,6 +795,8 @@ version = "0.115.11" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "fastapi-0.115.11-py3-none-any.whl", hash = "sha256:32e1541b7b74602e4ef4a0260ecaf3aadf9d4f19590bba3e1bf2ac4666aa2c64"}, {file = "fastapi-0.115.11.tar.gz", hash = "sha256:cc81f03f688678b92600a65a5e618b93592c65005db37157147204d8924bf94f"}, @@ -772,6 +817,8 @@ version = "0.16.0" description = "FastAPI plugin to enable SSO to most common providers (such as Facebook login, Google login and login via Microsoft Office 365 Account)" optional = true python-versions = "<4.0,>=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "fastapi_sso-0.16.0-py3-none-any.whl", hash = "sha256:3a66a942474ef9756d3a9d8b945d55bd9faf99781facdb9b87a40b73d6d6b0c3"}, {file = "fastapi_sso-0.16.0.tar.gz", hash = "sha256:f3941f986347566b7d3747c710cf474a907f581bfb6697ff3bb3e44eb76b438c"}, @@ -790,6 +837,7 @@ version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, @@ -806,6 +854,7 @@ version = "6.1.0" description = "the modular source code checker: pep8 pyflakes and co" optional = false python-versions = ">=3.8.1" +groups = ["dev"] files = [ {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"}, {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"}, @@ -822,6 +871,7 @@ version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, @@ -923,6 +973,7 @@ version = "2025.3.0" description = "File-system specification" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "fsspec-2025.3.0-py3-none-any.whl", hash = "sha256:efb87af3efa9103f94ca91a7f8cb7a4df91af9f74fc106c9c7ea0efd7277c1b3"}, {file = "fsspec-2025.3.0.tar.gz", hash = "sha256:a935fd1ea872591f2b5148907d103488fc523295e6c64b835cfad8c3eca44972"}, @@ -962,6 +1013,8 @@ version = "2.24.2" description = "Google API client core library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "google_api_core-2.24.2-py3-none-any.whl", hash = "sha256:810a63ac95f3c441b7c0e43d344e372887f62ce9071ba972eacf32672e072de9"}, {file = 
"google_api_core-2.24.2.tar.gz", hash = "sha256:81718493daf06d96d6bc76a91c23874dbf2fac0adbbf542831b805ee6e974696"}, @@ -997,6 +1050,8 @@ version = "2.38.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, @@ -1021,6 +1076,8 @@ version = "2.24.2" description = "Google Cloud Kms API client library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "google_cloud_kms-2.24.2-py2.py3-none-any.whl", hash = "sha256:368209b035dfac691a467c1cf50986d8b1b26cac1166bdfbaa25d738df91ff7b"}, {file = "google_cloud_kms-2.24.2.tar.gz", hash = "sha256:e9e18bbfafd1a4035c76c03fb5ff03f4f57f596d08e1a9ede7e69ec0151b27a1"}, @@ -1039,6 +1096,8 @@ version = "1.69.2" description = "Common protobufs used in Google APIs" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "googleapis_common_protos-1.69.2-py3-none-any.whl", hash = "sha256:0b30452ff9c7a27d80bfc5718954063e8ab53dd3697093d3bc99581f5fd24212"}, {file = "googleapis_common_protos-1.69.2.tar.gz", hash = "sha256:3e1b904a27a33c821b4b749fd31d334c0c9c30e6113023d495e48979a3dc9c5f"}, @@ -1057,6 +1116,8 @@ version = "0.14.2" description = "IAM API client library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "grpc_google_iam_v1-0.14.2-py3-none-any.whl", hash = "sha256:a3171468459770907926d56a440b2bb643eec1d7ba215f48f3ecece42b4d8351"}, {file = "grpc_google_iam_v1-0.14.2.tar.gz", hash = "sha256:b3e1fc387a1a329e41672197d0ace9de22c78dd7d215048c4c78712073f7bd20"}, @@ -1073,6 +1134,8 @@ version = 
"1.70.0" description = "HTTP/2-based RPC framework" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"}, {file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"}, @@ -1134,75 +1197,14 @@ files = [ [package.extras] protobuf = ["grpcio-tools (>=1.70.0)"] -[[package]] -name = "grpcio" -version = "1.71.0" -description = "HTTP/2-based RPC framework" -optional = true -python-versions = ">=3.9" -files = [ - {file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"}, - {file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0ab8b2864396663a5b0b0d6d79495657ae85fa37dcb6498a2669d067c65c11ea"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c30f393f9d5ff00a71bb56de4aa75b8fe91b161aeb61d39528db6b768d7eac69"}, - {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f250ff44843d9a0615e350c77f890082102a0318d66a99540f54769c8766ab73"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6d8de076528f7c43a2f576bc311799f89d795aa6c9b637377cc2b1616473804"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b91879d6da1605811ebc60d21ab6a7e4bae6c35f6b63a061d61eb818c8168f6"}, - {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f71574afdf944e6652203cd1badcda195b2a27d9c83e6d88dc1ce3cfb73b31a5"}, - {file = "grpcio-1.71.0-cp310-cp310-win32.whl", hash = 
"sha256:8997d6785e93308f277884ee6899ba63baafa0dfb4729748200fcc537858a509"}, - {file = "grpcio-1.71.0-cp310-cp310-win_amd64.whl", hash = "sha256:7d6ac9481d9d0d129224f6d5934d5832c4b1cddb96b59e7eba8416868909786a"}, - {file = "grpcio-1.71.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:d6aa986318c36508dc1d5001a3ff169a15b99b9f96ef5e98e13522c506b37eef"}, - {file = "grpcio-1.71.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:d2c170247315f2d7e5798a22358e982ad6eeb68fa20cf7a820bb74c11f0736e7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:e6f83a583ed0a5b08c5bc7a3fe860bb3c2eac1f03f1f63e0bc2091325605d2b7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4be74ddeeb92cc87190e0e376dbc8fc7736dbb6d3d454f2fa1f5be1dee26b9d7"}, - {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dd0dfbe4d5eb1fcfec9490ca13f82b089a309dc3678e2edabc144051270a66e"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a2242d6950dc892afdf9e951ed7ff89473aaf744b7d5727ad56bdaace363722b"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0fa05ee31a20456b13ae49ad2e5d585265f71dd19fbd9ef983c28f926d45d0a7"}, - {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3d081e859fb1ebe176de33fc3adb26c7d46b8812f906042705346b314bde32c3"}, - {file = "grpcio-1.71.0-cp311-cp311-win32.whl", hash = "sha256:d6de81c9c00c8a23047136b11794b3584cdc1460ed7cbc10eada50614baa1444"}, - {file = "grpcio-1.71.0-cp311-cp311-win_amd64.whl", hash = "sha256:24e867651fc67717b6f896d5f0cac0ec863a8b5fb7d6441c2ab428f52c651c6b"}, - {file = "grpcio-1.71.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:0ff35c8d807c1c7531d3002be03221ff9ae15712b53ab46e2a0b4bb271f38537"}, - {file = "grpcio-1.71.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:b78a99cd1ece4be92ab7c07765a0b038194ded2e0a26fd654591ee136088d8d7"}, - {file = 
"grpcio-1.71.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:dc1a1231ed23caac1de9f943d031f1bc38d0f69d2a3b243ea0d664fc1fbd7fec"}, - {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6beeea5566092c5e3c4896c6d1d307fb46b1d4bdf3e70c8340b190a69198594"}, - {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5170929109450a2c031cfe87d6716f2fae39695ad5335d9106ae88cc32dc84c"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5b08d03ace7aca7b2fadd4baf291139b4a5f058805a8327bfe9aece7253b6d67"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f903017db76bf9cc2b2d8bdd37bf04b505bbccad6be8a81e1542206875d0e9db"}, - {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:469f42a0b410883185eab4689060a20488a1a0a00f8bbb3cbc1061197b4c5a79"}, - {file = "grpcio-1.71.0-cp312-cp312-win32.whl", hash = "sha256:ad9f30838550695b5eb302add33f21f7301b882937460dd24f24b3cc5a95067a"}, - {file = "grpcio-1.71.0-cp312-cp312-win_amd64.whl", hash = "sha256:652350609332de6dac4ece254e5d7e1ff834e203d6afb769601f286886f6f3a8"}, - {file = "grpcio-1.71.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:cebc1b34ba40a312ab480ccdb396ff3c529377a2fce72c45a741f7215bfe8379"}, - {file = "grpcio-1.71.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:85da336e3649a3d2171e82f696b5cad2c6231fdd5bad52616476235681bee5b3"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f9a412f55bb6e8f3bb000e020dbc1e709627dcb3a56f6431fa7076b4c1aab0db"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47be9584729534660416f6d2a3108aaeac1122f6b5bdbf9fd823e11fe6fbaa29"}, - {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9c80ac6091c916db81131d50926a93ab162a7e97e4428ffc186b6e80d6dda4"}, - {file = 
"grpcio-1.71.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:789d5e2a3a15419374b7b45cd680b1e83bbc1e52b9086e49308e2c0b5bbae6e3"}, - {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:1be857615e26a86d7363e8a163fade914595c81fec962b3d514a4b1e8760467b"}, - {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a76d39b5fafd79ed604c4be0a869ec3581a172a707e2a8d7a4858cb05a5a7637"}, - {file = "grpcio-1.71.0-cp313-cp313-win32.whl", hash = "sha256:74258dce215cb1995083daa17b379a1a5a87d275387b7ffe137f1d5131e2cfbb"}, - {file = "grpcio-1.71.0-cp313-cp313-win_amd64.whl", hash = "sha256:22c3bc8d488c039a199f7a003a38cb7635db6656fa96437a8accde8322ce2366"}, - {file = "grpcio-1.71.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c6a0a28450c16809f94e0b5bfe52cabff63e7e4b97b44123ebf77f448534d07d"}, - {file = "grpcio-1.71.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:a371e6b6a5379d3692cc4ea1cb92754d2a47bdddeee755d3203d1f84ae08e03e"}, - {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:39983a9245d37394fd59de71e88c4b295eb510a3555e0a847d9965088cdbd033"}, - {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9182e0063112e55e74ee7584769ec5a0b4f18252c35787f48738627e23a62b97"}, - {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693bc706c031aeb848849b9d1c6b63ae6bcc64057984bb91a542332b75aa4c3d"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:20e8f653abd5ec606be69540f57289274c9ca503ed38388481e98fa396ed0b41"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8700a2a57771cc43ea295296330daaddc0d93c088f0a35cc969292b6db959bf3"}, - {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d35a95f05a8a2cbe8e02be137740138b3b2ea5f80bd004444e4f9a1ffc511e32"}, - {file = "grpcio-1.71.0-cp39-cp39-win32.whl", hash = 
"sha256:f9c30c464cb2ddfbc2ddf9400287701270fdc0f14be5f08a1e3939f1e749b455"}, - {file = "grpcio-1.71.0-cp39-cp39-win_amd64.whl", hash = "sha256:63e41b91032f298b3e973b3fa4093cbbc620c875e2da7b93e249d4728b54559a"}, - {file = "grpcio-1.71.0.tar.gz", hash = "sha256:2b85f7820475ad3edec209d3d89a7909ada16caab05d3f2e08a7e8ae3200a55c"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.71.0)"] - [[package]] name = "grpcio-status" version = "1.70.0" description = "Status proto mapping for gRPC" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "grpcio_status-1.70.0-py3-none-any.whl", hash = "sha256:fc5a2ae2b9b1c1969cc49f3262676e6854aa2398ec69cb5bd6c47cd501904a85"}, {file = "grpcio_status-1.70.0.tar.gz", hash = "sha256:0e7b42816512433b18b9d764285ff029bde059e9d41f8fe10a60631bd8348101"}, @@ -1213,31 +1215,17 @@ googleapis-common-protos = ">=1.5.5" grpcio = ">=1.70.0" protobuf = ">=5.26.1,<6.0dev" -[[package]] -name = "grpcio-status" -version = "1.71.0" -description = "Status proto mapping for gRPC" -optional = true -python-versions = ">=3.9" -files = [ - {file = "grpcio_status-1.71.0-py3-none-any.whl", hash = "sha256:843934ef8c09e3e858952887467f8256aac3910c55f077a359a65b2b3cde3e68"}, - {file = "grpcio_status-1.71.0.tar.gz", hash = "sha256:11405fed67b68f406b3f3c7c5ae5104a79d2d309666d10d61b152e91d28fb968"}, -] - -[package.dependencies] -googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.71.0" -protobuf = ">=5.26.1,<6.0dev" - [[package]] name = "gunicorn" -version = "22.0.0" +version = "23.0.0" description = "WSGI HTTP Server for UNIX" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ - {file = "gunicorn-22.0.0-py3-none-any.whl", hash = "sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9"}, - {file = "gunicorn-22.0.0.tar.gz", hash = "sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63"}, + {file = 
"gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d"}, + {file = "gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec"}, ] [package.dependencies] @@ -1256,6 +1244,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -1267,6 +1256,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -1288,6 +1278,7 @@ version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -1312,6 +1303,7 @@ version = "0.29.3" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["main"] files = [ {file = "huggingface_hub-0.29.3-py3-none-any.whl", hash = "sha256:0b25710932ac649c08cdbefa6c6ccb8e88eef82927cacdb048efb726429453aa"}, {file = "huggingface_hub-0.29.3.tar.gz", hash = "sha256:64519a25716e0ba382ba2d3fb3ca082e7c7eb4a2fc634d200e8380006e0760e5"}, @@ -1346,6 +1338,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -1360,6 +1353,7 @@ version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, @@ -1383,6 +1377,8 @@ version = "6.4.5" description = "Read resources from Python packages" optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "python_version < \"3.9\"" files = [ {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, {file = 
"importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, @@ -1405,6 +1401,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -1416,6 +1413,8 @@ version = "0.7.2" description = "An ISO 8601 date/time/duration parser and formatter" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "isodate-0.7.2-py3-none-any.whl", hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15"}, {file = "isodate-0.7.2.tar.gz", hash = "sha256:4cd1aa0f43ca76f4a6c6c0292a85f40b35ec2e43e315b59f06e6d32171a953e6"}, @@ -1427,6 +1426,7 @@ version = "3.1.6" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, @@ -1444,6 +1444,7 @@ version = "0.9.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, @@ -1529,6 +1530,8 @@ version = "1.0.1" description = "JSON Matching Expressions" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, @@ -1540,6 +1543,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -1563,6 +1567,7 @@ version = "2023.12.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, @@ -1578,6 +1583,7 @@ version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." 
optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, @@ -1647,6 +1653,7 @@ version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -1658,6 +1665,8 @@ version = "1.32.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "msal-1.32.0-py3-none-any.whl", hash = "sha256:9dbac5384a10bbbf4dae5c7ea0d707d14e087b92c5aa4954b3feaa2d1aa0bcb7"}, {file = "msal-1.32.0.tar.gz", hash = "sha256:5445fe3af1da6be484991a7ab32eaa82461dc2347de105b76af92c610c3335c2"}, @@ -1677,6 +1686,8 @@ version = "1.3.0" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." 
optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "msal_extensions-1.3.0-py3-none-any.whl", hash = "sha256:105328ddcbdd342016c9949d8f89e3917554740c8ab26669c0fa0e069e730a0e"}, {file = "msal_extensions-1.3.0.tar.gz", hash = "sha256:96918996642b38c78cd59b55efa0f06fd1373c90e0949be8615697c048fba62c"}, @@ -1694,6 +1705,7 @@ version = "6.1.0" description = "multidict implementation" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, @@ -1798,6 +1810,7 @@ version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, @@ -1857,6 +1870,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1868,6 +1882,8 @@ version = "1.9.1" description = "Node.js virtual environment builder" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -1879,6 +1895,8 @@ version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" optional = true python-versions = ">=3.6" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, @@ -1895,6 +1913,7 @@ version = "1.66.3" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "openai-1.66.3-py3-none-any.whl", hash = "sha256:a427c920f727711877ab17c11b95f1230b27767ba7a01e5b66102945141ceca9"}, {file = "openai-1.66.3.tar.gz", hash = "sha256:8dde3aebe2d081258d4159c4cb27bdc13b5bb3f7ea2201d9bd940b9a89faf0c9"}, @@ -1920,6 +1939,8 @@ version = "3.10.15" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = 
"orjson-3.10.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:552c883d03ad185f720d0c09583ebde257e41b9521b74ff40e08b7dec4559c04"}, {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:616e3e8d438d02e4854f70bfdc03a6bcdb697358dbaa6bcd19cbe24d24ece1f8"}, @@ -2008,6 +2029,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -2019,6 +2041,7 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -2030,6 +2053,8 @@ version = "1.3.10" description = "Resolve a name to an object." optional = false python-versions = ">=3.6" +groups = ["main"] +markers = "python_version < \"3.9\"" files = [ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, @@ -2041,6 +2066,7 @@ version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -2057,6 +2083,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -2072,6 +2099,8 @@ version = "0.11.0" description = "Prisma Client Python is an auto-generated and fully type-safe database client" optional = true python-versions = ">=3.7.0" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "prisma-0.11.0-py3-none-any.whl", hash = "sha256:22bb869e59a2968b99f3483bb417717273ffbc569fd1e9ceed95e5614cbaf53a"}, {file = "prisma-0.11.0.tar.gz", hash = "sha256:3f2f2fd2361e1ec5ff655f2a04c7860c2f2a5bc4c91f78ca9c5c6349735bf693"}, @@ -2097,6 +2126,7 @@ version = "0.2.0" description = "Accelerated property cache" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, @@ -2204,6 +2234,8 @@ version = "1.26.1" description = "Beautiful, Pythonic protocol buffers" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66"}, 
{file = "proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012"}, @@ -2221,6 +2253,8 @@ version = "5.29.3" description = "" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = "sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, @@ -2241,6 +2275,8 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -2252,6 +2288,8 @@ version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, @@ -2266,6 +2304,7 @@ version = "2.11.1" description = "Python style guide checker" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, @@ -2277,6 +2316,8 @@ version = "2.22" description = "C parser in Python" optional = true 
python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\" or extra == \"extra-proxy\" and platform_python_implementation != \"PyPy\"" files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -2288,6 +2329,7 @@ version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, @@ -2309,6 +2351,7 @@ version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, @@ -2421,6 +2464,7 @@ version = "3.1.0" description = "passive checker of Python programs" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"}, {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"}, @@ -2432,6 +2476,8 @@ version = "2.9.0" description = "JSON Web Token implementation in Python" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\" or extra == \"extra-proxy\"" files = [ {file = "PyJWT-2.9.0-py3-none-any.whl", hash = 
"sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"}, {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"}, @@ -2452,6 +2498,8 @@ version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" optional = true python-versions = ">=3.6" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, @@ -2478,6 +2526,7 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -2500,6 +2549,7 @@ version = "3.14.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, @@ -2517,6 +2567,8 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = 
"python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -2531,6 +2583,7 @@ version = "1.0.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, @@ -2545,6 +2598,8 @@ version = "0.0.18" description = "A streaming multipart parser for Python" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "python_multipart-0.0.18-py3-none-any.whl", hash = "sha256:efe91480f485f6a361427a541db4796f9e1591afc0fb8e7a4ba06bfbc6708996"}, {file = "python_multipart-0.0.18.tar.gz", hash = "sha256:7a68db60c8bfb82e460637fa4750727b45af1d5e2ed215593f917f64694d34fe"}, @@ -2556,6 +2611,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2618,6 +2674,8 @@ version = "5.2.1" description = "Python client for Redis database and key-value store" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4"}, {file = "redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f"}, @@ -2636,6 +2694,7 @@ version = "0.35.1" description = 
"JSON Referencing + Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, @@ -2651,6 +2710,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2754,6 +2814,7 @@ version = "2.31.0" description = "Python HTTP for Humans." optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, @@ -2775,6 +2836,8 @@ version = "0.8.0" description = "Resend Python SDK" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "resend-0.8.0-py2.py3-none-any.whl", hash = "sha256:adc1515dadf4f4fc6b90db55a237f0f37fc56fd74287a986519a8a187fdb661d"}, {file = "resend-0.8.0.tar.gz", hash = "sha256:94142394701724dbcfcd8f760f675c662a1025013e741dd7cc773ca885526257"}, @@ -2789,6 +2852,7 @@ version = "0.20.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"}, {file = 
"rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"}, @@ -2901,6 +2965,8 @@ version = "2.1.0" description = "RQ is a simple, lightweight, library for creating background jobs, and processing them." optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "rq-2.1.0-py3-none-any.whl", hash = "sha256:3c6892c6ca848e5fb47c1875399a66f13656bf0e123bf725d9aa9a12718e2fdf"}, {file = "rq-2.1.0.tar.gz", hash = "sha256:764585b6cab69ef1412f4aee523347e5aa7ece3ca175c118b1d92223dd8c2826"}, @@ -2916,6 +2982,8 @@ version = "4.9" description = "Pure-Python RSA implementation" optional = true python-versions = ">=3.6,<4" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, @@ -2930,6 +2998,8 @@ version = "0.10.4" description = "An Amazon S3 Transfer Manager" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "s3transfer-0.10.4-py3-none-any.whl", hash = "sha256:244a76a24355363a68164241438de1b72f8781664920260c48465896b712a41e"}, {file = "s3transfer-0.10.4.tar.gz", hash = "sha256:29edc09801743c21eb5ecbc617a152df41d3c287f67b615f73e5f750583666a7"}, @@ -2947,6 +3017,8 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +markers = "extra == \"extra-proxy\" or extra == \"proxy\"" files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -2958,6 +3030,7 @@ version = 
"1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -2969,6 +3042,8 @@ version = "0.44.0" description = "The little ASGI library that shines." optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "starlette-0.44.0-py3-none-any.whl", hash = "sha256:19edeb75844c16dcd4f9dd72f22f9108c1539f3fc9c4c88885654fef64f85aea"}, {file = "starlette-0.44.0.tar.gz", hash = "sha256:e35166950a3ccccc701962fe0711db0bc14f2ecd37c6f9fe5e3eae0cbaea8715"}, @@ -2987,6 +3062,7 @@ version = "0.7.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, @@ -3039,6 +3115,7 @@ version = "0.21.0" description = "" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2"}, {file = "tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e"}, @@ -3071,6 +3148,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -3112,6 +3191,8 @@ version = "0.13.2" description = "Style preserving TOML library" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"extra-proxy\"" files = [ {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, @@ -3123,6 +3204,7 @@ version = "4.67.1" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -3144,6 +3226,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -3155,6 +3238,8 @@ version = "2025.1" description = "Provider of IANA time zone data" optional = true python-versions = ">=2" +groups = ["main"] +markers = "extra == \"proxy\" and platform_system == \"Windows\"" files = [ {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, @@ -3166,6 +3251,8 @@ 
version = "5.2" description = "tzinfo object for the local timezone" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "tzlocal-5.2-py3-none-any.whl", hash = "sha256:49816ef2fe65ea8ac19d19aa7a1ae0551c834303d5014c6d5a62e4cbda8047b8"}, {file = "tzlocal-5.2.tar.gz", hash = "sha256:8d399205578f1a9342816409cc1e46a93ebd5755e39ea2d85334bea911bf0e6e"}, @@ -3184,6 +3271,8 @@ version = "1.26.20" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["main"] +markers = "python_version < \"3.10\"" files = [ {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, @@ -3200,6 +3289,8 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\"" files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -3217,6 +3308,8 @@ version = "0.29.0" description = "The lightning-fast ASGI server." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "uvicorn-0.29.0-py3-none-any.whl", hash = "sha256:2c2aac7ff4f4365c206fd773a39bf4ebd1047c238f8b8268ad996829323473de"}, {file = "uvicorn-0.29.0.tar.gz", hash = "sha256:6a69214c0b6a087462412670b3ef21224fa48cae0e452b5883e8e8bdfdd11dd0"}, @@ -3236,6 +3329,8 @@ version = "0.21.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = true python-versions = ">=3.8.0" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, @@ -3287,6 +3382,8 @@ version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"proxy\"" files = [ {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, @@ -3382,6 +3479,7 @@ version = "1.15.2" description = "Yet another URL library" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"}, {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"}, @@ -3494,6 +3592,7 @@ version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ 
{file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, @@ -3512,6 +3611,6 @@ extra-proxy = ["azure-identity", "azure-keyvault-secrets", "google-cloud-kms", " proxy = ["PyJWT", "apscheduler", "backoff", "boto3", "cryptography", "fastapi", "fastapi-sso", "gunicorn", "orjson", "pynacl", "python-multipart", "pyyaml", "rq", "uvicorn", "uvloop", "websockets"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.8.1,<4.0, !=3.9.7" -content-hash = "b9daad0a009079f7bf9c520525e2f9d0ea7ade51a1c598b88e23d6d590ef44be" +content-hash = "55078af47c1af79bd3ebadacb7ba92844d550a577bb0c49f5096693701ea4322" diff --git a/pyproject.toml b/pyproject.toml index 000adb98f6..208804c562 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,7 @@ jsonschema = "^4.22.0" uvicorn = {version = "^0.29.0", optional = true} uvloop = {version = "^0.21.0", optional = true} -gunicorn = {version = "^22.0.0", optional = true} +gunicorn = {version = "^23.0.0", optional = true} fastapi = {version = "^0.115.5", optional = true} backoff = {version = "*", optional = true} pyyaml = {version = "^6.0.1", optional = true} From d73db6fd4a6d4fc8d7adc52989d26ff14d310a84 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 21 Mar 2025 22:02:39 -0700 Subject: [PATCH 085/119] test: fix test --- tests/proxy_unit_tests/test_user_api_key_auth.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/proxy_unit_tests/test_user_api_key_auth.py b/tests/proxy_unit_tests/test_user_api_key_auth.py index 119457f0c2..f0ca27c946 100644 --- a/tests/proxy_unit_tests/test_user_api_key_auth.py +++ b/tests/proxy_unit_tests/test_user_api_key_auth.py @@ -332,7 +332,7 @@ async def test_auth_with_allowed_routes(route, should_raise_error): ], ) def test_is_ui_route_allowed(route, user_role, 
expected_result): - from litellm.proxy.auth.user_api_key_auth import _is_ui_route + from litellm.proxy.auth.auth_checks import _is_ui_route from litellm.proxy._types import LiteLLM_UserTable user_obj = LiteLLM_UserTable( @@ -370,7 +370,7 @@ def test_is_ui_route_allowed(route, user_role, expected_result): ], ) def test_is_api_route_allowed(route, user_role, expected_result): - from litellm.proxy.auth.user_api_key_auth import _is_api_route_allowed + from litellm.proxy.auth.auth_checks import _is_api_route_allowed from litellm.proxy._types import LiteLLM_UserTable user_obj = LiteLLM_UserTable( @@ -638,7 +638,7 @@ async def test_soft_budget_alert(): def test_is_allowed_route(): - from litellm.proxy.auth.user_api_key_auth import _is_allowed_route + from litellm.proxy.auth.auth_checks import _is_allowed_route from litellm.proxy._types import UserAPIKeyAuth import datetime @@ -649,7 +649,6 @@ def test_is_allowed_route(): "token_type": "api", "request": request, "request_data": {"input": ["hello world"], "model": "embedding-small"}, - "api_key": "9644159bc181998825c44c788b1526341ed2e825d1b6f562e23173759e14bb86", "valid_token": UserAPIKeyAuth( token="9644159bc181998825c44c788b1526341ed2e825d1b6f562e23173759e14bb86", key_name="sk-...CJjQ", @@ -737,7 +736,7 @@ def test_is_allowed_route(): ], ) def test_is_user_proxy_admin(user_obj, expected_result): - from litellm.proxy.auth.user_api_key_auth import _is_user_proxy_admin + from litellm.proxy.auth.auth_checks import _is_user_proxy_admin assert _is_user_proxy_admin(user_obj) == expected_result From 1d58407e9387ab206d1aeeb1fe6ee2403b40002b Mon Sep 17 00:00:00 2001 From: Luis Date: Sat, 22 Mar 2025 08:47:33 -0400 Subject: [PATCH 086/119] fix typo in predibase.md --- docs/my-website/docs/providers/predibase.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/providers/predibase.md b/docs/my-website/docs/providers/predibase.md index 31713aef1e..9f25309c19 100644 --- 
a/docs/my-website/docs/providers/predibase.md +++ b/docs/my-website/docs/providers/predibase.md @@ -230,7 +230,7 @@ response = completion( model="predibase/llama-3-8b-instruct", messages = [{ "content": "Hello, how are you?","role": "user"}], adapter_id="my_repo/3", - adapter_soruce="pbase", + adapter_source="pbase", ) ``` From e9c6f5b2df027d290577bfd44b8c3aebaca8e052 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 10:24:08 -0700 Subject: [PATCH 087/119] add search_context_cost_per_1k_calls --- ...odel_prices_and_context_window_backup.json | 45 +++++++++++++++++-- model_prices_and_context_window.json | 45 +++++++++++++++++-- 2 files changed, 84 insertions(+), 6 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index c5e0329013..8733730946 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -74,7 +74,36 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } + }, + "gpt-4o-search-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + "output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + 
"high_context": 50.00 + } }, "gpt-4.5-preview": { "max_tokens": 16384, @@ -199,7 +228,12 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } }, "gpt-4o-mini-2024-07-18": { "max_tokens": 16384, @@ -218,7 +252,12 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } }, "o1-pro": { "max_tokens": 100000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index c5e0329013..8733730946 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -74,7 +74,36 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } + }, + "gpt-4o-search-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + "output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + 
} }, "gpt-4.5-preview": { "max_tokens": 16384, @@ -199,7 +228,12 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } }, "gpt-4o-mini-2024-07-18": { "max_tokens": 16384, @@ -218,7 +252,12 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } }, "o1-pro": { "max_tokens": 100000, From e99a22b9ac73aa3fd21ecf4d68a9947002d92c41 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 10:42:46 -0700 Subject: [PATCH 088/119] refactor(llm_passthrough_endpoints.py): refactor vertex passthrough to use common llm passthrough handler.py --- litellm/llms/vertex_ai/common_utils.py | 60 +++ .../pass_through_endpoints/common_utils.py | 16 + .../llm_passthrough_endpoints.py | 133 ++++++ .../passthrough_endpoint_router.py | 36 ++ .../vertex_ai_endpoints/vertex_endpoints.py | 449 +++++++++--------- .../vertex_passthrough_router.py | 199 ++++---- .../test_llm_pass_through_endpoints.py | 72 +++ .../test_unit_test_vertex_pass_through.py | 23 + 8 files changed, 650 insertions(+), 338 deletions(-) create mode 100644 litellm/proxy/pass_through_endpoints/common_utils.py diff --git a/litellm/llms/vertex_ai/common_utils.py b/litellm/llms/vertex_ai/common_utils.py index f7149c349a..4a4c428941 100644 --- a/litellm/llms/vertex_ai/common_utils.py +++ b/litellm/llms/vertex_ai/common_utils.py @@ -1,3 +1,4 @@ +import re from typing import Dict, List, Literal, Optional, Tuple, Union import httpx @@ -280,3 +281,62 @@ def _convert_vertex_datetime_to_openai_datetime(vertex_datetime: str) -> int: dt = datetime.strptime(vertex_datetime, 
"%Y-%m-%dT%H:%M:%S.%fZ") # Convert to Unix timestamp (seconds since epoch) return int(dt.timestamp()) + + +def get_vertex_project_id_from_url(url: str) -> Optional[str]: + """ + Get the vertex project id from the url + + `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` + """ + match = re.search(r"/projects/([^/]+)", url) + return match.group(1) if match else None + + +def get_vertex_location_from_url(url: str) -> Optional[str]: + """ + Get the vertex location from the url + + `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` + """ + match = re.search(r"/locations/([^/]+)", url) + return match.group(1) if match else None + + +def construct_target_url( + base_url: str, + requested_route: str, + default_vertex_location: Optional[str], + default_vertex_project: Optional[str], +) -> httpx.URL: + """ + Allow user to specify their own project id / location. + + If missing, use defaults + + Handle cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460 + + Constructed Url: + POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents + """ + new_base_url = httpx.URL(base_url) + if "locations" in requested_route: # contains the target project id + location + updated_url = new_base_url.copy_with(path=requested_route) + return updated_url + """ + - Add endpoint version (e.g. 
v1beta for cachedContent, v1 for rest) + - Add default project id + - Add default location + """ + vertex_version: Literal["v1", "v1beta1"] = "v1" + if "cachedContent" in requested_route: + vertex_version = "v1beta1" + + base_requested_route = "{}/projects/{}/locations/{}".format( + vertex_version, default_vertex_project, default_vertex_location + ) + + updated_requested_route = "/" + base_requested_route + requested_route + + updated_url = new_base_url.copy_with(path=updated_requested_route) + return updated_url diff --git a/litellm/proxy/pass_through_endpoints/common_utils.py b/litellm/proxy/pass_through_endpoints/common_utils.py new file mode 100644 index 0000000000..3a3783dd57 --- /dev/null +++ b/litellm/proxy/pass_through_endpoints/common_utils.py @@ -0,0 +1,16 @@ +from fastapi import Request + + +def get_litellm_virtual_key(request: Request) -> str: + """ + Extract and format API key from request headers. + Prioritizes x-litellm-api-key over Authorization header. + + + Vertex JS SDK uses `Authorization` header, we use `x-litellm-api-key` to pass litellm virtual key + + """ + litellm_api_key = request.headers.get("x-litellm-api-key") + if litellm_api_key: + return f"Bearer {litellm_api_key}" + return request.headers.get("Authorization", "") diff --git a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py index 4724c7f9d1..be3a903dcc 100644 --- a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py @@ -12,10 +12,13 @@ import httpx from fastapi import APIRouter, Depends, HTTPException, Request, Response import litellm +from litellm._logging import verbose_proxy_logger from litellm.constants import BEDROCK_AGENT_RUNTIME_PASS_THROUGH_ROUTES +from litellm.llms.vertex_ai.vertex_llm_base import VertexBase from litellm.proxy._types import * from litellm.proxy.auth.route_checks import RouteChecks from 
litellm.proxy.auth.user_api_key_auth import user_api_key_auth +from litellm.proxy.pass_through_endpoints.common_utils import get_litellm_virtual_key from litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( create_pass_through_route, ) @@ -23,6 +26,7 @@ from litellm.secret_managers.main import get_secret_str from .passthrough_endpoint_router import PassthroughEndpointRouter +vertex_llm_base = VertexBase() router = APIRouter() default_vertex_config = None @@ -417,6 +421,135 @@ async def azure_proxy_route( ) +@router.api_route( + "/vertex-ai/{endpoint:path}", + methods=["GET", "POST", "PUT", "DELETE", "PATCH"], + tags=["Vertex AI Pass-through", "pass-through"], + include_in_schema=False, +) +@router.api_route( + "/vertex_ai/{endpoint:path}", + methods=["GET", "POST", "PUT", "DELETE", "PATCH"], + tags=["Vertex AI Pass-through", "pass-through"], +) +async def vertex_proxy_route( + endpoint: str, + request: Request, + fastapi_response: Response, +): + """ + Call LiteLLM proxy via Vertex AI SDK. + + [Docs](https://docs.litellm.ai/docs/pass_through/vertex_ai) + """ + from litellm.llms.vertex_ai.common_utils import ( + construct_target_url, + get_vertex_location_from_url, + get_vertex_project_id_from_url, + ) + + encoded_endpoint = httpx.URL(endpoint).path + verbose_proxy_logger.debug("requested endpoint %s", endpoint) + headers: dict = {} + api_key_to_use = get_litellm_virtual_key(request=request) + user_api_key_dict = await user_api_key_auth( + request=request, + api_key=api_key_to_use, + ) + + vertex_project: Optional[str] = get_vertex_project_id_from_url(endpoint) + vertex_location: Optional[str] = get_vertex_location_from_url(endpoint) + vertex_credentials = passthrough_endpoint_router.get_vertex_credentials( + project_id=vertex_project, + location=vertex_location, + ) + + if vertex_credentials is None: + raise Exception( + f"No matching vertex credentials found, for project_id: {vertex_project}, location: {vertex_location}. 
No default_vertex_config set either." + ) + + # Use headers from the incoming request if no vertex credentials are found + if vertex_credentials.vertex_project is None: + headers = dict(request.headers) or {} + verbose_proxy_logger.debug( + "default_vertex_config not set, incoming request headers %s", headers + ) + base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" + headers.pop("content-length", None) + headers.pop("host", None) + else: + vertex_project = vertex_credentials.vertex_project + vertex_location = vertex_credentials.vertex_location + vertex_credentials_str = vertex_credentials.vertex_credentials + + # Construct base URL for the target endpoint + base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" + + _auth_header, vertex_project = await vertex_llm_base._ensure_access_token_async( + credentials=vertex_credentials_str, + project_id=vertex_project, + custom_llm_provider="vertex_ai_beta", + ) + + auth_header, _ = vertex_llm_base._get_token_and_url( + model="", + auth_header=_auth_header, + gemini_api_key=None, + vertex_credentials=vertex_credentials_str, + vertex_project=vertex_project, + vertex_location=vertex_location, + stream=False, + custom_llm_provider="vertex_ai_beta", + api_base="", + ) + + headers = { + "Authorization": f"Bearer {auth_header}", + } + + request_route = encoded_endpoint + verbose_proxy_logger.debug("request_route %s", request_route) + + # Ensure endpoint starts with '/' for proper URL construction + if not encoded_endpoint.startswith("/"): + encoded_endpoint = "/" + encoded_endpoint + + # Construct the full target URL using httpx + updated_url = construct_target_url( + base_url=base_target_url, + requested_route=encoded_endpoint, + default_vertex_location=vertex_location, + default_vertex_project=vertex_project, + ) + # base_url = httpx.URL(base_target_url) + # updated_url = base_url.copy_with(path=encoded_endpoint) + + verbose_proxy_logger.debug("updated url %s", updated_url) + + ## check 
for streaming + target = str(updated_url) + is_streaming_request = False + if "stream" in str(updated_url): + is_streaming_request = True + target += "?alt=sse" + + ## CREATE PASS-THROUGH + endpoint_func = create_pass_through_route( + endpoint=endpoint, + target=target, + custom_headers=headers, + ) # dynamically construct pass-through endpoint based on incoming path + received_value = await endpoint_func( + request, + fastapi_response, + user_api_key_dict, + stream=is_streaming_request, # type: ignore + ) + + return received_value + + @router.api_route( "/openai/{endpoint:path}", methods=["GET", "POST", "PUT", "DELETE", "PATCH"], diff --git a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py index adf7d0f30c..5267c3b26c 100644 --- a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py +++ b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py @@ -2,6 +2,7 @@ from typing import Dict, Optional from litellm._logging import verbose_logger from litellm.secret_managers.main import get_secret_str +from litellm.types.passthrough_endpoints.vertex_ai import VertexPassThroughCredentials class PassthroughEndpointRouter: @@ -11,6 +12,9 @@ class PassthroughEndpointRouter: def __init__(self): self.credentials: Dict[str, str] = {} + self.deployment_key_to_vertex_credentials: Dict[ + str, VertexPassThroughCredentials + ] = {} def set_pass_through_credentials( self, @@ -62,6 +66,38 @@ class PassthroughEndpointRouter: ) return get_secret_str(_env_variable_name) + def _get_deployment_key( + self, project_id: Optional[str], location: Optional[str] + ) -> Optional[str]: + """ + Get the deployment key for the given project-id, location + """ + if project_id is None or location is None: + return None + return f"{project_id}-{location}" + + def get_vertex_credentials( + self, project_id: Optional[str], location: Optional[str] + ) -> Optional[VertexPassThroughCredentials]: 
+ """ + Get the vertex credentials for the given project-id, location + """ + # from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( + # default_vertex_config, + # ) + default_vertex_config: Optional[VertexPassThroughCredentials] = None + + deployment_key = self._get_deployment_key( + project_id=project_id, + location=location, + ) + if deployment_key is None: + return default_vertex_config + if deployment_key in self.deployment_key_to_vertex_credentials: + return self.deployment_key_to_vertex_credentials[deployment_key] + else: + return default_vertex_config + def _get_credential_name_for_provider( self, custom_llm_provider: str, diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py b/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py index 7444e3d1c1..4b706ed33a 100644 --- a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py +++ b/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py @@ -1,274 +1,259 @@ -import traceback -from typing import Optional +# import traceback +# from typing import Optional -import httpx -from fastapi import APIRouter, HTTPException, Request, Response, status +# import httpx +# from fastapi import APIRouter, HTTPException, Request, Response, status -import litellm -from litellm._logging import verbose_proxy_logger -from litellm.fine_tuning.main import vertex_fine_tuning_apis_instance -from litellm.proxy._types import * -from litellm.proxy.auth.user_api_key_auth import user_api_key_auth -from litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( - create_pass_through_route, -) -from litellm.secret_managers.main import get_secret_str -from litellm.types.passthrough_endpoints.vertex_ai import * +# import litellm +# from litellm._logging import verbose_proxy_logger +# from litellm.fine_tuning.main import vertex_fine_tuning_apis_instance +# from litellm.proxy._types import * +# from litellm.proxy.auth.user_api_key_auth import user_api_key_auth +# from 
litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( +# create_pass_through_route, +# ) +# from litellm.secret_managers.main import get_secret_str +# from litellm.types.passthrough_endpoints.vertex_ai import * -from .vertex_passthrough_router import VertexPassThroughRouter +# from .vertex_passthrough_router import VertexPassThroughRouter -router = APIRouter() -vertex_pass_through_router = VertexPassThroughRouter() +# router = APIRouter() +# vertex_pass_through_router = VertexPassThroughRouter() -default_vertex_config: VertexPassThroughCredentials = VertexPassThroughCredentials() +# default_vertex_config: Optional[VertexPassThroughCredentials] = None -def _get_vertex_env_vars() -> VertexPassThroughCredentials: - """ - Helper to get vertex pass through config from environment variables +# def _get_vertex_env_vars() -> VertexPassThroughCredentials: +# """ +# Helper to get vertex pass through config from environment variables - The following environment variables are used: - - DEFAULT_VERTEXAI_PROJECT (project id) - - DEFAULT_VERTEXAI_LOCATION (location) - - DEFAULT_GOOGLE_APPLICATION_CREDENTIALS (path to credentials file) - """ - return VertexPassThroughCredentials( - vertex_project=get_secret_str("DEFAULT_VERTEXAI_PROJECT"), - vertex_location=get_secret_str("DEFAULT_VERTEXAI_LOCATION"), - vertex_credentials=get_secret_str("DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"), - ) +# The following environment variables are used: +# - DEFAULT_VERTEXAI_PROJECT (project id) +# - DEFAULT_VERTEXAI_LOCATION (location) +# - DEFAULT_GOOGLE_APPLICATION_CREDENTIALS (path to credentials file) +# """ +# return VertexPassThroughCredentials( +# vertex_project=get_secret_str("DEFAULT_VERTEXAI_PROJECT"), +# vertex_location=get_secret_str("DEFAULT_VERTEXAI_LOCATION"), +# vertex_credentials=get_secret_str("DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"), +# ) -def set_default_vertex_config(config: Optional[dict] = None): - """Sets vertex configuration from provided config and/or 
environment variables +# def set_default_vertex_config(config: Optional[dict] = None): +# """Sets vertex configuration from provided config and/or environment variables - Args: - config (Optional[dict]): Configuration dictionary - Example: { - "vertex_project": "my-project-123", - "vertex_location": "us-central1", - "vertex_credentials": "os.environ/GOOGLE_CREDS" - } - """ - global default_vertex_config +# Args: +# config (Optional[dict]): Configuration dictionary +# Example: { +# "vertex_project": "my-project-123", +# "vertex_location": "us-central1", +# "vertex_credentials": "os.environ/GOOGLE_CREDS" +# } +# """ +# global default_vertex_config - # Initialize config dictionary if None - if config is None: - default_vertex_config = _get_vertex_env_vars() - return +# # Initialize config dictionary if None +# if config is None: +# default_vertex_config = _get_vertex_env_vars() +# return - if isinstance(config, dict): - for key, value in config.items(): - if isinstance(value, str) and value.startswith("os.environ/"): - config[key] = litellm.get_secret(value) +# if isinstance(config, dict): +# for key, value in config.items(): +# if isinstance(value, str) and value.startswith("os.environ/"): +# config[key] = litellm.get_secret(value) - _set_default_vertex_config(VertexPassThroughCredentials(**config)) +# _set_default_vertex_config(VertexPassThroughCredentials(**config)) -def _set_default_vertex_config( - vertex_pass_through_credentials: VertexPassThroughCredentials, -): - global default_vertex_config - default_vertex_config = vertex_pass_through_credentials +# def _set_default_vertex_config( +# vertex_pass_through_credentials: VertexPassThroughCredentials, +# ): +# global default_vertex_config +# default_vertex_config = vertex_pass_through_credentials -def exception_handler(e: Exception): - verbose_proxy_logger.error( - "litellm.proxy.proxy_server.v1/projects/tuningJobs(): Exception occurred - {}".format( - str(e) - ) - ) - 
verbose_proxy_logger.debug(traceback.format_exc()) - if isinstance(e, HTTPException): - return ProxyException( - message=getattr(e, "message", str(e.detail)), - type=getattr(e, "type", "None"), - param=getattr(e, "param", "None"), - code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST), - ) - else: - error_msg = f"{str(e)}" - return ProxyException( - message=getattr(e, "message", error_msg), - type=getattr(e, "type", "None"), - param=getattr(e, "param", "None"), - code=getattr(e, "status_code", 500), - ) +# def exception_handler(e: Exception): +# verbose_proxy_logger.error( +# "litellm.proxy.proxy_server.v1/projects/tuningJobs(): Exception occurred - {}".format( +# str(e) +# ) +# ) +# verbose_proxy_logger.debug(traceback.format_exc()) +# if isinstance(e, HTTPException): +# return ProxyException( +# message=getattr(e, "message", str(e.detail)), +# type=getattr(e, "type", "None"), +# param=getattr(e, "param", "None"), +# code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST), +# ) +# else: +# error_msg = f"{str(e)}" +# return ProxyException( +# message=getattr(e, "message", error_msg), +# type=getattr(e, "type", "None"), +# param=getattr(e, "param", "None"), +# code=getattr(e, "status_code", 500), +# ) -def construct_target_url( - base_url: str, - requested_route: str, - default_vertex_location: Optional[str], - default_vertex_project: Optional[str], -) -> httpx.URL: - """ - Allow user to specify their own project id / location. +# def construct_target_url( +# base_url: str, +# requested_route: str, +# default_vertex_location: Optional[str], +# default_vertex_project: Optional[str], +# ) -> httpx.URL: +# """ +# Allow user to specify their own project id / location. 
- If missing, use defaults +# If missing, use defaults - Handle cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460 +# Handle cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460 - Constructed Url: - POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents - """ - new_base_url = httpx.URL(base_url) - if "locations" in requested_route: # contains the target project id + location - updated_url = new_base_url.copy_with(path=requested_route) - return updated_url - """ - - Add endpoint version (e.g. v1beta for cachedContent, v1 for rest) - - Add default project id - - Add default location - """ - vertex_version: Literal["v1", "v1beta1"] = "v1" - if "cachedContent" in requested_route: - vertex_version = "v1beta1" +# Constructed Url: +# POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents +# """ +# new_base_url = httpx.URL(base_url) +# if "locations" in requested_route: # contains the target project id + location +# updated_url = new_base_url.copy_with(path=requested_route) +# return updated_url +# """ +# - Add endpoint version (e.g. 
v1beta for cachedContent, v1 for rest) +# - Add default project id +# - Add default location +# """ +# vertex_version: Literal["v1", "v1beta1"] = "v1" +# if "cachedContent" in requested_route: +# vertex_version = "v1beta1" - base_requested_route = "{}/projects/{}/locations/{}".format( - vertex_version, default_vertex_project, default_vertex_location - ) +# base_requested_route = "{}/projects/{}/locations/{}".format( +# vertex_version, default_vertex_project, default_vertex_location +# ) - updated_requested_route = "/" + base_requested_route + requested_route +# updated_requested_route = "/" + base_requested_route + requested_route - updated_url = new_base_url.copy_with(path=updated_requested_route) - return updated_url +# updated_url = new_base_url.copy_with(path=updated_requested_route) +# return updated_url -@router.api_route( - "/vertex-ai/{endpoint:path}", - methods=["GET", "POST", "PUT", "DELETE", "PATCH"], - tags=["Vertex AI Pass-through", "pass-through"], - include_in_schema=False, -) -@router.api_route( - "/vertex_ai/{endpoint:path}", - methods=["GET", "POST", "PUT", "DELETE", "PATCH"], - tags=["Vertex AI Pass-through", "pass-through"], -) -async def vertex_proxy_route( - endpoint: str, - request: Request, - fastapi_response: Response, -): - """ - Call LiteLLM proxy via Vertex AI SDK. +# @router.api_route( +# "/vertex-ai/{endpoint:path}", +# methods=["GET", "POST", "PUT", "DELETE", "PATCH"], +# tags=["Vertex AI Pass-through", "pass-through"], +# include_in_schema=False, +# ) +# @router.api_route( +# "/vertex_ai/{endpoint:path}", +# methods=["GET", "POST", "PUT", "DELETE", "PATCH"], +# tags=["Vertex AI Pass-through", "pass-through"], +# ) +# async def vertex_proxy_route( +# endpoint: str, +# request: Request, +# fastapi_response: Response, +# ): +# """ +# Call LiteLLM proxy via Vertex AI SDK. 
- [Docs](https://docs.litellm.ai/docs/pass_through/vertex_ai) - """ - encoded_endpoint = httpx.URL(endpoint).path - verbose_proxy_logger.debug("requested endpoint %s", endpoint) - headers: dict = {} - api_key_to_use = get_litellm_virtual_key(request=request) - user_api_key_dict = await user_api_key_auth( - request=request, - api_key=api_key_to_use, - ) +# [Docs](https://docs.litellm.ai/docs/pass_through/vertex_ai) +# """ +# encoded_endpoint = httpx.URL(endpoint).path +# verbose_proxy_logger.debug("requested endpoint %s", endpoint) +# headers: dict = {} +# api_key_to_use = get_litellm_virtual_key(request=request) +# user_api_key_dict = await user_api_key_auth( +# request=request, +# api_key=api_key_to_use, +# ) - vertex_project: Optional[str] = ( - VertexPassThroughRouter._get_vertex_project_id_from_url(endpoint) - ) - vertex_location: Optional[str] = ( - VertexPassThroughRouter._get_vertex_location_from_url(endpoint) - ) - vertex_credentials = vertex_pass_through_router.get_vertex_credentials( - project_id=vertex_project, - location=vertex_location, - ) +# vertex_project: Optional[str] = ( +# VertexPassThroughRouter._get_vertex_project_id_from_url(endpoint) +# ) +# vertex_location: Optional[str] = ( +# VertexPassThroughRouter._get_vertex_location_from_url(endpoint) +# ) +# vertex_credentials = vertex_pass_through_router.get_vertex_credentials( +# project_id=vertex_project, +# location=vertex_location, +# ) - # Use headers from the incoming request if no vertex credentials are found - if vertex_credentials.vertex_project is None: - headers = dict(request.headers) or {} - verbose_proxy_logger.debug( - "default_vertex_config not set, incoming request headers %s", headers - ) - base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" - headers.pop("content-length", None) - headers.pop("host", None) - else: - vertex_project = vertex_credentials.vertex_project - vertex_location = vertex_credentials.vertex_location - vertex_credentials_str = 
vertex_credentials.vertex_credentials +# # Use headers from the incoming request if no vertex credentials are found +# if vertex_credentials.vertex_project is None: +# headers = dict(request.headers) or {} +# verbose_proxy_logger.debug( +# "default_vertex_config not set, incoming request headers %s", headers +# ) +# base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" +# headers.pop("content-length", None) +# headers.pop("host", None) +# else: +# vertex_project = vertex_credentials.vertex_project +# vertex_location = vertex_credentials.vertex_location +# vertex_credentials_str = vertex_credentials.vertex_credentials - # Construct base URL for the target endpoint - base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" +# # Construct base URL for the target endpoint +# base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" - _auth_header, vertex_project = ( - await vertex_fine_tuning_apis_instance._ensure_access_token_async( - credentials=vertex_credentials_str, - project_id=vertex_project, - custom_llm_provider="vertex_ai_beta", - ) - ) +# _auth_header, vertex_project = ( +# await vertex_fine_tuning_apis_instance._ensure_access_token_async( +# credentials=vertex_credentials_str, +# project_id=vertex_project, +# custom_llm_provider="vertex_ai_beta", +# ) +# ) - auth_header, _ = vertex_fine_tuning_apis_instance._get_token_and_url( - model="", - auth_header=_auth_header, - gemini_api_key=None, - vertex_credentials=vertex_credentials_str, - vertex_project=vertex_project, - vertex_location=vertex_location, - stream=False, - custom_llm_provider="vertex_ai_beta", - api_base="", - ) +# auth_header, _ = vertex_fine_tuning_apis_instance._get_token_and_url( +# model="", +# auth_header=_auth_header, +# gemini_api_key=None, +# vertex_credentials=vertex_credentials_str, +# vertex_project=vertex_project, +# vertex_location=vertex_location, +# stream=False, +# custom_llm_provider="vertex_ai_beta", +# api_base="", +# ) - 
headers = { - "Authorization": f"Bearer {auth_header}", - } +# headers = { +# "Authorization": f"Bearer {auth_header}", +# } - request_route = encoded_endpoint - verbose_proxy_logger.debug("request_route %s", request_route) +# request_route = encoded_endpoint +# verbose_proxy_logger.debug("request_route %s", request_route) - # Ensure endpoint starts with '/' for proper URL construction - if not encoded_endpoint.startswith("/"): - encoded_endpoint = "/" + encoded_endpoint +# # Ensure endpoint starts with '/' for proper URL construction +# if not encoded_endpoint.startswith("/"): +# encoded_endpoint = "/" + encoded_endpoint - # Construct the full target URL using httpx - updated_url = construct_target_url( - base_url=base_target_url, - requested_route=encoded_endpoint, - default_vertex_location=vertex_location, - default_vertex_project=vertex_project, - ) - # base_url = httpx.URL(base_target_url) - # updated_url = base_url.copy_with(path=encoded_endpoint) +# # Construct the full target URL using httpx +# updated_url = construct_target_url( +# base_url=base_target_url, +# requested_route=encoded_endpoint, +# default_vertex_location=vertex_location, +# default_vertex_project=vertex_project, +# ) +# # base_url = httpx.URL(base_target_url) +# # updated_url = base_url.copy_with(path=encoded_endpoint) - verbose_proxy_logger.debug("updated url %s", updated_url) +# verbose_proxy_logger.debug("updated url %s", updated_url) - ## check for streaming - target = str(updated_url) - is_streaming_request = False - if "stream" in str(updated_url): - is_streaming_request = True - target += "?alt=sse" +# ## check for streaming +# target = str(updated_url) +# is_streaming_request = False +# if "stream" in str(updated_url): +# is_streaming_request = True +# target += "?alt=sse" - ## CREATE PASS-THROUGH - endpoint_func = create_pass_through_route( - endpoint=endpoint, - target=target, - custom_headers=headers, - ) # dynamically construct pass-through endpoint based on incoming path - 
received_value = await endpoint_func( - request, - fastapi_response, - user_api_key_dict, - stream=is_streaming_request, # type: ignore - ) +# ## CREATE PASS-THROUGH +# endpoint_func = create_pass_through_route( +# endpoint=endpoint, +# target=target, +# custom_headers=headers, +# ) # dynamically construct pass-through endpoint based on incoming path +# received_value = await endpoint_func( +# request, +# fastapi_response, +# user_api_key_dict, +# stream=is_streaming_request, # type: ignore +# ) - return received_value - - -def get_litellm_virtual_key(request: Request) -> str: - """ - Extract and format API key from request headers. - Prioritizes x-litellm-api-key over Authorization header. - - - Vertex JS SDK uses `Authorization` header, we use `x-litellm-api-key` to pass litellm virtual key - - """ - litellm_api_key = request.headers.get("x-litellm-api-key") - if litellm_api_key: - return f"Bearer {litellm_api_key}" - return request.headers.get("Authorization", "") +# return received_value diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py b/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py index 0273a62047..5017a8f661 100644 --- a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py +++ b/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py @@ -1,121 +1,108 @@ -import json -import re -from typing import Dict, Optional +# import json +# import re +# from typing import Dict, Optional -from litellm._logging import verbose_proxy_logger -from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - VertexPassThroughCredentials, -) -from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES +# from litellm._logging import verbose_proxy_logger +# from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( +# VertexPassThroughCredentials, +# ) +# from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES -class VertexPassThroughRouter: - """ - Vertex Pass Through Router for Vertex AI 
pass-through endpoints +# class VertexPassThroughRouter: +# """ +# Vertex Pass Through Router for Vertex AI pass-through endpoints - - if request specifies a project-id, location -> use credentials corresponding to the project-id, location - - if request does not specify a project-id, location -> use credentials corresponding to the DEFAULT_VERTEXAI_PROJECT, DEFAULT_VERTEXAI_LOCATION - """ +# - if request specifies a project-id, location -> use credentials corresponding to the project-id, location +# - if request does not specify a project-id, location -> use credentials corresponding to the DEFAULT_VERTEXAI_PROJECT, DEFAULT_VERTEXAI_LOCATION +# """ - def __init__(self): - """ - Initialize the VertexPassThroughRouter - Stores the vertex credentials for each deployment key - ``` - { - "project_id-location": VertexPassThroughCredentials, - "adroit-crow-us-central1": VertexPassThroughCredentials, - } - ``` - """ - self.deployment_key_to_vertex_credentials: Dict[ - str, VertexPassThroughCredentials - ] = {} - pass +# def __init__(self): +# """ +# Initialize the VertexPassThroughRouter +# Stores the vertex credentials for each deployment key +# ``` +# { +# "project_id-location": VertexPassThroughCredentials, +# "adroit-crow-us-central1": VertexPassThroughCredentials, +# } +# ``` +# """ +# self.deployment_key_to_vertex_credentials: Dict[ +# str, VertexPassThroughCredentials +# ] = {} +# pass - def get_vertex_credentials( - self, project_id: Optional[str], location: Optional[str] - ) -> VertexPassThroughCredentials: - """ - Get the vertex credentials for the given project-id, location - """ - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - default_vertex_config, - ) +# def get_vertex_credentials( +# self, project_id: Optional[str], location: Optional[str] +# ) -> Optional[VertexPassThroughCredentials]: +# """ +# Get the vertex credentials for the given project-id, location +# """ +# from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( +# 
default_vertex_config, +# ) - deployment_key = self._get_deployment_key( - project_id=project_id, - location=location, - ) - if deployment_key is None: - return default_vertex_config - if deployment_key in self.deployment_key_to_vertex_credentials: - return self.deployment_key_to_vertex_credentials[deployment_key] - else: - return default_vertex_config +# deployment_key = self._get_deployment_key( +# project_id=project_id, +# location=location, +# ) +# if deployment_key is None: +# return default_vertex_config +# if deployment_key in self.deployment_key_to_vertex_credentials: +# return self.deployment_key_to_vertex_credentials[deployment_key] +# else: +# return default_vertex_config - def add_vertex_credentials( - self, - project_id: str, - location: str, - vertex_credentials: VERTEX_CREDENTIALS_TYPES, - ): - """ - Add the vertex credentials for the given project-id, location - """ - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - _set_default_vertex_config, - ) +# def add_vertex_credentials( +# self, +# project_id: str, +# location: str, +# vertex_credentials: VERTEX_CREDENTIALS_TYPES, +# ): +# """ +# Add the vertex credentials for the given project-id, location +# """ - deployment_key = self._get_deployment_key( - project_id=project_id, - location=location, - ) - if deployment_key is None: - verbose_proxy_logger.debug( - "No deployment key found for project-id, location" - ) - return - vertex_pass_through_credentials = VertexPassThroughCredentials( - vertex_project=project_id, - vertex_location=location, - vertex_credentials=vertex_credentials, - ) - self.deployment_key_to_vertex_credentials[deployment_key] = ( - vertex_pass_through_credentials - ) - verbose_proxy_logger.debug( - f"self.deployment_key_to_vertex_credentials: {json.dumps(self.deployment_key_to_vertex_credentials, indent=4, default=str)}" - ) - _set_default_vertex_config(vertex_pass_through_credentials) +# deployment_key = self._get_deployment_key( +# project_id=project_id, +# 
location=location, +# ) +# if deployment_key is None: +# verbose_proxy_logger.debug( +# "No deployment key found for project-id, location" +# ) +# return +# vertex_pass_through_credentials = VertexPassThroughCredentials( +# vertex_project=project_id, +# vertex_location=location, +# vertex_credentials=vertex_credentials, +# ) +# self.deployment_key_to_vertex_credentials[deployment_key] = ( +# vertex_pass_through_credentials +# ) +# verbose_proxy_logger.debug( +# f"self.deployment_key_to_vertex_credentials: {json.dumps(self.deployment_key_to_vertex_credentials, indent=4, default=str)}" +# ) - def _get_deployment_key( - self, project_id: Optional[str], location: Optional[str] - ) -> Optional[str]: - """ - Get the deployment key for the given project-id, location - """ - if project_id is None or location is None: - return None - return f"{project_id}-{location}" - @staticmethod - def _get_vertex_project_id_from_url(url: str) -> Optional[str]: - """ - Get the vertex project id from the url +# @staticmethod +# def _get_vertex_project_id_from_url(url: str) -> Optional[str]: +# """ +# Get the vertex project id from the url - `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` - """ - match = re.search(r"/projects/([^/]+)", url) - return match.group(1) if match else None +# `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` +# """ +# match = re.search(r"/projects/([^/]+)", url) +# return match.group(1) if match else None - @staticmethod - def _get_vertex_location_from_url(url: str) -> Optional[str]: - """ - Get the vertex location from the url +# @staticmethod +# def _get_vertex_location_from_url(url: str) -> Optional[str]: +# """ +# Get the vertex location from the url - 
`https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` - """ - match = re.search(r"/locations/([^/]+)", url) - return match.group(1) if match else None +# `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` +# """ +# match = re.search(r"/locations/([^/]+)", url) +# return match.group(1) if match else None diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index 2f5ce85de7..48cb60968b 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -1,6 +1,7 @@ import json import os import sys +from unittest import mock from unittest.mock import MagicMock, patch import httpx @@ -17,7 +18,9 @@ from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( BaseOpenAIPassThroughHandler, RouteChecks, create_pass_through_route, + vertex_proxy_route, ) +from litellm.types.passthrough_endpoints.vertex_ai import VertexPassThroughCredentials class TestBaseOpenAIPassThroughHandler: @@ -176,3 +179,72 @@ class TestBaseOpenAIPassThroughHandler: print(f"query_params: {call_kwargs['query_params']}") assert call_kwargs["stream"] is False assert call_kwargs["query_params"] == {"model": "gpt-4"} + + +class TestVertexAIPassThroughHandler: + """ + Case 1: User set passthrough credentials - confirm credentials used. + + Case 2: User set default credentials, no exact passthrough credentials - confirm default credentials used. + + Case 3: No default credentials, incorrect project/base passed - confirm no credentials used. 
+ """ + + @pytest.mark.asyncio + async def test_vertex_passthrough_with_credentials(self): + """ + Test that when passthrough credentials are set, they are correctly used in the request + """ + # Mock request + mock_request = Request( + scope={ + "type": "http", + "method": "POST", + "path": "/vertex_ai/models/test-model/predict", + "headers": {}, + } + ) + + # Mock response + mock_response = Response() + + # Mock vertex credentials + test_project = "test-project" + test_location = "us-central1" + test_token = "test-token-123" + + with mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.passthrough_endpoint_router.get_vertex_credentials" + ) as mock_get_creds, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._ensure_access_token_async" + ) as mock_ensure_token, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._get_token_and_url" + ) as mock_get_token, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.create_pass_through_route" + ) as mock_create_route: + + # Setup mock returns + mock_get_creds.return_value = VertexPassThroughCredentials( + vertex_project=test_project, + vertex_location=test_location, + vertex_credentials="test-creds", + ) + mock_ensure_token.return_value = ("test-auth-header", test_project) + mock_get_token.return_value = (test_token, "") + + # Call the route + try: + await vertex_proxy_route( + endpoint="models/test-model/predict", + request=mock_request, + fastapi_response=mock_response, + ) + except Exception as e: + print(f"Error: {e}") + + # Verify create_pass_through_route was called with correct arguments + mock_create_route.assert_called_once_with( + endpoint="models/test-model/predict", + target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/models/test-model/predict", + custom_headers={"Authorization": f"Bearer {test_token}"}, + ) diff 
--git a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py b/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py index ba5dfa33a8..9b354a84c9 100644 --- a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py +++ b/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py @@ -218,6 +218,29 @@ async def test_get_vertex_credentials_stored(): assert creds.vertex_credentials == '{"credentials": "test-creds"}' +@pytest.mark.asyncio +async def test_default_credentials(): + """ + Test get_vertex_credentials with stored credentials. + + Tests if default credentials are used if set. + + Tests if no default credentials are used, if no default set + """ + router = VertexPassThroughRouter() + router.add_vertex_credentials( + project_id="test-project", + location="us-central1", + vertex_credentials='{"credentials": "test-creds"}', + ) + + creds = router.get_vertex_credentials( + project_id="test-project", location="us-central2" + ) + + assert creds is None + + @pytest.mark.asyncio async def test_add_vertex_credentials(): """Test add_vertex_credentials functionality""" From 69da0ed3b543820b4f7bd0f4ce26d51f23469b51 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 10:43:35 -0700 Subject: [PATCH 089/119] feat - add openai web search --- .../convert_dict_to_response.py | 1 + litellm/types/llms/openai.py | 22 +++++++++++++++++++ litellm/types/utils.py | 7 ++++++ 3 files changed, 30 insertions(+) diff --git a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py index ebb1032a19..d33af2a477 100644 --- a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py +++ b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py @@ -494,6 +494,7 @@ def convert_to_model_response_object( # noqa: PLR0915 provider_specific_fields=provider_specific_fields, reasoning_content=reasoning_content, 
thinking_blocks=thinking_blocks, + annotations=choice["message"].get("annotations", None), ) finish_reason = choice.get("finish_reason", None) if finish_reason is None: diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py index 4b0be9d5fe..e58f573227 100644 --- a/litellm/types/llms/openai.py +++ b/litellm/types/llms/openai.py @@ -382,6 +382,28 @@ class ChatCompletionThinkingBlock(TypedDict, total=False): cache_control: Optional[Union[dict, ChatCompletionCachedContent]] +class ChatCompletionAnnotationURLCitation(TypedDict, total=False): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + url: str + """The URL of the web resource.""" + + +class ChatCompletionAnnotation(TypedDict, total=False): + type: Literal["url_citation"] + """The type of the URL citation. Always `url_citation`.""" + + url_citation: ChatCompletionAnnotationURLCitation + """A URL citation when using web search.""" + + class OpenAIChatCompletionTextObject(TypedDict): type: Literal["text"] text: str diff --git a/litellm/types/utils.py b/litellm/types/utils.py index a665428561..58c78dfa29 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -7,6 +7,7 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union from aiohttp import FormData from openai._models import BaseModel as OpenAIObject from openai.types.audio.transcription_create_params import FileTypes # type: ignore +from openai.types.chat.chat_completion import ChatCompletion from openai.types.completion_usage import ( CompletionTokensDetails, CompletionUsage, @@ -27,6 +28,7 @@ from ..litellm_core_utils.core_helpers import map_finish_reason from .guardrails import GuardrailEventHooks from .llms.openai import ( Batch, + ChatCompletionAnnotation, ChatCompletionThinkingBlock, ChatCompletionToolCallChunk, 
ChatCompletionUsageBlock, @@ -527,6 +529,7 @@ class Message(OpenAIObject): provider_specific_fields: Optional[Dict[str, Any]] = Field( default=None, exclude=True ) + annotations: Optional[List[ChatCompletionAnnotation]] = None def __init__( self, @@ -538,6 +541,7 @@ class Message(OpenAIObject): provider_specific_fields: Optional[Dict[str, Any]] = None, reasoning_content: Optional[str] = None, thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None, + annotations: Optional[List[ChatCompletionAnnotation]] = None, **params, ): init_values: Dict[str, Any] = { @@ -566,6 +570,9 @@ class Message(OpenAIObject): if thinking_blocks is not None: init_values["thinking_blocks"] = thinking_blocks + if annotations is not None: + init_values["annotations"] = annotations + if reasoning_content is not None: init_values["reasoning_content"] = reasoning_content From 2e7bca74d9a01082adb2d95132cbb5a9845483f6 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 10:44:04 -0700 Subject: [PATCH 090/119] test open ai web search --- tests/llm_translation/test_openai.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/llm_translation/test_openai.py b/tests/llm_translation/test_openai.py index 172c946636..456190469a 100644 --- a/tests/llm_translation/test_openai.py +++ b/tests/llm_translation/test_openai.py @@ -391,3 +391,17 @@ def test_openai_chat_completion_streaming_handler_reasoning_content(): ) assert response.choices[0].delta.reasoning_content == "." 
+ + +def test_openai_web_search(): + # litellm._turn_on_debug() + response = litellm.completion( + model="openai/gpt-4o-search-preview", + messages=[ + { + "role": "user", + "content": "What was a positive news story from today?", + } + ], + ) + print("litellm response: ", response.model_dump_json(indent=4)) From 583c2e19d0a0ad3577acdd7284cc6c0bd593e90e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 10:53:47 -0700 Subject: [PATCH 091/119] test_openai_web_search --- tests/llm_translation/test_openai.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/tests/llm_translation/test_openai.py b/tests/llm_translation/test_openai.py index 456190469a..97af8d93e4 100644 --- a/tests/llm_translation/test_openai.py +++ b/tests/llm_translation/test_openai.py @@ -17,6 +17,10 @@ import litellm from litellm import Choices, Message, ModelResponse from base_llm_unit_tests import BaseLLMChatTest import asyncio +from litellm.types.llms.openai import ( + ChatCompletionAnnotation, + ChatCompletionAnnotationURLCitation, +) def test_openai_prediction_param(): @@ -393,8 +397,28 @@ def test_openai_chat_completion_streaming_handler_reasoning_content(): assert response.choices[0].delta.reasoning_content == "." 
+def validate_response_url_citation(url_citation: ChatCompletionAnnotationURLCitation): + assert "end_index" in url_citation + assert "start_index" in url_citation + assert "url" in url_citation + + +def validate_model_response_contains_web_search_annotations(response: ModelResponse): + """validates litellm response contains web search annotations""" + message = response.choices[0].message + annotations: ChatCompletionAnnotation = message.annotations + print("annotations: ", annotations) + assert annotations is not None + assert isinstance(annotations, list) + for annotation in annotations: + assert annotation["type"] == "url_citation" + url_citation: ChatCompletionAnnotationURLCitation = annotation["url_citation"] + validate_response_url_citation(url_citation) + + def test_openai_web_search(): - # litellm._turn_on_debug() + """Makes a simple web search request and validates the response contains web search annotations and all expected fields are present""" + litellm._turn_on_debug() response = litellm.completion( model="openai/gpt-4o-search-preview", messages=[ @@ -405,3 +429,4 @@ def test_openai_web_search(): ], ) print("litellm response: ", response.model_dump_json(indent=4)) + validate_model_response_contains_web_search_annotations(response) From 0145ebd343ee429a2f637697fc246c5d1847c225 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 11:06:52 -0700 Subject: [PATCH 092/119] feat(llm_passthrough_endpoints.py): base case passing for refactored vertex passthrough route --- .../llm_passthrough_endpoints.py | 1 - .../passthrough_endpoint_router.py | 80 +++++++++++++++++-- litellm/proxy/proxy_server.py | 10 ++- .../vertex_ai_endpoints/vertex_endpoints.py | 42 ---------- .../vertex_passthrough_router.py | 31 ------- litellm/router.py | 14 ++-- .../test_llm_pass_through_endpoints.py | 48 +++++++---- 7 files changed, 115 insertions(+), 111 deletions(-) diff --git a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py 
b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py index be3a903dcc..24ab08c167 100644 --- a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py @@ -456,7 +456,6 @@ async def vertex_proxy_route( request=request, api_key=api_key_to_use, ) - vertex_project: Optional[str] = get_vertex_project_id_from_url(endpoint) vertex_location: Optional[str] = get_vertex_location_from_url(endpoint) vertex_credentials = passthrough_endpoint_router.get_vertex_credentials( diff --git a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py index 5267c3b26c..897faa1717 100644 --- a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py +++ b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py @@ -1,7 +1,8 @@ from typing import Dict, Optional -from litellm._logging import verbose_logger +from litellm._logging import verbose_router_logger from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES from litellm.types.passthrough_endpoints.vertex_ai import VertexPassThroughCredentials @@ -15,6 +16,7 @@ class PassthroughEndpointRouter: self.deployment_key_to_vertex_credentials: Dict[ str, VertexPassThroughCredentials ] = {} + self.default_vertex_config: Optional[VertexPassThroughCredentials] = None def set_pass_through_credentials( self, @@ -49,14 +51,14 @@ class PassthroughEndpointRouter: custom_llm_provider=custom_llm_provider, region_name=region_name, ) - verbose_logger.debug( + verbose_router_logger.debug( f"Pass-through llm endpoints router, looking for credentials for {credential_name}" ) if credential_name in self.credentials: - verbose_logger.debug(f"Found credentials for {credential_name}") + verbose_router_logger.debug(f"Found credentials for {credential_name}") return self.credentials[credential_name] else: 
- verbose_logger.debug( + verbose_router_logger.debug( f"No credentials found for {credential_name}, looking for env variable" ) _env_variable_name = ( @@ -66,6 +68,72 @@ class PassthroughEndpointRouter: ) return get_secret_str(_env_variable_name) + def _get_vertex_env_vars(self) -> VertexPassThroughCredentials: + """ + Helper to get vertex pass through config from environment variables + + The following environment variables are used: + - DEFAULT_VERTEXAI_PROJECT (project id) + - DEFAULT_VERTEXAI_LOCATION (location) + - DEFAULT_GOOGLE_APPLICATION_CREDENTIALS (path to credentials file) + """ + return VertexPassThroughCredentials( + vertex_project=get_secret_str("DEFAULT_VERTEXAI_PROJECT"), + vertex_location=get_secret_str("DEFAULT_VERTEXAI_LOCATION"), + vertex_credentials=get_secret_str("DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"), + ) + + def set_default_vertex_config(self, config: Optional[dict] = None): + """Sets vertex configuration from provided config and/or environment variables + + Args: + config (Optional[dict]): Configuration dictionary + Example: { + "vertex_project": "my-project-123", + "vertex_location": "us-central1", + "vertex_credentials": "os.environ/GOOGLE_CREDS" + } + """ + # Initialize config dictionary if None + if config is None: + self.default_vertex_config = self._get_vertex_env_vars() + return + + if isinstance(config, dict): + for key, value in config.items(): + if isinstance(value, str) and value.startswith("os.environ/"): + config[key] = get_secret_str(value) + + self.default_vertex_config = VertexPassThroughCredentials(**config) + + def add_vertex_credentials( + self, + project_id: str, + location: str, + vertex_credentials: VERTEX_CREDENTIALS_TYPES, + ): + """ + Add the vertex credentials for the given project-id, location + """ + + deployment_key = self._get_deployment_key( + project_id=project_id, + location=location, + ) + if deployment_key is None: + verbose_router_logger.debug( + "No deployment key found for project-id, location" + ) 
+ return + vertex_pass_through_credentials = VertexPassThroughCredentials( + vertex_project=project_id, + vertex_location=location, + vertex_credentials=vertex_credentials, + ) + self.deployment_key_to_vertex_credentials[deployment_key] = ( + vertex_pass_through_credentials + ) + def _get_deployment_key( self, project_id: Optional[str], location: Optional[str] ) -> Optional[str]: @@ -82,15 +150,13 @@ class PassthroughEndpointRouter: """ Get the vertex credentials for the given project-id, location """ - # from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - # default_vertex_config, - # ) default_vertex_config: Optional[VertexPassThroughCredentials] = None deployment_key = self._get_deployment_key( project_id=project_id, location=location, ) + if deployment_key is None: return default_vertex_config if deployment_key in self.deployment_key_to_vertex_credentials: diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index ae1c8d18af..ee2a906200 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -235,6 +235,9 @@ from litellm.proxy.openai_files_endpoints.files_endpoints import ( router as openai_files_router, ) from litellm.proxy.openai_files_endpoints.files_endpoints import set_files_config +from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + passthrough_endpoint_router, +) from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( router as llm_passthrough_router, ) @@ -272,8 +275,6 @@ from litellm.proxy.utils import ( from litellm.proxy.vertex_ai_endpoints.langfuse_endpoints import ( router as langfuse_router, ) -from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import router as vertex_router -from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import set_default_vertex_config from litellm.router import ( AssistantsTypedDict, Deployment, @@ -2115,7 +2116,9 @@ class ProxyConfig: ## default config for vertex ai routes default_vertex_config = 
config.get("default_vertex_config", None) - set_default_vertex_config(config=default_vertex_config) + passthrough_endpoint_router.set_default_vertex_config( + config=default_vertex_config + ) ## ROUTER SETTINGS (e.g. routing_strategy, ...) router_settings = config.get("router_settings", None) @@ -8161,7 +8164,6 @@ app.include_router(batches_router) app.include_router(rerank_router) app.include_router(fine_tuning_router) app.include_router(credential_router) -app.include_router(vertex_router) app.include_router(llm_passthrough_router) app.include_router(anthropic_router) app.include_router(langfuse_router) diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py b/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py index 4b706ed33a..6243fe79b4 100644 --- a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py +++ b/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py @@ -23,48 +23,6 @@ # default_vertex_config: Optional[VertexPassThroughCredentials] = None -# def _get_vertex_env_vars() -> VertexPassThroughCredentials: -# """ -# Helper to get vertex pass through config from environment variables - -# The following environment variables are used: -# - DEFAULT_VERTEXAI_PROJECT (project id) -# - DEFAULT_VERTEXAI_LOCATION (location) -# - DEFAULT_GOOGLE_APPLICATION_CREDENTIALS (path to credentials file) -# """ -# return VertexPassThroughCredentials( -# vertex_project=get_secret_str("DEFAULT_VERTEXAI_PROJECT"), -# vertex_location=get_secret_str("DEFAULT_VERTEXAI_LOCATION"), -# vertex_credentials=get_secret_str("DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"), -# ) - - -# def set_default_vertex_config(config: Optional[dict] = None): -# """Sets vertex configuration from provided config and/or environment variables - -# Args: -# config (Optional[dict]): Configuration dictionary -# Example: { -# "vertex_project": "my-project-123", -# "vertex_location": "us-central1", -# "vertex_credentials": "os.environ/GOOGLE_CREDS" -# } -# """ -# global default_vertex_config - -# # 
Initialize config dictionary if None -# if config is None: -# default_vertex_config = _get_vertex_env_vars() -# return - -# if isinstance(config, dict): -# for key, value in config.items(): -# if isinstance(value, str) and value.startswith("os.environ/"): -# config[key] = litellm.get_secret(value) - -# _set_default_vertex_config(VertexPassThroughCredentials(**config)) - - # def _set_default_vertex_config( # vertex_pass_through_credentials: VertexPassThroughCredentials, # ): diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py b/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py index 5017a8f661..dd17d49b8f 100644 --- a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py +++ b/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py @@ -55,37 +55,6 @@ # else: # return default_vertex_config -# def add_vertex_credentials( -# self, -# project_id: str, -# location: str, -# vertex_credentials: VERTEX_CREDENTIALS_TYPES, -# ): -# """ -# Add the vertex credentials for the given project-id, location -# """ - -# deployment_key = self._get_deployment_key( -# project_id=project_id, -# location=location, -# ) -# if deployment_key is None: -# verbose_proxy_logger.debug( -# "No deployment key found for project-id, location" -# ) -# return -# vertex_pass_through_credentials = VertexPassThroughCredentials( -# vertex_project=project_id, -# vertex_location=location, -# vertex_credentials=vertex_credentials, -# ) -# self.deployment_key_to_vertex_credentials[deployment_key] = ( -# vertex_pass_through_credentials -# ) -# verbose_proxy_logger.debug( -# f"self.deployment_key_to_vertex_credentials: {json.dumps(self.deployment_key_to_vertex_credentials, indent=4, default=str)}" -# ) - # @staticmethod # def _get_vertex_project_id_from_url(url: str) -> Optional[str]: diff --git a/litellm/router.py b/litellm/router.py index a395c851dd..af7b00e79d 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -4495,11 +4495,11 @@ class Router: Each 
provider uses diff .env vars for pass-through endpoints, this helper uses the deployment credentials to set the .env vars for pass-through endpoints """ if deployment.litellm_params.use_in_pass_through is True: - if custom_llm_provider == "vertex_ai": - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - vertex_pass_through_router, - ) + from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + passthrough_endpoint_router, + ) + if custom_llm_provider == "vertex_ai": if ( deployment.litellm_params.vertex_project is None or deployment.litellm_params.vertex_location is None @@ -4508,16 +4508,12 @@ class Router: raise ValueError( "vertex_project, vertex_location, and vertex_credentials must be set in litellm_params for pass-through endpoints" ) - vertex_pass_through_router.add_vertex_credentials( + passthrough_endpoint_router.add_vertex_credentials( project_id=deployment.litellm_params.vertex_project, location=deployment.litellm_params.vertex_location, vertex_credentials=deployment.litellm_params.vertex_credentials, ) else: - from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( - passthrough_endpoint_router, - ) - passthrough_endpoint_router.set_pass_through_credentials( custom_llm_provider=custom_llm_provider, api_base=deployment.litellm_params.api_base, diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index 48cb60968b..8f8fbbe9de 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -191,16 +191,39 @@ class TestVertexAIPassThroughHandler: """ @pytest.mark.asyncio - async def test_vertex_passthrough_with_credentials(self): + async def test_vertex_passthrough_with_credentials(self, monkeypatch): """ Test that when passthrough credentials are set, they are correctly 
used in the request """ + from litellm.proxy.pass_through_endpoints.passthrough_endpoint_router import ( + PassthroughEndpointRouter, + ) + + vertex_project = "test-project" + vertex_location = "us-central1" + vertex_credentials = "test-creds" + + pass_through_router = PassthroughEndpointRouter() + + pass_through_router.add_vertex_credentials( + project_id=vertex_project, + location=vertex_location, + vertex_credentials=vertex_credentials, + ) + + monkeypatch.setattr( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.passthrough_endpoint_router", + pass_through_router, + ) + + endpoint = f"/v1/projects/{vertex_project}/locations/{vertex_location}/publishers/google/models/gemini-1.5-flash:generateContent" + # Mock request mock_request = Request( scope={ "type": "http", "method": "POST", - "path": "/vertex_ai/models/test-model/predict", + "path": endpoint, "headers": {}, } ) @@ -209,33 +232,24 @@ class TestVertexAIPassThroughHandler: mock_response = Response() # Mock vertex credentials - test_project = "test-project" - test_location = "us-central1" - test_token = "test-token-123" + test_project = vertex_project + test_location = vertex_location + test_token = vertex_credentials with mock.patch( - "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.passthrough_endpoint_router.get_vertex_credentials" - ) as mock_get_creds, mock.patch( "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._ensure_access_token_async" ) as mock_ensure_token, mock.patch( "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._get_token_and_url" ) as mock_get_token, mock.patch( "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.create_pass_through_route" ) as mock_create_route: - - # Setup mock returns - mock_get_creds.return_value = VertexPassThroughCredentials( - vertex_project=test_project, - vertex_location=test_location, - vertex_credentials="test-creds", - ) mock_ensure_token.return_value = 
("test-auth-header", test_project) mock_get_token.return_value = (test_token, "") # Call the route try: await vertex_proxy_route( - endpoint="models/test-model/predict", + endpoint=endpoint, request=mock_request, fastapi_response=mock_response, ) @@ -244,7 +258,7 @@ class TestVertexAIPassThroughHandler: # Verify create_pass_through_route was called with correct arguments mock_create_route.assert_called_once_with( - endpoint="models/test-model/predict", - target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/models/test-model/predict", + endpoint=endpoint, + target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/publishers/google/models/gemini-1.5-flash:generateContent", custom_headers={"Authorization": f"Bearer {test_token}"}, ) From 851c99306360b86b654d6b6c9397ad278244a000 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 11:32:01 -0700 Subject: [PATCH 093/119] fix(vertex_ai/common_utils.py): fix handling constructed url with default vertex config --- litellm/llms/vertex_ai/common_utils.py | 29 ++++++-- .../llm_passthrough_endpoints.py | 11 +-- .../passthrough_endpoint_router.py | 6 +- .../test_llm_pass_through_endpoints.py | 69 +++++++++++++++++++ 4 files changed, 98 insertions(+), 17 deletions(-) diff --git a/litellm/llms/vertex_ai/common_utils.py b/litellm/llms/vertex_ai/common_utils.py index 4a4c428941..a3f91fbacc 100644 --- a/litellm/llms/vertex_ai/common_utils.py +++ b/litellm/llms/vertex_ai/common_utils.py @@ -303,11 +303,26 @@ def get_vertex_location_from_url(url: str) -> Optional[str]: return match.group(1) if match else None +def replace_project_and_location_in_route( + requested_route: str, vertex_project: str, vertex_location: str +) -> str: + """ + Replace project and location values in the route with the provided values + """ + # Replace project and location values while keeping route structure + modified_route = 
re.sub( + r"/projects/[^/]+/locations/[^/]+/", + f"/projects/{vertex_project}/locations/{vertex_location}/", + requested_route, + ) + return modified_route + + def construct_target_url( base_url: str, requested_route: str, - default_vertex_location: Optional[str], - default_vertex_project: Optional[str], + vertex_location: Optional[str], + vertex_project: Optional[str], ) -> httpx.URL: """ Allow user to specify their own project id / location. @@ -321,8 +336,12 @@ def construct_target_url( """ new_base_url = httpx.URL(base_url) if "locations" in requested_route: # contains the target project id + location - updated_url = new_base_url.copy_with(path=requested_route) - return updated_url + if vertex_project and vertex_location: + requested_route = replace_project_and_location_in_route( + requested_route, vertex_project, vertex_location + ) + return new_base_url.copy_with(path=requested_route) + """ - Add endpoint version (e.g. v1beta for cachedContent, v1 for rest) - Add default project id @@ -333,7 +352,7 @@ def construct_target_url( vertex_version = "v1beta1" base_requested_route = "{}/projects/{}/locations/{}".format( - vertex_version, default_vertex_project, default_vertex_location + vertex_version, vertex_project, vertex_location ) updated_requested_route = "/" + base_requested_route + requested_route diff --git a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py index 24ab08c167..0fae1e6f0b 100644 --- a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py +++ b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py @@ -463,13 +463,8 @@ async def vertex_proxy_route( location=vertex_location, ) - if vertex_credentials is None: - raise Exception( - f"No matching vertex credentials found, for project_id: {vertex_project}, location: {vertex_location}. No default_vertex_config set either." 
- ) - # Use headers from the incoming request if no vertex credentials are found - if vertex_credentials.vertex_project is None: + if vertex_credentials is None or vertex_credentials.vertex_project is None: headers = dict(request.headers) or {} verbose_proxy_logger.debug( "default_vertex_config not set, incoming request headers %s", headers @@ -518,8 +513,8 @@ async def vertex_proxy_route( updated_url = construct_target_url( base_url=base_target_url, requested_route=encoded_endpoint, - default_vertex_location=vertex_location, - default_vertex_project=vertex_project, + vertex_location=vertex_location, + vertex_project=vertex_project, ) # base_url = httpx.URL(base_target_url) # updated_url = base_url.copy_with(path=encoded_endpoint) diff --git a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py index 897faa1717..89cccfc071 100644 --- a/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py +++ b/litellm/proxy/pass_through_endpoints/passthrough_endpoint_router.py @@ -150,19 +150,17 @@ class PassthroughEndpointRouter: """ Get the vertex credentials for the given project-id, location """ - default_vertex_config: Optional[VertexPassThroughCredentials] = None - deployment_key = self._get_deployment_key( project_id=project_id, location=location, ) if deployment_key is None: - return default_vertex_config + return self.default_vertex_config if deployment_key in self.deployment_key_to_vertex_credentials: return self.deployment_key_to_vertex_credentials[deployment_key] else: - return default_vertex_config + return self.default_vertex_config def _get_credential_name_for_provider( self, diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index 8f8fbbe9de..ea5b908796 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ 
b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -1,6 +1,7 @@ import json import os import sys +import traceback from unittest import mock from unittest.mock import MagicMock, patch @@ -262,3 +263,71 @@ class TestVertexAIPassThroughHandler: target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/publishers/google/models/gemini-1.5-flash:generateContent", custom_headers={"Authorization": f"Bearer {test_token}"}, ) + + @pytest.mark.asyncio + async def test_vertex_passthrough_with_default_credentials(self, monkeypatch): + """ + Test that when no passthrough credentials are set, default credentials are used in the request + """ + from litellm.proxy.pass_through_endpoints.passthrough_endpoint_router import ( + PassthroughEndpointRouter, + ) + + # Setup default credentials + default_project = "default-project" + default_location = "us-central1" + default_credentials = "default-creds" + + pass_through_router = PassthroughEndpointRouter() + pass_through_router.default_vertex_config = VertexPassThroughCredentials( + vertex_project=default_project, + vertex_location=default_location, + vertex_credentials=default_credentials, + ) + + monkeypatch.setattr( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.passthrough_endpoint_router", + pass_through_router, + ) + + # Use different project/location in request than the default + request_project = "non-existing-project" + request_location = "bad-location" + endpoint = f"/v1/projects/{request_project}/locations/{request_location}/publishers/google/models/gemini-1.5-flash:generateContent" + + mock_request = Request( + scope={ + "type": "http", + "method": "POST", + "path": endpoint, + "headers": {}, + } + ) + mock_response = Response() + + with mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._ensure_access_token_async" + ) as mock_ensure_token, mock.patch( + 
"litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._get_token_and_url" + ) as mock_get_token, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.create_pass_through_route" + ) as mock_create_route: + mock_ensure_token.return_value = ("test-auth-header", default_project) + mock_get_token.return_value = (default_credentials, "") + + try: + await vertex_proxy_route( + endpoint=endpoint, + request=mock_request, + fastapi_response=mock_response, + ) + except Exception as e: + traceback.print_exc() + print(f"Error: {e}") + + # Verify default credentials were used + mock_create_route.assert_called_once_with( + endpoint=endpoint, + target=f"https://{default_location}-aiplatform.googleapis.com/v1/projects/{default_project}/locations/{default_location}/publishers/google/models/gemini-1.5-flash:generateContent", + custom_headers={"Authorization": f"Bearer {default_credentials}"}, + ) From ecf2243b690059194d779bb63c7fcf94cfd74c15 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 11:35:57 -0700 Subject: [PATCH 094/119] test: add more e2e testing --- .../test_llm_pass_through_endpoints.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index ea5b908796..6283c8ebce 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -264,8 +264,17 @@ class TestVertexAIPassThroughHandler: custom_headers={"Authorization": f"Bearer {test_token}"}, ) + @pytest.mark.parametrize( + "initial_endpoint", + [ + "publishers/google/models/gemini-1.5-flash:generateContent", + "v1/projects/bad-project/locations/bad-location/publishers/google/models/gemini-1.5-flash:generateContent", + ], + ) @pytest.mark.asyncio - async 
def test_vertex_passthrough_with_default_credentials(self, monkeypatch): + async def test_vertex_passthrough_with_default_credentials( + self, monkeypatch, initial_endpoint + ): """ Test that when no passthrough credentials are set, default credentials are used in the request """ @@ -291,15 +300,13 @@ class TestVertexAIPassThroughHandler: ) # Use different project/location in request than the default - request_project = "non-existing-project" - request_location = "bad-location" - endpoint = f"/v1/projects/{request_project}/locations/{request_location}/publishers/google/models/gemini-1.5-flash:generateContent" + endpoint = initial_endpoint mock_request = Request( scope={ "type": "http", "method": "POST", - "path": endpoint, + "path": f"/vertex_ai/{endpoint}", "headers": {}, } ) From 539ad25a95924d3e878f31e188a1fdcc6099c960 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 11:36:34 -0700 Subject: [PATCH 095/119] test_openai_web_search_streaming --- tests/llm_translation/test_openai.py | 36 ++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/tests/llm_translation/test_openai.py b/tests/llm_translation/test_openai.py index 97af8d93e4..633ff76467 100644 --- a/tests/llm_translation/test_openai.py +++ b/tests/llm_translation/test_openai.py @@ -3,6 +3,7 @@ import os import sys from datetime import datetime from unittest.mock import AsyncMock, patch +from typing import Optional sys.path.insert( 0, os.path.abspath("../..") @@ -403,10 +404,8 @@ def validate_response_url_citation(url_citation: ChatCompletionAnnotationURLCita assert "url" in url_citation -def validate_model_response_contains_web_search_annotations(response: ModelResponse): +def validate_web_search_annotations(annotations: ChatCompletionAnnotation): """validates litellm response contains web search annotations""" - message = response.choices[0].message - annotations: ChatCompletionAnnotation = message.annotations print("annotations: ", annotations) assert 
annotations is not None assert isinstance(annotations, list) @@ -429,4 +428,33 @@ def test_openai_web_search(): ], ) print("litellm response: ", response.model_dump_json(indent=4)) - validate_model_response_contains_web_search_annotations(response) + message = response.choices[0].message + annotations: ChatCompletionAnnotation = message.annotations + validate_web_search_annotations(annotations) + + +def test_openai_web_search_streaming(): + """Makes a simple web search request and validates the response contains web search annotations and all expected fields are present""" + # litellm._turn_on_debug() + test_openai_web_search: Optional[ChatCompletionAnnotation] = None + response = litellm.completion( + model="openai/gpt-4o-search-preview", + messages=[ + { + "role": "user", + "content": "What was a positive news story from today?", + } + ], + stream=True, + ) + for chunk in response: + print("litellm response chunk: ", chunk) + if ( + hasattr(chunk.choices[0].delta, "annotations") + and chunk.choices[0].delta.annotations is not None + ): + test_openai_web_search = chunk.choices[0].delta.annotations + + # Assert this request has at-least one web search annotation + assert test_openai_web_search is not None + validate_web_search_annotations(test_openai_web_search) From 13bfe7d518e9ef07c1846137b52427a50c258a97 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 11:38:30 -0700 Subject: [PATCH 096/119] Add annotations to the delta --- litellm/types/utils.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 58c78dfa29..8821d2c80b 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -630,6 +630,7 @@ class Delta(OpenAIObject): audio: Optional[ChatCompletionAudioResponse] = None, reasoning_content: Optional[str] = None, thinking_blocks: Optional[List[ChatCompletionThinkingBlock]] = None, + annotations: Optional[List[ChatCompletionAnnotation]] = None, **params, ): super(Delta, 
self).__init__(**params) @@ -640,6 +641,7 @@ class Delta(OpenAIObject): self.function_call: Optional[Union[FunctionCall, Any]] = None self.tool_calls: Optional[List[Union[ChatCompletionDeltaToolCall, Any]]] = None self.audio: Optional[ChatCompletionAudioResponse] = None + self.annotations: Optional[List[ChatCompletionAnnotation]] = None if reasoning_content is not None: self.reasoning_content = reasoning_content @@ -653,6 +655,12 @@ class Delta(OpenAIObject): # ensure default response matches OpenAI spec del self.thinking_blocks + # Add annotations to the delta, ensure they are only on Delta if they exist (Match OpenAI spec) + if annotations is not None: + self.annotations = annotations + else: + del self.annotations + if function_call is not None and isinstance(function_call, dict): self.function_call = FunctionCall(**function_call) else: From 907b2c1c0ae6a17d4d95a482d7088b0399751fe8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 11:41:53 -0700 Subject: [PATCH 097/119] test_is_chunk_non_empty_with_annotations --- .../litellm_core_utils/streaming_handler.py | 5 ++- .../test_streaming_handler.py | 34 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py index 56e64d1859..a11e5af12b 100644 --- a/litellm/litellm_core_utils/streaming_handler.py +++ b/litellm/litellm_core_utils/streaming_handler.py @@ -799,6 +799,10 @@ class CustomStreamWrapper: "provider_specific_fields" in response_obj and response_obj["provider_specific_fields"] is not None ) + or ( + "annotations" in model_response.choices[0].delta + and model_response.choices[0].delta.annotations is not None + ) ): return True else: @@ -939,7 +943,6 @@ class CustomStreamWrapper: and model_response.choices[0].delta.audio is not None ): return model_response - else: if hasattr(model_response, "usage"): self.chunks.append(model_response) diff --git 
a/tests/litellm/litellm_core_utils/test_streaming_handler.py b/tests/litellm/litellm_core_utils/test_streaming_handler.py index 75c4fc1035..988d533670 100644 --- a/tests/litellm/litellm_core_utils/test_streaming_handler.py +++ b/tests/litellm/litellm_core_utils/test_streaming_handler.py @@ -136,6 +136,40 @@ def test_is_chunk_non_empty(initialized_custom_stream_wrapper: CustomStreamWrapp ) +def test_is_chunk_non_empty_with_annotations( + initialized_custom_stream_wrapper: CustomStreamWrapper, +): + """Unit test if non-empty when annotations are present""" + chunk = { + "id": "e89b6501-8ac2-464c-9550-7cd3daf94350", + "object": "chat.completion.chunk", + "created": 1741037890, + "model": "deepseek-reasoner", + "system_fingerprint": "fp_5417b77867_prod0225", + "choices": [ + { + "index": 0, + "delta": { + "content": None, + "annotations": [ + {"type": "url_citation", "url": "https://www.google.com"} + ], + }, + "logprobs": None, + "finish_reason": None, + } + ], + } + assert ( + initialized_custom_stream_wrapper.is_chunk_non_empty( + completion_obj=MagicMock(), + model_response=ModelResponseStream(**chunk), + response_obj=MagicMock(), + ) + is True + ) + + def test_optional_combine_thinking_block_in_choices( initialized_custom_stream_wrapper: CustomStreamWrapper, ): From 2d5c7f809d5b451443818c02cd1036b8f516f67d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 11:49:51 -0700 Subject: [PATCH 098/119] fix(llm_passthrough_endpoints.py): raise verbose error if credentials not found on proxy --- .../llm_passthrough_endpoints.py | 25 ++++-- .../test_llm_pass_through_endpoints.py | 85 ++++++++++++++++++- 2 files changed, 100 insertions(+), 10 deletions(-) diff --git a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py index 0fae1e6f0b..c4d96b67f6 100644 --- a/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py +++ 
b/litellm/proxy/pass_through_endpoints/llm_passthrough_endpoints.py @@ -463,9 +463,11 @@ async def vertex_proxy_route( location=vertex_location, ) + headers_passed_through = False # Use headers from the incoming request if no vertex credentials are found if vertex_credentials is None or vertex_credentials.vertex_project is None: headers = dict(request.headers) or {} + headers_passed_through = True verbose_proxy_logger.debug( "default_vertex_config not set, incoming request headers %s", headers ) @@ -516,8 +518,6 @@ async def vertex_proxy_route( vertex_location=vertex_location, vertex_project=vertex_project, ) - # base_url = httpx.URL(base_target_url) - # updated_url = base_url.copy_with(path=encoded_endpoint) verbose_proxy_logger.debug("updated url %s", updated_url) @@ -534,12 +534,21 @@ async def vertex_proxy_route( target=target, custom_headers=headers, ) # dynamically construct pass-through endpoint based on incoming path - received_value = await endpoint_func( - request, - fastapi_response, - user_api_key_dict, - stream=is_streaming_request, # type: ignore - ) + + try: + received_value = await endpoint_func( + request, + fastapi_response, + user_api_key_dict, + stream=is_streaming_request, # type: ignore + ) + except Exception as e: + if headers_passed_through: + raise Exception( + f"No credentials found on proxy for this request. 
Headers were passed through directly but request failed with error: {str(e)}" + ) + else: + raise e return received_value diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index 6283c8ebce..74a3dd45c8 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -188,7 +188,7 @@ class TestVertexAIPassThroughHandler: Case 2: User set default credentials, no exact passthrough credentials - confirm default credentials used. - Case 3: No default credentials, incorrect project/base passed - confirm no credentials used. + Case 3: No default credentials, no mapped credentials - request passed through directly. """ @pytest.mark.asyncio @@ -225,7 +225,10 @@ class TestVertexAIPassThroughHandler: "type": "http", "method": "POST", "path": endpoint, - "headers": {}, + "headers": [ + (b"Authorization", b"Bearer test-creds"), + (b"Content-Type", b"application/json"), + ], } ) @@ -338,3 +341,81 @@ class TestVertexAIPassThroughHandler: target=f"https://{default_location}-aiplatform.googleapis.com/v1/projects/{default_project}/locations/{default_location}/publishers/google/models/gemini-1.5-flash:generateContent", custom_headers={"Authorization": f"Bearer {default_credentials}"}, ) + + @pytest.mark.asyncio + async def test_vertex_passthrough_with_no_default_credentials(self, monkeypatch): + """ + Test that when no default credentials are set, the request fails + """ + """ + Test that when passthrough credentials are set, they are correctly used in the request + """ + from litellm.proxy.pass_through_endpoints.passthrough_endpoint_router import ( + PassthroughEndpointRouter, + ) + + vertex_project = "my-project" + vertex_location = "us-central1" + vertex_credentials = "test-creds" + + test_project = "test-project" + test_location = "test-location" + 
test_token = "test-creds" + + pass_through_router = PassthroughEndpointRouter() + + pass_through_router.add_vertex_credentials( + project_id=vertex_project, + location=vertex_location, + vertex_credentials=vertex_credentials, + ) + + monkeypatch.setattr( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.passthrough_endpoint_router", + pass_through_router, + ) + + endpoint = f"/v1/projects/{test_project}/locations/{test_location}/publishers/google/models/gemini-1.5-flash:generateContent" + + # Mock request + mock_request = Request( + scope={ + "type": "http", + "method": "POST", + "path": endpoint, + "headers": [ + (b"authorization", b"Bearer test-creds"), + ], + } + ) + + # Mock response + mock_response = Response() + + with mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._ensure_access_token_async" + ) as mock_ensure_token, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.vertex_llm_base._get_token_and_url" + ) as mock_get_token, mock.patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.create_pass_through_route" + ) as mock_create_route: + mock_ensure_token.return_value = ("test-auth-header", test_project) + mock_get_token.return_value = (test_token, "") + + # Call the route + try: + await vertex_proxy_route( + endpoint=endpoint, + request=mock_request, + fastapi_response=mock_response, + ) + except Exception as e: + traceback.print_exc() + print(f"Error: {e}") + + # Verify create_pass_through_route was called with correct arguments + mock_create_route.assert_called_once_with( + endpoint=endpoint, + target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/publishers/google/models/gemini-1.5-flash:generateContent", + custom_headers={"authorization": f"Bearer {test_token}"}, + ) From 860ddec3c5c73fe8a4acd5f481ae39c7a3a66a6d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 12:48:53 
-0700 Subject: [PATCH 099/119] test: migrate testing --- .../vertex_ai/test_vertex_ai_common_utils.py | 43 +++ .../test_llm_pass_through_endpoints.py | 39 ++- ...test_passthrough_endpoints_common_utils.py | 44 +++ .../test_unit_test_passthrough_router.py | 183 +++++++++++ .../test_unit_test_vertex_pass_through.py | 289 ------------------ 5 files changed, 308 insertions(+), 290 deletions(-) create mode 100644 tests/litellm/llms/vertex_ai/test_vertex_ai_common_utils.py create mode 100644 tests/litellm/proxy/pass_through_endpoints/test_passthrough_endpoints_common_utils.py diff --git a/tests/litellm/llms/vertex_ai/test_vertex_ai_common_utils.py b/tests/litellm/llms/vertex_ai/test_vertex_ai_common_utils.py new file mode 100644 index 0000000000..e89355443f --- /dev/null +++ b/tests/litellm/llms/vertex_ai/test_vertex_ai_common_utils.py @@ -0,0 +1,43 @@ +import os +import sys +from unittest.mock import MagicMock, call, patch + +import pytest + +sys.path.insert( + 0, os.path.abspath("../../..") +) # Adds the parent directory to the system path + +import litellm +from litellm.llms.vertex_ai.common_utils import ( + get_vertex_location_from_url, + get_vertex_project_id_from_url, +) + + +@pytest.mark.asyncio +async def test_get_vertex_project_id_from_url(): + """Test _get_vertex_project_id_from_url with various URLs""" + # Test with valid URL + url = "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent" + project_id = get_vertex_project_id_from_url(url) + assert project_id == "test-project" + + # Test with invalid URL + url = "https://invalid-url.com" + project_id = get_vertex_project_id_from_url(url) + assert project_id is None + + +@pytest.mark.asyncio +async def test_get_vertex_location_from_url(): + """Test _get_vertex_location_from_url with various URLs""" + # Test with valid URL + url = 
"https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent" + location = get_vertex_location_from_url(url) + assert location == "us-central1" + + # Test with invalid URL + url = "https://invalid-url.com" + location = get_vertex_location_from_url(url) + assert location is None diff --git a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py index 74a3dd45c8..da08dea605 100644 --- a/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py +++ b/tests/litellm/proxy/pass_through_endpoints/test_llm_pass_through_endpoints.py @@ -3,7 +3,7 @@ import os import sys import traceback from unittest import mock -from unittest.mock import MagicMock, patch +from unittest.mock import AsyncMock, MagicMock, Mock, patch import httpx import pytest @@ -419,3 +419,40 @@ class TestVertexAIPassThroughHandler: target=f"https://{test_location}-aiplatform.googleapis.com/v1/projects/{test_project}/locations/{test_location}/publishers/google/models/gemini-1.5-flash:generateContent", custom_headers={"authorization": f"Bearer {test_token}"}, ) + + @pytest.mark.asyncio + async def test_async_vertex_proxy_route_api_key_auth(self): + """ + Critical + + This is how Vertex AI JS SDK will Auth to Litellm Proxy + """ + # Mock dependencies + mock_request = Mock() + mock_request.headers = {"x-litellm-api-key": "test-key-123"} + mock_request.method = "POST" + mock_response = Mock() + + with patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.user_api_key_auth" + ) as mock_auth: + mock_auth.return_value = {"api_key": "test-key-123"} + + with patch( + "litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints.create_pass_through_route" + ) as mock_pass_through: + mock_pass_through.return_value = AsyncMock( + return_value={"status": "success"} + ) + + # Call the 
function + result = await vertex_proxy_route( + endpoint="v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-1.5-pro:generateContent", + request=mock_request, + fastapi_response=mock_response, + ) + + # Verify user_api_key_auth was called with the correct Bearer token + mock_auth.assert_called_once() + call_args = mock_auth.call_args[1] + assert call_args["api_key"] == "Bearer test-key-123" diff --git a/tests/litellm/proxy/pass_through_endpoints/test_passthrough_endpoints_common_utils.py b/tests/litellm/proxy/pass_through_endpoints/test_passthrough_endpoints_common_utils.py new file mode 100644 index 0000000000..bd8c5f5a99 --- /dev/null +++ b/tests/litellm/proxy/pass_through_endpoints/test_passthrough_endpoints_common_utils.py @@ -0,0 +1,44 @@ +import json +import os +import sys +import traceback +from unittest import mock +from unittest.mock import MagicMock, patch + +import httpx +import pytest +from fastapi import Request, Response +from fastapi.testclient import TestClient + +sys.path.insert( + 0, os.path.abspath("../../../..") +) # Adds the parent directory to the system path + +from unittest.mock import Mock + +from litellm.proxy.pass_through_endpoints.common_utils import get_litellm_virtual_key + + +@pytest.mark.asyncio +async def test_get_litellm_virtual_key(): + """ + Test that the get_litellm_virtual_key function correctly handles the API key authentication + """ + # Test with x-litellm-api-key + mock_request = Mock() + mock_request.headers = {"x-litellm-api-key": "test-key-123"} + result = get_litellm_virtual_key(mock_request) + assert result == "Bearer test-key-123" + + # Test with Authorization header + mock_request.headers = {"Authorization": "Bearer auth-key-456"} + result = get_litellm_virtual_key(mock_request) + assert result == "Bearer auth-key-456" + + # Test with both headers (x-litellm-api-key should take precedence) + mock_request.headers = { + "x-litellm-api-key": "test-key-123", + "Authorization": "Bearer 
auth-key-456", + } + result = get_litellm_virtual_key(mock_request) + assert result == "Bearer test-key-123" diff --git a/tests/pass_through_unit_tests/test_unit_test_passthrough_router.py b/tests/pass_through_unit_tests/test_unit_test_passthrough_router.py index 6e8296876a..8e016b68d0 100644 --- a/tests/pass_through_unit_tests/test_unit_test_passthrough_router.py +++ b/tests/pass_through_unit_tests/test_unit_test_passthrough_router.py @@ -11,6 +11,7 @@ from unittest.mock import patch from litellm.proxy.pass_through_endpoints.passthrough_endpoint_router import ( PassthroughEndpointRouter, ) +from litellm.types.passthrough_endpoints.vertex_ai import VertexPassThroughCredentials passthrough_endpoint_router = PassthroughEndpointRouter() @@ -132,3 +133,185 @@ class TestPassthroughEndpointRouter(unittest.TestCase): ), "COHERE_API_KEY", ) + + def test_get_deployment_key(self): + """Test _get_deployment_key with various inputs""" + router = PassthroughEndpointRouter() + + # Test with valid inputs + key = router._get_deployment_key("test-project", "us-central1") + assert key == "test-project-us-central1" + + # Test with None values + key = router._get_deployment_key(None, "us-central1") + assert key is None + + key = router._get_deployment_key("test-project", None) + assert key is None + + key = router._get_deployment_key(None, None) + assert key is None + + def test_add_vertex_credentials(self): + """Test add_vertex_credentials functionality""" + router = PassthroughEndpointRouter() + + # Test adding valid credentials + router.add_vertex_credentials( + project_id="test-project", + location="us-central1", + vertex_credentials='{"credentials": "test-creds"}', + ) + + assert "test-project-us-central1" in router.deployment_key_to_vertex_credentials + creds = router.deployment_key_to_vertex_credentials["test-project-us-central1"] + assert creds.vertex_project == "test-project" + assert creds.vertex_location == "us-central1" + assert creds.vertex_credentials == '{"credentials": 
"test-creds"}' + + # Test adding with None values + router.add_vertex_credentials( + project_id=None, + location=None, + vertex_credentials='{"credentials": "test-creds"}', + ) + # Should not add None values + assert len(router.deployment_key_to_vertex_credentials) == 1 + + def test_default_credentials(self): + """ + Test get_vertex_credentials with stored credentials. + + Tests if default credentials are used if set. + + Tests if no default credentials are used, if no default set + """ + router = PassthroughEndpointRouter() + router.add_vertex_credentials( + project_id="test-project", + location="us-central1", + vertex_credentials='{"credentials": "test-creds"}', + ) + + creds = router.get_vertex_credentials( + project_id="test-project", location="us-central2" + ) + + assert creds is None + + def test_get_vertex_env_vars(self): + """Test that _get_vertex_env_vars correctly reads environment variables""" + # Set environment variables for the test + os.environ["DEFAULT_VERTEXAI_PROJECT"] = "test-project-123" + os.environ["DEFAULT_VERTEXAI_LOCATION"] = "us-central1" + os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/creds" + + try: + result = self.router._get_vertex_env_vars() + print(result) + + # Verify the result + assert isinstance(result, VertexPassThroughCredentials) + assert result.vertex_project == "test-project-123" + assert result.vertex_location == "us-central1" + assert result.vertex_credentials == "/path/to/creds" + + finally: + # Clean up environment variables + del os.environ["DEFAULT_VERTEXAI_PROJECT"] + del os.environ["DEFAULT_VERTEXAI_LOCATION"] + del os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] + + def test_set_default_vertex_config(self): + """Test set_default_vertex_config with various inputs""" + # Test with None config - set environment variables first + os.environ["DEFAULT_VERTEXAI_PROJECT"] = "env-project" + os.environ["DEFAULT_VERTEXAI_LOCATION"] = "env-location" + os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] 
= "env-creds" + os.environ["GOOGLE_CREDS"] = "secret-creds" + + try: + # Test with None config + self.router.set_default_vertex_config() + + assert self.router.default_vertex_config.vertex_project == "env-project" + assert self.router.default_vertex_config.vertex_location == "env-location" + assert self.router.default_vertex_config.vertex_credentials == "env-creds" + + # Test with valid config.yaml settings on vertex_config + test_config = { + "vertex_project": "my-project-123", + "vertex_location": "us-central1", + "vertex_credentials": "path/to/creds", + } + self.router.set_default_vertex_config(test_config) + + assert self.router.default_vertex_config.vertex_project == "my-project-123" + assert self.router.default_vertex_config.vertex_location == "us-central1" + assert ( + self.router.default_vertex_config.vertex_credentials == "path/to/creds" + ) + + # Test with environment variable reference + test_config = { + "vertex_project": "my-project-123", + "vertex_location": "us-central1", + "vertex_credentials": "os.environ/GOOGLE_CREDS", + } + self.router.set_default_vertex_config(test_config) + + assert ( + self.router.default_vertex_config.vertex_credentials == "secret-creds" + ) + + finally: + # Clean up environment variables + del os.environ["DEFAULT_VERTEXAI_PROJECT"] + del os.environ["DEFAULT_VERTEXAI_LOCATION"] + del os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] + del os.environ["GOOGLE_CREDS"] + + def test_vertex_passthrough_router_init(self): + """Test VertexPassThroughRouter initialization""" + router = PassthroughEndpointRouter() + assert isinstance(router.deployment_key_to_vertex_credentials, dict) + assert len(router.deployment_key_to_vertex_credentials) == 0 + + def test_get_vertex_credentials_none(self): + """Test get_vertex_credentials with various inputs""" + router = PassthroughEndpointRouter() + + router.set_default_vertex_config( + config={ + "vertex_project": None, + "vertex_location": None, + "vertex_credentials": None, + } + ) + + # 
Test with None project_id and location - should return default config + creds = router.get_vertex_credentials(None, None) + assert isinstance(creds, VertexPassThroughCredentials) + + # Test with valid project_id and location but no stored credentials + creds = router.get_vertex_credentials("test-project", "us-central1") + assert isinstance(creds, VertexPassThroughCredentials) + assert creds.vertex_project is None + assert creds.vertex_location is None + assert creds.vertex_credentials is None + + def test_get_vertex_credentials_stored(self): + """Test get_vertex_credentials with stored credentials""" + router = PassthroughEndpointRouter() + router.add_vertex_credentials( + project_id="test-project", + location="us-central1", + vertex_credentials='{"credentials": "test-creds"}', + ) + + creds = router.get_vertex_credentials( + project_id="test-project", location="us-central1" + ) + assert creds.vertex_project == "test-project" + assert creds.vertex_location == "us-central1" + assert creds.vertex_credentials == '{"credentials": "test-creds"}' diff --git a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py b/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py index 9b354a84c9..066e434c26 100644 --- a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py +++ b/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py @@ -26,292 +26,3 @@ from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( from litellm.proxy.vertex_ai_endpoints.vertex_passthrough_router import ( VertexPassThroughRouter, ) - - -@pytest.mark.asyncio -async def test_get_litellm_virtual_key(): - """ - Test that the get_litellm_virtual_key function correctly handles the API key authentication - """ - # Test with x-litellm-api-key - mock_request = Mock() - mock_request.headers = {"x-litellm-api-key": "test-key-123"} - result = get_litellm_virtual_key(mock_request) - assert result == "Bearer test-key-123" - - # Test with Authorization header 
- mock_request.headers = {"Authorization": "Bearer auth-key-456"} - result = get_litellm_virtual_key(mock_request) - assert result == "Bearer auth-key-456" - - # Test with both headers (x-litellm-api-key should take precedence) - mock_request.headers = { - "x-litellm-api-key": "test-key-123", - "Authorization": "Bearer auth-key-456", - } - result = get_litellm_virtual_key(mock_request) - assert result == "Bearer test-key-123" - - -@pytest.mark.asyncio -async def test_async_vertex_proxy_route_api_key_auth(): - """ - Critical - - This is how Vertex AI JS SDK will Auth to Litellm Proxy - """ - # Mock dependencies - mock_request = Mock() - mock_request.headers = {"x-litellm-api-key": "test-key-123"} - mock_request.method = "POST" - mock_response = Mock() - - with patch( - "litellm.proxy.vertex_ai_endpoints.vertex_endpoints.user_api_key_auth" - ) as mock_auth: - mock_auth.return_value = {"api_key": "test-key-123"} - - with patch( - "litellm.proxy.vertex_ai_endpoints.vertex_endpoints.create_pass_through_route" - ) as mock_pass_through: - mock_pass_through.return_value = AsyncMock( - return_value={"status": "success"} - ) - - # Call the function - result = await vertex_proxy_route( - endpoint="v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-1.5-pro:generateContent", - request=mock_request, - fastapi_response=mock_response, - ) - - # Verify user_api_key_auth was called with the correct Bearer token - mock_auth.assert_called_once() - call_args = mock_auth.call_args[1] - assert call_args["api_key"] == "Bearer test-key-123" - - -@pytest.mark.asyncio -async def test_get_vertex_env_vars(): - """Test that _get_vertex_env_vars correctly reads environment variables""" - # Set environment variables for the test - os.environ["DEFAULT_VERTEXAI_PROJECT"] = "test-project-123" - os.environ["DEFAULT_VERTEXAI_LOCATION"] = "us-central1" - os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/creds" - - try: - result = _get_vertex_env_vars() - 
print(result) - - # Verify the result - assert isinstance(result, VertexPassThroughCredentials) - assert result.vertex_project == "test-project-123" - assert result.vertex_location == "us-central1" - assert result.vertex_credentials == "/path/to/creds" - - finally: - # Clean up environment variables - del os.environ["DEFAULT_VERTEXAI_PROJECT"] - del os.environ["DEFAULT_VERTEXAI_LOCATION"] - del os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] - - -@pytest.mark.asyncio -async def test_set_default_vertex_config(): - """Test set_default_vertex_config with various inputs""" - # Test with None config - set environment variables first - os.environ["DEFAULT_VERTEXAI_PROJECT"] = "env-project" - os.environ["DEFAULT_VERTEXAI_LOCATION"] = "env-location" - os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] = "env-creds" - os.environ["GOOGLE_CREDS"] = "secret-creds" - - try: - # Test with None config - set_default_vertex_config() - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - default_vertex_config, - ) - - assert default_vertex_config.vertex_project == "env-project" - assert default_vertex_config.vertex_location == "env-location" - assert default_vertex_config.vertex_credentials == "env-creds" - - # Test with valid config.yaml settings on vertex_config - test_config = { - "vertex_project": "my-project-123", - "vertex_location": "us-central1", - "vertex_credentials": "path/to/creds", - } - set_default_vertex_config(test_config) - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - default_vertex_config, - ) - - assert default_vertex_config.vertex_project == "my-project-123" - assert default_vertex_config.vertex_location == "us-central1" - assert default_vertex_config.vertex_credentials == "path/to/creds" - - # Test with environment variable reference - test_config = { - "vertex_project": "my-project-123", - "vertex_location": "us-central1", - "vertex_credentials": "os.environ/GOOGLE_CREDS", - } - set_default_vertex_config(test_config) - 
from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - default_vertex_config, - ) - - assert default_vertex_config.vertex_credentials == "secret-creds" - - finally: - # Clean up environment variables - del os.environ["DEFAULT_VERTEXAI_PROJECT"] - del os.environ["DEFAULT_VERTEXAI_LOCATION"] - del os.environ["DEFAULT_GOOGLE_APPLICATION_CREDENTIALS"] - del os.environ["GOOGLE_CREDS"] - - -@pytest.mark.asyncio -async def test_vertex_passthrough_router_init(): - """Test VertexPassThroughRouter initialization""" - router = VertexPassThroughRouter() - assert isinstance(router.deployment_key_to_vertex_credentials, dict) - assert len(router.deployment_key_to_vertex_credentials) == 0 - - -@pytest.mark.asyncio -async def test_get_vertex_credentials_none(): - """Test get_vertex_credentials with various inputs""" - from litellm.proxy.vertex_ai_endpoints import vertex_endpoints - - setattr(vertex_endpoints, "default_vertex_config", VertexPassThroughCredentials()) - router = VertexPassThroughRouter() - - # Test with None project_id and location - should return default config - creds = router.get_vertex_credentials(None, None) - assert isinstance(creds, VertexPassThroughCredentials) - - # Test with valid project_id and location but no stored credentials - creds = router.get_vertex_credentials("test-project", "us-central1") - assert isinstance(creds, VertexPassThroughCredentials) - assert creds.vertex_project is None - assert creds.vertex_location is None - assert creds.vertex_credentials is None - - -@pytest.mark.asyncio -async def test_get_vertex_credentials_stored(): - """Test get_vertex_credentials with stored credentials""" - router = VertexPassThroughRouter() - router.add_vertex_credentials( - project_id="test-project", - location="us-central1", - vertex_credentials='{"credentials": "test-creds"}', - ) - - creds = router.get_vertex_credentials( - project_id="test-project", location="us-central1" - ) - assert creds.vertex_project == "test-project" - assert 
creds.vertex_location == "us-central1" - assert creds.vertex_credentials == '{"credentials": "test-creds"}' - - -@pytest.mark.asyncio -async def test_default_credentials(): - """ - Test get_vertex_credentials with stored credentials. - - Tests if default credentials are used if set. - - Tests if no default credentials are used, if no default set - """ - router = VertexPassThroughRouter() - router.add_vertex_credentials( - project_id="test-project", - location="us-central1", - vertex_credentials='{"credentials": "test-creds"}', - ) - - creds = router.get_vertex_credentials( - project_id="test-project", location="us-central2" - ) - - assert creds is None - - -@pytest.mark.asyncio -async def test_add_vertex_credentials(): - """Test add_vertex_credentials functionality""" - router = VertexPassThroughRouter() - - # Test adding valid credentials - router.add_vertex_credentials( - project_id="test-project", - location="us-central1", - vertex_credentials='{"credentials": "test-creds"}', - ) - - assert "test-project-us-central1" in router.deployment_key_to_vertex_credentials - creds = router.deployment_key_to_vertex_credentials["test-project-us-central1"] - assert creds.vertex_project == "test-project" - assert creds.vertex_location == "us-central1" - assert creds.vertex_credentials == '{"credentials": "test-creds"}' - - # Test adding with None values - router.add_vertex_credentials( - project_id=None, - location=None, - vertex_credentials='{"credentials": "test-creds"}', - ) - # Should not add None values - assert len(router.deployment_key_to_vertex_credentials) == 1 - - -@pytest.mark.asyncio -async def test_get_deployment_key(): - """Test _get_deployment_key with various inputs""" - router = VertexPassThroughRouter() - - # Test with valid inputs - key = router._get_deployment_key("test-project", "us-central1") - assert key == "test-project-us-central1" - - # Test with None values - key = router._get_deployment_key(None, "us-central1") - assert key is None - - key = 
router._get_deployment_key("test-project", None) - assert key is None - - key = router._get_deployment_key(None, None) - assert key is None - - -@pytest.mark.asyncio -async def test_get_vertex_project_id_from_url(): - """Test _get_vertex_project_id_from_url with various URLs""" - # Test with valid URL - url = "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent" - project_id = VertexPassThroughRouter._get_vertex_project_id_from_url(url) - assert project_id == "test-project" - - # Test with invalid URL - url = "https://invalid-url.com" - project_id = VertexPassThroughRouter._get_vertex_project_id_from_url(url) - assert project_id is None - - -@pytest.mark.asyncio -async def test_get_vertex_location_from_url(): - """Test _get_vertex_location_from_url with various URLs""" - # Test with valid URL - url = "https://us-central1-aiplatform.googleapis.com/v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-pro:streamGenerateContent" - location = VertexPassThroughRouter._get_vertex_location_from_url(url) - assert location == "us-central1" - - # Test with invalid URL - url = "https://invalid-url.com" - location = VertexPassThroughRouter._get_vertex_location_from_url(url) - assert location is None From 70c0d9ce949dc27a193af17fe7a90887b06f246d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 12:50:58 -0700 Subject: [PATCH 100/119] test: fix test --- .../test_unit_test_vertex_pass_through.py | 28 ------------------- .../test_router_adding_deployments.py | 8 +++--- 2 files changed, 4 insertions(+), 32 deletions(-) delete mode 100644 tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py diff --git a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py b/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py deleted file mode 100644 index 066e434c26..0000000000 --- 
a/tests/pass_through_unit_tests/test_unit_test_vertex_pass_through.py +++ /dev/null @@ -1,28 +0,0 @@ -import json -import os -import sys -from datetime import datetime -from unittest.mock import AsyncMock, Mock, patch - -sys.path.insert( - 0, os.path.abspath("../..") -) # Adds the parent directory to the system-path - - -import httpx -import pytest -import litellm -from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj - - -from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - get_litellm_virtual_key, - vertex_proxy_route, - _get_vertex_env_vars, - set_default_vertex_config, - VertexPassThroughCredentials, - default_vertex_config, -) -from litellm.proxy.vertex_ai_endpoints.vertex_passthrough_router import ( - VertexPassThroughRouter, -) diff --git a/tests/router_unit_tests/test_router_adding_deployments.py b/tests/router_unit_tests/test_router_adding_deployments.py index fca3f147e5..aabaf37f1d 100644 --- a/tests/router_unit_tests/test_router_adding_deployments.py +++ b/tests/router_unit_tests/test_router_adding_deployments.py @@ -123,21 +123,21 @@ def test_add_vertex_pass_through_deployment(): router.add_deployment(deployment) # Get the vertex credentials from the router - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - vertex_pass_through_router, + from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + passthrough_endpoint_router, ) # current state of pass-through vertex router print("\n vertex_pass_through_router.deployment_key_to_vertex_credentials\n\n") print( json.dumps( - vertex_pass_through_router.deployment_key_to_vertex_credentials, + passthrough_endpoint_router.deployment_key_to_vertex_credentials, indent=4, default=str, ) ) - vertex_creds = vertex_pass_through_router.get_vertex_credentials( + vertex_creds = passthrough_endpoint_router.get_vertex_credentials( project_id="test-project", location="us-central1" ) From 7bd9c5cd9e9a2015cb4521f7a2b01de6c3e98544 Mon Sep 17 
00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 12:54:11 -0700 Subject: [PATCH 101/119] build: cleanup unused files --- .../vertex_ai_endpoints/vertex_endpoints.py | 217 ------------------ .../vertex_passthrough_router.py | 77 ------- 2 files changed, 294 deletions(-) delete mode 100644 litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py delete mode 100644 litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py b/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py deleted file mode 100644 index 6243fe79b4..0000000000 --- a/litellm/proxy/vertex_ai_endpoints/vertex_endpoints.py +++ /dev/null @@ -1,217 +0,0 @@ -# import traceback -# from typing import Optional - -# import httpx -# from fastapi import APIRouter, HTTPException, Request, Response, status - -# import litellm -# from litellm._logging import verbose_proxy_logger -# from litellm.fine_tuning.main import vertex_fine_tuning_apis_instance -# from litellm.proxy._types import * -# from litellm.proxy.auth.user_api_key_auth import user_api_key_auth -# from litellm.proxy.pass_through_endpoints.pass_through_endpoints import ( -# create_pass_through_route, -# ) -# from litellm.secret_managers.main import get_secret_str -# from litellm.types.passthrough_endpoints.vertex_ai import * - -# from .vertex_passthrough_router import VertexPassThroughRouter - -# router = APIRouter() -# vertex_pass_through_router = VertexPassThroughRouter() - -# default_vertex_config: Optional[VertexPassThroughCredentials] = None - - -# def _set_default_vertex_config( -# vertex_pass_through_credentials: VertexPassThroughCredentials, -# ): -# global default_vertex_config -# default_vertex_config = vertex_pass_through_credentials - - -# def exception_handler(e: Exception): -# verbose_proxy_logger.error( -# "litellm.proxy.proxy_server.v1/projects/tuningJobs(): Exception occurred - {}".format( -# str(e) -# ) -# ) -# 
verbose_proxy_logger.debug(traceback.format_exc()) -# if isinstance(e, HTTPException): -# return ProxyException( -# message=getattr(e, "message", str(e.detail)), -# type=getattr(e, "type", "None"), -# param=getattr(e, "param", "None"), -# code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST), -# ) -# else: -# error_msg = f"{str(e)}" -# return ProxyException( -# message=getattr(e, "message", error_msg), -# type=getattr(e, "type", "None"), -# param=getattr(e, "param", "None"), -# code=getattr(e, "status_code", 500), -# ) - - -# def construct_target_url( -# base_url: str, -# requested_route: str, -# default_vertex_location: Optional[str], -# default_vertex_project: Optional[str], -# ) -> httpx.URL: -# """ -# Allow user to specify their own project id / location. - -# If missing, use defaults - -# Handle cachedContent scenario - https://github.com/BerriAI/litellm/issues/5460 - -# Constructed Url: -# POST https://LOCATION-aiplatform.googleapis.com/{version}/projects/PROJECT_ID/locations/LOCATION/cachedContents -# """ -# new_base_url = httpx.URL(base_url) -# if "locations" in requested_route: # contains the target project id + location -# updated_url = new_base_url.copy_with(path=requested_route) -# return updated_url -# """ -# - Add endpoint version (e.g. 
v1beta for cachedContent, v1 for rest) -# - Add default project id -# - Add default location -# """ -# vertex_version: Literal["v1", "v1beta1"] = "v1" -# if "cachedContent" in requested_route: -# vertex_version = "v1beta1" - -# base_requested_route = "{}/projects/{}/locations/{}".format( -# vertex_version, default_vertex_project, default_vertex_location -# ) - -# updated_requested_route = "/" + base_requested_route + requested_route - -# updated_url = new_base_url.copy_with(path=updated_requested_route) -# return updated_url - - -# @router.api_route( -# "/vertex-ai/{endpoint:path}", -# methods=["GET", "POST", "PUT", "DELETE", "PATCH"], -# tags=["Vertex AI Pass-through", "pass-through"], -# include_in_schema=False, -# ) -# @router.api_route( -# "/vertex_ai/{endpoint:path}", -# methods=["GET", "POST", "PUT", "DELETE", "PATCH"], -# tags=["Vertex AI Pass-through", "pass-through"], -# ) -# async def vertex_proxy_route( -# endpoint: str, -# request: Request, -# fastapi_response: Response, -# ): -# """ -# Call LiteLLM proxy via Vertex AI SDK. 
- -# [Docs](https://docs.litellm.ai/docs/pass_through/vertex_ai) -# """ -# encoded_endpoint = httpx.URL(endpoint).path -# verbose_proxy_logger.debug("requested endpoint %s", endpoint) -# headers: dict = {} -# api_key_to_use = get_litellm_virtual_key(request=request) -# user_api_key_dict = await user_api_key_auth( -# request=request, -# api_key=api_key_to_use, -# ) - -# vertex_project: Optional[str] = ( -# VertexPassThroughRouter._get_vertex_project_id_from_url(endpoint) -# ) -# vertex_location: Optional[str] = ( -# VertexPassThroughRouter._get_vertex_location_from_url(endpoint) -# ) -# vertex_credentials = vertex_pass_through_router.get_vertex_credentials( -# project_id=vertex_project, -# location=vertex_location, -# ) - -# # Use headers from the incoming request if no vertex credentials are found -# if vertex_credentials.vertex_project is None: -# headers = dict(request.headers) or {} -# verbose_proxy_logger.debug( -# "default_vertex_config not set, incoming request headers %s", headers -# ) -# base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" -# headers.pop("content-length", None) -# headers.pop("host", None) -# else: -# vertex_project = vertex_credentials.vertex_project -# vertex_location = vertex_credentials.vertex_location -# vertex_credentials_str = vertex_credentials.vertex_credentials - -# # Construct base URL for the target endpoint -# base_target_url = f"https://{vertex_location}-aiplatform.googleapis.com/" - -# _auth_header, vertex_project = ( -# await vertex_fine_tuning_apis_instance._ensure_access_token_async( -# credentials=vertex_credentials_str, -# project_id=vertex_project, -# custom_llm_provider="vertex_ai_beta", -# ) -# ) - -# auth_header, _ = vertex_fine_tuning_apis_instance._get_token_and_url( -# model="", -# auth_header=_auth_header, -# gemini_api_key=None, -# vertex_credentials=vertex_credentials_str, -# vertex_project=vertex_project, -# vertex_location=vertex_location, -# stream=False, -# 
custom_llm_provider="vertex_ai_beta", -# api_base="", -# ) - -# headers = { -# "Authorization": f"Bearer {auth_header}", -# } - -# request_route = encoded_endpoint -# verbose_proxy_logger.debug("request_route %s", request_route) - -# # Ensure endpoint starts with '/' for proper URL construction -# if not encoded_endpoint.startswith("/"): -# encoded_endpoint = "/" + encoded_endpoint - -# # Construct the full target URL using httpx -# updated_url = construct_target_url( -# base_url=base_target_url, -# requested_route=encoded_endpoint, -# default_vertex_location=vertex_location, -# default_vertex_project=vertex_project, -# ) -# # base_url = httpx.URL(base_target_url) -# # updated_url = base_url.copy_with(path=encoded_endpoint) - -# verbose_proxy_logger.debug("updated url %s", updated_url) - -# ## check for streaming -# target = str(updated_url) -# is_streaming_request = False -# if "stream" in str(updated_url): -# is_streaming_request = True -# target += "?alt=sse" - -# ## CREATE PASS-THROUGH -# endpoint_func = create_pass_through_route( -# endpoint=endpoint, -# target=target, -# custom_headers=headers, -# ) # dynamically construct pass-through endpoint based on incoming path -# received_value = await endpoint_func( -# request, -# fastapi_response, -# user_api_key_dict, -# stream=is_streaming_request, # type: ignore -# ) - -# return received_value diff --git a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py b/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py deleted file mode 100644 index dd17d49b8f..0000000000 --- a/litellm/proxy/vertex_ai_endpoints/vertex_passthrough_router.py +++ /dev/null @@ -1,77 +0,0 @@ -# import json -# import re -# from typing import Dict, Optional - -# from litellm._logging import verbose_proxy_logger -# from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( -# VertexPassThroughCredentials, -# ) -# from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES - - -# class VertexPassThroughRouter: -# 
""" -# Vertex Pass Through Router for Vertex AI pass-through endpoints - - -# - if request specifies a project-id, location -> use credentials corresponding to the project-id, location -# - if request does not specify a project-id, location -> use credentials corresponding to the DEFAULT_VERTEXAI_PROJECT, DEFAULT_VERTEXAI_LOCATION -# """ - -# def __init__(self): -# """ -# Initialize the VertexPassThroughRouter -# Stores the vertex credentials for each deployment key -# ``` -# { -# "project_id-location": VertexPassThroughCredentials, -# "adroit-crow-us-central1": VertexPassThroughCredentials, -# } -# ``` -# """ -# self.deployment_key_to_vertex_credentials: Dict[ -# str, VertexPassThroughCredentials -# ] = {} -# pass - -# def get_vertex_credentials( -# self, project_id: Optional[str], location: Optional[str] -# ) -> Optional[VertexPassThroughCredentials]: -# """ -# Get the vertex credentials for the given project-id, location -# """ -# from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( -# default_vertex_config, -# ) - -# deployment_key = self._get_deployment_key( -# project_id=project_id, -# location=location, -# ) -# if deployment_key is None: -# return default_vertex_config -# if deployment_key in self.deployment_key_to_vertex_credentials: -# return self.deployment_key_to_vertex_credentials[deployment_key] -# else: -# return default_vertex_config - - -# @staticmethod -# def _get_vertex_project_id_from_url(url: str) -> Optional[str]: -# """ -# Get the vertex project id from the url - -# `https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` -# """ -# match = re.search(r"/projects/([^/]+)", url) -# return match.group(1) if match else None - -# @staticmethod -# def _get_vertex_location_from_url(url: str) -> Optional[str]: -# """ -# Get the vertex location from the url - -# 
`https://${LOCATION}-aiplatform.googleapis.com/v1/projects/${PROJECT_ID}/locations/${LOCATION}/publishers/google/models/${MODEL_ID}:streamGenerateContent` -# """ -# match = re.search(r"/locations/([^/]+)", url) -# return match.group(1) if match else None From 11f764601bd89154e2d543d3b0a1c163debf9673 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 12:56:21 -0700 Subject: [PATCH 102/119] add search_context_cost_per_1k_calls to model cost map spec --- tests/local_testing/test_get_model_info.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/local_testing/test_get_model_info.py b/tests/local_testing/test_get_model_info.py index d71f3f7c24..f885a61e78 100644 --- a/tests/local_testing/test_get_model_info.py +++ b/tests/local_testing/test_get_model_info.py @@ -518,6 +518,15 @@ def test_aaamodel_prices_and_context_window_json_is_valid(): ], }, }, + "search_context_cost_per_1k_calls": { + "type": "object", + "properties": { + "low_context": {"type": "number"}, + "medium_context": {"type": "number"}, + "high_context": {"type": "number"}, + }, + "additionalProperties": False, + }, "supported_modalities": { "type": "array", "items": { From 7b615ea7b799f42d9eeed13d051b7a8d7d5c9836 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 12:56:42 -0700 Subject: [PATCH 103/119] test: update tests --- .../pass_through_unit_tests/test_pass_through_unit_tests.py | 4 ---- tests/proxy_admin_ui_tests/test_route_check_unit_tests.py | 5 +---- tests/router_unit_tests/test_router_adding_deployments.py | 6 +++--- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/tests/pass_through_unit_tests/test_pass_through_unit_tests.py b/tests/pass_through_unit_tests/test_pass_through_unit_tests.py index db0a647e41..cb9db00324 100644 --- a/tests/pass_through_unit_tests/test_pass_through_unit_tests.py +++ b/tests/pass_through_unit_tests/test_pass_through_unit_tests.py @@ -339,9 +339,6 @@ def test_pass_through_routes_support_all_methods(): from 
litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( router as llm_router, ) - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - router as vertex_router, - ) # Expected HTTP methods expected_methods = {"GET", "POST", "PUT", "DELETE", "PATCH"} @@ -361,7 +358,6 @@ def test_pass_through_routes_support_all_methods(): # Check both routers check_router_methods(llm_router) - check_router_methods(vertex_router) def test_is_bedrock_agent_runtime_route(): diff --git a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py index 7f5ed297ca..937eb6f298 100644 --- a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py +++ b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py @@ -30,9 +30,6 @@ from litellm.proxy._types import LiteLLM_UserTable, LitellmUserRoles, UserAPIKey from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( router as llm_passthrough_router, ) -from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - router as vertex_router, -) # Replace the actual hash_token function with our mock import litellm.proxy.auth.route_checks @@ -96,7 +93,7 @@ def test_is_llm_api_route(): assert RouteChecks.is_llm_api_route("/key/regenerate/82akk800000000jjsk") is False assert RouteChecks.is_llm_api_route("/key/82akk800000000jjsk/delete") is False - all_llm_api_routes = vertex_router.routes + llm_passthrough_router.routes + all_llm_api_routes = llm_passthrough_router.routes # check all routes in llm_passthrough_router, ensure they are considered llm api routes for route in all_llm_api_routes: diff --git a/tests/router_unit_tests/test_router_adding_deployments.py b/tests/router_unit_tests/test_router_adding_deployments.py index aabaf37f1d..53fe7347d3 100644 --- a/tests/router_unit_tests/test_router_adding_deployments.py +++ b/tests/router_unit_tests/test_router_adding_deployments.py @@ -36,11 +36,11 @@ def 
test_initialize_deployment_for_pass_through_success(): ) # Verify the credentials were properly set - from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import ( - vertex_pass_through_router, + from litellm.proxy.pass_through_endpoints.llm_passthrough_endpoints import ( + passthrough_endpoint_router, ) - vertex_creds = vertex_pass_through_router.get_vertex_credentials( + vertex_creds = passthrough_endpoint_router.get_vertex_credentials( project_id="test-project", location="us-central1" ) assert vertex_creds.vertex_project == "test-project" From 79f8e5d87255a160ddc38de45646a136d3c5e966 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 12:57:00 -0700 Subject: [PATCH 104/119] search_context_cost_per_1k_calls --- litellm/model_prices_and_context_window_backup.json | 5 +++++ model_prices_and_context_window.json | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 8733730946..b666c84446 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -15,6 +15,11 @@ "supports_prompt_caching": true, "supports_response_schema": true, "supports_system_messages": true, + "search_context_cost_per_1k_calls": { + "low_context": 0.0000, + "medium_context": 0.0000, + "high_context": 0.0000 + }, "deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD" }, "omni-moderation-latest": { diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 8733730946..b666c84446 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -15,6 +15,11 @@ "supports_prompt_caching": true, "supports_response_schema": true, "supports_system_messages": true, + "search_context_cost_per_1k_calls": { + "low_context": 0.0000, + "medium_context": 0.0000, + "high_context": 0.0000 + }, "deprecation_date": "date when 
the model becomes deprecated in the format YYYY-MM-DD" }, "omni-moderation-latest": { From 119213dc606c9d970e196731b1ea8b7da3a4bad9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 12:59:50 -0700 Subject: [PATCH 105/119] add supports_web_search --- ...odel_prices_and_context_window_backup.json | 29 +++++++++++++++++++ model_prices_and_context_window.json | 29 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index b666c84446..0c3b82e6fa 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -15,6 +15,7 @@ "supports_prompt_caching": true, "supports_response_schema": true, "supports_system_messages": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 0.0000, "medium_context": 0.0000, @@ -80,6 +81,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, @@ -104,6 +106,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, @@ -234,6 +237,32 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } + }, + "gpt-4o-mini-search-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000060, + "input_cost_per_token_batches": 0.000000075, + "output_cost_per_token_batches": 0.00000030, + "cache_read_input_token_cost": 
0.000000075, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index b666c84446..0c3b82e6fa 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -15,6 +15,7 @@ "supports_prompt_caching": true, "supports_response_schema": true, "supports_system_messages": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 0.0000, "medium_context": 0.0000, @@ -80,6 +81,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, @@ -104,6 +106,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, @@ -234,6 +237,32 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_1k_calls": { + "low_context": 30.00, + "medium_context": 35.00, + "high_context": 50.00 + } + }, + "gpt-4o-mini-search-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000060, + "input_cost_per_token_batches": 0.000000075, + "output_cost_per_token_batches": 0.00000030, + "cache_read_input_token_cost": 0.000000075, + "litellm_provider": "openai", + "mode": "chat", + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, "search_context_cost_per_1k_calls": { "low_context": 30.00, "medium_context": 35.00, From e3686e54488d08f840826fcea14c3e8c02cb0e3c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:01:41 -0700 Subject: [PATCH 106/119] supports_web_search --- litellm/utils.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/litellm/utils.py b/litellm/utils.py index 52dbccb0c8..03e69acf4e 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1975,6 +1975,27 @@ def supports_system_messages(model: str, custom_llm_provider: Optional[str]) -> ) +def supports_web_search(model: str, custom_llm_provider: Optional[str]) -> bool: + """ + Check if the given model supports web search and return a boolean value. + + Parameters: + model (str): The model name to be checked. + custom_llm_provider (str): The provider to be checked. + + Returns: + bool: True if the model supports web search, False otherwise. + + Raises: + Exception: If the given model is not found in model_prices_and_context_window.json. + """ + return _supports_factory( + model=model, + custom_llm_provider=custom_llm_provider, + key="supports_web_search", + ) + + def supports_native_streaming(model: str, custom_llm_provider: Optional[str]) -> bool: """ Check if the given model supports native streaming and return a boolean value. 
From 9235caae83dfa39125baf7dd6ca42de51d3c6965 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:04:24 -0700 Subject: [PATCH 107/119] search_context_cost_per_1k_calls --- ...odel_prices_and_context_window_backup.json | 36 +++++++++---------- model_prices_and_context_window.json | 36 +++++++++---------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 0c3b82e6fa..33d01523e1 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -17,9 +17,9 @@ "supports_system_messages": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 0.0000, - "medium_context": 0.0000, - "high_context": 0.0000 + "search_context_size_low": 0.0000, + "search_context_size_medium": 0.0000, + "search_context_size_high": 0.0000 }, "deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD" }, @@ -83,9 +83,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, "gpt-4o-search-preview": { @@ -108,9 +108,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, "gpt-4.5-preview": { @@ -239,9 +239,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 25.00, + "search_context_size_medium": 27.50, + 
"search_context_size_high": 30.00 } }, "gpt-4o-mini-search-preview": { @@ -264,9 +264,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 25.00, + "search_context_size_medium": 27.50, + "search_context_size_high": 30.00 } }, "gpt-4o-mini-2024-07-18": { @@ -288,9 +288,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, "o1-pro": { diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 0c3b82e6fa..33d01523e1 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -17,9 +17,9 @@ "supports_system_messages": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 0.0000, - "medium_context": 0.0000, - "high_context": 0.0000 + "search_context_size_low": 0.0000, + "search_context_size_medium": 0.0000, + "search_context_size_high": 0.0000 }, "deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD" }, @@ -83,9 +83,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, "gpt-4o-search-preview": { @@ -108,9 +108,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, 
"gpt-4.5-preview": { @@ -239,9 +239,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 25.00, + "search_context_size_medium": 27.50, + "search_context_size_high": 30.00 } }, "gpt-4o-mini-search-preview": { @@ -264,9 +264,9 @@ "supports_tool_choice": true, "supports_web_search": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 25.00, + "search_context_size_medium": 27.50, + "search_context_size_high": 30.00 } }, "gpt-4o-mini-2024-07-18": { @@ -288,9 +288,9 @@ "supports_system_messages": true, "supports_tool_choice": true, "search_context_cost_per_1k_calls": { - "low_context": 30.00, - "medium_context": 35.00, - "high_context": 50.00 + "search_context_size_low": 30.00, + "search_context_size_medium": 35.00, + "search_context_size_high": 50.00 } }, "o1-pro": { From 386c173de7e5d4dd0b5592fd95dc3b7e191634d7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:07:17 -0700 Subject: [PATCH 108/119] search_context_cost_per_query --- ...odel_prices_and_context_window_backup.json | 36 +++++++++---------- model_prices_and_context_window.json | 36 +++++++++---------- 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 33d01523e1..6992e1afcf 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -16,7 +16,7 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { + "search_context_cost_per_query": { "search_context_size_low": 0.0000, "search_context_size_medium": 0.0000, "search_context_size_high": 0.0000 @@ -82,10 +82,10 @@ 
"supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 30.00, - "search_context_size_medium": 35.00, - "search_context_size_high": 50.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 } }, "gpt-4o-search-preview": { @@ -107,10 +107,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 30.00, - "search_context_size_medium": 35.00, - "search_context_size_high": 50.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 } }, "gpt-4.5-preview": { @@ -238,10 +238,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 25.00, - "search_context_size_medium": 27.50, - "search_context_size_high": 30.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 } }, "gpt-4o-mini-search-preview": { @@ -263,10 +263,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 25.00, - "search_context_size_medium": 27.50, - "search_context_size_high": 30.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 } }, "gpt-4o-mini-2024-07-18": { @@ -287,7 +287,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, - "search_context_cost_per_1k_calls": { + "search_context_cost_per_query": { "search_context_size_low": 30.00, 
"search_context_size_medium": 35.00, "search_context_size_high": 50.00 diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 33d01523e1..6992e1afcf 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -16,7 +16,7 @@ "supports_response_schema": true, "supports_system_messages": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { + "search_context_cost_per_query": { "search_context_size_low": 0.0000, "search_context_size_medium": 0.0000, "search_context_size_high": 0.0000 @@ -82,10 +82,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 30.00, - "search_context_size_medium": 35.00, - "search_context_size_high": 50.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 } }, "gpt-4o-search-preview": { @@ -107,10 +107,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 30.00, - "search_context_size_medium": 35.00, - "search_context_size_high": 50.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 } }, "gpt-4.5-preview": { @@ -238,10 +238,10 @@ "supports_system_messages": true, "supports_tool_choice": true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 25.00, - "search_context_size_medium": 27.50, - "search_context_size_high": 30.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 } }, "gpt-4o-mini-search-preview": { @@ -263,10 +263,10 @@ "supports_system_messages": true, "supports_tool_choice": 
true, "supports_web_search": true, - "search_context_cost_per_1k_calls": { - "search_context_size_low": 25.00, - "search_context_size_medium": 27.50, - "search_context_size_high": 30.00 + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 } }, "gpt-4o-mini-2024-07-18": { @@ -287,7 +287,7 @@ "supports_prompt_caching": true, "supports_system_messages": true, "supports_tool_choice": true, - "search_context_cost_per_1k_calls": { + "search_context_cost_per_query": { "search_context_size_low": 30.00, "search_context_size_medium": 35.00, "search_context_size_high": 50.00 From 70f25828344e4114edf2db7c73c36e3b32193356 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:08:57 -0700 Subject: [PATCH 109/119] search_context_cost_per_query test --- tests/local_testing/test_get_model_info.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/local_testing/test_get_model_info.py b/tests/local_testing/test_get_model_info.py index f885a61e78..4cca96481d 100644 --- a/tests/local_testing/test_get_model_info.py +++ b/tests/local_testing/test_get_model_info.py @@ -518,12 +518,12 @@ def test_aaamodel_prices_and_context_window_json_is_valid(): ], }, }, - "search_context_cost_per_1k_calls": { + "search_context_cost_per_query": { "type": "object", "properties": { - "low_context": {"type": "number"}, - "medium_context": {"type": "number"}, - "high_context": {"type": "number"}, + "search_context_size_low": {"type": "number"}, + "search_context_size_medium": {"type": "number"}, + "search_context_size_high": {"type": "number"}, }, "additionalProperties": False, }, From 8fd564acda4aac2b27074d17f4d670379d6637ab Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:32:22 -0700 Subject: [PATCH 110/119] add supports_web_search --- tests/local_testing/test_get_model_info.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/tests/local_testing/test_get_model_info.py b/tests/local_testing/test_get_model_info.py index 4cca96481d..1a0f6d7a8d 100644 --- a/tests/local_testing/test_get_model_info.py +++ b/tests/local_testing/test_get_model_info.py @@ -500,6 +500,7 @@ def test_aaamodel_prices_and_context_window_json_is_valid(): "supports_tool_choice": {"type": "boolean"}, "supports_video_input": {"type": "boolean"}, "supports_vision": {"type": "boolean"}, + "supports_web_search": {"type": "boolean"}, "tool_use_system_prompt_tokens": {"type": "number"}, "tpm": {"type": "number"}, "supported_endpoints": { From f8cfdb5c6b88611ba625d9d466c67c267ce7888c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 13:44:44 -0700 Subject: [PATCH 111/119] fix supports_web_search --- .../model_prices_and_context_window_backup.json | 16 ++-------------- model_prices_and_context_window.json | 16 ++-------------- 2 files changed, 4 insertions(+), 28 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 6992e1afcf..c07607d2ba 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -105,13 +105,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 0.035, - "search_context_size_high": 0.050 - } + "supports_tool_choice": true }, "gpt-4.5-preview": { "max_tokens": 16384, @@ -236,13 +230,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.025, - "search_context_size_medium": 0.0275, - "search_context_size_high": 0.030 - } + "supports_tool_choice": true }, 
"gpt-4o-mini-search-preview": { "max_tokens": 16384, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 6992e1afcf..c07607d2ba 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -105,13 +105,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 0.035, - "search_context_size_high": 0.050 - } + "supports_tool_choice": true }, "gpt-4.5-preview": { "max_tokens": 16384, @@ -236,13 +230,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.025, - "search_context_size_medium": 0.0275, - "search_context_size_high": 0.030 - } + "supports_tool_choice": true }, "gpt-4o-mini-search-preview": { "max_tokens": 16384, From a3a105c06870d1048bdc926fa44e44e1159cbc8c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 14:02:00 -0700 Subject: [PATCH 112/119] fix supports_web_search --- .../model_prices_and_context_window_backup.json | 16 ++++++++-------- model_prices_and_context_window.json | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index c07607d2ba..6131afdd51 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -80,13 +80,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 
0.035, - "search_context_size_high": 0.050 - } + "supports_tool_choice": true }, "gpt-4o-search-preview": { "max_tokens": 16384, @@ -105,7 +99,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4.5-preview": { "max_tokens": 16384, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index c07607d2ba..6131afdd51 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -80,13 +80,7 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 0.035, - "search_context_size_high": 0.050 - } + "supports_tool_choice": true }, "gpt-4o-search-preview": { "max_tokens": 16384, @@ -105,7 +99,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4.5-preview": { "max_tokens": 16384, From bf3f1fb79a9367a939963ec1f215fb1c78fd3002 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 22 Mar 2025 14:07:26 -0700 Subject: [PATCH 113/119] docs(vertex_ai.md): document supported vertex passthrough flows --- .../my-website/docs/pass_through/vertex_ai.md | 103 ++++++++++++++++-- 1 file changed, 94 insertions(+), 9 deletions(-) diff --git a/docs/my-website/docs/pass_through/vertex_ai.md 
b/docs/my-website/docs/pass_through/vertex_ai.md index ce366af541..4918d889ed 100644 --- a/docs/my-website/docs/pass_through/vertex_ai.md +++ b/docs/my-website/docs/pass_through/vertex_ai.md @@ -15,6 +15,91 @@ Pass-through endpoints for Vertex AI - call provider-specific endpoint, in nativ Just replace `https://REGION-aiplatform.googleapis.com` with `LITELLM_PROXY_BASE_URL/vertex_ai` +LiteLLM supports 3 flows for calling Vertex AI endpoints via pass-through: + +1. **Specific Credentials**: Admin sets passthrough credentials for a specific project/region. + +2. **Default Credentials**: Admin sets default credentials. + +3. **Client-Side Credentials**: User can send client-side credentials through to Vertex AI (default behavior - if no default or mapped credentials are found, the request is passed through directly). + + +## Example Usage + + + + +```yaml +model_list: + - model_name: gemini-1.0-pro + litellm_params: + model: vertex_ai/gemini-1.0-pro + vertex_project: adroit-crow-413218 + vertex_region: us-central1 + vertex_credentials: /path/to/credentials.json + use_in_pass_through: true # 👈 KEY CHANGE +``` + + + + + + + +```yaml +default_vertex_config: + vertex_project: adroit-crow-413218 + vertex_region: us-central1 + vertex_credentials: /path/to/credentials.json +``` + + + +```bash +export DEFAULT_VERTEXAI_PROJECT="adroit-crow-413218" +export DEFAULT_VERTEXAI_LOCATION="us-central1" +export DEFAULT_GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json" +``` + + + + + + +Try Gemini 2.0 Flash (curl) + +``` +MODEL_ID="gemini-2.0-flash-001" +PROJECT_ID="YOUR_PROJECT_ID" +``` + +```bash +curl \ + -X POST \ + -H "Authorization: Bearer $(gcloud auth application-default print-access-token)" \ + -H "Content-Type: application/json" \ + "${LITELLM_PROXY_BASE_URL}/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${MODEL_ID}:streamGenerateContent" -d \ + $'{ + "contents": { + "role": "user", + "parts": [ + { + "fileData": { + "mimeType": 
"image/png", + "fileUri": "gs://generativeai-downloads/images/scones.jpg" + } + }, + { + "text": "Describe this picture." + } + ] + } + }' +``` + + + + #### **Example Usage** @@ -22,7 +107,7 @@ Just replace `https://REGION-aiplatform.googleapis.com` with `LITELLM_PROXY_BASE ```bash -curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.0-pro:generateContent \ +curl http://localhost:4000/vertex_ai/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/${MODEL_ID}:generateContent \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{ @@ -101,7 +186,7 @@ litellm Let's call the Google AI Studio token counting endpoint ```bash -curl http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent \ +curl http://localhost:4000/vertex-ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \ -H "Content-Type: application/json" \ -H "Authorization: Bearer sk-1234" \ -d '{ @@ -140,7 +225,7 @@ LiteLLM Proxy Server supports two methods of authentication to Vertex AI: ```shell -curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-001:generateContent \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.5-flash-001:generateContent \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{"contents":[{"role": "user", "parts":[{"text": "hi"}]}]}' @@ -152,7 +237,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-0 ```shell -curl http://localhost:4000/vertex_ai/publishers/google/models/textembedding-gecko@001:predict \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/textembedding-gecko@001:predict \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{"instances":[{"content": "gm"}]}' @@ 
-162,7 +247,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/textembedding-geck ### Imagen API ```shell -curl http://localhost:4000/vertex_ai/publishers/google/models/imagen-3.0-generate-001:predict \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/imagen-3.0-generate-001:predict \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{"instances":[{"prompt": "make an otter"}], "parameters": {"sampleCount": 1}}' @@ -172,7 +257,7 @@ curl http://localhost:4000/vertex_ai/publishers/google/models/imagen-3.0-generat ### Count Tokens API ```shell -curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.5-flash-001:countTokens \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.5-flash-001:countTokens \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{"contents":[{"role": "user", "parts":[{"text": "hi"}]}]}' @@ -183,7 +268,7 @@ Create Fine Tuning Job ```shell -curl http://localhost:4000/vertex_ai/tuningJobs \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.5-flash-001:tuningJobs \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{ @@ -243,7 +328,7 @@ Expected Response ```bash -curl http://localhost:4000/vertex_ai/publishers/google/models/gemini-1.0-pro:generateContent \ +curl http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -d '{ @@ -268,7 +353,7 @@ tags: ["vertex-js-sdk", "pass-through-endpoint"] ```bash -curl http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent \ +curl 
http://localhost:4000/vertex_ai/v1/projects/${PROJECT_ID}/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent \ -H "Content-Type: application/json" \ -H "x-litellm-api-key: Bearer sk-1234" \ -H "tags: vertex-js-sdk,pass-through-endpoint" \ From 70b3370b71c96eec4bf087d60e261e7bf8471fb8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 14:29:57 -0700 Subject: [PATCH 114/119] add gpt-4o-search-preview-2025-03-11 to model cost map --- ...odel_prices_and_context_window_backup.json | 25 +++++++++++++++++++ model_prices_and_context_window.json | 25 +++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 6131afdd51..9e1a92cf38 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -82,6 +82,31 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "gpt-4o-search-preview-2025-03-11": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + "output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } + }, "gpt-4o-search-preview": { "max_tokens": 16384, "max_input_tokens": 128000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 
6131afdd51..9e1a92cf38 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -82,6 +82,31 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "gpt-4o-search-preview-2025-03-11": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + "output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } + }, "gpt-4o-search-preview": { "max_tokens": 16384, "max_input_tokens": 128000, From dc0db29431bd4d5a5a0c3696424efe51e9823d9e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 14:31:04 -0700 Subject: [PATCH 115/119] gpt-4o-mini-search-preview-2025-03-11 --- ...odel_prices_and_context_window_backup.json | 25 +++++++++++++++++++ model_prices_and_context_window.json | 25 +++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 9e1a92cf38..330fe890e4 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -257,6 +257,31 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "gpt-4o-mini-search-preview-2025-03-11":{ + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000015, + 
"output_cost_per_token": 0.00000060, + "input_cost_per_token_batches": 0.000000075, + "output_cost_per_token_batches": 0.00000030, + "cache_read_input_token_cost": 0.000000075, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 + } + }, "gpt-4o-mini-search-preview": { "max_tokens": 16384, "max_input_tokens": 128000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 9e1a92cf38..330fe890e4 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -257,6 +257,31 @@ "supports_system_messages": true, "supports_tool_choice": true }, + "gpt-4o-mini-search-preview-2025-03-11":{ + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.00000060, + "input_cost_per_token_batches": 0.000000075, + "output_cost_per_token_batches": 0.00000030, + "cache_read_input_token_cost": 0.000000075, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 + } + }, "gpt-4o-mini-search-preview": { "max_tokens": 16384, "max_input_tokens": 128000, From 
02d0c81c2b1917835f0b8205b7df9cd80f254fe3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 16:58:48 -0700 Subject: [PATCH 116/119] fix model cost map --- ...odel_prices_and_context_window_backup.json | 60 +++++++++++-------- model_prices_and_context_window.json | 60 +++++++++++-------- 2 files changed, 72 insertions(+), 48 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 330fe890e4..61f2703f55 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -80,32 +80,38 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-search-preview-2025-03-11": { "max_tokens": 16384, - "max_input_tokens": 128000, - "max_output_tokens": 16384, - "input_cost_per_token": 0.0000025, - "output_cost_per_token": 0.000010, - "input_cost_per_token_batches": 0.00000125, - "output_cost_per_token_batches": 0.00000500, - "cache_read_input_token_cost": 0.00000125, - "litellm_provider": "openai", - "mode": "chat", - "supports_function_calling": true, - "supports_parallel_function_calling": true, - "supports_response_schema": true, - "supports_vision": true, - "supports_prompt_caching": true, - "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 0.035, - "search_context_size_high": 0.050 - } + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + 
"output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-search-preview": { "max_tokens": 16384, @@ -255,7 +261,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 + } }, "gpt-4o-mini-search-preview-2025-03-11":{ "max_tokens": 16384, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 330fe890e4..61f2703f55 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -80,32 +80,38 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-search-preview-2025-03-11": { "max_tokens": 16384, - "max_input_tokens": 128000, - "max_output_tokens": 16384, - "input_cost_per_token": 0.0000025, - "output_cost_per_token": 0.000010, - "input_cost_per_token_batches": 0.00000125, - "output_cost_per_token_batches": 0.00000500, - "cache_read_input_token_cost": 0.00000125, - "litellm_provider": "openai", - "mode": 
"chat", - "supports_function_calling": true, - "supports_parallel_function_calling": true, - "supports_response_schema": true, - "supports_vision": true, - "supports_prompt_caching": true, - "supports_system_messages": true, - "supports_tool_choice": true, - "supports_web_search": true, - "search_context_cost_per_query": { - "search_context_size_low": 0.030, - "search_context_size_medium": 0.035, - "search_context_size_high": 0.050 - } + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.0000025, + "output_cost_per_token": 0.000010, + "input_cost_per_token_batches": 0.00000125, + "output_cost_per_token_batches": 0.00000500, + "cache_read_input_token_cost": 0.00000125, + "litellm_provider": "openai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-search-preview": { "max_tokens": 16384, @@ -255,7 +261,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275, + "search_context_size_high": 0.030 + } }, "gpt-4o-mini-search-preview-2025-03-11":{ "max_tokens": 16384, From 99d2ac3f96a135f52321f7d8623a81373dea3071 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 18:14:19 -0700 Subject: [PATCH 117/119] docs clarify /mcp endpoint readiness on litellm --- docs/my-website/docs/mcp.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/my-website/docs/mcp.md 
b/docs/my-website/docs/mcp.md index 6ebbd7a1fa..0706b9adea 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -274,6 +274,13 @@ async with stdio_client(server_params) as (read, write): ## Advanced +:::info + +**This feature is not live yet** — this is a beta interface. Expect it to be live on litellm `v1.63.15` and above. + +::: + + ### Expose MCP tools on LiteLLM Proxy Server This allows you to define tools that can be called by any MCP compatible client. Define your mcp_tools with LiteLLM and all your clients can list and call available tools. From 097fdde0ab44651f5414645d4873df64ae89bdb9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 18:18:47 -0700 Subject: [PATCH 118/119] docs - update this to upcoming features --- docs/my-website/docs/mcp.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/mcp.md b/docs/my-website/docs/mcp.md index 0706b9adea..9f3343e9cd 100644 --- a/docs/my-website/docs/mcp.md +++ b/docs/my-website/docs/mcp.md @@ -272,7 +272,7 @@ async with stdio_client(server_params) as (read, write): -## Advanced +## Upcoming Features :::info From 753f5a104c4725d530fd9da70b33cd7e13da314d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sat, 22 Mar 2025 18:48:33 -0700 Subject: [PATCH 119/119] add gpt-4o-2024-08-06 pricing for web tools --- litellm/model_prices_and_context_window_backup.json | 8 +++++++- model_prices_and_context_window.json | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 61f2703f55..1d4353e3ed 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -548,7 +548,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + 
"search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-2024-11-20": { "max_tokens": 16384, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 61f2703f55..1d4353e3ed 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -548,7 +548,13 @@ "supports_vision": true, "supports_prompt_caching": true, "supports_system_messages": true, - "supports_tool_choice": true + "supports_tool_choice": true, + "supports_web_search": true, + "search_context_cost_per_query": { + "search_context_size_low": 0.030, + "search_context_size_medium": 0.035, + "search_context_size_high": 0.050 + } }, "gpt-4o-2024-11-20": { "max_tokens": 16384,