From 8bf0005012cc81f9496c59733ac1197f8cc02c36 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 00:43:32 +0530 Subject: [PATCH 01/67] (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup --- litellm/proxy/proxy_server.py | 9 ++++----- tests/local_testing/test_proxy_server.py | 6 ++++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index ca6befef6..363ab4efd 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -2993,7 +2993,7 @@ class ProxyStartupEvent: scheduler.start() @classmethod - def _setup_prisma_client( + async def _setup_prisma_client( cls, database_url: Optional[str], proxy_logging_obj: ProxyLogging, @@ -3012,6 +3012,8 @@ class ProxyStartupEvent: except Exception as e: raise e + await prisma_client.connect() + ## Add necessary views to proxy ## asyncio.create_task( prisma_client.check_view_exists() @@ -3033,7 +3035,7 @@ async def startup_event(): # check if DATABASE_URL in environment - load from there if prisma_client is None: _db_url: Optional[str] = get_secret("DATABASE_URL", None) # type: ignore - prisma_client = ProxyStartupEvent._setup_prisma_client( + prisma_client = await ProxyStartupEvent._setup_prisma_client( database_url=_db_url, proxy_logging_obj=proxy_logging_obj, user_api_key_cache=user_api_key_cache, @@ -3123,9 +3125,6 @@ async def startup_event(): prompt_injection_detection_obj.update_environment(router=llm_router) verbose_proxy_logger.debug("prisma_client: %s", prisma_client) - if prisma_client is not None: - await prisma_client.connect() - if prisma_client is not None and master_key is not None: ProxyStartupEvent._add_master_key_hash_to_db( master_key=master_key, diff --git a/tests/local_testing/test_proxy_server.py b/tests/local_testing/test_proxy_server.py index 51ec085ba..808b10db3 100644 --- a/tests/local_testing/test_proxy_server.py +++ b/tests/local_testing/test_proxy_server.py @@ -1909,13 +1909,15 @@ async def test_proxy_server_prisma_setup(): litellm.proxy.proxy_server, "PrismaClient", new=MagicMock() ) as mock_prisma_client: mock_client = mock_prisma_client.return_value # This is the mocked instance + mock_client.connect = AsyncMock() # Mock the connect method mock_client.check_view_exists = AsyncMock() # Mock the check_view_exists method - ProxyStartupEvent._setup_prisma_client( + await ProxyStartupEvent._setup_prisma_client( database_url=os.getenv("DATABASE_URL"), proxy_logging_obj=ProxyLogging(user_api_key_cache=user_api_key_cache), user_api_key_cache=user_api_key_cache, ) - await asyncio.sleep(1) + # Verify our mocked methods were called + mock_client.connect.assert_called_once() mock_client.check_view_exists.assert_called_once() From 7525b6bbaa594a471cc86ff503196a76d527a6a4 Mon Sep 17 00:00:00 2001 From: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> Date: Mon, 4 Nov 2024 14:15:29 -0800 Subject: [PATCH 02/67] Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs --- model_prices_and_context_window.json | 73 ++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 6bc873fc9..132217eb0 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1730,6 +1730,19 @@ "supports_assistant_prefill": true, "supports_prompt_caching": true }, + "claude-3-5-haiku-20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "anthropic", + "mode": "chat", + "supports_function_calling": true, + "tool_use_system_prompt_tokens": 264, + "supports_assistant_prefill": true, + "supports_prompt_caching": true + }, "claude-3-opus-20240229": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -2643,6 +2656,17 @@ "supports_vision": true, "supports_assistant_prefill": true }, + "vertex_ai/claude-3-5-haiku@20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "vertex_ai-anthropic_models", + "mode": "chat", + "supports_function_calling": true, + "supports_assistant_prefill": true + }, "vertex_ai/claude-3-opus@20240229": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -3615,6 +3639,14 @@ "supports_function_calling": true, "supports_vision": true }, + "openrouter/anthropic/claude-3-5-haiku": { + "max_tokens": 200000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + }, "openrouter/anthropic/claude-3-haiku-20240307": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -3627,6 +3659,17 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 264 }, + "openrouter/anthropic/claude-3-5-haiku-20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "tool_use_system_prompt_tokens": 264 + }, "anthropic/claude-3-5-sonnet-20241022": { "max_tokens": 8192, "max_input_tokens": 200000, @@ -4352,6 +4395,16 @@ "supports_function_calling": true, "supports_vision": true }, + "anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -4408,6 +4461,16 @@ "supports_function_calling": true, "supports_vision": true }, + "us.anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "us.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -4464,6 +4527,16 @@ "supports_function_calling": true, "supports_vision": true }, + "eu.anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "eu.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, From 3a6ba0b9558ca0a754cc558fbfddd2bb7b11fda5 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 5 Nov 2024 03:51:26 +0530 Subject: [PATCH 03/67] Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained --- .circleci/config.yml | 2 +- litellm/caching/dual_cache.py | 2 +- litellm/integrations/opentelemetry.py | 15 --- .../exception_mapping_utils.py | 10 ++ litellm/proxy/_new_secret_config.yaml | 8 +- litellm/proxy/auth/auth_checks.py | 98 ++++++++++++++++--- litellm/proxy/auth/user_api_key_auth.py | 12 ++- litellm/proxy/litellm_pre_call_utils.py | 18 +++- litellm/proxy/proxy_server.py | 41 +------- litellm/proxy/utils.py | 1 + litellm/types/services.py | 1 + litellm/utils.py | 12 ++- .../local_testing/test_key_generate_prisma.py | 2 +- tests/local_testing/test_router.py | 1 + 14 files changed, 137 insertions(+), 86 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4734ee2a7..7083be6bd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -392,7 +392,7 @@ jobs: pip install click pip install "boto3==1.34.34" pip install jinja2 - pip install tokenizers + pip install tokenizers=="0.20.0" pip install jsonschema - run: name: Run tests diff --git a/litellm/caching/dual_cache.py b/litellm/caching/dual_cache.py index a55a1a577..ddcd02abe 100644 --- a/litellm/caching/dual_cache.py +++ b/litellm/caching/dual_cache.py @@ -70,7 +70,7 @@ class DualCache(BaseCache): self.redis_batch_cache_expiry = ( default_redis_batch_cache_expiry or litellm.default_redis_batch_cache_expiry - or 5 + or 10 ) self.default_in_memory_ttl = ( default_in_memory_ttl or litellm.default_in_memory_ttl diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index a3bbb244e..a1d4b781a 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -281,21 +281,6 @@ class OpenTelemetry(CustomLogger): # End Parent OTEL Sspan parent_otel_span.end(end_time=self._to_ns(datetime.now())) - async def async_post_call_success_hook( - self, - data: dict, - user_api_key_dict: UserAPIKeyAuth, - response: Union[Any, ModelResponse, EmbeddingResponse, ImageResponse], - ): - from opentelemetry import trace - from opentelemetry.trace import Status, StatusCode - - parent_otel_span = user_api_key_dict.parent_otel_span - if parent_otel_span is not None: - parent_otel_span.set_status(Status(StatusCode.OK)) - # End Parent OTEL Sspan - parent_otel_span.end(end_time=self._to_ns(datetime.now())) - def _handle_sucess(self, kwargs, response_obj, start_time, end_time): from opentelemetry import trace from opentelemetry.trace import Status, StatusCode diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py index 94eb5c623..14d5bffdb 100644 --- a/litellm/litellm_core_utils/exception_mapping_utils.py +++ b/litellm/litellm_core_utils/exception_mapping_utils.py @@ -646,6 +646,16 @@ def exception_type( # type: ignore # noqa: PLR0915 response=original_exception.response, litellm_debug_info=extra_information, ) + elif ( + "The server received an invalid response from an upstream server." + in error_str + ): + exception_mapping_worked = True + raise litellm.InternalServerError( + message=f"{custom_llm_provider}Exception - {original_exception.message}", + llm_provider=custom_llm_provider, + model=model, + ) elif hasattr(original_exception, "status_code"): if original_exception.status_code == 500: exception_mapping_worked = True diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index b9315670a..45a379748 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -35,13 +35,7 @@ litellm_settings: # see https://docs.litellm.ai/docs/proxy/caching#turn-on-batch_redis_requests # see https://docs.litellm.ai/docs/proxy/prometheus - callbacks: ['prometheus', 'otel'] - - # # see https://docs.litellm.ai/docs/proxy/logging#logging-proxy-inputoutput---sentry - failure_callback: ['sentry'] - service_callback: ['prometheus_system'] - - # redact_user_api_key_info: true + callbacks: ['otel'] router_settings: diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index b3f249d6f..e00d494d9 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -18,6 +18,7 @@ from pydantic import BaseModel import litellm from litellm._logging import verbose_proxy_logger from litellm.caching.caching import DualCache +from litellm.caching.dual_cache import LimitedSizeOrderedDict from litellm.proxy._types import ( LiteLLM_EndUserTable, LiteLLM_JWTAuth, @@ -42,6 +43,10 @@ if TYPE_CHECKING: else: Span = Any + +last_db_access_time = LimitedSizeOrderedDict(max_size=100) +db_cache_expiry = 5 # refresh every 5s + all_routes = LiteLLMRoutes.openai_routes.value + LiteLLMRoutes.management_routes.value @@ -383,6 +388,32 @@ def model_in_access_group(model: str, team_models: Optional[List[str]]) -> bool: return False +def _should_check_db( + key: str, last_db_access_time: LimitedSizeOrderedDict, db_cache_expiry: int +) -> bool: + """ + Prevent calling db repeatedly for items that don't exist in the db. + """ + current_time = time.time() + # if key doesn't exist in last_db_access_time -> check db + if key not in last_db_access_time: + return True + elif ( + last_db_access_time[key][0] is not None + ): # check db for non-null values (for refresh operations) + return True + elif last_db_access_time[key][0] is None: + if current_time - last_db_access_time[key] >= db_cache_expiry: + return True + return False + + +def _update_last_db_access_time( + key: str, value: Optional[Any], last_db_access_time: LimitedSizeOrderedDict +): + last_db_access_time[key] = (value, time.time()) + + @log_to_opentelemetry async def get_user_object( user_id: str, @@ -412,11 +443,20 @@ async def get_user_object( if prisma_client is None: raise Exception("No db connected") try: - - response = await prisma_client.db.litellm_usertable.find_unique( - where={"user_id": user_id}, include={"organization_memberships": True} + db_access_time_key = "user_id:{}".format(user_id) + should_check_db = _should_check_db( + key=db_access_time_key, + last_db_access_time=last_db_access_time, + db_cache_expiry=db_cache_expiry, ) + if should_check_db: + response = await prisma_client.db.litellm_usertable.find_unique( + where={"user_id": user_id}, include={"organization_memberships": True} + ) + else: + response = None + if response is None: if user_id_upsert: response = await prisma_client.db.litellm_usertable.create( @@ -444,6 +484,13 @@ async def get_user_object( # save the user object to cache await user_api_key_cache.async_set_cache(key=user_id, value=response_dict) + # save to db access time + _update_last_db_access_time( + key=db_access_time_key, + value=response_dict, + last_db_access_time=last_db_access_time, + ) + return _response except Exception as e: # if user not in db raise ValueError( @@ -515,6 +562,12 @@ async def _delete_cache_key_object( @log_to_opentelemetry +async def _get_team_db_check(team_id: str, prisma_client: PrismaClient): + return await prisma_client.db.litellm_teamtable.find_unique( + where={"team_id": team_id} + ) + + async def get_team_object( team_id: str, prisma_client: Optional[PrismaClient], @@ -544,7 +597,7 @@ async def get_team_object( ): cached_team_obj = ( await proxy_logging_obj.internal_usage_cache.dual_cache.async_get_cache( - key=key + key=key, parent_otel_span=parent_otel_span ) ) @@ -564,9 +617,18 @@ async def get_team_object( # else, check db try: - response = await prisma_client.db.litellm_teamtable.find_unique( - where={"team_id": team_id} + db_access_time_key = "team_id:{}".format(team_id) + should_check_db = _should_check_db( + key=db_access_time_key, + last_db_access_time=last_db_access_time, + db_cache_expiry=db_cache_expiry, ) + if should_check_db: + response = await _get_team_db_check( + team_id=team_id, prisma_client=prisma_client + ) + else: + response = None if response is None: raise Exception @@ -580,6 +642,14 @@ async def get_team_object( proxy_logging_obj=proxy_logging_obj, ) + # save to db access time + # save to db access time + _update_last_db_access_time( + key=db_access_time_key, + value=_response, + last_db_access_time=last_db_access_time, + ) + return _response except Exception: raise Exception( @@ -608,16 +678,16 @@ async def get_key_object( # check if in cache key = hashed_token - cached_team_obj: Optional[UserAPIKeyAuth] = None - if cached_team_obj is None: - cached_team_obj = await user_api_key_cache.async_get_cache(key=key) + cached_key_obj: Optional[UserAPIKeyAuth] = await user_api_key_cache.async_get_cache( + key=key + ) - if cached_team_obj is not None: - if isinstance(cached_team_obj, dict): - return UserAPIKeyAuth(**cached_team_obj) - elif isinstance(cached_team_obj, UserAPIKeyAuth): - return cached_team_obj + if cached_key_obj is not None: + if isinstance(cached_key_obj, dict): + return UserAPIKeyAuth(**cached_key_obj) + elif isinstance(cached_key_obj, UserAPIKeyAuth): + return cached_key_obj if check_cache_only: raise Exception( diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index 995a95f79..d25b6f620 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -1127,11 +1127,13 @@ async def user_api_key_auth( # noqa: PLR0915 api_key = valid_token.token # Add hashed token to cache - await _cache_key_object( - hashed_token=api_key, - user_api_key_obj=valid_token, - user_api_key_cache=user_api_key_cache, - proxy_logging_obj=proxy_logging_obj, + asyncio.create_task( + _cache_key_object( + hashed_token=api_key, + user_api_key_obj=valid_token, + user_api_key_cache=user_api_key_cache, + proxy_logging_obj=proxy_logging_obj, + ) ) valid_token_dict = valid_token.model_dump(exclude_none=True) diff --git a/litellm/proxy/litellm_pre_call_utils.py b/litellm/proxy/litellm_pre_call_utils.py index a34dffccd..789e79f37 100644 --- a/litellm/proxy/litellm_pre_call_utils.py +++ b/litellm/proxy/litellm_pre_call_utils.py @@ -1,4 +1,5 @@ import copy +import time from typing import TYPE_CHECKING, Any, Dict, Optional, Union from fastapi import Request @@ -6,6 +7,7 @@ from starlette.datastructures import Headers import litellm from litellm._logging import verbose_logger, verbose_proxy_logger +from litellm._service_logger import ServiceLogging from litellm.proxy._types import ( AddTeamCallback, CommonProxyErrors, @@ -16,11 +18,15 @@ from litellm.proxy._types import ( UserAPIKeyAuth, ) from litellm.proxy.auth.auth_utils import get_request_route +from litellm.types.services import ServiceTypes from litellm.types.utils import ( StandardLoggingUserAPIKeyMetadata, SupportedCacheControls, ) +service_logger_obj = ServiceLogging() # used for tracking latency on OTEL + + if TYPE_CHECKING: from litellm.proxy.proxy_server import ProxyConfig as _ProxyConfig @@ -471,7 +477,7 @@ async def add_litellm_data_to_request( # noqa: PLR0915 ### END-USER SPECIFIC PARAMS ### if user_api_key_dict.allowed_model_region is not None: data["allowed_model_region"] = user_api_key_dict.allowed_model_region - + start_time = time.time() ## [Enterprise Only] # Add User-IP Address requester_ip_address = "" @@ -539,6 +545,16 @@ async def add_litellm_data_to_request( # noqa: PLR0915 verbose_proxy_logger.debug( f"[PROXY]returned data from litellm_pre_call_utils: {data}" ) + + end_time = time.time() + await service_logger_obj.async_service_success_hook( + service=ServiceTypes.PROXY_PRE_CALL, + duration=end_time - start_time, + call_type="add_litellm_data_to_request", + start_time=start_time, + end_time=end_time, + parent_otel_span=user_api_key_dict.parent_otel_span, + ) return data diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 363ab4efd..37cbd2b82 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -1373,9 +1373,6 @@ class ProxyConfig: ) -> dict: """ Given a config file path, load the config from the file. - - If `store_model_in_db` is True, then read the DB and update the config with the DB values. - Args: config_file_path (str): path to the config file Returns: @@ -1401,40 +1398,6 @@ class ProxyConfig: "litellm_settings": {}, } - ## DB - if prisma_client is not None and ( - general_settings.get("store_model_in_db", False) is True - or store_model_in_db is True - ): - _tasks = [] - keys = [ - "general_settings", - "router_settings", - "litellm_settings", - "environment_variables", - ] - for k in keys: - response = prisma_client.get_generic_data( - key="param_name", value=k, table_name="config" - ) - _tasks.append(response) - - responses = await asyncio.gather(*_tasks) - for response in responses: - if response is not None: - param_name = getattr(response, "param_name", None) - param_value = getattr(response, "param_value", None) - if param_name is not None and param_value is not None: - # check if param_name is already in the config - if param_name in config: - if isinstance(config[param_name], dict): - config[param_name].update(param_value) - else: - config[param_name] = param_value - else: - # if it's not in the config - then add it - config[param_name] = param_value - return config async def save_config(self, new_config: dict): @@ -1500,8 +1463,10 @@ class ProxyConfig: - for a given team id - return the relevant completion() call params """ + # load existing config config = await self.get_config() + ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..) litellm_settings = config.get("litellm_settings", {}) all_teams_config = litellm_settings.get("default_team_settings", None) @@ -8824,7 +8789,7 @@ async def update_config(config_info: ConfigYAML): # noqa: PLR0915 if k == "alert_to_webhook_url": # check if slack is already enabled. if not, enable it if "alerting" not in _existing_settings: - _existing_settings["alerting"].append("slack") + _existing_settings = {"alerting": ["slack"]} elif isinstance(_existing_settings["alerting"], list): if "slack" not in _existing_settings["alerting"]: _existing_settings["alerting"].append("slack") diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 82831b3b2..44243cab0 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -1400,6 +1400,7 @@ class PrismaClient: return + @log_to_opentelemetry @backoff.on_exception( backoff.expo, Exception, # base exception to catch for the backoff diff --git a/litellm/types/services.py b/litellm/types/services.py index 5f690f328..cfa427ebc 100644 --- a/litellm/types/services.py +++ b/litellm/types/services.py @@ -16,6 +16,7 @@ class ServiceTypes(str, enum.Enum): LITELLM = "self" ROUTER = "router" AUTH = "auth" + PROXY_PRE_CALL = "proxy_pre_call" class ServiceLoggerPayload(BaseModel): diff --git a/litellm/utils.py b/litellm/utils.py index 70f43e512..8bd001def 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1539,9 +1539,15 @@ def create_pretrained_tokenizer( dict: A dictionary with the tokenizer and its type. """ - tokenizer = Tokenizer.from_pretrained( - identifier, revision=revision, auth_token=auth_token - ) + try: + tokenizer = Tokenizer.from_pretrained( + identifier, revision=revision, auth_token=auth_token + ) + except Exception as e: + verbose_logger.error( + f"Error creating pretrained tokenizer: {e}. Defaulting to version without 'auth_token'." + ) + tokenizer = Tokenizer.from_pretrained(identifier, revision=revision) return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} diff --git a/tests/local_testing/test_key_generate_prisma.py b/tests/local_testing/test_key_generate_prisma.py index 74182c09f..e009e214c 100644 --- a/tests/local_testing/test_key_generate_prisma.py +++ b/tests/local_testing/test_key_generate_prisma.py @@ -2717,7 +2717,7 @@ async def test_update_user_role(prisma_client): ) ) - await asyncio.sleep(2) + # await asyncio.sleep(3) # use generated key to auth in print("\n\nMAKING NEW REQUEST WITH UPDATED USER ROLE\n\n") diff --git a/tests/local_testing/test_router.py b/tests/local_testing/test_router.py index 7bf0b0bba..5ffdbc7ac 100644 --- a/tests/local_testing/test_router.py +++ b/tests/local_testing/test_router.py @@ -2486,6 +2486,7 @@ async def test_aaarouter_dynamic_cooldown_message_retry_time(sync_mode): @pytest.mark.parametrize("sync_mode", [True, False]) @pytest.mark.asyncio() +@pytest.mark.flaky(retries=6, delay=1) async def test_router_weighted_pick(sync_mode): router = Router( model_list=[ From cc84b09b953a03822a0d1fe04bdb94706d87028d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 5 Nov 2024 04:01:04 +0530 Subject: [PATCH 04/67] build: fix map --- ...odel_prices_and_context_window_backup.json | 73 +++++++++++++++++++ model_prices_and_context_window.json | 2 +- 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 6bc873fc9..4cb8e06b6 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1730,6 +1730,19 @@ "supports_assistant_prefill": true, "supports_prompt_caching": true }, + "claude-3-5-haiku-20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "anthropic", + "mode": "chat", + "supports_function_calling": true, + "tool_use_system_prompt_tokens": 264, + "supports_assistant_prefill": true, + "supports_prompt_caching": true + }, "claude-3-opus-20240229": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -2643,6 +2656,17 @@ "supports_vision": true, "supports_assistant_prefill": true }, + "vertex_ai/claude-3-5-haiku@20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "vertex_ai-anthropic_models", + "mode": "chat", + "supports_function_calling": true, + "supports_assistant_prefill": true + }, "vertex_ai/claude-3-opus@20240229": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -3615,6 +3639,14 @@ "supports_function_calling": true, "supports_vision": true }, + "openrouter/anthropic/claude-3-5-haiku": { + "max_tokens": 200000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true + }, "openrouter/anthropic/claude-3-haiku-20240307": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -3627,6 +3659,17 @@ "supports_vision": true, "tool_use_system_prompt_tokens": 264 }, + "openrouter/anthropic/claude-3-5-haiku-20241022": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "openrouter", + "mode": "chat", + "supports_function_calling": true, + "tool_use_system_prompt_tokens": 264 + }, "anthropic/claude-3-5-sonnet-20241022": { "max_tokens": 8192, "max_input_tokens": 200000, @@ -4352,6 +4395,16 @@ "supports_function_calling": true, "supports_vision": true }, + "anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -4408,6 +4461,16 @@ "supports_function_calling": true, "supports_vision": true }, + "us.anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "us.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, @@ -4464,6 +4527,16 @@ "supports_function_calling": true, "supports_vision": true }, + "eu.anthropic.claude-3-5-haiku-20241022-v1:0": { + "max_tokens": 4096, + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000005, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_function_calling": true, + }, "eu.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, "max_input_tokens": 200000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 132217eb0..4cb8e06b6 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -3645,7 +3645,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "openrouter", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "openrouter/anthropic/claude-3-haiku-20240307": { "max_tokens": 4096, From 4debf4ecebac6602d91bc8e4257c7694122e61f8 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 5 Nov 2024 04:01:31 +0530 Subject: [PATCH 05/67] build: fix map --- litellm/model_prices_and_context_window_backup.json | 2 +- model_prices_and_context_window.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 4cb8e06b6..584170de8 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -4403,7 +4403,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 4cb8e06b6..584170de8 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -4403,7 +4403,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, From 8ce53be4987df0e6dbc3883efbbed7f8668ffe25 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 5 Nov 2024 04:04:55 +0530 Subject: [PATCH 06/67] build: fix json for model map --- litellm/model_prices_and_context_window_backup.json | 4 ++-- model_prices_and_context_window.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 584170de8..a37a431dc 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -4469,7 +4469,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "us.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, @@ -4535,7 +4535,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "eu.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 584170de8..a37a431dc 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -4469,7 +4469,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "us.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, @@ -4535,7 +4535,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true, + "supports_function_calling": true }, "eu.anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, From 67ddf55ebde35c56e8d5b5e1208289bd919db1ed Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 05:16:43 +0530 Subject: [PATCH 07/67] fix ImageObject conversion (#6584) --- litellm/types/utils.py | 2 +- .../test_convert_dict_to_image.py | 119 ++++++++++++++++++ 2 files changed, 120 insertions(+), 1 deletion(-) create mode 100644 tests/llm_translation/test_convert_dict_to_image.py diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 2d0e262fe..942750416 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -1145,7 +1145,7 @@ class ImageObject(OpenAIImage): url: Optional[str] = None revised_prompt: Optional[str] = None - def __init__(self, b64_json=None, url=None, revised_prompt=None): + def __init__(self, b64_json=None, url=None, revised_prompt=None, **kwargs): super().__init__(b64_json=b64_json, url=url, revised_prompt=revised_prompt) # type: ignore def __contains__(self, key): diff --git a/tests/llm_translation/test_convert_dict_to_image.py b/tests/llm_translation/test_convert_dict_to_image.py new file mode 100644 index 000000000..87c415ecb --- /dev/null +++ b/tests/llm_translation/test_convert_dict_to_image.py @@ -0,0 +1,119 @@ +import json +import os +import sys +from datetime import datetime + +sys.path.insert( + 0, os.path.abspath("../../") +) # Adds the parent directory to the system path + +import litellm +import pytest +from datetime import timedelta +from litellm.types.utils import ImageResponse, ImageObject +from litellm.litellm_core_utils.llm_response_utils.convert_dict_to_response import ( + LiteLLMResponseObjectHandler, +) + + +def test_convert_to_image_response_basic(): + # Test basic conversion with minimal input + response_dict = { + "created": 1234567890, + "data": [{"url": "http://example.com/image.jpg"}], + } + + result = LiteLLMResponseObjectHandler.convert_to_image_response(response_dict) + + assert isinstance(result, ImageResponse) + assert result.created == 1234567890 + assert result.data[0].url == "http://example.com/image.jpg" + + +def test_convert_to_image_response_with_hidden_params(): + # Test with hidden params + response_dict = { + "created": 1234567890, + "data": [{"url": "http://example.com/image.jpg"}], + } + hidden_params = {"api_key": "test_key"} + + result = LiteLLMResponseObjectHandler.convert_to_image_response( + response_dict, hidden_params=hidden_params + ) + + assert result._hidden_params == {"api_key": "test_key"} + + +def test_convert_to_image_response_multiple_images(): + # Test handling multiple images in response + response_dict = { + "created": 1234567890, + "data": [ + {"url": "http://example.com/image1.jpg"}, + {"url": "http://example.com/image2.jpg"}, + ], + } + + result = LiteLLMResponseObjectHandler.convert_to_image_response(response_dict) + + assert len(result.data) == 2 + assert result.data[0].url == "http://example.com/image1.jpg" + assert result.data[1].url == "http://example.com/image2.jpg" + + +def test_convert_to_image_response_with_b64_json(): + # Test handling b64_json in response + response_dict = { + "created": 1234567890, + "data": [{"b64_json": "base64encodedstring"}], + } + + result = LiteLLMResponseObjectHandler.convert_to_image_response(response_dict) + + assert result.data[0].b64_json == "base64encodedstring" + + +def test_convert_to_image_response_with_extra_fields(): + response_dict = { + "created": 1234567890, + "data": [ + { + "url": "http://example.com/image1.jpg", + "content_filter_results": {"category": "violence", "flagged": True}, + }, + { + "url": "http://example.com/image2.jpg", + "content_filter_results": {"category": "violence", "flagged": True}, + }, + ], + } + + result = LiteLLMResponseObjectHandler.convert_to_image_response(response_dict) + + assert result.data[0].url == "http://example.com/image1.jpg" + assert result.data[1].url == "http://example.com/image2.jpg" + + +def test_convert_to_image_response_with_extra_fields_2(): + """ + Date from a non-OpenAI API could have some obscure field in addition to the expected ones. This should not break the conversion. + """ + response_dict = { + "created": 1234567890, + "data": [ + { + "url": "http://example.com/image1.jpg", + "very_obscure_field": "some_value", + }, + { + "url": "http://example.com/image2.jpg", + "very_obscure_field2": "some_other_value", + }, + ], + } + + result = LiteLLMResponseObjectHandler.convert_to_image_response(response_dict) + + assert result.data[0].url == "http://example.com/image1.jpg" + assert result.data[1].url == "http://example.com/image2.jpg" From 58ce30aceed1a97dff33243291b6f27e4fb3ef1d Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 05:17:48 +0530 Subject: [PATCH 08/67] (fix) litellm.text_completion raises a non-blocking error on simple usage (#6546) * unit test test_huggingface_text_completion_logprobs * fix return TextCompletionHandler convert_chat_to_text_completion * fix hf rest api * fix test_huggingface_text_completion_logprobs * fix linting errors * fix importLiteLLMResponseObjectHandler * fix test for LiteLLMResponseObjectHandler * fix test text completion --- .../convert_dict_to_response.py | 77 ++++++++++ litellm/llms/huggingface_restapi.py | 71 +++++++++ litellm/main.py | 53 ++----- litellm/utils.py | 71 +-------- tests/llm_translation/test_text_completion.py | 141 ++++++++++++++++++ .../test_text_completion_unit_tests.py | 72 +++++++++ 6 files changed, 374 insertions(+), 111 deletions(-) create mode 100644 tests/llm_translation/test_text_completion.py diff --git a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py index 76077ad46..93926a81f 100644 --- a/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py +++ b/litellm/litellm_core_utils/llm_response_utils/convert_dict_to_response.py @@ -14,11 +14,17 @@ from litellm.types.utils import ( Delta, EmbeddingResponse, Function, + HiddenParams, ImageResponse, +) +from litellm.types.utils import Logprobs as TextCompletionLogprobs +from litellm.types.utils import ( Message, ModelResponse, RerankResponse, StreamingChoices, + TextChoices, + TextCompletionResponse, TranscriptionResponse, Usage, ) @@ -235,6 +241,77 @@ class LiteLLMResponseObjectHandler: model_response_object = ImageResponse(**model_response_dict) return model_response_object + @staticmethod + def convert_chat_to_text_completion( + response: ModelResponse, + text_completion_response: TextCompletionResponse, + custom_llm_provider: Optional[str] = None, + ) -> TextCompletionResponse: + """ + Converts a chat completion response to a text completion response format. + + Note: This is used for huggingface. For OpenAI / Azure Text the providers files directly return TextCompletionResponse which we then send to user + + Args: + response (ModelResponse): The chat completion response to convert + + Returns: + TextCompletionResponse: The converted text completion response + + Example: + chat_response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi"}]) + text_response = convert_chat_to_text_completion(chat_response) + """ + transformed_logprobs = LiteLLMResponseObjectHandler._convert_provider_response_logprobs_to_text_completion_logprobs( + response=response, + custom_llm_provider=custom_llm_provider, + ) + + text_completion_response["id"] = response.get("id", None) + text_completion_response["object"] = "text_completion" + text_completion_response["created"] = response.get("created", None) + text_completion_response["model"] = response.get("model", None) + choices_list: List[TextChoices] = [] + + # Convert each choice to TextChoices + for choice in response["choices"]: + text_choices = TextChoices() + text_choices["text"] = choice["message"]["content"] + text_choices["index"] = choice["index"] + text_choices["logprobs"] = transformed_logprobs + text_choices["finish_reason"] = choice["finish_reason"] + choices_list.append(text_choices) + + text_completion_response["choices"] = choices_list + text_completion_response["usage"] = response.get("usage", None) + text_completion_response._hidden_params = HiddenParams( + **response._hidden_params + ) + return text_completion_response + + @staticmethod + def _convert_provider_response_logprobs_to_text_completion_logprobs( + response: ModelResponse, + custom_llm_provider: Optional[str] = None, + ) -> Optional[TextCompletionLogprobs]: + """ + Convert logprobs from provider to OpenAI.Completion() format + + Only supported for HF TGI models + """ + transformed_logprobs: Optional[TextCompletionLogprobs] = None + if custom_llm_provider == "huggingface": + # only supported for TGI models + try: + raw_response = response._hidden_params.get("original_response", None) + transformed_logprobs = litellm.huggingface._transform_logprobs( + hf_response=raw_response + ) + except Exception as e: + verbose_logger.exception(f"LiteLLM non blocking exception: {e}") + + return transformed_logprobs + def convert_to_model_response_object( # noqa: PLR0915 response_object: Optional[dict] = None, diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py index 67db83ba2..907d72a60 100644 --- a/litellm/llms/huggingface_restapi.py +++ b/litellm/llms/huggingface_restapi.py @@ -15,6 +15,7 @@ import litellm from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from litellm.secret_managers.main import get_secret_str from litellm.types.completion import ChatCompletionMessageToolCallParam +from litellm.types.utils import Logprobs as TextCompletionLogprobs from litellm.utils import Choices, CustomStreamWrapper, Message, ModelResponse, Usage from .base import BaseLLM @@ -1183,3 +1184,73 @@ class Huggingface(BaseLLM): input=input, encoding=encoding, ) + + def _transform_logprobs( + self, hf_response: Optional[List] + ) -> Optional[TextCompletionLogprobs]: + """ + Transform Hugging Face logprobs to OpenAI.Completion() format + """ + if hf_response is None: + return None + + # Initialize an empty list for the transformed logprobs + _logprob: TextCompletionLogprobs = TextCompletionLogprobs( + text_offset=[], + token_logprobs=[], + tokens=[], + top_logprobs=[], + ) + + # For each Hugging Face response, transform the logprobs + for response in hf_response: + # Extract the relevant information from the response + response_details = response["details"] + top_tokens = response_details.get("top_tokens", {}) + + for i, token in enumerate(response_details["prefill"]): + # Extract the text of the token + token_text = token["text"] + + # Extract the logprob of the token + token_logprob = token["logprob"] + + # Add the token information to the 'token_info' list + _logprob.tokens.append(token_text) + _logprob.token_logprobs.append(token_logprob) + + # stub this to work with llm eval harness + top_alt_tokens = {"": -1.0, "": -2.0, "": -3.0} # noqa: F601 + _logprob.top_logprobs.append(top_alt_tokens) + + # For each element in the 'tokens' list, extract the relevant information + for i, token in enumerate(response_details["tokens"]): + # Extract the text of the token + token_text = token["text"] + + # Extract the logprob of the token + token_logprob = token["logprob"] + + top_alt_tokens = {} + temp_top_logprobs = [] + if top_tokens != {}: + temp_top_logprobs = top_tokens[i] + + # top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 } + for elem in temp_top_logprobs: + text = elem["text"] + logprob = elem["logprob"] + top_alt_tokens[text] = logprob + + # Add the token information to the 'token_info' list + _logprob.tokens.append(token_text) + _logprob.token_logprobs.append(token_logprob) + _logprob.top_logprobs.append(top_alt_tokens) + + # Add the text offset of the token + # This is computed as the sum of the lengths of all previous tokens + _logprob.text_offset.append( + sum(len(t["text"]) for t in response_details["tokens"][:i]) + ) + + return _logprob diff --git a/litellm/main.py b/litellm/main.py index a964ba7e6..2f3a2ea2b 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -3867,34 +3867,17 @@ async def atext_completion( custom_llm_provider=custom_llm_provider, ) else: - transformed_logprobs = None - # only supported for TGI models - try: - raw_response = response._hidden_params.get("original_response", None) - transformed_logprobs = litellm.utils.transform_logprobs(raw_response) - except Exception as e: - print_verbose(f"LiteLLM non blocking exception: {e}") - - ## TRANSLATE CHAT TO TEXT FORMAT ## + ## OpenAI / Azure Text Completion Returns here if isinstance(response, TextCompletionResponse): return response elif asyncio.iscoroutine(response): response = await response text_completion_response = TextCompletionResponse() - text_completion_response["id"] = response.get("id", None) - text_completion_response["object"] = "text_completion" - text_completion_response["created"] = response.get("created", None) - text_completion_response["model"] = response.get("model", None) - text_choices = TextChoices() - text_choices["text"] = response["choices"][0]["message"]["content"] - text_choices["index"] = response["choices"][0]["index"] - text_choices["logprobs"] = transformed_logprobs - text_choices["finish_reason"] = response["choices"][0]["finish_reason"] - text_completion_response["choices"] = [text_choices] - text_completion_response["usage"] = response.get("usage", None) - text_completion_response._hidden_params = HiddenParams( - **response._hidden_params + text_completion_response = litellm.utils.LiteLLMResponseObjectHandler.convert_chat_to_text_completion( + text_completion_response=text_completion_response, + response=response, + custom_llm_provider=custom_llm_provider, ) return text_completion_response except Exception as e: @@ -4156,29 +4139,17 @@ def text_completion( # noqa: PLR0915 return response elif isinstance(response, TextCompletionStreamWrapper): return response - transformed_logprobs = None - # only supported for TGI models - try: - raw_response = response._hidden_params.get("original_response", None) - transformed_logprobs = litellm.utils.transform_logprobs(raw_response) - except Exception as e: - verbose_logger.exception(f"LiteLLM non blocking exception: {e}") + # OpenAI Text / Azure Text will return here if isinstance(response, TextCompletionResponse): return response - text_completion_response["id"] = response.get("id", None) - text_completion_response["object"] = "text_completion" - text_completion_response["created"] = response.get("created", None) - text_completion_response["model"] = response.get("model", None) - text_choices = TextChoices() - text_choices["text"] = response["choices"][0]["message"]["content"] - text_choices["index"] = response["choices"][0]["index"] - text_choices["logprobs"] = transformed_logprobs - text_choices["finish_reason"] = response["choices"][0]["finish_reason"] - text_completion_response["choices"] = [text_choices] - text_completion_response["usage"] = response.get("usage", None) - text_completion_response._hidden_params = HiddenParams(**response._hidden_params) + text_completion_response = ( + litellm.utils.LiteLLMResponseObjectHandler.convert_chat_to_text_completion( + response=response, + text_completion_response=text_completion_response, + ) + ) return text_completion_response diff --git a/litellm/utils.py b/litellm/utils.py index 8bd001def..0f7ff50a0 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -71,6 +71,7 @@ from litellm.litellm_core_utils.get_llm_provider_logic import ( ) from litellm.litellm_core_utils.llm_request_utils import _ensure_extra_body_is_safe from litellm.litellm_core_utils.llm_response_utils.convert_dict_to_response import ( + LiteLLMResponseObjectHandler, _handle_invalid_parallel_tool_calls, convert_to_model_response_object, convert_to_streaming_response, @@ -8388,76 +8389,6 @@ def get_valid_models() -> List[str]: return [] # NON-Blocking -# used for litellm.text_completion() to transform HF logprobs to OpenAI.Completion() format -def transform_logprobs(hf_response): - # Initialize an empty list for the transformed logprobs - transformed_logprobs = [] - - # For each Hugging Face response, transform the logprobs - for response in hf_response: - # Extract the relevant information from the response - response_details = response["details"] - top_tokens = response_details.get("top_tokens", {}) - - # Initialize an empty list for the token information - token_info = { - "tokens": [], - "token_logprobs": [], - "text_offset": [], - "top_logprobs": [], - } - - for i, token in enumerate(response_details["prefill"]): - # Extract the text of the token - token_text = token["text"] - - # Extract the logprob of the token - token_logprob = token["logprob"] - - # Add the token information to the 'token_info' list - token_info["tokens"].append(token_text) - token_info["token_logprobs"].append(token_logprob) - - # stub this to work with llm eval harness - top_alt_tokens = {"": -1, "": -2, "": -3} # noqa: F601 - token_info["top_logprobs"].append(top_alt_tokens) - - # For each element in the 'tokens' list, extract the relevant information - for i, token in enumerate(response_details["tokens"]): - # Extract the text of the token - token_text = token["text"] - - # Extract the logprob of the token - token_logprob = token["logprob"] - - top_alt_tokens = {} - temp_top_logprobs = [] - if top_tokens != {}: - temp_top_logprobs = top_tokens[i] - - # top_alt_tokens should look like this: { "alternative_1": -1, "alternative_2": -2, "alternative_3": -3 } - for elem in temp_top_logprobs: - text = elem["text"] - logprob = elem["logprob"] - top_alt_tokens[text] = logprob - - # Add the token information to the 'token_info' list - token_info["tokens"].append(token_text) - token_info["token_logprobs"].append(token_logprob) - token_info["top_logprobs"].append(top_alt_tokens) - - # Add the text offset of the token - # This is computed as the sum of the lengths of all previous tokens - token_info["text_offset"].append( - sum(len(t["text"]) for t in response_details["tokens"][:i]) - ) - - # Add the 'token_info' list to the 'transformed_logprobs' list - transformed_logprobs = token_info - - return transformed_logprobs - - def print_args_passed_to_litellm(original_function, args, kwargs): try: # we've already printed this for acompletion, don't print for completion diff --git a/tests/llm_translation/test_text_completion.py b/tests/llm_translation/test_text_completion.py new file mode 100644 index 000000000..50c96e6eb --- /dev/null +++ b/tests/llm_translation/test_text_completion.py @@ -0,0 +1,141 @@ +import json +import os +import sys +from datetime import datetime + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path + +import litellm +import pytest + +from litellm.utils import ( + LiteLLMResponseObjectHandler, +) + + +from datetime import timedelta + +from litellm.types.utils import ( + ModelResponse, + TextCompletionResponse, + TextChoices, + Logprobs as TextCompletionLogprobs, + Usage, +) + + +def test_convert_chat_to_text_completion(): + """Test converting chat completion to text completion""" + chat_response = ModelResponse( + id="chat123", + created=1234567890, + model="gpt-3.5-turbo", + choices=[ + { + "index": 0, + "message": {"content": "Hello, world!"}, + "finish_reason": "stop", + } + ], + usage={"total_tokens": 10, "completion_tokens": 10}, + _hidden_params={"api_key": "test"}, + ) + + text_completion = TextCompletionResponse() + result = LiteLLMResponseObjectHandler.convert_chat_to_text_completion( + response=chat_response, text_completion_response=text_completion + ) + + assert isinstance(result, TextCompletionResponse) + assert result.id == "chat123" + assert result.object == "text_completion" + assert result.created == 1234567890 + assert result.model == "gpt-3.5-turbo" + assert result.choices[0].text == "Hello, world!" + assert result.choices[0].finish_reason == "stop" + assert result.usage == Usage( + completion_tokens=10, + prompt_tokens=0, + total_tokens=10, + completion_tokens_details=None, + prompt_tokens_details=None, + ) + + +def test_convert_provider_response_logprobs(): + """Test converting provider logprobs to text completion logprobs""" + response = ModelResponse( + id="test123", + _hidden_params={ + "original_response": { + "details": {"tokens": [{"text": "hello", "logprob": -1.0}]} + } + }, + ) + + result = LiteLLMResponseObjectHandler._convert_provider_response_logprobs_to_text_completion_logprobs( + response=response, custom_llm_provider="huggingface" + ) + + # Note: The actual assertion here depends on the implementation of + # litellm.huggingface._transform_logprobs, but we can at least test the function call + assert ( + result is not None or result is None + ) # Will depend on the actual implementation + + +def test_convert_provider_response_logprobs_non_huggingface(): + """Test converting provider logprobs for non-huggingface provider""" + response = ModelResponse(id="test123", _hidden_params={}) + + result = LiteLLMResponseObjectHandler._convert_provider_response_logprobs_to_text_completion_logprobs( + response=response, custom_llm_provider="openai" + ) + + assert result is None + + +def test_convert_chat_to_text_completion_multiple_choices(): + """Test converting chat completion to text completion with multiple choices""" + chat_response = ModelResponse( + id="chat456", + created=1234567890, + model="gpt-3.5-turbo", + choices=[ + { + "index": 0, + "message": {"content": "First response"}, + "finish_reason": "stop", + }, + { + "index": 1, + "message": {"content": "Second response"}, + "finish_reason": "length", + }, + ], + usage={"total_tokens": 20}, + _hidden_params={"api_key": "test"}, + ) + + text_completion = TextCompletionResponse() + result = LiteLLMResponseObjectHandler.convert_chat_to_text_completion( + response=chat_response, text_completion_response=text_completion + ) + + assert isinstance(result, TextCompletionResponse) + assert result.id == "chat456" + assert result.object == "text_completion" + assert len(result.choices) == 2 + assert result.choices[0].text == "First response" + assert result.choices[0].finish_reason == "stop" + assert result.choices[1].text == "Second response" + assert result.choices[1].finish_reason == "length" + assert result.usage == Usage( + completion_tokens=0, + prompt_tokens=0, + total_tokens=20, + completion_tokens_details=None, + prompt_tokens_details=None, + ) diff --git a/tests/llm_translation/test_text_completion_unit_tests.py b/tests/llm_translation/test_text_completion_unit_tests.py index 2012ae11b..9d5359a4a 100644 --- a/tests/llm_translation/test_text_completion_unit_tests.py +++ b/tests/llm_translation/test_text_completion_unit_tests.py @@ -3,11 +3,15 @@ import os import sys from datetime import datetime from unittest.mock import AsyncMock +import pytest +import httpx +from respx import MockRouter sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path +import litellm from litellm.types.utils import TextCompletionResponse @@ -62,3 +66,71 @@ def test_convert_dict_to_text_completion_response(): assert response.choices[0].logprobs.token_logprobs == [None, -12.203847] assert response.choices[0].logprobs.tokens == ["hello", " crisp"] assert response.choices[0].logprobs.top_logprobs == [None, {",": -2.1568563}] + + +@pytest.mark.asyncio +@pytest.mark.respx +async def test_huggingface_text_completion_logprobs(respx_mock: MockRouter): + """Test text completion with Hugging Face, focusing on logprobs structure""" + litellm.set_verbose = True + + # Mock the raw response from Hugging Face + mock_response = [ + { + "generated_text": ",\n\nI have a question...", # truncated for brevity + "details": { + "finish_reason": "length", + "generated_tokens": 100, + "seed": None, + "prefill": [], + "tokens": [ + {"id": 28725, "text": ",", "logprob": -1.7626953, "special": False}, + {"id": 13, "text": "\n", "logprob": -1.7314453, "special": False}, + ], + }, + } + ] + + # Mock the API request + mock_request = respx_mock.post( + "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1" + ).mock(return_value=httpx.Response(200, json=mock_response)) + + response = await litellm.atext_completion( + model="huggingface/mistralai/Mistral-7B-v0.1", + prompt="good morning", + ) + + # Verify the request + assert mock_request.called + request_body = json.loads(mock_request.calls[0].request.content) + assert request_body == { + "inputs": "good morning", + "parameters": {"details": True, "return_full_text": False}, + "stream": False, + } + + print("response=", response) + + # Verify response structure + assert isinstance(response, TextCompletionResponse) + assert response.object == "text_completion" + assert response.model == "mistralai/Mistral-7B-v0.1" + + # Verify logprobs structure + choice = response.choices[0] + assert choice.finish_reason == "length" + assert choice.index == 0 + assert isinstance(choice.logprobs.tokens, list) + assert isinstance(choice.logprobs.token_logprobs, list) + assert isinstance(choice.logprobs.text_offset, list) + assert isinstance(choice.logprobs.top_logprobs, list) + assert choice.logprobs.tokens == [",", "\n"] + assert choice.logprobs.token_logprobs == [-1.7626953, -1.7314453] + assert choice.logprobs.text_offset == [0, 1] + assert choice.logprobs.top_logprobs == [{}, {}] + + # Verify usage + assert response.usage["completion_tokens"] > 0 + assert response.usage["prompt_tokens"] > 0 + assert response.usage["total_tokens"] > 0 From 37eea68c0651aee4f58ad7de60e5c581ef37f330 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 4 Nov 2024 16:06:16 -0800 Subject: [PATCH 09/67] fix allow using 15 seconds for premium license check --- litellm/proxy/auth/litellm_license.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/proxy/auth/litellm_license.py b/litellm/proxy/auth/litellm_license.py index a25fd9bd4..784b4274e 100644 --- a/litellm/proxy/auth/litellm_license.py +++ b/litellm/proxy/auth/litellm_license.py @@ -21,7 +21,7 @@ class LicenseCheck: def __init__(self) -> None: self.license_str = os.getenv("LITELLM_LICENSE", None) verbose_proxy_logger.debug("License Str value - {}".format(self.license_str)) - self.http_handler = HTTPHandler() + self.http_handler = HTTPHandler(timeout=15) self.public_key = None self.read_public_key() From 57b1bb5e06e60b64c8b76fde491705384d4e15f2 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 4 Nov 2024 20:20:19 -0800 Subject: [PATCH 10/67] testing fix bedrock deprecated cohere.command-text-v14 --- tests/local_testing/test_completion.py | 1 - tests/local_testing/test_streaming.py | 1 - 2 files changed, 2 deletions(-) diff --git a/tests/local_testing/test_completion.py b/tests/local_testing/test_completion.py index a8fbb3dc5..8f28de7b4 100644 --- a/tests/local_testing/test_completion.py +++ b/tests/local_testing/test_completion.py @@ -3543,7 +3543,6 @@ def response_format_tests(response: litellm.ModelResponse): "mistral.mistral-7b-instruct-v0:2", # "bedrock/amazon.titan-tg1-large", "meta.llama3-8b-instruct-v1:0", - "cohere.command-text-v14", ], ) @pytest.mark.parametrize("sync_mode", [True, False]) diff --git a/tests/local_testing/test_streaming.py b/tests/local_testing/test_streaming.py index b912d98f3..99c506f69 100644 --- a/tests/local_testing/test_streaming.py +++ b/tests/local_testing/test_streaming.py @@ -1430,7 +1430,6 @@ async def test_completion_replicate_llama3_streaming(sync_mode): ["mistral.mistral-7b-instruct-v0:2", None], ["bedrock/amazon.titan-tg1-large", None], ["meta.llama3-8b-instruct-v1:0", None], - ["cohere.command-text-v14", None], ], ) @pytest.mark.asyncio From c047d51cc8666b83cd9e296bb972aec727d48bf3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 10:46:57 +0530 Subject: [PATCH 11/67] (feat) add `Predicted Outputs` for OpenAI (#6594) * bump openai to openai==1.54.0 * add 'prediction' param * testing fix bedrock deprecated cohere.command-text-v14 * test test_openai_prediction_param.py * test_openai_prediction_param_with_caching * doc Predicted Outputs * doc Predicted Output --- .circleci/config.yml | 10 +- .circleci/requirements.txt | 2 +- .../docs/completion/predict_outputs.md | 109 +++++++++ docs/my-website/sidebars.js | 1 + .../llms/OpenAI/chat/gpt_transformation.py | 1 + litellm/main.py | 8 + litellm/types/llms/openai.py | 3 + litellm/utils.py | 2 + poetry.lock | 10 +- pyproject.toml | 2 +- requirements.txt | 2 +- .../test_openai_prediction_param.py | 225 ++++++++++++++++++ 12 files changed, 362 insertions(+), 13 deletions(-) create mode 100644 docs/my-website/docs/completion/predict_outputs.md create mode 100644 tests/llm_translation/test_openai_prediction_param.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 7083be6bd..063aff4c6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -47,7 +47,7 @@ jobs: pip install opentelemetry-api==1.25.0 pip install opentelemetry-sdk==1.25.0 pip install opentelemetry-exporter-otlp==1.25.0 - pip install openai==1.52.0 + pip install openai==1.54.0 pip install prisma==0.11.0 pip install "detect_secrets==1.5.0" pip install "httpx==0.24.1" @@ -520,7 +520,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.52.0" + pip install "openai==1.54.0 " # Run pytest and generate JUnit XML report - run: name: Build Docker image @@ -637,7 +637,7 @@ jobs: pip install "aiodynamo==23.10.1" pip install "asyncio==3.4.3" pip install "PyGithub==1.59.1" - pip install "openai==1.52.0" + pip install "openai==1.54.0 " - run: name: Build Docker image command: docker build -t my-app:latest -f ./docker/Dockerfile.database . @@ -729,7 +729,7 @@ jobs: pip install "pytest-asyncio==0.21.1" pip install "google-cloud-aiplatform==1.43.0" pip install aiohttp - pip install "openai==1.52.0" + pip install "openai==1.54.0 " python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" @@ -924,7 +924,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-asyncio==0.21.1" pip install aiohttp - pip install "openai==1.52.0" + pip install "openai==1.54.0 " python -m pip install --upgrade pip pip install "pydantic==2.7.1" pip install "pytest==7.3.1" diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt index 4912c052c..578bfa572 100644 --- a/.circleci/requirements.txt +++ b/.circleci/requirements.txt @@ -1,5 +1,5 @@ # used by CI/CD testing -openai==1.52.0 +openai==1.54.0 python-dotenv tiktoken importlib_metadata diff --git a/docs/my-website/docs/completion/predict_outputs.md b/docs/my-website/docs/completion/predict_outputs.md new file mode 100644 index 000000000..a0d832d68 --- /dev/null +++ b/docs/my-website/docs/completion/predict_outputs.md @@ -0,0 +1,109 @@ +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Predicted Outputs + +| Property | Details | +|-------|-------| +| Description | Use this when most of the output of the LLM is known ahead of time. For instance, if you are asking the model to rewrite some text or code with only minor changes, you can reduce your latency significantly by using Predicted Outputs, passing in the existing content as your prediction. | +| Supported providers | `openai` | +| Link to OpenAI doc on Predicted Outputs | [Predicted Outputs ↗](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs) | +| Supported from LiteLLM Version | `v1.51.4` | + + + +## Using Predicted Outputs + + + + +In this example we want to refactor a piece of C# code, and convert the Username property to Email instead: +```python +import litellm +os.environ["OPENAI_API_KEY"] = "your-api-key" +code = """ +/// +/// Represents a user with a first name, last name, and username. +/// +public class User +{ + /// + /// Gets or sets the user's first name. + /// + public string FirstName { get; set; } + + /// + /// Gets or sets the user's last name. + /// + public string LastName { get; set; } + + /// + /// Gets or sets the user's username. + /// + public string Username { get; set; } +} +""" + +completion = litellm.completion( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, +) + +print(completion) +``` + + + + +1. Define models on config.yaml + +```yaml +model_list: + - model_name: gpt-4o-mini # OpenAI gpt-4o-mini + litellm_params: + model: openai/gpt-4o-mini + api_key: os.environ/OPENAI_API_KEY + +``` + +2. Run proxy server + +```bash +litellm --config config.yaml +``` + +3. Test it using the OpenAI Python SDK + + +```python +from openai import OpenAI + +client = OpenAI( + api_key="LITELLM_PROXY_KEY", # sk-1234 + base_url="LITELLM_PROXY_BASE" # http://0.0.0.0:4000 +) + +completion = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, +) + +print(completion) +``` + + + diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index d0b46fe1e..18ad940f8 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -205,6 +205,7 @@ const sidebars = { "completion/prompt_caching", "completion/audio", "completion/vision", + "completion/predict_outputs", "completion/prefix", "completion/drop_params", "completion/prompt_formatting", diff --git a/litellm/llms/OpenAI/chat/gpt_transformation.py b/litellm/llms/OpenAI/chat/gpt_transformation.py index 4eced5b1b..14ebb4a53 100644 --- a/litellm/llms/OpenAI/chat/gpt_transformation.py +++ b/litellm/llms/OpenAI/chat/gpt_transformation.py @@ -94,6 +94,7 @@ class OpenAIGPTConfig: "max_tokens", "max_completion_tokens", "modalities", + "prediction", "n", "presence_penalty", "seed", diff --git a/litellm/main.py b/litellm/main.py index 2f3a2ea2b..ab85be834 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -162,6 +162,7 @@ from .types.llms.openai import ( ChatCompletionAssistantMessage, ChatCompletionAudioParam, ChatCompletionModality, + ChatCompletionPredictionContentParam, ChatCompletionUserMessage, HttpxBinaryResponseContent, ) @@ -304,6 +305,7 @@ async def acompletion( max_tokens: Optional[int] = None, max_completion_tokens: Optional[int] = None, modalities: Optional[List[ChatCompletionModality]] = None, + prediction: Optional[ChatCompletionPredictionContentParam] = None, audio: Optional[ChatCompletionAudioParam] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, @@ -346,6 +348,7 @@ async def acompletion( max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity). max_completion_tokens (integer, optional): An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens. modalities (List[ChatCompletionModality], optional): Output types that you would like the model to generate for this request. You can use `["text", "audio"]` + prediction (ChatCompletionPredictionContentParam, optional): Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. audio (ChatCompletionAudioParam, optional): Parameters for audio output. Required when audio output is requested with modalities: ["audio"] presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far. frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far. @@ -387,6 +390,7 @@ async def acompletion( "max_tokens": max_tokens, "max_completion_tokens": max_completion_tokens, "modalities": modalities, + "prediction": prediction, "audio": audio, "presence_penalty": presence_penalty, "frequency_penalty": frequency_penalty, @@ -693,6 +697,7 @@ def completion( # type: ignore # noqa: PLR0915 max_completion_tokens: Optional[int] = None, max_tokens: Optional[int] = None, modalities: Optional[List[ChatCompletionModality]] = None, + prediction: Optional[ChatCompletionPredictionContentParam] = None, audio: Optional[ChatCompletionAudioParam] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, @@ -737,6 +742,7 @@ def completion( # type: ignore # noqa: PLR0915 max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity). max_completion_tokens (integer, optional): An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens. modalities (List[ChatCompletionModality], optional): Output types that you would like the model to generate for this request.. You can use `["text", "audio"]` + prediction (ChatCompletionPredictionContentParam, optional): Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. audio (ChatCompletionAudioParam, optional): Parameters for audio output. Required when audio output is requested with modalities: ["audio"] presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far. frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far. @@ -843,6 +849,7 @@ def completion( # type: ignore # noqa: PLR0915 "stop", "max_completion_tokens", "modalities", + "prediction", "audio", "max_tokens", "presence_penalty", @@ -994,6 +1001,7 @@ def completion( # type: ignore # noqa: PLR0915 max_tokens=max_tokens, max_completion_tokens=max_completion_tokens, modalities=modalities, + prediction=prediction, audio=audio, presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py index c2a78e349..a457c125c 100644 --- a/litellm/types/llms/openai.py +++ b/litellm/types/llms/openai.py @@ -21,6 +21,9 @@ from openai.types.beta.threads.run import Run from openai.types.chat import ChatCompletionChunk from openai.types.chat.chat_completion_audio_param import ChatCompletionAudioParam from openai.types.chat.chat_completion_modality import ChatCompletionModality +from openai.types.chat.chat_completion_prediction_content_param import ( + ChatCompletionPredictionContentParam, +) from openai.types.embedding import Embedding as OpenAIEmbedding from pydantic import BaseModel, Field from typing_extensions import Dict, Required, TypedDict, override diff --git a/litellm/utils.py b/litellm/utils.py index 0f7ff50a0..1b37b77a5 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -2550,6 +2550,7 @@ def get_optional_params( # noqa: PLR0915 max_tokens=None, max_completion_tokens=None, modalities=None, + prediction=None, audio=None, presence_penalty=None, frequency_penalty=None, @@ -2631,6 +2632,7 @@ def get_optional_params( # noqa: PLR0915 "max_tokens": None, "max_completion_tokens": None, "modalities": None, + "prediction": None, "audio": None, "presence_penalty": None, "frequency_penalty": None, diff --git a/poetry.lock b/poetry.lock index 7846ef049..2f94693e6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1823,13 +1823,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "openai" -version = "1.52.0" +version = "1.54.0" description = "The official Python library for the openai API" optional = false -python-versions = ">=3.7.1" +python-versions = ">=3.8" files = [ - {file = "openai-1.52.0-py3-none-any.whl", hash = "sha256:0c249f20920183b0a2ca4f7dba7b0452df3ecd0fa7985eb1d91ad884bc3ced9c"}, - {file = "openai-1.52.0.tar.gz", hash = "sha256:95c65a5f77559641ab8f3e4c3a050804f7b51d278870e2ec1f7444080bfe565a"}, + {file = "openai-1.54.0-py3-none-any.whl", hash = "sha256:24ed8874b56e919f0fbb80b7136c3fb022dc82ce9f5f21579b7b280ea4bba249"}, + {file = "openai-1.54.0.tar.gz", hash = "sha256:df2a84384314165b706722a7ac8988dc33eba20dd7fc3b939d138110e608b1ce"}, ] [package.dependencies] @@ -3519,4 +3519,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "cryptography", "fastapi", "fastapi- [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0, !=3.9.7" -content-hash = "491d361cabc637f8f896091b92855040da670bb7b311dcbfe75ad20eab97400c" +content-hash = "64154f16e1bbea8b77ba3eddf1cbf051af39f019820d92b638c448445fa32c83" diff --git a/pyproject.toml b/pyproject.toml index 92998dd28..2257cb679 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ documentation = "https://docs.litellm.ai" [tool.poetry.dependencies] python = ">=3.8.1,<4.0, !=3.9.7" -openai = ">=1.52.0" +openai = ">=1.54.0" python-dotenv = ">=0.2.0" tiktoken = ">=0.7.0" importlib-metadata = ">=6.8.0" diff --git a/requirements.txt b/requirements.txt index a08ca5852..0ac95fc96 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # LITELLM PROXY DEPENDENCIES # anyio==4.4.0 # openai + http req. -openai==1.52.0 # openai req. +openai==1.54.0 # openai req. fastapi==0.111.0 # server dep backoff==2.2.1 # server dep pyyaml==6.0.0 # server dep diff --git a/tests/llm_translation/test_openai_prediction_param.py b/tests/llm_translation/test_openai_prediction_param.py new file mode 100644 index 000000000..ebfdf061f --- /dev/null +++ b/tests/llm_translation/test_openai_prediction_param.py @@ -0,0 +1,225 @@ +import json +import os +import sys +from datetime import datetime +from unittest.mock import AsyncMock + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path + + +import httpx +import pytest +from respx import MockRouter + +import litellm +from litellm import Choices, Message, ModelResponse + + +def test_openai_prediction_param(): + litellm.set_verbose = True + code = """ + /// + /// Represents a user with a first name, last name, and username. + /// + public class User + { + /// + /// Gets or sets the user's first name. + /// + public string FirstName { get; set; } + + /// + /// Gets or sets the user's last name. + /// + public string LastName { get; set; } + + /// + /// Gets or sets the user's username. + /// + public string Username { get; set; } + } + """ + + completion = litellm.completion( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, + ) + + print(completion) + + assert ( + completion.usage.completion_tokens_details.accepted_prediction_tokens > 0 + or completion.usage.completion_tokens_details.rejected_prediction_tokens > 0 + ) + + +@pytest.mark.asyncio +@pytest.mark.respx +async def test_openai_prediction_param_mock(respx_mock: MockRouter): + """ + Tests that prediction parameter is correctly passed to the API + """ + litellm.set_verbose = True + + code = """ + /// + /// Represents a user with a first name, last name, and username. + /// + public class User + { + /// + /// Gets or sets the user's first name. + /// + public string FirstName { get; set; } + + /// + /// Gets or sets the user's last name. + /// + public string LastName { get; set; } + + /// + /// Gets or sets the user's username. + /// + public string Username { get; set; } + } + """ + + mock_response = ModelResponse( + id="chatcmpl-AQ5RmV8GvVSRxEcDxnuXlQnsibiY9", + choices=[ + Choices( + message=Message( + content=code.replace("Username", "Email").replace( + "username", "email" + ), + role="assistant", + ) + ) + ], + created=int(datetime.now().timestamp()), + model="gpt-4o-mini-2024-07-18", + usage={ + "completion_tokens": 207, + "prompt_tokens": 175, + "total_tokens": 382, + "completion_tokens_details": { + "accepted_prediction_tokens": 0, + "reasoning_tokens": 0, + "rejected_prediction_tokens": 80, + }, + }, + ) + + mock_request = respx_mock.post("https://api.openai.com/v1/chat/completions").mock( + return_value=httpx.Response(200, json=mock_response.dict()) + ) + + completion = await litellm.acompletion( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, + ) + + assert mock_request.called + request_body = json.loads(mock_request.calls[0].request.content) + + # Verify the request contains the prediction parameter + assert "prediction" in request_body + # verify prediction is correctly sent to the API + assert request_body["prediction"] == {"type": "content", "content": code} + + # Verify the completion tokens details + assert completion.usage.completion_tokens_details.accepted_prediction_tokens == 0 + assert completion.usage.completion_tokens_details.rejected_prediction_tokens == 80 + + +@pytest.mark.asyncio +async def test_openai_prediction_param_with_caching(): + """ + Tests using `prediction` parameter with caching + """ + from litellm.caching.caching import LiteLLMCacheType + import logging + from litellm._logging import verbose_logger + + verbose_logger.setLevel(logging.DEBUG) + import time + + litellm.set_verbose = True + litellm.cache = litellm.Cache(type=LiteLLMCacheType.LOCAL) + code = """ + /// + /// Represents a user with a first name, last name, and username. + /// + public class User + { + /// + /// Gets or sets the user's first name. + /// + public string FirstName { get; set; } + + /// + /// Gets or sets the user's last name. + /// + public string LastName { get; set; } + + /// + /// Gets or sets the user's username. + /// + public string Username { get; set; } + } + """ + + completion_response_1 = litellm.completion( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, + ) + + time.sleep(0.5) + + # cache hit + completion_response_2 = litellm.completion( + model="gpt-4o-mini", + messages=[ + { + "role": "user", + "content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.", + }, + {"role": "user", "content": code}, + ], + prediction={"type": "content", "content": code}, + ) + + assert completion_response_1.id == completion_response_2.id + + completion_response_3 = litellm.completion( + model="gpt-4o-mini", + messages=[ + {"role": "user", "content": "What is the first name of the user?"}, + ], + prediction={"type": "content", "content": code + "FirstName"}, + ) + + assert completion_response_3.id != completion_response_1.id From 96b0e324e306c78611cdb4add6f4134fc7a2686c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 11:25:09 +0530 Subject: [PATCH 12/67] (fix) Vertex Improve Performance when using `image_url` (#6593) * fix transformation vertex * test test_process_gemini_image * test_image_completion_request * testing fix - bedrock has deprecated cohere.command-text-v14 * fix vertex pdf --- .../gemini/transformation.py | 37 ++++- tests/llm_translation/test_vertex.py | 146 ++++++++++++++++++ 2 files changed, 180 insertions(+), 3 deletions(-) diff --git a/litellm/llms/vertex_ai_and_google_ai_studio/gemini/transformation.py b/litellm/llms/vertex_ai_and_google_ai_studio/gemini/transformation.py index 66ab07674..f828d93c8 100644 --- a/litellm/llms/vertex_ai_and_google_ai_studio/gemini/transformation.py +++ b/litellm/llms/vertex_ai_and_google_ai_studio/gemini/transformation.py @@ -51,6 +51,9 @@ from ..common_utils import ( def _process_gemini_image(image_url: str) -> PartType: + """ + Given an image URL, return the appropriate PartType for Gemini + """ try: # GCS URIs if "gs://" in image_url: @@ -68,9 +71,14 @@ def _process_gemini_image(image_url: str) -> PartType: file_data = FileDataType(mime_type=mime_type, file_uri=image_url) return PartType(file_data=file_data) - - # Direct links - elif "https:/" in image_url or "base64" in image_url: + elif ( + "https://" in image_url + and (image_type := _get_image_mime_type_from_url(image_url)) is not None + ): + file_data = FileDataType(file_uri=image_url, mime_type=image_type) + return PartType(file_data=file_data) + elif "https://" in image_url or "base64" in image_url: + # https links for unsupported mime types and base64 images image = convert_to_anthropic_image_obj(image_url) _blob = BlobType(data=image["data"], mime_type=image["media_type"]) return PartType(inline_data=_blob) @@ -79,6 +87,29 @@ def _process_gemini_image(image_url: str) -> PartType: raise e +def _get_image_mime_type_from_url(url: str) -> Optional[str]: + """ + Get mime type for common image URLs + See gemini mime types: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/image-understanding#image-requirements + + Supported by Gemini: + - PNG (`image/png`) + - JPEG (`image/jpeg`) + - WebP (`image/webp`) + Example: + url = https://example.com/image.jpg + Returns: image/jpeg + """ + url = url.lower() + if url.endswith((".jpg", ".jpeg")): + return "image/jpeg" + elif url.endswith(".png"): + return "image/png" + elif url.endswith(".webp"): + return "image/webp" + return None + + def _gemini_convert_messages_with_history( # noqa: PLR0915 messages: List[AllMessageValues], ) -> List[ContentType]: diff --git a/tests/llm_translation/test_vertex.py b/tests/llm_translation/test_vertex.py index 467be4ddf..a06179a49 100644 --- a/tests/llm_translation/test_vertex.py +++ b/tests/llm_translation/test_vertex.py @@ -15,6 +15,7 @@ sys.path.insert( import pytest import litellm from litellm import get_optional_params +from litellm.llms.custom_httpx.http_handler import HTTPHandler def test_completion_pydantic_obj_2(): @@ -1171,3 +1172,148 @@ def test_logprobs(): print(resp) assert resp.choices[0].logprobs is not None + + +def test_process_gemini_image(): + """Test the _process_gemini_image function for different image sources""" + from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import ( + _process_gemini_image, + ) + from litellm.types.llms.vertex_ai import PartType, FileDataType, BlobType + + # Test GCS URI + gcs_result = _process_gemini_image("gs://bucket/image.png") + assert gcs_result["file_data"] == FileDataType( + mime_type="image/png", file_uri="gs://bucket/image.png" + ) + + # Test HTTPS JPG URL + https_result = _process_gemini_image("https://example.com/image.jpg") + print("https_result JPG", https_result) + assert https_result["file_data"] == FileDataType( + mime_type="image/jpeg", file_uri="https://example.com/image.jpg" + ) + + # Test HTTPS PNG URL + https_result = _process_gemini_image("https://example.com/image.png") + print("https_result PNG", https_result) + assert https_result["file_data"] == FileDataType( + mime_type="image/png", file_uri="https://example.com/image.png" + ) + + # Test base64 image + base64_image = "data:image/jpeg;base64,/9j/4AAQSkZJRg..." + base64_result = _process_gemini_image(base64_image) + print("base64_result", base64_result) + assert base64_result["inline_data"]["mime_type"] == "image/jpeg" + assert base64_result["inline_data"]["data"] == "/9j/4AAQSkZJRg..." + + +def test_get_image_mime_type_from_url(): + """Test the _get_image_mime_type_from_url function for different image URLs""" + from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import ( + _get_image_mime_type_from_url, + ) + + # Test JPEG images + assert ( + _get_image_mime_type_from_url("https://example.com/image.jpg") == "image/jpeg" + ) + assert ( + _get_image_mime_type_from_url("https://example.com/image.jpeg") == "image/jpeg" + ) + assert ( + _get_image_mime_type_from_url("https://example.com/IMAGE.JPG") == "image/jpeg" + ) + + # Test PNG images + assert _get_image_mime_type_from_url("https://example.com/image.png") == "image/png" + assert _get_image_mime_type_from_url("https://example.com/IMAGE.PNG") == "image/png" + + # Test WebP images + assert ( + _get_image_mime_type_from_url("https://example.com/image.webp") == "image/webp" + ) + assert ( + _get_image_mime_type_from_url("https://example.com/IMAGE.WEBP") == "image/webp" + ) + + # Test unsupported formats + assert _get_image_mime_type_from_url("https://example.com/image.gif") is None + assert _get_image_mime_type_from_url("https://example.com/image.bmp") is None + assert _get_image_mime_type_from_url("https://example.com/image") is None + assert _get_image_mime_type_from_url("invalid_url") is None + + +@pytest.mark.parametrize( + "image_url", ["https://example.com/image.jpg", "https://example.com/image.png"] +) +def test_image_completion_request(image_url): + """https:// .jpg, .png images are passed directly to the model""" + from unittest.mock import patch, Mock + import litellm + from litellm.llms.vertex_ai_and_google_ai_studio.gemini.transformation import ( + _get_image_mime_type_from_url, + ) + + # Mock response data + mock_response = Mock() + mock_response.json.return_value = { + "candidates": [{"content": {"parts": [{"text": "This is a sunflower"}]}}], + "usageMetadata": { + "promptTokenCount": 11, + "candidatesTokenCount": 50, + "totalTokenCount": 61, + }, + "modelVersion": "gemini-1.5-pro", + } + mock_response.raise_for_status = MagicMock() + mock_response.status_code = 200 + + # Expected request body + expected_request_body = { + "contents": [ + { + "role": "user", + "parts": [ + {"text": "Whats in this image?"}, + { + "file_data": { + "file_uri": image_url, + "mime_type": _get_image_mime_type_from_url(image_url), + } + }, + ], + } + ], + "system_instruction": {"parts": [{"text": "Be a good bot"}]}, + "generationConfig": {}, + } + + messages = [ + {"role": "system", "content": "Be a good bot"}, + { + "role": "user", + "content": [ + {"type": "text", "text": "Whats in this image?"}, + {"type": "image_url", "image_url": {"url": image_url}}, + ], + }, + ] + + client = HTTPHandler() + with patch.object(client, "post", new=MagicMock()) as mock_post: + mock_post.return_value = mock_response + try: + litellm.completion( + model="gemini/gemini-1.5-pro", + messages=messages, + client=client, + ) + except Exception as e: + print(e) + + # Assert the request body matches expected + mock_post.assert_called_once() + print("mock_post.call_args.kwargs['json']", mock_post.call_args.kwargs["json"]) + assert mock_post.call_args.kwargs["json"] == expected_request_body From f3071161add36c23b8682c68834a3838744108c8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 4 Nov 2024 21:55:43 -0800 Subject: [PATCH 13/67] =?UTF-8?q?bump:=20version=201.51.5=20=E2=86=92=201.?= =?UTF-8?q?52.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2257cb679..9c520ff34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.51.5" +version = "1.52.0" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.51.5" +version = "1.52.0" version_files = [ "pyproject.toml:^version" ] From 695f48a8f1ed607c7b596dbb00a977e399f024fc Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 5 Nov 2024 22:03:44 +0530 Subject: [PATCH 14/67] fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check (#6577) * fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check * fix(lowest_tpm_rpm_v2.py): return headers in correct format * test: update test * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * test: remove eol model * fix(proxy_server.py): fix db config loading logic * fix(proxy_server.py): fix order of config / db updates, to ensure fields not overwritten * test: skip test if required env var is missing * test: fix test --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> --- litellm/proxy/_new_secret_config.yaml | 87 +++++++++++-------- litellm/proxy/proxy_server.py | 70 +++++++++++++-- litellm/router.py | 14 +-- litellm/router_strategy/lowest_tpm_rpm_v2.py | 5 +- .../test_router_max_parallel_requests.py | 11 +-- tests/test_openai_endpoints.py | 5 +- tests/test_team_logging.py | 20 ++++- 7 files changed, 148 insertions(+), 64 deletions(-) diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 45a379748..d81c96df5 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -10,9 +10,20 @@ model_list: output_cost_per_token: 0.000015 # 15$/M api_base: "https://exampleopenaiendpoint-production.up.railway.app" api_key: my-fake-key - - model_name: gemini-1.5-flash-002 + - model_name: fake-openai-endpoint-2 litellm_params: - model: gemini/gemini-1.5-flash-002 + model: openai/my-fake-model + api_key: my-fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ + stream_timeout: 0.001 + timeout: 1 + rpm: 1 + - model_name: fake-openai-endpoint + litellm_params: + model: openai/my-fake-model + api_key: my-fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ + # litellm_settings: # fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] @@ -20,47 +31,47 @@ model_list: # default_redis_batch_cache_expiry: 10 -litellm_settings: - cache: True - cache_params: - type: redis +# litellm_settings: +# cache: True +# cache_params: +# type: redis - # disable caching on the actual API call - supported_call_types: [] +# # disable caching on the actual API call +# supported_call_types: [] - # see https://docs.litellm.ai/docs/proxy/prod#3-use-redis-porthost-password-not-redis_url - host: os.environ/REDIS_HOST - port: os.environ/REDIS_PORT - password: os.environ/REDIS_PASSWORD +# # see https://docs.litellm.ai/docs/proxy/prod#3-use-redis-porthost-password-not-redis_url +# host: os.environ/REDIS_HOST +# port: os.environ/REDIS_PORT +# password: os.environ/REDIS_PASSWORD - # see https://docs.litellm.ai/docs/proxy/caching#turn-on-batch_redis_requests - # see https://docs.litellm.ai/docs/proxy/prometheus - callbacks: ['otel'] +# # see https://docs.litellm.ai/docs/proxy/caching#turn-on-batch_redis_requests +# # see https://docs.litellm.ai/docs/proxy/prometheus +# callbacks: ['otel'] -router_settings: - routing_strategy: latency-based-routing - routing_strategy_args: - # only assign 40% of traffic to the fastest deployment to avoid overloading it - lowest_latency_buffer: 0.4 +# # router_settings: +# # routing_strategy: latency-based-routing +# # routing_strategy_args: +# # # only assign 40% of traffic to the fastest deployment to avoid overloading it +# # lowest_latency_buffer: 0.4 - # consider last five minutes of calls for latency calculation - ttl: 300 - redis_host: os.environ/REDIS_HOST - redis_port: os.environ/REDIS_PORT - redis_password: os.environ/REDIS_PASSWORD +# # # consider last five minutes of calls for latency calculation +# # ttl: 300 +# # redis_host: os.environ/REDIS_HOST +# # redis_port: os.environ/REDIS_PORT +# # redis_password: os.environ/REDIS_PASSWORD + +# # # see https://docs.litellm.ai/docs/proxy/prod#1-use-this-configyaml +# # general_settings: +# # master_key: os.environ/LITELLM_MASTER_KEY +# # database_url: os.environ/DATABASE_URL +# # disable_master_key_return: true +# # # alerting: ['slack', 'email'] +# # alerting: ['email'] -# see https://docs.litellm.ai/docs/proxy/prod#1-use-this-configyaml -general_settings: - master_key: os.environ/LITELLM_MASTER_KEY - database_url: os.environ/DATABASE_URL - disable_master_key_return: true - # alerting: ['slack', 'email'] - alerting: ['email'] +# # # Batch write spend updates every 60s +# # proxy_batch_write_at: 60 - # Batch write spend updates every 60s - proxy_batch_write_at: 60 - - # see https://docs.litellm.ai/docs/proxy/caching#advanced---user-api-key-cache-ttl - # our api keys rarely change - user_api_key_cache_ttl: 3600 \ No newline at end of file +# # # see https://docs.litellm.ai/docs/proxy/caching#advanced---user-api-key-cache-ttl +# # # our api keys rarely change +# # user_api_key_cache_ttl: 3600 \ No newline at end of file diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 37cbd2b82..9f6579242 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -757,12 +757,6 @@ async def _PROXY_track_cost_callback( verbose_proxy_logger.debug("INSIDE _PROXY_track_cost_callback") global prisma_client try: - # check if it has collected an entire stream response - verbose_proxy_logger.debug( - "Proxy: In track_cost_callback for: kwargs=%s and completion_response: %s", - kwargs, - completion_response, - ) verbose_proxy_logger.debug( f"kwargs stream: {kwargs.get('stream', None)} + complete streaming response: {kwargs.get('complete_streaming_response', None)}" ) @@ -1359,7 +1353,7 @@ class ProxyConfig: """ def __init__(self) -> None: - pass + self.config: Dict[str, Any] = {} def is_yaml(self, config_file_path: str) -> bool: if not os.path.isfile(config_file_path): @@ -1465,7 +1459,7 @@ class ProxyConfig: """ # load existing config - config = await self.get_config() + config = self.config ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..) litellm_settings = config.get("litellm_settings", {}) @@ -1518,7 +1512,9 @@ class ProxyConfig: dict: config """ + global prisma_client, store_model_in_db # Load existing config + if os.environ.get("LITELLM_CONFIG_BUCKET_NAME") is not None: bucket_name = os.environ.get("LITELLM_CONFIG_BUCKET_NAME") object_key = os.environ.get("LITELLM_CONFIG_BUCKET_OBJECT_KEY") @@ -1540,12 +1536,21 @@ class ProxyConfig: else: # default to file config = await self._get_config_from_file(config_file_path=config_file_path) + ## UPDATE CONFIG WITH DB + if prisma_client is not None: + config = await self._update_config_from_db( + config=config, + prisma_client=prisma_client, + store_model_in_db=store_model_in_db, + ) + ## PRINT YAML FOR CONFIRMING IT WORKS printed_yaml = copy.deepcopy(config) printed_yaml.pop("environment_variables", None) config = self._check_for_os_environ_vars(config=config) + self.config = config return config async def load_config( # noqa: PLR0915 @@ -2357,6 +2362,55 @@ class ProxyConfig: pass_through_endpoints=general_settings["pass_through_endpoints"] ) + async def _update_config_from_db( + self, + prisma_client: PrismaClient, + config: dict, + store_model_in_db: Optional[bool], + ): + + if store_model_in_db is not True: + verbose_proxy_logger.info( + "'store_model_in_db' is not True, skipping db updates" + ) + return config + + _tasks = [] + keys = [ + "general_settings", + "router_settings", + "litellm_settings", + "environment_variables", + ] + for k in keys: + response = prisma_client.get_generic_data( + key="param_name", value=k, table_name="config" + ) + _tasks.append(response) + + responses = await asyncio.gather(*_tasks) + for response in responses: + if response is not None: + param_name = getattr(response, "param_name", None) + verbose_proxy_logger.info(f"loading {param_name} settings from db") + if param_name == "litellm_settings": + verbose_proxy_logger.info( + f"litellm_settings: {response.param_value}" + ) + param_value = getattr(response, "param_value", None) + if param_name is not None and param_value is not None: + # check if param_name is already in the config + if param_name in config: + if isinstance(config[param_name], dict): + config[param_name].update(param_value) + else: + config[param_name] = param_value + else: + # if it's not in the config - then add it + config[param_name] = param_value + + return config + async def add_deployment( self, prisma_client: PrismaClient, diff --git a/litellm/router.py b/litellm/router.py index 82a37a9f4..726119cb7 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -585,6 +585,7 @@ class Router: def routing_strategy_init( self, routing_strategy: Union[RoutingStrategy, str], routing_strategy_args: dict ): + verbose_router_logger.info(f"Routing strategy: {routing_strategy}") if ( routing_strategy == RoutingStrategy.LEAST_BUSY.value or routing_strategy == RoutingStrategy.LEAST_BUSY @@ -912,6 +913,7 @@ class Router: logging_obj=logging_obj, parent_otel_span=parent_otel_span, ) + response = await _response ## CHECK CONTENT FILTER ERROR ## @@ -2961,14 +2963,14 @@ class Router: raise # decides how long to sleep before retry - _timeout = self._time_to_sleep_before_retry( + retry_after = self._time_to_sleep_before_retry( e=original_exception, remaining_retries=num_retries, num_retries=num_retries, healthy_deployments=_healthy_deployments, ) - # sleeps for the length of the timeout - await asyncio.sleep(_timeout) + + await asyncio.sleep(retry_after) for current_attempt in range(num_retries): try: # if the function call is successful, no exception will be raised and we'll break out of the loop @@ -4178,7 +4180,9 @@ class Router: model = _model ## GET LITELLM MODEL INFO - raises exception, if model is not mapped - model_info = litellm.get_model_info(model=model) + model_info = litellm.get_model_info( + model="{}/{}".format(custom_llm_provider, model) + ) ## CHECK USER SET MODEL INFO user_model_info = deployment.get("model_info", {}) @@ -4849,7 +4853,7 @@ class Router: ) continue except Exception as e: - verbose_router_logger.error("An error occurs - {}".format(str(e))) + verbose_router_logger.exception("An error occurs - {}".format(str(e))) _litellm_params = deployment.get("litellm_params", {}) model_id = deployment.get("model_info", {}).get("id", "") diff --git a/litellm/router_strategy/lowest_tpm_rpm_v2.py b/litellm/router_strategy/lowest_tpm_rpm_v2.py index 17ff0cc09..47e0b7b1d 100644 --- a/litellm/router_strategy/lowest_tpm_rpm_v2.py +++ b/litellm/router_strategy/lowest_tpm_rpm_v2.py @@ -180,7 +180,6 @@ class LowestTPMLoggingHandler_v2(CustomLogger): deployment_rpm = deployment.get("model_info", {}).get("rpm") if deployment_rpm is None: deployment_rpm = float("inf") - if local_result is not None and local_result >= deployment_rpm: raise litellm.RateLimitError( message="Deployment over defined rpm limit={}. current usage={}".format( @@ -195,7 +194,7 @@ class LowestTPMLoggingHandler_v2(CustomLogger): deployment_rpm, local_result, ), - headers={"retry-after": 60}, # type: ignore + headers={"retry-after": str(60)}, # type: ignore request=httpx.Request(method="tpm_rpm_limits", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) @@ -221,7 +220,7 @@ class LowestTPMLoggingHandler_v2(CustomLogger): deployment_rpm, result, ), - headers={"retry-after": 60}, # type: ignore + headers={"retry-after": str(60)}, # type: ignore request=httpx.Request(method="tpm_rpm_limits", url="https://github.com/BerriAI/litellm"), # type: ignore ), ) diff --git a/tests/local_testing/test_router_max_parallel_requests.py b/tests/local_testing/test_router_max_parallel_requests.py index 33ca17d8b..ff5c2104c 100644 --- a/tests/local_testing/test_router_max_parallel_requests.py +++ b/tests/local_testing/test_router_max_parallel_requests.py @@ -137,7 +137,7 @@ async def _handle_router_calls(router): Nam vitae finibus eros, eu eleifend erat. Maecenas hendrerit magna quis molestie dictum. Ut consequat quam eu massa auctor pulvinar. Pellentesque vitae eros ornare urna accumsan tempor. Maecenas porta id quam at sodales. Donec quis accumsan leo, vel viverra nibh. Vestibulum congue blandit nulla, sed rhoncus libero eleifend ac. In risus lorem, rutrum et tincidunt a, interdum a lectus. Pellentesque aliquet pulvinar mauris, ut ultrices nibh ultricies nec. Mauris mi mauris, facilisis nec metus non, egestas luctus ligula. Quisque ac ligula at felis mollis blandit id nec risus. Nam sollicitudin lacus sed sapien fringilla ullamcorper. Etiam dui quam, posuere sit amet velit id, aliquet molestie ante. Integer cursus eget sapien fringilla elementum. Integer molestie, mi ac scelerisque ultrices, nunc purus condimentum est, in posuere quam nibh vitae velit. """ completion = await router.acompletion( - "gpt-4o-2024-08-06", + "gpt-3.5-turbo", [ { "role": "user", @@ -166,16 +166,17 @@ async def test_max_parallel_requests_rpm_rate_limiting(): enable_pre_call_checks=True, model_list=[ { - "model_name": "gpt-4o-2024-08-06", + "model_name": "gpt-3.5-turbo", "litellm_params": { - "model": "gpt-4o-2024-08-06", + "model": "gpt-3.5-turbo", "temperature": 0.0, - "rpm": 5, + "rpm": 1, + "num_retries": 3, }, } ], ) - await asyncio.gather(*[_handle_router_calls(router) for _ in range(16)]) + await asyncio.gather(*[_handle_router_calls(router) for _ in range(3)]) @pytest.mark.asyncio diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py index c3691bc19..4dbeda188 100644 --- a/tests/test_openai_endpoints.py +++ b/tests/test_openai_endpoints.py @@ -5,7 +5,7 @@ import asyncio import aiohttp, openai from openai import OpenAI, AsyncOpenAI from typing import Optional, List, Union - +import uuid LITELLM_MASTER_KEY = "sk-1234" @@ -107,7 +107,7 @@ async def chat_completion(session, key, model: Union[str, List] = "gpt-4"): "model": model, "messages": [ {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Hello!"}, + {"role": "user", "content": f"Hello! {uuid.uuid4()}"}, ], } @@ -296,7 +296,6 @@ async def test_chat_completion(): await chat_completion(session=session, key=key_2) -# @pytest.mark.skip(reason="Local test. Proxy not concurrency safe yet. WIP.") @pytest.mark.asyncio async def test_chat_completion_ratelimit(): """ diff --git a/tests/test_team_logging.py b/tests/test_team_logging.py index 7a06f7fa5..97f18b42e 100644 --- a/tests/test_team_logging.py +++ b/tests/test_team_logging.py @@ -110,6 +110,7 @@ async def test_team_logging(): pytest.fail(f"Unexpected error: {str(e)}") +@pytest.mark.skip(reason="todo fix langfuse credential error") @pytest.mark.asyncio async def test_team_2logging(): """ @@ -118,6 +119,20 @@ async def test_team_2logging(): -> Make chat/completions call -> Fetch logs from langfuse """ + langfuse_public_key = os.getenv("LANGFUSE_PROJECT2_PUBLIC") + + print(f"langfuse_public_key: {langfuse_public_key}") + langfuse_secret_key = os.getenv("LANGFUSE_PROJECT2_SECRET") + print(f"langfuse_secret_key: {langfuse_secret_key}") + langfuse_host = "https://us.cloud.langfuse.com" + + try: + assert langfuse_public_key is not None + assert langfuse_secret_key is not None + except Exception as e: + # skip test if langfuse credentials are not set + return + try: async with aiohttp.ClientSession() as session: @@ -143,8 +158,9 @@ async def test_team_2logging(): import langfuse langfuse_client = langfuse.Langfuse( - public_key=os.getenv("LANGFUSE_PROJECT2_PUBLIC"), - secret_key=os.getenv("LANGFUSE_PROJECT2_SECRET"), + public_key=langfuse_public_key, + secret_key=langfuse_secret_key, + host=langfuse_host, ) await asyncio.sleep(10) From 43f1f943915c5850b1203f96c1b2e296de80aec0 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 5 Nov 2024 22:49:38 +0530 Subject: [PATCH 15/67] test: mark flaky test --- tests/local_testing/test_parallel_request_limiter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/local_testing/test_parallel_request_limiter.py b/tests/local_testing/test_parallel_request_limiter.py index 9bb2589aa..4e0eb9ceb 100644 --- a/tests/local_testing/test_parallel_request_limiter.py +++ b/tests/local_testing/test_parallel_request_limiter.py @@ -355,6 +355,7 @@ async def test_pre_call_hook_user_tpm_limits(): @pytest.mark.asyncio +@pytest.mark.flaky(retries=6, delay=1) async def test_success_call_hook(): """ Test if on success, cache correctly decremented From 305821902d41a6b3abe12778fb7ce1329297f174 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 5 Nov 2024 23:24:05 +0530 Subject: [PATCH 16/67] test: handle anthropic api instability --- tests/local_testing/test_prompt_caching.py | 117 ++++++++++----------- 1 file changed, 56 insertions(+), 61 deletions(-) diff --git a/tests/local_testing/test_prompt_caching.py b/tests/local_testing/test_prompt_caching.py index 35d5e2588..c73bda04e 100644 --- a/tests/local_testing/test_prompt_caching.py +++ b/tests/local_testing/test_prompt_caching.py @@ -47,70 +47,65 @@ def _usage_format_tests(usage: litellm.Usage): ], ) def test_prompt_caching_model(model): - for _ in range(2): - response = litellm.completion( - model=model, - messages=[ - # System Message - { - "role": "system", - "content": [ - { - "type": "text", - "text": "Here is the full text of a complex legal agreement" - * 400, - "cache_control": {"type": "ephemeral"}, - } - ], - }, - # marked for caching with the cache_control parameter, so that this checkpoint can read from the previous cache. - { - "role": "user", - "content": [ - { - "type": "text", - "text": "What are the key terms and conditions in this agreement?", - "cache_control": {"type": "ephemeral"}, - } - ], - }, - { - "role": "assistant", - "content": "Certainly! the key terms and conditions are the following: the contract is 1 year long for $10/mo", - }, - # The final turn is marked with cache-control, for continuing in followups. - { - "role": "user", - "content": [ - { - "type": "text", - "text": "What are the key terms and conditions in this agreement?", - "cache_control": {"type": "ephemeral"}, - } - ], - }, - ], - temperature=0.2, - max_tokens=10, - ) + try: + for _ in range(2): + response = litellm.completion( + model=model, + messages=[ + # System Message + { + "role": "system", + "content": [ + { + "type": "text", + "text": "Here is the full text of a complex legal agreement" + * 400, + "cache_control": {"type": "ephemeral"}, + } + ], + }, + # marked for caching with the cache_control parameter, so that this checkpoint can read from the previous cache. + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What are the key terms and conditions in this agreement?", + "cache_control": {"type": "ephemeral"}, + } + ], + }, + { + "role": "assistant", + "content": "Certainly! the key terms and conditions are the following: the contract is 1 year long for $10/mo", + }, + # The final turn is marked with cache-control, for continuing in followups. + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What are the key terms and conditions in this agreement?", + "cache_control": {"type": "ephemeral"}, + } + ], + }, + ], + temperature=0.2, + max_tokens=10, + ) + + _usage_format_tests(response.usage) + + print("response=", response) + print("response.usage=", response.usage) _usage_format_tests(response.usage) - print("response=", response) - print("response.usage=", response.usage) - - _usage_format_tests(response.usage) - - assert "prompt_tokens_details" in response.usage - assert response.usage.prompt_tokens_details.cached_tokens > 0 - - # assert "cache_read_input_tokens" in response.usage - # assert "cache_creation_input_tokens" in response.usage - - # # Assert either a cache entry was created or cache was read - changes depending on the anthropic api ttl - # assert (response.usage.cache_read_input_tokens > 0) or ( - # response.usage.cache_creation_input_tokens > 0 - # ) + assert "prompt_tokens_details" in response.usage + assert response.usage.prompt_tokens_details.cached_tokens > 0 + except litellm.InternalServerError: + pass def test_supports_prompt_caching(): From 0fe8cde7c78d6b975b936f7802a4124b58bd253a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 5 Nov 2024 13:43:08 -0800 Subject: [PATCH 17/67] (DB fix) don't run apply_db_fixes on startup (#6604) * fix don't apply_db_fixes on startup * fix remove unused import --- db_scripts/create_views.py | 2 -- db_scripts/update_unassigned_teams.py | 7 +++++++ litellm/proxy/proxy_config.yaml | 4 ++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/db_scripts/create_views.py b/db_scripts/create_views.py index 2b6a28ccb..7a913c7f8 100644 --- a/db_scripts/create_views.py +++ b/db_scripts/create_views.py @@ -4,7 +4,6 @@ python script to pre-create all views required by LiteLLM Proxy Server import asyncio import os -from update_unassigned_teams import apply_db_fixes # Enter your DATABASE_URL here @@ -205,7 +204,6 @@ async def check_view_exists(): # noqa: PLR0915 print("Last30dTopEndUsersSpend Created!") # noqa - await apply_db_fixes(db=db) return diff --git a/db_scripts/update_unassigned_teams.py b/db_scripts/update_unassigned_teams.py index dc65e4c20..bf2cd2075 100644 --- a/db_scripts/update_unassigned_teams.py +++ b/db_scripts/update_unassigned_teams.py @@ -1,7 +1,14 @@ from prisma import Prisma +from litellm._logging import verbose_logger async def apply_db_fixes(db: Prisma): + """ + Do Not Run this in production, only use it as a one-time fix + """ + verbose_logger.warning( + "DO NOT run this in Production....Running update_unassigned_teams" + ) try: sql_query = """ UPDATE "LiteLLM_SpendLogs" diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 23834f759..f3edf79d0 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -1,7 +1,7 @@ model_list: - - model_name: gpt-4o + - model_name: fake-openai-endpoint litellm_params: - model: openai/gpt-5 + model: openai/fake api_key: os.environ/OPENAI_API_KEY api_base: https://exampleopenaiendpoint-production.up.railway.app/ From 5c5527074045aa1e0ed90f2aaf02f38402e758e9 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Wed, 6 Nov 2024 17:53:46 +0530 Subject: [PATCH 18/67] LiteLLM Minor Fixes & Improvements (11/04/2024) (#6572) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: initial commit for watsonx chat endpoint support Closes https://github.com/BerriAI/litellm/issues/6562 * feat(watsonx/chat/handler.py): support tool calling for watsonx Closes https://github.com/BerriAI/litellm/issues/6562 * fix(streaming_utils.py): return empty chunk instead of failing if streaming value is invalid dict ensures streaming works for ibm watsonx * fix(openai_like/chat/handler.py): ensure asynchttphandler is passed correctly for openai like calls * fix: ensure exception mapping works well for watsonx calls * fix(openai_like/chat/handler.py): handle async streaming correctly * feat(main.py): Make it clear when a user is passing an invalid message add validation for user content message Closes https://github.com/BerriAI/litellm/issues/6565 * fix: cleanup * fix(utils.py): loosen validation check, to just make sure content types are valid make litellm robust to future content updates * fix: fix linting erro * fix: fix linting errors * fix(utils.py): make validation check more flexible * test: handle langfuse list index out of range error * Litellm dev 11 02 2024 (#6561) * fix(dual_cache.py): update in-memory check for redis batch get cache Fixes latency delay for async_batch_redis_cache * fix(service_logger.py): fix race condition causing otel service logging to be overwritten if service_callbacks set * feat(user_api_key_auth.py): add parent otel component for auth allows us to isolate how much latency is added by auth checks * perf(parallel_request_limiter.py): move async_set_cache_pipeline (from max parallel request limiter) out of execution path (background task) reduces latency by 200ms * feat(user_api_key_auth.py): have user api key auth object return user tpm/rpm limits - reduces redis calls in downstream task (parallel_request_limiter) Reduces latency by 400-800ms * fix(parallel_request_limiter.py): use batch get cache to reduce user/key/team usage object calls reduces latency by 50-100ms * fix: fix linting error * fix(_service_logger.py): fix import * fix(user_api_key_auth.py): fix service logging * fix(dual_cache.py): don't pass 'self' * fix: fix python3.8 error * fix: fix init] * bump: version 1.51.4 → 1.51.5 * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * Litellm dev 11 02 2024 (#6561) * fix(dual_cache.py): update in-memory check for redis batch get cache Fixes latency delay for async_batch_redis_cache * fix(service_logger.py): fix race condition causing otel service logging to be overwritten if service_callbacks set * feat(user_api_key_auth.py): add parent otel component for auth allows us to isolate how much latency is added by auth checks * perf(parallel_request_limiter.py): move async_set_cache_pipeline (from max parallel request limiter) out of execution path (background task) reduces latency by 200ms * feat(user_api_key_auth.py): have user api key auth object return user tpm/rpm limits - reduces redis calls in downstream task (parallel_request_limiter) Reduces latency by 400-800ms * fix(parallel_request_limiter.py): use batch get cache to reduce user/key/team usage object calls reduces latency by 50-100ms * fix: fix linting error * fix(_service_logger.py): fix import * fix(user_api_key_auth.py): fix service logging * fix(dual_cache.py): don't pass 'self' * fix: fix python3.8 error * fix: fix init] * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * fix ImageObject conversion (#6584) * (fix) litellm.text_completion raises a non-blocking error on simple usage (#6546) * unit test test_huggingface_text_completion_logprobs * fix return TextCompletionHandler convert_chat_to_text_completion * fix hf rest api * fix test_huggingface_text_completion_logprobs * fix linting errors * fix importLiteLLMResponseObjectHandler * fix test for LiteLLMResponseObjectHandler * fix test text completion * fix allow using 15 seconds for premium license check * testing fix bedrock deprecated cohere.command-text-v14 * (feat) add `Predicted Outputs` for OpenAI (#6594) * bump openai to openai==1.54.0 * add 'prediction' param * testing fix bedrock deprecated cohere.command-text-v14 * test test_openai_prediction_param.py * test_openai_prediction_param_with_caching * doc Predicted Outputs * doc Predicted Output * (fix) Vertex Improve Performance when using `image_url` (#6593) * fix transformation vertex * test test_process_gemini_image * test_image_completion_request * testing fix - bedrock has deprecated cohere.command-text-v14 * fix vertex pdf * bump: version 1.51.5 → 1.52.0 * fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check (#6577) * fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check * fix(lowest_tpm_rpm_v2.py): return headers in correct format * test: update test * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * test: remove eol model * fix(proxy_server.py): fix db config loading logic * fix(proxy_server.py): fix order of config / db updates, to ensure fields not overwritten * test: skip test if required env var is missing * test: fix test --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> * test: mark flaky test * test: handle anthropic api instability * test: update test * test: bump num retries on langfuse tests - their api is quite bad --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> --- litellm/__init__.py | 15 +- .../exception_mapping_utils.py | 22 +- .../get_supported_openai_params.py | 288 +++++++++++++ litellm/llms/custom_httpx/http_handler.py | 3 + litellm/llms/databricks/streaming_utils.py | 27 +- litellm/llms/openai_like/chat/handler.py | 372 ++++++++++++++++ litellm/llms/openai_like/common_utils.py | 42 ++ litellm/llms/openai_like/embedding/handler.py | 39 +- litellm/llms/watsonx/chat/handler.py | 123 ++++++ litellm/llms/watsonx/chat/transformation.py | 82 ++++ litellm/llms/watsonx/common_utils.py | 172 ++++++++ .../completion/handler.py} | 234 ++++------ litellm/main.py | 28 +- litellm/types/llms/openai.py | 22 +- litellm/types/llms/watsonx.py | 31 ++ litellm/utils.py | 399 ++++-------------- tests/llm_translation/test_databricks.py | 2 +- tests/llm_translation/test_optional_params.py | 16 + tests/local_testing/test_alangfuse.py | 14 +- tests/local_testing/test_exceptions.py | 2 +- tests/local_testing/test_function_calling.py | 61 ++- tests/local_testing/test_streaming.py | 14 +- tests/local_testing/test_utils.py | 52 +++ .../test_datadog_llm_obs.py | 4 +- 24 files changed, 1510 insertions(+), 554 deletions(-) create mode 100644 litellm/litellm_core_utils/get_supported_openai_params.py create mode 100644 litellm/llms/openai_like/chat/handler.py create mode 100644 litellm/llms/watsonx/chat/handler.py create mode 100644 litellm/llms/watsonx/chat/transformation.py create mode 100644 litellm/llms/watsonx/common_utils.py rename litellm/llms/{watsonx.py => watsonx/completion/handler.py} (78%) create mode 100644 litellm/types/llms/watsonx.py diff --git a/litellm/__init__.py b/litellm/__init__.py index eb59f6d6b..f388bf17a 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -137,6 +137,8 @@ safe_memory_mode: bool = False enable_azure_ad_token_refresh: Optional[bool] = False ### DEFAULT AZURE API VERSION ### AZURE_DEFAULT_API_VERSION = "2024-08-01-preview" # this is updated to the latest +### DEFAULT WATSONX API VERSION ### +WATSONX_DEFAULT_API_VERSION = "2024-03-13" ### COHERE EMBEDDINGS DEFAULT TYPE ### COHERE_DEFAULT_EMBEDDING_INPUT_TYPE: COHERE_EMBEDDING_INPUT_TYPES = "search_document" ### GUARDRAILS ### @@ -282,7 +284,9 @@ priority_reservation: Optional[Dict[str, float]] = None #### RELIABILITY #### REPEATED_STREAMING_CHUNK_LIMIT = 100 # catch if model starts looping the same chunk while streaming. Uses high default to prevent false positives. request_timeout: float = 6000 # time in seconds -module_level_aclient = AsyncHTTPHandler(timeout=request_timeout) +module_level_aclient = AsyncHTTPHandler( + timeout=request_timeout, client_alias="module level aclient" +) module_level_client = HTTPHandler(timeout=request_timeout) num_retries: Optional[int] = None # per model endpoint max_fallbacks: Optional[int] = None @@ -527,7 +531,11 @@ openai_text_completion_compatible_providers: List = ( "hosted_vllm", ] ) - +_openai_like_providers: List = [ + "predibase", + "databricks", + "watsonx", +] # private helper. similar to openai but require some custom auth / endpoint handling, so can't use the openai sdk # well supported replicate llms replicate_models: List = [ # llama replicate supported LLMs @@ -1040,7 +1048,8 @@ from .llms.hosted_vllm.chat.transformation import HostedVLLMChatConfig from .llms.lm_studio.chat.transformation import LMStudioChatConfig from .llms.perplexity.chat.transformation import PerplexityChatConfig from .llms.AzureOpenAI.chat.o1_transformation import AzureOpenAIO1Config -from .llms.watsonx import IBMWatsonXAIConfig +from .llms.watsonx.completion.handler import IBMWatsonXAIConfig +from .llms.watsonx.chat.transformation import IBMWatsonXChatConfig from .main import * # type: ignore from .integrations import * from .exceptions import ( diff --git a/litellm/litellm_core_utils/exception_mapping_utils.py b/litellm/litellm_core_utils/exception_mapping_utils.py index 14d5bffdb..a4a30fc31 100644 --- a/litellm/litellm_core_utils/exception_mapping_utils.py +++ b/litellm/litellm_core_utils/exception_mapping_utils.py @@ -612,19 +612,7 @@ def exception_type( # type: ignore # noqa: PLR0915 url="https://api.replicate.com/v1/deployments", ), ) - elif custom_llm_provider == "watsonx": - if "token_quota_reached" in error_str: - exception_mapping_worked = True - raise RateLimitError( - message=f"WatsonxException: Rate Limit Errror - {error_str}", - llm_provider="watsonx", - model=model, - response=original_exception.response, - ) - elif ( - custom_llm_provider == "predibase" - or custom_llm_provider == "databricks" - ): + elif custom_llm_provider in litellm._openai_like_providers: if "authorization denied for" in error_str: exception_mapping_worked = True @@ -646,6 +634,14 @@ def exception_type( # type: ignore # noqa: PLR0915 response=original_exception.response, litellm_debug_info=extra_information, ) + elif "token_quota_reached" in error_str: + exception_mapping_worked = True + raise RateLimitError( + message=f"{custom_llm_provider}Exception: Rate Limit Errror - {error_str}", + llm_provider=custom_llm_provider, + model=model, + response=original_exception.response, + ) elif ( "The server received an invalid response from an upstream server." in error_str diff --git a/litellm/litellm_core_utils/get_supported_openai_params.py b/litellm/litellm_core_utils/get_supported_openai_params.py new file mode 100644 index 000000000..bb94d54d5 --- /dev/null +++ b/litellm/litellm_core_utils/get_supported_openai_params.py @@ -0,0 +1,288 @@ +from typing import Literal, Optional + +import litellm +from litellm.exceptions import BadRequestError + + +def get_supported_openai_params( # noqa: PLR0915 + model: str, + custom_llm_provider: Optional[str] = None, + request_type: Literal["chat_completion", "embeddings"] = "chat_completion", +) -> Optional[list]: + """ + Returns the supported openai params for a given model + provider + + Example: + ``` + get_supported_openai_params(model="anthropic.claude-3", custom_llm_provider="bedrock") + ``` + + Returns: + - List if custom_llm_provider is mapped + - None if unmapped + """ + if not custom_llm_provider: + try: + custom_llm_provider = litellm.get_llm_provider(model=model)[1] + except BadRequestError: + return None + if custom_llm_provider == "bedrock": + return litellm.AmazonConverseConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "ollama": + return litellm.OllamaConfig().get_supported_openai_params() + elif custom_llm_provider == "ollama_chat": + return litellm.OllamaChatConfig().get_supported_openai_params() + elif custom_llm_provider == "anthropic": + return litellm.AnthropicConfig().get_supported_openai_params() + elif custom_llm_provider == "fireworks_ai": + if request_type == "embeddings": + return litellm.FireworksAIEmbeddingConfig().get_supported_openai_params( + model=model + ) + else: + return litellm.FireworksAIConfig().get_supported_openai_params() + elif custom_llm_provider == "nvidia_nim": + if request_type == "chat_completion": + return litellm.nvidiaNimConfig.get_supported_openai_params(model=model) + elif request_type == "embeddings": + return litellm.nvidiaNimEmbeddingConfig.get_supported_openai_params() + elif custom_llm_provider == "cerebras": + return litellm.CerebrasConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "xai": + return litellm.XAIChatConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "ai21_chat": + return litellm.AI21ChatConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "volcengine": + return litellm.VolcEngineConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "groq": + return litellm.GroqChatConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "hosted_vllm": + return litellm.HostedVLLMChatConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "deepseek": + return [ + # https://platform.deepseek.com/api-docs/api/create-chat-completion + "frequency_penalty", + "max_tokens", + "presence_penalty", + "response_format", + "stop", + "stream", + "temperature", + "top_p", + "logprobs", + "top_logprobs", + "tools", + "tool_choice", + ] + elif custom_llm_provider == "cohere": + return [ + "stream", + "temperature", + "max_tokens", + "logit_bias", + "top_p", + "frequency_penalty", + "presence_penalty", + "stop", + "n", + "extra_headers", + ] + elif custom_llm_provider == "cohere_chat": + return [ + "stream", + "temperature", + "max_tokens", + "top_p", + "frequency_penalty", + "presence_penalty", + "stop", + "n", + "tools", + "tool_choice", + "seed", + "extra_headers", + ] + elif custom_llm_provider == "maritalk": + return [ + "stream", + "temperature", + "max_tokens", + "top_p", + "presence_penalty", + "stop", + ] + elif custom_llm_provider == "openai": + return litellm.OpenAIConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "azure": + if litellm.AzureOpenAIO1Config().is_o1_model(model=model): + return litellm.AzureOpenAIO1Config().get_supported_openai_params( + model=model + ) + else: + return litellm.AzureOpenAIConfig().get_supported_openai_params() + elif custom_llm_provider == "openrouter": + return [ + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "repetition_penalty", + "seed", + "max_tokens", + "logit_bias", + "logprobs", + "top_logprobs", + "response_format", + "stop", + "tools", + "tool_choice", + ] + elif custom_llm_provider == "mistral" or custom_llm_provider == "codestral": + # mistal and codestral api have the exact same params + if request_type == "chat_completion": + return litellm.MistralConfig().get_supported_openai_params() + elif request_type == "embeddings": + return litellm.MistralEmbeddingConfig().get_supported_openai_params() + elif custom_llm_provider == "text-completion-codestral": + return litellm.MistralTextCompletionConfig().get_supported_openai_params() + elif custom_llm_provider == "replicate": + return [ + "stream", + "temperature", + "max_tokens", + "top_p", + "stop", + "seed", + "tools", + "tool_choice", + "functions", + "function_call", + ] + elif custom_llm_provider == "huggingface": + return litellm.HuggingfaceConfig().get_supported_openai_params() + elif custom_llm_provider == "together_ai": + return [ + "stream", + "temperature", + "max_tokens", + "top_p", + "stop", + "frequency_penalty", + "tools", + "tool_choice", + "response_format", + ] + elif custom_llm_provider == "ai21": + return [ + "stream", + "n", + "temperature", + "max_tokens", + "top_p", + "stop", + "frequency_penalty", + "presence_penalty", + ] + elif custom_llm_provider == "databricks": + if request_type == "chat_completion": + return litellm.DatabricksConfig().get_supported_openai_params() + elif request_type == "embeddings": + return litellm.DatabricksEmbeddingConfig().get_supported_openai_params() + elif custom_llm_provider == "palm" or custom_llm_provider == "gemini": + return litellm.GoogleAIStudioGeminiConfig().get_supported_openai_params() + elif custom_llm_provider == "vertex_ai": + if request_type == "chat_completion": + if model.startswith("meta/"): + return litellm.VertexAILlama3Config().get_supported_openai_params() + if model.startswith("mistral"): + return litellm.MistralConfig().get_supported_openai_params() + if model.startswith("codestral"): + return ( + litellm.MistralTextCompletionConfig().get_supported_openai_params() + ) + if model.startswith("claude"): + return litellm.VertexAIAnthropicConfig().get_supported_openai_params() + return litellm.VertexAIConfig().get_supported_openai_params() + elif request_type == "embeddings": + return litellm.VertexAITextEmbeddingConfig().get_supported_openai_params() + elif custom_llm_provider == "vertex_ai_beta": + if request_type == "chat_completion": + return litellm.VertexGeminiConfig().get_supported_openai_params() + elif request_type == "embeddings": + return litellm.VertexAITextEmbeddingConfig().get_supported_openai_params() + elif custom_llm_provider == "sagemaker": + return ["stream", "temperature", "max_tokens", "top_p", "stop", "n"] + elif custom_llm_provider == "aleph_alpha": + return [ + "max_tokens", + "stream", + "top_p", + "temperature", + "presence_penalty", + "frequency_penalty", + "n", + "stop", + ] + elif custom_llm_provider == "cloudflare": + return ["max_tokens", "stream"] + elif custom_llm_provider == "nlp_cloud": + return [ + "max_tokens", + "stream", + "temperature", + "top_p", + "presence_penalty", + "frequency_penalty", + "n", + "stop", + ] + elif custom_llm_provider == "petals": + return ["max_tokens", "temperature", "top_p", "stream"] + elif custom_llm_provider == "deepinfra": + return litellm.DeepInfraConfig().get_supported_openai_params() + elif custom_llm_provider == "perplexity": + return [ + "temperature", + "top_p", + "stream", + "max_tokens", + "presence_penalty", + "frequency_penalty", + ] + elif custom_llm_provider == "anyscale": + return [ + "temperature", + "top_p", + "stream", + "max_tokens", + "stop", + "frequency_penalty", + "presence_penalty", + ] + elif custom_llm_provider == "watsonx": + return litellm.IBMWatsonXChatConfig().get_supported_openai_params(model=model) + elif custom_llm_provider == "custom_openai" or "text-completion-openai": + return [ + "functions", + "function_call", + "temperature", + "top_p", + "n", + "stream", + "stream_options", + "stop", + "max_tokens", + "presence_penalty", + "frequency_penalty", + "logit_bias", + "user", + "response_format", + "seed", + "tools", + "tool_choice", + "max_retries", + "logprobs", + "top_logprobs", + "extra_headers", + ] + return None diff --git a/litellm/llms/custom_httpx/http_handler.py b/litellm/llms/custom_httpx/http_handler.py index 55851a636..9e5ed782e 100644 --- a/litellm/llms/custom_httpx/http_handler.py +++ b/litellm/llms/custom_httpx/http_handler.py @@ -34,12 +34,14 @@ class AsyncHTTPHandler: timeout: Optional[Union[float, httpx.Timeout]] = None, event_hooks: Optional[Mapping[str, List[Callable[..., Any]]]] = None, concurrent_limit=1000, + client_alias: Optional[str] = None, # name for client in logs ): self.timeout = timeout self.event_hooks = event_hooks self.client = self.create_client( timeout=timeout, concurrent_limit=concurrent_limit, event_hooks=event_hooks ) + self.client_alias = client_alias def create_client( self, @@ -112,6 +114,7 @@ class AsyncHTTPHandler: try: if timeout is None: timeout = self.timeout + req = self.client.build_request( "POST", url, data=data, json=json, params=params, headers=headers, timeout=timeout # type: ignore ) diff --git a/litellm/llms/databricks/streaming_utils.py b/litellm/llms/databricks/streaming_utils.py index dd6b3c8aa..a87ab39bb 100644 --- a/litellm/llms/databricks/streaming_utils.py +++ b/litellm/llms/databricks/streaming_utils.py @@ -2,6 +2,7 @@ import json from typing import Optional import litellm +from litellm import verbose_logger from litellm.types.llms.openai import ( ChatCompletionDeltaChunk, ChatCompletionResponseMessage, @@ -109,7 +110,17 @@ class ModelResponseIterator: except StopIteration: raise StopIteration except ValueError as e: - raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}") + verbose_logger.debug( + f"Error parsing chunk: {e},\nReceived chunk: {chunk}. Defaulting to empty chunk here." + ) + return GenericStreamingChunk( + text="", + is_finished=False, + finish_reason="", + usage=None, + index=0, + tool_use=None, + ) # Async iterator def __aiter__(self): @@ -123,6 +134,8 @@ class ModelResponseIterator: raise StopAsyncIteration except ValueError as e: raise RuntimeError(f"Error receiving chunk from stream: {e}") + except Exception as e: + raise RuntimeError(f"Error receiving chunk from stream: {e}") try: chunk = chunk.replace("data:", "") @@ -144,4 +157,14 @@ class ModelResponseIterator: except StopAsyncIteration: raise StopAsyncIteration except ValueError as e: - raise RuntimeError(f"Error parsing chunk: {e},\nReceived chunk: {chunk}") + verbose_logger.debug( + f"Error parsing chunk: {e},\nReceived chunk: {chunk}. Defaulting to empty chunk here." + ) + return GenericStreamingChunk( + text="", + is_finished=False, + finish_reason="", + usage=None, + index=0, + tool_use=None, + ) diff --git a/litellm/llms/openai_like/chat/handler.py b/litellm/llms/openai_like/chat/handler.py new file mode 100644 index 000000000..0dbc3a978 --- /dev/null +++ b/litellm/llms/openai_like/chat/handler.py @@ -0,0 +1,372 @@ +""" +OpenAI-like chat completion handler + +For handling OpenAI-like chat completions, like IBM WatsonX, etc. +""" + +import copy +import json +import os +import time +import types +from enum import Enum +from functools import partial +from typing import Any, Callable, List, Literal, Optional, Tuple, Union + +import httpx # type: ignore +import requests # type: ignore + +import litellm +from litellm.litellm_core_utils.core_helpers import map_finish_reason +from litellm.llms.custom_httpx.http_handler import ( + AsyncHTTPHandler, + HTTPHandler, + get_async_httpx_client, +) +from litellm.llms.databricks.streaming_utils import ModelResponseIterator +from litellm.types.utils import CustomStreamingDecoder, ModelResponse +from litellm.utils import CustomStreamWrapper, EmbeddingResponse + +from ..common_utils import OpenAILikeBase, OpenAILikeError + + +async def make_call( + client: Optional[AsyncHTTPHandler], + api_base: str, + headers: dict, + data: str, + model: str, + messages: list, + logging_obj, + streaming_decoder: Optional[CustomStreamingDecoder] = None, +): + if client is None: + client = litellm.module_level_aclient + + response = await client.post(api_base, headers=headers, data=data, stream=True) + + if streaming_decoder is not None: + completion_stream: Any = streaming_decoder.aiter_bytes( + response.aiter_bytes(chunk_size=1024) + ) + else: + completion_stream = ModelResponseIterator( + streaming_response=response.aiter_lines(), sync_stream=False + ) + # LOGGING + logging_obj.post_call( + input=messages, + api_key="", + original_response=completion_stream, # Pass the completion stream for logging + additional_args={"complete_input_dict": data}, + ) + + return completion_stream + + +def make_sync_call( + client: Optional[HTTPHandler], + api_base: str, + headers: dict, + data: str, + model: str, + messages: list, + logging_obj, + streaming_decoder: Optional[CustomStreamingDecoder] = None, +): + if client is None: + client = litellm.module_level_client # Create a new client if none provided + + response = client.post(api_base, headers=headers, data=data, stream=True) + + if response.status_code != 200: + raise OpenAILikeError(status_code=response.status_code, message=response.read()) + + if streaming_decoder is not None: + completion_stream = streaming_decoder.iter_bytes( + response.iter_bytes(chunk_size=1024) + ) + else: + completion_stream = ModelResponseIterator( + streaming_response=response.iter_lines(), sync_stream=True + ) + + # LOGGING + logging_obj.post_call( + input=messages, + api_key="", + original_response="first stream response received", + additional_args={"complete_input_dict": data}, + ) + + return completion_stream + + +class OpenAILikeChatHandler(OpenAILikeBase): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + async def acompletion_stream_function( + self, + model: str, + messages: list, + custom_llm_provider: str, + api_base: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key, + logging_obj, + stream, + data: dict, + optional_params=None, + litellm_params=None, + logger_fn=None, + headers={}, + client: Optional[AsyncHTTPHandler] = None, + streaming_decoder: Optional[CustomStreamingDecoder] = None, + ) -> CustomStreamWrapper: + + data["stream"] = True + completion_stream = await make_call( + client=client, + api_base=api_base, + headers=headers, + data=json.dumps(data), + model=model, + messages=messages, + logging_obj=logging_obj, + streaming_decoder=streaming_decoder, + ) + streamwrapper = CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider=custom_llm_provider, + logging_obj=logging_obj, + ) + + return streamwrapper + + async def acompletion_function( + self, + model: str, + messages: list, + api_base: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + custom_llm_provider: str, + print_verbose: Callable, + client: Optional[AsyncHTTPHandler], + encoding, + api_key, + logging_obj, + stream, + data: dict, + base_model: Optional[str], + optional_params: dict, + litellm_params=None, + logger_fn=None, + headers={}, + timeout: Optional[Union[float, httpx.Timeout]] = None, + ) -> ModelResponse: + if timeout is None: + timeout = httpx.Timeout(timeout=600.0, connect=5.0) + + if client is None: + client = litellm.module_level_aclient + + try: + response = await client.post( + api_base, headers=headers, data=json.dumps(data), timeout=timeout + ) + response.raise_for_status() + + response_json = response.json() + except httpx.HTTPStatusError as e: + raise OpenAILikeError( + status_code=e.response.status_code, + message=e.response.text, + ) + except httpx.TimeoutException: + raise OpenAILikeError(status_code=408, message="Timeout error occurred.") + except Exception as e: + raise OpenAILikeError(status_code=500, message=str(e)) + + logging_obj.post_call( + input=messages, + api_key="", + original_response=response_json, + additional_args={"complete_input_dict": data}, + ) + response = ModelResponse(**response_json) + + response.model = custom_llm_provider + "/" + (response.model or "") + + if base_model is not None: + response._hidden_params["model"] = base_model + return response + + def completion( + self, + model: str, + messages: list, + api_base: str, + custom_llm_provider: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key: Optional[str], + logging_obj, + optional_params: dict, + acompletion=None, + litellm_params=None, + logger_fn=None, + headers: Optional[dict] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + custom_endpoint: Optional[bool] = None, + streaming_decoder: Optional[ + CustomStreamingDecoder + ] = None, # if openai-compatible api needs custom stream decoder - e.g. sagemaker + ): + custom_endpoint = custom_endpoint or optional_params.pop( + "custom_endpoint", None + ) + base_model: Optional[str] = optional_params.pop("base_model", None) + api_base, headers = self._validate_environment( + api_base=api_base, + api_key=api_key, + endpoint_type="chat_completions", + custom_endpoint=custom_endpoint, + headers=headers, + ) + + stream: bool = optional_params.get("stream", None) or False + optional_params["stream"] = stream + + data = { + "model": model, + "messages": messages, + **optional_params, + } + + ## LOGGING + logging_obj.pre_call( + input=messages, + api_key=api_key, + additional_args={ + "complete_input_dict": data, + "api_base": api_base, + "headers": headers, + }, + ) + if acompletion is True: + if client is None or not isinstance(client, AsyncHTTPHandler): + client = None + if ( + stream is True + ): # if function call - fake the streaming (need complete blocks for output parsing in openai format) + data["stream"] = stream + return self.acompletion_stream_function( + model=model, + messages=messages, + data=data, + api_base=api_base, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + optional_params=optional_params, + stream=stream, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + client=client, + custom_llm_provider=custom_llm_provider, + streaming_decoder=streaming_decoder, + ) + else: + return self.acompletion_function( + model=model, + messages=messages, + data=data, + api_base=api_base, + custom_prompt_dict=custom_prompt_dict, + custom_llm_provider=custom_llm_provider, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + optional_params=optional_params, + stream=stream, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + timeout=timeout, + base_model=base_model, + client=client, + ) + else: + ## COMPLETION CALL + if stream is True: + completion_stream = make_sync_call( + client=( + client + if client is not None and isinstance(client, HTTPHandler) + else None + ), + api_base=api_base, + headers=headers, + data=json.dumps(data), + model=model, + messages=messages, + logging_obj=logging_obj, + streaming_decoder=streaming_decoder, + ) + # completion_stream.__iter__() + return CustomStreamWrapper( + completion_stream=completion_stream, + model=model, + custom_llm_provider=custom_llm_provider, + logging_obj=logging_obj, + ) + else: + if client is None or not isinstance(client, HTTPHandler): + client = HTTPHandler(timeout=timeout) # type: ignore + try: + response = client.post( + api_base, headers=headers, data=json.dumps(data) + ) + response.raise_for_status() + + response_json = response.json() + except httpx.HTTPStatusError as e: + raise OpenAILikeError( + status_code=e.response.status_code, + message=e.response.text, + ) + except httpx.TimeoutException: + raise OpenAILikeError( + status_code=408, message="Timeout error occurred." + ) + except Exception as e: + raise OpenAILikeError(status_code=500, message=str(e)) + logging_obj.post_call( + input=messages, + api_key="", + original_response=response_json, + additional_args={"complete_input_dict": data}, + ) + response = ModelResponse(**response_json) + + response.model = custom_llm_provider + "/" + (response.model or "") + + if base_model is not None: + response._hidden_params["model"] = base_model + + return response diff --git a/litellm/llms/openai_like/common_utils.py b/litellm/llms/openai_like/common_utils.py index adfd01586..3051618d4 100644 --- a/litellm/llms/openai_like/common_utils.py +++ b/litellm/llms/openai_like/common_utils.py @@ -1,3 +1,5 @@ +from typing import Literal, Optional, Tuple + import httpx @@ -10,3 +12,43 @@ class OpenAILikeError(Exception): super().__init__( self.message ) # Call the base class constructor with the parameters it needs + + +class OpenAILikeBase: + def __init__(self, **kwargs): + pass + + def _validate_environment( + self, + api_key: Optional[str], + api_base: Optional[str], + endpoint_type: Literal["chat_completions", "embeddings"], + headers: Optional[dict], + custom_endpoint: Optional[bool], + ) -> Tuple[str, dict]: + if api_key is None and headers is None: + raise OpenAILikeError( + status_code=400, + message="Missing API Key - A call is being made to LLM Provider but no key is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params", + ) + + if api_base is None: + raise OpenAILikeError( + status_code=400, + message="Missing API Base - A call is being made to LLM Provider but no api base is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params", + ) + + if headers is None: + headers = { + "Content-Type": "application/json", + } + + if api_key is not None: + headers.update({"Authorization": "Bearer {}".format(api_key)}) + + if not custom_endpoint: + if endpoint_type == "chat_completions": + api_base = "{}/chat/completions".format(api_base) + elif endpoint_type == "embeddings": + api_base = "{}/embeddings".format(api_base) + return api_base, headers diff --git a/litellm/llms/openai_like/embedding/handler.py b/litellm/llms/openai_like/embedding/handler.py index e83fc2686..ce0860724 100644 --- a/litellm/llms/openai_like/embedding/handler.py +++ b/litellm/llms/openai_like/embedding/handler.py @@ -23,46 +23,13 @@ from litellm.llms.custom_httpx.http_handler import ( ) from litellm.utils import EmbeddingResponse -from ..common_utils import OpenAILikeError +from ..common_utils import OpenAILikeBase, OpenAILikeError -class OpenAILikeEmbeddingHandler: +class OpenAILikeEmbeddingHandler(OpenAILikeBase): def __init__(self, **kwargs): pass - def _validate_environment( - self, - api_key: Optional[str], - api_base: Optional[str], - endpoint_type: Literal["chat_completions", "embeddings"], - headers: Optional[dict], - ) -> Tuple[str, dict]: - if api_key is None and headers is None: - raise OpenAILikeError( - status_code=400, - message="Missing API Key - A call is being made to LLM Provider but no key is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params", - ) - - if api_base is None: - raise OpenAILikeError( - status_code=400, - message="Missing API Base - A call is being made to LLM Provider but no api base is set either in the environment variables ({LLM_PROVIDER}_API_KEY) or via params", - ) - - if headers is None: - headers = { - "Content-Type": "application/json", - } - - if api_key is not None: - headers.update({"Authorization": "Bearer {}".format(api_key)}) - - if endpoint_type == "chat_completions": - api_base = "{}/chat/completions".format(api_base) - elif endpoint_type == "embeddings": - api_base = "{}/embeddings".format(api_base) - return api_base, headers - async def aembedding( self, input: list, @@ -133,6 +100,7 @@ class OpenAILikeEmbeddingHandler: model_response: Optional[litellm.utils.EmbeddingResponse] = None, client=None, aembedding=None, + custom_endpoint: Optional[bool] = None, headers: Optional[dict] = None, ) -> EmbeddingResponse: api_base, headers = self._validate_environment( @@ -140,6 +108,7 @@ class OpenAILikeEmbeddingHandler: api_key=api_key, endpoint_type="embeddings", headers=headers, + custom_endpoint=custom_endpoint, ) model = model data = {"model": model, "input": input, **optional_params} diff --git a/litellm/llms/watsonx/chat/handler.py b/litellm/llms/watsonx/chat/handler.py new file mode 100644 index 000000000..b016bb0a7 --- /dev/null +++ b/litellm/llms/watsonx/chat/handler.py @@ -0,0 +1,123 @@ +from typing import Callable, Optional, Union + +import httpx + +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from litellm.types.llms.watsonx import WatsonXAIEndpoint, WatsonXAPIParams +from litellm.types.utils import CustomStreamingDecoder, ModelResponse + +from ...openai_like.chat.handler import OpenAILikeChatHandler +from ..common_utils import WatsonXAIError, _get_api_params + + +class WatsonXChatHandler(OpenAILikeChatHandler): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def _prepare_url( + self, model: str, api_params: WatsonXAPIParams, stream: Optional[bool] + ) -> str: + if model.startswith("deployment/"): + if api_params.get("space_id") is None: + raise WatsonXAIError( + status_code=401, + url=api_params["url"], + message="Error: space_id is required for models called using the 'deployment/' endpoint. Pass in the space_id as a parameter or set it in the WX_SPACE_ID environment variable.", + ) + deployment_id = "/".join(model.split("/")[1:]) + endpoint = ( + WatsonXAIEndpoint.DEPLOYMENT_CHAT_STREAM.value + if stream is True + else WatsonXAIEndpoint.DEPLOYMENT_CHAT.value + ) + endpoint = endpoint.format(deployment_id=deployment_id) + else: + endpoint = ( + WatsonXAIEndpoint.CHAT_STREAM.value + if stream is True + else WatsonXAIEndpoint.CHAT.value + ) + base_url = httpx.URL(api_params["url"]) + base_url = base_url.join(endpoint) + full_url = str( + base_url.copy_add_param(key="version", value=api_params["api_version"]) + ) + + return full_url + + def _prepare_payload( + self, model: str, api_params: WatsonXAPIParams, stream: Optional[bool] + ) -> dict: + payload: dict = {} + if model.startswith("deployment/"): + return payload + payload["model_id"] = model + payload["project_id"] = api_params["project_id"] + return payload + + def completion( + self, + model: str, + messages: list, + api_base: str, + custom_llm_provider: str, + custom_prompt_dict: dict, + model_response: ModelResponse, + print_verbose: Callable, + encoding, + api_key: Optional[str], + logging_obj, + optional_params: dict, + acompletion=None, + litellm_params=None, + logger_fn=None, + headers: Optional[dict] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, + client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None, + custom_endpoint: Optional[bool] = None, + streaming_decoder: Optional[ + CustomStreamingDecoder + ] = None, # if openai-compatible api needs custom stream decoder - e.g. sagemaker + ): + api_params = _get_api_params(optional_params, print_verbose=print_verbose) + + if headers is None: + headers = {} + headers.update( + { + "Authorization": f"Bearer {api_params['token']}", + "Content-Type": "application/json", + "Accept": "application/json", + } + ) + + stream: Optional[bool] = optional_params.get("stream", False) + + ## get api url and payload + api_base = self._prepare_url(model=model, api_params=api_params, stream=stream) + watsonx_auth_payload = self._prepare_payload( + model=model, api_params=api_params, stream=stream + ) + optional_params.update(watsonx_auth_payload) + + return super().completion( + model=model, + messages=messages, + api_base=api_base, + custom_llm_provider=custom_llm_provider, + custom_prompt_dict=custom_prompt_dict, + model_response=model_response, + print_verbose=print_verbose, + encoding=encoding, + api_key=api_key, + logging_obj=logging_obj, + optional_params=optional_params, + acompletion=acompletion, + litellm_params=litellm_params, + logger_fn=logger_fn, + headers=headers, + timeout=timeout, + client=client, + custom_endpoint=True, + streaming_decoder=streaming_decoder, + ) diff --git a/litellm/llms/watsonx/chat/transformation.py b/litellm/llms/watsonx/chat/transformation.py new file mode 100644 index 000000000..13fd51603 --- /dev/null +++ b/litellm/llms/watsonx/chat/transformation.py @@ -0,0 +1,82 @@ +""" +Translation from OpenAI's `/chat/completions` endpoint to IBM WatsonX's `/text/chat` endpoint. + +Docs: https://cloud.ibm.com/apidocs/watsonx-ai#text-chat +""" + +import types +from typing import List, Optional, Tuple, Union + +from pydantic import BaseModel + +import litellm +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage + +from ....utils import _remove_additional_properties, _remove_strict_from_schema +from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig + + +class IBMWatsonXChatConfig(OpenAIGPTConfig): + + def get_supported_openai_params(self, model: str) -> List: + return [ + "temperature", # equivalent to temperature + "max_tokens", # equivalent to max_new_tokens + "top_p", # equivalent to top_p + "frequency_penalty", # equivalent to repetition_penalty + "stop", # equivalent to stop_sequences + "seed", # equivalent to random_seed + "stream", # equivalent to stream + "tools", + "tool_choice", # equivalent to tool_choice + tool_choice_options + "logprobs", + "top_logprobs", + "n", + "presence_penalty", + "response_format", + ] + + def is_tool_choice_option(self, tool_choice: Optional[Union[str, dict]]) -> bool: + if tool_choice is None: + return False + if isinstance(tool_choice, str): + return tool_choice in ["auto", "none", "required"] + return False + + def map_openai_params( + self, + non_default_params: dict, + optional_params: dict, + model: str, + drop_params: bool, + ) -> dict: + ## TOOLS ## + _tools = non_default_params.pop("tools", None) + if _tools is not None: + # remove 'additionalProperties' from tools + _tools = _remove_additional_properties(_tools) + # remove 'strict' from tools + _tools = _remove_strict_from_schema(_tools) + if _tools is not None: + non_default_params["tools"] = _tools + + ## TOOL CHOICE ## + + _tool_choice = non_default_params.pop("tool_choice", None) + if self.is_tool_choice_option(_tool_choice): + optional_params["tool_choice_options"] = _tool_choice + elif _tool_choice is not None: + optional_params["tool_choice"] = _tool_choice + return super().map_openai_params( + non_default_params, optional_params, model, drop_params + ) + + def _get_openai_compatible_provider_info( + self, api_base: Optional[str], api_key: Optional[str] + ) -> Tuple[Optional[str], Optional[str]]: + api_base = api_base or get_secret_str("HOSTED_VLLM_API_BASE") # type: ignore + dynamic_api_key = ( + api_key or get_secret_str("HOSTED_VLLM_API_KEY") or "" + ) # vllm does not require an api key + return api_base, dynamic_api_key diff --git a/litellm/llms/watsonx/common_utils.py b/litellm/llms/watsonx/common_utils.py new file mode 100644 index 000000000..976b8e6dd --- /dev/null +++ b/litellm/llms/watsonx/common_utils.py @@ -0,0 +1,172 @@ +from typing import Callable, Optional, cast + +import httpx + +import litellm +from litellm import verbose_logger +from litellm.caching import InMemoryCache +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.watsonx import WatsonXAPIParams + + +class WatsonXAIError(Exception): + def __init__(self, status_code, message, url: Optional[str] = None): + self.status_code = status_code + self.message = message + url = url or "https://https://us-south.ml.cloud.ibm.com" + self.request = httpx.Request(method="POST", url=url) + self.response = httpx.Response(status_code=status_code, request=self.request) + super().__init__( + self.message + ) # Call the base class constructor with the parameters it needs + + +iam_token_cache = InMemoryCache() + + +def generate_iam_token(api_key=None, **params) -> str: + result: Optional[str] = iam_token_cache.get_cache(api_key) # type: ignore + + if result is None: + headers = {} + headers["Content-Type"] = "application/x-www-form-urlencoded" + if api_key is None: + api_key = get_secret_str("WX_API_KEY") or get_secret_str("WATSONX_API_KEY") + if api_key is None: + raise ValueError("API key is required") + headers["Accept"] = "application/json" + data = { + "grant_type": "urn:ibm:params:oauth:grant-type:apikey", + "apikey": api_key, + } + verbose_logger.debug( + "calling ibm `/identity/token` to retrieve IAM token.\nURL=%s\nheaders=%s\ndata=%s", + "https://iam.cloud.ibm.com/identity/token", + headers, + data, + ) + response = httpx.post( + "https://iam.cloud.ibm.com/identity/token", data=data, headers=headers + ) + response.raise_for_status() + json_data = response.json() + + result = json_data["access_token"] + iam_token_cache.set_cache( + key=api_key, + value=result, + ttl=json_data["expires_in"] - 10, # leave some buffer + ) + + return cast(str, result) + + +def _get_api_params( + params: dict, + print_verbose: Optional[Callable] = None, + generate_token: Optional[bool] = True, +) -> WatsonXAPIParams: + """ + Find watsonx.ai credentials in the params or environment variables and return the headers for authentication. + """ + # Load auth variables from params + url = params.pop("url", params.pop("api_base", params.pop("base_url", None))) + api_key = params.pop("apikey", None) + token = params.pop("token", None) + project_id = params.pop( + "project_id", params.pop("watsonx_project", None) + ) # watsonx.ai project_id - allow 'watsonx_project' to be consistent with how vertex project implementation works -> reduce provider-specific params + space_id = params.pop("space_id", None) # watsonx.ai deployment space_id + region_name = params.pop("region_name", params.pop("region", None)) + if region_name is None: + region_name = params.pop( + "watsonx_region_name", params.pop("watsonx_region", None) + ) # consistent with how vertex ai + aws regions are accepted + wx_credentials = params.pop( + "wx_credentials", + params.pop( + "watsonx_credentials", None + ), # follow {provider}_credentials, same as vertex ai + ) + api_version = params.pop("api_version", litellm.WATSONX_DEFAULT_API_VERSION) + # Load auth variables from environment variables + if url is None: + url = ( + get_secret_str("WATSONX_API_BASE") # consistent with 'AZURE_API_BASE' + or get_secret_str("WATSONX_URL") + or get_secret_str("WX_URL") + or get_secret_str("WML_URL") + ) + if api_key is None: + api_key = ( + get_secret_str("WATSONX_APIKEY") + or get_secret_str("WATSONX_API_KEY") + or get_secret_str("WX_API_KEY") + ) + if token is None: + token = get_secret_str("WATSONX_TOKEN") or get_secret_str("WX_TOKEN") + if project_id is None: + project_id = ( + get_secret_str("WATSONX_PROJECT_ID") + or get_secret_str("WX_PROJECT_ID") + or get_secret_str("PROJECT_ID") + ) + if region_name is None: + region_name = ( + get_secret_str("WATSONX_REGION") + or get_secret_str("WX_REGION") + or get_secret_str("REGION") + ) + if space_id is None: + space_id = ( + get_secret_str("WATSONX_DEPLOYMENT_SPACE_ID") + or get_secret_str("WATSONX_SPACE_ID") + or get_secret_str("WX_SPACE_ID") + or get_secret_str("SPACE_ID") + ) + + # credentials parsing + if wx_credentials is not None: + url = wx_credentials.get("url", url) + api_key = wx_credentials.get("apikey", wx_credentials.get("api_key", api_key)) + token = wx_credentials.get( + "token", + wx_credentials.get( + "watsonx_token", token + ), # follow format of {provider}_token, same as azure - e.g. 'azure_ad_token=..' + ) + + # verify that all required credentials are present + if url is None: + raise WatsonXAIError( + status_code=401, + message="Error: Watsonx URL not set. Set WX_URL in environment variables or pass in as a parameter.", + ) + + if token is None and api_key is not None and generate_token: + # generate the auth token + if print_verbose is not None: + print_verbose("Generating IAM token for Watsonx.ai") + token = generate_iam_token(api_key) + elif token is None and api_key is None: + raise WatsonXAIError( + status_code=401, + url=url, + message="Error: API key or token not found. Set WX_API_KEY or WX_TOKEN in environment variables or pass in as a parameter.", + ) + if project_id is None: + raise WatsonXAIError( + status_code=401, + url=url, + message="Error: Watsonx project_id not set. Set WX_PROJECT_ID in environment variables or pass in as a parameter.", + ) + + return WatsonXAPIParams( + url=url, + api_key=api_key, + token=cast(str, token), + project_id=project_id, + space_id=space_id, + region_name=region_name, + api_version=api_version, + ) diff --git a/litellm/llms/watsonx.py b/litellm/llms/watsonx/completion/handler.py similarity index 78% rename from litellm/llms/watsonx.py rename to litellm/llms/watsonx/completion/handler.py index c54eb30f8..fda25ba0f 100644 --- a/litellm/llms/watsonx.py +++ b/litellm/llms/watsonx/completion/handler.py @@ -26,22 +26,12 @@ import requests # type: ignore import litellm from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.watsonx import WatsonXAIEndpoint from litellm.utils import EmbeddingResponse, ModelResponse, Usage, map_finish_reason -from .base import BaseLLM -from .prompt_templates import factory as ptf - - -class WatsonXAIError(Exception): - def __init__(self, status_code, message, url: Optional[str] = None): - self.status_code = status_code - self.message = message - url = url or "https://https://us-south.ml.cloud.ibm.com" - self.request = httpx.Request(method="POST", url=url) - self.response = httpx.Response(status_code=status_code, request=self.request) - super().__init__( - self.message - ) # Call the base class constructor with the parameters it needs +from ...base import BaseLLM +from ...prompt_templates import factory as ptf +from ..common_utils import WatsonXAIError, _get_api_params, generate_iam_token class IBMWatsonXAIConfig: @@ -140,6 +130,29 @@ class IBMWatsonXAIConfig: and v is not None } + def is_watsonx_text_param(self, param: str) -> bool: + """ + Determine if user passed in a watsonx.ai text generation param + """ + text_generation_params = [ + "decoding_method", + "max_new_tokens", + "min_new_tokens", + "length_penalty", + "stop_sequences", + "top_k", + "repetition_penalty", + "truncate_input_tokens", + "include_stop_sequences", + "return_options", + "random_seed", + "moderations", + "decoding_method", + "min_tokens", + ] + + return param in text_generation_params + def get_supported_openai_params(self): return [ "temperature", # equivalent to temperature @@ -151,6 +164,44 @@ class IBMWatsonXAIConfig: "stream", # equivalent to stream ] + def map_openai_params( + self, non_default_params: dict, optional_params: dict + ) -> dict: + extra_body = {} + for k, v in non_default_params.items(): + if k == "max_tokens": + optional_params["max_new_tokens"] = v + elif k == "stream": + optional_params["stream"] = v + elif k == "temperature": + optional_params["temperature"] = v + elif k == "top_p": + optional_params["top_p"] = v + elif k == "frequency_penalty": + optional_params["repetition_penalty"] = v + elif k == "seed": + optional_params["random_seed"] = v + elif k == "stop": + optional_params["stop_sequences"] = v + elif k == "decoding_method": + extra_body["decoding_method"] = v + elif k == "min_tokens": + extra_body["min_new_tokens"] = v + elif k == "top_k": + extra_body["top_k"] = v + elif k == "truncate_input_tokens": + extra_body["truncate_input_tokens"] = v + elif k == "length_penalty": + extra_body["length_penalty"] = v + elif k == "time_limit": + extra_body["time_limit"] = v + elif k == "return_options": + extra_body["return_options"] = v + + if extra_body: + optional_params["extra_body"] = extra_body + return optional_params + def get_mapped_special_auth_params(self) -> dict: """ Common auth params across bedrock/vertex_ai/azure/watsonx @@ -212,18 +263,6 @@ def convert_messages_to_prompt(model, messages, provider, custom_prompt_dict) -> return prompt -class WatsonXAIEndpoint(str, Enum): - TEXT_GENERATION = "/ml/v1/text/generation" - TEXT_GENERATION_STREAM = "/ml/v1/text/generation_stream" - DEPLOYMENT_TEXT_GENERATION = "/ml/v1/deployments/{deployment_id}/text/generation" - DEPLOYMENT_TEXT_GENERATION_STREAM = ( - "/ml/v1/deployments/{deployment_id}/text/generation_stream" - ) - EMBEDDINGS = "/ml/v1/text/embeddings" - PROMPTS = "/ml/v1/prompts" - AVAILABLE_MODELS = "/ml/v1/foundation_model_specs" - - class IBMWatsonXAI(BaseLLM): """ Class to interface with IBM watsonx.ai API for text generation and embeddings. @@ -247,10 +286,10 @@ class IBMWatsonXAI(BaseLLM): """ Get the request parameters for text generation. """ - api_params = self._get_api_params(optional_params, print_verbose=print_verbose) + api_params = _get_api_params(optional_params, print_verbose=print_verbose) # build auth headers api_token = api_params.get("token") - + self.token = api_token headers = { "Authorization": f"Bearer {api_token}", "Content-Type": "application/json", @@ -294,118 +333,6 @@ class IBMWatsonXAI(BaseLLM): method="POST", url=url, headers=headers, json=payload, params=request_params ) - def _get_api_params( - self, - params: dict, - print_verbose: Optional[Callable] = None, - generate_token: Optional[bool] = True, - ) -> dict: - """ - Find watsonx.ai credentials in the params or environment variables and return the headers for authentication. - """ - # Load auth variables from params - url = params.pop("url", params.pop("api_base", params.pop("base_url", None))) - api_key = params.pop("apikey", None) - token = params.pop("token", None) - project_id = params.pop( - "project_id", params.pop("watsonx_project", None) - ) # watsonx.ai project_id - allow 'watsonx_project' to be consistent with how vertex project implementation works -> reduce provider-specific params - space_id = params.pop("space_id", None) # watsonx.ai deployment space_id - region_name = params.pop("region_name", params.pop("region", None)) - if region_name is None: - region_name = params.pop( - "watsonx_region_name", params.pop("watsonx_region", None) - ) # consistent with how vertex ai + aws regions are accepted - wx_credentials = params.pop( - "wx_credentials", - params.pop( - "watsonx_credentials", None - ), # follow {provider}_credentials, same as vertex ai - ) - api_version = params.pop("api_version", IBMWatsonXAI.api_version) - # Load auth variables from environment variables - if url is None: - url = ( - get_secret_str("WATSONX_API_BASE") # consistent with 'AZURE_API_BASE' - or get_secret_str("WATSONX_URL") - or get_secret_str("WX_URL") - or get_secret_str("WML_URL") - ) - if api_key is None: - api_key = ( - get_secret_str("WATSONX_APIKEY") - or get_secret_str("WATSONX_API_KEY") - or get_secret_str("WX_API_KEY") - ) - if token is None: - token = get_secret_str("WATSONX_TOKEN") or get_secret_str("WX_TOKEN") - if project_id is None: - project_id = ( - get_secret_str("WATSONX_PROJECT_ID") - or get_secret_str("WX_PROJECT_ID") - or get_secret_str("PROJECT_ID") - ) - if region_name is None: - region_name = ( - get_secret_str("WATSONX_REGION") - or get_secret_str("WX_REGION") - or get_secret_str("REGION") - ) - if space_id is None: - space_id = ( - get_secret_str("WATSONX_DEPLOYMENT_SPACE_ID") - or get_secret_str("WATSONX_SPACE_ID") - or get_secret_str("WX_SPACE_ID") - or get_secret_str("SPACE_ID") - ) - - # credentials parsing - if wx_credentials is not None: - url = wx_credentials.get("url", url) - api_key = wx_credentials.get( - "apikey", wx_credentials.get("api_key", api_key) - ) - token = wx_credentials.get( - "token", - wx_credentials.get( - "watsonx_token", token - ), # follow format of {provider}_token, same as azure - e.g. 'azure_ad_token=..' - ) - - # verify that all required credentials are present - if url is None: - raise WatsonXAIError( - status_code=401, - message="Error: Watsonx URL not set. Set WX_URL in environment variables or pass in as a parameter.", - ) - if token is None and api_key is not None and generate_token: - # generate the auth token - if print_verbose is not None: - print_verbose("Generating IAM token for Watsonx.ai") - token = self.generate_iam_token(api_key) - elif token is None and api_key is None: - raise WatsonXAIError( - status_code=401, - url=url, - message="Error: API key or token not found. Set WX_API_KEY or WX_TOKEN in environment variables or pass in as a parameter.", - ) - if project_id is None: - raise WatsonXAIError( - status_code=401, - url=url, - message="Error: Watsonx project_id not set. Set WX_PROJECT_ID in environment variables or pass in as a parameter.", - ) - - return { - "url": url, - "api_key": api_key, - "token": token, - "project_id": project_id, - "space_id": space_id, - "region_name": region_name, - "api_version": api_version, - } - def _process_text_gen_response( self, json_resp: dict, model_response: Union[ModelResponse, None] = None ) -> ModelResponse: @@ -616,9 +543,10 @@ class IBMWatsonXAI(BaseLLM): input = [input] if api_key is not None: optional_params["api_key"] = api_key - api_params = self._get_api_params(optional_params) + api_params = _get_api_params(optional_params) # build auth headers api_token = api_params.get("token") + self.token = api_token headers = { "Authorization": f"Bearer {api_token}", "Content-Type": "application/json", @@ -664,29 +592,9 @@ class IBMWatsonXAI(BaseLLM): except Exception as e: raise WatsonXAIError(status_code=500, message=str(e)) - def generate_iam_token(self, api_key=None, **params): - headers = {} - headers["Content-Type"] = "application/x-www-form-urlencoded" - if api_key is None: - api_key = get_secret_str("WX_API_KEY") or get_secret_str("WATSONX_API_KEY") - if api_key is None: - raise ValueError("API key is required") - headers["Accept"] = "application/json" - data = { - "grant_type": "urn:ibm:params:oauth:grant-type:apikey", - "apikey": api_key, - } - response = httpx.post( - "https://iam.cloud.ibm.com/identity/token", data=data, headers=headers - ) - response.raise_for_status() - json_data = response.json() - iam_access_token = json_data["access_token"] - self.token = iam_access_token - return iam_access_token - def get_available_models(self, *, ids_only: bool = True, **params): - api_params = self._get_api_params(params) + api_params = _get_api_params(params) + self.token = api_params["token"] headers = { "Authorization": f"Bearer {api_params['token']}", "Content-Type": "application/json", diff --git a/litellm/main.py b/litellm/main.py index ab85be834..f89a6f2e3 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -77,6 +77,7 @@ from litellm.utils import ( read_config_args, supports_httpx_timeout, token_counter, + validate_chat_completion_user_messages, ) from ._logging import verbose_logger @@ -157,7 +158,8 @@ from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.main import ( from .llms.vertex_ai_and_google_ai_studio.vertex_embeddings.embedding_handler import ( VertexEmbedding, ) -from .llms.watsonx import IBMWatsonXAI +from .llms.watsonx.chat.handler import WatsonXChatHandler +from .llms.watsonx.completion.handler import IBMWatsonXAI from .types.llms.openai import ( ChatCompletionAssistantMessage, ChatCompletionAudioParam, @@ -221,6 +223,7 @@ vertex_partner_models_chat_completion = VertexAIPartnerModels() vertex_text_to_speech = VertexTextToSpeechAPI() watsonxai = IBMWatsonXAI() sagemaker_llm = SagemakerLLM() +watsonx_chat_completion = WatsonXChatHandler() openai_like_embedding = OpenAILikeEmbeddingHandler() ####### COMPLETION ENDPOINTS ################ @@ -921,6 +924,9 @@ def completion( # type: ignore # noqa: PLR0915 "aws_region_name", None ) # support region-based pricing for bedrock + ### VALIDATE USER MESSAGES ### + validate_chat_completion_user_messages(messages=messages) + ### TIMEOUT LOGIC ### timeout = timeout or kwargs.get("request_timeout", 600) or 600 # set timeout for 10 minutes by default @@ -2615,6 +2621,26 @@ def completion( # type: ignore # noqa: PLR0915 ## RESPONSE OBJECT response = response elif custom_llm_provider == "watsonx": + response = watsonx_chat_completion.completion( + model=model, + messages=messages, + headers=headers, + model_response=model_response, + print_verbose=print_verbose, + api_key=api_key, + api_base=api_base, + acompletion=acompletion, + logging_obj=logging, + optional_params=optional_params, + litellm_params=litellm_params, + logger_fn=logger_fn, + timeout=timeout, # type: ignore + custom_prompt_dict=custom_prompt_dict, + client=client, # pass AsyncOpenAI, OpenAI client + encoding=encoding, + custom_llm_provider="watsonx", + ) + elif custom_llm_provider == "watsonx_text": custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict response = watsonxai.completion( model=model, diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py index a457c125c..ebf23804f 100644 --- a/litellm/types/llms/openai.py +++ b/litellm/types/llms/openai.py @@ -20,6 +20,9 @@ from openai.types.beta.threads.message_content import MessageContent from openai.types.beta.threads.run import Run from openai.types.chat import ChatCompletionChunk from openai.types.chat.chat_completion_audio_param import ChatCompletionAudioParam +from openai.types.chat.chat_completion_content_part_input_audio_param import ( + ChatCompletionContentPartInputAudioParam, +) from openai.types.chat.chat_completion_modality import ChatCompletionModality from openai.types.chat.chat_completion_prediction_content_param import ( ChatCompletionPredictionContentParam, @@ -355,8 +358,19 @@ class ChatCompletionImageObject(TypedDict): image_url: Union[str, ChatCompletionImageUrlObject] +class ChatCompletionAudioObject(ChatCompletionContentPartInputAudioParam): + pass + + OpenAIMessageContent = Union[ - str, Iterable[Union[ChatCompletionTextObject, ChatCompletionImageObject]] + str, + Iterable[ + Union[ + ChatCompletionTextObject, + ChatCompletionImageObject, + ChatCompletionAudioObject, + ] + ], ] # The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. @@ -412,6 +426,12 @@ class ChatCompletionSystemMessage(OpenAIChatCompletionSystemMessage, total=False cache_control: ChatCompletionCachedContent +ValidUserMessageContentTypes = [ + "text", + "image_url", + "input_audio", +] # used for validating user messages. Prevent users from accidentally sending anthropic messages. + AllMessageValues = Union[ ChatCompletionUserMessage, ChatCompletionAssistantMessage, diff --git a/litellm/types/llms/watsonx.py b/litellm/types/llms/watsonx.py new file mode 100644 index 000000000..f3b9c5d0b --- /dev/null +++ b/litellm/types/llms/watsonx.py @@ -0,0 +1,31 @@ +import json +from enum import Enum +from typing import Any, List, Optional, TypedDict, Union + +from pydantic import BaseModel + + +class WatsonXAPIParams(TypedDict): + url: str + api_key: Optional[str] + token: str + project_id: str + space_id: Optional[str] + region_name: Optional[str] + api_version: str + + +class WatsonXAIEndpoint(str, Enum): + TEXT_GENERATION = "/ml/v1/text/generation" + TEXT_GENERATION_STREAM = "/ml/v1/text/generation_stream" + CHAT = "/ml/v1/text/chat" + CHAT_STREAM = "/ml/v1/text/chat_stream" + DEPLOYMENT_TEXT_GENERATION = "/ml/v1/deployments/{deployment_id}/text/generation" + DEPLOYMENT_TEXT_GENERATION_STREAM = ( + "/ml/v1/deployments/{deployment_id}/text/generation_stream" + ) + DEPLOYMENT_CHAT = "/ml/v1/deployments/{deployment_id}/text/chat" + DEPLOYMENT_CHAT_STREAM = "/ml/v1/deployments/{deployment_id}/text/chat_stream" + EMBEDDINGS = "/ml/v1/text/embeddings" + PROMPTS = "/ml/v1/prompts" + AVAILABLE_MODELS = "/ml/v1/foundation_model_specs" diff --git a/litellm/utils.py b/litellm/utils.py index 1b37b77a5..d8c435552 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -69,6 +69,9 @@ from litellm.litellm_core_utils.get_llm_provider_logic import ( _is_non_openai_azure_model, get_llm_provider, ) +from litellm.litellm_core_utils.get_supported_openai_params import ( + get_supported_openai_params, +) from litellm.litellm_core_utils.llm_request_utils import _ensure_extra_body_is_safe from litellm.litellm_core_utils.llm_response_utils.convert_dict_to_response import ( LiteLLMResponseObjectHandler, @@ -962,9 +965,10 @@ def client(original_function): # noqa: PLR0915 result._hidden_params["additional_headers"] = process_response_headers( result._hidden_params.get("additional_headers") or {} ) # GUARANTEE OPENAI HEADERS IN RESPONSE - result._response_ms = ( - end_time - start_time - ).total_seconds() * 1000 # return response latency in ms like openai + if result is not None: + result._response_ms = ( + end_time - start_time + ).total_seconds() * 1000 # return response latency in ms like openai return result except Exception as e: call_type = original_function.__name__ @@ -3622,43 +3626,30 @@ def get_optional_params( # noqa: PLR0915 model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) - if max_tokens is not None: - optional_params["max_new_tokens"] = max_tokens - if stream: - optional_params["stream"] = stream - if temperature is not None: - optional_params["temperature"] = temperature - if top_p is not None: - optional_params["top_p"] = top_p - if frequency_penalty is not None: - optional_params["repetition_penalty"] = frequency_penalty - if seed is not None: - optional_params["random_seed"] = seed - if stop is not None: - optional_params["stop_sequences"] = stop - - # WatsonX-only parameters - extra_body = {} - if "decoding_method" in passed_params: - extra_body["decoding_method"] = passed_params.pop("decoding_method") - if "min_tokens" in passed_params or "min_new_tokens" in passed_params: - extra_body["min_new_tokens"] = passed_params.pop( - "min_tokens", passed_params.pop("min_new_tokens") - ) - if "top_k" in passed_params: - extra_body["top_k"] = passed_params.pop("top_k") - if "truncate_input_tokens" in passed_params: - extra_body["truncate_input_tokens"] = passed_params.pop( - "truncate_input_tokens" - ) - if "length_penalty" in passed_params: - extra_body["length_penalty"] = passed_params.pop("length_penalty") - if "time_limit" in passed_params: - extra_body["time_limit"] = passed_params.pop("time_limit") - if "return_options" in passed_params: - extra_body["return_options"] = passed_params.pop("return_options") - optional_params["extra_body"] = ( - extra_body # openai client supports `extra_body` param + optional_params = litellm.IBMWatsonXChatConfig().map_openai_params( + non_default_params=non_default_params, + optional_params=optional_params, + model=model, + drop_params=( + drop_params + if drop_params is not None and isinstance(drop_params, bool) + else False + ), + ) + # WatsonX-text param check + for param in passed_params.keys(): + if litellm.IBMWatsonXAIConfig().is_watsonx_text_param(param): + raise ValueError( + f"LiteLLM now defaults to Watsonx's `/text/chat` endpoint. Please use the `watsonx_text` provider instead, to call the `/text/generation` endpoint. Param: {param}" + ) + elif custom_llm_provider == "watsonx_text": + supported_params = get_supported_openai_params( + model=model, custom_llm_provider=custom_llm_provider + ) + _check_valid_arg(supported_params=supported_params) + optional_params = litellm.IBMWatsonXAIConfig().map_openai_params( + non_default_params=non_default_params, + optional_params=optional_params, ) elif custom_llm_provider == "openai": supported_params = get_supported_openai_params( @@ -4160,290 +4151,6 @@ def get_first_chars_messages(kwargs: dict) -> str: return "" -def get_supported_openai_params( # noqa: PLR0915 - model: str, - custom_llm_provider: Optional[str] = None, - request_type: Literal["chat_completion", "embeddings"] = "chat_completion", -) -> Optional[list]: - """ - Returns the supported openai params for a given model + provider - - Example: - ``` - get_supported_openai_params(model="anthropic.claude-3", custom_llm_provider="bedrock") - ``` - - Returns: - - List if custom_llm_provider is mapped - - None if unmapped - """ - if not custom_llm_provider: - try: - custom_llm_provider = litellm.get_llm_provider(model=model)[1] - except BadRequestError: - return None - if custom_llm_provider == "bedrock": - return litellm.AmazonConverseConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "ollama": - return litellm.OllamaConfig().get_supported_openai_params() - elif custom_llm_provider == "ollama_chat": - return litellm.OllamaChatConfig().get_supported_openai_params() - elif custom_llm_provider == "anthropic": - return litellm.AnthropicConfig().get_supported_openai_params() - elif custom_llm_provider == "fireworks_ai": - if request_type == "embeddings": - return litellm.FireworksAIEmbeddingConfig().get_supported_openai_params( - model=model - ) - else: - return litellm.FireworksAIConfig().get_supported_openai_params() - elif custom_llm_provider == "nvidia_nim": - if request_type == "chat_completion": - return litellm.nvidiaNimConfig.get_supported_openai_params(model=model) - elif request_type == "embeddings": - return litellm.nvidiaNimEmbeddingConfig.get_supported_openai_params() - elif custom_llm_provider == "cerebras": - return litellm.CerebrasConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "xai": - return litellm.XAIChatConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "ai21_chat": - return litellm.AI21ChatConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "volcengine": - return litellm.VolcEngineConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "groq": - return litellm.GroqChatConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "hosted_vllm": - return litellm.HostedVLLMChatConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "deepseek": - return [ - # https://platform.deepseek.com/api-docs/api/create-chat-completion - "frequency_penalty", - "max_tokens", - "presence_penalty", - "response_format", - "stop", - "stream", - "temperature", - "top_p", - "logprobs", - "top_logprobs", - "tools", - "tool_choice", - ] - elif custom_llm_provider == "cohere": - return [ - "stream", - "temperature", - "max_tokens", - "logit_bias", - "top_p", - "frequency_penalty", - "presence_penalty", - "stop", - "n", - "extra_headers", - ] - elif custom_llm_provider == "cohere_chat": - return [ - "stream", - "temperature", - "max_tokens", - "top_p", - "frequency_penalty", - "presence_penalty", - "stop", - "n", - "tools", - "tool_choice", - "seed", - "extra_headers", - ] - elif custom_llm_provider == "maritalk": - return [ - "stream", - "temperature", - "max_tokens", - "top_p", - "presence_penalty", - "stop", - ] - elif custom_llm_provider == "openai": - return litellm.OpenAIConfig().get_supported_openai_params(model=model) - elif custom_llm_provider == "azure": - if litellm.AzureOpenAIO1Config().is_o1_model(model=model): - return litellm.AzureOpenAIO1Config().get_supported_openai_params( - model=model - ) - else: - return litellm.AzureOpenAIConfig().get_supported_openai_params() - elif custom_llm_provider == "openrouter": - return [ - "temperature", - "top_p", - "frequency_penalty", - "presence_penalty", - "repetition_penalty", - "seed", - "max_tokens", - "logit_bias", - "logprobs", - "top_logprobs", - "response_format", - "stop", - "tools", - "tool_choice", - ] - elif custom_llm_provider == "mistral" or custom_llm_provider == "codestral": - # mistal and codestral api have the exact same params - if request_type == "chat_completion": - return litellm.MistralConfig().get_supported_openai_params() - elif request_type == "embeddings": - return litellm.MistralEmbeddingConfig().get_supported_openai_params() - elif custom_llm_provider == "text-completion-codestral": - return litellm.MistralTextCompletionConfig().get_supported_openai_params() - elif custom_llm_provider == "replicate": - return [ - "stream", - "temperature", - "max_tokens", - "top_p", - "stop", - "seed", - "tools", - "tool_choice", - "functions", - "function_call", - ] - elif custom_llm_provider == "huggingface": - return litellm.HuggingfaceConfig().get_supported_openai_params() - elif custom_llm_provider == "together_ai": - return [ - "stream", - "temperature", - "max_tokens", - "top_p", - "stop", - "frequency_penalty", - "tools", - "tool_choice", - "response_format", - ] - elif custom_llm_provider == "ai21": - return [ - "stream", - "n", - "temperature", - "max_tokens", - "top_p", - "stop", - "frequency_penalty", - "presence_penalty", - ] - elif custom_llm_provider == "databricks": - if request_type == "chat_completion": - return litellm.DatabricksConfig().get_supported_openai_params() - elif request_type == "embeddings": - return litellm.DatabricksEmbeddingConfig().get_supported_openai_params() - elif custom_llm_provider == "palm" or custom_llm_provider == "gemini": - return litellm.GoogleAIStudioGeminiConfig().get_supported_openai_params() - elif custom_llm_provider == "vertex_ai": - if request_type == "chat_completion": - if model.startswith("meta/"): - return litellm.VertexAILlama3Config().get_supported_openai_params() - if model.startswith("mistral"): - return litellm.MistralConfig().get_supported_openai_params() - if model.startswith("codestral"): - return ( - litellm.MistralTextCompletionConfig().get_supported_openai_params() - ) - if model.startswith("claude"): - return litellm.VertexAIAnthropicConfig().get_supported_openai_params() - return litellm.VertexAIConfig().get_supported_openai_params() - elif request_type == "embeddings": - return litellm.VertexAITextEmbeddingConfig().get_supported_openai_params() - elif custom_llm_provider == "vertex_ai_beta": - if request_type == "chat_completion": - return litellm.VertexGeminiConfig().get_supported_openai_params() - elif request_type == "embeddings": - return litellm.VertexAITextEmbeddingConfig().get_supported_openai_params() - elif custom_llm_provider == "sagemaker": - return ["stream", "temperature", "max_tokens", "top_p", "stop", "n"] - elif custom_llm_provider == "aleph_alpha": - return [ - "max_tokens", - "stream", - "top_p", - "temperature", - "presence_penalty", - "frequency_penalty", - "n", - "stop", - ] - elif custom_llm_provider == "cloudflare": - return ["max_tokens", "stream"] - elif custom_llm_provider == "nlp_cloud": - return [ - "max_tokens", - "stream", - "temperature", - "top_p", - "presence_penalty", - "frequency_penalty", - "n", - "stop", - ] - elif custom_llm_provider == "petals": - return ["max_tokens", "temperature", "top_p", "stream"] - elif custom_llm_provider == "deepinfra": - return litellm.DeepInfraConfig().get_supported_openai_params() - elif custom_llm_provider == "perplexity": - return [ - "temperature", - "top_p", - "stream", - "max_tokens", - "presence_penalty", - "frequency_penalty", - ] - elif custom_llm_provider == "anyscale": - return [ - "temperature", - "top_p", - "stream", - "max_tokens", - "stop", - "frequency_penalty", - "presence_penalty", - ] - elif custom_llm_provider == "watsonx": - return litellm.IBMWatsonXAIConfig().get_supported_openai_params() - elif custom_llm_provider == "custom_openai" or "text-completion-openai": - return [ - "functions", - "function_call", - "temperature", - "top_p", - "n", - "stream", - "stream_options", - "stop", - "max_tokens", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "user", - "response_format", - "seed", - "tools", - "tool_choice", - "max_retries", - "logprobs", - "top_logprobs", - "extra_headers", - ] - return None - - def _count_characters(text: str) -> int: # Remove white spaces and count characters filtered_text = "".join(char for char in text if not char.isspace()) @@ -8640,3 +8347,47 @@ def add_dummy_tool(custom_llm_provider: str) -> List[ChatCompletionToolParam]: ), ) ] + + +from litellm.types.llms.openai import ( + ChatCompletionAudioObject, + ChatCompletionImageObject, + ChatCompletionTextObject, + ChatCompletionUserMessage, + OpenAIMessageContent, + ValidUserMessageContentTypes, +) + + +def validate_chat_completion_user_messages(messages: List[AllMessageValues]): + """ + Ensures all user messages are valid OpenAI chat completion messages. + + Args: + messages: List of message dictionaries + message_content_type: Type to validate content against + + Returns: + List[dict]: The validated messages + + Raises: + ValueError: If any message is invalid + """ + for idx, m in enumerate(messages): + try: + if m["role"] == "user": + user_content = m.get("content") + if user_content is not None: + if isinstance(user_content, str): + continue + elif isinstance(user_content, list): + for item in user_content: + if isinstance(item, dict): + if item.get("type") not in ValidUserMessageContentTypes: + raise Exception("invalid content type") + except Exception: + raise Exception( + f"Invalid user message={m} at index {idx}. Please ensure all user messages are valid OpenAI chat completion messages." + ) + + return messages diff --git a/tests/llm_translation/test_databricks.py b/tests/llm_translation/test_databricks.py index 97e92b106..89ad6832b 100644 --- a/tests/llm_translation/test_databricks.py +++ b/tests/llm_translation/test_databricks.py @@ -233,7 +233,7 @@ def test_throws_if_api_base_or_api_key_not_set_without_databricks_sdk( with pytest.raises(BadRequestError) as exc: litellm.completion( model="databricks/dbrx-instruct-071224", - messages={"role": "user", "content": "How are you?"}, + messages=[{"role": "user", "content": "How are you?"}], ) assert err_msg in str(exc) diff --git a/tests/llm_translation/test_optional_params.py b/tests/llm_translation/test_optional_params.py index d921c1c17..7283e9a39 100644 --- a/tests/llm_translation/test_optional_params.py +++ b/tests/llm_translation/test_optional_params.py @@ -905,3 +905,19 @@ def test_vertex_schema_field(): "$schema" not in optional_params["tools"][0]["function_declarations"][0]["parameters"] ) + + +def test_watsonx_tool_choice(): + optional_params = get_optional_params( + model="gemini-1.5-pro", custom_llm_provider="watsonx", tool_choice="auto" + ) + print(optional_params) + assert optional_params["tool_choice_options"] == "auto" + + +def test_watsonx_text_top_k(): + optional_params = get_optional_params( + model="gemini-1.5-pro", custom_llm_provider="watsonx_text", top_k=10 + ) + print(optional_params) + assert optional_params["top_k"] == 10 diff --git a/tests/local_testing/test_alangfuse.py b/tests/local_testing/test_alangfuse.py index 1f8c4becb..da83e3829 100644 --- a/tests/local_testing/test_alangfuse.py +++ b/tests/local_testing/test_alangfuse.py @@ -203,7 +203,7 @@ def create_async_task(**completion_kwargs): @pytest.mark.asyncio @pytest.mark.parametrize("stream", [False, True]) -@pytest.mark.flaky(retries=6, delay=1) +@pytest.mark.flaky(retries=12, delay=2) async def test_langfuse_logging_without_request_response(stream, langfuse_client): try: import uuid @@ -232,6 +232,12 @@ async def test_langfuse_logging_without_request_response(stream, langfuse_client _trace_data = trace.data + if ( + len(_trace_data) == 0 + ): # prevent infrequent list index out of range error from langfuse api + return + + print(f"_trace_data: {_trace_data}") assert _trace_data[0].input == { "messages": [{"content": "redacted-by-litellm", "role": "user"}] } @@ -256,7 +262,7 @@ audio_file = open(file_path, "rb") @pytest.mark.asyncio -@pytest.mark.flaky(retries=3, delay=1) +@pytest.mark.flaky(retries=12, delay=2) async def test_langfuse_logging_audio_transcriptions(langfuse_client): """ Test that creates a trace with masked input and output @@ -291,7 +297,7 @@ async def test_langfuse_logging_audio_transcriptions(langfuse_client): @pytest.mark.asyncio -@pytest.mark.flaky(retries=5, delay=1) +@pytest.mark.flaky(retries=12, delay=2) async def test_langfuse_masked_input_output(langfuse_client): """ Test that creates a trace with masked input and output @@ -344,7 +350,7 @@ async def test_langfuse_masked_input_output(langfuse_client): @pytest.mark.asyncio -@pytest.mark.flaky(retries=3, delay=1) +@pytest.mark.flaky(retries=12, delay=2) async def test_aaalangfuse_logging_metadata(langfuse_client): """ Test that creates multiple traces, with a varying number of generations and sets various metadata fields diff --git a/tests/local_testing/test_exceptions.py b/tests/local_testing/test_exceptions.py index 2794fe68b..e1ae1a84f 100644 --- a/tests/local_testing/test_exceptions.py +++ b/tests/local_testing/test_exceptions.py @@ -775,7 +775,7 @@ def test_litellm_predibase_exception(): @pytest.mark.parametrize( - "provider", ["predibase", "vertex_ai_beta", "anthropic", "databricks"] + "provider", ["predibase", "vertex_ai_beta", "anthropic", "databricks", "watsonx"] ) def test_exception_mapping(provider): """ diff --git a/tests/local_testing/test_function_calling.py b/tests/local_testing/test_function_calling.py index 7946bdfea..6e1bd13a1 100644 --- a/tests/local_testing/test_function_calling.py +++ b/tests/local_testing/test_function_calling.py @@ -12,7 +12,7 @@ sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path import pytest - +from unittest.mock import patch, MagicMock, AsyncMock import litellm from litellm import RateLimitError, Timeout, completion, completion_cost, embedding @@ -619,3 +619,62 @@ def test_passing_tool_result_as_list(model): if model == "claude-3-5-sonnet-20241022": assert resp.usage.prompt_tokens_details.cached_tokens > 0 + + +@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_watsonx_tool_choice(sync_mode): + from litellm.llms.custom_httpx.http_handler import HTTPHandler, AsyncHTTPHandler + import json + from litellm import acompletion, completion + + litellm.set_verbose = True + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } + ] + messages = [{"role": "user", "content": "What is the weather in San Francisco?"}] + + client = HTTPHandler() if sync_mode else AsyncHTTPHandler() + with patch.object(client, "post", return_value=MagicMock()) as mock_completion: + + if sync_mode: + resp = completion( + model="watsonx/meta-llama/llama-3-1-8b-instruct", + messages=messages, + tools=tools, + tool_choice="auto", + client=client, + ) + else: + resp = await acompletion( + model="watsonx/meta-llama/llama-3-1-8b-instruct", + messages=messages, + tools=tools, + tool_choice="auto", + client=client, + stream=True, + ) + + print(resp) + + mock_completion.assert_called_once() + print(mock_completion.call_args.kwargs) + json_data = json.loads(mock_completion.call_args.kwargs["data"]) + json_data["tool_choice_options"] == "auto" diff --git a/tests/local_testing/test_streaming.py b/tests/local_testing/test_streaming.py index 99c506f69..3e2145c81 100644 --- a/tests/local_testing/test_streaming.py +++ b/tests/local_testing/test_streaming.py @@ -1917,25 +1917,31 @@ def test_completion_sagemaker_stream(): @pytest.mark.skip(reason="Account deleted by IBM.") -def test_completion_watsonx_stream(): +@pytest.mark.asyncio +async def test_completion_watsonx_stream(): litellm.set_verbose = True + from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler + try: - response = completion( - model="watsonx/ibm/granite-13b-chat-v2", + response = await acompletion( + model="watsonx/meta-llama/llama-3-1-8b-instruct", messages=messages, temperature=0.5, max_tokens=20, stream=True, + # client=client ) complete_response = "" has_finish_reason = False # Add any assertions here to check the response - for idx, chunk in enumerate(response): + idx = 0 + async for chunk in response: chunk, finished = streaming_format_tests(idx, chunk) has_finish_reason = finished if finished: break complete_response += chunk + idx += 1 if has_finish_reason is False: raise Exception("finish reason not set for last chunk") if complete_response.strip() == "": diff --git a/tests/local_testing/test_utils.py b/tests/local_testing/test_utils.py index ce4051fda..5aa3b610c 100644 --- a/tests/local_testing/test_utils.py +++ b/tests/local_testing/test_utils.py @@ -891,3 +891,55 @@ def test_is_base64_encoded_2(): ) assert is_base64_encoded(s="Dog") is False + + +@pytest.mark.parametrize( + "messages, expected_bool", + [ + ([{"role": "user", "content": "hi"}], True), + ([{"role": "user", "content": [{"type": "text", "text": "hi"}]}], True), + ( + [ + { + "role": "user", + "content": [ + {"type": "image_url", "url": "https://example.com/image.png"} + ], + } + ], + True, + ), + ( + [ + { + "role": "user", + "content": [ + {"type": "text", "text": "hi"}, + { + "type": "image", + "source": { + "type": "image", + "source": { + "type": "base64", + "media_type": "image/png", + "data": "1234", + }, + }, + }, + ], + } + ], + False, + ), + ], +) +def test_validate_chat_completion_user_messages(messages, expected_bool): + from litellm.utils import validate_chat_completion_user_messages + + if expected_bool: + ## Valid message + validate_chat_completion_user_messages(messages=messages) + else: + ## Invalid message + with pytest.raises(Exception): + validate_chat_completion_user_messages(messages=messages) diff --git a/tests/logging_callback_tests/test_datadog_llm_obs.py b/tests/logging_callback_tests/test_datadog_llm_obs.py index 84ec3b2d9..afc56599c 100644 --- a/tests/logging_callback_tests/test_datadog_llm_obs.py +++ b/tests/logging_callback_tests/test_datadog_llm_obs.py @@ -93,7 +93,9 @@ async def test_datadog_llm_obs_logging(): for _ in range(2): response = await litellm.acompletion( - model="gpt-4o", messages=["Hello testing dd llm obs!"], mock_response="hi" + model="gpt-4o", + messages=[{"role": "user", "content": "Hello testing dd llm obs!"}], + mock_response="hi", ) print(response) From 9debb9a1c54d0dfbe5192a9b07b1ed57fc077773 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 07:24:31 -0800 Subject: [PATCH 19/67] docs fix clarify team_id on team based logging --- docs/my-website/docs/proxy/team_logging.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/my-website/docs/proxy/team_logging.md b/docs/my-website/docs/proxy/team_logging.md index bd79277cb..e7c3b307b 100644 --- a/docs/my-website/docs/proxy/team_logging.md +++ b/docs/my-website/docs/proxy/team_logging.md @@ -30,11 +30,11 @@ This config would send langfuse logs to 2 different langfuse projects, based on ```yaml litellm_settings: default_team_settings: - - team_id: my-secret-project + - team_id: dbe2f686-a686-4896-864a-4c3924458709 success_callback: ["langfuse"] langfuse_public_key: os.environ/LANGFUSE_PUB_KEY_1 # Project 1 langfuse_secret: os.environ/LANGFUSE_PRIVATE_KEY_1 # Project 1 - - team_id: ishaans-secret-project + - team_id: "06ed1e01-3fa7-4b9e-95bc-f2e59b74f3a8" success_callback: ["langfuse"] langfuse_public_key: os.environ/LANGFUSE_PUB_KEY_2 # Project 2 langfuse_secret: os.environ/LANGFUSE_SECRET_2 # Project 2 @@ -46,7 +46,7 @@ Now, when you [generate keys](./virtual_keys.md) for this team-id curl -X POST 'http://0.0.0.0:4000/key/generate' \ -H 'Authorization: Bearer sk-1234' \ -H 'Content-Type: application/json' \ --d '{"team_id": "ishaans-secret-project"}' +-d '{"team_id": "06ed1e01-3fa7-4b9e-95bc-f2e59b74f3a8"}' ``` All requests made with these keys will log data to their team-specific logging. --> From 00790dc9e12f18249d777d0c64fd70a85c153310 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 07:32:12 -0800 Subject: [PATCH 20/67] doc fix team based logging with langfuse --- docs/my-website/docs/proxy/team_logging.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/proxy/team_logging.md b/docs/my-website/docs/proxy/team_logging.md index e7c3b307b..e2fcfa4b5 100644 --- a/docs/my-website/docs/proxy/team_logging.md +++ b/docs/my-website/docs/proxy/team_logging.md @@ -30,7 +30,7 @@ This config would send langfuse logs to 2 different langfuse projects, based on ```yaml litellm_settings: default_team_settings: - - team_id: dbe2f686-a686-4896-864a-4c3924458709 + - team_id: "dbe2f686-a686-4896-864a-4c3924458709" success_callback: ["langfuse"] langfuse_public_key: os.environ/LANGFUSE_PUB_KEY_1 # Project 1 langfuse_secret: os.environ/LANGFUSE_PRIVATE_KEY_1 # Project 1 From 45ff74ae81e331412370cd7436816559fd6298da Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 10:45:58 -0800 Subject: [PATCH 21/67] fix flake8 checks --- .github/workflows/lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index ccb6df892..bc7785397 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,4 +32,4 @@ jobs: python-version: '3.12' - run: poetry install - name: Run flake8 (https://flake8.pycqa.org/en/latest/) - run: poetry run flake8 + run: poetry run flake8 --exclude=tests/ From 66c1ee09cf79a035a4afdc14d9031d1ddf59097b Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Thu, 7 Nov 2024 01:05:58 +0530 Subject: [PATCH 22/67] ci: remove redundant lint.yml workflow (#6622) --- .github/workflows/lint.yml | 35 ----------------------------------- 1 file changed, 35 deletions(-) delete mode 100644 .github/workflows/lint.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml deleted file mode 100644 index bc7785397..000000000 --- a/.github/workflows/lint.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Lint - -# If a pull-request is pushed then cancel all previously running jobs related -# to that pull-request -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }} - cancel-in-progress: true - -on: - push: - branches: - - main - pull_request: - branches: - - main - - master - -env: - PY_COLORS: 1 - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - with: - fetch-depth: 0 - - name: Install poetry - run: pipx install poetry - - uses: actions/setup-python@v5.0.0 - with: - python-version: '3.12' - - run: poetry install - - name: Run flake8 (https://flake8.pycqa.org/en/latest/) - run: poetry run flake8 --exclude=tests/ From 136693cac4fd86d8be791837f88f07d795759fea Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Thu, 7 Nov 2024 04:17:05 +0530 Subject: [PATCH 23/67] LiteLLM Minor Fixes & Improvements (11/05/2024) (#6590) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(pattern_matching_router.py): update model name using correct function * fix(langfuse.py): metadata deepcopy can cause unhandled error (#6563) Co-authored-by: seva * fix(stream_chunk_builder_utils.py): correctly set prompt tokens + log correct streaming usage Closes https://github.com/BerriAI/litellm/issues/6488 * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * fix ImageObject conversion (#6584) * (fix) litellm.text_completion raises a non-blocking error on simple usage (#6546) * unit test test_huggingface_text_completion_logprobs * fix return TextCompletionHandler convert_chat_to_text_completion * fix hf rest api * fix test_huggingface_text_completion_logprobs * fix linting errors * fix importLiteLLMResponseObjectHandler * fix test for LiteLLMResponseObjectHandler * fix test text completion * fix allow using 15 seconds for premium license check * testing fix bedrock deprecated cohere.command-text-v14 * (feat) add `Predicted Outputs` for OpenAI (#6594) * bump openai to openai==1.54.0 * add 'prediction' param * testing fix bedrock deprecated cohere.command-text-v14 * test test_openai_prediction_param.py * test_openai_prediction_param_with_caching * doc Predicted Outputs * doc Predicted Output * (fix) Vertex Improve Performance when using `image_url` (#6593) * fix transformation vertex * test test_process_gemini_image * test_image_completion_request * testing fix - bedrock has deprecated cohere.command-text-v14 * fix vertex pdf * bump: version 1.51.5 → 1.52.0 * fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check (#6577) * fix(lowest_tpm_rpm_routing.py): fix parallel rate limit check * fix(lowest_tpm_rpm_v2.py): return headers in correct format * test: update test * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * test: remove eol model * fix(proxy_server.py): fix db config loading logic * fix(proxy_server.py): fix order of config / db updates, to ensure fields not overwritten * test: skip test if required env var is missing * test: fix test --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> * test: mark flaky test * test: handle anthropic api instability * test(test_proxy_utils.py): add testing for db config update logic * Update setuptools in docker and fastapi to latest verison, in order to upgrade starlette version (#6597) * build(deps): bump cookie and express in /docs/my-website (#6566) Bumps [cookie](https://github.com/jshttp/cookie) and [express](https://github.com/expressjs/express). These dependencies needed to be updated together. Updates `cookie` from 0.6.0 to 0.7.1 - [Release notes](https://github.com/jshttp/cookie/releases) - [Commits](https://github.com/jshttp/cookie/compare/v0.6.0...v0.7.1) Updates `express` from 4.20.0 to 4.21.1 - [Release notes](https://github.com/expressjs/express/releases) - [Changelog](https://github.com/expressjs/express/blob/4.21.1/History.md) - [Commits](https://github.com/expressjs/express/compare/4.20.0...4.21.1) --- updated-dependencies: - dependency-name: cookie dependency-type: indirect - dependency-name: express dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * docs(virtual_keys.md): update Dockerfile reference (#6554) Signed-off-by: Emmanuel Ferdman * (proxy fix) - call connect on prisma client when running setup (#6534) * critical fix - call connect on prisma client when running setup * fix test_proxy_server_prisma_setup * fix test_proxy_server_prisma_setup * Add 3.5 haiku (#6588) * feat: add claude-3-5-haiku-20241022 entries * feat: add claude-3-5-haiku-20241022 and vertex_ai/claude-3-5-haiku@20241022 models * add missing entries, remove vision * remove image token costs * Litellm perf improvements 3 (#6573) * perf: move writing key to cache, to background task * perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils adds 200ms on calls with pgdb connected * fix(litellm_pre_call_utils.py'): rename call_type to actual call used * perf(proxy_server.py): remove db logic from _get_config_from_file was causing db calls to occur on every llm request, if team_id was set on key * fix(auth_checks.py): add check for reducing db calls if user/team id does not exist in db reduces latency/call by ~100ms * fix(proxy_server.py): minor fix on existing_settings not incl alerting * fix(exception_mapping_utils.py): map databricks exception string * fix(auth_checks.py): fix auth check logic * test: correctly mark flaky test * fix(utils.py): handle auth token error for tokenizers.from_pretrained * build: fix map * build: fix map * build: fix json for model map * fix ImageObject conversion (#6584) * (fix) litellm.text_completion raises a non-blocking error on simple usage (#6546) * unit test test_huggingface_text_completion_logprobs * fix return TextCompletionHandler convert_chat_to_text_completion * fix hf rest api * fix test_huggingface_text_completion_logprobs * fix linting errors * fix importLiteLLMResponseObjectHandler * fix test for LiteLLMResponseObjectHandler * fix test text completion * fix allow using 15 seconds for premium license check * testing fix bedrock deprecated cohere.command-text-v14 * (feat) add `Predicted Outputs` for OpenAI (#6594) * bump openai to openai==1.54.0 * add 'prediction' param * testing fix bedrock deprecated cohere.command-text-v14 * test test_openai_prediction_param.py * test_openai_prediction_param_with_caching * doc Predicted Outputs * doc Predicted Output * (fix) Vertex Improve Performance when using `image_url` (#6593) * fix transformation vertex * test test_process_gemini_image * test_image_completion_request * testing fix - bedrock has deprecated cohere.command-text-v14 * fix vertex pdf * bump: version 1.51.5 → 1.52.0 * Update setuptools in docker and fastapi to latest verison, in order to upgrade starlette version --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> Co-authored-by: Krish Dholakia Co-authored-by: Jacob Hagstedt * fix(langfuse.py): fix linting errors * fix: fix linting errors * fix: fix casting error * fix: fix typing error * fix: add more tests * fix(utils.py): fix return_processed_chunk_logic * Revert "Update setuptools in docker and fastapi to latest verison, in order t…" (#6615) This reverts commit 1a7f7bdfb75df0efbc930b7f2e39febc80e97d5a. * docs fix clarify team_id on team based logging * doc fix team based logging with langfuse * fix flake8 checks * test: bump sleep time * refactor: replace claude-instant-1.2 with haiku in testing * fix(proxy_server.py): move to using sl payload in track_cost_callback * fix(proxy_server.py): fix linting errors * fix(proxy_server.py): fallback to kwargs(response_cost) if given * test: remove claude-instant-1 from tests * test: fix claude test * docs fix clarify team_id on team based logging * doc fix team based logging with langfuse * build: remove lint.yml --------- Signed-off-by: dependabot[bot] Signed-off-by: Emmanuel Ferdman Co-authored-by: Vsevolod Karvetskiy <56288164+karvetskiy@users.noreply.github.com> Co-authored-by: seva Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Emmanuel Ferdman Co-authored-by: Ishaan Jaff Co-authored-by: paul-gauthier <69695708+paul-gauthier@users.noreply.github.com> Co-authored-by: Jacob Hagstedt P Suorra Co-authored-by: Jacob Hagstedt --- litellm/integrations/langfuse/langfuse.py | 84 ++-- .../streaming_chunk_builder_utils.py | 96 ++-- litellm/llms/anthropic/chat/handler.py | 1 + litellm/proxy/_new_secret_config.yaml | 13 +- litellm/proxy/proxy_server.py | 26 +- .../router_utils/pattern_match_deployments.py | 33 +- litellm/types/utils.py | 57 ++- litellm/utils.py | 419 ++++++++---------- .../test_add_function_to_prompt.py | 36 +- tests/local_testing/test_alangfuse.py | 22 - tests/local_testing/test_batch_completions.py | 2 +- tests/local_testing/test_completion.py | 2 +- .../test_custom_callback_input.py | 62 +++ tests/local_testing/test_exceptions.py | 4 +- tests/local_testing/test_langsmith.py | 65 --- tests/local_testing/test_logging.py | 8 +- .../test_model_response_typing/test.py | 2 +- tests/local_testing/test_prometheus.py | 59 +-- .../test_promptlayer_integration.py | 2 +- .../test_provider_specific_config.py | 6 +- tests/local_testing/test_proxy_utils.py | 47 +- tests/local_testing/test_router.py | 2 +- tests/local_testing/test_router_fallbacks.py | 8 +- .../test_router_pattern_matching.py | 33 ++ tests/local_testing/test_router_timeout.py | 6 +- tests/local_testing/test_streaming.py | 4 +- tests/local_testing/test_token_counter.py | 13 +- tests/local_testing/test_traceloop.py | 2 +- tests/local_testing/test_wandb.py | 2 +- .../test_langfuse_unit_tests.py | 32 +- .../test_router_helper_utils.py | 13 + tests/test_keys.py | 6 +- 32 files changed, 634 insertions(+), 533 deletions(-) diff --git a/litellm/integrations/langfuse/langfuse.py b/litellm/integrations/langfuse/langfuse.py index 182c88637..18892871e 100644 --- a/litellm/integrations/langfuse/langfuse.py +++ b/litellm/integrations/langfuse/langfuse.py @@ -1,9 +1,9 @@ #### What this does #### # On success, logs events to Langfuse import copy -import inspect import os import traceback +from collections.abc import MutableMapping, MutableSequence, MutableSet from typing import TYPE_CHECKING, Any, Dict, Optional from packaging.version import Version @@ -14,7 +14,7 @@ from litellm._logging import verbose_logger from litellm.litellm_core_utils.redact_messages import redact_user_api_key_info from litellm.secret_managers.main import str_to_bool from litellm.types.integrations.langfuse import * -from litellm.types.utils import StandardCallbackDynamicParams, StandardLoggingPayload +from litellm.types.utils import StandardLoggingPayload if TYPE_CHECKING: from litellm.litellm_core_utils.litellm_logging import DynamicLoggingCache @@ -355,6 +355,47 @@ class LangFuseLogger: ) ) + def _prepare_metadata(self, metadata) -> Any: + try: + return copy.deepcopy(metadata) # Avoid modifying the original metadata + except (TypeError, copy.Error) as e: + verbose_logger.warning(f"Langfuse Layer Error - {e}") + + new_metadata: Dict[str, Any] = {} + + # if metadata is not a MutableMapping, return an empty dict since we can't call items() on it + if not isinstance(metadata, MutableMapping): + verbose_logger.warning( + "Langfuse Layer Logging - metadata is not a MutableMapping, returning empty dict" + ) + return new_metadata + + for key, value in metadata.items(): + try: + if isinstance(value, MutableMapping): + new_metadata[key] = self._prepare_metadata(value) + elif isinstance(value, (MutableSequence, MutableSet)): + new_metadata[key] = type(value)( + *( + ( + self._prepare_metadata(v) + if isinstance(v, MutableMapping) + else copy.deepcopy(v) + ) + for v in value + ) + ) + elif isinstance(value, BaseModel): + new_metadata[key] = value.model_dump() + else: + new_metadata[key] = copy.deepcopy(value) + except (TypeError, copy.Error): + verbose_logger.warning( + f"Langfuse Layer Error - Couldn't copy metadata key: {key} - {traceback.format_exc()}" + ) + + return new_metadata + def _log_langfuse_v2( # noqa: PLR0915 self, user_id, @@ -373,40 +414,19 @@ class LangFuseLogger: ) -> tuple: import langfuse + print_verbose("Langfuse Layer Logging - logging to langfuse v2") + try: - tags = [] - try: - optional_params.pop("metadata") - metadata = copy.deepcopy( - metadata - ) # Avoid modifying the original metadata - except Exception: - new_metadata = {} - for key, value in metadata.items(): - if ( - isinstance(value, list) - or isinstance(value, dict) - or isinstance(value, str) - or isinstance(value, int) - or isinstance(value, float) - ): - new_metadata[key] = copy.deepcopy(value) - elif isinstance(value, BaseModel): - new_metadata[key] = value.model_dump() - metadata = new_metadata + metadata = self._prepare_metadata(metadata) - supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3") - supports_prompt = Version(langfuse.version.__version__) >= Version("2.7.3") - supports_costs = Version(langfuse.version.__version__) >= Version("2.7.3") - supports_completion_start_time = Version( - langfuse.version.__version__ - ) >= Version("2.7.3") + langfuse_version = Version(langfuse.version.__version__) - print_verbose("Langfuse Layer Logging - logging to langfuse v2 ") + supports_tags = langfuse_version >= Version("2.6.3") + supports_prompt = langfuse_version >= Version("2.7.3") + supports_costs = langfuse_version >= Version("2.7.3") + supports_completion_start_time = langfuse_version >= Version("2.7.3") - if supports_tags: - metadata_tags = metadata.pop("tags", []) - tags = metadata_tags + tags = metadata.pop("tags", []) if supports_tags else [] # Clean Metadata before logging - never log raw metadata # the raw metadata can contain circular references which leads to infinite recursion diff --git a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py index 1ca6a6fd6..a198a90f7 100644 --- a/litellm/litellm_core_utils/streaming_chunk_builder_utils.py +++ b/litellm/litellm_core_utils/streaming_chunk_builder_utils.py @@ -243,6 +243,49 @@ class ChunkProcessor: id=id, ) + def _usage_chunk_calculation_helper(self, usage_chunk: Usage) -> dict: + prompt_tokens = 0 + completion_tokens = 0 + ## anthropic prompt caching information ## + cache_creation_input_tokens: Optional[int] = None + cache_read_input_tokens: Optional[int] = None + completion_tokens_details: Optional[CompletionTokensDetails] = None + prompt_tokens_details: Optional[PromptTokensDetails] = None + + if "prompt_tokens" in usage_chunk: + prompt_tokens = usage_chunk.get("prompt_tokens", 0) or 0 + if "completion_tokens" in usage_chunk: + completion_tokens = usage_chunk.get("completion_tokens", 0) or 0 + if "cache_creation_input_tokens" in usage_chunk: + cache_creation_input_tokens = usage_chunk.get("cache_creation_input_tokens") + if "cache_read_input_tokens" in usage_chunk: + cache_read_input_tokens = usage_chunk.get("cache_read_input_tokens") + if hasattr(usage_chunk, "completion_tokens_details"): + if isinstance(usage_chunk.completion_tokens_details, dict): + completion_tokens_details = CompletionTokensDetails( + **usage_chunk.completion_tokens_details + ) + elif isinstance( + usage_chunk.completion_tokens_details, CompletionTokensDetails + ): + completion_tokens_details = usage_chunk.completion_tokens_details + if hasattr(usage_chunk, "prompt_tokens_details"): + if isinstance(usage_chunk.prompt_tokens_details, dict): + prompt_tokens_details = PromptTokensDetails( + **usage_chunk.prompt_tokens_details + ) + elif isinstance(usage_chunk.prompt_tokens_details, PromptTokensDetails): + prompt_tokens_details = usage_chunk.prompt_tokens_details + + return { + "prompt_tokens": prompt_tokens, + "completion_tokens": completion_tokens, + "cache_creation_input_tokens": cache_creation_input_tokens, + "cache_read_input_tokens": cache_read_input_tokens, + "completion_tokens_details": completion_tokens_details, + "prompt_tokens_details": prompt_tokens_details, + } + def calculate_usage( self, chunks: List[Union[Dict[str, Any], ModelResponse]], @@ -269,37 +312,30 @@ class ChunkProcessor: elif isinstance(chunk, ModelResponse) and hasattr(chunk, "_hidden_params"): usage_chunk = chunk._hidden_params.get("usage", None) if usage_chunk is not None: - if "prompt_tokens" in usage_chunk: - prompt_tokens = usage_chunk.get("prompt_tokens", 0) or 0 - if "completion_tokens" in usage_chunk: - completion_tokens = usage_chunk.get("completion_tokens", 0) or 0 - if "cache_creation_input_tokens" in usage_chunk: - cache_creation_input_tokens = usage_chunk.get( + usage_chunk_dict = self._usage_chunk_calculation_helper(usage_chunk) + if ( + usage_chunk_dict["prompt_tokens"] is not None + and usage_chunk_dict["prompt_tokens"] > 0 + ): + prompt_tokens = usage_chunk_dict["prompt_tokens"] + if ( + usage_chunk_dict["completion_tokens"] is not None + and usage_chunk_dict["completion_tokens"] > 0 + ): + completion_tokens = usage_chunk_dict["completion_tokens"] + if usage_chunk_dict["cache_creation_input_tokens"] is not None: + cache_creation_input_tokens = usage_chunk_dict[ "cache_creation_input_tokens" - ) - if "cache_read_input_tokens" in usage_chunk: - cache_read_input_tokens = usage_chunk.get("cache_read_input_tokens") - if hasattr(usage_chunk, "completion_tokens_details"): - if isinstance(usage_chunk.completion_tokens_details, dict): - completion_tokens_details = CompletionTokensDetails( - **usage_chunk.completion_tokens_details - ) - elif isinstance( - usage_chunk.completion_tokens_details, CompletionTokensDetails - ): - completion_tokens_details = ( - usage_chunk.completion_tokens_details - ) - if hasattr(usage_chunk, "prompt_tokens_details"): - if isinstance(usage_chunk.prompt_tokens_details, dict): - prompt_tokens_details = PromptTokensDetails( - **usage_chunk.prompt_tokens_details - ) - elif isinstance( - usage_chunk.prompt_tokens_details, PromptTokensDetails - ): - prompt_tokens_details = usage_chunk.prompt_tokens_details - + ] + if usage_chunk_dict["cache_read_input_tokens"] is not None: + cache_read_input_tokens = usage_chunk_dict[ + "cache_read_input_tokens" + ] + if usage_chunk_dict["completion_tokens_details"] is not None: + completion_tokens_details = usage_chunk_dict[ + "completion_tokens_details" + ] + prompt_tokens_details = usage_chunk_dict["prompt_tokens_details"] try: returned_usage.prompt_tokens = prompt_tokens or token_counter( model=model, messages=messages diff --git a/litellm/llms/anthropic/chat/handler.py b/litellm/llms/anthropic/chat/handler.py index a30cd6570..da95ac075 100644 --- a/litellm/llms/anthropic/chat/handler.py +++ b/litellm/llms/anthropic/chat/handler.py @@ -769,6 +769,7 @@ class ModelResponseIterator: message=message, status_code=500, # it looks like Anthropic API does not return a status code in the chunk error - default to 500 ) + returned_chunk = GenericStreamingChunk( text=text, tool_use=tool_use, diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index d81c96df5..5cf293864 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -24,13 +24,20 @@ model_list: api_key: my-fake-key api_base: https://exampleopenaiendpoint-production.up.railway.app/ - + - model_name: gpt-4 + litellm_params: + model: azure/chatgpt-v-2 + api_base: https://openai-gpt-4-test-v-1.openai.azure.com/ + api_version: "2023-05-15" + api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault + rpm: 480 + timeout: 300 + stream_timeout: 60 # litellm_settings: # fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] # callbacks: ["otel", "prometheus"] # default_redis_batch_cache_expiry: 10 - # litellm_settings: # cache: True # cache_params: @@ -74,4 +81,4 @@ model_list: # # # see https://docs.litellm.ai/docs/proxy/caching#advanced---user-api-key-cache-ttl # # # our api keys rarely change -# # user_api_key_cache_ttl: 3600 \ No newline at end of file +# # user_api_key_cache_ttl: 3600 diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 9f6579242..94a5bb5e9 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -770,8 +770,16 @@ async def _PROXY_track_cost_callback( org_id = metadata.get("user_api_key_org_id", None) key_alias = metadata.get("user_api_key_alias", None) end_user_max_budget = metadata.get("user_api_end_user_max_budget", None) - if kwargs.get("response_cost", None) is not None: - response_cost = kwargs["response_cost"] + sl_object: Optional[StandardLoggingPayload] = kwargs.get( + "standard_logging_object", None + ) + response_cost = ( + sl_object.get("response_cost", None) + if sl_object is not None + else kwargs.get("response_cost", None) + ) + + if response_cost is not None: user_api_key = metadata.get("user_api_key", None) if kwargs.get("cache_hit", False) is True: response_cost = 0.0 @@ -824,9 +832,15 @@ async def _PROXY_track_cost_callback( if kwargs["stream"] is not True or ( kwargs["stream"] is True and "complete_streaming_response" in kwargs ): - cost_tracking_failure_debug_info = kwargs.get( - "response_cost_failure_debug_information" - ) + if sl_object is not None: + cost_tracking_failure_debug_info: Union[dict, str] = ( + sl_object["response_cost_failure_debug_info"] # type: ignore + or "response_cost_failure_debug_info is None in standard_logging_object" + ) + else: + cost_tracking_failure_debug_info = ( + "standard_logging_object not found" + ) model = kwargs.get("model") raise Exception( f"Cost tracking failed for model={model}.\nDebug info - {cost_tracking_failure_debug_info}\nAdd custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing" @@ -842,7 +856,7 @@ async def _PROXY_track_cost_callback( failing_model=model, ) ) - verbose_proxy_logger.debug("error in tracking cost callback - %s", e) + verbose_proxy_logger.debug(error_msg) def error_tracking(): diff --git a/litellm/router_utils/pattern_match_deployments.py b/litellm/router_utils/pattern_match_deployments.py index a0d631bf7..039af635c 100644 --- a/litellm/router_utils/pattern_match_deployments.py +++ b/litellm/router_utils/pattern_match_deployments.py @@ -61,6 +61,24 @@ class PatternMatchRouter: # return f"^{regex}$" return re.escape(pattern).replace(r"\*", "(.*)") + def _return_pattern_matched_deployments( + self, matched_pattern: Match, deployments: List[Dict] + ) -> List[Dict]: + new_deployments = [] + for deployment in deployments: + new_deployment = copy.deepcopy(deployment) + new_deployment["litellm_params"]["model"] = ( + PatternMatchRouter.set_deployment_model_name( + matched_pattern=matched_pattern, + litellm_deployment_litellm_model=deployment["litellm_params"][ + "model" + ], + ) + ) + new_deployments.append(new_deployment) + + return new_deployments + def route(self, request: Optional[str]) -> Optional[List[Dict]]: """ Route a requested model to the corresponding llm deployments based on the regex pattern @@ -79,8 +97,11 @@ class PatternMatchRouter: if request is None: return None for pattern, llm_deployments in self.patterns.items(): - if re.match(pattern, request): - return llm_deployments + pattern_match = re.match(pattern, request) + if pattern_match: + return self._return_pattern_matched_deployments( + matched_pattern=pattern_match, deployments=llm_deployments + ) except Exception as e: verbose_router_logger.debug(f"Error in PatternMatchRouter.route: {str(e)}") @@ -102,6 +123,7 @@ class PatternMatchRouter: if model_name = "llmengine/foo" -> model = "openai/foo" """ + ## BASE CASE: if the deployment model name does not contain a wildcard, return the deployment model name if "*" not in litellm_deployment_litellm_model: return litellm_deployment_litellm_model @@ -165,12 +187,7 @@ class PatternMatchRouter: """ pattern_match = self.get_pattern(model, custom_llm_provider) if pattern_match: - provider_deployments = [] - for deployment in pattern_match: - dep = copy.deepcopy(deployment) - dep["litellm_params"]["model"] = model - provider_deployments.append(dep) - return provider_deployments + return pattern_match return [] diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 942750416..c0a9764e8 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -745,13 +745,13 @@ class StreamingChatCompletionChunk(OpenAIChatCompletionChunk): super().__init__(**kwargs) -class ModelResponse(OpenAIObject): +from openai.types.chat import ChatCompletionChunk + + +class ModelResponseBase(OpenAIObject): id: str """A unique identifier for the completion.""" - choices: List[Union[Choices, StreamingChoices]] - """The list of completion choices the model generated for the input prompt.""" - created: int """The Unix timestamp (in seconds) of when the completion was created.""" @@ -772,6 +772,55 @@ class ModelResponse(OpenAIObject): _response_headers: Optional[dict] = None + +class ModelResponseStream(ModelResponseBase): + choices: List[StreamingChoices] + + def __init__( + self, + choices: Optional[List[Union[StreamingChoices, dict, BaseModel]]] = None, + **kwargs, + ): + if choices is not None and isinstance(choices, list): + new_choices = [] + for choice in choices: + _new_choice = None + if isinstance(choice, StreamingChoices): + _new_choice = choice + elif isinstance(choice, dict): + _new_choice = StreamingChoices(**choice) + elif isinstance(choice, BaseModel): + _new_choice = StreamingChoices(**choice.model_dump()) + new_choices.append(_new_choice) + kwargs["choices"] = new_choices + else: + kwargs["choices"] = [StreamingChoices()] + super().__init__(**kwargs) + + def __contains__(self, key): + # Define custom behavior for the 'in' operator + return hasattr(self, key) + + def get(self, key, default=None): + # Custom .get() method to access attributes with a default value if the attribute doesn't exist + return getattr(self, key, default) + + def __getitem__(self, key): + # Allow dictionary-style access to attributes + return getattr(self, key) + + def json(self, **kwargs): # type: ignore + try: + return self.model_dump() # noqa + except Exception: + # if using pydantic v1 + return self.dict() + + +class ModelResponse(ModelResponseBase): + choices: List[Union[Choices, StreamingChoices]] + """The list of completion choices the model generated for the input prompt.""" + def __init__( self, id=None, diff --git a/litellm/utils.py b/litellm/utils.py index d8c435552..6dd0a5009 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -114,6 +114,7 @@ from litellm.types.utils import ( Message, ModelInfo, ModelResponse, + ModelResponseStream, ProviderField, StreamingChoices, TextChoices, @@ -5642,6 +5643,9 @@ class CustomStreamWrapper: ) self.messages = getattr(logging_obj, "messages", None) self.sent_stream_usage = False + self.send_stream_usage = ( + True if self.check_send_stream_usage(self.stream_options) else False + ) self.tool_call = False self.chunks: List = ( [] @@ -5654,6 +5658,12 @@ class CustomStreamWrapper: def __aiter__(self): return self + def check_send_stream_usage(self, stream_options: Optional[dict]): + return ( + stream_options is not None + and stream_options.get("include_usage", False) is True + ) + def check_is_function_call(self, logging_obj) -> bool: if hasattr(logging_obj, "optional_params") and isinstance( logging_obj.optional_params, dict @@ -6506,9 +6516,148 @@ class CustomStreamWrapper: is_empty = False return is_empty + def return_processed_chunk_logic( # noqa + self, + completion_obj: dict, + model_response: ModelResponseStream, + response_obj: dict, + ): + + print_verbose( + f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}" + ) + if ( + "content" in completion_obj + and ( + isinstance(completion_obj["content"], str) + and len(completion_obj["content"]) > 0 + ) + or ( + "tool_calls" in completion_obj + and completion_obj["tool_calls"] is not None + and len(completion_obj["tool_calls"]) > 0 + ) + or ( + "function_call" in completion_obj + and completion_obj["function_call"] is not None + ) + ): # cannot set content of an OpenAI Object to be an empty string + self.safety_checker() + hold, model_response_str = self.check_special_tokens( + chunk=completion_obj["content"], + finish_reason=model_response.choices[0].finish_reason, + ) # filter out bos/eos tokens from openai-compatible hf endpoints + print_verbose(f"hold - {hold}, model_response_str - {model_response_str}") + if hold is False: + ## check if openai/azure chunk + original_chunk = response_obj.get("original_chunk", None) + if original_chunk: + model_response.id = original_chunk.id + self.response_id = original_chunk.id + if len(original_chunk.choices) > 0: + choices = [] + for choice in original_chunk.choices: + try: + if isinstance(choice, BaseModel): + choice_json = choice.model_dump() + choice_json.pop( + "finish_reason", None + ) # for mistral etc. which return a value in their last chunk (not-openai compatible). + print_verbose(f"choice_json: {choice_json}") + choices.append(StreamingChoices(**choice_json)) + except Exception: + choices.append(StreamingChoices()) + print_verbose(f"choices in streaming: {choices}") + setattr(model_response, "choices", choices) + else: + return + model_response.system_fingerprint = ( + original_chunk.system_fingerprint + ) + setattr( + model_response, + "citations", + getattr(original_chunk, "citations", None), + ) + print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") + if self.sent_first_chunk is False: + model_response.choices[0].delta["role"] = "assistant" + self.sent_first_chunk = True + elif self.sent_first_chunk is True and hasattr( + model_response.choices[0].delta, "role" + ): + _initial_delta = model_response.choices[0].delta.model_dump() + _initial_delta.pop("role", None) + model_response.choices[0].delta = Delta(**_initial_delta) + print_verbose( + f"model_response.choices[0].delta: {model_response.choices[0].delta}" + ) + else: + ## else + completion_obj["content"] = model_response_str + if self.sent_first_chunk is False: + completion_obj["role"] = "assistant" + self.sent_first_chunk = True + + model_response.choices[0].delta = Delta(**completion_obj) + _index: Optional[int] = completion_obj.get("index") + if _index is not None: + model_response.choices[0].index = _index + print_verbose(f"returning model_response: {model_response}") + return model_response + else: + return + elif self.received_finish_reason is not None: + if self.sent_last_chunk is True: + # Bedrock returns the guardrail trace in the last chunk - we want to return this here + if self.custom_llm_provider == "bedrock" and "trace" in model_response: + return model_response + + # Default - return StopIteration + raise StopIteration + # flush any remaining holding chunk + if len(self.holding_chunk) > 0: + if model_response.choices[0].delta.content is None: + model_response.choices[0].delta.content = self.holding_chunk + else: + model_response.choices[0].delta.content = ( + self.holding_chunk + model_response.choices[0].delta.content + ) + self.holding_chunk = "" + # if delta is None + _is_delta_empty = self.is_delta_empty(delta=model_response.choices[0].delta) + + if _is_delta_empty: + # get any function call arguments + model_response.choices[0].finish_reason = map_finish_reason( + finish_reason=self.received_finish_reason + ) # ensure consistent output to openai + + self.sent_last_chunk = True + + return model_response + elif ( + model_response.choices[0].delta.tool_calls is not None + or model_response.choices[0].delta.function_call is not None + ): + if self.sent_first_chunk is False: + model_response.choices[0].delta["role"] = "assistant" + self.sent_first_chunk = True + return model_response + elif ( + len(model_response.choices) > 0 + and hasattr(model_response.choices[0].delta, "audio") + and model_response.choices[0].delta.audio is not None + ): + return model_response + else: + if hasattr(model_response, "usage"): + self.chunks.append(model_response) + return + def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 model_response = self.model_response_creator() - response_obj = {} + response_obj: dict = {} try: # return this for all models completion_obj = {"content": ""} @@ -6559,6 +6708,7 @@ class CustomStreamWrapper: "provider_specific_fields" ].items(): setattr(model_response, key, value) + response_obj = anthropic_response_obj elif ( self.custom_llm_provider @@ -6626,7 +6776,7 @@ class CustomStreamWrapper: if self.sent_first_chunk is False: raise Exception("An unknown error occurred with the stream") self.received_finish_reason = "stop" - elif self.custom_llm_provider and (self.custom_llm_provider == "vertex_ai"): + elif self.custom_llm_provider == "vertex_ai": import proto # type: ignore if self.model.startswith("claude-3"): @@ -7009,145 +7159,12 @@ class CustomStreamWrapper: self.tool_call = True ## RETURN ARG - if ( - "content" in completion_obj - and ( - isinstance(completion_obj["content"], str) - and len(completion_obj["content"]) > 0 - ) - or ( - "tool_calls" in completion_obj - and completion_obj["tool_calls"] is not None - and len(completion_obj["tool_calls"]) > 0 - ) - or ( - "function_call" in completion_obj - and completion_obj["function_call"] is not None - ) - ): # cannot set content of an OpenAI Object to be an empty string - self.safety_checker() - hold, model_response_str = self.check_special_tokens( - chunk=completion_obj["content"], - finish_reason=model_response.choices[0].finish_reason, - ) # filter out bos/eos tokens from openai-compatible hf endpoints - print_verbose( - f"hold - {hold}, model_response_str - {model_response_str}" - ) - if hold is False: - ## check if openai/azure chunk - original_chunk = response_obj.get("original_chunk", None) - if original_chunk: - model_response.id = original_chunk.id - self.response_id = original_chunk.id - if len(original_chunk.choices) > 0: - choices = [] - for idx, choice in enumerate(original_chunk.choices): - try: - if isinstance(choice, BaseModel): - try: - choice_json = choice.model_dump() - except Exception: - choice_json = choice.dict() - choice_json.pop( - "finish_reason", None - ) # for mistral etc. which return a value in their last chunk (not-openai compatible). - print_verbose(f"choice_json: {choice_json}") - choices.append(StreamingChoices(**choice_json)) - except Exception: - choices.append(StreamingChoices()) - print_verbose(f"choices in streaming: {choices}") - model_response.choices = choices - else: - return - model_response.system_fingerprint = ( - original_chunk.system_fingerprint - ) - model_response.citations = getattr( - original_chunk, "citations", None - ) - print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") - if self.sent_first_chunk is False: - model_response.choices[0].delta["role"] = "assistant" - self.sent_first_chunk = True - elif self.sent_first_chunk is True and hasattr( - model_response.choices[0].delta, "role" - ): - _initial_delta = model_response.choices[ - 0 - ].delta.model_dump() - _initial_delta.pop("role", None) - model_response.choices[0].delta = Delta(**_initial_delta) - print_verbose( - f"model_response.choices[0].delta: {model_response.choices[0].delta}" - ) - else: - ## else - completion_obj["content"] = model_response_str - if self.sent_first_chunk is False: - completion_obj["role"] = "assistant" - self.sent_first_chunk = True + return self.return_processed_chunk_logic( + completion_obj=completion_obj, + model_response=model_response, # type: ignore + response_obj=response_obj, + ) - model_response.choices[0].delta = Delta(**completion_obj) - if completion_obj.get("index") is not None: - model_response.choices[0].index = completion_obj.get( - "index" - ) - print_verbose(f"returning model_response: {model_response}") - return model_response - else: - return - elif self.received_finish_reason is not None: - if self.sent_last_chunk is True: - # Bedrock returns the guardrail trace in the last chunk - we want to return this here - if ( - self.custom_llm_provider == "bedrock" - and "trace" in model_response - ): - return model_response - - # Default - return StopIteration - raise StopIteration - # flush any remaining holding chunk - if len(self.holding_chunk) > 0: - if model_response.choices[0].delta.content is None: - model_response.choices[0].delta.content = self.holding_chunk - else: - model_response.choices[0].delta.content = ( - self.holding_chunk + model_response.choices[0].delta.content - ) - self.holding_chunk = "" - # if delta is None - _is_delta_empty = self.is_delta_empty( - delta=model_response.choices[0].delta - ) - - if _is_delta_empty: - # get any function call arguments - model_response.choices[0].finish_reason = map_finish_reason( - finish_reason=self.received_finish_reason - ) # ensure consistent output to openai - - self.sent_last_chunk = True - - return model_response - elif ( - model_response.choices[0].delta.tool_calls is not None - or model_response.choices[0].delta.function_call is not None - ): - if self.sent_first_chunk is False: - model_response.choices[0].delta["role"] = "assistant" - self.sent_first_chunk = True - return model_response - elif ( - len(model_response.choices) > 0 - and hasattr(model_response.choices[0].delta, "audio") - and model_response.choices[0].delta.audio is not None - ): - return model_response - else: - if hasattr(model_response, "usage"): - self.chunks.append(model_response) - return except StopIteration: raise StopIteration except Exception as e: @@ -7293,27 +7310,24 @@ class CustomStreamWrapper: except StopIteration: if self.sent_last_chunk is True: - if ( - self.sent_stream_usage is False - and self.stream_options is not None - and self.stream_options.get("include_usage", False) is True - ): - # send the final chunk with stream options - complete_streaming_response = litellm.stream_chunk_builder( - chunks=self.chunks, messages=self.messages + complete_streaming_response = litellm.stream_chunk_builder( + chunks=self.chunks, messages=self.messages + ) + response = self.model_response_creator() + if complete_streaming_response is not None: + setattr( + response, + "usage", + getattr(complete_streaming_response, "usage"), ) - response = self.model_response_creator() - if complete_streaming_response is not None: - setattr( - response, - "usage", - getattr(complete_streaming_response, "usage"), - ) - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response + + ## LOGGING + threading.Thread( + target=self.logging_obj.success_handler, + args=(response, None, None, cache_hit), + ).start() # log response + + if self.sent_stream_usage is False and self.send_stream_usage is True: self.sent_stream_usage = True return response raise # Re-raise StopIteration @@ -7401,7 +7415,6 @@ class CustomStreamWrapper: or self.custom_llm_provider in litellm._custom_providers ): async for chunk in self.completion_stream: - print_verbose(f"value of async chunk: {chunk}") if chunk == "None" or chunk is None: raise Exception elif ( @@ -7431,10 +7444,7 @@ class CustomStreamWrapper: end_time=None, cache_hit=cache_hit, ) - # threading.Thread( - # target=self.logging_obj.success_handler, - # args=(processed_chunk, None, None, cache_hit), - # ).start() # log response + asyncio.create_task( self.logging_obj.async_success_handler( processed_chunk, cache_hit=cache_hit @@ -7515,82 +7525,33 @@ class CustomStreamWrapper: # RETURN RESULT self.chunks.append(processed_chunk) return processed_chunk - except StopAsyncIteration: + except (StopAsyncIteration, StopIteration): if self.sent_last_chunk is True: - if ( - self.sent_stream_usage is False - and self.stream_options is not None - and self.stream_options.get("include_usage", False) is True - ): - # send the final chunk with stream options - complete_streaming_response = litellm.stream_chunk_builder( - chunks=self.chunks, messages=self.messages + # log the final chunk with accurate streaming values + complete_streaming_response = litellm.stream_chunk_builder( + chunks=self.chunks, messages=self.messages + ) + response = self.model_response_creator() + if complete_streaming_response is not None: + setattr( + response, + "usage", + getattr(complete_streaming_response, "usage"), ) - response = self.model_response_creator() - if complete_streaming_response is not None: - setattr( - response, - "usage", - getattr(complete_streaming_response, "usage"), - ) - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response - asyncio.create_task( - self.logging_obj.async_success_handler( - response, cache_hit=cache_hit - ) - ) - self.sent_stream_usage = True - return response - raise # Re-raise StopIteration - else: - self.sent_last_chunk = True - processed_chunk = self.finish_reason_handler() ## LOGGING threading.Thread( target=self.logging_obj.success_handler, - args=(processed_chunk, None, None, cache_hit), + args=(response, None, None, cache_hit), ).start() # log response asyncio.create_task( self.logging_obj.async_success_handler( - processed_chunk, cache_hit=cache_hit + response, cache_hit=cache_hit ) ) - return processed_chunk - except StopIteration: - if self.sent_last_chunk is True: - if ( - self.sent_stream_usage is False - and self.stream_options is not None - and self.stream_options.get("include_usage", False) is True - ): - # send the final chunk with stream options - complete_streaming_response = litellm.stream_chunk_builder( - chunks=self.chunks, messages=self.messages - ) - response = self.model_response_creator() - if complete_streaming_response is not None: - setattr( - response, - "usage", - getattr(complete_streaming_response, "usage"), - ) - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response - asyncio.create_task( - self.logging_obj.async_success_handler( - response, cache_hit=cache_hit - ) - ) + if self.sent_stream_usage is False and self.send_stream_usage is True: self.sent_stream_usage = True return response - raise StopAsyncIteration + raise StopAsyncIteration # Re-raise StopIteration else: self.sent_last_chunk = True processed_chunk = self.finish_reason_handler() diff --git a/tests/local_testing/test_add_function_to_prompt.py b/tests/local_testing/test_add_function_to_prompt.py index d703ce849..43ee3dd41 100644 --- a/tests/local_testing/test_add_function_to_prompt.py +++ b/tests/local_testing/test_add_function_to_prompt.py @@ -13,7 +13,7 @@ import litellm ## case 1: set_function_to_prompt not set def test_function_call_non_openai_model(): try: - model = "claude-instant-1" + model = "claude-3-5-haiku-20241022" messages = [{"role": "user", "content": "what's the weather in sf?"}] functions = [ { @@ -43,38 +43,4 @@ def test_function_call_non_openai_model(): # test_function_call_non_openai_model() - -## case 2: add_function_to_prompt set -@pytest.mark.skip(reason="Anthropic now supports tool calling") -def test_function_call_non_openai_model_litellm_mod_set(): - litellm.add_function_to_prompt = True - litellm.set_verbose = True - try: - model = "claude-instant-1.2" - messages = [{"role": "user", "content": "what's the weather in sf?"}] - functions = [ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - } - ] - response = litellm.completion( - model=model, messages=messages, functions=functions - ) - print(f"response: {response}") - except Exception as e: - pytest.fail(f"An error occurred {e}") - - # test_function_call_non_openai_model_litellm_mod_set() diff --git a/tests/local_testing/test_alangfuse.py b/tests/local_testing/test_alangfuse.py index da83e3829..8c69f567b 100644 --- a/tests/local_testing/test_alangfuse.py +++ b/tests/local_testing/test_alangfuse.py @@ -480,28 +480,6 @@ async def test_aaalangfuse_logging_metadata(langfuse_client): print("generation_from_langfuse", generation) -@pytest.mark.skip(reason="beta test - checking langfuse output") -def test_langfuse_logging(): - try: - pre_langfuse_setup() - litellm.set_verbose = True - response = completion( - model="claude-instant-1.2", - messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}], - max_tokens=10, - temperature=0.2, - ) - print(response) - # time.sleep(5) - # # check langfuse.log to see if there was a failed response - # search_logs("langfuse.log") - - except litellm.Timeout as e: - pass - except Exception as e: - pytest.fail(f"An exception occurred - {e}") - - # test_langfuse_logging() diff --git a/tests/local_testing/test_batch_completions.py b/tests/local_testing/test_batch_completions.py index cb1f16a9f..87cb88e44 100644 --- a/tests/local_testing/test_batch_completions.py +++ b/tests/local_testing/test_batch_completions.py @@ -69,7 +69,7 @@ def test_batch_completions_models(): def test_batch_completion_models_all_responses(): try: responses = batch_completion_models_all_responses( - models=["j2-light", "claude-instant-1.2"], + models=["j2-light", "claude-3-haiku-20240307"], messages=[{"role": "user", "content": "write a poem"}], max_tokens=10, ) diff --git a/tests/local_testing/test_completion.py b/tests/local_testing/test_completion.py index 8f28de7b4..6ee6a45b2 100644 --- a/tests/local_testing/test_completion.py +++ b/tests/local_testing/test_completion.py @@ -343,7 +343,7 @@ def test_completion_claude(): try: # test without max tokens response = completion( - model="claude-instant-1", messages=messages, request_timeout=10 + model="claude-3-5-haiku-20241022", messages=messages, request_timeout=10 ) # Add any assertions here to check response args print(response) diff --git a/tests/local_testing/test_custom_callback_input.py b/tests/local_testing/test_custom_callback_input.py index 3ce3a618c..1744d3891 100644 --- a/tests/local_testing/test_custom_callback_input.py +++ b/tests/local_testing/test_custom_callback_input.py @@ -1562,3 +1562,65 @@ def test_logging_key_masking_gemini(): trimmed_key = key.split("key=")[1] trimmed_key = trimmed_key.replace("*", "") assert "PART" == trimmed_key + + +@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_standard_logging_payload_stream_usage(sync_mode): + """ + Even if stream_options is not provided, correct usage should be logged + """ + from litellm.types.utils import StandardLoggingPayload + from litellm.main import stream_chunk_builder + + stream = True + try: + # sync completion + customHandler = CompletionCustomHandler() + litellm.callbacks = [customHandler] + + if sync_mode: + patch_event = "log_success_event" + return_val = MagicMock() + else: + patch_event = "async_log_success_event" + return_val = AsyncMock() + + with patch.object(customHandler, patch_event, new=return_val) as mock_client: + if sync_mode: + resp = litellm.completion( + model="anthropic/claude-3-5-sonnet-20240620", + messages=[{"role": "user", "content": "Hey, how's it going?"}], + stream=stream, + ) + + chunks = [] + for chunk in resp: + chunks.append(chunk) + time.sleep(2) + else: + resp = await litellm.acompletion( + model="anthropic/claude-3-5-sonnet-20240620", + messages=[{"role": "user", "content": "Hey, how's it going?"}], + stream=stream, + ) + + chunks = [] + async for chunk in resp: + chunks.append(chunk) + await asyncio.sleep(2) + + mock_client.assert_called_once() + + standard_logging_object: StandardLoggingPayload = ( + mock_client.call_args.kwargs["kwargs"]["standard_logging_object"] + ) + + built_response = stream_chunk_builder(chunks=chunks) + assert ( + built_response.usage.total_tokens + != standard_logging_object["total_tokens"] + ) + print(f"standard_logging_object usage: {built_response.usage}") + except litellm.InternalServerError: + pass diff --git a/tests/local_testing/test_exceptions.py b/tests/local_testing/test_exceptions.py index e1ae1a84f..d5f67cecf 100644 --- a/tests/local_testing/test_exceptions.py +++ b/tests/local_testing/test_exceptions.py @@ -163,7 +163,7 @@ def invalid_auth(model): # set the model key to an invalid key, depending on th elif model == "azure/chatgpt-v-2": temporary_key = os.environ["AZURE_API_KEY"] os.environ["AZURE_API_KEY"] = "bad-key" - elif model == "claude-instant-1": + elif model == "claude-3-5-haiku-20241022": temporary_key = os.environ["ANTHROPIC_API_KEY"] os.environ["ANTHROPIC_API_KEY"] = "bad-key" elif model == "command-nightly": @@ -213,7 +213,7 @@ def invalid_auth(model): # set the model key to an invalid key, depending on th elif model == "chatgpt-test": os.environ["AZURE_API_KEY"] = temporary_key azure = True - elif model == "claude-instant-1": + elif model == "claude-3-5-haiku-20241022": os.environ["ANTHROPIC_API_KEY"] = temporary_key elif model == "command-nightly": os.environ["COHERE_API_KEY"] = temporary_key diff --git a/tests/local_testing/test_langsmith.py b/tests/local_testing/test_langsmith.py index 347044592..6a98f244d 100644 --- a/tests/local_testing/test_langsmith.py +++ b/tests/local_testing/test_langsmith.py @@ -77,71 +77,6 @@ async def test_langsmith_queue_logging(): pytest.fail(f"Error occurred: {e}") -@pytest.mark.skip(reason="Flaky test. covered by unit tests on custom logger.") -@pytest.mark.asyncio() -async def test_async_langsmith_logging(): - try: - test_langsmith_logger = LangsmithLogger() - run_id = str(uuid.uuid4()) - litellm.set_verbose = True - litellm.callbacks = ["langsmith"] - response = await litellm.acompletion( - model="claude-instant-1.2", - messages=[{"role": "user", "content": "what llm are u"}], - max_tokens=10, - temperature=0.2, - metadata={ - "id": run_id, - "tags": ["tag1", "tag2"], - "user_api_key": "6eb81e014497d89f3cc1aa9da7c2b37bda6b7fea68e4b710d33d94201e68970c", - "user_api_key_alias": "ishaans-langmsith-key", - "user_api_end_user_max_budget": None, - "litellm_api_version": "1.40.19", - "global_max_parallel_requests": None, - "user_api_key_user_id": "admin", - "user_api_key_org_id": None, - "user_api_key_team_id": "dbe2f686-a686-4896-864a-4c3924458709", - "user_api_key_team_alias": "testing-team", - }, - ) - print(response) - await asyncio.sleep(3) - - print("run_id", run_id) - logged_run_on_langsmith = test_langsmith_logger.get_run_by_id(run_id=run_id) - - print("logged_run_on_langsmith", logged_run_on_langsmith) - - print("fields in logged_run_on_langsmith", logged_run_on_langsmith.keys()) - - input_fields_on_langsmith = logged_run_on_langsmith.get("inputs") - extra_fields_on_langsmith = logged_run_on_langsmith.get("extra").get( - "invocation_params" - ) - - print("\nLogged INPUT ON LANGSMITH", input_fields_on_langsmith) - - print("\nextra fields on langsmith", extra_fields_on_langsmith) - - assert isinstance(input_fields_on_langsmith, dict) - assert "api_key" not in input_fields_on_langsmith - assert "api_key" not in extra_fields_on_langsmith - - # assert user_api_key in extra_fields_on_langsmith - assert "user_api_key" in extra_fields_on_langsmith - assert "user_api_key_user_id" in extra_fields_on_langsmith - assert "user_api_key_team_alias" in extra_fields_on_langsmith - - for cb in litellm.callbacks: - if isinstance(cb, LangsmithLogger): - await cb.async_httpx_client.client.aclose() - # test_langsmith_logger.async_httpx_client.close() - - except Exception as e: - print(e) - pytest.fail(f"Error occurred: {e}") - - # test_langsmith_logging() diff --git a/tests/local_testing/test_logging.py b/tests/local_testing/test_logging.py index 1a35d8454..0140cbd56 100644 --- a/tests/local_testing/test_logging.py +++ b/tests/local_testing/test_logging.py @@ -72,7 +72,7 @@ # # old_stdout = sys.stdout # # sys.stdout = new_stdout = io.StringIO() -# # response = completion(model="claude-instant-1", messages=messages) +# # response = completion(model="claude-3-5-haiku-20241022", messages=messages) # # # Restore stdout # # sys.stdout = old_stdout @@ -154,7 +154,7 @@ # old_stdout = sys.stdout # sys.stdout = new_stdout = io.StringIO() -# response = completion(model="claude-instant-1", messages=messages, stream=True) +# response = completion(model="claude-3-5-haiku-20241022", messages=messages, stream=True) # for idx, chunk in enumerate(response): # pass @@ -255,7 +255,7 @@ # # sys.stdout = new_stdout = io.StringIO() # # try: -# # response = completion(model="claude-instant-1", messages=messages) +# # response = completion(model="claude-3-5-haiku-20241022", messages=messages) # # except AuthenticationError: # # pass @@ -327,7 +327,7 @@ # # sys.stdout = new_stdout = io.StringIO() # # try: -# # response = completion(model="claude-instant-1", messages=messages) +# # response = completion(model="claude-3-5-haiku-20241022", messages=messages) # # except AuthenticationError: # # pass diff --git a/tests/local_testing/test_model_response_typing/test.py b/tests/local_testing/test_model_response_typing/test.py index 95d404809..46bf5fbb4 100644 --- a/tests/local_testing/test_model_response_typing/test.py +++ b/tests/local_testing/test_model_response_typing/test.py @@ -3,7 +3,7 @@ # BASE_URL = 'http://localhost:8080' # def test_hello_route(): -# data = {"model": "claude-instant-1", "messages": [{"role": "user", "content": "hey, how's it going?"}]} +# data = {"model": "claude-3-5-haiku-20241022", "messages": [{"role": "user", "content": "hey, how's it going?"}]} # headers = {'Content-Type': 'application/json'} # response = requests.get(BASE_URL, headers=headers, data=json.dumps(data)) # print(response.text) diff --git a/tests/local_testing/test_prometheus.py b/tests/local_testing/test_prometheus.py index 164d94553..2abdeea98 100644 --- a/tests/local_testing/test_prometheus.py +++ b/tests/local_testing/test_prometheus.py @@ -31,63 +31,6 @@ litellm.set_verbose = True import time -@pytest.mark.skip(reason="duplicate test of logging with callbacks") -@pytest.mark.asyncio() -async def test_async_prometheus_success_logging(): - from litellm.integrations.prometheus import PrometheusLogger - - pl = PrometheusLogger() - run_id = str(uuid.uuid4()) - - litellm.set_verbose = True - litellm.callbacks = [pl] - - response = await litellm.acompletion( - model="claude-instant-1.2", - messages=[{"role": "user", "content": "what llm are u"}], - max_tokens=10, - mock_response="hi", - temperature=0.2, - metadata={ - "id": run_id, - "tags": ["tag1", "tag2"], - "user_api_key": "6eb81e014497d89f3cc1aa9da7c2b37bda6b7fea68e4b710d33d94201e68970c", - "user_api_key_alias": "ishaans-prometheus-key", - "user_api_end_user_max_budget": None, - "litellm_api_version": "1.40.19", - "global_max_parallel_requests": None, - "user_api_key_user_id": "admin", - "user_api_key_org_id": None, - "user_api_key_team_id": "dbe2f686-a686-4896-864a-4c3924458709", - "user_api_key_team_alias": "testing-team", - }, - ) - print(response) - await asyncio.sleep(3) - - # get prometheus logger - test_prometheus_logger = pl - print("done with success request") - - print( - "vars of test_prometheus_logger", - vars(test_prometheus_logger.litellm_requests_metric), - ) - - # Get the metrics - metrics = {} - for metric in REGISTRY.collect(): - for sample in metric.samples: - metrics[sample.name] = sample.value - - print("metrics from prometheus", metrics) - assert metrics["litellm_requests_metric_total"] == 1.0 - assert metrics["litellm_total_tokens_total"] == 30.0 - assert metrics["litellm_deployment_success_responses_total"] == 1.0 - assert metrics["litellm_deployment_total_requests_total"] == 1.0 - assert metrics["litellm_deployment_latency_per_output_token_bucket"] == 1.0 - - @pytest.mark.asyncio() async def test_async_prometheus_success_logging_with_callbacks(): @@ -107,7 +50,7 @@ async def test_async_prometheus_success_logging_with_callbacks(): initial_metrics[sample.name] = sample.value response = await litellm.acompletion( - model="claude-instant-1.2", + model="claude-3-haiku-20240307", messages=[{"role": "user", "content": "what llm are u"}], max_tokens=10, mock_response="hi", diff --git a/tests/local_testing/test_promptlayer_integration.py b/tests/local_testing/test_promptlayer_integration.py index f55765757..d2e2268e6 100644 --- a/tests/local_testing/test_promptlayer_integration.py +++ b/tests/local_testing/test_promptlayer_integration.py @@ -18,7 +18,7 @@ import time # sys.stdout = new_stdout = io.StringIO() -# response = completion(model="claude-instant-1.2", +# response = completion(model="claude-3-5-haiku-20241022", # messages=[{ # "role": "user", # "content": "Hi 👋 - i'm claude" diff --git a/tests/local_testing/test_provider_specific_config.py b/tests/local_testing/test_provider_specific_config.py index 3ff709854..1f1ccaef8 100644 --- a/tests/local_testing/test_provider_specific_config.py +++ b/tests/local_testing/test_provider_specific_config.py @@ -56,7 +56,7 @@ def claude_test_completion(): try: # OVERRIDE WITH DYNAMIC MAX TOKENS response_1 = litellm.completion( - model="claude-instant-1.2", + model="claude-3-haiku-20240307", messages=[{"content": "Hello, how are you?", "role": "user"}], max_tokens=10, ) @@ -66,7 +66,7 @@ def claude_test_completion(): # USE CONFIG TOKENS response_2 = litellm.completion( - model="claude-instant-1.2", + model="claude-3-haiku-20240307", messages=[{"content": "Hello, how are you?", "role": "user"}], ) # Add any assertions here to check the response @@ -77,7 +77,7 @@ def claude_test_completion(): try: response_3 = litellm.completion( - model="claude-instant-1.2", + model="claude-3-5-haiku-20241022", messages=[{"content": "Hello, how are you?", "role": "user"}], n=2, ) diff --git a/tests/local_testing/test_proxy_utils.py b/tests/local_testing/test_proxy_utils.py index 74ef75392..f3f33bad6 100644 --- a/tests/local_testing/test_proxy_utils.py +++ b/tests/local_testing/test_proxy_utils.py @@ -10,7 +10,7 @@ sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path import litellm - +from unittest.mock import MagicMock, patch, AsyncMock from litellm.proxy._types import LitellmUserRoles, UserAPIKeyAuth from litellm.proxy.auth.auth_utils import is_request_body_safe @@ -465,3 +465,48 @@ def test_update_internal_user_params(): updated_data_json["budget_duration"] == litellm.default_internal_user_params["budget_duration"] ) + + +@pytest.mark.asyncio +async def test_proxy_config_update_from_db(): + from litellm.proxy.proxy_server import ProxyConfig + from pydantic import BaseModel + + proxy_config = ProxyConfig() + + pc = AsyncMock() + + test_config = { + "litellm_settings": { + "callbacks": ["prometheus", "otel"], + } + } + + class ReturnValue(BaseModel): + param_name: str + param_value: dict + + with patch.object( + pc, + "get_generic_data", + new=AsyncMock( + return_value=ReturnValue( + param_name="litellm_settings", + param_value={ + "success_callback": "langfuse", + }, + ) + ), + ): + new_config = await proxy_config._update_config_from_db( + prisma_client=pc, + config=test_config, + store_model_in_db=True, + ) + + assert new_config == { + "litellm_settings": { + "callbacks": ["prometheus", "otel"], + "success_callback": "langfuse", + } + } diff --git a/tests/local_testing/test_router.py b/tests/local_testing/test_router.py index 5ffdbc7ac..8884f4c3a 100644 --- a/tests/local_testing/test_router.py +++ b/tests/local_testing/test_router.py @@ -1807,7 +1807,7 @@ def test_router_anthropic_key_dynamic(): { "model_name": "anthropic-claude", "litellm_params": { - "model": "claude-instant-1.2", + "model": "claude-3-5-haiku-20241022", "api_key": anthropic_api_key, }, } diff --git a/tests/local_testing/test_router_fallbacks.py b/tests/local_testing/test_router_fallbacks.py index 96983003a..3e91cd79a 100644 --- a/tests/local_testing/test_router_fallbacks.py +++ b/tests/local_testing/test_router_fallbacks.py @@ -824,8 +824,8 @@ def test_ausage_based_routing_fallbacks(): "rpm": OPENAI_RPM, }, { - "model_name": "anthropic-claude-instant-1.2", - "litellm_params": get_anthropic_params("claude-instant-1.2"), + "model_name": "anthropic-claude-3-5-haiku-20241022", + "litellm_params": get_anthropic_params("claude-3-5-haiku-20241022"), "model_info": {"id": 4}, "rpm": ANTHROPIC_RPM, }, @@ -834,7 +834,7 @@ def test_ausage_based_routing_fallbacks(): fallbacks_list = [ {"azure/gpt-4-fast": ["azure/gpt-4-basic"]}, {"azure/gpt-4-basic": ["openai-gpt-4"]}, - {"openai-gpt-4": ["anthropic-claude-instant-1.2"]}, + {"openai-gpt-4": ["anthropic-claude-3-5-haiku-20241022"]}, ] router = Router( @@ -864,7 +864,7 @@ def test_ausage_based_routing_fallbacks(): assert response._hidden_params["model_id"] == "1" for i in range(10): - # now make 100 mock requests to OpenAI - expect it to fallback to anthropic-claude-instant-1.2 + # now make 100 mock requests to OpenAI - expect it to fallback to anthropic-claude-3-5-haiku-20241022 response = router.completion( model="azure/gpt-4-fast", messages=messages, diff --git a/tests/local_testing/test_router_pattern_matching.py b/tests/local_testing/test_router_pattern_matching.py index 701a62e41..9d8c4db0d 100644 --- a/tests/local_testing/test_router_pattern_matching.py +++ b/tests/local_testing/test_router_pattern_matching.py @@ -17,6 +17,7 @@ from litellm.router import Deployment, LiteLLM_Params, ModelInfo from concurrent.futures import ThreadPoolExecutor from collections import defaultdict from dotenv import load_dotenv +from unittest.mock import patch, MagicMock, AsyncMock load_dotenv() @@ -155,3 +156,35 @@ def test_route_with_exception(): result = router.route("openai/gpt-3.5-turbo") assert result is None + + +def test_router_pattern_match_e2e(): + """ + Tests the end to end flow of the router + """ + from litellm.llms.custom_httpx.http_handler import HTTPHandler + + client = HTTPHandler() + router = Router( + model_list=[ + { + "model_name": "llmengine/*", + "litellm_params": {"model": "anthropic/*", "api_key": "test"}, + } + ] + ) + + with patch.object(client, "post", new=MagicMock()) as mock_post: + + router.completion( + model="llmengine/my-custom-model", + messages=[{"role": "user", "content": "Hello, how are you?"}], + client=client, + api_key="test", + ) + mock_post.assert_called_once() + print(mock_post.call_args.kwargs["data"]) + mock_post.call_args.kwargs["data"] == { + "model": "gpt-4o", + "messages": [{"role": "user", "content": "Hello, how are you?"}], + } diff --git a/tests/local_testing/test_router_timeout.py b/tests/local_testing/test_router_timeout.py index c13bc2deb..21e74e099 100644 --- a/tests/local_testing/test_router_timeout.py +++ b/tests/local_testing/test_router_timeout.py @@ -38,9 +38,9 @@ def test_router_timeouts(): "tpm": 80000, }, { - "model_name": "anthropic-claude-instant-1.2", + "model_name": "anthropic-claude-3-5-haiku-20241022", "litellm_params": { - "model": "claude-instant-1.2", + "model": "claude-3-5-haiku-20241022", "api_key": "os.environ/ANTHROPIC_API_KEY", "mock_response": "hello world", }, @@ -49,7 +49,7 @@ def test_router_timeouts(): ] fallbacks_list = [ - {"openai-gpt-4": ["anthropic-claude-instant-1.2"]}, + {"openai-gpt-4": ["anthropic-claude-3-5-haiku-20241022"]}, ] # Configure router diff --git a/tests/local_testing/test_streaming.py b/tests/local_testing/test_streaming.py index 3e2145c81..827a2495b 100644 --- a/tests/local_testing/test_streaming.py +++ b/tests/local_testing/test_streaming.py @@ -681,7 +681,7 @@ def test_completion_ollama_hosted_stream(): @pytest.mark.parametrize( "model", [ - # "claude-instant-1.2", + # "claude-3-5-haiku-20241022", # "claude-2", # "mistral/mistral-medium", "openrouter/openai/gpt-4o-mini", @@ -1112,7 +1112,7 @@ def test_completion_claude_stream_bad_key(): }, ] response = completion( - model="claude-instant-1", + model="claude-3-5-haiku-20241022", messages=messages, stream=True, max_tokens=50, diff --git a/tests/local_testing/test_token_counter.py b/tests/local_testing/test_token_counter.py index 3ad73f2d8..7234ef38e 100644 --- a/tests/local_testing/test_token_counter.py +++ b/tests/local_testing/test_token_counter.py @@ -1,6 +1,6 @@ #### What this tests #### # This tests litellm.token_counter() function - +import traceback import os import sys import time @@ -116,7 +116,9 @@ def test_tokenizers(): openai_tokens = token_counter(model="gpt-3.5-turbo", text=sample_text) # claude tokenizer - claude_tokens = token_counter(model="claude-instant-1", text=sample_text) + claude_tokens = token_counter( + model="claude-3-5-haiku-20241022", text=sample_text + ) # cohere tokenizer cohere_tokens = token_counter(model="command-nightly", text=sample_text) @@ -167,8 +169,9 @@ def test_encoding_and_decoding(): assert openai_text == sample_text # claude encoding + decoding - claude_tokens = encode(model="claude-instant-1", text=sample_text) - claude_text = decode(model="claude-instant-1", tokens=claude_tokens.ids) + claude_tokens = encode(model="claude-3-5-haiku-20241022", text=sample_text) + + claude_text = decode(model="claude-3-5-haiku-20241022", tokens=claude_tokens) assert claude_text == sample_text @@ -186,7 +189,7 @@ def test_encoding_and_decoding(): assert llama2_text == sample_text except Exception as e: - pytest.fail(f"An exception occured: {e}") + pytest.fail(f"An exception occured: {e}\n{traceback.format_exc()}") # test_encoding_and_decoding() diff --git a/tests/local_testing/test_traceloop.py b/tests/local_testing/test_traceloop.py index 74d58228e..5cab8dd59 100644 --- a/tests/local_testing/test_traceloop.py +++ b/tests/local_testing/test_traceloop.py @@ -26,7 +26,7 @@ def exporter(): return exporter -@pytest.mark.parametrize("model", ["claude-instant-1.2", "gpt-3.5-turbo"]) +@pytest.mark.parametrize("model", ["claude-3-5-haiku-20241022", "gpt-3.5-turbo"]) def test_traceloop_logging(exporter, model): litellm.completion( model=model, diff --git a/tests/local_testing/test_wandb.py b/tests/local_testing/test_wandb.py index d31310fa6..6cdca4049 100644 --- a/tests/local_testing/test_wandb.py +++ b/tests/local_testing/test_wandb.py @@ -57,7 +57,7 @@ test_wandb_logging_async() def test_wandb_logging(): try: response = completion( - model="claude-instant-1.2", + model="claude-3-5-haiku-20241022", messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}], max_tokens=10, temperature=0.2, diff --git a/tests/logging_callback_tests/test_langfuse_unit_tests.py b/tests/logging_callback_tests/test_langfuse_unit_tests.py index 2a6cbe00a..20b33f81b 100644 --- a/tests/logging_callback_tests/test_langfuse_unit_tests.py +++ b/tests/logging_callback_tests/test_langfuse_unit_tests.py @@ -1,19 +1,13 @@ -import json import os import sys +import threading from datetime import datetime -from pydantic.main import Model - sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system-path import pytest -import litellm -import asyncio -import logging -from litellm._logging import verbose_logger from litellm.integrations.langfuse.langfuse import ( LangFuseLogger, ) @@ -217,3 +211,27 @@ def test_get_langfuse_logger_for_request_with_cached_logger(): assert result == cached_logger mock_cache.get_cache.assert_called_once() + +@pytest.mark.parametrize("metadata", [ + {'a': 1, 'b': 2, 'c': 3}, + {'a': {'nested_a': 1}, 'b': {'nested_b': 2}}, + {'a': [1, 2, 3], 'b': {4, 5, 6}}, + {'a': (1, 2), 'b': frozenset([3, 4]), 'c': {'d': [5, 6]}}, + {'lock': threading.Lock()}, + {'func': lambda x: x + 1}, + { + 'int': 42, + 'str': 'hello', + 'list': [1, 2, 3], + 'set': {4, 5}, + 'dict': {'nested': 'value'}, + 'non_copyable': threading.Lock(), + 'function': print + }, + ['list', 'not', 'a', 'dict'], + {'timestamp': datetime.now()}, + {}, + None, +]) +def test_langfuse_logger_prepare_metadata(metadata): + global_langfuse_logger._prepare_metadata(metadata) diff --git a/tests/router_unit_tests/test_router_helper_utils.py b/tests/router_unit_tests/test_router_helper_utils.py index 0231e199f..ddd7a502c 100644 --- a/tests/router_unit_tests/test_router_helper_utils.py +++ b/tests/router_unit_tests/test_router_helper_utils.py @@ -986,3 +986,16 @@ def test_pattern_match_deployment_set_model_name( print(updated_model) # Expected output: "openai/fo::hi:static::hello" assert updated_model == expected_model + + updated_models = pattern_router._return_pattern_matched_deployments( + match, + deployments=[ + { + "model_name": model_name, + "litellm_params": {"model": litellm_model}, + } + ], + ) + + for model in updated_models: + assert model["litellm_params"]["model"] == expected_model diff --git a/tests/test_keys.py b/tests/test_keys.py index ab1e97ac2..554a084c9 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -523,8 +523,8 @@ async def test_key_info_spend_values(): @pytest.mark.asyncio -@pytest.mark.flaky(retries=3, delay=1) -async def test_key_info_spend_values_streaming(): +@pytest.mark.flaky(retries=6, delay=2) +async def test_aaaaakey_info_spend_values_streaming(): """ Test to ensure spend is correctly calculated. - create key @@ -545,7 +545,7 @@ async def test_key_info_spend_values_streaming(): completion_tokens=completion_tokens, ) response_cost = prompt_cost + completion_cost - await asyncio.sleep(5) # allow db log to be updated + await asyncio.sleep(8) # allow db log to be updated print(f"new_key: {new_key}") key_info = await get_key_info( session=session, get_key=new_key, call_key=new_key From 0c204d33bc4ea454c86f6c083e383cda661625f6 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Thu, 7 Nov 2024 04:37:32 +0530 Subject: [PATCH 24/67] LiteLLM Minor Fixes & Improvements (11/06/2024) (#6624) * refactor(proxy_server.py): add debug logging around license check event (refactor position in startup_event logic) * fix(proxy/_types.py): allow admin_allowed_routes to be any str * fix(router.py): raise 400-status code error for no 'model_name' error on router Fixes issue with status code when unknown model name passed with pattern matching enabled * fix(converse_handler.py): add claude 3-5 haiku to bedrock converse models * test: update testing to replace claude-instant-1.2 * fix(router.py): fix router.moderation calls * test: update test to remove claude-instant-1 * fix(router.py): support model_list values in router.moderation * test: fix test * test: fix test --- litellm/llms/bedrock/chat/converse_handler.py | 1 + litellm/main.py | 4 +- litellm/proxy/_new_secret_config.yaml | 26 ++++ litellm/proxy/_types.py | 10 +- litellm/proxy/auth/litellm_license.py | 38 +++++- litellm/proxy/proxy_cli.py | 3 + litellm/proxy/proxy_server.py | 24 ++-- litellm/proxy/route_llm_request.py | 1 + litellm/router.py | 128 ++++++------------ litellm/utils.py | 1 + tests/local_testing/test_jwt.py | 13 +- tests/local_testing/test_router.py | 9 +- tests/local_testing/test_router_fallbacks.py | 4 +- .../test_router_pattern_matching.py | 41 ++++++ .../test_router_helper_utils.py | 7 + 15 files changed, 180 insertions(+), 130 deletions(-) diff --git a/litellm/llms/bedrock/chat/converse_handler.py b/litellm/llms/bedrock/chat/converse_handler.py index b775cc64c..e47ba4f42 100644 --- a/litellm/llms/bedrock/chat/converse_handler.py +++ b/litellm/llms/bedrock/chat/converse_handler.py @@ -19,6 +19,7 @@ from ..common_utils import BedrockError from .invoke_handler import AWSEventStreamDecoder, MockResponseIterator, make_call BEDROCK_CONVERSE_MODELS = [ + "anthropic.claude-3-5-haiku-20241022-v1:0", "anthropic.claude-3-5-sonnet-20241022-v2:0", "anthropic.claude-3-5-sonnet-20240620-v1:0", "anthropic.claude-3-opus-20240229-v1:0", diff --git a/litellm/main.py b/litellm/main.py index f89a6f2e3..8334f35d7 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -4319,9 +4319,9 @@ async def amoderation( else: _openai_client = openai_client if model is not None: - response = await openai_client.moderations.create(input=input, model=model) + response = await _openai_client.moderations.create(input=input, model=model) else: - response = await openai_client.moderations.create(input=input) + response = await _openai_client.moderations.create(input=input) return response diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 5cf293864..d0bd5f674 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -23,6 +23,31 @@ model_list: model: openai/my-fake-model api_key: my-fake-key api_base: https://exampleopenaiendpoint-production.up.railway.app/ + ## bedrock chat completions + - model_name: "*anthropic.claude*" + litellm_params: + model: bedrock/*anthropic.claude* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + guardrailConfig: + "guardrailIdentifier": "h4dsqwhp6j66" + "guardrailVersion": "2" + "trace": "enabled" + +## bedrock embeddings + - model_name: "*amazon.titan-embed-*" + litellm_params: + model: bedrock/amazon.titan-embed-* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + - model_name: "*cohere.embed-*" + litellm_params: + model: bedrock/cohere.embed-* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME - model_name: gpt-4 litellm_params: @@ -33,6 +58,7 @@ model_list: rpm: 480 timeout: 300 stream_timeout: 60 + # litellm_settings: # fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] # callbacks: ["otel", "prometheus"] diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index 9aebd9071..fd9ef8556 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -436,15 +436,7 @@ class LiteLLM_JWTAuth(LiteLLMBase): """ admin_jwt_scope: str = "litellm_proxy_admin" - admin_allowed_routes: List[ - Literal[ - "openai_routes", - "info_routes", - "management_routes", - "spend_tracking_routes", - "global_spend_tracking_routes", - ] - ] = [ + admin_allowed_routes: List[str] = [ "management_routes", "spend_tracking_routes", "global_spend_tracking_routes", diff --git a/litellm/proxy/auth/litellm_license.py b/litellm/proxy/auth/litellm_license.py index 784b4274e..a736a1f5e 100644 --- a/litellm/proxy/auth/litellm_license.py +++ b/litellm/proxy/auth/litellm_license.py @@ -5,6 +5,9 @@ import json import os import traceback from datetime import datetime +from typing import Optional + +import httpx from litellm._logging import verbose_proxy_logger from litellm.llms.custom_httpx.http_handler import HTTPHandler @@ -44,23 +47,46 @@ class LicenseCheck: verbose_proxy_logger.error(f"Error reading public key: {str(e)}") def _verify(self, license_str: str) -> bool: + + verbose_proxy_logger.debug( + "litellm.proxy.auth.litellm_license.py::_verify - Checking license against {}/verify_license - {}".format( + self.base_url, license_str + ) + ) url = "{}/verify_license/{}".format(self.base_url, license_str) + response: Optional[httpx.Response] = None try: # don't impact user, if call fails - response = self.http_handler.get(url=url) + num_retries = 3 + for i in range(num_retries): + try: + response = self.http_handler.get(url=url) + if response is None: + raise Exception("No response from license server") + response.raise_for_status() + except httpx.HTTPStatusError: + if i == num_retries - 1: + raise - response.raise_for_status() + if response is None: + raise Exception("No response from license server") response_json = response.json() premium = response_json["verify"] assert isinstance(premium, bool) + + verbose_proxy_logger.debug( + "litellm.proxy.auth.litellm_license.py::_verify - License={} is premium={}".format( + license_str, premium + ) + ) return premium except Exception as e: - verbose_proxy_logger.error( - "litellm.proxy.auth.litellm_license.py::_verify - Unable to verify License via api. - {}".format( - str(e) + verbose_proxy_logger.exception( + "litellm.proxy.auth.litellm_license.py::_verify - Unable to verify License={} via api. - {}".format( + license_str, str(e) ) ) return False @@ -72,7 +98,7 @@ class LicenseCheck: """ try: verbose_proxy_logger.debug( - "litellm.proxy.auth.litellm_license.py::is_premium() - ENTERING 'IS_PREMIUM' - {}".format( + "litellm.proxy.auth.litellm_license.py::is_premium() - ENTERING 'IS_PREMIUM' - LiteLLM License={}".format( self.license_str ) ) diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py index 1fb628a80..f9f8276c7 100644 --- a/litellm/proxy/proxy_cli.py +++ b/litellm/proxy/proxy_cli.py @@ -694,6 +694,9 @@ def run_server( # noqa: PLR0915 import litellm + if detailed_debug is True: + litellm._turn_on_debug() + # DO NOT DELETE - enables global variables to work across files from litellm.proxy.proxy_server import app # noqa diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 94a5bb5e9..8edf2cee3 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3074,6 +3074,15 @@ async def startup_event(): user_api_key_cache=user_api_key_cache, ) + ## CHECK PREMIUM USER + verbose_proxy_logger.debug( + "litellm.proxy.proxy_server.py::startup() - CHECKING PREMIUM USER - {}".format( + premium_user + ) + ) + if premium_user is False: + premium_user = _license_check.is_premium() + ### LOAD CONFIG ### worker_config: Optional[Union[str, dict]] = get_secret("WORKER_CONFIG") # type: ignore env_config_yaml: Optional[str] = get_secret_str("CONFIG_FILE_PATH") @@ -3121,21 +3130,6 @@ async def startup_event(): if isinstance(worker_config, dict): await initialize(**worker_config) - ## CHECK PREMIUM USER - verbose_proxy_logger.debug( - "litellm.proxy.proxy_server.py::startup() - CHECKING PREMIUM USER - {}".format( - premium_user - ) - ) - if premium_user is False: - premium_user = _license_check.is_premium() - - verbose_proxy_logger.debug( - "litellm.proxy.proxy_server.py::startup() - PREMIUM USER value - {}".format( - premium_user - ) - ) - ProxyStartupEvent._initialize_startup_logging( llm_router=llm_router, proxy_logging_obj=proxy_logging_obj, diff --git a/litellm/proxy/route_llm_request.py b/litellm/proxy/route_llm_request.py index fcf95f6ab..3c5c8b3b4 100644 --- a/litellm/proxy/route_llm_request.py +++ b/litellm/proxy/route_llm_request.py @@ -65,6 +65,7 @@ async def route_request( Common helper to route the request """ + router_model_names = llm_router.model_names if llm_router is not None else [] if "api_key" in data or "api_base" in data: return getattr(litellm, f"{route_type}")(**data) diff --git a/litellm/router.py b/litellm/router.py index 726119cb7..759f94f74 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -556,6 +556,10 @@ class Router: self.initialize_assistants_endpoint() + self.amoderation = self.factory_function( + litellm.amoderation, call_type="moderation" + ) + def initialize_assistants_endpoint(self): ## INITIALIZE PASS THROUGH ASSISTANTS ENDPOINT ## self.acreate_assistants = self.factory_function(litellm.acreate_assistants) @@ -1683,78 +1687,6 @@ class Router: ) raise e - async def amoderation(self, model: str, input: str, **kwargs): - try: - kwargs["model"] = model - kwargs["input"] = input - kwargs["original_function"] = self._amoderation - kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries) - kwargs.get("request_timeout", self.timeout) - kwargs.setdefault("metadata", {}).update({"model_group": model}) - - response = await self.async_function_with_fallbacks(**kwargs) - - return response - except Exception as e: - asyncio.create_task( - send_llm_exception_alert( - litellm_router_instance=self, - request_kwargs=kwargs, - error_traceback_str=traceback.format_exc(), - original_exception=e, - ) - ) - raise e - - async def _amoderation(self, model: str, input: str, **kwargs): - model_name = None - try: - verbose_router_logger.debug( - f"Inside _moderation()- model: {model}; kwargs: {kwargs}" - ) - deployment = await self.async_get_available_deployment( - model=model, - input=input, - specific_deployment=kwargs.pop("specific_deployment", None), - ) - self._update_kwargs_with_deployment(deployment=deployment, kwargs=kwargs) - data = deployment["litellm_params"].copy() - model_name = data["model"] - model_client = self._get_async_openai_model_client( - deployment=deployment, - kwargs=kwargs, - ) - self.total_calls[model_name] += 1 - - timeout: Optional[Union[float, int]] = self._get_timeout( - kwargs=kwargs, - data=data, - ) - - response = await litellm.amoderation( - **{ - **data, - "input": input, - "caching": self.cache_responses, - "client": model_client, - "timeout": timeout, - **kwargs, - } - ) - - self.success_calls[model_name] += 1 - verbose_router_logger.info( - f"litellm.amoderation(model={model_name})\033[32m 200 OK\033[0m" - ) - return response - except Exception as e: - verbose_router_logger.info( - f"litellm.amoderation(model={model_name})\033[31m Exception {str(e)}\033[0m" - ) - if model_name is not None: - self.fail_calls[model_name] += 1 - raise e - async def arerank(self, model: str, **kwargs): try: kwargs["model"] = model @@ -2610,20 +2542,46 @@ class Router: return final_results - #### ASSISTANTS API #### + #### PASSTHROUGH API #### - def factory_function(self, original_function: Callable): + async def _pass_through_moderation_endpoint_factory( + self, + original_function: Callable, + **kwargs, + ): + if ( + "model" in kwargs + and self.get_model_list(model_name=kwargs["model"]) is not None + ): + deployment = await self.async_get_available_deployment( + model=kwargs["model"] + ) + kwargs["model"] = deployment["litellm_params"]["model"] + return await original_function(**kwargs) + + def factory_function( + self, + original_function: Callable, + call_type: Literal["assistants", "moderation"] = "assistants", + ): async def new_function( custom_llm_provider: Optional[Literal["openai", "azure"]] = None, client: Optional["AsyncOpenAI"] = None, **kwargs, ): - return await self._pass_through_assistants_endpoint_factory( - original_function=original_function, - custom_llm_provider=custom_llm_provider, - client=client, - **kwargs, - ) + if call_type == "assistants": + return await self._pass_through_assistants_endpoint_factory( + original_function=original_function, + custom_llm_provider=custom_llm_provider, + client=client, + **kwargs, + ) + elif call_type == "moderation": + + return await self._pass_through_moderation_endpoint_factory( # type: ignore + original_function=original_function, + **kwargs, + ) return new_function @@ -5052,10 +5010,12 @@ class Router: ) if len(healthy_deployments) == 0: - raise ValueError( - "{}. You passed in model={}. There is no 'model_name' with this string ".format( - RouterErrors.no_deployments_available.value, model - ) + raise litellm.BadRequestError( + message="You passed in model={}. There is no 'model_name' with this string ".format( + model + ), + model=model, + llm_provider="", ) if litellm.model_alias_map and model in litellm.model_alias_map: diff --git a/litellm/utils.py b/litellm/utils.py index 6dd0a5009..efda579d6 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1043,6 +1043,7 @@ def client(original_function): # noqa: PLR0915 if ( call_type != CallTypes.aimage_generation.value # model optional and call_type != CallTypes.atext_completion.value # can also be engine + and call_type != CallTypes.amoderation.value ): raise ValueError("model param not passed in.") diff --git a/tests/local_testing/test_jwt.py b/tests/local_testing/test_jwt.py index 15ce4192d..ad929ba4f 100644 --- a/tests/local_testing/test_jwt.py +++ b/tests/local_testing/test_jwt.py @@ -689,9 +689,10 @@ async def aaaatest_user_token_output( assert team_result.user_id == user_id +@pytest.mark.parametrize("admin_allowed_routes", [None, ["ui_routes"]]) @pytest.mark.parametrize("audience", [None, "litellm-proxy"]) @pytest.mark.asyncio -async def test_allowed_routes_admin(prisma_client, audience): +async def test_allowed_routes_admin(prisma_client, audience, admin_allowed_routes): """ Add a check to make sure jwt proxy admin scope can access all allowed admin routes @@ -754,12 +755,17 @@ async def test_allowed_routes_admin(prisma_client, audience): jwt_handler.user_api_key_cache = cache - jwt_handler.litellm_jwtauth = LiteLLM_JWTAuth(team_id_jwt_field="client_id") + if admin_allowed_routes: + jwt_handler.litellm_jwtauth = LiteLLM_JWTAuth( + team_id_jwt_field="client_id", admin_allowed_routes=admin_allowed_routes + ) + else: + jwt_handler.litellm_jwtauth = LiteLLM_JWTAuth(team_id_jwt_field="client_id") # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) # Generate the JWT token # But before, you should convert bytes to string @@ -777,6 +783,7 @@ async def test_allowed_routes_admin(prisma_client, audience): # verify token + print(f"admin_token: {admin_token}") response = await jwt_handler.auth_jwt(token=admin_token) ## RUN IT THROUGH USER API KEY AUTH diff --git a/tests/local_testing/test_router.py b/tests/local_testing/test_router.py index 8884f4c3a..cd5e8f6b2 100644 --- a/tests/local_testing/test_router.py +++ b/tests/local_testing/test_router.py @@ -1866,16 +1866,9 @@ async def test_router_amoderation(): router = Router(model_list=model_list) ## Test 1: user facing function result = await router.amoderation( - model="openai-moderations", input="this is valid good text" + model="text-moderation-stable", input="this is valid good text" ) - ## Test 2: underlying function - result = await router._amoderation( - model="openai-moderations", input="this is valid good text" - ) - - print("moderation result", result) - def test_router_add_deployment(): initial_model_list = [ diff --git a/tests/local_testing/test_router_fallbacks.py b/tests/local_testing/test_router_fallbacks.py index 3e91cd79a..a5149b9fa 100644 --- a/tests/local_testing/test_router_fallbacks.py +++ b/tests/local_testing/test_router_fallbacks.py @@ -1226,9 +1226,7 @@ async def test_using_default_fallback(sync_mode): pytest.fail(f"Expected call to fail we passed model=openai/foo") except Exception as e: print("got exception = ", e) - from litellm.types.router import RouterErrors - - assert RouterErrors.no_deployments_available.value in str(e) + assert "BadRequestError" in str(e) @pytest.mark.parametrize("sync_mode", [False]) diff --git a/tests/local_testing/test_router_pattern_matching.py b/tests/local_testing/test_router_pattern_matching.py index 9d8c4db0d..2a6f66105 100644 --- a/tests/local_testing/test_router_pattern_matching.py +++ b/tests/local_testing/test_router_pattern_matching.py @@ -158,6 +158,46 @@ def test_route_with_exception(): assert result is None +@pytest.mark.asyncio +async def test_route_with_no_matching_pattern(): + """ + Tests that the router returns None when there is no matching pattern + """ + from litellm.types.router import RouterErrors + + router = Router( + model_list=[ + { + "model_name": "*meta.llama3*", + "litellm_params": {"model": "bedrock/meta.llama3*"}, + } + ] + ) + + ## WORKS + result = await router.acompletion( + model="bedrock/meta.llama3-70b", + messages=[{"role": "user", "content": "Hello, world!"}], + mock_response="Works", + ) + assert result.choices[0].message.content == "Works" + + ## FAILS + with pytest.raises(litellm.BadRequestError) as e: + await router.acompletion( + model="my-fake-model", + messages=[{"role": "user", "content": "Hello, world!"}], + mock_response="Works", + ) + + assert RouterErrors.no_deployments_available.value not in str(e.value) + + with pytest.raises(litellm.BadRequestError): + await router.aembedding( + model="my-fake-model", + input="Hello, world!", + ) + def test_router_pattern_match_e2e(): """ Tests the end to end flow of the router @@ -188,3 +228,4 @@ def test_router_pattern_match_e2e(): "model": "gpt-4o", "messages": [{"role": "user", "content": "Hello, how are you?"}], } + diff --git a/tests/router_unit_tests/test_router_helper_utils.py b/tests/router_unit_tests/test_router_helper_utils.py index ddd7a502c..cabb4a899 100644 --- a/tests/router_unit_tests/test_router_helper_utils.py +++ b/tests/router_unit_tests/test_router_helper_utils.py @@ -999,3 +999,10 @@ def test_pattern_match_deployment_set_model_name( for model in updated_models: assert model["litellm_params"]["model"] == expected_model + +@pytest.mark.asyncio +async def test_pass_through_moderation_endpoint_factory(model_list): + router = Router(model_list=model_list) + response = await router._pass_through_moderation_endpoint_factory( + original_function=litellm.amoderation, input="this is valid good text" + ) \ No newline at end of file From 0f8cceb274a02d02db506fd398c0f6cafb2d4f27 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 7 Nov 2024 04:45:06 +0530 Subject: [PATCH 25/67] =?UTF-8?q?bump:=20version=201.52.0=20=E2=86=92=201.?= =?UTF-8?q?52.1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9c520ff34..8681486e8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.0" +version = "1.52.1" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.0" +version = "1.52.1" version_files = [ "pyproject.toml:^version" ] From 0ca50d56a8362abe4372693ed6e0f4cb84bef58f Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 17:14:56 -0800 Subject: [PATCH 26/67] (feat) GCS Bucket logging. Allow using IAM auth for logging to GCS (#6628) * fix gcs bucket auth * allow iam auth for gcs logging * test_get_gcs_logging_config_without_service_account --- litellm/integrations/gcs_bucket/gcs_bucket.py | 37 +++++++++------ .../gcs_bucket/gcs_bucket_base.py | 2 +- litellm/proxy/proxy_config.yaml | 6 +-- tests/local_testing/test_gcs_bucket.py | 47 +++++++++++++++++++ 4 files changed, 73 insertions(+), 19 deletions(-) diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py index 3d99c0257..111730d1f 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket.py @@ -26,10 +26,13 @@ else: VertexBase = Any +IAM_AUTH_KEY = "IAM_AUTH" + + class GCSLoggingConfig(TypedDict): bucket_name: str vertex_instance: VertexBase - path_service_account: str + path_service_account: Optional[str] class GCSBucketLogger(GCSBucketBase): @@ -173,7 +176,7 @@ class GCSBucketLogger(GCSBucketBase): ) bucket_name: str - path_service_account: str + path_service_account: Optional[str] if standard_callback_dynamic_params is not None: verbose_logger.debug("Using dynamic GCS logging") verbose_logger.debug( @@ -193,10 +196,6 @@ class GCSBucketLogger(GCSBucketBase): raise ValueError( "GCS_BUCKET_NAME is not set in the environment, but GCS Bucket is being used as a logging callback. Please set 'GCS_BUCKET_NAME' in the environment." ) - if _path_service_account is None: - raise ValueError( - "GCS_PATH_SERVICE_ACCOUNT is not set in the environment, but GCS Bucket is being used as a logging callback. Please set 'GCS_PATH_SERVICE_ACCOUNT' in the environment." - ) bucket_name = _bucket_name path_service_account = _path_service_account vertex_instance = await self.get_or_create_vertex_instance( @@ -208,10 +207,6 @@ class GCSBucketLogger(GCSBucketBase): raise ValueError( "GCS_BUCKET_NAME is not set in the environment, but GCS Bucket is being used as a logging callback. Please set 'GCS_BUCKET_NAME' in the environment." ) - if self.path_service_account_json is None: - raise ValueError( - "GCS_PATH_SERVICE_ACCOUNT is not set in the environment, but GCS Bucket is being used as a logging callback. Please set 'GCS_PATH_SERVICE_ACCOUNT' in the environment." - ) bucket_name = self.BUCKET_NAME path_service_account = self.path_service_account_json vertex_instance = await self.get_or_create_vertex_instance( @@ -224,7 +219,9 @@ class GCSBucketLogger(GCSBucketBase): path_service_account=path_service_account, ) - async def get_or_create_vertex_instance(self, credentials: str) -> VertexBase: + async def get_or_create_vertex_instance( + self, credentials: Optional[str] + ) -> VertexBase: """ This function is used to get the Vertex instance for the GCS Bucket Logger. It checks if the Vertex instance is already created and cached, if not it creates a new instance and caches it. @@ -233,15 +230,27 @@ class GCSBucketLogger(GCSBucketBase): VertexBase, ) - if credentials not in self.vertex_instances: + _in_memory_key = self._get_in_memory_key_for_vertex_instance(credentials) + if _in_memory_key not in self.vertex_instances: vertex_instance = VertexBase() await vertex_instance._ensure_access_token_async( credentials=credentials, project_id=None, custom_llm_provider="vertex_ai", ) - self.vertex_instances[credentials] = vertex_instance - return self.vertex_instances[credentials] + self.vertex_instances[_in_memory_key] = vertex_instance + return self.vertex_instances[_in_memory_key] + + def _get_in_memory_key_for_vertex_instance(self, credentials: Optional[str]) -> str: + """ + Returns key to use for caching the Vertex instance in-memory. + + When using Vertex with Key based logging, we need to cache the Vertex instance in-memory. + + - If a credentials string is provided, it is used as the key. + - If no credentials string is provided, "IAM_AUTH" is used as the key. + """ + return credentials or IAM_AUTH_KEY async def download_gcs_object(self, object_name: str, **kwargs): """ diff --git a/litellm/integrations/gcs_bucket/gcs_bucket_base.py b/litellm/integrations/gcs_bucket/gcs_bucket_base.py index dac0790b6..56df3aa80 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket_base.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket_base.py @@ -33,7 +33,7 @@ class GCSBucketBase(CustomLogger): async def construct_request_headers( self, - service_account_json: str, + service_account_json: Optional[str], vertex_instance: Optional[VertexBase] = None, ) -> Dict[str, str]: from litellm import vertex_chat_completion diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index f3edf79d0..9767677cf 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -5,7 +5,5 @@ model_list: api_key: os.environ/OPENAI_API_KEY api_base: https://exampleopenaiendpoint-production.up.railway.app/ - -general_settings: - alerting: ["slack"] - alerting_threshold: 0.001 +litellm_settings: + callbacks: ["gcs_bucket"] \ No newline at end of file diff --git a/tests/local_testing/test_gcs_bucket.py b/tests/local_testing/test_gcs_bucket.py index 67e125593..2b80d04df 100644 --- a/tests/local_testing/test_gcs_bucket.py +++ b/tests/local_testing/test_gcs_bucket.py @@ -519,3 +519,50 @@ async def test_basic_gcs_logging_per_request_with_no_litellm_callback_set(): object_name=gcs_log_id, standard_callback_dynamic_params=standard_callback_dynamic_params, ) + + +@pytest.mark.asyncio +async def test_get_gcs_logging_config_without_service_account(): + """ + Test the get_gcs_logging_config works for IAM auth on GCS + 1. Key based logging without a service account + 2. Default Callback without a service account + """ + + # Mock the load_auth function to avoid credential loading issues + # Test 1: With standard_callback_dynamic_params (with service account) + gcs_logger = GCSBucketLogger() + + dynamic_params = StandardCallbackDynamicParams( + gcs_bucket_name="dynamic-bucket", + ) + config = await gcs_logger.get_gcs_logging_config( + {"standard_callback_dynamic_params": dynamic_params} + ) + + assert config["bucket_name"] == "dynamic-bucket" + assert config["path_service_account"] is None + assert config["vertex_instance"] is not None + + # Test 2: With standard_callback_dynamic_params (without service account - this is IAM auth) + dynamic_params = StandardCallbackDynamicParams( + gcs_bucket_name="dynamic-bucket", gcs_path_service_account=None + ) + + config = await gcs_logger.get_gcs_logging_config( + {"standard_callback_dynamic_params": dynamic_params} + ) + + assert config["bucket_name"] == "dynamic-bucket" + assert config["path_service_account"] is None + assert config["vertex_instance"] is not None + + # Test 5: With missing bucket name + with pytest.raises(ValueError, match="GCS_BUCKET_NAME is not set"): + _old_gcs_bucket_name = os.environ.get("GCS_BUCKET_NAME") + os.environ.pop("GCS_BUCKET_NAME") + gcs_logger = GCSBucketLogger(bucket_name=None) + await gcs_logger.get_gcs_logging_config({}) + + if _old_gcs_bucket_name is not None: + os.environ["GCS_BUCKET_NAME"] = _old_gcs_bucket_name From 8a2b6fd8d278b76b9402bd16d974b3a79ad3fc3c Mon Sep 17 00:00:00 2001 From: Ronen Schaffer Date: Thu, 7 Nov 2024 03:20:31 +0200 Subject: [PATCH 27/67] Update opentelemetry_integration.md - Fix typos (#6618) --- .../docs/observability/opentelemetry_integration.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/my-website/docs/observability/opentelemetry_integration.md b/docs/my-website/docs/observability/opentelemetry_integration.md index 3a27ffc39..ba5ef2ff8 100644 --- a/docs/my-website/docs/observability/opentelemetry_integration.md +++ b/docs/my-website/docs/observability/opentelemetry_integration.md @@ -35,7 +35,7 @@ OTEL_HEADERS="Authorization=Bearer%20" ```shell OTEL_EXPORTER="otlp_http" -OTEL_ENDPOINT="http:/0.0.0.0:4317" +OTEL_ENDPOINT="http://0.0.0.0:4318" ``` @@ -44,7 +44,7 @@ OTEL_ENDPOINT="http:/0.0.0.0:4317" ```shell OTEL_EXPORTER="otlp_grpc" -OTEL_ENDPOINT="http:/0.0.0.0:4317" +OTEL_ENDPOINT="http://0.0.0.0:4317" ``` From 373f9d409ed43764d44ca6023b834b10575067c3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 17:36:48 -0800 Subject: [PATCH 28/67] (fix) ProxyStartup - Check that prisma connection is healthy when starting an instance of LiteLLM (#6627) * fix debug statements * fix assert prisma_client.health_check is called on _setup * asser that _setup_prisma_client is called on startup proxy * fix prisma client health_check * add test_bad_database_url * add strict checks on db startup * temp remove fix to validate if check works as expected * add health_check back * test_proxy_server_prisma_setup_invalid_db --- .circleci/config.yml | 42 ++++++++++++++++++++++++ litellm/proxy/proxy_server.py | 2 ++ litellm/proxy/utils.py | 13 ++------ tests/local_testing/test_proxy_server.py | 39 ++++++++++++++++++++++ 4 files changed, 86 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 063aff4c6..4bb5ebc45 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -986,6 +986,41 @@ jobs: - store_test_results: path: test-results + test_bad_database_url: + machine: + image: ubuntu-2204:2023.10.1 + resource_class: xlarge + working_directory: ~/project + steps: + - checkout + - run: + name: Build Docker image + command: | + docker build -t myapp . -f ./docker/Dockerfile.non_root + - run: + name: Run Docker container with bad DATABASE_URL + command: | + docker run --name my-app \ + -p 4000:4000 \ + -e DATABASE_URL="postgresql://wrong:wrong@wrong:5432/wrong" \ + myapp:latest \ + --port 4000 > docker_output.log 2>&1 || true + - run: + name: Display Docker logs + command: cat docker_output.log + - run: + name: Check for expected error + command: | + if grep -q "Error: P1001: Can't reach database server at" docker_output.log && \ + grep -q "httpx.ConnectError: All connection attempts failed" docker_output.log && \ + grep -q "ERROR: Application startup failed. Exiting." docker_output.log; then + echo "Expected error found. Test passed." + else + echo "Expected error not found. Test failed." + cat docker_output.log + exit 1 + fi + workflows: version: 2 build_and_test: @@ -1082,11 +1117,18 @@ workflows: only: - main - /litellm_.*/ + - test_bad_database_url: + filters: + branches: + only: + - main + - /litellm_.*/ - publish_to_pypi: requires: - local_testing - build_and_test - load_testing + - test_bad_database_url - llm_translation_testing - logging_testing - litellm_router_testing diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 8edf2cee3..ce58c4d75 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3052,6 +3052,8 @@ class ProxyStartupEvent: prisma_client.check_view_exists() ) # check if all necessary views exist. Don't block execution + # run a health check to ensure the DB is ready + await prisma_client.health_check() return prisma_client diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 44243cab0..44e9d151d 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -1083,19 +1083,16 @@ class PrismaClient: proxy_logging_obj: ProxyLogging, http_client: Optional[Any] = None, ): - verbose_proxy_logger.debug( - "LiteLLM: DATABASE_URL Set in config, trying to 'pip install prisma'" - ) ## init logging object self.proxy_logging_obj = proxy_logging_obj self.iam_token_db_auth: Optional[bool] = str_to_bool( os.getenv("IAM_TOKEN_DB_AUTH") ) + verbose_proxy_logger.debug("Creating Prisma Client..") try: from prisma import Prisma # type: ignore except Exception: raise Exception("Unable to find Prisma binaries.") - verbose_proxy_logger.debug("Connecting Prisma Client to DB..") if http_client is not None: self.db = PrismaWrapper( original_prisma=Prisma(http=http_client), @@ -1114,7 +1111,7 @@ class PrismaClient: else False ), ) # Client to connect to Prisma db - verbose_proxy_logger.debug("Success - Connected Prisma Client to DB") + verbose_proxy_logger.debug("Success - Created Prisma Client") def hash_token(self, token: str): # Hash the string using SHA-256 @@ -2348,11 +2345,7 @@ class PrismaClient: """ start_time = time.time() try: - sql_query = """ - SELECT 1 - FROM "LiteLLM_VerificationToken" - LIMIT 1 - """ + sql_query = "SELECT 1" # Execute the raw query # The asterisk before `user_id_list` unpacks the list into separate arguments diff --git a/tests/local_testing/test_proxy_server.py b/tests/local_testing/test_proxy_server.py index 808b10db3..76cdf1a54 100644 --- a/tests/local_testing/test_proxy_server.py +++ b/tests/local_testing/test_proxy_server.py @@ -1911,6 +1911,7 @@ async def test_proxy_server_prisma_setup(): mock_client = mock_prisma_client.return_value # This is the mocked instance mock_client.connect = AsyncMock() # Mock the connect method mock_client.check_view_exists = AsyncMock() # Mock the check_view_exists method + mock_client.health_check = AsyncMock() # Mock the health_check method await ProxyStartupEvent._setup_prisma_client( database_url=os.getenv("DATABASE_URL"), @@ -1921,3 +1922,41 @@ async def test_proxy_server_prisma_setup(): # Verify our mocked methods were called mock_client.connect.assert_called_once() mock_client.check_view_exists.assert_called_once() + + # Note: This is REALLY IMPORTANT to check that the health check is called + # This is how we ensure the DB is ready before proceeding + mock_client.health_check.assert_called_once() + + +@pytest.mark.asyncio +async def test_proxy_server_prisma_setup_invalid_db(): + """ + PROD TEST: Test that proxy server startup fails when it's unable to connect to the database + + Think 2-3 times before editing / deleting this test, it's important for PROD + """ + from litellm.proxy.proxy_server import ProxyStartupEvent + from litellm.proxy.utils import ProxyLogging + from litellm.caching import DualCache + + user_api_key_cache = DualCache() + invalid_db_url = "postgresql://invalid:invalid@localhost:5432/nonexistent" + + _old_db_url = os.getenv("DATABASE_URL") + os.environ["DATABASE_URL"] = invalid_db_url + + with pytest.raises(Exception) as exc_info: + await ProxyStartupEvent._setup_prisma_client( + database_url=invalid_db_url, + proxy_logging_obj=ProxyLogging(user_api_key_cache=user_api_key_cache), + user_api_key_cache=user_api_key_cache, + ) + print("GOT EXCEPTION=", exc_info) + + assert "httpx.ConnectError" in str(exc_info.value) + + # # Verify the error message indicates a database connection issue + # assert any(x in str(exc_info.value).lower() for x in ["database", "connection", "authentication"]) + + if _old_db_url: + os.environ["DATABASE_URL"] = _old_db_url From 5713f3b5d73a5a8133678adefe0189b858e4a64e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 18:27:36 -0800 Subject: [PATCH 29/67] fix test_get_gcs_logging_config_without_service_account --- tests/local_testing/test_gcs_bucket.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/local_testing/test_gcs_bucket.py b/tests/local_testing/test_gcs_bucket.py index 2b80d04df..f7cbff809 100644 --- a/tests/local_testing/test_gcs_bucket.py +++ b/tests/local_testing/test_gcs_bucket.py @@ -528,6 +528,8 @@ async def test_get_gcs_logging_config_without_service_account(): 1. Key based logging without a service account 2. Default Callback without a service account """ + _old_gcs_bucket_name = os.environ.get("GCS_BUCKET_NAME") + os.environ.pop("GCS_BUCKET_NAME") # Mock the load_auth function to avoid credential loading issues # Test 1: With standard_callback_dynamic_params (with service account) @@ -559,10 +561,8 @@ async def test_get_gcs_logging_config_without_service_account(): # Test 5: With missing bucket name with pytest.raises(ValueError, match="GCS_BUCKET_NAME is not set"): - _old_gcs_bucket_name = os.environ.get("GCS_BUCKET_NAME") - os.environ.pop("GCS_BUCKET_NAME") gcs_logger = GCSBucketLogger(bucket_name=None) await gcs_logger.get_gcs_logging_config({}) - if _old_gcs_bucket_name is not None: - os.environ["GCS_BUCKET_NAME"] = _old_gcs_bucket_name + if _old_gcs_bucket_name is not None: + os.environ["GCS_BUCKET_NAME"] = _old_gcs_bucket_name From eb171e6d955bcb01fc2408417247e73a9e86e41e Mon Sep 17 00:00:00 2001 From: superpoussin22 Date: Thu, 7 Nov 2024 03:46:16 +0100 Subject: [PATCH 30/67] Update team_budgets.md (#6611) --- docs/my-website/docs/proxy/team_budgets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/proxy/team_budgets.md b/docs/my-website/docs/proxy/team_budgets.md index 22b5242a0..3942bfa50 100644 --- a/docs/my-website/docs/proxy/team_budgets.md +++ b/docs/my-website/docs/proxy/team_budgets.md @@ -56,7 +56,7 @@ Possible values for `budget_duration` | `budget_duration="1m"` | every 1 min | | `budget_duration="1h"` | every 1 hour | | `budget_duration="1d"` | every 1 day | -| `budget_duration="1mo"` | every 1 month | +| `budget_duration="30d"` | every 1 month | ### 2. Create a key for the `team` From e3519aa5ae4120ac2ad77e335b607cf361d5ce98 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 20:04:41 -0800 Subject: [PATCH 31/67] (feat) Allow failed DB connection requests to allow virtual keys with `allow_failed_db_requests` (#6605) * fix use helper for _handle_failed_db_connection_for_get_key_object * track ALLOW_FAILED_DB_REQUESTS on prometheus * fix allow_failed_db_requests check * fix allow_requests_on_db_unavailable * fix allow_requests_on_db_unavailable * docs allow_requests_on_db_unavailable * identify user_id as litellm_proxy_admin_name when DB is failing * test_handle_failed_db_connection * fix test_user_api_key_auth_db_unavailable * update best practices for prod doc * update best practices for prod * fix handle db failure --- docs/my-website/docs/proxy/configs.md | 5 + docs/my-website/docs/proxy/prod.md | 28 ++++- litellm/proxy/auth/auth_checks.py | 45 +++++++- litellm/proxy/proxy_config.yaml | 9 +- tests/local_testing/test_auth_checks.py | 35 ++++++ .../local_testing/test_key_generate_prisma.py | 105 ++++++++++++++++++ 6 files changed, 224 insertions(+), 3 deletions(-) diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md index 28b0b67e3..1adc4943d 100644 --- a/docs/my-website/docs/proxy/configs.md +++ b/docs/my-website/docs/proxy/configs.md @@ -692,9 +692,13 @@ general_settings: allowed_routes: ["route1", "route2"] # list of allowed proxy API routes - a user can access. (currently JWT-Auth only) key_management_system: google_kms # either google_kms or azure_kms master_key: string + + # Database Settings database_url: string database_connection_pool_limit: 0 # default 100 database_connection_timeout: 0 # default 60s + allow_requests_on_db_unavailable: boolean # if true, will allow requests that can not connect to the DB to verify Virtual Key to still work + custom_auth: string max_parallel_requests: 0 # the max parallel requests allowed per deployment global_max_parallel_requests: 0 # the max parallel requests allowed on the proxy all up @@ -766,6 +770,7 @@ general_settings: | database_url | string | The URL for the database connection [Set up Virtual Keys](virtual_keys) | | database_connection_pool_limit | integer | The limit for database connection pool [Setting DB Connection Pool limit](#configure-db-pool-limits--connection-timeouts) | | database_connection_timeout | integer | The timeout for database connections in seconds [Setting DB Connection Pool limit, timeout](#configure-db-pool-limits--connection-timeouts) | +| allow_requests_on_db_unavailable | boolean | If true, allows requests to succeed even if DB is unreachable. **Only use this if running LiteLLM in your VPC** This will allow requests to work even when LiteLLM cannot connect to the DB to verify a Virtual Key | | custom_auth | string | Write your own custom authentication logic [Doc Custom Auth](virtual_keys#custom-auth) | | max_parallel_requests | integer | The max parallel requests allowed per deployment | | global_max_parallel_requests | integer | The max parallel requests allowed on the proxy overall | diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md index 99fa19e77..66c719e5d 100644 --- a/docs/my-website/docs/proxy/prod.md +++ b/docs/my-website/docs/proxy/prod.md @@ -20,6 +20,10 @@ general_settings: proxy_batch_write_at: 60 # Batch write spend updates every 60s database_connection_pool_limit: 10 # limit the number of database connections to = MAX Number of DB Connections/Number of instances of litellm proxy (Around 10-20 is good number) + # OPTIONAL Best Practices + disable_spend_logs: True # turn off writing each transaction to the db. We recommend doing this is you don't need to see Usage on the LiteLLM UI and are tracking metrics via Prometheus + allow_requests_on_db_unavailable: True # Only USE when running LiteLLM on your VPC. Allow requests to still be processed even if the DB is unavailable. We recommend doing this if you're running LiteLLM on VPC that cannot be accessed from the public internet. + litellm_settings: request_timeout: 600 # raise Timeout error if call takes longer than 600 seconds. Default value is 6000seconds if not set set_verbose: False # Switch off Debug Logging, ensure your logs do not have any debugging on @@ -86,7 +90,29 @@ Set `export LITELLM_MODE="PRODUCTION"` This disables the load_dotenv() functionality, which will automatically load your environment credentials from the local `.env`. -## 5. Set LiteLLM Salt Key +## 5. If running LiteLLM on VPC, gracefully handle DB unavailability + +This will allow LiteLLM to continue to process requests even if the DB is unavailable. This is better handling for DB unavailability. + +**WARNING: Only do this if you're running LiteLLM on VPC, that cannot be accessed from the public internet.** + +```yaml +general_settings: + allow_requests_on_db_unavailable: True +``` + +## 6. Disable spend_logs if you're not using the LiteLLM UI + +By default LiteLLM will write every request to the `LiteLLM_SpendLogs` table. This is used for viewing Usage on the LiteLLM UI. + +If you're not viewing Usage on the LiteLLM UI (most users use Prometheus when this is disabled), you can disable spend_logs by setting `disable_spend_logs` to `True`. + +```yaml +general_settings: + disable_spend_logs: True +``` + +## 7. Set LiteLLM Salt Key If you plan on using the DB, set a salt key for encrypting/decrypting variables in the DB. diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index e00d494d9..dcc1c5e90 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -13,6 +13,7 @@ import traceback from datetime import datetime from typing import TYPE_CHECKING, Any, List, Literal, Optional +import httpx from pydantic import BaseModel import litellm @@ -717,12 +718,54 @@ async def get_key_object( ) return _response - except Exception: + except httpx.ConnectError as e: + return await _handle_failed_db_connection_for_get_key_object(e=e) + except Exception as e: raise Exception( f"Key doesn't exist in db. key={hashed_token}. Create key via `/key/generate` call." ) +async def _handle_failed_db_connection_for_get_key_object( + e: Exception, +) -> UserAPIKeyAuth: + """ + Handles httpx.ConnectError when reading a Virtual Key from LiteLLM DB + + Use this if you don't want failed DB queries to block LLM API reqiests + + Returns: + - UserAPIKeyAuth: If general_settings.allow_requests_on_db_unavailable is True + + Raises: + - Orignal Exception in all other cases + """ + from litellm.proxy.proxy_server import ( + general_settings, + litellm_proxy_admin_name, + proxy_logging_obj, + ) + + # If this flag is on, requests failing to connect to the DB will be allowed + if general_settings.get("allow_requests_on_db_unavailable", False) is True: + # log this as a DB failure on prometheus + proxy_logging_obj.service_logging_obj.service_failure_hook( + service=ServiceTypes.DB, + call_type="get_key_object", + error=e, + duration=0.0, + ) + + return UserAPIKeyAuth( + key_name="failed-to-connect-to-db", + token="failed-to-connect-to-db", + user_id=litellm_proxy_admin_name, + ) + else: + # raise the original exception, the wrapper on `get_key_object` handles logging db failure to prometheus + raise e + + @log_to_opentelemetry async def get_org_object( org_id: str, diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 9767677cf..694c1613d 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -5,5 +5,12 @@ model_list: api_key: os.environ/OPENAI_API_KEY api_base: https://exampleopenaiendpoint-production.up.railway.app/ + litellm_settings: - callbacks: ["gcs_bucket"] \ No newline at end of file + callbacks: ["prometheus"] + service_callback: ["prometheus_system"] + + +general_settings: + allow_requests_on_db_unavailable: true + diff --git a/tests/local_testing/test_auth_checks.py b/tests/local_testing/test_auth_checks.py index 3ea113c28..f1683a153 100644 --- a/tests/local_testing/test_auth_checks.py +++ b/tests/local_testing/test_auth_checks.py @@ -12,6 +12,11 @@ sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path import pytest, litellm +import httpx +from litellm.proxy.auth.auth_checks import ( + _handle_failed_db_connection_for_get_key_object, +) +from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy.auth.auth_checks import get_end_user_object from litellm.caching.caching import DualCache from litellm.proxy._types import LiteLLM_EndUserTable, LiteLLM_BudgetTable @@ -60,3 +65,33 @@ async def test_get_end_user_object(customer_spend, customer_budget): customer_spend, customer_budget, str(e) ) ) + + +@pytest.mark.asyncio +async def test_handle_failed_db_connection(): + """ + Test cases: + 1. When allow_requests_on_db_unavailable=True -> return UserAPIKeyAuth + 2. When allow_requests_on_db_unavailable=False -> raise original error + """ + from litellm.proxy.proxy_server import general_settings, litellm_proxy_admin_name + + # Test case 1: allow_requests_on_db_unavailable=True + general_settings["allow_requests_on_db_unavailable"] = True + mock_error = httpx.ConnectError("Failed to connect to DB") + + result = await _handle_failed_db_connection_for_get_key_object(e=mock_error) + + assert isinstance(result, UserAPIKeyAuth) + assert result.key_name == "failed-to-connect-to-db" + assert result.token == "failed-to-connect-to-db" + assert result.user_id == litellm_proxy_admin_name + + # Test case 2: allow_requests_on_db_unavailable=False + general_settings["allow_requests_on_db_unavailable"] = False + + with pytest.raises(httpx.ConnectError) as exc_info: + await _handle_failed_db_connection_for_get_key_object(e=mock_error) + print("_handle_failed_db_connection_for_get_key_object got exception", exc_info) + + assert str(exc_info.value) == "Failed to connect to DB" diff --git a/tests/local_testing/test_key_generate_prisma.py b/tests/local_testing/test_key_generate_prisma.py index e009e214c..66b9c7b8f 100644 --- a/tests/local_testing/test_key_generate_prisma.py +++ b/tests/local_testing/test_key_generate_prisma.py @@ -28,6 +28,7 @@ from datetime import datetime from dotenv import load_dotenv from fastapi import Request from fastapi.routing import APIRoute +import httpx load_dotenv() import io @@ -51,6 +52,7 @@ from litellm.proxy.management_endpoints.internal_user_endpoints import ( user_info, user_update, ) +from litellm.proxy.auth.auth_checks import get_key_object from litellm.proxy.management_endpoints.key_management_endpoints import ( delete_key_fn, generate_key_fn, @@ -3307,3 +3309,106 @@ async def test_service_accounts(prisma_client): print("response from user_api_key_auth", result) setattr(litellm.proxy.proxy_server, "general_settings", {}) + + +@pytest.mark.asyncio +async def test_user_api_key_auth_db_unavailable(): + """ + Test that user_api_key_auth handles DB connection failures appropriately when: + 1. DB connection fails during token validation + 2. allow_requests_on_db_unavailable=True + """ + litellm.set_verbose = True + + # Mock dependencies + class MockPrismaClient: + async def get_data(self, *args, **kwargs): + print("MockPrismaClient.get_data() called") + raise httpx.ConnectError("Failed to connect to DB") + + async def connect(self): + print("MockPrismaClient.connect() called") + pass + + class MockDualCache: + async def async_get_cache(self, *args, **kwargs): + return None + + async def async_set_cache(self, *args, **kwargs): + pass + + async def set_cache(self, *args, **kwargs): + pass + + # Set up test environment + setattr(litellm.proxy.proxy_server, "prisma_client", MockPrismaClient()) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", MockDualCache()) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr( + litellm.proxy.proxy_server, + "general_settings", + {"allow_requests_on_db_unavailable": True}, + ) + + # Create test request + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # Run test with a sample API key + result = await user_api_key_auth( + request=request, + api_key="Bearer sk-123456789", + ) + + # Verify results + assert isinstance(result, UserAPIKeyAuth) + assert result.key_name == "failed-to-connect-to-db" + assert result.user_id == litellm.proxy.proxy_server.litellm_proxy_admin_name + + +@pytest.mark.asyncio +async def test_user_api_key_auth_db_unavailable_not_allowed(): + """ + Test that user_api_key_auth raises an exception when: + This is default behavior + + 1. DB connection fails during token validation + 2. allow_requests_on_db_unavailable=False (default behavior) + """ + + # Mock dependencies + class MockPrismaClient: + async def get_data(self, *args, **kwargs): + print("MockPrismaClient.get_data() called") + raise httpx.ConnectError("Failed to connect to DB") + + async def connect(self): + print("MockPrismaClient.connect() called") + pass + + class MockDualCache: + async def async_get_cache(self, *args, **kwargs): + return None + + async def async_set_cache(self, *args, **kwargs): + pass + + async def set_cache(self, *args, **kwargs): + pass + + # Set up test environment + setattr(litellm.proxy.proxy_server, "prisma_client", MockPrismaClient()) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", MockDualCache()) + setattr(litellm.proxy.proxy_server, "general_settings", {}) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + + # Create test request + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # Run test with a sample API key + with pytest.raises(litellm.proxy._types.ProxyException): + await user_api_key_auth( + request=request, + api_key="Bearer sk-123456789", + ) From c6da749997019b6b6204dcff23cc0ee242058bad Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 20:49:44 -0800 Subject: [PATCH 32/67] fix test_get_gcs_logging_config_without_service_account --- tests/local_testing/test_gcs_bucket.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/local_testing/test_gcs_bucket.py b/tests/local_testing/test_gcs_bucket.py index f7cbff809..fed287bd0 100644 --- a/tests/local_testing/test_gcs_bucket.py +++ b/tests/local_testing/test_gcs_bucket.py @@ -531,6 +531,9 @@ async def test_get_gcs_logging_config_without_service_account(): _old_gcs_bucket_name = os.environ.get("GCS_BUCKET_NAME") os.environ.pop("GCS_BUCKET_NAME") + _old_gcs_service_acct = os.environ.get("GCS_PATH_SERVICE_ACCOUNT") + os.environ.pop("GCS_PATH_SERVICE_ACCOUNT") + # Mock the load_auth function to avoid credential loading issues # Test 1: With standard_callback_dynamic_params (with service account) gcs_logger = GCSBucketLogger() @@ -566,3 +569,6 @@ async def test_get_gcs_logging_config_without_service_account(): if _old_gcs_bucket_name is not None: os.environ["GCS_BUCKET_NAME"] = _old_gcs_bucket_name + + if _old_gcs_service_acct is not None: + os.environ["GCS_PATH_SERVICE_ACCOUNT"] = _old_gcs_service_acct From 9cb02513b4f832492cc848cc01cd7cc1926418ec Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Wed, 6 Nov 2024 20:50:52 -0800 Subject: [PATCH 33/67] fix code quality check --- litellm/proxy/auth/auth_checks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index dcc1c5e90..8d504c739 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -720,7 +720,7 @@ async def get_key_object( return _response except httpx.ConnectError as e: return await _handle_failed_db_connection_for_get_key_object(e=e) - except Exception as e: + except Exception: raise Exception( f"Key doesn't exist in db. key={hashed_token}. Create key via `/key/generate` call." ) From 44840d615d07c8b3f514d00d7cdf8b816b3fa0f1 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Thu, 7 Nov 2024 23:57:37 +0530 Subject: [PATCH 34/67] fix(pattern_match_deployments.py): default to user input if unable to map based on wildcards (#6646) --- .../router_utils/pattern_match_deployments.py | 22 +++++++++++++++---- .../test_router_pattern_matching.py | 10 ++++++++- .../test_router_helper_utils.py | 15 ++++++++++++- 3 files changed, 41 insertions(+), 6 deletions(-) diff --git a/litellm/router_utils/pattern_match_deployments.py b/litellm/router_utils/pattern_match_deployments.py index 039af635c..3896c3a95 100644 --- a/litellm/router_utils/pattern_match_deployments.py +++ b/litellm/router_utils/pattern_match_deployments.py @@ -117,11 +117,26 @@ class PatternMatchRouter: E.g.: + Case 1: model_name: llmengine/* (can be any regex pattern or wildcard pattern) litellm_params: model: openai/* if model_name = "llmengine/foo" -> model = "openai/foo" + + Case 2: + model_name: llmengine/fo::*::static::* + litellm_params: + model: openai/fo::*::static::* + + if model_name = "llmengine/foo::bar::static::baz" -> model = "openai/foo::bar::static::baz" + + Case 3: + model_name: *meta.llama3* + litellm_params: + model: bedrock/meta.llama3* + + if model_name = "hello-world-meta.llama3-70b" -> model = "bedrock/meta.llama3-70b" """ ## BASE CASE: if the deployment model name does not contain a wildcard, return the deployment model name @@ -134,10 +149,9 @@ class PatternMatchRouter: dynamic_segments = matched_pattern.groups() if len(dynamic_segments) > wildcard_count: - raise ValueError( - f"More wildcards in the deployment model name than the pattern. Wildcard count: {wildcard_count}, dynamic segments count: {len(dynamic_segments)}" - ) - + return ( + matched_pattern.string + ) # default to the user input, if unable to map based on wildcards. # Replace the corresponding wildcards in the litellm model pattern with extracted segments for segment in dynamic_segments: litellm_deployment_litellm_model = litellm_deployment_litellm_model.replace( diff --git a/tests/local_testing/test_router_pattern_matching.py b/tests/local_testing/test_router_pattern_matching.py index 2a6f66105..914e8ecfa 100644 --- a/tests/local_testing/test_router_pattern_matching.py +++ b/tests/local_testing/test_router_pattern_matching.py @@ -182,6 +182,14 @@ async def test_route_with_no_matching_pattern(): ) assert result.choices[0].message.content == "Works" + ## WORKS + result = await router.acompletion( + model="meta.llama3-70b-instruct-v1:0", + messages=[{"role": "user", "content": "Hello, world!"}], + mock_response="Works", + ) + assert result.choices[0].message.content == "Works" + ## FAILS with pytest.raises(litellm.BadRequestError) as e: await router.acompletion( @@ -198,6 +206,7 @@ async def test_route_with_no_matching_pattern(): input="Hello, world!", ) + def test_router_pattern_match_e2e(): """ Tests the end to end flow of the router @@ -228,4 +237,3 @@ def test_router_pattern_match_e2e(): "model": "gpt-4o", "messages": [{"role": "user", "content": "Hello, how are you?"}], } - diff --git a/tests/router_unit_tests/test_router_helper_utils.py b/tests/router_unit_tests/test_router_helper_utils.py index cabb4a899..7e2daa9b5 100644 --- a/tests/router_unit_tests/test_router_helper_utils.py +++ b/tests/router_unit_tests/test_router_helper_utils.py @@ -960,6 +960,18 @@ def test_replace_model_in_jsonl(model_list): "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo", ), + ( + "bedrock/meta.llama3-70b", + "*meta.llama3*", + "bedrock/meta.llama3-*", + "bedrock/meta.llama3-70b", + ), + ( + "meta.llama3-70b", + "*meta.llama3*", + "bedrock/meta.llama3-*", + "meta.llama3-70b", + ), ], ) def test_pattern_match_deployment_set_model_name( @@ -1000,9 +1012,10 @@ def test_pattern_match_deployment_set_model_name( for model in updated_models: assert model["litellm_params"]["model"] == expected_model + @pytest.mark.asyncio async def test_pass_through_moderation_endpoint_factory(model_list): router = Router(model_list=model_list) response = await router._pass_through_moderation_endpoint_factory( original_function=litellm.amoderation, input="this is valid good text" - ) \ No newline at end of file + ) From 27e18358abcbcb973356cb067451a22f70b8daef Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Fri, 8 Nov 2024 00:55:57 +0530 Subject: [PATCH 35/67] =?UTF-8?q?fix(pattern=5Fmatch=5Fdeployments.py):=20?= =?UTF-8?q?default=20to=20user=20input=20if=20unable=20to=E2=80=A6=20(#663?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(pattern_match_deployments.py): default to user input if unable to map based on wildcards * test: fix test * test: reset test name * test: update conftest to reload proxy server module between tests * ci(config.yml): move langfuse out of local_testing reduce ci/cd time * ci(config.yml): cleanup langfuse ci/cd tests * fix: update test to not use global proxy_server app module * ci: move caching to a separate test pipeline speed up ci pipeline * test: update conftest to check if proxy_server attr exists before reloading * build(conftest.py): don't block on inability to reload proxy_server * ci(config.yml): update caching unit test filter to work on 'cache' keyword as well * fix(encrypt_decrypt_utils.py): use function to get salt key * test: mark flaky test * test: handle anthropic overloaded errors * refactor: create separate ci/cd pipeline for proxy unit tests make ci/cd faster * ci(config.yml): add litellm_proxy_unit_testing to build_and_test jobs * ci(config.yml): generate prisma binaries for proxy unit tests * test: readd vertex_key.json * ci(config.yml): remove `-s` from proxy_unit_test cmd speed up test * ci: remove any 'debug' logging flag speed up ci pipeline * test: fix test * test(test_braintrust.py): rerun * test: add delay for braintrust test --- .circleci/config.yml | 325 +++++++- litellm/integrations/braintrust_logging.py | 5 +- .../common_utils/encrypt_decrypt_utils.py | 27 +- .../test_anthropic_completion.py | 18 +- .../test_bedrock_completion.py | 0 .../test_prompt_factory.py | 0 tests/local_testing/conftest.py | 6 + tests/local_testing/test_acooldowns_router.py | 1 - tests/local_testing/test_braintrust.py | 16 +- tests/local_testing/test_completion.py | 30 - .../test_pass_through_endpoints.py | 18 +- tests/local_testing/test_router_timeout.py | 1 - tests/local_testing/test_user_api_key_auth.py | 11 +- .../adroit-crow-413218-bc47f303efc9.json | 13 + tests/proxy_unit_tests/azure_fine_tune.jsonl | 12 + .../batch_job_results_furniture.jsonl | 2 + tests/proxy_unit_tests/conftest copy.py | 60 ++ tests/proxy_unit_tests/conftest.py | 60 ++ tests/proxy_unit_tests/data_map.txt | Bin 0 -> 3939 bytes tests/proxy_unit_tests/eagle.wav | Bin 0 -> 55852 bytes .../example_config_yaml/aliases_config.yaml | 30 + .../example_config_yaml/azure_config.yaml | 15 + .../example_config_yaml/cache_no_params.yaml | 7 + .../cache_with_params.yaml | 11 + .../config_with_env_vars.yaml | 48 ++ .../example_config_yaml/langfuse_config.yaml | 7 + .../example_config_yaml/load_balancer.yaml | 28 + .../opentelemetry_config.yaml | 7 + .../example_config_yaml/simple_config.yaml | 4 + tests/proxy_unit_tests/gettysburg.wav | Bin 0 -> 775192 bytes tests/proxy_unit_tests/large_text.py | 112 +++ tests/proxy_unit_tests/log.txt | 104 +++ .../proxy_unit_tests/messages_with_counts.py | 733 ++++++++++++++++++ tests/proxy_unit_tests/model_cost.json | 3 + .../openai_batch_completions.jsonl | 2 + .../openai_batch_completions_router.jsonl | 3 + tests/proxy_unit_tests/speech_vertex.mp3 | Bin 0 -> 133244 bytes .../test_aproxy_startup.py | 0 .../test_audit_logs_proxy.py | 0 .../test_banned_keyword_list.py | 0 .../test_configs/custom_auth.py | 22 + .../test_configs/custom_callbacks.py | 121 +++ .../test_configs/test_bad_config.yaml | 21 + ...st_cloudflare_azure_with_cache_config.yaml | 17 + .../test_configs/test_config.yaml | 28 + .../test_configs/test_config_custom_auth.yaml | 11 + .../test_configs/test_config_no_auth.yaml | 127 +++ .../test_configs/test_custom_logger.yaml | 26 + .../test_configs/test_guardrails_config.yaml | 32 + .../test_custom_callback_input.py | 359 +++++++++ .../test_deployed_proxy_keygen.py | 0 .../test_jwt.py | 12 +- .../test_key_generate_dynamodb.py | 0 .../test_key_generate_prisma.py | 0 .../test_model_response_typing/server.py | 23 + .../test_model_response_typing/test.py | 14 + .../test_proxy_config_unit_test.py | 0 .../test_proxy_custom_auth.py | 0 .../test_proxy_custom_logger.py | 0 .../test_proxy_encrypt_decrypt.py | 1 + .../test_proxy_exception_mapping.py | 0 .../test_proxy_gunicorn.py | 0 .../test_proxy_pass_user_config.py | 0 .../test_proxy_reject_logging.py | 0 .../test_proxy_routes.py | 0 .../test_proxy_server.py | 0 .../test_proxy_server_caching.py | 0 .../test_proxy_server_cost.py | 0 .../test_proxy_server_keys.py | 0 .../test_proxy_server_langfuse.py | 0 .../test_proxy_server_spend.py | 0 .../test_proxy_setting_guardrails.py | 1 + .../test_proxy_token_counter.py | 0 .../test_proxy_utils.py | 0 .../test_user_api_key_auth.py | 389 ++++++++++ tests/proxy_unit_tests/vertex_key.json | 13 + .../test_router_endpoints.py | 1 + 77 files changed, 2861 insertions(+), 76 deletions(-) rename tests/{local_testing => llm_translation}/test_bedrock_completion.py (100%) rename tests/{local_testing => llm_translation}/test_prompt_factory.py (100%) create mode 100644 tests/proxy_unit_tests/adroit-crow-413218-bc47f303efc9.json create mode 100644 tests/proxy_unit_tests/azure_fine_tune.jsonl create mode 100644 tests/proxy_unit_tests/batch_job_results_furniture.jsonl create mode 100644 tests/proxy_unit_tests/conftest copy.py create mode 100644 tests/proxy_unit_tests/conftest.py create mode 100644 tests/proxy_unit_tests/data_map.txt create mode 100644 tests/proxy_unit_tests/eagle.wav create mode 100644 tests/proxy_unit_tests/example_config_yaml/aliases_config.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/azure_config.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/cache_no_params.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/cache_with_params.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/config_with_env_vars.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/langfuse_config.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/load_balancer.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/opentelemetry_config.yaml create mode 100644 tests/proxy_unit_tests/example_config_yaml/simple_config.yaml create mode 100644 tests/proxy_unit_tests/gettysburg.wav create mode 100644 tests/proxy_unit_tests/large_text.py create mode 100644 tests/proxy_unit_tests/log.txt create mode 100644 tests/proxy_unit_tests/messages_with_counts.py create mode 100644 tests/proxy_unit_tests/model_cost.json create mode 100644 tests/proxy_unit_tests/openai_batch_completions.jsonl create mode 100644 tests/proxy_unit_tests/openai_batch_completions_router.jsonl create mode 100644 tests/proxy_unit_tests/speech_vertex.mp3 rename tests/{local_testing => proxy_unit_tests}/test_aproxy_startup.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_audit_logs_proxy.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_banned_keyword_list.py (100%) create mode 100644 tests/proxy_unit_tests/test_configs/custom_auth.py create mode 100644 tests/proxy_unit_tests/test_configs/custom_callbacks.py create mode 100644 tests/proxy_unit_tests/test_configs/test_bad_config.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_cloudflare_azure_with_cache_config.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_config.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_config_custom_auth.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_config_no_auth.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_custom_logger.yaml create mode 100644 tests/proxy_unit_tests/test_configs/test_guardrails_config.yaml create mode 100644 tests/proxy_unit_tests/test_custom_callback_input.py rename tests/{local_testing => proxy_unit_tests}/test_deployed_proxy_keygen.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_jwt.py (98%) rename tests/{local_testing => proxy_unit_tests}/test_key_generate_dynamodb.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_key_generate_prisma.py (100%) create mode 100644 tests/proxy_unit_tests/test_model_response_typing/server.py create mode 100644 tests/proxy_unit_tests/test_model_response_typing/test.py rename tests/{local_testing => proxy_unit_tests}/test_proxy_config_unit_test.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_custom_auth.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_custom_logger.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_encrypt_decrypt.py (95%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_exception_mapping.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_gunicorn.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_pass_user_config.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_reject_logging.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_routes.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server_caching.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server_cost.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server_keys.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server_langfuse.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_server_spend.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_setting_guardrails.py (97%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_token_counter.py (100%) rename tests/{local_testing => proxy_unit_tests}/test_proxy_utils.py (100%) create mode 100644 tests/proxy_unit_tests/test_user_api_key_auth.py create mode 100644 tests/proxy_unit_tests/vertex_key.json diff --git a/.circleci/config.yml b/.circleci/config.yml index 4bb5ebc45..8e63cfe25 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -103,7 +103,7 @@ jobs: command: | pwd ls - python -m pytest -vv tests/local_testing --cov=litellm --cov-report=xml -x --junitxml=test-results/junit.xml --durations=5 -k "not test_python_38.py and not router and not assistants" + python -m pytest -vv tests/local_testing --cov=litellm --cov-report=xml -x --junitxml=test-results/junit.xml --durations=5 -k "not test_python_38.py and not router and not assistants and not langfuse and not caching and not cache" no_output_timeout: 120m - run: name: Rename the coverage files @@ -119,6 +119,204 @@ jobs: paths: - local_testing_coverage.xml - local_testing_coverage + langfuse_logging_unit_tests: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + + - run: + name: Show git commit hash + command: | + echo "Git commit hash: $CIRCLE_SHA1" + + - restore_cache: + keys: + - v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r .circleci/requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-asyncio==0.21.1" + pip install "pytest-cov==5.0.0" + pip install mypy + pip install "google-generativeai==0.3.2" + pip install "google-cloud-aiplatform==1.43.0" + pip install pyarrow + pip install "boto3==1.34.34" + pip install "aioboto3==12.3.0" + pip install langchain + pip install lunary==0.2.5 + pip install "azure-identity==1.16.1" + pip install "langfuse==2.45.0" + pip install "logfire==0.29.0" + pip install numpydoc + pip install traceloop-sdk==0.21.1 + pip install opentelemetry-api==1.25.0 + pip install opentelemetry-sdk==1.25.0 + pip install opentelemetry-exporter-otlp==1.25.0 + pip install openai==1.54.0 + pip install prisma==0.11.0 + pip install "detect_secrets==1.5.0" + pip install "httpx==0.24.1" + pip install "respx==0.21.1" + pip install fastapi + pip install "gunicorn==21.2.0" + pip install "anyio==4.2.0" + pip install "aiodynamo==23.10.1" + pip install "asyncio==3.4.3" + pip install "apscheduler==3.10.4" + pip install "PyGithub==1.59.1" + pip install argon2-cffi + pip install "pytest-mock==3.12.0" + pip install python-multipart + pip install google-cloud-aiplatform + pip install prometheus-client==0.20.0 + pip install "pydantic==2.7.1" + pip install "diskcache==5.6.1" + pip install "Pillow==10.3.0" + pip install "jsonschema==4.22.0" + - save_cache: + paths: + - ./venv + key: v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Run prisma ./docker/entrypoint.sh + command: | + set +e + chmod +x docker/entrypoint.sh + ./docker/entrypoint.sh + set -e + + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/local_testing --cov=litellm --cov-report=xml -x --junitxml=test-results/junit.xml --durations=5 -k "langfuse" + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml langfuse_coverage.xml + mv .coverage langfuse_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . + paths: + - langfuse_coverage.xml + - langfuse_coverage + caching_unit_tests: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + + - run: + name: Show git commit hash + command: | + echo "Git commit hash: $CIRCLE_SHA1" + + - restore_cache: + keys: + - v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r .circleci/requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-asyncio==0.21.1" + pip install "pytest-cov==5.0.0" + pip install mypy + pip install "google-generativeai==0.3.2" + pip install "google-cloud-aiplatform==1.43.0" + pip install pyarrow + pip install "boto3==1.34.34" + pip install "aioboto3==12.3.0" + pip install langchain + pip install lunary==0.2.5 + pip install "azure-identity==1.16.1" + pip install "langfuse==2.45.0" + pip install "logfire==0.29.0" + pip install numpydoc + pip install traceloop-sdk==0.21.1 + pip install opentelemetry-api==1.25.0 + pip install opentelemetry-sdk==1.25.0 + pip install opentelemetry-exporter-otlp==1.25.0 + pip install openai==1.54.0 + pip install prisma==0.11.0 + pip install "detect_secrets==1.5.0" + pip install "httpx==0.24.1" + pip install "respx==0.21.1" + pip install fastapi + pip install "gunicorn==21.2.0" + pip install "anyio==4.2.0" + pip install "aiodynamo==23.10.1" + pip install "asyncio==3.4.3" + pip install "apscheduler==3.10.4" + pip install "PyGithub==1.59.1" + pip install argon2-cffi + pip install "pytest-mock==3.12.0" + pip install python-multipart + pip install google-cloud-aiplatform + pip install prometheus-client==0.20.0 + pip install "pydantic==2.7.1" + pip install "diskcache==5.6.1" + pip install "Pillow==10.3.0" + pip install "jsonschema==4.22.0" + - save_cache: + paths: + - ./venv + key: v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Run prisma ./docker/entrypoint.sh + command: | + set +e + chmod +x docker/entrypoint.sh + ./docker/entrypoint.sh + set -e + + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/local_testing --cov=litellm --cov-report=xml -x --junitxml=test-results/junit.xml --durations=5 -k "caching or cache" + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml caching_coverage.xml + mv .coverage caching_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . + paths: + - caching_coverage.xml + - caching_coverage auth_ui_unit_tests: docker: - image: cimg/python:3.11 @@ -215,6 +413,105 @@ jobs: paths: - litellm_router_coverage.xml - litellm_router_coverage + litellm_proxy_unit_testing: # Runs all tests with the "proxy", "key", "jwt" filenames + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + + - run: + name: Show git commit hash + command: | + echo "Git commit hash: $CIRCLE_SHA1" + + - restore_cache: + keys: + - v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r .circleci/requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-asyncio==0.21.1" + pip install "pytest-cov==5.0.0" + pip install mypy + pip install "google-generativeai==0.3.2" + pip install "google-cloud-aiplatform==1.43.0" + pip install pyarrow + pip install "boto3==1.34.34" + pip install "aioboto3==12.3.0" + pip install langchain + pip install lunary==0.2.5 + pip install "azure-identity==1.16.1" + pip install "langfuse==2.45.0" + pip install "logfire==0.29.0" + pip install numpydoc + pip install traceloop-sdk==0.21.1 + pip install opentelemetry-api==1.25.0 + pip install opentelemetry-sdk==1.25.0 + pip install opentelemetry-exporter-otlp==1.25.0 + pip install openai==1.54.0 + pip install prisma==0.11.0 + pip install "detect_secrets==1.5.0" + pip install "httpx==0.24.1" + pip install "respx==0.21.1" + pip install fastapi + pip install "gunicorn==21.2.0" + pip install "anyio==4.2.0" + pip install "aiodynamo==23.10.1" + pip install "asyncio==3.4.3" + pip install "apscheduler==3.10.4" + pip install "PyGithub==1.59.1" + pip install argon2-cffi + pip install "pytest-mock==3.12.0" + pip install python-multipart + pip install google-cloud-aiplatform + pip install prometheus-client==0.20.0 + pip install "pydantic==2.7.1" + pip install "diskcache==5.6.1" + pip install "Pillow==10.3.0" + pip install "jsonschema==4.22.0" + - save_cache: + paths: + - ./venv + key: v1-dependencies-{{ checksum ".circleci/requirements.txt" }} + - run: + name: Run prisma ./docker/entrypoint.sh + command: | + set +e + chmod +x docker/entrypoint.sh + ./docker/entrypoint.sh + set -e + + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest tests/proxy_unit_tests --cov=litellm --cov-report=xml -vv -x -v --junitxml=test-results/junit.xml --durations=5 + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml litellm_proxy_unit_tests_coverage.xml + mv .coverage litellm_proxy_unit_tests_coverage + # Store test results + - store_test_results: + path: test-results + + - persist_to_workspace: + root: . + paths: + - litellm_proxy_unit_tests_coverage.xml + - litellm_proxy_unit_tests_coverage litellm_assistants_api_testing: # Runs all tests with the "assistants" keyword docker: - image: cimg/python:3.11 @@ -814,7 +1111,7 @@ jobs: python -m venv venv . venv/bin/activate pip install coverage - coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage + coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage coverage xml - codecov/upload: file: ./coverage.xml @@ -1031,6 +1328,24 @@ workflows: only: - main - /litellm_.*/ + - langfuse_logging_unit_tests: + filters: + branches: + only: + - main + - /litellm_.*/ + - caching_unit_tests: + filters: + branches: + only: + - main + - /litellm_.*/ + - litellm_proxy_unit_testing: + filters: + branches: + only: + - main + - /litellm_.*/ - litellm_assistants_api_testing: filters: branches: @@ -1096,6 +1411,9 @@ workflows: - llm_translation_testing - logging_testing - litellm_router_testing + - caching_unit_tests + - litellm_proxy_unit_testing + - langfuse_logging_unit_tests - local_testing - litellm_assistants_api_testing - auth_ui_unit_tests @@ -1132,10 +1450,13 @@ workflows: - llm_translation_testing - logging_testing - litellm_router_testing + - caching_unit_tests + - langfuse_logging_unit_tests - litellm_assistants_api_testing - auth_ui_unit_tests - db_migration_disable_update_check - e2e_ui_testing + - litellm_proxy_unit_testing - installing_litellm_on_python - proxy_logging_guardrails_model_info_tests - proxy_pass_through_endpoint_tests diff --git a/litellm/integrations/braintrust_logging.py b/litellm/integrations/braintrust_logging.py index a0c76a258..6de691093 100644 --- a/litellm/integrations/braintrust_logging.py +++ b/litellm/integrations/braintrust_logging.py @@ -23,7 +23,7 @@ from litellm.llms.custom_httpx.http_handler import ( get_async_httpx_client, httpxSpecialProvider, ) -from litellm.utils import get_formatted_prompt +from litellm.utils import get_formatted_prompt, print_verbose global_braintrust_http_handler = get_async_httpx_client( llm_provider=httpxSpecialProvider.LoggingCallback @@ -229,6 +229,9 @@ class BraintrustLogger(CustomLogger): request_data["metrics"] = metrics try: + print_verbose( + f"global_braintrust_sync_http_handler.post: {global_braintrust_sync_http_handler.post}" + ) global_braintrust_sync_http_handler.post( url=f"{self.api_base}/project_logs/{project_id}/insert", json={"events": [request_data]}, diff --git a/litellm/proxy/common_utils/encrypt_decrypt_utils.py b/litellm/proxy/common_utils/encrypt_decrypt_utils.py index 05a16eda0..4c04942d0 100644 --- a/litellm/proxy/common_utils/encrypt_decrypt_utils.py +++ b/litellm/proxy/common_utils/encrypt_decrypt_utils.py @@ -3,18 +3,25 @@ import os from litellm._logging import verbose_proxy_logger -LITELLM_SALT_KEY = os.getenv("LITELLM_SALT_KEY", None) -verbose_proxy_logger.debug( - "LITELLM_SALT_KEY is None using master_key to encrypt/decrypt secrets stored in DB" -) + +def _get_salt_key(): + from litellm.proxy.proxy_server import master_key + + salt_key = os.getenv("LITELLM_SALT_KEY", None) + + if salt_key is None: + verbose_proxy_logger.debug( + "LITELLM_SALT_KEY is None using master_key to encrypt/decrypt secrets stored in DB" + ) + + salt_key = master_key + + return salt_key def encrypt_value_helper(value: str): - from litellm.proxy.proxy_server import master_key - signing_key = LITELLM_SALT_KEY - if LITELLM_SALT_KEY is None: - signing_key = master_key + signing_key = _get_salt_key() try: if isinstance(value, str): @@ -35,9 +42,7 @@ def encrypt_value_helper(value: str): def decrypt_value_helper(value: str): from litellm.proxy.proxy_server import master_key - signing_key = LITELLM_SALT_KEY - if LITELLM_SALT_KEY is None: - signing_key = master_key + signing_key = _get_salt_key() try: if isinstance(value, str): diff --git a/tests/llm_translation/test_anthropic_completion.py b/tests/llm_translation/test_anthropic_completion.py index 6be41f90d..46f01e0ec 100644 --- a/tests/llm_translation/test_anthropic_completion.py +++ b/tests/llm_translation/test_anthropic_completion.py @@ -548,14 +548,16 @@ def test_anthropic_computer_tool_use(): model = "claude-3-5-sonnet-20241022" messages = [{"role": "user", "content": "Save a picture of a cat to my desktop."}] - resp = completion( - model=model, - messages=messages, - tools=tools, - # headers={"anthropic-beta": "computer-use-2024-10-22"}, - ) - - print(resp) + try: + resp = completion( + model=model, + messages=messages, + tools=tools, + # headers={"anthropic-beta": "computer-use-2024-10-22"}, + ) + print(resp) + except litellm.InternalServerError: + pass @pytest.mark.parametrize( diff --git a/tests/local_testing/test_bedrock_completion.py b/tests/llm_translation/test_bedrock_completion.py similarity index 100% rename from tests/local_testing/test_bedrock_completion.py rename to tests/llm_translation/test_bedrock_completion.py diff --git a/tests/local_testing/test_prompt_factory.py b/tests/llm_translation/test_prompt_factory.py similarity index 100% rename from tests/local_testing/test_prompt_factory.py rename to tests/llm_translation/test_prompt_factory.py diff --git a/tests/local_testing/conftest.py b/tests/local_testing/conftest.py index eca0bc431..1421700c9 100644 --- a/tests/local_testing/conftest.py +++ b/tests/local_testing/conftest.py @@ -26,6 +26,12 @@ def setup_and_teardown(): from litellm import Router importlib.reload(litellm) + try: + if hasattr(litellm, "proxy") and hasattr(litellm.proxy, "proxy_server"): + importlib.reload(litellm.proxy.proxy_server) + except Exception as e: + print(f"Error reloading litellm.proxy.proxy_server: {e}") + import asyncio loop = asyncio.get_event_loop_policy().new_event_loop() diff --git a/tests/local_testing/test_acooldowns_router.py b/tests/local_testing/test_acooldowns_router.py index f186d42f1..df3f493a6 100644 --- a/tests/local_testing/test_acooldowns_router.py +++ b/tests/local_testing/test_acooldowns_router.py @@ -131,7 +131,6 @@ def test_multiple_deployments_parallel(): @pytest.mark.parametrize("sync_mode", [True, False]) @pytest.mark.asyncio async def test_cooldown_same_model_name(sync_mode): - litellm._turn_on_debug() # users could have the same model with different api_base # example # azure/chatgpt, api_base: 1234 diff --git a/tests/local_testing/test_braintrust.py b/tests/local_testing/test_braintrust.py index 7792a0841..adfd47cf3 100644 --- a/tests/local_testing/test_braintrust.py +++ b/tests/local_testing/test_braintrust.py @@ -31,16 +31,15 @@ from litellm.llms.custom_httpx.http_handler import HTTPHandler def test_braintrust_logging(): import litellm + litellm.set_verbose = True + http_client = HTTPHandler() - setattr( - litellm.integrations.braintrust_logging, - "global_braintrust_sync_http_handler", - http_client, - ) - - with patch.object(http_client, "post", new=MagicMock()) as mock_client: - + with patch.object( + litellm.integrations.braintrust_logging.global_braintrust_sync_http_handler, + "post", + new=MagicMock(), + ) as mock_client: # set braintrust as a callback, litellm will send the data to braintrust litellm.callbacks = ["braintrust"] @@ -50,4 +49,5 @@ def test_braintrust_logging(): messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}], ) + time.sleep(2) mock_client.assert_called() diff --git a/tests/local_testing/test_completion.py b/tests/local_testing/test_completion.py index 6ee6a45b2..77cea6f0c 100644 --- a/tests/local_testing/test_completion.py +++ b/tests/local_testing/test_completion.py @@ -329,36 +329,6 @@ async def test_completion_predibase(): # test_completion_predibase() -def test_completion_claude(): - litellm.set_verbose = True - litellm.cache = None - litellm.AnthropicTextConfig(max_tokens_to_sample=200, metadata={"user_id": "1224"}) - messages = [ - { - "role": "system", - "content": """You are an upbeat, enthusiastic personal fitness coach named Sam. Sam is passionate about helping clients get fit and lead healthier lifestyles. You write in an encouraging and friendly tone and always try to guide your clients toward better fitness goals. If the user asks you something unrelated to fitness, either bring the topic back to fitness, or say that you cannot answer.""", - }, - {"content": user_message, "role": "user"}, - ] - try: - # test without max tokens - response = completion( - model="claude-3-5-haiku-20241022", messages=messages, request_timeout=10 - ) - # Add any assertions here to check response args - print(response) - print(response.usage) - print(response.usage.completion_tokens) - print(response["usage"]["completion_tokens"]) - # print("new cost tracking") - except litellm.RateLimitError as e: - pass - except Exception as e: - if "overloaded_error" in str(e): - pass - pytest.fail(f"Error occurred: {e}") - - # test_completion_claude() diff --git a/tests/local_testing/test_pass_through_endpoints.py b/tests/local_testing/test_pass_through_endpoints.py index b3977e936..b069dc0ef 100644 --- a/tests/local_testing/test_pass_through_endpoints.py +++ b/tests/local_testing/test_pass_through_endpoints.py @@ -15,7 +15,7 @@ from unittest.mock import Mock import httpx -from litellm.proxy.proxy_server import app, initialize_pass_through_endpoints +from litellm.proxy.proxy_server import initialize_pass_through_endpoints # Mock the async_client used in the pass_through_request function @@ -25,7 +25,8 @@ async def mock_request(*args, **kwargs): return mock_response -def remove_rerank_route(): +def remove_rerank_route(app): + for route in app.routes: if route.path == "/v1/rerank" and "POST" in route.methods: app.routes.remove(route) @@ -35,7 +36,11 @@ def remove_rerank_route(): @pytest.fixture def client(): - remove_rerank_route() # remove the native rerank route on the litellm proxy - since we're testing the pass through endpoints + from litellm.proxy.proxy_server import app + + remove_rerank_route( + app=app + ) # remove the native rerank route on the litellm proxy - since we're testing the pass through endpoints return TestClient(app) @@ -145,8 +150,9 @@ async def test_pass_through_endpoint_rerank(client): [(True, 0, 429), (True, 1, 200), (False, 0, 200)], ) @pytest.mark.asyncio -async def test_pass_through_endpoint_rpm_limit(auth, expected_error_code, rpm_limit): - client = TestClient(app) +async def test_pass_through_endpoint_rpm_limit( + client, auth, expected_error_code, rpm_limit +): import litellm from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy.proxy_server import ProxyLogging, hash_token, user_api_key_cache @@ -214,9 +220,11 @@ async def test_pass_through_endpoint_rpm_limit(auth, expected_error_code, rpm_li async def test_aaapass_through_endpoint_pass_through_keys_langfuse( auth, expected_error_code, rpm_limit ): + from litellm.proxy.proxy_server import app client = TestClient(app) import litellm + from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy.proxy_server import ProxyLogging, hash_token, user_api_key_cache diff --git a/tests/local_testing/test_router_timeout.py b/tests/local_testing/test_router_timeout.py index 21e74e099..8123fad7e 100644 --- a/tests/local_testing/test_router_timeout.py +++ b/tests/local_testing/test_router_timeout.py @@ -149,7 +149,6 @@ def test_router_timeout_with_retries_anthropic_model(num_retries, expected_call_ """ If request hits custom timeout, ensure it's retried. """ - litellm._turn_on_debug() from litellm.llms.custom_httpx.http_handler import HTTPHandler import time diff --git a/tests/local_testing/test_user_api_key_auth.py b/tests/local_testing/test_user_api_key_auth.py index 36bb71eb9..f6becf070 100644 --- a/tests/local_testing/test_user_api_key_auth.py +++ b/tests/local_testing/test_user_api_key_auth.py @@ -8,7 +8,7 @@ sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path from typing import Dict, List, Optional -from unittest.mock import MagicMock +from unittest.mock import MagicMock, patch, AsyncMock import pytest from starlette.datastructures import URL @@ -157,7 +157,7 @@ def test_returned_user_api_key_auth(user_role, expected_role): @pytest.mark.parametrize("key_ownership", ["user_key", "team_key"]) @pytest.mark.asyncio -async def test_user_personal_budgets(key_ownership): +async def test_aaauser_personal_budgets(key_ownership): """ Set a personal budget on a user @@ -169,6 +169,7 @@ async def test_user_personal_budgets(key_ownership): from fastapi import Request from starlette.datastructures import URL + import litellm from litellm.proxy._types import LiteLLM_UserTable, UserAPIKeyAuth from litellm.proxy.auth.user_api_key_auth import user_api_key_auth @@ -193,7 +194,7 @@ async def test_user_personal_budgets(key_ownership): team_max_budget=100, spend=20, ) - await asyncio.sleep(1) + user_obj = LiteLLM_UserTable( user_id=_user_id, spend=11, max_budget=10, user_email="" ) @@ -207,6 +208,10 @@ async def test_user_personal_budgets(key_ownership): request = Request(scope={"type": "http"}) request._url = URL(url="/chat/completions") + test_user_cache = getattr(litellm.proxy.proxy_server, "user_api_key_cache") + + assert test_user_cache.get_cache(key=hash_token(user_key)) == valid_token + try: await user_api_key_auth(request=request, api_key="Bearer " + user_key) diff --git a/tests/proxy_unit_tests/adroit-crow-413218-bc47f303efc9.json b/tests/proxy_unit_tests/adroit-crow-413218-bc47f303efc9.json new file mode 100644 index 000000000..e2fd8512b --- /dev/null +++ b/tests/proxy_unit_tests/adroit-crow-413218-bc47f303efc9.json @@ -0,0 +1,13 @@ +{ + "type": "service_account", + "project_id": "adroit-crow-413218", + "private_key_id": "", + "private_key": "", + "client_email": "test-adroit-crow@adroit-crow-413218.iam.gserviceaccount.com", + "client_id": "104886546564708740969", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test-adroit-crow%40adroit-crow-413218.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" +} diff --git a/tests/proxy_unit_tests/azure_fine_tune.jsonl b/tests/proxy_unit_tests/azure_fine_tune.jsonl new file mode 100644 index 000000000..ef41bd977 --- /dev/null +++ b/tests/proxy_unit_tests/azure_fine_tune.jsonl @@ -0,0 +1,12 @@ +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]} +{"messages": [{"role": "system", "content": "Clippy is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} \ No newline at end of file diff --git a/tests/proxy_unit_tests/batch_job_results_furniture.jsonl b/tests/proxy_unit_tests/batch_job_results_furniture.jsonl new file mode 100644 index 000000000..05448952a --- /dev/null +++ b/tests/proxy_unit_tests/batch_job_results_furniture.jsonl @@ -0,0 +1,2 @@ +{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}} +{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}} \ No newline at end of file diff --git a/tests/proxy_unit_tests/conftest copy.py b/tests/proxy_unit_tests/conftest copy.py new file mode 100644 index 000000000..1421700c9 --- /dev/null +++ b/tests/proxy_unit_tests/conftest copy.py @@ -0,0 +1,60 @@ +# conftest.py + +import importlib +import os +import sys + +import pytest + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm + + +@pytest.fixture(scope="function", autouse=True) +def setup_and_teardown(): + """ + This fixture reloads litellm before every function. To speed up testing by removing callbacks being chained. + """ + curr_dir = os.getcwd() # Get the current working directory + sys.path.insert( + 0, os.path.abspath("../..") + ) # Adds the project directory to the system path + + import litellm + from litellm import Router + + importlib.reload(litellm) + try: + if hasattr(litellm, "proxy") and hasattr(litellm.proxy, "proxy_server"): + importlib.reload(litellm.proxy.proxy_server) + except Exception as e: + print(f"Error reloading litellm.proxy.proxy_server: {e}") + + import asyncio + + loop = asyncio.get_event_loop_policy().new_event_loop() + asyncio.set_event_loop(loop) + print(litellm) + # from litellm import Router, completion, aembedding, acompletion, embedding + yield + + # Teardown code (executes after the yield point) + loop.close() # Close the loop created earlier + asyncio.set_event_loop(None) # Remove the reference to the loop + + +def pytest_collection_modifyitems(config, items): + # Separate tests in 'test_amazing_proxy_custom_logger.py' and other tests + custom_logger_tests = [ + item for item in items if "custom_logger" in item.parent.name + ] + other_tests = [item for item in items if "custom_logger" not in item.parent.name] + + # Sort tests based on their names + custom_logger_tests.sort(key=lambda x: x.name) + other_tests.sort(key=lambda x: x.name) + + # Reorder the items list + items[:] = custom_logger_tests + other_tests diff --git a/tests/proxy_unit_tests/conftest.py b/tests/proxy_unit_tests/conftest.py new file mode 100644 index 000000000..1421700c9 --- /dev/null +++ b/tests/proxy_unit_tests/conftest.py @@ -0,0 +1,60 @@ +# conftest.py + +import importlib +import os +import sys + +import pytest + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm + + +@pytest.fixture(scope="function", autouse=True) +def setup_and_teardown(): + """ + This fixture reloads litellm before every function. To speed up testing by removing callbacks being chained. + """ + curr_dir = os.getcwd() # Get the current working directory + sys.path.insert( + 0, os.path.abspath("../..") + ) # Adds the project directory to the system path + + import litellm + from litellm import Router + + importlib.reload(litellm) + try: + if hasattr(litellm, "proxy") and hasattr(litellm.proxy, "proxy_server"): + importlib.reload(litellm.proxy.proxy_server) + except Exception as e: + print(f"Error reloading litellm.proxy.proxy_server: {e}") + + import asyncio + + loop = asyncio.get_event_loop_policy().new_event_loop() + asyncio.set_event_loop(loop) + print(litellm) + # from litellm import Router, completion, aembedding, acompletion, embedding + yield + + # Teardown code (executes after the yield point) + loop.close() # Close the loop created earlier + asyncio.set_event_loop(None) # Remove the reference to the loop + + +def pytest_collection_modifyitems(config, items): + # Separate tests in 'test_amazing_proxy_custom_logger.py' and other tests + custom_logger_tests = [ + item for item in items if "custom_logger" in item.parent.name + ] + other_tests = [item for item in items if "custom_logger" not in item.parent.name] + + # Sort tests based on their names + custom_logger_tests.sort(key=lambda x: x.name) + other_tests.sort(key=lambda x: x.name) + + # Reorder the items list + items[:] = custom_logger_tests + other_tests diff --git a/tests/proxy_unit_tests/data_map.txt b/tests/proxy_unit_tests/data_map.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8077595f476b6e0ae27fcd3e401b5806ede77af GIT binary patch literal 3939 zcmb_fO>Z1U5VZt~g8>BzDH0-xW-m5kJ%HnY>%3t|zk2lW$pQWDUUVP-*nQt^&EGl>7mklJ?s>TK;jA=FI{cNKYj*JX0W&#M(hH@k z+l+Nv<@)@iqV^$jQgO`-VH1~f#cc?aOt}-?{K~#|Ghvr!UE%S<{HsUC+IGyQjc11} z<$BYz+L}|D3Fb?|q|S}4xR*w=)-;<|FlEe%)CI$hq?^APBOUv;7F~IDe)TT0e@s7K zbobVN>K=B@v|RtS^W7Y9xFV4X!%@Z%nMsFW0}xi`Zi$)rcoIoV6_t9WC0S#3ziz~#hIrU zQcx%JmL9e2yGA&Ie!DBLmG1(`^#J69R{}v*Zmhug{UsQONS|14bSA9xTq9W!9TFZO zCL?y}StX0oM`U2vP<9*_ovTrQQ1-u}x)D&l|1zp)2kdKMt=vDn4NRjIOn+R!loG!L zE0+Mob$d=Z5YGUOEu%)1PKe4aN;uT5E-h8b_llun&!9=wt`r&g(G~x>rlWxV`X%Uh zb_n>^3h=*BmMA);Ou4PX+SwVAx`Qwwsj4y>`JQMq(+fRDTnT$z3g|98%}~!&<}y7; z89(Jx@u`BYrv}X_{TQ@n_@CaYjdPN2MuI$!BYlT1SW!rnj1@T!(e2>DKvs(k@Bn8uw_c_V8ewRRWYGUs;Of0gx%zD9Cef9w(DnDf)JcSTo>PP!vBpux z)F*{J6_L&aJ+gTT6)SCXfeKh#)};Q^%0Np@H`P?YJ}!zZ6zzZOuOzUxbqQ-VZxkym zFL^Z%ylY{J#N8DVe>rE-(L73i3Sp$FaG`Z-IEm3*omWuQjm{+8yy42haumG)36|G&bUHe^ z52_i(P;Wm=ibx04&VeXvZinb#@>7Lgc-Tjn5PI z4RJi<1c(r(EoIZl%x}noz>uZO*9$sMr-h!2|&hIIX2Y#-;`YOhq9zDuDOV zY2pmD`oM3Ttl_G$1*Z{1JW0!xD3CP;P#Fg8�&%=xN!>F(Ddx^Ur+XOzv4X_y| zVXe5Q8BT`{IFyR>EoPr0VuYNTw4tCt0;4O1s$Hyt=Pi69B8Sbij4Nek)RR;sM0Or? zbj)ySdJ>JQSEsCMV=?7mTPv%C704H)8kcJo3hp8f>U4%gq1uw7& zK%=~(22$83o&h$trPx0xdCwc(fiV~8BJNHQA@D)FMuAE$A`S=y6G%Jturz3t(&+nd zs2NDuP1xN&I@@o&iM!YmxbtY43hMw83>b!7Sc)5AZ^QAyC{VVjk>I7GqEq_9^F?IP zf(4ZZ6vj+cfFlWe345Rdz%CkuW7rG~7)QpL<+-PrQBFewq}*G`9{V7BBrs~Qh%NK3 ztu(j-iP9t`1Bg$AGvGKmrag8yh}c7Vd5;aTf;5G8)Elw30rsl8Xh2$nh6@rat!au9 zn_z0&nyw(aCf4CI<9IF|Z^`3g%@NsK8Qvxv>5Ud}z92<9zm0B;Wn1Fu1(-<^A~Ht? zXjqT}mf>gAA`J}Z6x*d~uq419V$V&R3_1kkPr{7j3rnG8o|+LOJNGj(NfKP6Pzlrx zf(dm2_+@Rdafp`M)6{rMKJOb85{KY(bnLQ%bc<(|)Dl>s6Dl!j7AU_=*nS)>xiuBr z*X@k9l_gg2n<~>}+w^sjjbYX@rSU;PDIdzSZ~su6b0*0`P&i@;8k-N#2Zz>zHkqM1 zy$Ew9&g2xv2q6U9iHCsE(8g9&C^hIHaW4B`#K^F9@&)u6T&Qo}jg1Fk*r@ShW;ocC z!VU&WEj_WfORB*b@+mo1sFy5e%KWGXe=fO`7F8~rB(2J ztj9Ei;>ma~hgaYtM)qRd?uBu?{pxW`J0}j=*7V_^S`6Gf%Yi%pwqH75HEt{T{EW;R z>|Qo=`{_QcgLf#aX8z9b<%$-z4lUi?dt-iWxLEPCaP9t|ui?qzyZ89mSbPBC12Q;l uY)AZhKvzX}7wt!%s}BKUeNtYZls6{jXi{!X%4AaRPRhMWd1ulz)8rpZSeX0( literal 0 HcmV?d00001 diff --git a/tests/proxy_unit_tests/eagle.wav b/tests/proxy_unit_tests/eagle.wav new file mode 100644 index 0000000000000000000000000000000000000000..1c23657859e4b55c7b38994c9401bd9b67b3687f GIT binary patch literal 55852 zcmXWD2Yg)Dc_q#?C)uo%O>>cLMK&c_vbtzWBqjC+fatwrW-z_?Uf!GDdjn<$7}P-r z2Au?0z)ngODan>)%T0D{r@Lem|9^@1uRV9PV~e6l%zL+d_bca|?>1MTJNM-G_U-F9 z*?yKQQ9tp&_U+sEd-&g9KDV#(&+qN~lYMNanz;`@Ba_RO8ofcUGnlNj-QjdOX|qwM zl8R+YxkRoum<)QO*+$u{w9D-cMWfM3$nWt4qtQ?x81&k0HoME`_t0jOnR0nu7M;#$ zHXHOBtfHx zg+wftsm*q~!{ftGIy`>A$K|wJOlH~>h-LHn;S3%XPsYP!02U=}x0(%FwL&TuN#shU zN+Fdg)OwTEX0cc)hc^^YWrwrLSS%I|ha=H=0^f6BIqfuMHJkKmxk$k0a0F6?Oo|V_ zs?lH#ZFUE3qa04B9lyH09*>K*IsA!iCJ~J$Qps2-gmrM3)e|as{TZzzXOL zS_S^CQ0WW?old7W*&HsH+v5pDlc`iXlZr<}J`W~F8B{8Rm7**LjoN6VD4PwNV77a( zL0*3_81Vc2cmc*}f(d9;8okMEG8!#58wPE)d;EC04&SrrvC|g2%T4Jd0*TsWvl{VI zEUFSykt<{{A+=g(q%a#BwlErUSq*xF(PX7)%A`>!4R&7uR->nUA)n1?b^3wJWrs3}SR7MJM6o13Cq-k!9Zu}5 zUZYfLwJMoHYo?t}O0TuL1AZ4}wOGlsRBE|Yp@sQj^W|7AwGNMU;G1r*&+qpKLg6rW z-C-v`Z?n5$2l$p;Di%wm5}`n>(&N8WI!wcXwRPFa`q56W+fLivfe5x5$0PvT$0}km zeI9QhO#VCQbvj_N7JRVe_}EMsfQFnE93ZRR?e{q>1|yAyiufF~I}r4_$q-;-dX-A2 zSHn&j46!W|qq*IZ^u~Hx+SMo7sd-fw^XeVXa<+n9Jky zc|w^AZ^M3=C~ODjYtgD;u=sDY*>C>Wy8=kwZ4YL(7tG8thtCcRQYCZ|`+L?XG? zV5Z;zXfn|dmX>yUe8EuA@4>)eM{cVbqxN{5FbJ2+;}3@8>1;NW$s{A#61Wbx+ksPP z(ZO}dEZJ(ZK4?9B{wJA!MGUP)<5-R>x- z2l$iA`cB7Mp2Zzqv9xI?q8+WllC@PfF+9i^T6Z){=cYxy@pW%~UuJ~gWM2$4cBe~{ zbJD3~PhVzU*P@?Ibv6g)O|_!=61P3F=4s-O=X>kj3sluWKHpzM%~(#irbgMdwxv)D zBVUqs1h18ZqVW}Dm+R7ufHkyAcbT`!T0w5z+oM~Y7;N$`c^U@`EG ztE5xu&U*K>?i_PugxwZcq#F2BC25y`+26{|=lag5^0w;k?2M}2RtmOt1`5&(!i>A2 z)?SdE85|j6bS2iDO~R=$c1LVG)T3Q5EBX?bQ@!HSvZ}*bPW19Iyf(U=Wr$}l+55da zQv$)*n!U%moa<`~%^GXPQ+ZZrbji^wo0;Uc23LF?(y2m!Ej?lQbU>l?Xt!i$QPFH) zPIR;KQ?eTQsHd(eFfPBKm`e4w1*bF(s@XANf2d?_5{~BC?Vb|V#LH)TFW9DSHGS!9 z-v!%(zfCYXuj=t_WSN}dWt!nwEoo)BWp|IHFwAMTjalj#*#f6plQ*63P8S$wb<@uC z?b&%{qotheWKEQ{P1MCHfpB8o+G<=H?r96n7^>OB8Ah9b$=%Eyo0fF=wuV^JnH6n^ zYb}jQ&q-^vbIJC$a8Z1kH=62caL-s8#Iy5?f#juFkFva~Y!B_GI|qxi;#%9Br=~5D z?>nKH3)gpM7d1`Bm0W+{@CMbaTPtyU5=;8igTH9tDFygJQ z3yurVDdvZ|yHe|}4*kZwh>_p%w#a9TjF#|4)p8r9{o{?^2*twuOI&FjkUrTPt{TuWHav2ddB5 zGVCKgN$-WTwkd0)XkwJp8CdjmD;8Ew?D)>mprW*_X?L#Y82s@?X^nm*+uoC3kerbe zJvEKdDd`2%))a>~v*+vgUSC&BOS`US<-#PlEx77!5lx(Z29t*sV8s^Umc$noYBum>RS>;_7Q%LK6%bNEiONj_*)yw%tV`YwB)_X}`QI?24~@n&pd=gLU=^!|9$(y8ECIE zT;)i8TWpklPCaftUK`H$9aojS)ot05;EcAMY3nJh8!jl9#<0@Yy>0sS600MNDDjnRS8QFrE7NTDSXq5mJTu(W5u4y0>`Dh}PB|u}ueT+G)h8V#_0g_ezN5;w z=BXW=m}J)bHoUdmxfOAfcf(uNTbyB^*3Wq^FvjNu=QK0X*8akZwm~zS?(Q94Qq?FI z3tb)QbxVVOYfiwKydLZ{Y%YmN;cT-V>^+Kz&zk zR$gaWinVe|tEzf>d$gZ5xn*s!l!rQ-!pn54er1vi=jmz`O%{63+onus8Dm9OYh>Nk zq}W(jbVW9NwS!ZO;-K6$Jgl#>;ha}=5nOZdTCYMmDmZl%a)eKt^TsRx_@F`*c{r4)%A?b3D4-3 zVs$O)1?4&A$^@e;cP-XqSfAtejNC17>7BBSQMejymoG00>l_QtlXbCC=9`k)Ok4ZV zioQy=SZJ>bY`8B7mzVfW;Y*Pw(Zbwd4Ylm9YRk+>8!XF-E_Si3?Qmb7<_pGG4NaPv zRCjM`$=YF@O|&-or*+l*tozKHni%6iYsh{6kg}jX(UmJQs^}H$)Xa*c$+zmNW=)lN zHT06Fs;5{sboj1J4zefKbWQYDkm8jFol%tnRTm-?{5NW$Yl{^n;|6aMkM2S?c+|btC~Gm ziv!ZtJ!_kDC)eIPwr#qgnu|9yCa_en%+AK}y1z?ZF3Vf!lB>RNcv{xtC?`Ama|_xQ zdNtkMpIZ)10%WiW9q!>wd(LF=^~@X2aK;j~C#*B3Gkv2|!dB0EvYR_LBdXTTgqr(C z%KAFZOrm38tgNj!u1|3X3uR-od230|&TaX6%!WT>w(xEX4f%+K?ieXE|@o}psj z3GIxpzI$j^TBDlHbaZA{wC8wZ$;K+@r0NuVa;UR8Qnpm_=9ffW$*Zw;`RazEcj#V` zsaoIGG*Tyl=Ge4xfRVh(L|!@xI8I4))ox39P0C_Pc{UKidOSt zrk^>qplQ)A=6V@pS3KR+#fgD|u^Zt|?b;l1fBt&T?23vtd?n4ZE{<^f{8L_udpfI- z`lD)=#!m}`lv~b{+bsf~j<#6+5xY!c1fUQ}1Z@TsG&T*hgDYqs21Wc)(2?l`4yaGNB~1nCVa|>a>`RpO*i6 z)W3SGRn70~9TbR!BDqF~hgzL(m(%S>k%ThD>yyRWB`K%qjA);ZnIJ%)0j}O10z7mYey+dTP-#_ZPsDfC>;zox7Uf%MT3vtY&HWp zkO36Hq!fB9>PN4e;0YFd)8!A?bqcwb6saDk6%dnIuQ8a-MvYo;)F~w*AzzHLMQgH{ ztqxRICvEEQ5==1tRaiC3!N_peP-vHG> zJstK~0aXy(!Day-U^Q3)uAq<#xU47u)mqf6I+aYWl(PF-B9$JsCU6Q9UCcv8VLs+1`S^0a!}YZuiIfo z-RCfC^(etm1!GlX7_N&lVc%{3SUiC70B6GL13EHc129sv&5bJ17fol=@kj)q8{iX- z#^MR1BK2Ff8UoJ&Y(a@B7#tXsks8zF48`IhC*=a#0NjFdpjLMPaxkdn5|LPGuvm>+ zl|rerIviHL0?3hED%DYdh`eqy&=QlwiFGw81wsYpWHg&}3b9n@jHP0*zF4L(S{O=3 zLeb>#=twpZk9nzho)#iLj-Sd17dJFfg`wl z_zRa&#$$`LSPYAuLbo1?WOfs0W~|FBnUug2XL&T@>Z= z`&~d6oetZlh>RJyE>5(GKyKI`J>VY$&^WV7z~;&IHV42qPZ;0^KpdNmvb%vJSY5z7 zD}VyvMS)Bv5t9J``NBtOayl(KU^_Oaht|rm&KQixL2wMKNw3x5gxSc-12iKmRjTwB+Tp+#FdM*KK=$kaS8(1UA+HPP0iMEb zGs1LiKq{3=ypPxnCS@nslgCLAJA#hl#Nd&7I60gOD=-eNN<~m;qY+2R36#ngj=0TQ z0(k8Gcoq*<)nd>ZX`FKc`Vp_CQORT)i_=MC!Ewg07-qZ?;2T+3 zST!*lfPp{*DR^c;fWcrmin9~(*vvMo9#ExL0Vi2;mqsf#4o=lT`(ueXR?3O}b6AK0 z0N=z!;Z;?z8!!n70WtxXGsxtKB@&4O5ItrqQNj@MLL)pP4Qq7;fZX{)iOf(s4y@b< zFfSMYW|zUU5#Qin^csZ*7@*N+(JJ*YKpy}!2e7{gaPc^fc(MYFN8y&Du_$7M5>v%V zaoF@SK94U1ybq9GED{JMD#RMSN+ywNNu0%kS5_N8i_`CP;4BAxI0y!f4%jhefHMFm zQh~KKa9}ExLZP)_3Gu-UD60dDp3bC`fEUxr>_~Am4^TQ94g?d~bOg90*oMjhF(L+o zZm1xr73f!^C5D0cPQ$3N9fW`()57@!y6SKQ5~+w6@fse~gVk|bwODyXN35cdGsqQ) zL?SUh5;^>g(-#hVEG8iIcCX(H(33U-z$8H&|3LwURqBZmBEFj7TH(rQ2W>*svk=f5 zrsTl13H9RfAV+X|!g2V^kQYF^9~KY>0ps)rBXIceNaRU4UTTd(z~SmAf-x~pvjd|7KSfYfpw^Vr3kM8ej-euhf{kk6OW;jRMzt7t zFu-7~MuowcKz#)L4m}JJE(y=Y=2|ooiBd~`8u1eFtA)ad5DmdoP=QY+0OJ#EKOB8$zXb~)$HyRK7y#T5aNkM^xgN_jPz-}?@3()Uy*ky*}z;_Wv@HYVL36lq!$EK0V;Z!sVDNZezF61h536NmS2VbyI zRs&cZ8k~&L<|1nWVh#3!Af(gc&q>5Ul8ThTY9hHExCshw4o5T-2Pz&*B$KfKAbwaA zSy)0k;6Nk#R0dmtqIG&RTsLR|kIQDVxsbDJGzNPh;Ifg(=OtLZ2V`2zPpCxjO$5&; zaS$Uw7O92>$iy-O{ss@VoAn6uFrR27686~VX75MlbvM3RuIpyKrmy-P7x{eV2WUvCQ!S2aHo1w9yt9G&~0JA!-|MtHxdpB zIXTutP0lE&B7+0QL`dXtG#0RfEw&;}_tGX=D2l5)WL(@4S$NblifNe$sfPH%Mw!^qr| z@ziiGalXPH@vG1GRJ@7ImADaA?()lFooq3OSi0h#CyEBiJe=&o&E1svZ1;Bnbzd zRK60CT&+@Sk^P&HKigCixzQbtctL!J@^g!&e2mBqP~}B3`TP)aFTYLAXY@1qa?~<# zdf@imCZ)y(BPUFw!xK(sl0gSynZd!~=%eC891TVzfk0}kG@A5b?F?#}Sgz4)6yAEWa6g(VeBjoVac2aa3w0by5>?c@0izArK#C-72@Sqlhg3n?Oaztu48${4B z7&(_WQ7C5MN{}&X5V^DpmC1pyO41C(S^w~CdAT%_D~uLLa;Y$4kX0>%K^s+aog?5i zidX}KpdcYl&|4h-2ss=kvpb%NAw(DzDl5`nWbI@P;cvajx%5(r1{`rEn&Ai$j}Af* z-kdVX1Y(Uzi+M0u0=bwyz!GWT$toKLjDgs2D(*69%zw&Ezm4P6CQx z4f16wrI5vu>1+rwfB`I8wZTT2Ehy*`VGl)A4=^`7Od5PLMPe&FyOhUdu*4>p%P8pS zVu@rDg~er&@?~ZukF-gx#NSx7s9-6b&K^slN{4~@^aAR3eGq39P{9Fe5?7_K>84XiX|5gvW*=6)#1mFP4Y}TyU`#XTWEc zPR9pLQ?|E zf%i4&px4kKk^yakTe2aV+Eg;FGm6yCVb#GPkgOrKHY4dluCEXw(U)<0`-K_+7^p@} z2CFBM9?Bv^k9tgquW$UpzQ3u8&Ocb6n;4szo+%LNNHCht<#NM`0OpQz12s)D?lqG| zB_%0xAJ{p5k1~jq%cwcrX6Q70aP7cT;OJC0^>*I*3l97LP2eJ1_7@i;UiAkh22I$?{qt1 z9{7m49IjZ6?1cz1uo*-f1bvSY7?c{AAL6A=!DI6jR2XY*)hRJ8vqB)kG5QqUBJMkp zj0f$MHx`F3g(oq&k$8I$!y^zj!3)3=!pWg*92=7K>g8Iy-|s-6G%Lg^i`}Xc2vla1 zjKfn}z2UGAc_yM41y^i!+6_{n!WK!uts@K3*=d8(lS+qBNe3|~p+KS{xLMGSR1~E# z9H-x=U)t49< z8&1O7fWO+zuI%JU&~A4*jZ(e{B>{34D}m|^M!N?ARzsl>pw)uGL5@VF5Furka0I4c z*bX2;fpkH{W{EH)z&e1?>;S^_8X#aqazi24w%}yK`Pu^!T0AIF(~)eHB5nr0*+Utv z;o)?^DfJeO({q3I=TEkcfBy%U{MBFh!b_czTmSG6x4dnqPt{1q zuid;nE`If|zI2|Ro=n(S9n6jfdHF|oGtt%ix9;7&x;7HFwts%#zCS+hm>G*WH2lG? zcFEZHe*2&Q_~Gq2AHS1lcZn{%vHyR5_VZsq$#TtKzr1$k2mkfSC!hSsx7OV4=Q>1! zs;_-!-ygl$AkbRjm}k1D!UXruF479c*EER&QQk8K&7aCyt+KVre|F%{E^4>rE$FF|+{?4$nrG;-$bsc&BI8&e5xwA7Cb%^T^z52?* zvt4p8>X?!8t?&Kvcfb3$_sc%P0IQ1(=nua9^+P=hg;=DG%-sCN@BaIr-rJstC>swS zuI=xwztC?=t-t@l?hqyII=27uC!RcTu2+L(H8OSS;m?2dFF$#-7E-Cq+SafA{=UCF ze7uT}EXxut?L7LY|N8CE9<1ekT6TL=Q{9DjWny;c(YN*rMv0J7_3|@M?|=Syn@DT4 z5AS~ar~mq!fBW(6Nm}Cwnj0Sby+3-Tp|*`Hl9{8kyZ1l%{`cPAE_n39uG*UVnsW`J zU~%d4)#bQ~*-`uYQ(t}hg(DaGl+JK+Z1u|hkN){rKfN_VnFAr&;m_{-(_;-)t(`4Q zWBTIVx8MEXt)2N|$|>z^XlU>1lSfx>z5n6;d7r$$?weow+?Stt;Y6Fv;c<9JwjX@_ z&;R)2N9&pBP|DJbNqzq0g>JE0srMC@cW&Ojv%66)C6vu4j@5PcD>IkwzkUDeVqDAZ zIJ*A}kA3CYBQ*?TFgd!sd*k8X|L1@H?A|0oo|Adtf9(5{H#->;r9zju{DYr-_ray{ zcsO8To_ygz6Hjgkl`ih?URqA;m`z8%_E&%Vm2bS+$g#wSbA^fd&D-Dp=)Ef?)IU-A z*~kCjPY=|$!jqaX%$qkZ&JF>L3+V=`Pc#e+imgLydv|W{PPpU)XTJW}^gS+=^BRC=1xp+ zU)xz74{BJ=N1ytezxdq#SB^JHLdB`6nc0P%_y7LmhuhPIxUpsbAME?gH;#2^A+XCW zZfQm1jJkcm{CsVmXzBs-9){lSj z!@DcRh`Rc7`}Y0*(+!G{S7!)Mt}M<@71Q9%bllEPo+X$m&McNUHW!DT(*A~H&p-K% zH)>is`?-chX?bCK;m*H)^2zUha&aUbF*SW@-@g6g!b(BU5P3!?3i*+I+~*5}`%>zh z$?28(oENHCr{vkB)ylGE3UND7CKDXOV%d1lUFpKaETb zkuJ2dHp&%D4QEryR3;q<{FDV6?+K=I`D_e01|io_@DhpuB$on+A<_W^I+C0L?IZF^ z)EhPc3D8tR@8}JnPCzB@MAZ)H-iBHo1tfq<+E7t(Bijc$Op<<`9Mxe3Sp!f7f+FBW zN(*uU>d*?i0LBufBQR|O!xG*Wc_<(X;FoO< z;8Y{YRNQAXkcBhZPqx}fM!yU6NR-mng%WoQW0beNl=j%>!IEyH3jNl z8pRSSErPNlTO(L2l1OC15;-s#D7?W@LK?3n5GueFLKzbZ2gn$rIw1CB$OVoR55dHxu7@|4(6^e5pe_4`Jf_g)qXa{t!#zLUL1@;BV77qswfCpnZsIh=1 z`Xk`g5SHymBRp+=O`|V;#=tU^9tiX1GusAIZc}nIj7& zg$K-4F5rP$B%0qoCSM8W6e%odYb&Z~hGnVunCA8EIxJ<=rz(A)Qy#Oz#b#>gyI*Z z0>~{a9iblrz}n-malq(NCp0%m5qZ_!Zm%w%DWtZbue{0C9xl1Xu!0ClZUe2{Va*8~|tmRpKQ?l@F;i4m3Fh z#O}aH$iVFIcoH3$70dx(J=q}?aBfPgGQz3@5dV{ZffgW07CB6P?q>iuhr(={-X-`j2XM_GILCl~!MsxjIgiO!dJ;jSLSqkwJSGuW;fne!74$g(J`&`BRm1LLnzNHb`qtCU z)auO@N5gXmxv8lHzpBwt7bh4?XtsX%Xm)I7>FU)H(XlW5+5XoL9zNUWEPwPb-@iOt%%xnSrc{9-dc>0Om7jdP;pu4i%VKK{k`-`X4xYkSV0YiK(6>gWIPFAkfo ze(y?Ve10Ha$PVX|fz){3&g*Jw>SPNCdzhx;=H^mq;p#j0wkBgXap&3B zUis#sLoa^i%dbn`(D}F-@!8aX>M~XOCaE}nbO$yyWf59t+(#o+#GfAdwaVY zPQ7`u0UP3tL_*2=w}1TMhKJRqh)opSS}M0V7nSyQ^mB#GmYV9?hUWG@j?z1ZjPTl} zwYfr2$82e+t2ux6?Add*9fL|oYJ7fmdwnM3RPy*r=x?MV9+Sy|SLP0~xO^^)$>b?) z;oRiH^3vks%t*{*)k?Yjy*i* z?I3ES#Q`706v}`m6T)l2<)SqV)d|pV&^#bp1JSsjFc@$@TGA{*0y-!ZAYh0!sFl#y z0d_m!3xp$ROoJZ^qKZba2OC@wETL}$)ecYv4+K!aL`l02R0Sf10jMxw5P%C{kO=XK zA~hNWQ;<#)EDtFMc{Wfki!+iLiu*}wf+`yAF2GdazDTbPAt*thxm`5ni6p?v20=fi z6JZ}DMR>GKEEI~wBDsO|y0}7#m=7)%h#ur+fL=kFnhh#3)|590X$nwxryKMrNDHDq za$1yJ<^W3o_(AWA1Raz+g0Gu^=n+*12~q}73;?E;@XH#+C%DZ-p|rBKwK_XdL_!DR z9`r0h*D1sW%9qF_LtY<{0&wwYvw`A+w(6ujE|FB-xS+nO$82Jekho48yI=%#Z!DKdCHtz4$H z0VfwQ`+9r(SV9#bVr0OP)DS+y!|AY7C*<$|`XbS=Be!weNnAy5C|FZMq#|hq&Sp{o z14T!b+yI_2HIz+di!*c6W5Wq7u}zQ8H-NjKu`qFi+6e6uls2gZJ~7gz=tWndjL+c- zfJ}k;^oP+(Nhr@KWC}z#0-CqdF$wyLbRTLhNNqrVpaYNaw=OVK=&wYln3$kz=)J2MQI128$pEPe{{N${mV<$Rp5uCJla+G<#7R94?_qs;-C#W2iiG#p%gH z0<%TNCI^5kM)t=e#g6dRHzV7Hco6ch0Dpwm=wQz8L0 zLtwYS)g#5U+T4*;)a%Ai5M3L6IdEj6%X?b77lc&!L9` ze(NPolgJ(^+o#e6q6ITmWUWZ}K)CoKF)R@t4jlxKF~DHL{2@cjzpuP7N&CZT^7fF&ePQ1RgZ z@U5u-P^6IFU(-x^QJE#GJkew2{Ru;t^N_2zjstNYUZr zQO7_GY6lm9`T|ClPU4f!BmxiyTMaS}TcCgz4nZlHB+ON=SWE{HoWQc-0|u3JDMLXe zLE(ut0f|p41fu#P#)uyPTTdb%A}A5|5pe}FT#wxZbq0}5*iipM`GUSxB>QLt1aFV- z#i0=-fP{t&(MB`Tt)LJk>MtkMZ?SZKG@r(KhhEP_>NO&CMZu?3$wlZT)sY$-;=}ZC zK3~XX;^=wxm=z-Ey3}$3n@c1npa(%F0Pc4=(L3%nBJRqBT)s#o6v~K_35UP{cMGj- zG+P`ULbc$v6PK@tQc@z55J?F-CcwgTc;L`hQOc!P<;)bw}( z_y7!iWS9gOCqkoAS5eCmR5Z`mwax(J>rXFr)C* zSWHsWi+O_(e#xK(#mUBrM@xPpiy4y_L0b*wE_Z;*1AC1k&T1nfV6>&e@Sq}rd{Sd| z;#5QODN7C8jb`L#reglv~94V(_Y@mu@1h{>RsK* zaZlDMf)SUB$+9iK^VYJXr?x{G2%CjMV{l|{!lz;nFj+mdM~|Expz|}O;%KS7v5*T| zgk3HDQfbe*^TNW#1xxk8TJuCHB5A2^6YCt|#PIUlKm6!MUe$h~bzq>anPHAbokoq6 z)z#U{6dT;J;rYw=AKtxGO4t?5w%Ri%tNT@Yxhj73gX<|lGb^#Rx900_>R}9U)Q;5h zorl+^=*~CasO{rPw7%l}MAU?HkMPA8XuU%VSMT1wzPmaZGqLNBe)E;rsyJ4s$&$VG zqetV)9&vW#;*71Mwv#!~$5s0#clOqY)b)p7Ki$-eRT^H|TFN;@eOT9MRFW+wZ(}V{qe(>IvqQ2qKYiIk^v_H4JvoW2piTk_z8G~YjD_L5 zUi#WsUpU9HC8yU{NBzO#;<$@l+hQq}7n}_T51*{+PzC`dU)!2+^uGSg^T#@s&cyVk zJGZuS21a{JM?aIJutZ9mJL{z+C2oK7$uEBC>7)Jn(9GQr?-%uk{N9ZT>mWZ-zIG|b zJo(1Sx_)zE>*l>%Tcf7hr#|<@;VzwTXnyDRUU`_}_jGr6_42gA+1+c`cFRS-mRbGE z6JLJn1lO5cz4Peim`UNATS>|p($K`pe6a7}H(ohfCkRY!T)S~;+}!xw7ax0}hHVH= zT)cT>bHvos*wn@p>*&Dn{MO~&_30EA?Z7i%efo_aQ)p!4?e}+Fy}aR@*GJU#jkfWn z2}8}RZ&Y`1%&GayZ-3{}roZRV*Izi#wk0Rm_U_-kGV2qxH`La3iHxDK<*lua@^sQV zc<$w|Klj?X0eyIU@8`e$Sy?k^pIwY_Yih-T(Q#Mr(St{;dJIG5tGDl5F4;KQ`N~g&$h9Z&g|^!#@b}e%slb*<4?X?HK6s6?EL88f4Xj!1?CsR z!sZrLv`}z%z5epS)2-6r^u;?5@2&c~kG$~0kw$@QZ2R`j-K}!L*naBumkyrq7E<|@ z-77oG#hAJCz!Q%@b+lLGh_8J2-+yt@p~+spQc$+kOMvqFStk!2I#R`rtlYSF|3*2& z{N|S*`^I^$bL!T&-@ki#dp@Zj>}js98&Gof;=HR&=RbutlyARjXnVs)Gns@Vi^r`7`mVfR{b#1FKFn|5w zd-v8NgNOd^^ZQQ>m{N<^?%ut=JrmbT;BG^yw#_JJliam3FRdt;`o( zg0}jqGqqhPUZ;1i6Fws%JpB2;_}gdBv@wQGIpgs%PUnX}dH z%KY7r|Nf)9CH=YmfBiRKui`n1+js8YyR$oKXEa_obNXx}o1WZz>#YZG?ajG+zxlPN z4_37gnx`NA;wRU}#sGb8~{QTEmyU;#JFTeNh>d4aV8`DlfQ=2|M zUbOd|Jn;Oh=S2CtKlssy_ZA(^I1=ADJD`ou@4fTr?HkKsL0jYbGv_b#sN)SWJ|Iwwer13P`|3{}I^OMGefwMQ-CIv^4}aw^ zAA7CWKC*V>!F%sN*cr95I$LUM8v9j|@~t~JukEajn46z_>`PC*a;!x=v3GSQw{ZPZ zR^Igbt4(rm+}c}LS976{p1=F8_a9uFbGN_v*yo-+JxFEew=QmPZ%ldlolWOYoT%=W z`e$~oUf$W74e{Q1;ww)ds%~b|b5}Nt`SO+Jm|?K4%aWa+cC%Y+s~QA}E8qS2`|n>V z$d5ht=bwMMLF}8nc=Pu4o#niZ({bVG5db>okqvNKn@dCbrkB6^{bxCr%<|P6S2h+#y;8P_tr1V(lnl4M}-gm$IaCcN+v;WUN^S6f>&atfrm6Tk{!R(7FKmYh^y^e{? z4<0?Ze`jwwD(z`Ld$OimV2I6ZUfSJVE&3!?&;H&17f$u)GMgX#`nNyciup3b9&y+C zV|Ajy#E47OQFXReUAX%2om(3POZ&@T_}r7H1pdj3w{PyO%#3&ib*J7qc=%L(pE^2w z>Dt~d*tY)Hzw)`Syi~(Bg(q+Q<8Oblm-G9V2P#SO(q?elrS0e5IN2q4XA%^@O`c6zDF&YGY-Q8S$aC)U^=Qf?IVY+5-ynSmeXX!q)|A}Yb=%pw29zDE$ zalR1JGa9N-9y!*)vyW`wzIXTb-bRt`eC?TM51j8+1xq)7{=5JF>6)plTcYN*HMFqQ z&TKJ5^YBE5v$VZ6pYurTU;gUXo;}j8O|4$Pd1ZAX>Cq0h)}1?cs)=KYO|9s0ZrKcUV@w_0glNb79`8mtHt< zvfEU+_||)mZf%Umtb&gEs-uTawkyKOqAqQ(k!3vi^pnpXs^z*Tci;K`&wlb~(OiGz zR6C2=*WKPF3D3@Fto*jR4(-VH&Pp+ALGJME3rAWtna%r;-nq3t>QnXARi8O}w7Opt z7+tz}<=W-d5licV{eSoL>vh85?4`RO{^FNEy%Fg+{ANQ}S2qJ{GfO<5q$Is9U7C^W z5ASS_`DHCQxK&O3K>5Lk@82j-WCPlP#;RklA8p`Rv-6u*Zd~7-@C$1WzWB=Vh5>DI z<=zkf>6icTt+K7*SRDuKkVq)dBv&>{aWlWWPm^6*oymGMjOxP&UOU;QOm5tN=jO_g zOE2!LJ$d-$7mv2dLO3!z+shL%UFVVKpGR)ObByo4`_o_k>c@LY=CNZfgRHi?>IR-` zVs&xI#%^ll1XgcdU!RQX+VSHDPW4fv+i!pH-t~oyOWs*^zUpjMyVNtfdhPDLn-@y~ z!TDF8d-=@^Jy7J_`uN}f_s`!d%3E3odA+qK-#FDnPp(e+CCz8fGeg@CAKhFVar3KQ zdG3{CZMxjm@Ba8ZcQ>b!#_n^+4j(vF!?q5UcdqX3U0TSPdru!abhN6KWk|0+`1l|H z{@t>(TEo7 z;FVMT`XErr+4-e~Q8)Yi>j#cp=vO$0SMPoN>wo|GTNC>3#@6oc)`nJi{P~rsxJAN~ z+D9)xxVJfMX17$GI@7>1#HQ9SUs;<>c?|rX*1GfOYC0wM%Eh4x0zjv>q6(UM`Xu-2j(>4JkS8)#vMb`1;Uz8Ba(!^&&=B zdq+1@Y7OV7=jW#j37=KP>+9<38YK@J~ChlImq2~@UFzBE5O zk&8K$jE+v0#ONO$8!r~JQIACp$U{!~MCaiV>b)ZVxZ3(yc$=@5<*GL`gwfR6=V35hMfikqRJGDqiDr9&7f z5otC7-h~E}O2-ed)G*Nr0zh}*ZYW@T=wySS0jMo$wgT7%p#kZ21*vZV(+lzv(mcSJ zKz@n%0D~bn+*RNw8XY+vvIf8mD8jd+iwxaVq?H1!1Q|L&P>HHSK394Ipb{i~Iix2z z0HgyHE_pxR4s9Q9`ojyr^WqvftQg)zIxK;81;T*ZN&6WZ>d5;^n>SP$=v7Z7B7k+! z`vPL#Pnz4%?usQvYXL!c!7-q5inOUf-V5nJnk6A@^^uGBa9bF83YiD3LxVZk5JCk5 z144HvE@J~mgiE?Gexe(|%)nVg3PRdwF;*-$Gz6bsF-9O}KwD_-AQ&dmCPTPOczpaE zrU`PLsO5<|0li1i_Ci>Se$G$j{x&l(F(@nWQ&<+l1%ayuMrH#EO^9YaZZCw4owPHc z!3L-dc?1L$q&X2JJ!x)$$>BSMOd(VcaD5M3aG#c0XWiI5B90f}N1wQHXQU*FtXGL;ob8DTw3ASMV-! z4*)u8BYx5jM0CKUGaisUE)4*Z`>7RUxuJNV2%bv1l`upo9f%SH^gX#h4c1D=Q)y%- zHyB`IWImWO_<>6MCafJc4+IlUS=bubC{ZE+OeF#v>@V8R(3A%S003JevcP*Q?N&kf z6nGfma-_E(O$T7Y0HtEkL^uEqG+9m9O~qYUfLFTR5Yd3e0hEs4li_0%DrbdYkrkN> zHW^|N=m78#%;(d25wW`#@)-PrMoo0G;7E|}2_o@@#)ovF!F(!>V6b@D2tF7(mcF70 zg3WwNsNi^4E@=#a-6Z`7P<;@Y2uMtFzakbLECWO`AT)zScujZ-K>l8AX2t%9t{c=L zt|3696%JUXKN$KFd@um;2^DWeP<;3%XibQ-t(Yo&0X{Gt7%y33+=YkEItZG`6@z4< z(W3}+BR5-CwCk`FIQYtGB|Iz8chX8i95qqQz+Zx6#d<(;Ok*3dbSIP~~YpL97ij^i!Mv&d0^9tUT=t{6FWZ;!I!vaa48Jr3X zifH$7Uhzg46(K%x{RZ4Mx!DtpF8;v)NS`MRi8Kep%Han{hZ@#|tTko~(==mGh`T49 zrldy;W5<4=1Ay>j=ro1b0~JXc20$=?RSUz(lA{c%l?QAZF>x{;h*%WlK0s^+QTRY9 zLr{1Ua>yIJUb_KSQi1EqU6PfcM!cp4S1#gi3YZRQkwahriH%oQ@NA453xO~SIvETr zMvp%yt_Z%Igg^(@4gD+R!bQvpHwsidKWrP?B}6xZijy!(RNioP}oOl}f1)TzD z#se#h7(wC>IX`xb#T~_!i?IM)5_~G|b*(gv!PBF~4jF?2o)b}tkTft}au)#h!yuQT zy8s@-McQM5xkJ|d=`&#mpwh577&KW|bgiQe3;x-P!1!r*wH09>E&(ba4M;Z_DLe~d z1IfJtaP6dTm9#mK`z)Q1R|V}RtPu%{U|)zQ2H%X^R7qUHLJ=#2kO>YKk+C9#f}bE4 zU%H&QJl0I8txD4}7+BJJLR!g)x_}r78W3@#C6Vx9RwRm{aR4`pcw@<^A2b0tC^9Va zT*P-HIt-y3GNHR39ulXR-0A@fgQ5U;Jd(RuiI*nS0s5TLcpAd>jks16f8nF`7&*3( zNWm;7fc$tlREeM@k=?+sNsBtUhy-R1UqfOJ*(%ro&Klag&<&0zCUSiSY!3VZx#@zW z2O4xiLo}#V>nU_u;kp$ZRpK#lCn$acOD3xTVgg+s60kxx8oDxdZs5;+;H4(^Y1Gl3W+hKe3v zsQXBB*8e*Qh|EaPAh9Etl#=<7a|kB@k)bP$)rNSGG!KFwLceJuLOQPXI-5U&4ja<1 zjPGL8z(JC{2Zj&k8rh*f*Nw9iZJIhQLyg_n;vU zD^|&@aSI6YT#^XkhpSG+T4YFW z5=U?>E4RZU)Ow@1=owMG(gR2CQYNtzxglsfSTd3X+=EPns)USsaT2c~bNe+z;VH_`{D3GS2MVb^N;NX!{Vj~f;NINv{tT3r%xb~T( zwj@^KKtTeGsslRHL>S2p6s`zR2&w;s{~KUaxK1CTX^3?06g7 z7-RO{O9&8JAfden5<+`{1V~6?4~f0OV6edx8{6?n?AVUuIIBsMwAZBVb!PqFkJ3M{ zz8c2?@g2|ec|PMkfE54`19>|(92u1pIx|)oAg0(Qc2c~{ zUJ1<-BAr4G0K%;YY8Dh_aJ&?tph7?gaRACY0H2z1$bsu;MOZ~D2&fiWCO~(CtB5m+ zq-#W|8Ms-J)Zhyn1Fv5s)j+s8niCnxokw#Mw@oj@G#)$7sKb^a--gPTghm8*j7uS5 zgrG|JkI^CukZMr-keCDBL5Q2cQxh#Tf?b_3c49d z43;yL2Ab_a4cL*QnQU#H;65N25ri6wP@Ejm9g0Io4Sg&kC=6sPk$%8yB+Ct%2mwQ5 z9bu716Ec!%lIoVkXbBQ-vZ6r==K_>wH0wY^g%%NEjvOGXfFVq7OVJ|RFr zM7D^YwbxNEmFvM;w;4&+jOYLnCWp(UlB$5HMZIo>U=%2=N_B%P(9`KP)i)r*f1`_o zph0-BM6Dt!5&?oIHULP!2o4^P9Y`1u@I@r4LxKsEj|)HN1^fr01A9y%kea+6lbWbV z)GL+bJO`QH)$Fp=%LJtU7IT@^;4HA2H8PVMtVO`++M95s8^JK*=2o6~G<4)hHT`pu9pQUas>O=YiC0zL(#bxWu_*ksb1+lR+`(Frgq0Q`{YY+g?T zvs5y)v0}~Go6KMq6x5%8|ExB%r1kP^Q{p0_$CR^aPu=9T6Htyb22}AcAh61wKET^_Rnhg>c4%|w*hxw;c~E;){Gj@LvwrB%|%tM|@y zHv2u&0#9M!!?(Wi- zR=a059^HvBxBuv8G1Qvkq#XUiJD;5wWmK8kt*k=H(z|c3%3~8bTKc|cW0kY7-Cc3! z{p7KK+mx?qvNxT0_itbS?Sgp!9-7O{NQ~nS-F$Du2L1i=lyu?1>gkydr^lyFe(t&L zM{+9}JWF?E{Ne|nfBogh8zap+1;0Gu@y8x}>}M6TZ!gOWtJ^mI_`eg?>Ef{o<w`BN+1TIdZ%}-2KY$zx?v|w@WQ#8Fdx@{IT!tEVqoHsk<~cpv!#e zsVBA^&1Y)EZ~XPkul{g*78CA*xXu5GYkKV8cPbu!{eM9UckXw8zvC{i?d+~S5>pm< z_~qwsoV$FcU%2llPaVkRw@xgKcZUWeu8No!Ux+EFWox@EO+a#4$9B|?l+faMnl@{XTSf$rw^1ihUe#odU`^d z%pK1@e;|j-vCMz)<=0<*dUL|X%S$@G>xpNRL?-{mKYo7SlklsA*5!yIGgGK8+Wypz z^8Savy>oK1*HXFf=Rf}C_B7e>#;cbXr)R_U=`X#o1v6b~@4e5z{`&JrD;)-AR_u#E z_+^^X$*U2U((l7;Mr+qR^d)<1gp($YjgnZ50)=l5g?J6Erq9r6c* z4aNIjcy32x3D0x>4_|-%#Rup5ES$`k&5!@=<$RIB)_?BWjO@VAUsgxj#T16IS(YTQfCNH(tNCJlUhq-S*3$J$tBtW10N`59;r4E&FAq$@`xE z;nO+BzW#wzZ+)_1IQ;Yd?)4Q{HOuS~CGI@T2*35)duy{pErQq|{mYMb6q#qPTswPW zezIMhwSW80<0W;j)1WN=?X5F|c45)sO;7&p$%8fCkSDZw=k^d|`?J)svypmEeXFT- z&yIBC`FHQFO-2Ih^vA#TADhx8ee17XJ9~1(UU_uqi<|dnF!bTufBNDdpWK=cHZYDo z_mdy~d}m6Hwaqtt{o$34vS*&C7+)LEFl1JB{{DU0jc49_bp7l?*jDn)x4*Zk$To>Z zb@tM-kDauC=bl4}W!mvKKK;`tchApuX^IXXOi0+jD_Iii?}{v6zY$@)^df!c%94Yl zu&Q%5KXXLB^637#`I(3MK7&o4$8B z#O0XX+#_2K)ehZxbmPq0%Ajobcfa*`lDxGmeB$!em4Fyfzk=K%wxMI;wGV&$(W8y= zCbg84|MKI%*w3`u6jI;D2e({_TMLJ;&8c%}u8=bRg&5xOrPXj}WTdU?iO0VEETz$` zHuj%gA8(*h8P&!3tj6HP<+ne4_x05Qn}Sn(_{o3&{-#VOi>?^`@T+$MMH$8mSHt}D zvU(#k;YdOK@Y<=V$)yuroLx`uC{&rW^}drgE{_<+%0^8c*uJgZqo=RmxUxFrl-BWT zvY-F%_jb{6v*m$Df4`y27cM=#+LTr18un41eKMtCpv%(Gcj01p{*#-it)0sJ%I1Z6 zH#LjV*kq9DLo;*XQ1`^*WKheKH|puTfAEVmLG_*|_J~eD7_Z5Zp8el<`YUPPg@`a_ z52bmc-&)_ke5Qx?($->EK%SK*?3?w`ld`!g0iA0fhyYp?gdqpavGz$ytuHWoKDcE zI;TGr@DVg1faj=w3HnwD;27ye=o&1|m^`3cKvI3c2!AB8>8kya`P$wiK zf&dF+{P0bcp&%A4gkgv7vr=AfLoIg z*^haH6PjcuV?Fw$08ny((1S{dMj_zv)h2RD)&?T7X>mh+6Zri$Xc9Vfav?yWRh5hy zRyBjK_q4Y-oW79PVsZuW|DZ$&YhY*r{5?k?;(&e(Ix_C3^|vG3t0*+>Gb$ptXkAi zI)k&lyRWyqGuQ%LwUAkwmzj~CnpQ}wsbL8kTl^jyDv<_*t7~YmJLq#F^#tafQ$we- z7<9G}v_VIxzb}Xa53O699|EJ$7HzYmYay>i$jHw~&!w;-^VJ;e?&<0Yk4=n4!Xu;L z_j~LRXc5Wq1nQKif*>^t$SWpKfp(wAtdb~AP3_&`pConYkgW#d3*ZAp=Mw-($^_UW|0E`Y6IxSM!Kj7u z9+9&1_&a)fd&6VXlfyl2U`B%Nz$`B+p;lHgfpJx%g^9!+OQpRH%n=lHN+=B}VPOI^ zJ(6rRvB-)BK@<{Qz@Fl+n}S^d=ofmRRVpX*IC@o8HAkV7b7~|88>BG;9=%lMjx3#; zZWkA%Jf8pLrLz;(g6%KFvfSa3PCX^PSmvKtS?tu7 z9eDc3PsZ|x-~9Xkz2eRL<-b0arJuNb{o2|2@quP(S;q0hi8RaP`ngF@?eQ%y9;$9T zd;9vFvpOX{N6&i=tJhf1n38WaMN*6!=-4~GZ4TTDtoe`^)CU?kk( z(#o|qkH@0qGODNyb#VH`SckiwQ=FYk;ad8pK*5&5Nr%s6Qu8yi%6JXlp7F({$qpmG zx(XyWxodFc^2JGA+RK>Ry8}I=i%TON9UWG7^3LbB#8#LmHeS6n>fn`^R4Wi3&n<_Y zg5t#3;~5#p4#uTtt7K8B45_n!xLukLD8}(hfz&ed!QcP>_s?GK zsoeUX|FyN;w{&LK$BEm#B_@ZZ8+hf@&;Ru3Lcf7sPAyG&>B(oFdtrB`xP9f#Pyh0V zw@U@X3I_2=;q=E{jP(`L!_Tl?qe2v`t`iwpI>F+%DFHh||5}&7=U%z_y?xj&1zxddW zXSb&c{o^2MLGG%tW$5JT*?zCK`uOIjpWD7S=4g6x1p<$G_)b)n;^CPPecN{)d+Z1M zv$>v?ciy>mb$z+7o}Rhqmp}X!Y!c3|uguNPj0BucUsrEWw^v#zRu!L1on(aT^s z{y58h=AGZZePyyqNG;0Gir=+$cN)I~A9D3%xYbHp(V{w0qpv$W(%-7Dir>BKU_w6I zJoWJNum1eO?ejzRg?oPR*uTES2&}$#=kk28o=q(+%uPKWms~7vojH5{WW*_kq>T;( z2c^Z|I|QzuBy-!-&%YdBBKDqm@Y!eY-MupFqVE66w;ub>7Fyu^gST%jc=%B*Q?X4|s9sQBv(ZLo?#o-s8 z-n2KRs&VYrhabLiZFS61xfd7mFE7%%uf6xq?bCgFYI1DMp+iSgq2kfEaPi97VJo{J zpK0hELk8l7?UlVzM$d^m0NijM*E#drZ+`p!oikm$13$psJe_4({rHPNy|X@8UlOQvl(YM(AOCz?VwK9%8;SJ$O+pq& z(>i|c>iH?3nqE{=%@snvsU#~ciz4x#`8D3~qtl+!=l<7C|M_~=p>@6b>G?Z3CCONZY0%V)2j>ND5o#2rp7tkrqDI$A6$E~A<)G!I?5 z|LXdDt1xH(w%rMZOp&OzqNt2x9K7<;|NZa1lWpANFFf(w-Yh}qrAKdW%y*f1C27g| zH9AkItJ4R57@sRN^j>`Q&fUv1Es~<6dyk}3Sln7#ac%)!+jZvtqdTj8jr6qGBMJGn z=7E!EPL77GqN<{->|(m80cWaN#mbG3O{RO#+`oTowO3ymziZdw^s?&eijpD<*W9;q z65`xhrs*8%prFRobL z5*Zt8krf@?cPNYNUATVl?#77$k4{jPeR$L3Kl@seB%jlU`-@JEq zd9Yc-%006A@t^#1dt#L#a^?QL2XCBjrNjWCmP0cveDJs5J-jy8%F8^k<4~d4)!u64 zm82d^%&V3)O}+Bw8#h-YZWXsG_t?vuw;f2URJNbI_28{HuSC>EM`P3SOIX&~J0HA% z?`(&hlAc~Ja|c6Svy7ITo{>itG%daRkAHl0eX?1}p``A8?&&RubE;&%#T#!tx;e{ruU}a~+DJV~4;|FQU}y+>n-RkIamE>I(81`u5Hy6}y_F z^9I{p^&%#m>Hde*&+H)i!IlW9|0a+EJCu7U-DZp6! zJiu|)R8}y=rq=crtCCfaoS0c$&6VkGUa0W*LeZihm#*Vgl>(ld06Mi83`2j<@Z{|L z{Ot5-f1nBXTU}C62s$A%i@RCkmwMD#f;1qvsa9sP~b#c*qHfYa)-Ht6a8y4NC0jLT8rpN6W|UJ z^2JbuI1WIof&euzge1xiIBUstnHb;!))K94qJ{|28b@UYuf*tV6RIf~lIZO%m>y&7 z0<;aZ*D*81JocMX69ZaYs?Ox??Dkp62n&i5LYcwc>b0rsSZpxSzy(Cx8{ec5)=3PI z;Dg2t>a2PZWDhamHo@-D-|A`*GC3NnraHgUGIM@0pb={=dTx28ynS}A$BGWUNye#D z+g+Nf0$P1r&~0dJpPcDXRnvG-h~d@9TBoLba)G%gBsv~b9=g`U)6I-bM~sDs6IhN= zvqolYZ8MALbg6e_q|IOnpF2P11szSxp_EiqQ%h-51F~Y^xd(Mw37p|8tL>6HowJ@2 zvnQiQCT39UY<`=N&eeH>VC_1G&R;n-F#^RjF36L`NpX2ndjl;cPZgPKlWT_`ef9B* z*V!}N%uGn3i`WIJnbi_u?yjAQ6rRQ&7@RzP_2$hxZ{At!cj}b9g56JV&#W#z{QR~8 zTd$3;8h`7{4^DLi2l^~kX(bYQO-XJsgU`)5n!&dvD)6H|PTTkDjq> z%VA1gb@IO0a(mdX(hWWM;=>CNO7hEd4kXtID>F08nXIzd-Nz}k;$lhfg-dh&gY)Ok zj(aqsn)2Mtl(@KTmWY{^RH*2k?r;Py{`nsdmWKy|#`0ay9Hq1K)2K3?f|iz2CX%r0 z+-H9C#?sKt#_jbnpS3|zQE}MoNa@V$(4&`wTZl|H4?bO}% zF8BC5pWdEAMXIK}{N$c8dVUV8!K9Xm#4V1>#uLR^zj=fy8J$?DCLP4(o50~r}JPlAl2A{z3pw0 zGwVw|7M0HFMb|^lXB4L(%EZd=F6%PZY>tJm*tkc)ZM1xYanj>N?$rDQSeL*qev+tRJm zHjd-M{Z${(GQ#4`-+LyxFgu;9G6T0@2D3TPd*bTl`5s@OtJ4d=9C2-F8V>&Wgrtme zrLVWm(lmANy)(YH3%`5!ghS_QHPQBL&0I^-Zo{UG8;Y+QhGb_(*(GZdqw@l@P^7`^br#7S=k0+%hCnlz3A)`}S+ImN)PpocSzj9_S(r%SA^3#*zVDbho#7TqY&A0&5{TB`vFf!VuOwe0}3{i>J|@W5>#6&U*152RWAdn6H2x78>kvIS!xGZ4oAl3n4OF-<%#N7gS4q{?LEH#W!1_0nU zI-~?%3#cbz(1YKA`^FFAxWgZDP0{H*FovYpN4$)pm^w@?$!+1XA)*N_ETB2y-~oge z8lePYiiZIk1)w5{<`eu32qXy7UO*|}|A{37te(&!)!}y$1Po4B040co6`nLuC3=g~ zsv~azza9^U0H_GjAB|GtFa!(@ehJPud_mJSioU?Z!EYr(O+?;|fQVtcOW>yDnGtjX zfzRS9Acg`FGob45&%c6!PVFUu(gs$)R)YnvleKH%=Yh5^}ZE`xL3f(iA z0a*00K^G(La8*->{Me2(=h+KTJ*?rW0LpCef(6uJ;807-7v;6@s9D03xn>?bFZy5#P2P2go0Lf1)Ww^%V*NK8ZDn*D>JkXpSp7M z^#>1bUl?namL|p?+PC$Y7k2DD5R+1&?K^w#{fGA+zI81k&Meays-F4QcU}^BLSBu= z-rm&(n}fDCn}j1!xw=PY7fxThd3MO#0L_%r`0XzrJerzcj$UbV-{OrA|M zwvwlRw3AYowS7lU>(#sWmb!-~2W*Ut%nFsmDB?AAOwNtSb7{uVu?$;yxX=&^s>BXo-C%JFwU`StyoHX_RS{F7S}IVPv2ZTiCc z>O=?{8zyP~uIDx#E2ZY=SIK&AzIAnB@s-^KtyNQu6_RbJEup6f_`hx^PBf4=9jV?M(=$3>DxC}2Ah?o z2e)k7vv23lg9+KR`rg%z)5GEAtLMgjjbaw9yr`h4tcq7}cUiPjnXVDbM>BBmv$v*o z8h^;cJ9Z$xh$oVn`qu8hbLY%x$fd4~+xFu2BWd|%OtH}yo?Bg89P0u=-z26L<>%++ z6;<(|dV~B9yIZccLL$Jw`u+!3+bfwix3Oa9iw6t2T)uJSm3Q8}w$ST$%8T}G-WFF_ zLFd;u^-Y{Ocm3LOABGKP8NH0ksHrZaa+C(h`XJ#pu?s26?x~=rzW>t3jN)Lb-mS}f z8v3VI1%=$enOm2}?Mj`7owWJcT{&C{6gWGErcPeCesy&$;I=l%YRYnRi>kPQ)WZh_ z{D?rS6{_B@j&OIgvuEwfqA5OB=r)ub*q^V2Et!4l)`M41b-P?9){&>5-CxL4C>sM~ zb0rHMQ5r@KA2Xk z7BhL?(>JdybplY!N!+&WU}iO6q;8H(O-!$xJvG*9P^)G1T$FF9+Zjxep&9cHm@&Aa z>MxL41Dzhd#@;v9#oHIdG#hB?`7(cpRqLL=`Raw~4zI^R+4bDZ$z=>C-!XM&d1`8L zc`De5kj5^_giaR+kT*m>V&c=$8;-)Z50+ zpPA}_f`%w_`_`j{G^pNdduE2)eS^~@&5~NFPFYi&okwLt7()uf?Y92O>9KIPzsVAX z`Jv@!-+J;8POHXI$yqc;?b=O#j$ue;E(&@SJ zaCeg+J29tE)@C&0a0x-m-e^+!4ys`IPl z8v33mU!>dP?i?R!)8knh zjb$>@Kr7SQ(|pNS~oj*|WCX+td>B8EXskt0Aab#n(A4f(p92wWrHtZW=l<=54fi zAcPMroSKh$7%-}O$a1xKu}edp-JPwPiku2fBRghWmMK_IA5bGTEy zRepuaEKEH@adsK1s;!fAEn=a2`s74MlgrzrE=$U&;4nd}6xOSFbt*E{^z_Xwgxipp zB3pw)4Hzs)67(7|hay zuc<06uQR~fKxPJ-swvpl*U^TogV??h$xjgjjlKMm@+!6j28RCjK+ixwK0FA^0uYr{ zP#vp{7sDLQ7LsV0l3y6J}}%s*|A=!8iX!0-4p*nLL7U}pvBFyb#B14*NacomohQQ$6C2avQ_4g_(E zdIu_NAc`QP#YdAQABzX+I_?h7#RAJU;P+8f=peF;c`nWgKq|2)@Qwj&2l|xQ+JNRo z5Q>P@fNGMH1@8t3VDf@Yc3}0LSUni}mJ*C5fo>uxC(5S;T#4-6 z2naP8B4qJ^B!}k!>JM=Qg{23XRuirpXoYC7Vc?8{2G~w=lLWj8Z%vqtkt)d4sHo#g zz(W9^L0V6$y(nnh1bPVY3)ne?qyl*@Sa=wubs{ZxBJ5s(&2SNcUpqA5{)trvX$Zgs z(rJq#Oo@Ig!MGE|w%rJ)45)!M;>`Crjp*q?H4tu11fzjXN0^GBtKi9i-bS#rDk&)* zuts3%2tg`fw?Y4C#IhkaE3hF#b)uGP-GP<{I6O5vueu&Wk@!}-R4x2#_(6|T4S<-;SZZg6(%S6!78nYwnjz(THN^1$YA3 z5`@&Hlxb1KM!8d+%~ZWgKGm6isg`^2tfYFh&~30LK8ZsfB?4Mr8byhyc)^1D&N zVR9r|L?j)&q(IPs31~eOksVmzSbf;oupcw4g#riz>IGF*5^sM%!xnjmdrf?ism(2| zpedR|?VcuoPgjdc8#O3_^%NlbV7m&qOfZU}y6I}`9h)2-z%)q*-82X=)5__JmVvH@ zYN~qd@{)~K<3F{~CS+@y%=Gv;YGZq=#poTH=xpFZZmLaq_VPA;VLwap{vuCI^zdrn`O zGn7^tTg}Y9&n9t&1qTjMoZ}LL5-tGO@fc6h3)6 z`Xgt-C=U~M7o=y2hXNv#~(eOnvanl;403+n;*Y1BIDSh zfgnxK;3-*0U)WDGdm3f>@GGxG8r1%iXNJ|q<@H`OGa;@-q!!okEj>fMf!>AnODiJ) zXe;X~QeqAs&0;99f7~OtKl|vkRTEk~(WfgduH%*Me_~sSx>3Ruw9L-=l~UKlVx*D9 z)H+NOm}UsIGSXG=>hZS^o;U+90J~PkD^J*WG@HVxg_xUh>b*a|9uVrrFU&WW#+GXp zr8}O8X6b!{_(Ir<3Nt8)m4{OC}|El8?50|YqNvh&=WNB$}+PGGWR~SCB6)* zp!(qtzW(j3#Wwf)wYi4aBV2I_J{WMK@2}e)2>+JtmpoC@V@XkoqFM z!JfH|8|N3MXXnR5CLy&TFZ0MtyN>4-6;o&d-ida#!oPBUSkIDpyIUnkHzn~KWHbu9zI*x9;>nBG zHx@!*))~dLoP^_vM|ZxkY3I@6hI4=W>(x&G%6sqJ9B0KAij_4f2b1bL7bZi#k+roE zGgoMI7}&8}6KmxB;>we_nPXSzT`(+X)>M~@_?AKUumbBCCb2lpo&X!pN* zaj^PeDqA6=B&ITL9X^AmX?%ISNv<%NRqT}A@iZPC_8uJjzC)Wzf z(zB>y-|3sL-dJBdIXf{n+})&Q=O(A6AA0h?esO>^_4=hj|Jc3HKDal%U`^ z^Jlvn9CHuedTmL0B%US|l^jc|HU&G|yj^prru}-o!z!)HPReD}QZh12c#iS2m#XUC1d{o&CfKcPh0sG}t&P^3soO&zCRy?J_k zWH{8w&5KW`(u-n&WYTpmT)1-Wl~=A_zHoBLqpZrwOxf{ok9}`Dt+mIh0*>d8AKmU} z71hd=oa}_$T0KZ`=HS%wLb$6tWS3T^0JTz{AA2CNOw+q`W^H-##Nx^2xqirG=Vm3o z_?>V4@Bp`^tx@Z_@PB{(WL=w9CbK&E8OQQC4KNmSj$F6~@$>#JhnSjjB=&exd`wJ2 ziLiNM?cC|5)oXWeUz`q^#jLWUKYr|AH|5Lq8jWM=4`2Q5y%8>5sxt`lk0%#%RVt~r zZF=qW{Ai@#*C?c>9Z$*0ijPe!;JL@oTsXb7a^d=o3)4X(J|_03|L^B4Q=70O{lc_?Dh)xp^zWv!pmn~FweY00X$;hF}Kqaep51q#3VKCg=tgX$9k54=n zv-e;!RoyXvdhODkcRxUPu)oE?&)NKwXA^5BmC4EFp4b2HvsYc!B6}d@WF)5-*NEB0 zB|O*g{0#iV!U3zIIx9J&un2gL)M|Zue`M<1-FF|pvIKp1Uiy|Nx93S!<%bVuH?F;L zZ$?0sG>=S%q!bDho;$T{se53g&kvv^XS1VD-o}Tk+Emej7m{leDMbv6oHuCB{U46ZMV=L?Hrzd;7Mq%NBEqijMb`!U#OwoJl!hB;^Dpl)i z)$=(rM>o{-1R_&tpwnFsmajrmQ&vz$tFET8(3tEQh)klxd}(nA%`-;A))!-_jUGL# zLgYVnZGE6Fw^Zb4u_*)+qrW>~(?ZI=qtym}m{!iAz_W#cNGy;WJy3idnK`jIKi<=7 z)3CCS>_1i_s+ZC$`HsQKh>KlZDYe4rPYGs~QwOr528kc0<48u)4lk{!5ek9+Y6R@M zEdW86>2Qa?#jdQ*JeFLE3`$d7#&JXTL6OhNtGIN6<9|q?bi4E~ysjfwN##3rzmbHVdZm9w?g<-2_X60*qmpr+!2F z1S<|gzo2ac)(ZFV79gIXrHsl}z$H**R#kZw#H`^nA=g-1g8`4F0jk>g24YNtNfLZl z>R`{^2!yyB^iLj*e>0T_adXi5+P2Eu(t zm2Cy_6NFGZTr4ny!rUnemLSC@Mrwq`OK80S@q$fkBxyUT*@3T!s-}Qm8qMp88ho@P z0IPbG-ehuvRRBc=4AXPW$xxHQaRfmb+E(CF!mGSAwxojVT2zI;wN!~B$tEs3YdEs z4C41hDTpX(RhV=UdMd_5AW`E3@SrfoA|^LHD2|gx1L+<_Ybg}OXd?j0L71=vIIkg< zIqAcoM2F=jaaxgzWm?d7RT2SAoTE*L1~u#jQT?J&CMxelE(K+e9bUI+W01xNyoph| zqmK|pa-#rmg;R1=y@BY(fZ!b!O@Ukh?vhMINuLG{13ZbSt_K<}@T9@044m7jWDwqu z5)vEu?I2pf5EfHPRFcFo1nVXGPx4?esl!L(_TZR`+MSF_;Rde9!a-sG&1)tqB*gOv z6|V{9wn;4|PW9;d5!V)CpF$|0IlqA^1_-nbpo zv%wAE50FhEQ(%jpEJ;lL(5t~x*Aj^lbWjLJCaNK!!)n(?XM3Qu!ZMhUQK3r{-GjI? zLQ03gkQiKah})o?jylL=;Xz9n)Kx4dI1<3;6*j!Yw-sv~kGBcy8;S{_etKY-LxfPs z>=*qCTn8E%ghEeVyBTWG9w#YEam_@Z1*JLYxcFdV(1XSSR#sGjDB3e4`VC-wppeB} zmbh?ZEk_>{o-Dpd+XAfy$Z_cK#K@}vEkEkOgbyXsIYgvEEF>c5c=d!tPpWoQVZaR$ ziZZCjV2cy0+Haa^Xq!bhAi*^OW)y9pk=KjWkInK8PadiaFouTF1BnPk^M(j#VBKJo zBajhJF%%iTiADb${6b)XsCL(5I})*^=$IT25uz2*(J@*WcvjIK9i9bQV}#Iv78nGV zaC1PGgl7Iu5 z66_UpXF=iyai6f62tfkz5^Z|Cdi*401<+PSzY#=W^jfi~$@V7AUv#B#)dt)ILJk%) z_|Jrlk0nFgdJ(VjTgXnw5`kTMbjy6Rg3vj{1(R-GRE7kaT6lZVtbkju5-I_Zd5Mzb z5#8{dp)ik&MeCMGe3^}Ah%w+Bm@= z>rPnpI0lK{0Uj^jAS?qQW<0RT5T93}|5 z5EKD_ zhII0YZ3yvUf&dUi3J}&Y9U%%GaH4{*Ycz5ZSOad6Y+cfq#mXm}7>65zD0F*ph~UBC zt>NPEjzMWhh(g26hzNlffODM$6apfQZUDp|5^;#wJv3p+hmjsIY0UrsaRXZfXF^mI zh5%#gacB5c(zC}SB!2QR_P{$pZxZ(PP%uHH!|-C5hGW6ULS&;v0CcWHF;xXV!fqP=qjp z*MU`n_AxrDc6ieegJM{Kp|^_9i*6ru8%YetL&8b~&&{kMG9IK|4!t3~XR!X!qDBCO zmph)FR)MWbp8GdY75LLY93KINd@pu4q~!2N5)P2%kcH6nSJ+1=s+q7>T~cODJ0*!u*agwzUx zJID;ssD@|^b{t6^$R;GO88;bK6+&nsk)H%Qyb0`QqK=2}QNqv;*d@FM7}i44KoteZ z;P^-CM(n_d6a|1kL>w&ICC5+Wf#7!`a*>ZAABl*M84fv7(OCxfj3B0naUcwS+@RuN zy(8O%B@K~Sz<`LPn&j|;W(wJcB-CNpfE)#z4~e7}$s*DIK}JdPL4b-JKmd~dHWn!! z5fVt;3SK&vCe9_2xM+z18p%M(+QSz{N!Tw?%E7|Hfk;h8cjF_l3~;(OBCuc$V0Vy^j}1X4pGZrH zv=*|cXiSKvR|xA^pNQ93+0cX_DVdIFlVApejDo~565z?~0pcQf$0YdS=ds1{KjPge z5Gu$J0hnY=4DrN?j0y>RL|u=J3!{ku5%EKIMXc_Cg91330I9GvU{?X{CHxH*7QP2K zGaM!84}5dFM%@=N?SS?q{HLMeLXv*~F^IMW%zTJE5g9`e#2Om>c-$mkQ%Rsf1Jfw7 zMWUI2QHzoT!vcZAf^b5fel&x`V^*TPXoSKG@^}10ER_hjdfLp3D5DRE9Pn%5q(n5x|$zzl$z2mck zEpQAEc(lwiy21+O6g{FE796~yAy9%irWCN4bs{m$BMeRcHhZJV)eK*Ct<|ooEUa|~ zbooj2)+vnRMXqqz!K=}Q2K~xPnowMkl2{~Z>Gc`3aDN^03d-2^Mm|8IJgr77F|?1) z%}q~CPC(nViY~5Kv-7i=t*2L9<>l7Zjc|j?Gcw|&=hLl`@eaMPG2p35NTN45^jwC_ z-DcD2UEPrM09U5p@AY^0celbxN2jQz78RElC&i}F4DIc9$H*JMyA@Jer&kA5lq!XW z9rw)UOpVtr6EyY=nky;F&Y?g(N8%l5sVgW`wg;3{rlqgftv828d(B*yM5n4LOwXs+ z(27gg8dsnz7jmbpKZTbH+YSB4_k6Y2UD>bdkdup!5Gj{Rnl9yd6H;MAIS$aDf#B@AbmArXr zW5y;m1cF8(bXBYX+sI8O!l`ZP>S?pV&$Tz8r%~0Ny@u@l)MghgKG!rg;o=Kg&RrX6 z>RMSp*&(4YwK`^2L5)%)rdP>Y0v5HUZ+5IzE7Liwx_YzARxjf6Whxm=Ns#T?4fRG> zn;#IuT0_9E%uA_tnP{oS4V_*|6{~S@vcuIrG1G73RnTgoe_t-rYGKVJGg}oLiECh_ z(>N+$rg5mU7T`W>qtPQ@QwCcYD!9(oQkXhp`!^L7qts@~e%>a{mCx_qz{ayOEa7v~LeeLP*>-iU-nW9P4knALQcnAh+o9=O!n!-uCb|rx~5hv zuPZO*yF1&gsIZ%ydX3)Q)8A$w7vE&XOc2>mgR`|6sRptYwTR8CQ(BtL+!A)%i6KdD z?Z63lQBM7Euc9zt(AHz+3O)TkWkn&Ci-9KJ-5a!MP2gBaxDrQqyS7dZ6?G+66@sAB z;C45l)P}kmhX!+PXUNT?@OrObYAPu4t&i8F3r9}4D|6XxOOuXDmI0Mv^8u zP&%6xpvD6SL^t|vRq6E3iwoNL4DTs_?tWTiQk8nFDsXYuqzzwOu$5<4XzZFgMF&u5 zcAcoYg0I)|gpU3mheF}%>#-@V;pyIHWW^4^@rw#;8m#=JgXIISuDi3dnpfI$5Amj# zY}p42o0dBG+L>1tY|ILd&MalhIwt#^T4=aag`(pHZq-&~l1-_RcP!A1z|c>_x>{)%HO@pQ>2+_?;mZ)pu@!b%fp((WN}Im)G7b z4a}5en$}`&9$q^$2}eRn&Vj>Jn0x10 zHXcqZ#l6?gS<7YNsW#T}<5WHyPJys&V^C>44TPCJBWthSUA9S^M>>^NEV-m`-^=wd?b&r6tIeG5V%fPYpXnwR~aq;U~6M44m!dRoFIO?~!}X|L)x} zyUjN{p%)nK8g}84BPA?K!tn~*=!rptz;@!|gb!Fa3$G}@iZ2$a-2Iai{Vh#&|`NNc15(td`8<$x1)LJ z;+=D&GmE1bUsh4dFhAITFqw&IP*I)Jjcmr`HFJO?gn#GI()#Of+*k@}sPVBGWkqpY zpF5HdqGXP$rGd+L&RtoZTDf&&c{pg5!#)f4P{)hZhRUNU{3aI?!`5yyk0UpjTSiwe zzxDuPIGwzVT#-qU|MH%qiX)qMq{$mN6oz$ref8Asw{I*=L|SwZ%G1e8;__4`PV(_` znG)>@orX&(5Cz?PdRpA-{JI( z#N%ZRf%d@2h3jXQ&z+g<9~v9(Ym%@8!lGT<5-VY~3MW2JWtG6_Y8{xGJ8|~a4}bUO zO3=~T6VR0AG6a=LF?$ajJ(iGGrE_~b=dWB}-?)BZabae{KL<Y;RhNCZCRv3=vJX7fN~;f5&>QYqh_Mg z{7=O^(sV#8y0%V+W0Lf3ftez8M^uxQ^w2FvC<&TL+Zx?S9NY~sa0Gn{U5Y5e4Baps z=(rF$^zzU~L~b7~rHB)sKmuY1nNdV-y`^%k5x#$D!jRLA==#F#9Q8SIMHEF_YH0b1 zxG>2?gQNjFI6Tfk^Cx~h$nH8W|LMs~G5md6|Z%FwQrME;eW~4iY&y}Igj4F$C zT+zrQ)eg}eHNa;Rg$7D0OngX-j`$;zW)^gB0YOqV*xhzDhfymg!lanf2xM9_MisE< z)ELl(5-Wgcf>|fpw2*4lpbrm#C6Sy&Pn2**?U3tqx@}4p(>QZ!PM4NY-8MHr+HRCE zsf>o9%QsfN`FrEp{;|>FaBsi`EC`pUZt4P%8@EY%8+fGX(rCp3xycjg8|)->i^kCC zXs3}wDd*d|`a=$I$edk+UCmHOG|DQn^J**|a2)i42!cHW6Hh}k90eSD316yrwb)dE zjuFtOQpl6&h>#6YD1?8zT2WV$l+0*9ebQN8DDR&e_8C>YvK*?)+wK7XvMtmTveu*@ z%c_AsW^?z{DY$Y)nq+YQgKZew+BY#X*;8K>mr&7s`MtZR`&--=wXlv|bUZF!WWlZv zd-Q6ZSzQvJ!)@$aKD#^_9$&D_`r1|0xTpT(N6#NfErrLN-4Q(Vo3FqA-*c_fin1DL z_e&VXh56a3Y1#Rd8l~N%XH%JSM`ZQNt=C__zB=N;0PoOC&-~=aPwma9me~hZZ@>M{ z{Tmm@HHk54nWcQ|GKB*6a!a3`|3}37h1&Bj6=KQ$`$68f%EsT&v&{M)MGElQZQ2Ov{YwhmvQPt=Rf*0 z#M2@kWkqJvk*z=Z!7q0vR=Jmd{l%Z&SP9BY4*cSOo<><>>w!zg)6Tb2L-#<7THkNJs{tq`LajZS3 zUVr1#sJ)I>k{+}Fc%iU4GBX>2rCKRn(tP^Yzkl!AREr8yLfIKFZWRmJuA20d}ZDztt?89Jq%Efy?cCqqFr8=UtX&V-~Q;`SC@jCDh#gU zQ!;aM^NLvw!?*wZ`NubAn`pZq|M|8Io^$$Dh*xy0tMZa#k7m;34q{VKUq>mTavQog zKK$_BnZAaqr2V@O#--$xRskvJpSynlwX@@`!i?Qp_oOlEJJxRBJRP?2%aRWqNTw>9 zLfu_HtDH^es$IP+_uhMOdBV-hjolZMm`ANaf65jZU%PhW@>0L8F6Zz*%ywOquiU*3 zK{XL%U6QF{=za`_%)GJ!s>C^X{?UgIFONCvat>_YzB|6KMgeow(7@R2{B*e8BBAA{ zBxTdo!IK-8PLFt%jO^GWX;o@ur7kVEqL>Pkxa%K(_TjDBCVu|@Up@ZpzATp72DxJh zqIv<|HluhUuC>C((B$F?qK;6WbU40%XX(Vm-y&gDvJ~#YwL5RzS{*R4GWTzK;icFD zfxUP7!0p_LRz#duY4B$^Sl;R4Jt?%r^kKTW6$uBKE zzVqo{ZaQ2bboEb74|O&-!c;&7r{N}-SuKDcrdUHVUz4nkRU?LfGHCMUrBy;x*U48O z-oHBMla?j!eBvihzMRUo^v*0y4*1N7Wd@hOtEUGKTT#d6h;=X`x9Nym0tyH(Dpdws zX6=~1{P5RrU7Kr@Q4)4O{p99Dg%a=B>ZNn@eJ+(iRPSgFb_84PDt1{Bg()_;+}3)j zK%}y@27E5~SC!D^&i>Wc-+gp*DX1z>*uCjj&+SU2DqDsZSLP$F4H6M5Ah7z}Hf>!+ zQ8Bef4$Dp;;-x6)o4jtLq`J72rSc4)ef{CR^)Y)*+MXAm_{9r{@`difg|*e0E-M7P zO`c#!03M>sT1qjsR&KC2xzRUoL(AKAZu{o|p z?wdZhaelVbEECEayM1?py1)ctKljgT@wq_-8K;=r-0QE zTDbYv8<5GXjNAU=b1xn&5c}pXY@D0$H3Dujc;eK;C>CvDe9Ymr3RzPGhBYHSZ5D+9 zqYGPWTeAsD5!n?A@6hR6_g*>S<7FM#{LE8Z5~^*}m#>{3u*v!2#_pA?S62EA)We&f z-x`y}faJ!VI~%ip1q+UH$xL?B%z!CT zc>dziM$DLT{CGyw&k`mS(RzPdN>SEqd{=0rQZNJxL}>?g-hUMy+~E$+X3{djYF7#y-$ z-%yi#@_3eQ;qdwPxJy9M5T^|#d1Z~B)c%_{Pc}!)`DqvP^`WJ^5AegP%{lkUCug&3 zI${eu2M_OW%nZUFcbSSl#j?^mf3P>*+v$rZ$NOxmD;F}#O@WEE{YU$&;p%h0Jeg~n zeE99x2Xmg9^pD?r?~|())EFK--d~*<9*Ot6tlEOpADpTTEiOz94PZ^2=QL`&XP_0hllB0qfh zU?Jogx_Rg3XeWNIxvHkFf$5Dq+iTZ*8cIu02-|!0`r&*}ZRVMiC(m9jHU?5_n>%}V zmPe4Z8}X|@e)~gJ{Pw)h)G>LVqtTRo4jaye#W#KM_3PcS)}jk% z&ZnKfT%ZpxY^*KLPEmHCo$JzNoXThl1&qaIwz-!tw-aqe=@-*7OAG^BhmY5zhHF=@ z>B4J|U%!63HD)XL?8K?FX@KwgQd` zo<5b{FnaL%@j5%9p8x3MD;1{h!Q}Qg zzx(#yOw?KV;a~mbPs=BEQk|CI(t5HF{IRO0s=C(bnOs|+o17#zCI-7ZJVUdq^8?m8 zoyn{#ICn0$wmmqp{^ZyH@!P|-xTpTa-~7c}Y1Z*X;^vd@zS@kp<(<#SO3yAe_RZej zytTfzI2rD=;JNLM&(45+GFaLynw*O{<&E~f@uj;jzJ0TQqu+Gpt-pKcOjfDZG4kZc z-|S3uR2AiCrd=s*@yFsL6Zkm~(bjY$XqSw;>IzFVjat>`*~QxS?(q2H*8Y>j2dgo2 z#=GyHxRhIN^v-|xzyEeOVXrRCPP>p>?~II1Oiax$F?We!gb1%ELgv!kqM8P@^(q={ zo&MO|)`O?dzu21gsZam>_@&G|P5a>f|NPD?F4?@aldYb#50FPuD=4N%zF+MC*Y{^s=;tD(knjX|AtIlnRT;H!r>ClRGLsw>K> z4S43Xx&ufOj!$oY_1o8*6Zm7-7hOJi>PnHiUT1U+u72_A&Ea0k-O$)-DLMUVMt%J8 zqva90I&Ler?+wk(cuqs8>u~nYJo*03?rc2dGS?Pf{OE&E({hVcjh^J4XRi)-lRXYk zpQrBpu`?y^8*8Znn#*8;xY}rItHr&pM%y~L^Q-TlZQWd+jbjXW{@DA!Jbg8jJRkGY0tNlwZNpu@$!asGtMtC*m%o0#yRosck_=l*&wu>! ziA!iQI$}$A?rtw9M@D8g?kx5cetII`K0MlMr{*?3oeZPiSZi!GsnRdyx5RI4ucW3| z_jcyOma@yIPo6qU(6j}H6XPTJh9_Qby-19Mn+DNx}`gg+~nx>l{;7fF3nCvT>65uA0E4? z?ut#s{cV=cfvH6hw+ev)m-(jFo}t7<%-_~ntI?t-T3K0d!Z#bHesE|KIP=Q747jfK1(|Il?<>j8VSPG_Xt;Oid&5FGJZboH3G36`lR9D^A*6p1BL zGgFCJh)b8Q{PW!6`nJH}a4ZUL0RIDUc!DgW;nc+eW2Cnef&UFs44@V5Q|WAiaNz0( z^g(H!(t5*Vtf^77KW$wB+J$sc>3wqf6QWsjBe!Nc?5GmZB<8mPpb0Yz%?PpAP;ehd zS6Bh&althijHtbWeBld(Pk$glPb`cR6`f0NqYAD?94Uc@;TFeGuA$pa75@nJFC_{{Zt0bLKW!bPW1d}HA z^Dxl_HUrc{oEBuTY4W#=ONOBH_+n^ydTIr0_4psA#8qa{l-|Z%dAyv^j2?df!uaB zY_819NdLUT*gdwgv%NftNG>wGqY+PYMRs~lVFh&h*z`R1iaZN~!ZdawC(B+t1+l@T-s&?Iq!P3Y{OA$LSD36;k@Rom>{tyFJIY1+xp^7Mh^>dJ(_ z)okyby7$d@FLzTtbr;_GhkyK8nqlh24{{5s%eY*q?+QnIofcd7%=2G8-#v{hEsX~qdeyaa$KL<=l4khTfBoigb4Z(h=CZnb zbaJ?@qO_)c_E$gt?&V7RXYagosy?>1xtxp#twreJT)mi4<-h>^4?nzE8}hWGO zSENGzU~~~o{A{$PB%eY@I~G za<3LQn5|~Vz{-;^pKeS-^fPNpGL9cRm7#`6)Ymt*xUrTnXI(QT?%bKQ7GEoAa1W2u zPnd{y)@OWl>|&|j8J$^ON%fg()H;KR8p9}npL2-hNo8+k`q(2(@|@s#xuQ?aMhKmn{CZ4 z?(wCiRM=Kqttu{7%brQCt}iE}h(ZqqyR0xeG4u?AEAMI5RTX7lLK&(xm{{D{zL^?J zOpSXrMfIWCRIjeE%z%=CsW-KbeE*iv3tQQN4~n){}fX5xM@KD5p%O@L>) zW9=lm+PnMvT5BpA+=OkcOU7e98(K^TO<74zYjolEo!!mFobWp4%$G5{pJ{?DX}9N9UGiA}-YK{7&?+adK^iZg_np>_)Aiq#&=T%76v;{?6PW z4FA!fO|3GxeC<^^Iptct-qM>|Te|_-Cqne>?;o6)8YO)am33XS9s9<1Z)AKt z;%RNtX{xAM=sjaAcON~tm5O@FC^|F+mCoU4TUkzFy|vBO6<^!iUP@3JHkW^1YV-_{ z<1OEs4LRCfzV0?{WnD9vKzB4bdp*|YwKo*!=GR!dPFds|CmUDfHAN^AkQuB67D7<21jMouN; zgQL@P$pHt|e_LH)fu;?bGvtMMxU;FTsaaQ9R9fHKGY}sq=^ga7qODL+ND>{H*|@uZ ze+zC#_D3J2)%En5i;J4V;{)wF^h;-^#$tm*BO`sZnYHzGRNNr6#Brnw2V8&*t)><* zs@}oW^6J7Ap1wAtR$ZxS@eD6+-GB7x4l!AE^~5Jv8bd>!wN>WuR5Ic)wFk#isnLEk z0MMvvsZ~`W-T)%CkKzDSXKZ>~tu1^4Zy1ecob~}Z>vGab5hrX!4ZiBy@Kde92-gb1k8-nC255JPiB)n?rwah z;t^0%KGG=QhX57YK)M1p^hHH?A7e_zFi=6hLfJ<)ABZby7s4f=J46{wOf7NA6?;OG zUr=^9+BNeDKxNSp!8MD7gsBAf3RByJCmcFp?U*tObPYKz!Pl{+1=uNQS&uM21RpEh z2GLdky(joMNl*C+Kxx6IOBzP1%YRW}paUjD%ZmgRnm)N58kpi83@lsN28e_4C6F~I zH>-Y0pF`oWAp8}qKhRMi*H#c+z#0W6E&2hlQOJhdMWuk8*de?Ufb2?ME_thkialF@>OS1^6x{>aM2^h-$dL7XOWE2RTY21ddg7Euqe z1Oo|7zApKpBzAz-P(>iI2c`+EQt{LjfE)NfQa(YRGRB++f?gyU6e&6AYv8Hy`Vrvt4zj9fVgnX75iVXetal8Q1r=oRdHnXyTAkUJ<6jYkJH;9 z3%NkY41<*D?u!icV%94334o_=htn4UKB9m>TboT?;X&?80u1ze9Y}(5Z4vGPLZf{? zr@6^Yo2b9rVaGBIP9feAd`cyo7a(^ZJ@zIcD2d&d;?>8UiE9Fvovx;Ot=SWBs50|R z$$MMF)~4?HgZ+7LWloXK+^hjQV)umuPHRX1G$&7LBsMhQHCI!gEiF@V2kjqA&CSox z&CjLA2I#-p>PxBpw)>fiNNjX`bifOMTVJQHR2%F->3wUz`Tb$4+Y?JAdNgO>`{h*_ z<9f3%d3%3vd8{8b$0 zDR6RaVmncMEw|L@beb)p-T(gMvjlEeiJ-aY#Cyjx3rbaW_VJxZU)LqG~A6JGT4% z4-d!egudQZ)#bC73sjm0OJI8M$-&M|d5hM%DosOEb3+uF%@t?zYzO?KjVt!!B=h$lqLaa`UHB8V$3N()potAu%M_^Rnx%z zgjU1k)GXd5gYb5Mr8ViaSgzpT=AbgvAI1chDurFSGD`QA#V=i50eb`Yr9~@nY(dVm z0J_s1b&0O@Db`4l&l!=5&U>Of!ho0U+%M9g2DL{`Q(}p zM4Dqs#00t3^@ce*Fpm^tVQ~uPJ}5jpu(V>lEyVzNFJ1@f8gB1LH*S$PK@3K5Oy`~n zoL!Vy_%YE)pzQ=5LKFQf5poomwaE3*HtK>bCKqnN=>!^tEQ#oh3ErsfTzWcON4IbV zYfC{v;SWl%7E5IoD~NUp_(xZBxwUh<7Qsj=7>XpM#8Qc*5|@=43m=BxklQmQ1n$Se z0HA&WxLe`V5dejQ$crF@B(wvd?i@t4%lWW;Dsk4ci5In?$LUHdo{sW7U?HtQNI-7O z^uVZ7glmYbtQ;=b0={PSMQxK3jkgPOjTjXlxz`hSDf@r}7aoR?$KhK%5Q|i2NDmyA#`zm08<*2bs2H4coX>U^gJ@36VJZj=ifD3t zQx6g%51&&OjUe2qbAit1pMt?um`MyLPs~g($YT1UP+d6S7%VwNWtGbr4Xj#tqdYh( zlGRIYf?2K|_jKmfCyHBAhT&sC^AMr|`39#K#~Cw03>Ky(Ym2v`&?N81mTP_WjJ{_y8|I5*{GDv6d1fx5ERlBVUivZ7d^%MD9vQ6f~!9Lr)oV&4$3#6KNR9FcfMR!gELPv;iu7|T!g0|5zY zjbQLuUBU!Vk{1>xLj#^h*?+?BQAju>f+DL#NlFQyLWQC3Da%xnSZ=)%epo6(pOr@o zC~~x-mn(Zg(o7{Wk|CCjMF3`k=zvHvrc_835RV%^ftXSA^h}448i6Pa!+=avqJ!)W zI3gWRXh8!Im?c6<;2;@8^M~Clr9VlJWLbIaPzuGfRl+Go#YY84;v{1(TUKH7iMcSz z0gDIf0Gky_Os#oN;-9PziSV*S`HRphCDUUO^y9t46_FWZ z)FCw!;N?rK%~`-gqTs_Y@MXEv?DqB-2OG-|u zyseZG*$Z$=B#Txk8WMYiv?NtUnG6Ce1TSby!e5q(q;df8Gok|29m=7K$?6fMk)uo! zYerknFJW&0_6iP&>5{B)NNi#b9u#gfaf6M;#7Pn=`oe#jpXQ66T)r zn9RM9RTw!rdKqdZu(B=0dbLv#_u^zC2(hI2F(hnCz3gbj&LCkET8~Z>sf-gRp;Qw8 zSweEZ*m+@yKBWnq>U5F)7gwfz+$wHRt z5d@r!qXc!CEa7ko(}|Jgjb+IZCzWj~uPHqYmIM#ToUjd8^`QR|dL}n<*eL9>+LZvH z@TjEx&MZl&25>>tQw|BvbAFA-WGRrp%E|6=DCTg=HS6dpAcjd_fR&2*87}3p9*-hE zTqSN)JQFk06EMF0XFvJr`uRQ*s6#1~w>H zL4NwDBt{5Xf*V!(9n>8pNn%{kv?IL`Vw_B)G)m-TVtNU$!cdL!?6TxVZBfza=F-g( zA+%g_Gm_4uIhN}+%UY60K9NgYw>ctZkBc1npH2-tm!W~Ugu_c<<5w9@T)sup+a}h{ z^pThVe)niTnL$aH@n3 z!r2yD4PR`APVRjQF-{y9>DkE}D9@-c@Wtpu45Jl%t+4v!(3Mn!h{j!jn>S?_n-JPq zIPy43xkDj!k{(I-iDk85-}$ zA*2xWB&8d`4jc^JAcpef$M87?k}D+%q9y8vgcbQj5&?;Kd~}2m!6}n&Nv^_yPJ~xP zE%=Vy4&b+w$naq$5f{@(VRhPBGr}sA1w|4^XBO2~37MVFc1vp)ZzjXwaf<9n2kC)? zJFgQ&n}jf>XhMJ#dMsHh%Yl6 to pass environment variables. This is AWS Access Key ID for S3 + s3_aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY # AWS Secret Access Key for S3 + s3_path: my-test-path # [OPTIONAL] set path in bucket you want to write logs to + s3_endpoint_url: https://s3.amazonaws.com # [OPTIONAL] S3 endpoint URL, if you want to use Backblaze/cloudflare s3 buckets + +router_settings: + routing_strategy: simple-shuffle # "simple-shuffle" shown to result in highest throughput. https://docs.litellm.ai/docs/proxy/configs#load-balancing diff --git a/tests/proxy_unit_tests/example_config_yaml/langfuse_config.yaml b/tests/proxy_unit_tests/example_config_yaml/langfuse_config.yaml new file mode 100644 index 000000000..c2a77b5ad --- /dev/null +++ b/tests/proxy_unit_tests/example_config_yaml/langfuse_config.yaml @@ -0,0 +1,7 @@ +model_list: + - model_name: gpt-3.5-turbo + +litellm_settings: + drop_params: True + success_callback: ["langfuse"] # https://docs.litellm.ai/docs/observability/langfuse_integration + diff --git a/tests/proxy_unit_tests/example_config_yaml/load_balancer.yaml b/tests/proxy_unit_tests/example_config_yaml/load_balancer.yaml new file mode 100644 index 000000000..502b90ff9 --- /dev/null +++ b/tests/proxy_unit_tests/example_config_yaml/load_balancer.yaml @@ -0,0 +1,28 @@ +litellm_settings: + drop_params: True + +# Model-specific settings +model_list: # use the same model_name for using the litellm router. LiteLLM will use the router between gpt-3.5-turbo + - model_name: gpt-3.5-turbo # litellm will + litellm_params: + model: gpt-3.5-turbo + api_key: sk-uj6F + tpm: 20000 # [OPTIONAL] REPLACE with your openai tpm + rpm: 3 # [OPTIONAL] REPLACE with your openai rpm + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + api_key: sk-Imn + tpm: 20000 # [OPTIONAL] REPLACE with your openai tpm + rpm: 3 # [OPTIONAL] REPLACE with your openai rpm + - model_name: gpt-3.5-turbo + litellm_params: + model: openrouter/gpt-3.5-turbo + - model_name: mistral-7b-instruct + litellm_params: + model: mistralai/mistral-7b-instruct + +environment_variables: + REDIS_HOST: localhost + REDIS_PASSWORD: + REDIS_PORT: \ No newline at end of file diff --git a/tests/proxy_unit_tests/example_config_yaml/opentelemetry_config.yaml b/tests/proxy_unit_tests/example_config_yaml/opentelemetry_config.yaml new file mode 100644 index 000000000..92d3454d7 --- /dev/null +++ b/tests/proxy_unit_tests/example_config_yaml/opentelemetry_config.yaml @@ -0,0 +1,7 @@ +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo + +general_settings: + otel: True # OpenTelemetry Logger this logs OTEL data to your collector diff --git a/tests/proxy_unit_tests/example_config_yaml/simple_config.yaml b/tests/proxy_unit_tests/example_config_yaml/simple_config.yaml new file mode 100644 index 000000000..14b39a125 --- /dev/null +++ b/tests/proxy_unit_tests/example_config_yaml/simple_config.yaml @@ -0,0 +1,4 @@ +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + model: gpt-3.5-turbo \ No newline at end of file diff --git a/tests/proxy_unit_tests/gettysburg.wav b/tests/proxy_unit_tests/gettysburg.wav new file mode 100644 index 0000000000000000000000000000000000000000..9690f521e8443f3b0db97b1467a9d1fe85d964c7 GIT binary patch literal 775192 zcmXV(1$5{N^sqjR_kY6Kre@hcR@x zyA&x_mw0lM-1~n&;lKMzliZwp&Lf{k&U4OTH;x%KD($dKnKW$T$Ooo9U6Q6KiixY$ zL`4~~NKwp6nsVR0Pv2XA$fXQa+VRb)7=~T(8xg~)W|- zuXLyPVLW>?v=%~#--sG5m7YphekoOAh7Wo!8dXL!RCb|1uTe*f44y89lWcm+V!T>o zxAB*;#MohMH1--t;Ui|K+)q*3GfKJATj>l%ne>0b*l8>?w$T2N;WaMNcO!itHnwx^ zrSG^QoMpnX1D=zW5}s}e-^IwT9WqT)QkAw!4`fh8k15dHk-iHQ8*S5-s8M5VGuG4l zQRogK#m)3v&lm@d4aN_~Tw@-iZ$#3kk-MLUibPKuXVF<2RHh>13yiiC9_G^TdE*rF zT3~!;d}}O6o@?Q26~CT=o=9iX8wga;5jg<}HZZh0j8T)qSX|5X?r2)E^ zAitBK;R54rr>AOcEFa6t3^Q)9Qu&Al%>}oXY29?G5ipx-1Prqj5Qv(;r@zNI17QsIECwNLh$0Z=m z&G<52E!b{?gM1`V!e6^m&r`>Ex(*(WBk5w0)eVgFW6Y}Wn$+d!E^Mw6&Y91?`AAy9UAm9f{ihcfY-hF_j+CtBaboL z;$`-u#f$Ke0nZnaVjaANY0->lZpN}ZLRl%kpg*|kj5b=qK_gmN2OUibpC{3xksgy1 zG7{@(PtUzU{}7()gBA71i%4XY`?=up0QmpK_#4iu;cX-D%Z%B^EUwMm31bN@hz3pY z;)mKwII6^kni9Gf53?GYk3nBH5*6%(k?^4eE}|eO37ZKauOKw3Xvsj|Vv%m88IC0G z#z9I6oMbVI54oL0T3fM&W%!ZLj5m!>jNg%46@BG`jCg{}BS<$HEOq8N(N+uOkW3Gy z*!wkH=0nvgA1y`cvW8j-{qy5iPKzY$TWozk}hg z7kClulq(b9cN~0-P##bo!M6>koyN!&=;a{#-Ht|_^e1`3QsXP*HRCzsDdP*{A1II* zxu4(nGwvy#F*BwWK74#Xh#t>DuiR;lyH`*#sc#CX2zZ+Ti<%#Yf z!>)LcsuRr>)2jwR)ogYD9+;1?86}F7b*b$UYez3}KYkP~H>oG#npa2xcve zF&Ko51#2U?JAnH`!PcY7lf1447j1Z6@(jtW#A3}@MLPYpfI>gxo`Hf&Xxl*Slo)=L z*t>$*D&FNhpR3@fCDQDW@O;@wA&XJH{CXMrDIl@~bcsJul;ilDI&fCalV{NUaWr*_ zJ9P=)I{>_2pNL-9;bFTX=?LRVK2e2?8tLyc9M^%{2%hQ$ayiE96j)e}hG!U4jOoV9 zWJzo2I}L3%f}FjyiK0WvuKeKTD5yGvou?s}+=ND5c!n!7iyo!!($n+41T!qX| zpuZ+K5Zj0$olV9Z<4-hN2S>-jZar8s6Ayz4YYaiFWd43oD_(sQI6n_ZN07-e@DStj z@T+9qLZ@WS9$FkmTFG!#&gcsEk_s9#5*dPlx2wj^C1>h`&f22w40KkChVp6K38Xv* zG9Kc(7i*n{hbl~zuQAd%7*C?TF4z%_}PvR+C@x(;z?B*NXl|A|X3C+Q$foX;qJ3&4v3yw4VVFHhygbPCfj7f)*E`qk$35BqWlK zv_@cW65WPDb89@64H`S~v;j5*M>4B2qb2bOI-b>!hQ;$n@VsfzT?F;L(VmZXK6CCOpiAmwY&$xR7ul%k`K$V9Ru$rVD3 zy$#BKA^-jvUoN(~nYqXTbnYPA-^edNCt~?r<5O(+BXac*iKuhX#3jZS&rr#0B^>?) zEGK@2uplWNgb41O$V9xV(9jL}MzF6MvZypP)DBD*Lsx`n>|{Pb9OPVv zp3872QMd=s$V|h67HhG-zG$nI?DL8a6);|oFDR$)TgmY5Ad1~hJnoL}{2<~0`jTw4 z777%l2K|d~s)P33a6O+W6~x<@C9XV1iO{b`%P7=eMkZ^Bw!bDM@h?wQk^Sa_lLGYK z4a<8Je=-~l7GZn6K|)Wga{^ND1xGCs_$Y(Fd~8mv&kepD=w71Z8ss#WzNObac%Gk) z?|AKDY>7=WtKNpTzcyY+^4}0EWfpr7X@7^5KZfo_-1&v5IS)+<`kV1u;x$r1dG zw+{?9qn%thX@$ltWVEM{(=ojF2Ch?}F9Ypd0LQJ6P9_|Lk++OmhHP@UBQtUp8?u1y zDv&k@4)&r610*znr~q^JF+{2HP}T+6#<>$jYjOUjBG-{nJAwOYP-jCs5@{q4FUGoM zuGN$09!2}D$SpULlWs|PcO5G{g+Dn%KavG?gqo}AX&U*qp`{5 z)N`Q1jGiSvq(XI&RzafxORyuKG^lQehaG}Mq;hi+-AIlbWt4xQX%AAG4|;a+n*v=@ zCy*JjM7E3AhuD`?Bg(*R9T?q+FOd1mUyOec&F$mf6_1;a>~B{dM0;I%K8T#W$WQ!_ z&L^3bOI+N6mfi5s4NZ;&SKXl{3Ay&-a}OjUd777zE8+eaa_x#G^}rfK*sD|vGU!`H zx0P7TzqF8i>k?9Ji%tuny$s3rp|@+W{;?pa2rY>WMv$?L164z~R|Z-{lj4_5e3C4v z8H7l@_t2M(eq2PL8svT+?t=-9tMnxG!94KMmiX2Yi*WI5fGG40`S9PopGGEIk;!@b zbD@EL*x0jFEFMy(z(sq;jf21#zvaNGfmVHJv?DEt62)Jj<#WnCXsZqSv?k<|g#^1Y zVm?$!)^Hh`i}+RmQYEJ^OYkGe6#X1UKOY;9lS?mzo;A$qPBC5=at4XylIuz>pb0vK z#=~g;0DrH{GU{n5H9IfpT91~Opk2Xs7@6loK{ov#<~j;eDv4Q#;dMVFtOMco3E!Ck z-FfI!stx{xJgq!83SCSlG&?Z-3hgStadwFY~Yip>_@7sAad-uEFr4PPs>9GMLjqT7Ag*de5Il$X#av%o^A zkQ`WMrya?Td(nO<7#@r^uEPU%g^Lon>H|``V!>1Jj1OWzEy1A)SxXJK2Y&Yk>}L>? z=z^w8v4TF_y^^!Zd_ywFLtOj7htyb|MCX&}Q9QIMF`tq-n0N-!u3?BQs%T$L)?5nC z3$dp#PtRelE)lzmHc~Ot@P(Tb7@13TbsmyA4C2HSix+lbCo+dFBWg$n+<+bxFmok3 zONCPAR~~d(iSKjZODY*rVt)k`%d9(vXAblEBzGi(&*GlsjMeb89(wkGcoRLu;6(D^ zBOvP>J+(v2Z(*_HK|p85>x_*Akp3nlAEoU%p4|t(k|VZ3N7vG>C(@pXoxj8!`+-E> z>W1eG?#q>jU#@}Ex!Ax_uqd^3nSsWTR|7duFSOhj+ytRTGGiCCdqBIN5xr2}2&H1% z$9ZqSXJ*1jCe|aB`TvmHQD_qGrIN6p81*M|y$Gc;%W1$C|HxWo-r10_mUJX~9u1zOH>rMm7~wK<+m1YCW+{~% zE12qz%p?{yz(X7fm290+Ek+)-cw}IM0 zNVz>!cH%1Im(kSE|4(@WElr|TDJX~&d&FPwfulW)Q4f8`_;JbsT=TVyD01*(A3#!CN-= zG87ER3}14^Uc3*4=0dbs3Yx~iQ6F&B0gQA`#O%qS>rU*h4NsR6CB`M@D8tF;q$1KE zTg}3!_vDvOJl_v%EQVS$mBIaJ%>Dl1(`@q4`S>D<r;#Oo@&B=WWg4=H%O zj?m>ySWq2Ss^X(%r6&pdOoPfnw3MjkN_a*$8kRa&C3>zV#>vW!P$Ql zWvaod)X)FG@0{Ru39TMVP*h2)16Y$Uk!7}rr(Jlbg-B#4?G7WsgGfPE_JUwY^3+1G zHW*8h`j^Icsprd_uz^oma4mD*9CRcZh{UL!SmkMW$we9x6|PjgrTTjmT24Zr$Uy2T zCgjndzD3GuR4T+KgfdyFlNntay}CfHWR`AZZiUw*^xXk`lqO^;s}K2L@LIBs$FYv< z$$`gUg{^p6>blo}PVq3Epx@2iuDlN?f?tJ3WtFgyUj?PI-c&-nt3a?+C)`+V6SPZa zC7DW1LK0F>aYB*gqSug<-vyQ*U~cvhJaj?|(!(I0EYr54 ztoT#OysuQEJ$RxaWGLg{tsMIr2wh2_FNrG)>e|DP3$CO_+?uNu@+yJm0%GY9xNnDS zCC3>Gmg8t#VvJOUPD1M?yh0s+Wo@UKdwap9R4|UfWs5|#s6bM6Xi+@2_*Idk)HXs1 zOOSbRB=NiCr@OFc@u?U2yNmuMf^EVUU!a2eEwYjPSdb`{B&h%&L5fmq7j0A{3q66r zGU$|9xkPB`JHV$5aIkfBJz+{WLURiaWc;oFFpd_-U^G6Tw9Q3W~^)|ZI8sI zdeNQxrC@O+5pEKZ=uRx=;Y56!Kt$_Lq88HHK>Fzv69^X{UjiQ*gYNYXhw+6H#1NrNuM; z2T~)nIKybt;y{9ybZ{VR3%MYuiT(m0ssJuzRwDVk11@ETf1Z~Y$sb13i=cG_v1JoT zl{#oD63OM&0)352;B6%Gki4}ok}K!F)ZxUM)4-ruh?#HM=r;>FD#{tKa27pAsB#xz z;U}TZhef#2#*NtbaAa)*r~T>ic6{FiG*k|C3U`mf*}uqVJA8){HX%8sM6xVsl3gNF z_p{%dL~{_3&fKhb;*pJi8vWVCyb>>N0%2amGHp%b))!0Jlm{TI@cUZlbxK3-Pm zyW%shL8k+tst9Sx+&hyIi;$ws;a%8U4t&de)5*vOu_5t9g?!T(L)QCxgSTY9r*O$C zfR7du3uU*7pA0(}s*AbAN+n9mjQ#@J)36v7)Ja{foU1=5y*5!%+$<}q5y?C3C_<2X{cASdzZZlodA#6qZU zfu1h&TQgY7N2cjWDayzS?ZRX<>39gKK#KL`fU+(;QwyS3qPM@n<99@jT}Vgvq157g zj`MQBwM6ntD7oSzHe&k~P$67OO(Klcr3UF`1eqP0KuQTU!5LWY9n1{J^LHeYYX>6Q zGG{<5LRKtG*sFYm?vKDZWZ?bA0B$w%gEXu)M zd(hpU_Tn$r;n8JHq=H_W;Nb{k96++75!o#u*}bfk$m)3s8WnH9361Pd%yzHr3EIH7 zQ;a3~;<*HZWc@-`c?7pDuvw`ETmzQ-fM2QGOLP;tc4Iy}nJUB-yuo$Idj#L6;R(hf z`OeVV0b95Uy^Z2s_BBW>Z$#QM|928u&V!U=+)=R9c4%}cb|Etk7aDh=cRT2kj4PR! zR9Pes=z;I*2vvf5D|hPX&xJl^9pM5xTZwM=;lHh5SXM(y;GvlJ0;uT?599D%!|-@z z(0e^qh5PtcLQhh!YDODj#z;Zp8vRS{q5|o)XCBZG%49w%Rna7V*YQ14*AjHpA~{)| z7fi_RGL=_0FIl;hNF+5a1I_x;>}7PLb5GWe>bNJX$L*1>RMv+liGb2 zfPS=)Tti1YySb$9UI{XUfAIn-Ai03wz1WjP(;c8rR!)XOl{x)cKBe)LtY2J4CUI!( zh6Rs=cUhfoq{Sg<{)1WkPFhF?=tb+|-9_`VLYM+Zgffk*ni2BQt5m(aLy3vl<%4?3 zd1Ws14_3Mi{+442)d|06N9GE>gs?N&g(!8L(a=2{>LqiN`Hha}YsMl^^30XFdO8%R z@_R178{o!@4&wBYhCZ^8jO?qB)s6=AbsBW6Cq{oo-m?P^uIwd{ekBj<1hRUA*ER_| zl{s1g-|kG*E{9@EvU6=XG_^^1GuctH0qx9$0}nXW8LSxIy@CcSDnNMrvHx%6E zVFQvu$(&XEj;zXxcWh3eK5AUC=OR4M7$i3V84XT&wTZ~875X({;WCGms?j3`*AYO2MY&H&PLhwI3%4GO(v@@FeT|UD0kb zm&C;ykRo->ZP=RRLbBf>4-LxtLt9$3Wi(k`%Of+8wRBmpE5Q1CCn9Kn^qh(RlIoP~ zkcdO8=u+kxRZx8p+4vdtG9Ei|(S%?H>XPMM9#mXn3cbV~tHDp1n_^(U!AeF?^_@85pREZap-LkT3b|sS7 z=s{-5GBcLF7ZSyidC3}w2MSIzrtF%KSlb(fbZ5MLK1)_m1jf&xfpgfG>`xZi=%B3y z7LcQq4R{OR0RcfvPc3I1ri#Lc8EZR@r4wxdlB*b|%&SK}2)e`P2>Rw1IP# z-`r3yvvpY;m%6^hfxocKBT!fY9WoDG2i=Dfeklx35{G3aTK2$-J;?gqSw2a1Om+iF z9`_I2$S$%CP%JZ~WJYSH$0{r^1?@@o!3&SlM)rapVr0Ra)JbLEtW=0g(b;J5AakV_ z=w$*CV-UKL+W&ZRyGP0BWT&2Jy$mE)fv=-v#>??6t&v85_-o{zL}CLve_5}rx$B6Dn6os|6b5d7Aoi*)2H zvl7{_C7F5Z1MxFixuZef9Msp-nj)hji4LCI$D^(%j`>^KYHjnn8~_L)oUt%D=kxs?Y+J;?_o zuMj)#23DIG{}lL<8Q3KxFEeJTXo(+{H8#m(CGN!;TXyb=^@Z{3DR@iS6E8FE_UK3U zL?1!2;tQneC)rUxddfg63fx9{vWnjG;D0)e)%%$*y==_i6y~vVXi9lt-XNc1G^z`z9>yUuYl`Pf5*Rc2USG?g3qpCUS^6edXg0^sbfg}CK>rwBOkGX7PObK1YNSDWHA1DIJ2B5xt_yM zkH9xwhrD~x!a;Ac7p@F6%8uW9bRuhUveHt)6{Sb9Op&crJaRyh)Wl?Gxnuzc;9BNC zAuzi!vFAp-)i(T+?2=rJZ7s$Re4ogMzGFtb8sAleHaaqTA(9dwC+n56I#!EZ&ho1R zYGk*;RSByV-!HY(uJAtq-el*k83_945kzD=yDtmC`* z^F}m$h_R%KC3_HM7AB}T08KK3mXj4Vc(zSsOY?YsCsL4AJu4%KFVI27l|3vH%~~?% zm0fhQ(@)N6oTTqdykzAfKu@ysqZ1mIQ$D@%k^@0z3fvSSaq$dtPDgxFlwMQOr|i!A zk9_e@D3Cqf9*{5dOb-<9Vg#9Ud62hMtYl}sjr>FQqaKB_CS)g(QPwTI*hg1JEMp9@ zO*uei`R(McRHkJ|jO^Z&Y+0hPoR`>>*kkoA z`Q02OF4^%Gxc{BF{2TmMkTr-6SQS~#sX-IRxT^6p#f)!5Vsf@f=6^~ezrV;6RiL#D z8e3pTvL9Tkr*0&u6Yt95unfCtOrZM`lFEXo9BfExDC-$tss zehrjLMPF73WJTl%h?R^+BWAT>jC`=rgFNXvqJgYsiRYIZl$`CdGP+c=WQ|DXH9}V> zo|4t17GO$ZNl%^~nD7CIu>+|Y=Yb3vK`dWZvSn^OgtaBvOEw+*y^0>n(OUzwZ%6k@ z&>*oT7pbIR7hSLh*$pQ3vx{ipd*gkiya@WGh9$dkrAqQ0=YwV^B)^RBC$R;o{kA}= z0G`amzsWg2iN&%LVkDTl zo!t3uo@xWy`h&8*(3(y)a2K+bUCcZ15mJpf40?9Mk18AAZ+4?1BY*!)20{C~)QfjwiI}W~Qa1}#~ti*|5m0jsqp&zM>>;Z{K8Epr> zirrN5k|@;{K03gcfn-zQs1*|JPd++;o;zy5Fg!@5G#h<(#L8r)F$+qDaLL|v1x-l>rHsE<*3#s>p^q7~oIT5cBiXqtl~F&Q zB8R9Lgz~jm%>v|ckiMmI{*&>H@e0|3tkM39R{w&g@5poC#RvY0Wr^R*K&Gk1D zfpV#GO5IFmrah6f_%dvcsr>q$XeDGyo9U_4FXHo<8?-$LzzoTy*nH1 z+=;$q4Y4nhXqAwJ#4I@-BbnI&kgx?$62te&PJk3fk-frlqC>oa)W+PzY1zRpb7t8O zE4w#jPAhAUvVT(ih^!JzrT-+Fl~otnPboEX*{kn?x`AN$VYKC@5-z)^gLt$Hj3FmT zr0RYg`OBVWsdSv?*8rEakXo^vBa>=@mMVN#70?iv`a2}wzv(7<+b;d~G|=-Bx}qihmw7C={zaEP(~21k-_ubq4{R!R2|?%DwMj> z8wq#7BLwj<5<_KtiCG@{m0blv_?6Y>KFr0s@%&h%U(PR5N`VMaiXtl_?DrZ1#O#=lZ<@<&C4zzS;^*cGuDaMjZc3)Sa`4sS9iyg_%{gY@z_P)j#H5m!Yiu9FAh7CJu znaEYD(UHs_W$#&M^2wRXU&?pv&$|jN$huSpt)wC>`^n`b-4SFYH9FY^nh(tuEJXGO z%BtmgV(C2m*5~M2&OU#M|9ZxlXpAr(W$k?)-eE88WHm}^V37n)yhI`cjy%vS5npN` zvZtg8y5rcP;6+yZ&B#O6o24?U(N@+52PNtbvM$jHtH?rg50M2wgf^twa0_|Q8=&`5 zIF)+Vf5@mkZRC7vv2u*~Ifa^mL`j)x$my6yv@Cj(dcW*hk0KRWWfpH&gf#N$X9q~$ z0$q!Uvxmaze+Kl@;a_j36I(ZJqqf2z=fy`F0q z@{l^19~tE;GB?`7ICADkvg+T#ocK!}+{*rw@3G{g+>;eN$>TdCRXO#0ir!`IT+Wq> z2h@@C7VO{=Wf8t*B$>qRa5DhAwUZl4g;h?%jX_dfh#~*uZ1?Bvuvo?K8onfiR%DJO zdoqXdLvZUn`jC}(#>&+7#7_ke8K4bJ^e3etf zze4v`a-7cSdmDOPj5oS+BE|}3Ikb>yzk|Mi;dPE&b0lrsgBjUvE?Gb!8j_s{f)j}x z6>uoEf^^0?hSgrcZe*2cCm5eYFG1rG_5I=16AXROH9`9g3lJ8NENw(teWX~Obd$|5-N5a}b>*fe5C zz3~!JZVS)vqF&e@ZS}+NNTuaxP`VX-^+jjzLGwy*{ug?z$70SJ;z7H?@9peuzYSSG z3D$G*OYM<(5!}eiq^y)KV_eydDM;x<^s*3d0!B|{_LB0GvYx*!K&X>7KAA)Gz`w{G zWjs`f-;#=#%;`^pj3XdcPFeN9PD*HBk0#TQY7b($tUteG^v5g7iE7#VScb20U_Y|| zH5rr~Hb&4(4-nlJ-}D1K?8kySfiWu>ROn?Z$gwC(627Cx7>RZ!^W-VKh!xGGV}Wuy zPG-nmxi9|QtUumc?)Fs?&7GLLP86n7&biRH3-?IH_tVL!*w(h5dgf=+iESEDB#6jXrXpYipw z54tniURPxw?*?ANj07bv|3)O+h|Z;^^(HcX0I&NTXuSoBrt(BvWj$OE<<7OptC8ro z7rG6iS6j}I_eKLAbhsRC%IxM5=z9&Cql_SDF#0Frs-4_Qc8|!3+?Lo*9NGAglwI$45#Xwp(Ik8L8Xf9rv<`p$Il7RO&`TJ_V3ZnsPA1d@k=80?agn$zr_sBi zM;#>Z0fqnJrH+FQ*>SatXXQk63|{X+yEi8+Y&r@0IsI)G2f zu*M*dd}Sk)>_=WQkCPpAAJNZR{KPw0&2Ess6Wq@MFHtmbh;tNPM*9F;+d-=pjJOQy z8lgu{ryQq#ET}IBS2tq8#n^u?n6fByjj??b}w$L}+YG9w>c|7wiGhm3`ng~+4=>y#6jr?B1j*z2$K zEvvjXaMqR>=_U%v3C^eaEeE~CXkVVVmrdnR*0KHo;olK?9>P+(p~c(D+P1OcmL8nz@e77-rHWHBa9Sb_>MLd(}CN&c%~J8^!9{TkkcHp3t6J=aikGsR22l(K#xRG9k~a{+4d2uD$&M8 z`rnNO%3LKA4EWG?6HhKfk6(iG?f9*8AfXe~N#&tCK2G+_O-3F=@NhwVcrFMl1sQwk z^DUlvABtpOQ5C)JK@zgJP1Y@BKZ=~V@*ow-(&QZEK(y5Xs$5Vl=Z)oGf02FjgFtQx zesDfH;+@3zkNJ09W)PR|#8TyC>KSykpG-_nUxdg5Qt5p?6d!{F57?G}bzwTlXp5%0 zgDuIHWFN$}U`5Wp}Qr?hOOCXOP=2B7oGa<=og)U}UoLGhT28Bl8bPVWINxUL2t3$>25@GVe+1TQUC!%Ae9d}U55|K^GO8%Rg6?^gI; z`4>vE8KVFmWvAG9XuKWG$iLt+2Ahz*VsdtEFqTrynE!(5GQRb|$H*MD7ApJUyX4<@ zC`-%)Wq)sHa4*&EZOp4)$78$?hu<3aqn#hnAbX4^fSx5vT&Y*q(ARe4 z|1t7^9PP;&wIM{`1)%3MG9NiXypGo|#3tEQ-aveBN7YQU_9j?=2y6&4r4HH_tV*Ru zP6^2#Z8?z<;VD@uJx)*WpvNB)J0D)fk{&m1G1?lr#wgJ9CBLo!k&h-U;0s2a&*)3= zKdbOoKVTL27`I}H_hWlKj4^y3$EV@OwZ=94@+px>PJeC3Z^?P*$?UKgYbeHHeU|>U z{=WW+{=I&mK2{&3zogIC59wB;7}UQB3Z&j=#x`ZYsQkMht(naX#K*{*qQpa~1190K zN5kn0$Y>UsX^j$A+@=nu2TgNK)ut}yDdyScdUKN6UY(@QSC6Q*s>70F>1=6hQ7k*v z1?p?+L^V@gYQEWAWtwT4Y$`R`Ob3y*i_Cet(Zx8XAJGr$JN5tcYQ0Gh>no$hl@X_9@NA#9PXQQQ&Wq9<$AO9o%WBkkb()jlHxp=9bs+)DUo~O6buhAdW z-_qaK7wY?YdJXU2>+f(qt-r=EqxF7Vz4cOkj6O=As<+l7@lEkJ;#1<+#@oct#r}w` zjctf6i5-fajpfDfi4V|+lP#|{rKrO!cU$M$${iNx%dVBli&93WHm3G;zvh0>{aLB%``B(Ed=Cx*%dZ)TYZBWmu z`_w;T3{MuI)VROW<>P==^Ns` z<7;BB*gMfy(UaN-+6`K^))cX8Ewu-F3V?@mo3j*9s==4)L+#P)LH6YHK;~auj)}tEq7X`TE<#hTQ;jB)s^N; zrf#N($kbwbx&C1M>)4)XM)U#gLS$oPe`H4_5E-bwr!{DwMemE~_BsrhB|dhG4m4hJLW;= zMWzP4`w}CFJ(R>J#Ck@DX%9!{h2x=Rp>INSLjKS-VP{0uUXH#VA8f2s_L!S3pV+%N zmnOG$pG^BD<80>e?ElN@lshT+o1CfH=~jXMNc+Rc&iNM26c%KM?yadYhIJ35L{A*WfFGOa71iH~MezzvK7$KM#x# zb_sO|_l``{c1Kg=9rbZ&@_o}VbG7+yb*{Qr{TBOMY;JGvZW^KVH*$4KJ-5SRGgzSRJ@3SRDE#yiWTtHd;?tdYijgyE=+oX{ooSm1nHZd?9;m?yGtG@+RfI zkUKsnFZ;{PR_Q%b*EoCF+ge^Xh4t~V!bo0luJ3}UjpwoEil+CQCO5^KMtf|&GyW05 z1ED7)FGkPCEy^)dKeex=uQkVZ%=VytqCH@nVQa8Xw=T0BRx9xhSDTiQcNQ^Q{e$TA z7B%xB$}#0ZliB>1`M9}*ItD*8Ms1-UGT&mJZ?c+(D~}kD>BHkQVyUrZ(RR_dwIuDA z$XAhXBQ3Pu+Vj!Um^EIk_cKN-x0{|a->FWud}MvvcEOfw&#||*JMD*UZ!>wf|p;6(hBh$4dQFr_n z{deO_WxT1$G~HZh?ybJ9-lSeK_b|6NbyVu~XXBO8?%KNW&`{6d(}DN>y3gfbO&=+t zZ^Msi8L`QFk*T-kR{Ij?3n^#ZDH&t3R^&XJcSHV7`PTelc_VUFRyz|EVA1 zSNaa)pptL?Mh#nBw(Yh%?3?V#j+-5m9TvwF`$x7-)+Ltv)VS$UMb~G>%VMjwHzK{m zjlrbg4S{n=x!CXW-{s%$e=<-KObw-h=mpxS*u3~fof9;sY;(lCS%@;!zL0@2M;DJD6z!NMBKOC8>O^n%%j^^vE(;eMh&!n7kU!QR# zYky8R>2kx$`U`bWu?6_S zhFhAZdiwf?2X=(kX});6X{j2r9-TE4 z=|yFtp~tt!c1KI1mH62^qD!OZ*w+bP`-8Dj`N1^Eyv6*mYN(G{Ub76ae1nJSZvND? zjQZ+bx-H&5HZ^)#yItED=@q#U?jP9@c~-kC+9{S6Z==6%SWMTOUs0d3%&@*>yT@K( zud~mxkFYn`?z6pXonYByzRpx{%*N}d$3D{>kzYd}1s4Yb{)hdIzP&z=?=DjGg@Gr6 zkA!{=H%BH!e~kSW|5E?L_zu5uJ@I>zIz#QP{$h@rTAIwt7JWwinplgdC-Pr-PH127 zR8S2a2_;2bTEFP3SdwwtwB54PKGpeGvXMG7-I-OJU79;LcTMi<+(&ai&)J^UBO{*r zxNDf>PHVaO1AU?PYp{=RX7l%rO2gQ?ZndA*Y^^D*yHtOsab@!<@8f|_!bP#~j9KO$ z)_3fOlhowul&jr_yD;s9`)>E2scTb)B#(4vIW()?GRizhIjMJuFN{8`?Tb7WxjpiF zWOan7q(!5h+j#}pB!iz%nkJ?684O~8(ST>GrR9@Dl^xb z?^8d)Gk;DV`-1s((=?^X*b#p_c2l&gRu`@gwF{+&riP}4pO4&u#vSo*jnB-lTkmtk zos&{Fxj)XhJ9|Lx?Ri`9#~boq$$KexX7;L#A@1K?cQ~%KZZ!QD{~_{L!0COY>9dB~ zy0qHO)g!CdRX5grP&cumRns`n1iw9;6yt=Ic@{o?z4N-18Ezx(-Skh>Z%YrS<)&>( zjU{h%KIrIT>tg9@9-+8(SL{`7ROILIgz#12r^CNu^F`Wy(NjcZxAzwmG1cvs{@?VIS~*!B7$<2uvV z<~^#9<>k0-wY`nw2FGBBZhzeVz3pMEN4?Kni4FZ7KNAf`Muz7Hp9?(af5#W~zU#fu zyVg72XZUvd4+qrH6Je|NezZO|R)51dphQfI&8^kP)Ti)Vi_9lXKPVH75yXgCv|6i* z6h+!a-ij#NH`)+vX>9yYqsF|#I?r*BYhKD(_d^*IvhT@VpZ7`rhWt7Cx98uP*D=SQ z`LKJYtJeOvrGx2W?2d4je?s#!4Snkd*I27wxODzv-laP(2dk=T^t$^R*LXe&9FLgv zM@%1CmfOS5lGM#HZ5B}|c(l^aJ$5ZMFH5YoG^9=Hq_*(d%4zvlK4$s!6#!B^_ z#w62k=AW5eyiNWYRtKuDnqM^Cq?8*A&uQ? zT&GihNc%H$LC*TToPy&8jux&KX9{NK=jZOoJm7xU^{>5^<$!Tks|d94bZzKfyR+(< zOZxdQD@!XcRBpfU`=#?$ZEF4XdzwcC#%cfQ)6IppbxH4~*wgo9uFu||b5-sixjS+n z$sL%ZXFiy|I<-skpGo=l!Isvh>-CywxmFY26IvIn2)r0r7nm7*J@is|T%?!wVKgWH zkM1>=DXHWfM@_ewpE2KIKF(}$tTLba&3rx1eB#cSKbjM}G4_1yz1Zs5$apH5#AKzj z$!R`su2qAUEM=JdZ9)0DyHpDh2_N|Oe;8k3)K_sH0p^=D3f?u5M8^Q`$#=RcCyHfKQQc=w|& z)v?yHNeM>VhOYLy8Z+w#Rd2b}^TN%QFP&R=Zrb^)FE(A;TQ#tDTf-=CWAN?h-9{Hx zv(0f1N*$5@b>@)li8&K;GxA2~Ey#7|L^DsMA9Wv0`O$S-(!aJO%Xg+#Mw0$@Y;ZI~ zD~fy?ekJ^2_+73E;m5*9!f!<$);wBXG&8Dc1<|Qsv1e>#?2B0M_`djfGK>=by~zS) znCVvYD76LobxZ4h>zlT=_D}7niDA3!OYQgDf3baUy~k3jMoa;!(FOX=u^+Vj$f?kp z;D>?9{(F6ky%W7xd0TrG?`}`7_q;dYYZ15*m>+y1^m6!Yq+4`qY(l(H->N@N&HgT` z3zw1H|55MwlDS=l{)}EoB{8Vpzc z@A2HdIZAeYdUpV=y~zUOJ}NHuT5?E zqA86doAbl}#+EA^)pFba98;a|Bwv$ygZmHnSMFQgL)|X-yQ$ZtyydD$dfV}p-DkVk z_Ko#B%L;XZTB17DQ|3R-!_4)j-%Ml40oEF~>X&0%qsz6=BV)q#!8-#c|5#tMcZv5s z??2wBec$;z2ECzGk=;>mypPgo*08kCowp>PO>wxZ++U=9npWoyrv8yKD)~?64-U7j zjrx(&MUO@6BZosT2Hy6$y`kpz%^xbK`iHhy+oYApcEsN?s!b==nbu}|qch*V zF(a03y~zV=Y$Z|BxHaobDdz46B3mv5}O(vF1nX`&2)C@l<8& z$~EUMpBrC!_4%@k1FBZkH8#EKbAC2|=O{1H` z&5a(HudTmbpjYtv(Cgv(k-xP6i)P2ZiVcb{i~HgiMi3(qpxdUhVKi0;D6Ou?S0kT&s*tP?dj`X=&J~f43E`pu@Zf^ zvO`^ED^9xXEN7IRsTJ;iX&<}0rY=an%cUiaaa?UXq&iLO^pmk0qZ=c`!v6#(1%C8@ z;a}#j_Q!d32<#2)4ZarI9dhDTOw8v_Mb<>FjU1=;vMzigyf{2Md^|Ke^iF7I=!?+U z(E8xm;GIDw^lK=G45KZ1+(*$z^~I*IEWg?JI4`AqoOU3yTi%iu*5Y0z^GZ**(pnj< z9&V+UOe)OIYs|EzEp;8RJ#KnGdQEVI=i`QTwZW?HmrNI?obP*K<;69Zw^zSWdt?2v z#%a1GOf|w!`& zTQ$CJM&nn_KYRZScp^o5FEX@b^k#Xc*3WEz zJ2G6)Cx4vWCHaQrmTWnTrwm9AJ8rNJH*YYe#6Qujp<%vjn>RI%XdKvB+GuUOrP0xR zo$r=V&*)uxw&}c@ZT~;#y(yv8PVPYJU#U}4!zoS4PS>-{8?LfM&HGFXmH!#n>GR^J z7gEn< zp2>f|Qh!o2)Yna|l^3b-|7N5c*Xm2+<+@EdU|wns z+D{~%b@fl(lUALvBGa4EB7I|Oi{$%~N^AjBJZ26r@Z~m-Z78XmSd&t1t?pOzN1eSX z=sgj9L))xBr+#Z6?z)&V$=yBepR~gC2h;veeI@xw(o4)~A5r_5h8hp*-^5FF)}EEc z%oJZ#+gU5DIn0T=Qd^jys%Fg?9luFy9op~z#oNY{);yr8u`#1*Y;#v{p1&lhhO;zJ zbWeP{k#G9MG|==q`!ycWZ;9=~lWz3In*VIvTwhtcrDko-m$fg{Kiss)Hzzzuf5x)j z`E1&=+421Qi|3X0Z+)W8i8i}iH@Et)WNOjnya}1bDP{I-^9S*tA}0eMd%HBZY5K2e zOY@(ekG%i*t_=(i)kFrx2E?buZ;gLI9sXu&9>bZ-PKne;evU4QN2w4jwm2QHx*YD~ z>Az>r%1X|@CVOAjpv>FT9!rke?^74+<=Q&|fAh)u?KP#<8>{ZBo?G)#-GIgyJskqB z$eCESvY1SFm;H^Toz5p*FS@>RPEDHVXtvF<4z`r2gQ%YLHkYbNmgSb)tQofdY#-Xo z9NirU>?dt2tw$|Css~IIX`CnmNM4iF}w9Ck!J$Up3J7t8x}TXHGS25 zmG=|>xu7HRbacA@xYEm1Xg+7YPW?!|Uj5cQ%2cYnPvyLWa#XRH3oHw4FD4Zw|Cahz z+W7Rx)1B#IcWLT_t|5*(%R?roK3nS%+UwiXJhyRd!=d_|hN^~{O)EVQ_-BNsX`a|F z{eGp=)R7v;cHD|k;Zh75ewGOj>ZVg(mCC_eeyWcv{Qe%G0 zG)^fsI_fELSL~zZeH-=^R1!|!X)$Lixt^$y0^Al>mb&u_4$InR{ocTo;^fXN6<1{X6R!a%qQBst=F-rcH``7q{4@Mr0*?mU zhac4LiEmZju{`JKn!L^J%p9H5A^)ZppB1hv^tLE0cqlg`tH#~QHQBbv^hx}BttoV4 zaCpGy?-r;aZhaUk4z~@*LXL1jX8=RBqfCZymcP+9i5>^pEJhF+KKCyoMRfr>xGk)Ze4B zzld7%q)2(VHuwuv-{l|ce-65b1;>P541W_Dt2>g6pbxjUi3*}MZwQ`%d>MbmZg*@O_gXld|^z<7T*-xTi|&!?U@JS#o< z-jm(~RB#pr9t`G$UJ6|ZJs%#XZH=WUG0Wkk&Z)gKUd$;e*xzDY(fdWi3&$0_k~cE< z&+JxNwv18kHwfe&hm zejD2!f5n()ikhFdd~Ka+d&2H?jBw1e&$acjzM}3TYQLni@UqKhj_+& zy1T01ng4zHu#il8N^aeI?m4H@)10XfQtfHO(pP3~%QhVO-RHdz{e<@C4)D3f2P5J&2KW7s=PFz@`H3eCE7TZaEhhT?zu%=k`I? zb>@3=b?H3L-7xMRJ3))-E7UaQw|BBWpZ+CfL zQ8h{bjoM~755|EsRt`4truM)pZg@J&$a_(!An+CaaAs&E{giWAGJcSZmDAkO!${)RaBiTXuN z&}}S=Yt2^|&q_DsmZqAfB)PMMjxgEDhUt^lAb$~MCc1)YUMH}_UEVBotX8-xJ5T3q z%}&qKGXKe}mbE3PlRMOZo$=xX^8|YxR6}K=KgSG+^~bR2X3=G$r$zl2*&paxYse;| zY#Ys*6eQd>#?VXJU0{lrfsN-Vhm}{#6kusby~90~+;v=WF5Xqub=1|-J=C+&J6sv0 zw$R(LXzm-iBy^VU$@fgh&2g5ZmPpG@^8~YM8fz+GVsf~tw5g4$2e6MB@)PNjI78^j z`?wWGFT~UweH^g1q55JyTOUfp8M0BZftrK)+z-6&Qt*cBfew^~&8JL%ra#s_7`LhP zE3L}TF$X(|o_%L}Ko8PSY3sG-T4~hQGqvwpeY|Rn-cFmT&i9W|%KJF)a(6@51gG2a z(Xrk6%5}*zN$H?*Mm63i7B=Ow6tLy84-dK+{5m8%v`@Gx@_gjQs6V2oM^%gr4*xS` zlFenBEnea3FiBsn-tzbI-}B!=uW_L|KwS*RL8gDM|Gx6KvfEeD7lL(K#24mU>#M4K zR_3Tq?E>9pG$UTVp14$!C zM>X9}i_*4q1ARmH(uH(5ZHCV_r%z}#pj>l}v0O*;kXTUD)FS_Zr=AB&f(u}p-UXkg zAUMH6+$jh!hhD^QPouj473hg zX&mAflEHimUr{J0)D^0M!*EOZODrP2liJDMWve_w8X_hL)%k+tA8^p7a%S?F9OFZf zmD<5f_`#Jz7F|NOYFpI(=;Op9=WoP9!SJg9mRktv4F=Ib{s~_i-TW><+p6%rNI$N* zQGlTfpeN}M^qLssS5#(~X&$^zVa@M9t=#ZE^S<@8@m$0C-p$?Bz1980eH7@#XeC~K zs;M*&ObN4vbJ7-BlP8!Oo7bBwS$bJJ+Dh6V+MoQdZk>#d)dlNr^DEgdZ4~Slz2>T0y8-Wc%2CaFmLGWz`%^Q5P7j ze-IPzK;PuGI7(V7H2{V<3Eikz^4`E6XGg&_s?EN$5nz)J<~BiZH4ZGeV@7Vn3HH+k z)VgQ!>o|VhWtm{X95JeLN4UIXA;zZ`KbKGBoAENgiR2~wuzDUERgLZJ6n1A<{l0cU zTd5t>ZX(kbz#-^}k-qQuDsjpRpfGiOqkO9Gm9huXeTH^Le@m|eIjqAyFX-f1o$ibF?qoOH?F(YF)G- z;G{8Hw%Q3J_m@^ptAQ@?aqXS9MSlaP(>P?_g)qbUu~%?2Co#|uD~t6xSx6ExgvsJi zse(L89wC>MGo*vkaH*)YL_99!@KeEs3g(xP=A<+UC!yf^1d-R^C4L9bj~L;2t_=1S zth9z;=dA~S^NZ0I{KaQnHg^vi{)XTPJ_qNo9k_XZ77v!E)7Su1{uZgmALE+>UAQT{ z##2n-1%4kHKy2g_cLC~>DVT?;7=>G4$bDe1SUGU!W`PIT7Nhr-ePBP)H4WTFvIGoE zFY1!^;0ta6XSE!D(sbiHc!BR|F}hR#spZqZX_K_KY7O+_Bz2rR3@k0D+E06=*P>g{ zy~ten#1=l8A1UkwH)^~%0IY$*h%9&c(_kNU;qQ|A#7=fWpE@41nI9dN+2H!+ zHp&{+j9Q3ZMZog28gG~k4r^=Fz~4y)ej0y?KLh4cJH8nI3A->KKcCbi1<6P7cUxnO z=Hd31qhN)auu?8?AGv~L5Gh9|Qs`K=8tXTb5wHn4_KJqFFtB$6Y}OF2HJFPYthIXF5ioDtk&4LN!;J}SDho1_ zxwhmsc}Yf+2jHoWCSAx%(jTMw9qj3NzC76iKI(30QKph^d{e$BX-e+nZ$tRo{2hK4 zey>Z0a_g|FoZ!*x#yW7i@^h1n517j}#%r*g=8;O|92nb^Nh*0q{^6_hrTNL2pO@fj z<|A{7#E<982)_k0_$h>}!VH%o2MLtT(DENP9$;5aVy)>~y$XHJ@^MRy6KoV4fV2N6 zTVgEZWURn@Mqw~HZ?iY_0)0TcvlE8dkc?dHBt3&z_rPd^d0Ysd_Y>|V>xI*>D+R9# zYq~CC@qXQ@FJqsK(j?%-9t)o6JklRyGm$0JfYb9FZVB0dHBkU-cruvyeK`llvn1!? zIv9D`H1>$zhQ_EK-hCnV7OIY0K*$;+CLrM?cS$d-hL*-sHVEvRwftIP1y>8R2bWPH ziLWP=;4iU)VEK;a8wfXy+F+-aBvC>$lACnl`ms*NE6%~c7TfU82~V8H5K@J2A%^lV zjCo`#d5%}0*kM-VPs~$4-YQ%+#WC+gmSMmcak-r$Js7pCnu6) zs6ceRI2lC_8u7*yHWkX2`d~qxGX@cXPh#)cYw9t&2^WAjhJ(4=kXxjsv*l2vd@)ud zZ~u(b=_y-kSXpr{oD8QU!Hw?#UGG%d!uY|@<|~R@j4JelF^iq%(#chl%FWU<>3+_~ z+8D2}dW#^UcHmc$wJZ#%M6B^i*e|{^X6vi3IUV_7H z;^rC6`1$-sWHh6|*p1^y3B`B^nGUYT5i*hYf$?YO^C31A6)TH-ga_m&YSAQagScP( z!Y||sVFeZCuJKKvP3g~e)7i#j&d*H&=L>$eESlUELJ`%P8Y1mX?sDz`lK zQwV2`xfHdSzTc=Xb{6mZlhopDHW^KZ0lDpBP{GapMLuxF_#J$_FiOdCV;|DnT>w zgeA}w#(J?8ICKG@0zn!{3UVcwVw@N68lCtRMBy~BckdfpSUvFvcKrt92W=$OVpX|K z#t4-c6mA^$w5^ZEgo`&h4a!SQq{Smy6t?1@!iOHeIa$#Ln%3Q>B+~p-;H~ z=py|jw~O30(pW{#O^4D&L>Dj@1`j>U3z8{q6-xVm&?AVzmxMZeabqg_%CYug*_r!!HyF4m!Ykq0&8KM6g^T49k6v03p)L8B?$M89h5bot2tT+h^rLY~&(MoV#RNi6>W#qZjo{`7 z81i7c&lENChhBi@H)^0JIAYAyhhyg+7v>3%!P120fpp{UkTYQ4wbzU5e;AFxz8=O^ zph-$Gx#dY{nld(b;=S1B! zg>THo8Oz8VBbu}$*XVP-p7EV$#H07r8W=N(pLT&|>|?f*`HY#QjS<0;^$V;qDMDr% zpJ_bn!@o5)(9Prw*2Y;DtUI}VLIo%f9@2~YXMQ0M_5{C?jnJ*^pg3Jh)^h9P#Z|&m zHbLK^&li77d$x^BtLnET5dl&q{-4*?y{Ol4{_baZen$#ie8yz^Yeu>>^Io2c5vYbBMuD) zyL|`U!e)}?{Bffktp$Gm5iYj?7Y4&mCxKzTn5#mrq9&+<69I~SewI*%31XGH zb6fQ0%z^Q98PmvK)G~1LVEc?>d>N#o&V;7lBOh&T)oa=8Kp*^{$;HE!k z>DnOftGJK%8L3*L5zd1TD7HiowK*xvHga|Om&O{}g}W+jArIL@eT!k`=W?@HA-bGf zM+8=e&EU3kYmNDYu@p@+>IgHrW$0|pN8Y)fOW`Jh89z;Ei*8FFg35^wA=UYEP-sjc zS6F%Wk<0e4jIG=|B4a!s87XWKs=$p#US8)8fGL_n-*d+hn(B7mM`Aqh)7PL87 zwhu`H-_fwpg3x{>le#2`>uwaMZ4ouA@Nl;>BH0MKf#1cC3ut;-9qlr=R4yj;W};Su z&E^{sD>_-D*bM%=@vq*S>%_NU1%bHiKS!Zgc@EGEFbeT|{2+ zA8W`e^AZoI7sJHg=by46+y;IhJxlk2z3*Zc<1elO`DrAPApQ}JH_AcJa+ezbwbN_V zx+ddKsL!TA544@mMbsV)wN5~@naU>6D)gWcDx{JYEK5&f#ZXs2C$nj=(FEM&{>X4Y zKxft;6~kFJ3mMUK#Di>JrIqmRgHZd8f$n-USBe~g?r8xOJJ*mg*5!_(@^nIHx73hm zyk3(~ei3J3`L$#EDbin<&9!8~s3$(s_ClP%v%Ks*Nhiax(=&zlMn$MxgGdWLS{T5M zp^=;kJeO%)Jq8U3w_8}vUBIrLk7{&`@tox~o&jec&#tg-IK3~UdSF~n$!(Tktb*F?9?p6hb-)ngF{=;V#5ky>1{sxr6EKFG_P9JG zhVMp}aSmFMBnY{=!cbPsCe66FY%B8Wg;34iU^Tc|;B7U*>W*YSoD@%tG03jB8B@WN z97bN!Oxljr;UbNjhK_zq%m95oytF@s$d7W$rwlV|f!fURPQ5iGP7xj!28u+SQd%TxcnC%9*jASr( z3Q++20=aD=@&h@~b!5v^Q4_x67C^f?g*=0ftT{Ibd45^_t#Od=ME0Q1TbOT#Y~mTJ zfW_npayJq6csYIqDzKqYZ&Hk1cRayiob>H^!j)q$jq?0I$dOI_8qDruM6c6qJSvHk z`ZsFhbMfa9yV|pQS}86}MCGloyUW%ol^iwnl4~jCI3JT*qCdljs|k zi*F+~#ahqJwip-5YM^3+RJXQE=qRd)Kt*X&I+64eRvNMF7VxP{Yz@V25&V5_68lR3 zG``}MTeBW?B-fTV89!J(K9ujx-JWR^VjA1*p$wr9iAuCxZaGhcNOp-%8 zvsL68lo5x(VI0owAU>`Zcw4neb#6LsXe{Q}lOwUaMF?M z%QxY#>z%Z#TsnUYdfN`{lKPiXRA|LVV^1v7_p!&^ZN3u!%s8TtQV-EpatV1h$J2Oq zn(pN_afJ}Z-uPGO=Sg8OQ%e&QeWxd2@9q*!rfS?qU!tFJ6Qm(hs?io0TeiNHZ4_I{ z3)uqYt-gL_&sZADIr%Y>$eORc9xvF3bz$;UURrPS&AOU{c<_6|n<9<8hJR_sgq z0-3-a4p6v#CLe^;M4-?gv0p+BafnfrO3+FLqrWf+2=h^3bYlbOIcaV*0?O0^EYWBD z43f(JW!1P15>I-L+>GzC&n6mjns^t1bbbhR)xp_0!_R^zNY z2o_*5?lkF2<{I;j8$dgF(vcLzxwDE4Cr@ZPEdng6FOZ&EtU0U1%_9$yFZD;4`V8WQ zM07;-!p1?=3Z=M#I2(&XY1Eor2ZrNEz74l@zB@sf7qx9H)F4c$9>2y^RE8IPkPr#u;`SnwSjY z4!k$uNe_U6MFI&a0ljYz=$~_pQbvSf18P_rSws&gFYd4v+*tCNtB^z)U0 zkqtr(RvEh4Z9wE+!?B?PwD*aqv3F zM;O`63-^lA!0t*KU)g!+X$u2!>I)~4IoRJjxG0Q4B&mrkpc;3VYm6v(5w8W566)7m z+;2E+xLFz09wUHwRfKLB9&)4ysOMm3qe`BVBX9G>z2yJqG zIBdZ60^?pD?m6{tg4pw-d z{DJwrgE4-Bxoid;NH==n-6!MylmhZ|5RnGHIml+-V&xnLuFfGNDvTcE609p^qNo$P z8wH_Ae~2~t1IW^IWLCf6sK8srHVb;6MLZ)zA_oRsU!6G0UwHY z82Q@RMR8m=JWplJ>RdS7yur$jz%xYQ-7disU&ZcthJC&So+HTQjDFxzkB3*vCFH{i z;H0~->Vmljc%J7*Ka9k5%)%MWbTU@*T)flI*yUy5co2iVQxjeu8Sq3Yi`B9UD<|Mz z@-Id+2_7?VFlu$t#~F`^@CM!)`SE@0fyVM&;C6bKu@b0sKg9AE*tH(xA<*3yh*66$ za*J?_R5@-cYQMY47!&boe`0RC;`xW-Up27;Cqd!g2wbZc5{sD`K9v;cFP8Ukmt~WMPy8-Uf%^ z-w{AeuOKD{Zo(c9hk?M2!#+IeOYGamh$BVexe#zI@ngl0!Fw)%wcj7JLs6wvL5_bJ zuT>4_#0m5|0=KvS!ur?&)Z4%s2;8WegP+(4&JPQ)_IF?meq-bk@Qwm^_h#ewI?z~j z#s0~McYFfg3&qjj$&EFC0PkutM!W=&(G5V410IStI29bkQ|!evI`Q)wp#zeR?=FSs z*nu@G{qK9PV%(cyKJ#L=1e~Lm|_&wnI))2^SHLQSu&r0Baijvq>PvOxLSa)NvPP@Y8E^v#{XEVz#_+3kf*n1=hTPh`kiEQU&oU z5x;lF{A|Kp7RDLiM{e-{zS$peS_a&B=A(mg74Ky(*4za={aXBa5c$F+oDZk5Zbsw% zIWQN2e)I*5R^S%UKXBp(?z0HE%)8*-k%sS+@Scuh?Ljw@vL>R zrvkTD_k~|ZKdhOm*i9|5(x>D3YGIXc#=B^aJ-!rsZY=gnU|j`VXM(WaH({Qf@DzHC z6C>b2ejRHoaN~O5jyWG9?jo$pgLpS8oPpEL#0@Hb-T zOE`wjM9dG|>0Sc6W+Z+}z>6~AlsFt8(TI+T*qdW82Yrznb%FQCJN(3r*y(#Q7Ewk4 zoaYlTH&?J`1|zmL!Rl^?XhN}%hH_gG{R6lAn(^$zklO@2EAQi3KVWqi!Waa6W-4I> z3ZwEmi2nXnjNMQC?;iYwL`0Xf7|p;f@4$*MXBRP|74bbDM8^|&nt-=Q1{~FD;;Ht) zx8o+x)M3c4{=@7jMwkDswjk)PpJ2wy;WduJTWSvG&VvkOHKJq}#EZ@NTfpteg;>!B z<9i8_yE8mqj^Q&o7@-+J$o|E+oy7OO!}<@b z9s98kM2y&NtfJjmV@|A;^?1c&h}BQ=zGh%01)SjnyEAZaXhn>ff()r9R%r{&^*y+e zn9#p2_&+XmUX0*7?0b>hfiZuG{hJ#|syt%%KEF*}bi8V`}vMDmUJM4&)pktzKH zZ-Nha*k1JSH8 zI*=Z&A*qTiHHq7UGkpPK_i9AkR+xn~*k_yZ>a}p9uEP_&#N0mR9^jM++;|;|wY>-b zKN8L*xs58wYX{+9>v7V&!fp@T6wnK~KqH)vpDOMaY8>v zMsgl;x-z;%uUIsEA#Q_lISHz*VJsP+`GH>NV|0QSqi4Dkb5)XcMxXc#D}?iJDKd)P zTvd_`w}5H1AyHdgn@ zpLekqI2Vf`J86i$e-h_NAB<~jR7hpeNzDK!Wg=Vyy0KMEL2vUqolEP{_xc6B2R@c* zGqg5Zbub(Tt2fkX>MFIm_DNj_{ohhL-~A`gwxEg7OKIvX^}bp}leKngPyZHZ z4__)({j2>a{Y7wvTYbGdR1i<$V=w_2x}PsCP8TnV9#N1gO2wsvVgg(y5(T(w2-oh`Te@Neu z`A{%c4Bl_|SWj6ln`fC`N{hjijy2NYNVHBLuXR*CNZY=F?bZMq_CI|dZv$TyCBMIe z{}WW>qroGeqW+_m)Ys|_(GhrRbR&Iv#-A3hh$eZ7>2LE9^Kf&#IRyIe@1|jMgZN@^bCy9hJkpp^_v%*s$1%5dN#h=1lAxQWaoU8$FYI>Dw_)obZ_^3i_}CBVm@KVM}?!R&ExX9{KI;H{Fz z=M~>clg&eIDM9-~7KUbqwhGG%s{>B(xzG>6nfB(k`<9aChtj`1>UO<__K&Liqx=hf z)xAlc6i+x9*cFs!$~0vxE}&?m-_*D3ozP$Q0SB!Ntf&#X)$Nf%>?aq{X;=A4!U9p0 zyO{2o7MWU`{xLN$M_IaA4wyTch#V&M5|f1${6)?WHXYRm=>KSa!1fuZv{zOsW&HcW zch2%hf+Js0v+1Yx!tk%UNJ|6tpMj5II3ad`Pc#&{NZ>Bwz)h~5xcBIUj)l8aHrz?N zV(#W(AD@9^#&Ob$SjarEO`3qozC)j?*VGS!cXdKN1tqc*-WD=Ou@##JOZ_%{7om|D zBgyhdd7G)Ixv{xATpiZI3ueB2TG}Iy1dpx;X$lmjC2K_c=$Et+S_iF>R#7VeM(G62 z4u-;gJ%&!k%xs}CVCxJsMb3egPT?RNQq7(V83lqIr%_{PwY!9#49O-BSDI&Fv545gv3ho`aYtz(rVx6|i5Rc803KjP#e(85VVEGoMP>|guIhXVz9e6bpTj@JevB6C!A)s7oUCra zE9^R=<3xTE92+(pE7?{0LeJE!@IB(dv_0bA>JL{{wThlYR{+H;&#!{Z%vQO8xw6G( zO|)*euC~^-jV67+#z%E1*ul&6 z>WGs+s77!CC@%{r#~YGQh;G0>`rZh%ED{g1s?D zd#(;vfB5J4Tln|+>#M^x2e<$(V-YzhRF*4SrrJk^oDDk@(Jg9z^v;+Gu@hr=#pH?3 z7x^Hpcu2afpxFifMH_vzU-#~E*LH^H49nV{nVz{f>tyzsoHWM{m)W~VG1P2*9Q$hQ zB}qajI1X%(8^~1hNu}jDFk-KZk>YtFM`$fJga@8eXds%UDEXHxnXbv>#XNa^Gx$~^!L~1&{^OqE8v{h z0rOTejWG2!nGumo%1h9x?Y9n}P3{~I2 zN8~uT3+IeK2`AK&PMfw__XG){(cupwN<{UE?h_|w8eS_tVbeTM& z8&#)ok|)-+HD_E_yG&}c zSl^$_N6oS3wx%(1wCs|8OUI>HX``4d+N2{Ak)6^^sg%UT6mVlL;$hz4I$;Le>&MhJ z$`J1&w+XIhV;tLao@f7+eIoncoJ!7rU6nlP-V~*nwwmHD5wch4Bfb2eCTz5Io>j7r zxBP(eu(zo?)SBCb-TYbd2Kjp)aMSuCw>yLWYI(RMoJW1u8M*8U)R9Y2$CgLMg^ZWA zN2f0w>%It1$CcOv9avFd!fU~wcq)7miCoYWW4>$dWvOVnZ(e9FWL{(1E$5Ru2|K~| z(AXYYRo|!f_Rm*Blqy^kKRKZ8DN1LA3_dQ05|r@ue6-`X6#3q8g9aewfk zVlKIjd5^VeP-aLeIP;Z>3X8rNT|OpPOq=M(k?X?WhggF|>tnffHGiI{C=pcQjwp2an z|E6?T!j$K}cRs}ztMr5_KMo#D_x(-Po0x^!@VXj{T<^GkR(}VLVJ>7a)seSZQB@V8 zzj5YI)92~O^o{x$y|MlUvEd0ca~st#?9=c1L;8wYfm60Y#G7E+Z86zb1gD3(!WTsD zh_Xai#K)JYgeXtM8^lpB6f4!Fd!#NqqptU5xz9lv)G^zVRXH;v^IRs$zLay$dD4Bx z>r<+NuL3lIY!r$}hvYPqZeC_tW*KWKY$;(WV|i&FYrbO2V>%)CkqgVeq{Y~K@$iP@ zrTcJHYznuV2k5a>;1o8J-qY%+n-v|t1s`0;otGTd9H}`+bGGJqavnK+&dzS)P4|VV zCG;3(=B|^$f-GH?4?=8{R{MI*?N|uS(9f##4X`I+ukoc!SL+&to-vLLmR_JLhrw?^QYo_ghHg5>n`-had$~yQaTvfh9@v#aO zf!|+8-K&<=@GkXL`c^o^Jwg>+6`Alt^c<(LjJYIK3>&JE2{W&HQUx zF3d2}wa>~}Z!@>waW^|Ct5BvZqj2Vn%$%&PIUk)1JVlh9>J2>+`9&Lkv^Y@yW_oLG zU|DE6ZaIn_@t?UIyp49lNv1v2=&j|~(qyR()Dh3{u|TpRBe)MpMtfQ40p!cRh4`IZ`LnPV;5UK5HRc zyls#5q2-qOjA^`FO8N!fXMc3$#~KURMRG?9= zW_UB}W=jsc>xgHJQc=4_vyIo}Utycn#gu67Znb zJ!DC~BF&S=N#ms^5+}_OvxExJ&9ot2aMe%hG1>-yCFPWtc)GdXJ3i)2${C(>Bqu#* z8T9^z-MYu`3s+5gQP!WE$xjoD$o)*i%^fV6mWI|&*4)+=mf7a^rYQNLxE$`9r@6jx zG&qe~e>ukHwss0GEi<6@O#+T^hXuNgH-RY4A$w8fzXz`50>9`q*nQK;6fzLp#3o?> zcPI0pocIY|dl7yO?}U1(v(Qgy4zD?e?0P(O8q>&qplkz-|DY6ULsuiK9gb@JG0v;0 z>JYWH+8z3_6tyz&lku3Zf|#kDh>Qyn8<*nrn+soDD^%|Fw3ljs_}N}TMIG$FqV!R! zE3K7Ah`f80g3zIbXo_A7?lt+vc$u0c>o|K@NJO|h@@sT#?5)^R@Rf|pRVlV+^yY}e zp>KlXY)+FPHZ!)W|9LOCwmDv8J2UfTJWt!7)-U}+M!~Ga>qu*VtB%N7$9&MV3EoWyOp&Ix@@A=}^tW_b+5l&RN5W&?32jOR z?m8<>Cum>&Ta^*MqTVF8#pQF1aSV6daWr%ObnbJVb+`1kf_qFI-9?`npGlfc1Ue6CCWGJxJ zDL^E9pk}NMY$p}`j%R!Yuz|M1^XaZ&7ylIJ!E(yaiUw;F4Pgy_ziq+ei_L? z7iJZD(_qfSpZF#say?p-iqu5oXaQ)A$^sdf2o+vCT8PHr^F?S0L_n@fB zAH_DNg_g^0^ z>>Zh5n73)ElYejgeKGZIdR$hyoH*xTcd~DgR@x}aX9{0fuwoqG& z^^0|^b&92`xuz*cu4_7IN;Ex_4@x(HlMaU$Rp!&dIq)c;0&#Df`u;biHwkv<6oz9Q*B*`KYCo)nln`8DyT1*Cp~- z_epnsED+#+a%MAjL4x@-C@M7w|B*Tx4hLnuMY zK<^f0w19e}By@E*z@FL2S^>e_0$imtF#e+OV#D~9 z^OS2on{S%;ou{B@x;w_rxf9%{;q4gim7(Ij2{uw0k|id9C!)1n2XoiV zdphr|yzlbxd9KA*$+bG_kFb6A&*opEgDa_D_GUPKW^PNLk#-uFl$w7R{yil1c>2Dq z3XcBp7(M7?>P|54){A$gIdW&y8~BJ_wKcVSYXjtb7bZ@VgJiCQ0EqoKw}?h4;ad?|G{g<2k4>)M+I?Fm{JM1rzy z@2sONi%s>U7yM1`Hgo80v~!5%Hs2KQQuq(2`#vZa{BP6{y&ZkR3WNO~iG4c|KAFRy z@azah;9g*#li@P34O*vLa8)0H85}|n(iEtpj-zULi1TfZwj6m)UA!vwP<(7LkE;?S7B@J@L2q_sX=+na6X2-O1i@ zN+UH>U(DIW6LK~4LvvG9v}bH(f^r8p2>umxEXW?bH8@wu_28vJMeJ$Tftb0CmPY1C zxuXD17Tc;XSF0#TJ^ft=9our!vP)$Don_BzmenjfC8xS;zUPSVnLk+H3+{3kA-6P3 zJ_fX=qGg|DjU|sIpZOTPx@@8iK3#pFSGWTO1UPiG7QF%wlmwmAALDEr2{dB>Ja@M$ zui>4w$amitr__g+!EwJ?d#yibnOqTJH+8gy+b|0SL{}+ z)#Majai3|FHcTn!UG4q?r_<}MQ1?i8lxL84gp#B_*Dtd++-*3zOc94jUr|RakyLSs z*c%nZO=t-x0NE)3#?c!1+TVr#^{JkyH-eJ%12V^a{+7yGpWmC;Yxk(GP}gku{55e_ zbk=vScUoQPt~7V7w;yn_QEE4R4;(i4i8IWV>?h%edNA^Qw3KUA{DVC8^Igct@+Ril z9zQS+lt?_60^p#M`V@CT%K_=V^5|%dqGZq=QY>go|nGMD#r?utHLj77@Q}@TWVNO zTTfbxTdP_snOm5q%L}B&;x7I>mk8uy32qS>0`*mnu4}zCURwmOWnD?}F>e>Rdxd*{ zc}{pHd2V^mdIu

MNZ$s*_K`fAS;CczDX>4J{s4B%DM{ju;)$C%kXyiy*tLgt>;) ziC=82)t~wI`y#wPcf7ln>!-7_s}P))&Av1xS#6^Kpp(H2p2+VL&WT&4hVn}I6kU{O z%0HzrsghU&xadFR6}rnE;5yofwnM+?5}f1wR#Ozzn#oo1Q zHk|~0;xeJDG{bbz(#-a!{ii)|(4?TE=v-Js`-Bb)Z5&!Zv_R;ykjRi5K`(5w<)*Zh z?`%}peabJ-E9brJ{F&9@ArY1~Hmzp*fQ-gjzMP@%=89bpht_Vga7cP;x(=V`9NP_S zuiVz2mbK<2Q+av5m<1kjQ>dh!sItdFb+;dtkqdb518t=0hHphluhrv+W2470z|q+u zJM+3;xa5k^#uH){7+3MsjRyZy?B@vl!6L`ctfj7oh z`K)wRJSt4%E09z~$`tAYf>~Lcq+Wmn;!9L|8-0(wqPMPRJoZ{F(1*Uxf{2ncoTr^0 z=U7)199Y(QPQw3OR`=__j7agZd05bju+NdpVlL(S8lRbGNxr)I)AA+dZIOFPu5Qr- z!_Nk3ribK!R?vIOQ7mgnddJkHlv}@^{Q8ozBy~#q(#)~h%N!Q>S1LDxHP8?F2Dvt7AIO-K zRw%XQ?+U+zQq$6&WVUb=_qddfdM6{5loVyTgDJ}Vr@5}Vvbia`fcwnIe12{~ZoW3ZdLRQC|U(OevJIY<{ zBoT`B)3V9(#k}71R!R|gz9KZC zWA!QO-^w{}4`jQw-Rs?rJTcz7zFW#ewLQI#x@5KRR6HRKm!IMefs1kn`Cm9*4-;R3 zQ}By#flM?Ryop)BQ?J3BATN-X2l^ZBhk6eVNVR+~J!|2Y^}{*A*%Ypsot*QW=bhQk zPOdwyVD}Wa4_-@+y?1?u)LGyeC(4ytp7}^~iHIU%vuMfk6c}=2LPza*d20 z5Si{; zu#ytOI;n=)ZT%eN2z?kaEjlaqYTW(!lkp|v>cqrGR0&QoPZXk<>~G+1oij3Xd-|KS zt7!@81u`~g9?h=klsw;jC)F-=oZ&~ry~>BerPwCC;t%kr_?E(2v4h;-JiuDczCLJd z@RQ)R!Eb_^+68ME`cxIU(|QAcs5i>hI;TR`fsD85De3=Zw8^TOGt-&oUgoo*+gJ(e z;t{CVN`WsioqQsR{2JlC*crL$QOjQ2*r2Py$3x17dO`|>ybj8X+f*8w8i~1}U!0-7 z_U-n_ZrQcn$(%o2^E~%`_0((nPj(0%zqN!*!Vh>UWI{jD1f1^k(7O}{SN##m&o6=B z@@b(qJSWaz4(|wW_=jXVcZa=2uc4TKt8avNl_!tqj(fKIFZTp@qWdInCU}b*H*&iV z!y$7II$)ihADwphC~u^?gnbaMTBe08i2ND*0Mx<41sfF#E;OjX_`KKRK1Ux4j|`5m z{1zs&*8Vj2DaVrR$gJ9#`7@VhF31YY5uDdu-`x}7P#x@9<~F;(x$3zaqJQ^LS)zSs zkNFy=M~+S?w!==J?pbpyH@U)0|~R_AH;^$v6gtI9EA5huz&%ziBg{Pr$IpE&Rlsti|r%5;Bic&)tE zpD`<0DOly9<^$FX_EEtrLt2F%39T6F49*ku+}hiGRf-nIaxQv6d*{EaBq$w~U&=~< zO<+`aw6ppu++MT@Or9m+p~N7w9}WEu)q_xZU!bGdTtgsR(Ty4{HI`q>I!?A&S(FY6 zGT)6mk8Grv-Wsm7KjG;3S@}mP2bZEDaO%qAZQ>c=-sQ@`ZtLgxFK1@X#hi3UV|R*g zn*NBKGA#@`7G6F^kGq?vH?l^5UQ3>0ajxk15mFcleq&u>`Y9CS?&x7S8%*Bq@Knl# zC)#W0U#z?RzVP zcZ}EPH7Q&D`?Xwb7uQg@D19|KEIZ-*{U9h$uq}98&_a7h+a-&|93y86x1i(yN=NIt zwbJSc{}*Kukoon#LcVw2MP4&}tvM)HvN_vuIUNnJpE zBmB#OL4H=2_!p>;wLEkYiv&xcI?nd(Qf+yG+z_bHL2;rG%8y6Y*OV&g&d*fMaG#ab zSoJepl)ftA@FB|M<&iOkU}VQTZ#i$fPI`tYhqSxKB=M>FpuJ1zrihHF#F(eCh2YD7 zKKejp@9;ArC9t|$ndeJu_*`5`wi$P3999K2FT9@0DY0);LcEOC{DM_iwr z7CDt2NzTpg1m7a{50*nNOY<#v?A1f;VU@#^!&`>W4{I7aFxYLIV##fKA@1jMksRY+ zwimd-LpXKq1>0n>o({aJo|@nfSAKe5c?x^>BRaU<>FC;2@=wz|bQE`q|0W)jL(L}3 zE{of8)l$u}zyyH zdF`1BH^IirIR9O>lODuUp{8^4<>8tBR)TM?oGAB~bEF>9OWe4SEVLGKAsXia+b9{g zqSe7YQOn?guvNIh?;`oR3GA94t}XHxSHgj0bo1`>&h3ev1{lf z4A4{Hn^zsTBptz>T_gQ9l@#w>PdoQ+*I`#b_ZiPf-(deuZ4fI*yuwm>l{wq8#+qh* zZCzrmVExZh-qI4?(++Zd=}(}It-1Uw;Db&1 zv1BY))%Zd;>m#+vxHTk?{}|jvn<)L1WTm!0)jtn>miO8ky*zzR=d%i6v!3N9k~n@3 z&kJSYX;lk2V}j5NIApfaN<1$nNXMj?(iLfj^c?qjCG)+ZUF?Cm>Voyv*Z)+R2hPHB zh4;Vk-&9|MTM&v4w+Z*+H6xqJda&1iqmPh+JL)o6Z)A!`=uV)Ot*8_86{}B2RUNGI zeyh^W$NOa87Gwiu)d@NW6<}Vux@D2g7L*V?IQUSo8oVi(5BAve+H2V2tb5RXZ)Nt% z1*JT~RqisYL2H7&b5k9ymQ)}3|3l@GTjkYV{sR7?%3j(6T`Ys^GyVVuy9knq$__ov1 zP{hB5H``&M7iYKkS}FbfFVwtrA8ybXC6BYbvT62PK}kVng31ON@WdAFeQca< zf_0Ads`U@6+2WGdi|0v*kqJNb+R!aF_qV~XwSMYvgw9Tie~7=Avcwnb3-vwmW#jIt zk6JD|1*)&|@R=zkg#wdUW2s;*5BJ3`mVsusJVd$ySDFm4>nDTn{Dz#y%}b~F7i2Vi zJ>b8i?NQhHt$rD|Y@JjKqgNZyvi$+fDGH8tQOrxTX&M!P&}~K5Sq-}HZNyJXg7?!K z_XGO*&bawzgAgw?4d8$1PC<`2_OO zSi#x@vsj0&OO6_@tpeKo67F!bptS!4MfL@{7%aO(^c}F`%jntNg8r&Ec)T%CGaUeD z-w9sY888hqv@c-Hd6gat^WDYA6JHHwg_6(z*?$5RT^yUo?GiT1eJsUn(`^fF&#bGg zpR9{*E$zqbpX{&go$Pt+>uoEna5k62g>dc(?Vz6p=W{vy!&`wX+Yg=%UK!)bEa8lFΞBV<~GHWu9u9fV*7M#DQWi(FaE3d2x#L zO*#u-#W3L$mk8&M7GSp=MOW{=wg8!54s8HziD!S(zWM-k50lh#|HslK0r&^J@_9}Je>$Q;JnuI3c9ri0Q+-|1m-fRrdzMc3ql>`%odmr_&-P&|sN zT$VeqixcQLZYoAIPdhIi-W#YAtg;GOSIvoLEwco?f$rvU=1-2ZoM}Dcz1f*hSPP|0BwLpWL+E0^yVfb9~{71`cG<_GrR-5^}RK`4ZP*O`Mr;mMki5k zVUFXdL?y}T-Kw{=BE;;D-0sJIm;Ar@&GBS{B~#Sh+0)1Gm!Imt)h`^}$8uG74p3W4 z-Rv*MS$#TO(;{e9Cg^^;!<#8-9kI`)q{HAbE5OAw!4|KAZjzr^f^~XDjJ4BI#OZKl zXU9^*r}Ne0d&nu|1?WbU5c|`CQPjQ(|Hh4SUl8^nPCpHc6&{k#Y!5xZanzW>{`A6T z>iCmb7&n)Mc2xUkNiX5pHAg4WA+L}k#U%R0o>||`W>hi08t)A+JDZc~nwfdmXd@I> zsoOZFPhleAQq=oqp`Mw_indO|j$O*`bb&b*qfF-XlupL}GHPj18jVs@sJWF4^w*;Y z20ppk`eZgWbDIWy02|GoLSXL0sif>N51MoNT4+8rJ6dn7<>(MJ@F}-Q&0)R_k-tcl z=uSUQAHsG!8&)|7ih1q%T-RVbSBLw0Kp&!KC9+t8m#Is9krC805?#$;7$ISJgELGw zN{JHb+@$qPQT*#IXM8f5q%GZ6tGKRsj`;oX#JTD_HD`C%7f-DJ>VPHwZ#>!Dk70L~ z((KPbH%syx^RM zY`o<@wb#qTKg`YQyw@)nSIifdZD(hSri0n4+mw3hOLe*9ucM5XL@c;OeaiH$Ld>fi zk3yhRO2Hh;{B}=j&Qr}N?Dq=h@N7Wk@;p4^%SJ=GwU42Z){NWPQal28y%HS-{fM7> zOFgBT)I!%t&82hb2c#Bi6Qjn!`MPdYpfYwHhE;l_8Ruyj{NQ}3AD^afof&rdLGMZL zcW)q!+4Fj8qay4OHyOeWSlQiR&=%rZ_QEJBO^^s6m}d5% zr=%U6@6^N>RZuv&NgwTg7&k3YTFZ$h%~Dh-tBAQc%cJ3@wia61kGZW+&D3UT;{aU` zNB9~ECwm~Lb*T5WH-;IX`FPUN#$>aPm6L9->e$pZs0Yqt|D#ZR+bOHqzwzwWc=}U@ z)1}Z%Nr_eHjJp0aG+qYKzu|9J$NsH1SEBhUqgOZEdSL|+)VQ^hH@Q&?>4~N3rnXk8FR<09u z=Da9VzR(-NHrNB_V7+m`*v_n)=5Wn}^#r1^4BjhfH?KY87VK6F2jqjj`F4~4@UKDo%zXV z4d=F-6^-BBgjU~Dd$*8Z{6qKCR(j}4py;xe>A;_@g2Z{F(Vv`1%(M!17k{xM-0Z{5 zN&aPZrAI9k?fW^X6ATx-67z2nwh=v)g|9J!TRNK_wMq0!y%GMSx2pp);BuQC;9l?K zzL&w@u4WqRLhoB{>{|UZ=W-l3BZjD8n)#M}i?OD{pIgP0^kU)$^mkfI!%;?^jpAVl zx>9?oOC~X8ZYELZOQMX`(grb#4xQt4XFZ~q%MDYkofv|)!W4s3!~d7NIT zqC90PdUf{02f9MvNEP~q1}1kaK8NXL(~o(WE>Yin>D{QPMPplMqbz#Y3}vNi!=F12 z$Dp;5*?5Y!_H2C%EG?H&&+Q_p8tppoO6|_=PIT>ZHFu%O%`}^vOz+gRKWZv< zg;EorJzl}&KFE+hu2g#CVo;^rf(5Zu{j#P|m1_P%w88{66{&mAnra}-0zD#1(- zH#373OJ7b^^8qq4xfT1Y9*LT$E23np8oW$1b|CirDM1wozK-d)1R1l zH%y!jZ!J(f$_}mPZ%sPh{t}05gCE=#5BCk#g+{0*b+Dpf#)#$>V+TBpqDBPqMptmh zS9%^iZ5^Yk(U`ec*I`=xLzXa%9*-2n18M1uc~56#6x@$)^j~JPDiNnHvd*Fi<)W*x zG%M@R>AGgAs30Gt+tk;QKa5*;56fN;eD?q>5fjv7zVZg*Tu!ra<6mRY}L(RxhhLnDIKwv4;+pKrOwN8E*a{kbdE#~{r7ZQhtjvC4pbVLeP9-xyIW1R|w@5+K zQO@3Ew0d6&Q?LygxXo+nMy|-I$w?2~ANny*(*X)1Nf&Ay3IjjrD9(m@!eF{oHEhW` zGJ|)pxhkQO^oE|t^=QMDCn{FW0KT4cQimBeuqh>pMC)@#`*9mr!v`vDPQmuRLG!Q~ zTDq~s%I{EwegV6BDt(k!P`KNh%q6N%&YO!ToCISqfR5c+Afz$GRsZ3eOQI=rlaAS8 zsD9iPn$vG{hV%MP&ZX2+I)X9%P>QK5)GO*yb-CI{?W6{)vzWY)ph}K&>JT!!E{c~v zCzt#d>vV~IY{i7YLfEUysOLt~bDEX@)#Ip7)Ieq6Bbm{aLM)J^7d8ID_FMriNk^~K zJF^8{&>DT9jnTPjLe4b?*7$3b8D^lv5lB4J1Ak^QJ-)d#1pnBb4xc}C;?5I0p%7FG zRUnUep4U#LOaBHsYJ}op5?y_eh$Wjbl z#8)ku6BA^=pa*XzEcuF79%g(VrZcMv*~~RgeIGKqT}CSYe>hX3j+tBNaOlM99kVjf z6FQsT6)%Z<~$rNje&If29t;8p~p|-yCkc=3g-DW6b1ezT3JZm zZ=$rFi}{Kn(n4t|(ePcg)T8OMJ})gHS2&2q`Ut6_6o;M3D_)_~zcl&id^)Pr*{{%~ z9ziTq&Ps2kqMA^j4(qb?ZdanCqyrZJA3j|_*zViuy*|n3yT#|TtioVsg}LG1tn+kw zS0hU*3?K6bndVrSXw}TdaQCvp%FId*+Rq%x+8iZEH?r1MgWHmhBTcWpHC?|}iZ~iaQ`@auYEF>p+;9X+qnlKho12yhF(b+|kI^l8fri6DkgzuFKviy9H~L1)W4(4$8|y?2 zkq=AbXT3JJ^TZj=c;bay$;`zt^17#VS)?OEoJTKII`H5lSStw|uoaDwF2uoZdaADy z@B9=$il4;<@f$kap+um!!BA(@1x?qUSVjyYj$OiM%Z9o|ZqxzNz_|O3H93P;K{K=j z27n2U;%hQ%(iPo;A>{NvP5gqWZUpkX7-Fw*&eQ~~^GEs@V%X1(XzN!+5Ai2Vz0Kx2 zu<;Z4hlkv;v;2P?aZyo}_lKe(pAH>^1$d_us0bBD?w z2GmCbq9Xg*&VEBzo(J8K4OUBHrC<=BvP38)ta{wpUBtO7@%kH>k}#Sc*pf`7m}v!J z9~Tf=|HTilMM)wYWrJC$FfBxpG9@}hCK^rUu`pZg3s#Wb9ro=g{)9;u)@ov%muN(c zK!>6b>Jol@TGWYrIW7M^2Cu3R5lb%dG|CBE#c7b>yFx=dG$rx;T6}02 z_pKp4QtgD&sH-est=bV=+$7!~!1R=BC{b#H#f{1(?hzhYjbYwZ_ZLyUP zJ*gbz&O6NkJfCcDLtjl4C)qE}```vPoc3t56L#PgR`-z|AgV$|YpcCSd`NagKEwMp zob}$i?PSItgEQkH!@qI16o>WgZf=oFo&8#H)|66WYH-&z+MN+63d;N zE98{Uv%*7ck>38DRv0>!4Wxl$3-ginKx#+r+hQ#X(>tFN9hAN3)Sb51nq|b6=v8L2 z+S#SV^g>G8YmO1lh$rkC;2t<|!3NcwF2qX*?1DT|C$N;u#NnUmtzvQjOj4P=Zxfc% z3+CJqcEe$7t9jN~YVQ@R8Ik6AbbeObi_KxwA~duOKl8LP%;&nv36X@$baFIAtFox{ zkJN(9W4+K^ZYexBE84B)&tiKcrQj#+64TiG%?SGragHzUYk`JnX}hgepI(q(=<3JV zy@>3JqqlZmyod_$c226lR32N>6?^HAimL}C?Y;HgelNa|DshiakoEsYrF5&1S(5E1 zX0RPEWf#_)ORX55Zj+tE>?wGp7*t%ETIV=vwaHrIt=sk()Zk5{E=tcYto-6hv#prh zs)N>3XZyG?)BH`}W(WI-*#W)Jfp(}l+paF=6^@yEt@Gj&Nw!uQPAR?e$T~oG-6f?v z(^^BVy37{XD2y}u3I2|8Vh;RDIi;l%L3}s@joW8-7~R&r#7J?7E%{WJrTW5hE0=Ie zN&`wi5jI#yDJ6=Im+eLJDQlh?fzQY$l(SOWvslHm=1uD!-J!#YvtKa*v9aCE9BO%_ zP~iwzh6JYa&MaXpL{H!u8Gbrpko4LPKw&)F)4w2c;yi zv1i64x^n7LlQ^k2qBikexJ*8kgZj?}JI3rH6o8+!4#l43=yyFNGu|hRHfyqm)vS?1 z8F7tu2|eA5LS||OV}uOMM(ZdH5(N=uHIbUPlu?w-ykx@TMbuEt3NX8tnPZKCV0Rsa z+j=>%0$5XdE4!IOc}?Xw720PVrK?6>agiK>6MX0`>q*-a`Y;Ci>A^q7=~q! zX4S@v{p|hLWHc{73wf;8b{lkJZVIP}#}A6@6+bZ5Tw)gGq&Uty#GXdo{;i!#ABSr4 zOY4~tVtLV%$OyM$HMeIUs)y;w`8&z6+zIeJrt3WM4n}?VjriT_W9n$nEko0yzuA}> zNluik0>v-pOS=i0wGs4uPLNiCbMlGoVPb3IptDwQyN2`|mGFGlSF@eEQv9wLF>6R? zmE~shq^4qfWt26;aKV>fz|FmCUa>xlD7t|KmXQXd$XnbVFH{xJ86P=|pTso+J-BEn zZWHU+r@ZZ@K5~6J)(Y5_m03btJy7T@eGxq7C%dzhh?aVkl~qb9%`sz8T3^II6t`wz z`?`_u*A(N0bEZ!Zc8-|GzF@wQ3krYDJ*W{pV#ZH?xrdpVc@MMfI8NtRV~7wXr?ba_ zY)7lBbvKhrPZ|ZO-JX_KT8GTD;t4G95NjX) zW`tcx%!J~AjmQ2)FVA>Xp<9SO?Lv}ewE|Cm3|f1}u57**Qp(Fv7jIx5egg?tOCT!F8j0LGO{@`ixJ*SH(i zN=N_oXdz0{?YveQVy2Sd&iPGMULd&bN5V=whcpm9uW%u!RbQwgeX^&Rt?Z^!Vdh2e zvuj#c$Qx|t>ed(22`9|M!gREBA6a>^`qNQQ+JNT7#pDSXQQ|CmcW|p!glct@>zg5QoWsj1Jag6k9i={5=wV(bK4sUjf4y zV-*qum>ijl(`ECVBTyYWZ#K6tp+%!IH$WDv6R)I{Hkx(p9Qe-dRz|CVZ&4F4u(vl+FIpt^B#Lt~o1mz;TG&Wrd5Uh>tmq}C5qk@1 z=)ruBCgVVI;V648GeC4RNJuOHvZ|8fZDN*sI{G}bQm+Xo`d?{1w)enT>&=uBjZ7-F zl!_YhX`-$Ts5V{`#)v7&Xicy=*@$aXVA;`ev%0Z*H>Ct~xwS?5 zDNG@QJwzOKA5G(tC=zeM3wO323frXfaHIa^omY~X)D-fg*7nPe;r&%Rn{<|VVsy=>+=mb4?VLrDkNuGwi`I#aDrKe`J2c0Ms~UqWqb0l$n`WATQ|hcLRE8+T^S#ofk?I$(?doH9`KUg ztk@YeSg+7G8ZL~t8rx|kP25RlF`g<<4p4_3Aas+6HjjgSW=GfWvi%c9sCL3AklwVY z8sY=#^3ts*c&Tq@2DA)p;^pbQ{-M1})U9}8zjJ6(Hf1M$dUJvB=pIuUx=GGHfOEQz z)6rg-Ccb87h8vrhPS|X<6?QQ-x-N>#6Nxk52@S3Hb`5aFw^n*)%%uixIz|@pi8KG6 ziAbxNH#rl;hm%EqehU5A%1jmbmYkt4f#tM^82uWt=wu>v2O4nK>6T1FYq33<%y@FQ ziDV`RiGx2eV+1XHZgN}Ve{>109(*z%uG11P_75`z{LxpdOD?yVye%Wy!9jEnYmrm_ zOSCN(s_sAT+$1oAnZY~9Vfg_E-f^9mR0m}_{l z%;*@m=F`>YU!!`6V&7t-JB4VtJ-6U2^WZfo}OG`0<+9hJUq9 zxXpf6P9DwX6k+xXKPAWp@=5$mD*bC>)6kV<)O^t%6mpr3?W_1x-8{=d#j#G8x&*E>gD za3aj_y4Zn@RAi#8uU1Bof!@r)d z)xtT*(O#)6-4;69y{S}wB-5N^{%0sgPQ8owchV#nLTi!|>9^{V)E*YvRI2(_%nTsQ z^H7#s3F{3_4(8{qky9znL3-~i9nlS`s9s}g=}@JPTu6!)TM)zD2e+Aoy12*cNj)f+ zISt%>uKpbSHV(doSKnj2q?5}Z6zm;nR#{g6Aq?tAFc6~9vFr)TSP`UoFdTs^@+P^q ze1l9VjvDGrG~fRPsVra@ur|?ucFhLZn_FPMHH< zKvnM!n4l@ViSz+nO8lL;f;qw6lERY)dw(PyPMYCutbfw?87aw$|5biCO1LU`O8GDJ zkN58sI3UH|l(~XV1r<(}C+J~{wt=32)qb2`;r|Rws+#l@R^;-8 zKJnS&x5e#=%Md>({&~XB#Qffy-dFk~Qzud`DrHlOpf}LPan-S0OYQ99Oa+Uvgln|( zzV_9zOP#08kRM2?m?}}me#-p7k#q`NNy?wJACCOzgc%7J6SgJnpf_<4eGK!wqx2K> z9=|g`S<8j?Vhzcpvv8SmL-8uh)FzIjj0q>BJmvu^zqzQ`97QRqJNi1a&_r5ICD}toBp2~< zLF*EA#&=ZX*1+mJ$(?9KrKA{YA?IPFkqv5QA}J7W3dyvG7FW(3$Vhnz$s)h5twKz>Q$ZTLFx*k`<#2RfIHC} zlyn>R-wSH#Z;!WB_-mb*-829;b_dY66I6%(p@#e$E!#^}I)+$(n59&XYTF1VQ$Mic zn4D9b2=WHZxW2Gt%b=`!i`v;AB8G}+Ll(C05sUXWW2rzr)?4VCy^%@Iq}7Q%6RW~6 zU6xoLt&gvX!;*@6KkGg09deAe&7;EEp67qSPYm!&`90N}G)>cXNc}rS8vmW{3$7`y zKF+6V7}-=s@5uPwF+Kj8e~$h&W0uDbkF6Es{OkDZ7gIZ~S7J(iud&MvVcOky!Imzm z(ay!5w*H;~C+~dPZ@k|}zjXe2{j>R{aWB)}DaGlKyJrnIdl&}&QKJ(k#Z8at_*eOR z`|tUfd~wa=izIYQtdlg(TiQ5knZgZek@8l(fPTSG&34vv2Y6P(3QBNKa%s+Mj$-Of z^px6)*@SD>FGJD8=zN%&kTciT86vIo0gKRCJ{t|DQRvu!n!PNh65gm-VnskG(7PclaC|lD`*-&KE6&uH9DgHcn_Q7#bJ)q?u!1p{{_EIp1YnH z|Dh@WVU~E|G_gS?Q(W+G?)S@m$=T2Gn8>@28Id$CzHsc1zqMlijs3(t_4_DIEQqNV zqs5+&`&dE**#-mg8ON*`dnHg z^rDC0iayaBnHZc98#gevbxfL=88O#ltHq~G+)6LOcm1RB%bahAh%Wg8lW>}9F+KVgnK>TcO34G_wcWu-8)8?W(&V|drqY2XzrY9^*@FYf{MEf!+HFFaK#DZ!iS0BG@ zs53Bl&Bn~6TJhOpYsNf^*%BKZ z*FOFWr!_wQX?#-rvxLuyTfAe8I@T@X%TiLbd_x_r6>;5m3w{^<&iSqL%kK9B-LD_c zyV@8>R`r40k(pkbnYYx{tY^IR)=7Gr@NfL&xQ(%uW5>jnj2jT2FR`>YtufF%VMT&q z-W7XGy7W{&sEnlhr?=y|BbQcL%b^*L<&N8^z>SpCOGAXuSpSA-)x^VYzkv$MSvZ%P zo=dN(uh+lp!PL0#gO1&Scaw*xQ$(?PIFDUj;%HgMJA;(4(RYGX&JrY#j8lLz?J;)|oa_<`DbbC@lYh>v=}lh_H1 zZ#VH-0F0)Zut9DSGnGWQya?6B|qr zpGg{$XW9=_`oIN zsi$P(%cl*z}vJwpAtGU2CK=Zt4QJ#t&7mtWS6MRRQPhsSp)5qao zV_{V4-YV?h2s0}+rlV3k+@(A){Z(bKQi^%zx9H)V?r7|2=BV!I=Q!<%cf5klRNFCD zZLNHhF7cC&kk?6g-=5^L3#}J$848&e)$NR&?$K0^KcNyd&niNu9K^JUaXfEl`k*$D zU!R~>)*Ot=pE`5|xPVGFzL1k%2A@8fuU2&DZ{@XF;gyu7Yw8~7T(`<$J+h(v9Lw1c zfhBax>}dwWhMGZ^!PHt5r3%BG@T02zlt~Q(_!$%6n%d-EX}CWz%+VN57F!97=^=UM zG8k*;;C6}Z-(|W>(phc6>n>Ymu~(;wGyIuoF^BB=7E?S66YaehE2BZZh{@|iBtK~h zDB1$5x3N^Zt5H+kX^kO=s=z$0Mpg~qR4rHlMNu98YOM!tcn^1b9`!(7%z*NE4AyrY zY}GO7Az$UhYE%KXaW#ExdEQ20I&RuZsr zlHr#HLm5dukr`QJ&Ux*Pu;voXKW1?z0jz}E69Tic5|!z%c#Up6Sy8hJdR!yP70)ph zeH?QZ1|(O0kARzvL;*h~U*F-EtijXEWaK8;+7#xf>*S3?>2WLzAMw7~9lO^VR%enu zhMvGS@N;va-*y1ra$&i(ynx=7AZE=@R2nOxa#?wtw3W~Pjay!p>T+KybZMDNK7&ZT z(f>YO6KfEWds=!{L@F9LnecFkPAGq=DlDm?s33c!(_&fJmIsJ13sAXr;1Nor1KG;D z%jqixk1_=;-pN?<@u=*}+=32#+KuGFuehb7c?OGF1Ve}>4})ctgfSILJ+?4hhVo)0 zyopAhEob}9{oC4RPJ!h#haq2HqfLT);4$cCs{4OHYHdN)aaHe|D z2hc$3B(0KcG?*62W6`zCMCV%uc5b+2il6B3C?WOdN%BgYI3cQ-kNF0Dv37Z>P(1># zI0bKEBq&xD{P-iLBwS3sot?Rnd+Eb}LVac{D8_cG*tI~>ih~;e;g+R_k$4QgS{^Km zm;8P%@msNEPV8|y7xrQGp2PV)Mu*)*JXIUe!gg>Yj=^w<2g~n;I^SCuF4xGPF2iow z1EZ`reDIoN%J;$5_hVH8LA88yVp74Hvp~57I29+cay|L%Autn{py$4oQ+y82$O+gn zYv2uagK__xtnCt?^$du{OIR&~nFg^O7T^VHPIpmD+E>^=+n4c@B+3v(+zN{HoOCpn<%)4VnEiSX*u3N;Kqdeit`#hD&nq z_RA@h7M$P}%5-G`thN2hHL}}A81K}W zyLk>S`z-RHo#1t!uo2l%%L`&2Q)+y9efZv`v3yjv>xS0Ns4c_c3nCeq#9ke4nO|DjS$= z1rV_G@UaSEDbv8!Ph@e%N6$U$PG`U@SFcAf&Kp~zr z2u4MWc^%HxBbaF~xl0Lr8|0KTh(Sxivn-2ssRG|L2i)2u;~AZ~h4I}DVOFj&&sct( zIssN^5%`>KrFbbpR#ijY??|sz&>Crlv?p}WS8ynf7wGzgtF6$y+lDPYB;A66n3^u1 zdD1(o!abQemY=Q&-%ONPEMIRyg$y%`>LoYkC+<}MM) z7*^VkNURF@eM_daG{OfohLtn{n_^Qf@#o}s6myFXQR6hc1Ydo{ExOO$-Un(}4fJ$8 zxo=lE6%FuLMK}k#^#eBdSFp4&Sn+t(XMrj9 z5|+^i@W8{U^_|08zU0iDLWf;JD%q;yj>eM@?!cS8Zl$zB=}Vk!%3e+I^zUPZ3T7r^hBFM`VH?9G%^Abj*(hz=S$~RG=eLli{DlIVWn(1^q2*kK2x$mR*Dr0KdjqDtm6Ldg89&1~o3p`B&f~A$=1w{SzS4hFR=??;Opj&`Z)Vi~b9l?p z1=toH;krh4>#p!#-r&gTEbZ*CtwQMKfb+Y%ssELL^8pY1SNm=8v~uTmDb8Jv(+mHk}B4t(#VVtjI+oz$h%*GES++U75TYtgVPIG-;IH2yb`s4+Mol9vU0-4JW3FK8 zh2gclY4yJ7s-D;P=o{!ZDnSq8NjtrCTd4%5H`94g)3hquLuY@_8~@gUg#)ko7w|LP z4cz~^GP<%lFFHD_QN?#0qNYMsTTP%)Uu7-ilJJ$3jfk7 z*^2d}29gE)-xUuL2@Zae-jGCIdw_aQetcL)EYVo%B9-9t#G(K@jlRubV=O(E`$3(G zgOZ1W?H8q^?vqgs6m|o?AvHZ8E3t^bc#3%9g@@djFIE~(1Kb-Tgv)TCbE35JiJajv znZr>~-0r3mws&{>Lif=Vd%kZ=1htSF&9&PuO@X-@-nO8fg?fP zp-S2oS4qE*{;&LV`DJ(4aCLUAab0lrL3KPEYG}WyQM^`KDTS3dc|H9xGsP<4uVbnI z+@cmY!u~_H_1kF9r*2DTHyT!XK2*uSQ}^gY$4)~!_H$c9Q0b2|J!G(Fv1%{LW>=9l zq$A>7iNCA>b9pB>stg(FF6>lK`Vo6^^2^}?!i9V=jkUMu2&W18a(^@K2WFdbW|$%D6QGqfA@ z@a$BZs0GxVYJzeIE#CL^7j}|H;Co*SUJ&X_tZHYNnfb|7z2g~ zB>(Z^e><9Gh~I7FJN7Azezx)S?ro*l;w(Mt70ub^O?rv0)2EP|XSjxCO+lYVL2lPK zyDE886PV?%iTWam6(_*bEk@KJz$U5Us%#U;n$}r^Dqh<8QI>1!2(J0LJVe))h6!Meg%gTDXt2TnJZ3Uq??8u}| z)nsR|GrP05ww+tkPpygG&ItC)p*B&L($(IN45*frUwRI{qlqQq-a3d|(__05ux7LI z9MkxW>xkH+=`;I}*ti(k;dJsFUkAoJod$lRt!!2cF_*cOl1m$10^o+u{w~h*!{Hj%1}u6ETg(hfJVrZ4I3L>)iC* zR7VezgV7fqOqxJIiUPT*6%-f9oDedvv!(Gs=BL}T@whqa%Ml#U*1YUbRuVb0r5S?CX$bM@sv5xlZY}kc+&q;XTMGs`i)PWNIsRHeeuQ6 zKA-m!-?pFXRYRh^@5F|iKvp_~GkNhnz1XEM=;bUkdeL2EGeajMnB#c5ihmf3P)f0^ zZ}ws48|w667MBI)>}^*YIy5To5n7U#!9O8CMSE7h{fFo68VPH(}8of4~yTCd-aef-$f>NitOeN z_h_oslV>bOuG}0SFcYM0H`#{=?~t7cZ6G`QgBYzX_vESIAWy7?YSL)pvm)Xb`sS)J zUHBZa>3+^_1kskSQzJkBHjX>##>V95T{7`$QV=N@V_&)?N3n;AlgGfhu7s`aLCi3K z=&BW;_!N=!YSw!iyVR2%D?zufAJL7QuAL^>wrT8Ecf8e1cK0oL&1ZZ|0+EiJeecVC zI){DU&etvW{4-FkAQN&Y2cmhN5m)KBxB>f|2P(&)`eZgUq90Bx=*3wpKUo@BV zidI|O<*?OhX!v(kr>eu%H0o02g3~j$#1Nl zKiN<c}GGW^<{b)E=c>jUfD zfAmH7}&@QpdW$~-NIC>E_q%DZRWNu;vx$b`A&l}ite{S$^D#@^3Q<;@BgjoT$e%3gOnnfO-^%D~;Ym3vQ zOY%LX37w!l)vaoMqPzJh^>5T>Gi@(e^Jt-t)T@mNsC~QG<-)sm_DO9pO=#FN)%SVHubraAbl5!^BlrSY}*{Z7jlal!r$8gUeX!M zxrFn2mffCDMX51fXC-(09p5YY`@Y=Qy6jCVFqm+5Wj740A*^~N2<%BJ^1;b-pSPi) z@rys(N~BQ=th6fL(KiwKBw14`hj54{tn}lbM-)NZrHYJLu(L5d@+dv&5Q&uU)~Z2(|NxwdD@#EJs~O7xj>mT3zJOoS&VKojIM& z=+$1W)pxdZCep3FnW+e~i1GfS*&L@VPF8;#P4~z#^d`m=?JgFUz@PwEzE`ZQ#6uQHV+-%iPzbySdhTGL=7Lo9ZJ|&`MI;$n>C!#YoG}y_`kvI+H~&PLE`XXoT2JO-Zk)m z^}r@F_aVf5 zVI|trIVqhvBL{_wa-8}^dx0W*4x)(*jz`Wm?&a=rXlxGE?rVpftx&}HsV#+7R196f zEa(GfQa3WA^b*?WhrzmwgBkBfBVne!i^%sGtpABbsd=mcC;)sjd$E=?s98kQL6?s8 zyh|iCg>_5fKDEc^IEl{=aP!}Q0u9G=Z6Y(d&TVT0`k7?qg^B!&dRI6$>N!5YKdiZn z$zDB-JDbCL2nIO_ET$-`N*RfHyW+olQ!|v&{&``jU^jD!iSL;%{^c%g(*sLo-_nBN zMRKzWiOsHJ!>{D*H7-x^_|1tO;J~6r+gy z^O23ECkkDH4T#{g9)=+pg6;LGa->_qQu!Q@Q_i+YXKQF!2?Z8IUnSRuMzw+r+ zaHWi91eyv?<^;Y);lj-%hZOw2hpy^TXh7X$Le5owJ;ltT7v3mzW0SnS(Vg93y|aD! zZ(NUEmtBus?_HBz9@k*vlE>O6Eu+@gv4{Oj zPy*F+%3L|66iEc#37hj2t+Vy^QS88Bx|Djdq7~?;?#X&qKshHCd}RenY8p@R6;??o z_j4mwt3TeO7XJALx2rVq#zeFYbJ9mJkEnhu$fT28RuX=I7#-!tEdh&p2U@!XENMSA zq7K$CX3DtCtEhl{WWHW&_Ocz=aS-dH;orN`_57Wft1s3$1)lIKm4wfnl|p2LgT=pK z$PVg%zvyfY1jC<8|3q(ex3bHT(q|MF`k`sEk@`(T>NhG#f0A?sg{SW5VmMK?+DV-N z>>D=PYuFiHBAm{|!g+`@RARz#d_)MyzQsi11hA(+M440QSgHuFosaJ8ugod(X+iBI z*4SsvBnoL`v@;qIi{wMcsuTRQHbxdI`Mwx`2@@#VGI^|F@_*atiOKsJ>Fvl=M#GqC zZLud%T?`Zsfjkcu(<_J37@y{BqcwKCQ3p6aYO`D+%qA`6wq2)OwO#w2rJRek`dSSw z(b3W|K<%X@$%W8WIDr~k5g6{XiKspkM@<7KcuPKh7e#|~-~(&1*x$@lFb^`IZQ8?h zVl^$icP-+KPw=Db5P$fA-{z)*-v|#^oBXRjxo|kPGCjQ>;lzgDK)0jt{TIM=BJdL3 zs8KH@Q`*N@KQf!^$r?Qk@$whY&nS!69|Ak?G?96I?DtOMgictyj&z=e!SK(HMT|hv ztqiwsBsvco8UYjKRq`No;u^_~nOU7A_f{?_2b5yUG}Q85P}d)Up1?U?_Y&^lFYIkg z>Tdx|8S75wah+Hi<}>zR;gsFMUN6SiuEA~=LjCU>nRY0hlEcaJ263{BgT<|3l9GZB z+aLT{A)^JW6b#QW9-nsvy|3!zX}(%@rJ{}qnc8eyiTr|A2j%du>|Fe%7!Dk zR@nm`f=A3i3o$RlhOvaMViWUC^JmjxmLS}dq#UU zxg$_(%jEcsrdds`hjzztTwSk-N>TYMCo>WMdK4c%i_VV5;QG%&T~=d3mY|;V0#=J< z&cF^?bH1MXP3osAuhrRq-T;7AO%TOld{tFtk0u}YcXdpGG9-J2? zjEQK)ERNwx^bKdLDxdK+_ir8N z?HgamjK18!jIh+obLy9)Gjz%LV59^CpNjQbg)+n=X68KLE^h-3GtIPg=S!$9odrAj zK?UnNYugv~?O<~R>K`R|_AO}HYy%^xVUD)4pcuFw_D?9CC2!z6Et97@&N|~<0q)tZ zF-#Y|2Mco%Xx2bib5D0ax8D$VqP9}4p(x4>r5bv-X&m3w8tM=9pwkKqm_$FqYE2DR zhON2CIzpAK6)JQ0&`~@KR`BTUiyizO7fcb}*WN!&;u8XH6nDV|LE zA}4(qu|iLxx<15pDXnWD7}LQx#)6&S=gG2=kzE0$dk;e@$*2lWo?!Mv)um`M2K$=$ z=bhCPmPaJr*2BaB;yBQQSTR3}E7hd2Xr1MP%`ghZhi>vnw3E7{=bM&^?rt=cCbPHA zso`M7x zAj8Uv64x8^61ZfnS(}*gD3Rm>kg`DQ3%K%9Dk0NzhFv<;>95^U=co;^&IjCe{c8JF^|Wy1)pDql z;b6)rW_4#u!Y;L;x=b!4-mrdSN1lLL?=-q$b=%-qGnn`Ewcba}tZRWb>1%yF)}lTP z=8|B9k!Y}0!Ip+o$!^A8_9x4G57%b`UUobAw`#8=pDITleUMCvo|`kCe6Cds2G>n)>_?vXAYB8!h(Rh+Md-|p z3PWMzof!mTb(h$^mes}F0WvU~JoAHo#<*;5waSg z*>lyexc^c=x5sc^a*R-KtF^SUuBq;fXUE67^h2-ACK#;tL^yy}ko)bm9px@4E z+TMamaS3M=wkJ06W~O4W7zBI++H;!zk5}{N^oDw4_1@+ps}#{~*JK@`TA(X7m0kff z?zvqXZ+M9MTQ~}bgYXz*Y=g*XB6oQK7|9e+G@pjSU@9(uIqUz>5AwvENUujKUOkH1 zS}bw14^cabE!ic^6$cW^beqdhGOkm?L}0FvlhT?gjjUQ^W~h>^o#Gp~?AHLp)PT z>`)@u&K`23o@6EgZ~=aCD^H@3HIjWCjdoFMzQ+>J-C~8xVA-})m6$;7ZZ?ScM-*K8 zbNlzzG`( zRL^ZJFt^wRrDe)ab)(ur`6Cs@8->ZeRfm@3%4*tGSZmCE`t+pV39S+qB}`6SnH26dbO|<6 z4eW|wnrNV{(i|$34G`TxK}nh!(2Q)${_zsln1XxD%OB zl4^-h@}e@_3e~d|@_wSJ7ij(z5xqhVy4-!5Kefm^PvTKuVeL1=C0kD9w}gnj4l!OH zcr4$*3w^4>ZK+rnh5{7qPOnTa|F!+S+}nfQ1MQA$a6HIUb@0lPXt<#{M^e<%F-e>{0%BAY1c9iyl^ zn&eHI?Nepz15#EUyFHMcKNmk`pYedHt!Gdf?W?CXhudSBGf>sh!_k0_hZ#ip*_7{! zzvF;*-j&8L%6~?{ZvWm)XwB=Gu1tWrwMZVPB&bug&CbcNyJh7ka{-RR6K|_mB?AA< zl$eirzYk_*V~Mvb^PPVu_D#y?EvYXxQn52fLC+UhTZ#87VVz+Fleu0M7Bcm;5`8PB z#X;nOCNo?<5zqSAU@rI~XJJ?sx0hjuZV+|tA^y`FI765Q%5~3OW=H^I{TTi$PaE!jT&V!s4KcG8A4|D*V7_EJ``D|3&0zzRE>T z&O!$CPwK+W8ct2|1^4tQ)_5N^rATh_WqipB;)lXa{j3Uyp&jvlcPx2x7<*0dKY>L1 zYso2E!wE|Z3#AdK6O=PAKI|#WFrzRap9G4iMxHqZ0 zx0in0$YSLrQw_3HS_MH3)|&gRt*{g5>u*RU`en-LOEwQ`MLjK`+Xd@ zqOf?1fknQ6i&-6B+&z%HRc1~z0cE*v=1i)RP3TM+V_5nFy$ZHgzq@=X?wIi&dTnmo^Ze2eqB9PU8S_c>OFLVzspBp@fA=WD^1mR${@KoS~#DK zBTQ5|<{gFp@HTIIJ&&=GNU^z|&l^G)o#g%FeWxEVT7g=GS|zBa#9>W*HKNZbnB;~n zSsH!F{7mYdhlWp67;tatig(gQ>XJ}x5#a!XKwhN2B9J?l#^TAKRkyKAd;S-fM3VGWeIw!@UjPJFwb>^40;iXX%Z+})GtmA^yB@EJ7-2~24ajM@-luCK&b*-#%C z3+kB#_TCd%P4|Q|)I3M>X#(l$_32b@0Kc3}OfV8e zdr{Q4GI9oFqK^txCH`O+j&iyl@s1CyD#RCc!H62eQ5a7hWCTp|zo5v8#9JRY6)iXy z|AL+Wz#fEQ`9~XT&B^u)vA*JR$XX^COb*9BHB=3P*_Vrc#R0Bc?$e&Pp7frfOh%=! zOvhqJr8aE&`%E#oAWwsB`!8BWMXYGBl+xTK9d=F+J%jG`y7V93V`z>y1r=MNF9yrF z2{U08dyxukV;~Bv^R1R38Xl%oH-u#v0Y7OBY=9^#{G$bx6+eLr+C6bG5zsPXnM%YK zVcd}*Dl8KGxy$$^Uri$vJhndjKM>D#0-tw{+Es3t1oTtmhpUkzT?D^~hsnMdpS+3A zgAVkq4v}A@qpK>1$(RIrGZ^qGcxhdTvRd)qF)(%8QIGoqf|vzXX(znv6DkqKsD7US zA^Vq|ZUxp^m+bB>e6?oql{*tnFM}I&pUkfye%Qy^e+in>f+)eqyL2VT3$pc!oOUHy z{Y0|eF(4e3_-X^%F*y16Rk7Ed`M-_y!Y;-d9D^ZIhblu0tnvrW?oRwiHgfJ~LOGa^ z%$gRL!^T=p&GrU+JB+nB#66GUQ_P1)6~dgVnXrF~VdG}N=q$o5Ey!s*C%RTLtNt{-sox#h9NVe0c|iY{D@9N(zYjlYi71M*$P4G%XUJSn z8yEGj-cH_xBt5B&cc-@yeLP;~gQwuW*VQA510H)b5p#LypIKwJ~q}&mm>xp1Y-R_NXi9f(_SfCzf~?-Qerlg+6%D=P*P5vhU~NDSft5 zQ=^{4`dp`S_KiBiAK056h{n2-;rpkCXsPIvaAv?+=oyd zjIjc+SSjgmC`R;gm>8o7+~J#8r}boTW8qTxo_`YT^nqlP%UHRqWDspYBwmpPgyKKu zQs>QqeZNE8wmvzVI|;gUg!3?i8<7b=1lJw~s#WOBzhm}DNu?>=>XvFM=Ghn1`fDvU zo!QmH=y@sWFw_H74mK!RlufdY>ewMVYkdCjDa>R8%e)ag)Rd@fBvHV3?|JVz?=9~m z?`vLc_vQv1+xyZ@p3yCTt_GPLSq=;4Tx@LiL1$X{)$n#^5d?+o;Z9OgMAgzx-!HC44bcgs)smL1IdmmkT(s--&dw*=sHMQ zKi;<>b;Mib#mkBJhjDYOf@GCrr;3xqILMf05*Ke^UFRh4LgckGlB=#OlNt2a z*^8gNrYqGS-C7CmZ-D3Jw#Q?k`hX?107=<_ee!jr<>92QAR^fY9w5N@mY6@W91n7y z3g%ehrw}fVlYWpRpi0iIPZU~6J>(to5-C_XkEJgJLo-rLBRv;d+K2dTKdgrKAR^$Y z*x7+#n`2=Qq(x!nFqZbDwVhMd6dMvo)R9UoCiRtjGMVKOs;-ly1nHCGSRv<#Xgpxu#;@c~5@mrCbhS60N2*Obr4cP!HZ#Brz^GF@UJ^bBk^u}tUz1#@A z{|kF|oqB3N_NNXE_?~p4o!}n)hvmpY)RYqb@;73=M`Y&v_-e*UNiF^dk8dmXpcbEb zE_K-dSozO(K5`_K!-#Vet&yCSXfo$|_D3>=<5UxTHL!w2((T#PBh-ct!Gd@N@4|aystOj_9W}U&Xv?;y!($Zo zHAbw1aLFq)VB_oaFJ^(VJVp&MjhKK{O0=A0|83|vYb2DgJMsUE_}mBW{z3=)oq5rk zY-c4iSPJ5mjz}mESWS8~BZpz5?|{*|S<8#u;%n&1l!W^ki&vQkySa?mO=>0Pq?2F` z5%V}Pkxw^{&s~*!R8v?)eYd_ana;hd!a&N0qa8@}2(&PHDJbUK44d}h59 ztbz0^z7(oiSB**54n9e!SQdS{+V*>4ym*THaE}->){dY?xSiK|Sb<>r+?I{cZ<4qcgL!`x~^SocXxMpCw77X3Sy&z z*h!ZKGg(d`Vp>b4v(yL zx+=bgy8Qi&T`&uq@-k7pM#$A*w8K>A0Bt4Nyprr|SiqU@@GLg{IjpR1hKJaV+lhl8 zgvv`8x+s0&kVSZCmB~iCD@2X7pV~!uWC+&fI$&0gI=kukQoE@2E=HE+I5l<0kP$OhR^iK8A}7eau7%R8;OmSB%RAVY5kUW}TU42A&LQZfsRmbe z*@bB*oOzW9yqjg%_9Ll<_2Rz(=UOs+qp<-7pf93{+ZJV4-7_?w#Cql$_OmJ?BXE9_P}0cWmZV&z|U!zEpG^qrZ^K{)0`NmGfRZ;@ob?RHgxgI&l3?=VwD# zb*f<+(TBx`De64uG%XlPstG^LLw_A5X49DNg&mY=swAD-JVOzsHu1b~L9nChY9F z?C`E>m`=W^7P?49Kdgqg>p;cdpu0YJsoqpyrQz4u>63N<%*AM{ocWa21_jD4gkIUp z-|ck!xnbywy(N1kJL93}z>c^>oyRCBzc!xPRALOBuniWo)1;mu2wiId-Z8|geSzf+ zc(XbZWeHL*kvfzRq+4}Xc9B}3N5JI)a_MhsT=JpU1}R0bHG31IOCzeh23iYYMW?}i zVS|mi(nH&6cf>ev1I;1|6i!WSVZ&)6X77o31{<=d*~z6$G1PRrYbWV-mrG4F40TpO zKTLEE=cK@s&S1wzElxS0CKyIx;d$T(T<3Xq_W1f^+gRv*V>ZkoGTe&1X&@G+4J)b( zoL5*m;oPV-2bMLFl3VG8>2z$Qd*)KZ0q0>d9rq2p)%;2$XN04nvpIh3E9YJKvlkuz zCe!P3{J;CSb_`r^Z}X5-oyf`8ag=ZX~)pr|2SJ~e{k~6K(&&xkeugx zN1}5E9YJe3YiUQFb`m&V-hksK$gsbi7tp!gu%-Ss zTq5gtRGUCowD(BvbJ!6_IC-!M>o4nEM%Tao=m=zylk=+S!BGtf^(VH=14CCT4v!i% zauyCOt!n6=?wr#S&C^;U84{fvkdc{mjhGAt3`CzdQO;o9AEEO3tuuuz%UI~^hVu_) zfwGPKaWuUclh{|#z%Y~Q#dTy-099UttP^A6t5N`sARhLTDK#%$;K!%V1xgvJCg);n^uRWxXCs#81)?TVNXG+IQd~3) zR(7Bn9L%pcROp1B%h1`Z0oLPe?9cwl>zqi>LE3sMvle1&PUO1#L?d2nFPQstXFaNo zE^C#X_kq+tYOWe-e>r`W{_sLKqBeEt7+el7Wi>I&>GXIwLtOuiW}^e>Hl&*ebRJBH zE9y$An(s}1AQ~*(a&Fe_hP_HhDAL49VBL`w7kK9aZ%sjuwc*MTVEP`*Gz_S(fg3#0 zQnA>!`E(0*Je572=vVm~i`*5x^@Wu$;Do=*bW>Pms7HO{ZT3E{!5_MYgtxH9G0r~t zO^1-IPIT{iErxtj9X=ULT#5ad$m8w!0&TDyh7y^3L?6D9=!w(B;;XQydOW)Qq4tPg zel@YdJ*ZK+XvoX{fbH}e)8L2NL`?q$uGN@(dGPAVy%E@6Ex7j_@sn8QaD%Zo;{%UC zHs{4sD~DB;ml<_u%wnv)3f9_HcIr&iXIY*31rbx7i_H~+)br%7Q@}f%d0r#(`4g@_ z#>&Id?H7SV4rJmZeD34O^AB1!j{4m~oDX#bz^Rt2I`mD&Ba6fM|S*=+cQ zHt?n|K#(DjjPGbV4iv}Yeu`e34n)h*O;L8448%Lnz^WgL{rw&8KE=9cQe!m>Uts~( za$mXvdLnDi8e*|LRxt}dEO%dEaGOfIo!Cud$`tuX8soK58>=yId!ZUUenR!Z{VjDi+ZlHP*+=ha&Kk=e9;@mrlhNOY} zN5H%#+POW}b1m$ZQs~st?4|okoM0c<`4EHJgkSZMNV|i0T5YsWB4c!Ax5Qm=a|xc$ z!a~f8ynKy@*kHJcjWrElyboSWbGk@;B7#vKZ)PBQyW8yJABR0%fe2<7J-d{Gylck} zBq5%|v{Kie7aAP(zv8uVKli5g%S2zH7Y1~H>X*aH?ks~Ko=U!n>+T}nD3{vN5w|x3OUne0j~^g5!}V8 z0dU50!xtsU*qR;w15HP$;vZzJ%HFfj=DIG&h({`xaF@I;52zQPK_uSQ6hc>vqEz}z z9sr96snwW#XIW-Gm*`v%D!(?eU+W0D?EH>UDuGO7vP-Z-q#gO;1nnAfdlnw$ZKB6T zv5dz+Cmry-2h#y(5NCzm()S7ejb6D)oT@#3X*7}k2zXR#?PMS3C!jbU3Hc9|_noOu zE(*7o#A4o$9$GPUWl#&9RKyxUEm$otdG+D*?8LR_A;qh}Jq?I(Po%1M zCo=H~H1dL2RyZ{(pO8rkvbhx_4d*ow&DRHteug9*iM(*3lB5b(+{DXDBFfhYdEK2g zRKW);ghiK1e5{yW1C>GcHV1T7lP7J&7nf5es>{wxY_Ak}s|^{?rQ~7)i8)?UW5~qp zCMVE@`lPn>pXp9z#5#3{x`-|`K6E^4L#F%^F&|aQBHl2AnUp{R?*bN8$p$%z0NjT% zC10=^%-lpPf7YXVE2*;UK{kH}Hg5n~@)G#rL*TE<#9nK|g}u>`$Kl6=VD17EavlEn zC@}d7{g)FsR0D&ZkQLpCN*}~$-HI$;gw1#gi1#C!IE%RLJ0e+O^j#T1Bys@quQz(i zuJ2|#5B=Qa6-u<@G#W{AyG@8u?MH{r=BfAD6`l&r-ym(>sN3ntPYtBUBxvR`(UJM+ z`RnW|A1aYuDKwd#kv0yZ1I^#xhFBTJW{R`5}1=^B+9-COfHl*|4{JsOFt!0h7 zk)`**_ZnVODL3W(p+is& z09wb)Yzh#gFO7AShbt7WYYg4|h3q`Q>^GvVh7*ykgxypCh*TlY(GGdr8$He`7JSkk z?rY1pd02BEwCO44bP+n4jQw33{FFeSM3GN904C3Xzf17l8Zh|@zEhp=v3v`H$JXq_ zS_cJZqsnv;TpY$b~2{`x#5+F0~=I&?RPU+t*0wXkN0z z@dVfn6iF_Cbq!)puFJCS+?Gx;p`yUdxk4GxPbpJ3$uY0rKh= z^t>48u3@%Q`DQB;@-id5087ze;5OVn4yZ2!_OZk*1DH*IY_8JqbUS9yo1ee2BO{Ug z=lJ~-$&^WdV>ZLUXwvVvIq|_@r4$`UyvRnsp$_vcwrUjg@(db(h8~fgQf`NM~+;|b#tYfyH_<J`auIDpxWMO-9bo%cE~S3=(#AcE`k>Bh2K?; zn4CKnyBi*91!%kzcPwGGH;^S4809(qABZN&!4qDv%16lXXk?Z^E=q^oS+KeXUH=LA ziCq7TjpRwZ>`yS!k=c!9#*@%eQ^+!{!&~czrYOmXCGm`MA~&MBDl>6IR^*1S%Of>3qStM8N)q4(D$=l;hE_X~e!#hNIh-aOnuJ#(5uQsa~!&=rt zW2K!|y!|Nl+E#UU=@?f_CevgK1`c6E(2NGxn>lvrhPcPuGgK$J30o8E*bX zgsD21?2gx(A4zo@yXiPEdWlSmMhj_Bl+-yMW=?;@>r24ZL@;&(IAu72nGfNmG`md0{tagS!2Zk=gJ9I01+GJV4jg zW|pJjf^E!L&ggsz&c#1Gip+Qd@4STS*Moi0;D_M7Rs4iOEt7!KYxdEULNi>0mgXR3 z<^Z*k@W^IpS;6lzGJ|O9yc8%hJ5=fdB!l3E8t6Z7`0E)X+<*qRFyas9YJ+~fn9qOE zNNr|a8M2D5-BkJCqvPE&vaR0Adm*6b<03nLPVqM!Tk|rNyF0O<_LFfBC+;nF zXJdLCbVAmzVBN3D>bWqlAh0+9nK6?nLZrcg)C`4IcM;d#gO_!h$Vw=7^=T}axoG^x zz$ggrtiZiVK;6jSpGcDx;QuAOeg~LN#wHz1eMK9j-ycwOMLG)wA^ie*{z%{+gB)Ff zmaLC;$*B>@#$f2V7I#!7*VYqxS`lx>0}HP*(z71lmPDc!;?uhPUxc+*gc~M9Q5T_` z?NHT9sQwnRZ5vc>f@c0eQssb(lc2U}q~}s*wHK_Xqtm`adn>{3bnw2EnfB$`t%2oX zsQM)II2YIoMGPfRy$nirl98OG(^D6$haJSot?b5IM14yn5wn-{s(ZmX7Fl#j44`w} zBl-d?qtds7(wKZ`TcUndv3BpkUkl*KwMe{0$cNQ%Vhy-29?4mk&p%*y-Ur50fNu)8 zIgd;es>_G`i({2Di1*5Qen zUI^mzo%lC@vZkeA_dZg)CZ5n>sBAd$F<7~&hMCT|%%JCcNAoD-94gGZoBUlSS~|E+ zb8X~0!IIadgXuB((f&%b(wmC2O!Bjxor9?FJWh332-VY<=&?APohManr`TQi%T|uM z`h8?6K5@qJBr35hXffJDa+U|xgT^>xBV#GGp^}JSwT?cBBRN0YN|t}Jx`}b$qv8HT zCXE1obJ&}CmrfcTs4*)_m2eyO(~PC#MwtB~b!H>z(X^SI^*JC^3+t^9QMO&!I)jMw zccKdNrW(y&grQLFS1Mr-b9QNWs#Z@Up#td)zYnbwjHfL4sH}a!Ge6Bvrk$L_?7Akmt+L~chS4U};n2!Z5@eMt;kbQy{<8ad~^F?}^gqz$r`Kq$9I(s~qP$N9rcuU<* z=JJbnoSj_*crBu~X(W5TQf;pGJnXJrM}6F0YQ9(EH*dvL@P)ejlZ_fdws0vt6^9IZ ziJl(?zc)tLCS%7ffzA)&wMJ6GHH}I=S0qad{trZcG{eqqjgQ>}KXC!v(gUgtMQR10 zRod%Oo$OdV%fWUZIDQdQ+!wtVf?lhHjgx@(xQEoR@ZJtBBtrW!+`kkm6zjs3Xnqd* zM-8P`ceTSr)whZVJ(;I^ka6unzUU7kEE}Eo4GYwGQ*O&**Ojh+S=N}Vo8pZpOwZ}1 zw8FB-62dOS_vRj^I%;=AHS!dL>A!e~zE9n4bknxhw;i+HvF&HSoYhv4E?W0&eQnEa zd+gh_t%g+g@lPa+G@6>$CG15Q1y--ob-SK9hU$>h>U*;Fn~_1uSOn?lymrq0S{ifx zWPirKwma+txx?PwmQc_|`quPDdz3~>F2ZguNR}!NZBSHcfcMoL|7sf5R+ovF`ICh_ zN)})_R!&>=;u4@7jc1+;-lnnp;2bn^9BR4Fjw+|440(tNM3kbnTtq8YqB(mb1Fw>& zbE8ryr}ByDe25aJ+*aQi7nmN=l_<#6m6}{D5xRy%>$(yx2vGJT$^D!GT7AbpdnZoQ zE(OIjrBW$3^PfzAxl>dk{>!W`K^<|(d(rkjNU-I|yP`z9gTaQYHibP+h2WIx>^>__ z9dd@q9##W2|V71tX){*+Oxl|UrI&x~?oUfD-#@f`2edi46FQz3X zZ_@?pgHBTa+1uD(bx}CcmCCmoP-+0(rnUl^<@PXJxHX+~HgZ@kwyE|wM}~HW4BiB+ z+az|fbV0+Fr`~po@iS*nUt|YqH!5!ZjX&9s_yPZF7@76^P}z9s>K}Tn=3|dgNo3(% z{`aC5wmMXj4?A{?vl<%aBU$c+Xw6q>(>>HOIkitH>Wz;Dn@8RJ}If z43fQc5-LW`d@!`?POPde8i7-2v8ops51IOyXS4h7t!Waqp>wF(0gsTaT35fcD-++-e@9T)8F{Odx-C3Vc+E;);v#{Lf6n_{Lz>6 zu~-bZR3`hd0UO7keIRmHLrHWX34J71ZepP-pNF71>tgGeiGSas!bM`Zwb4=1$8rw! ziifEXTf%;UUygd@_Y%;Jji9eGhTckkqpNw5OB+iu%Sk$ST%r%yJ5GE5Vg6)ZV)i!I zGi7q(T?X0ChjjWa=h$R_X{%@ZVf}6mp-;#!+kAG$l_83~4lVc@yFD*{@eSpHy3yEz z^T&Ibhnn-5$D2Zt;{}Xw&`Dd+MRTc(o6Y{s1ym0&;FRcedoG8cBZZwtclj@a{xJFQ zV`h_|$}ntGa?qdr1t-+cGoC^dw4^?961yv^DeLf3H=&2cceYS-yMeB2o2ij60HqA4 zpH?0!|4U;{T)?NOMhs^sGmEBk+j~Pi+WI{9>1nLvo#?H`P|bXF)@1fZ9i_Id8dw^E z&U($BiAqSQL)7&3#eNwAel~EXUp=)y`(dxCA5=e1*1l|PWxB~8m&>NgrgrRHn?bJV zITq_xszA%p4W|he-Djzu%x=$*?AghV*Hz#kNjpkCSQ#qar>U;ShR~cld%p^*e=EoE z6M7NcJ3w`@Cly9l*b!ZWUCtIqS*rCnP<7e?9^QpybVn;iFsnj%0=0n37^pUZh-xf4 z`v-PSLpl?+p+0^IBdkZRzGIiF2c1M*v@H5o`%>T77@O&{HX56(6OcGZ#pDxUBXP{( zz-0=Ou(%%E4M1*5rQ8&^z+M>Qv&e0EJ8P@edEatM#UfK@!i+jM4OOQD()O~iK z8$^91TVMR>?nJXDP_JB#jL&fNN;W*IcSMMOAoER}t9=lgX9E`1WvHYU@EnNjUW|u3 zf#}Z&A{*}bJY}G-PIv%qiKZBc4(uYbDc#)*@Cje^E7n3!VwW0KVLg?C z?3iyrO;Jzyv^dq~6R_F7(4`}qUDD6lC!OV}Nu}i){Iy7O5Kek%Rm0kULFQ{3(zZH& zL>DAdJkg#0L>gLy$ys1{2l1&!_`!CfN2i&|Loojo${{`m`n@~*8%EhJ_G`8)?8u$R9%3JKeq+Pm zoab{vJA>z85w({AYP4RcsZJR~1B zx>RZ`#$D!$mMF_P_N>%$nFBpdM9MTZ#(`6#+E-~~NYajBhg7xqwqK_^L@av{ zb|ZW3_9pCdEUWpDQ=dV7Y(=DY1$tH2V6Xf<_E`UAtVjoo)v8HtOLd%sSj;@^`jXTo z&R{jI@QZ@%kJ;0(-B#CL77IdrzAx0v+%t5gr+ctES3Rg+Bv<9FPE?-Y9TX(av5u90 zfScP>Pt_2-<%ZrTvd3c}aF2sBn;_r6YAt~#Cv_7isR%v&M-_Z9l@hD)yUbKh#*^ul zYU7{q6{m4?#}MRsf2y$e0ijsBWLcp~PL;;Ls7{>cE%hRi>@habLDkKaoldH6j0=p4 zaXNb;Tj8PklXZ@Oe)`crs;T|0Z7%1rUFXz=&Fs?g#UI;99PF8)4_wkxUBimjqvvv| zN7y-W8L1JEt>WRFg+3X9yi6tUnvY%VW1;C%Xd^GY;&>_*(0ycB>*G80;F+bV2D37+ z{m32J$IzJ?=R;`hOg#5vShh3SPx?3BU<*1YmBe3ci%h$W?Xnv9CJ}Qf1D*86HXjKj zuR%dKk?H-AOesVsZV;C{N$h_ZF`W*?dEOHf*hkFjD_PWH_;J(mV4pII6|c>J9}tP8 z9ZJ=#V%%!7nZsS4l5y_p;_33;e3Jf|;U?8o#8^qKrr6j48RTeae{J)&&!lU`5xN`v zi>5lusZDk8Q08jv$DvXv9L{(~Zpj0E70MpL2=zR^RC#rga)Qd4zEZOWRHPSher*$7 zE}B6}W8we7nj0|}PvY6-h)Y&vJ?9KnsohFas^gFCVqc>~dCOBJ^O^|H5b`}s;IC!$ zx!TJj6|<@ z#d2>&OzRXh?+Qg%Wo_Pkvj8dYioLUhex|Z7Yb-G!S7<|Oe*OZEw~%IGRC#OyBKbHQ zV=WQ>Ke3;>UtY^A)!p+B(O(##%)Q~24%iNJ(mb?nB9yTbLq&@z&l8%0cR6rbcF z*E|ymB>fgr@P=ZLp+>69Id2!r{Fl7aer)|a^k1k9?Ywk^Grv!)Ih$_DZbKeQAJ`#` zwI2IMa^V5wq7PuLRe*--B8|QyMW!>4w?uVUvC`34GymdKHzxZ!izv={V$q4v!S4)o zCp;~a^Etgf{D}3|&~2o4%-IEdZmVu9heA(Xkw>BU>weBOG=l-Hai5x)bKs#Cv7uG0 z^$8ImJJGawpmdNZ?PIcJWuT-HVBiuMxWam$8!SZ8O2hZ1$vfTz!qdraRUwkZiS77x zEs0sSho`P6b?B27#>p9XjZNs4@y7JTv>$8xJ~ZZ~dMJeq&FC*7Uc|iSGF6_n!N@I08R>ueU?>yA9iyZt` z^two&dRPk8v6Xvc&lEzk8PWaoh?%|y-fvjfEu_e0G@3v5<8tbURjQ*JA_eAf2GC(> zcoMQ80(8KY28@oeOz?pUx;r#9X!`8Ml!I2}ZlgNx4_?deT^3 z4S4f3*l5Zw)eOAI&B)s+%v7ocCC6QaoM#rcwSkOh9_)c4gl~dohk$WzO-n>mM zetBeE6Jl!DIHAi-q^E*uuj!M?%n@1dOgBuUIMJ{PzU>5M547%u*Yq4Kpay5)G)J?I zApYY{jYegn=+WdovLTmippPW08H&u6z0*=(mx*7p6;Z z16rwpRrv&)U^M(H{P-G;@<_|l^5I1_LE4YQSBW5Imk(^cV_(irc1;*mP07uC>eK5i zJ2u=8WMz9~`x3I*U7)OG=qh)5_N=BqN*vv9nnM$@NSZTfi{VuHM&Plzp{<`m5#w3Q zXDDS0aS1E@F&KJU24te4rfk5v8y5d6=CBFrya?Gd0D0Afe3cKLR&!>3n7ol0{uoU? z!abs$oaoI;$AQtDLa!G3e{tei&;l~tx5(*ag* zL1n9%?|EwQYLk~diw_eHrBR*BDDRMT3clbwJ~@SDFA+z{v$>PUS%9y&6Q~|Um-iy( z9Y9>oNsjh4GdzxEm5g5PiVhgW40eFSvBz`{JABMPgUy?*9D01j=lV zk#>o26(l;*tAKSLY>2KVl=DJ6i1c%M4|_AqHqJ#H2TL*F=aE&H$GLX5l1yB z1K54y>RiIky-3GKG-ns$Gz*}-u}J6^L~rFBGk@&m+Q`iy^k-T4AP?ERFX+46*g@-v zvz;eb8HV;8#~P)YDM7EN*nu4E17DUvzjPuiAr+Fpcl=3SIsu*F$!LX%FJ@x}^^nag z(4VWRnb=G|e>PeDc5qvEsy*_+S=o^+27H#XdNyn>S02D_m8!zMXpxW5h4jj7i{)jZ zx0`eh>qu_ttJV{(lz`NeI+AJNwih)jsqlJZa%34)nI}R=*N_}xS{l_)E#Tg@(ArBR zl98F^rp7iA%q~Dyh&@~qo+?5VuqOG5S>zrfbSQ+7joro0qwD;jErQHn15H1temXarwkbRQH!^NRjx~^>HNo0A z2wfe7|DG|M4?r*-X!=1F4ziIyfTa&utV9it)VVZ63N!%!CBSMb6%Ltr(+7d>5=Q?A zyS6nFEf`C!A~~zxQ15!=_bW!LszbFKyxEjYb^`SD63<6!X@6<4#XF>m#d(L-$|8 zs#^rz4d-VDqwIlRuJDpN{KHV*Pizdact&z>SJvbMwH9FxQZ*OBGd6Pf5b(B#xq9=o z%HW_R>mP`fdx5;_cIe8Be7EBQWZLE!#xFANkcH!ocC3b60YcgkGzf}=@064bhE>Kp6OZ z$#`d>hy8H=4RCv%(ZBPuL3>i0_JG$dqK`LNMLhri;`=DxB?DPHGcntGtfM})Mue%z zjwnF;E7#uTzHok@XZ=aY^Gd9&JMizy-`4mqrNBo6sJW%EG{m4PMR%rt4zS4M$>))bqlkLA}9u8AdLP1Z=?g zmErb={GXR+SkOlbPxj^6Ihc#o;#Ff_^`R`O8_EQhN6`^i@Ia&4z}86SCGf*Bc*Yyr=z~NZgH<(!xrVUf za=M-_!wie)qkRQ_QU!Gn$@LQ~Ci3?^xcq@kmD*QVonNGu>KAlw!;dQiE~Mr(4=^ap ze*vteIn(-zTRx*n!qZDBDZNs?_f0hx3cVpK&_f+sGFD0S&i6+y4a? zq&Bk?6f;K(#gTe5wB3gD1KfnXwG0NcGiTWJMQnS{I4k ziBYBhqX%oXVPQcSSbHAOT1nQOgSogfdR1tkCp~$yG(Ed1LDdVv`wo^0oi6>?9JfY&8%r-l}UBNDn0bEg(CuPCE z@j%9tX9|~eV6_ccYXdMutl*yeNW@F%n0P2(fud;}1Z55l>!&myl*NqNCkMX)3l`4V&d4xKz8R&bd4 zA3*Y+hN^_Jt}@i1k<93C-pZ#qyiA%!x5LK;`yfYQQPhe!q$oO$yE zsgmCbg&zj`*H~Q`EBeXRU&tW&LL+&QqHUmphCn_y^Yejnt$d%(I8yzc1#G^dwV&~u zLsZ~n8$NAO;3ijQa^>%*{p6cuRw9}vQupp3!AZ}-P9fxMcXX1-wMO8;#`86#QUEYj znSm=130 z!3%wO*B;()3iZ{49?LR$=Ms%z|K`LD;EAsC@KalNOkfnU>*wYBKRzvpC4c(AF#;>G;=b4>EbF<#l$CK z$q3}dZmS1}6oF&p$p$1vDzx^STHgYluh7{O?n(nrPR9Jmj}@IS*2x$C`vzWq@ofh5 zX$20RI@fqGS{@+c2UOj89{{wgzkhIlp6%;hcj{Lraae7guR^^vihP~lHr@z{RS zztD>@ z0Udrepy|SYMZr&be#)>ecPQ`^e103aNN%;=b2f+#}loU1}B#TFV%re4KP#(Y32i+#PFOffEjEk|%2Roo z>`tr;Z?xlTp_-=HX?=)aEJvdMLp4Z0e2nG1CSW}bgaZac_f@#2A3C}(7EB2D)!|hJ zNEQG}fj}l3FnOWV^aiBx6)58cRDFX_qR>sU@8<~bUL#Qw`FoM?MOT02e>;*zYzE<; zXV8XNmJvv)8^~YrgUvOzql41{4aBA!HOFIMNjOsNXC8+_NA_# ze!f6bWOPAREB$=jpui%`yB3%z1Rf-QFWlLPF z;&nc_j?VwVc(O~#4X7r9gE%O#5LYYkQ%V(KCWaYwnB-r%jE?)l3gZS`R)=j`U=>-3?`&Q+DpEB z1#V@JRefmBo%-STKtXJvKwkOb4#fzfXsRe<$TQlamAawB8$*ZbV4k8I_*M46xq>e@)-1a` zG-PK@__P_1>e`77dkIUlBh(zs80{IWCmbqu(rvKMY61zd&wrnOC%b^I zW8L0j4if#Zz$#q9NC{qEJV`7bdHsN5wuqm~x-?-)nuo2l?kkM2=lmC2>IHDZ{PVvyo6X@;|Gcs~rZf5wAxjlkA zuVVXb#j=*l>I>LYm*LrOP}ohh&kg1t!Ae(Q<%wsynQLyd{%Gh^wO zS%plVt=rpg;kU0){SW5;2v|IWzP-_;Swt(ucClj5h;>j4z6xMIuE-RT5!Aa8R~rx2 zcEvvGz*?)oV;1Piz;zPY@@8ehNaR*X=f*&zBzP*o?~35cjXkwbnVncI=UB~kcq^2Z z+~N5jb$KG3FOtWOSEORuT_duwot54KW3ga9k#7WM5*zFUt#o33+29s>U?3&@;l=XE zyHZHm>^e`ihQo&A6*PjHM5DxlFY#IGK)-?9>B~6fnYjnj#h}|A@o;-Ga1hVTpZWKO z7OTKN#lUAxDAf)=Jy=mLsLl=f^O2eT_RYUB3yDHSfCD*0@dK2RfJBh}t2@EOHNH;( z?w|O37fEo7x%dJvE33bZtPro~7rY{$WrF#)j4YK+B3lc>KTTMd?8Z@oytpM)q(9>tG%0fQH>e4{E z>iNRYvL9BgfoIGm9iGe1)5X5@*11ZkP`rge=putxGIN($*$=G8X3&S^L|eh}-I%E# z^AetN=E+=r!HVnt`cwN z3XuJTOcME97~E6_`@xJ~k|z(x2B^h+)4+j5>|D6QpLu41k!)P!#yv%WQx5Q_fdjFh zf|;%OdOkod7K}QuN%AoB40!V%ymVcsFdyzN5AMGsU7jO10^w=#(F9uGS;Ku^vRCah zP?tT$VzFj|okXq==ev7Q`x$V5i0IB1yxlHDhQ{CtZv~GB`0NIEyYmc_&Tr|gTcX6W z>rgB>y1w$fVAficS2^gQ0hBEitC)!6scc~ zxrml3!X3r9Pb^&r70}JF>Q?G8{ozDfW|BP^O%!CdZr^o6P8CG|+L2$yflUBZqJrV# zz@;2CBWu=Jk61(^Qyk1yY=EhH#CsjmWiiqAwb0Z(aP-@^i-&hEam`$=+Qe%gvg#~r zzN%B*6FB?{)cFMZ%Rtk4LeW8RWGmq3#R@D)2eG*PSV19v8^E16YpTzD+JYzXy!_!) z6Fecixcs;$C;z*0jY!K{NY<7>s58>1Aovj*U%btSIt85o?t9>x^UU}<5WfmN?SY=Q zGtZ4kniIT#0>#*Xzi_P=YZe|)4>)?JJ$vvq4Kd3 z*}?vUyThTzYm6bDv`9=1C`FGb5!rN{am1&7&icHVO=+-Ogdg!cY67!b%&Q&L-U_IOpdA}A({{Q) zBl(RXysyoQg7~x>&`V`zlJ&~L%)hYSocdjD!NZ?;YzxRowZ?Dh35;ta_e8VxMUvHn z8YG5Y5_tY(9gl&V?5n>F&0l7QyP?G?MBo1+S~&%(l$c>8YkLFdUWOhX^ZqV)qWc)K z`zW+}0lJmwax6HuBJ)eKmRiiO68M#TF5%A=X;kM+ z3|y>0$ul*FN2@aLmQY!4UCKOVepi6u3TC?w9(lmpW!+zZsC0KQfE)4ec0voX+wcrO ztC65-5PlYVbAi|MFn5veF~BC36_$e1^1`bs>y>?1 za%Sx|V0RK6Y-e6Gcr9mU?~xRe-|^u}cU_A3>b#j9suxeeiq}#FI0nL_xsVP)XfKU> zWZ$3#4f-7(p&c9$`V5R7u%a8x)WK@R zl=r|S0{BS8FoGFp!VeOet<0U38BKI{b*O4EG}j%@3gLf|3KDH91^y-W>4U5-uhV*Y zK9hLIAJ9ihWR3^3wKFd>JZfUzUywW|__2-dsRZf#AbE0kpzjAIc>>iOj3D{*LX27h zyfk98nm`~Yqe*T;q8U=3YUbGrGB%wV2OyJr0LxY73KY)aXlSfwcmmhqh+ELy zL7OJU^`}BoBs>)$_*B!7DwWTbm-(=6<}GNXQaY!@4=#U zla{Il1My#3q1c28j4e?)v8l7OUeWNyfXMHRKrHgppvO$jtTz^nMnLZ$Sz`pV+YhI# zMMFzAVi|N82JDUC!@wvxp*xB9=3u=wkV(~m-(TS5w;v+bszmW#G1f=sVS$bs=@!UP z)>MVNh4#hDw=(ayKw~=^{~!>S7?|{7k^S!ONS{dfPI4PZ;8n>$Z`3hh!`?7KkrEx2 z(`&0ShqgSuCmOyRu=CfoP%QLb4EVS+S0fnkfkp~LJF1TiKM{D z$rk4Kh!y3;y6y+OYk?QxCyA$t1d)7eE##Bf36kOZfP{X*Q=T%TmCR!w94k5ON6bNi zZa)CgZOrflPx!>lCC@KD`bFmb8W<|UNY2~7s-{M&&A_= zrc0X}%;za%KSxts;R)jZrRY!#;yUR*AYEbNbjp@GAur_D53Z8T%mrO9NdFwM0uz{x z#FxLY&eup}iQATjFI}PT0A9J7S$QN#7kEKp4aIcHQWt(Iz;~`tn#f#n)Dz_dSQdw;^SAz!{;?w-IWUN?(y4D&seYvj?DMYv}fDX}GdEu`h|>HAk|P({;lq z>^X_x6l0x@;PD`6Iy)=thp#e;YsFh@z@3etm!gd6%@q}}wR7qGDwIsOT_{pKtthbn z4fshs#uvCtN0(3hWI~H#<%t&^#wx?O%gF!5kTO!Y;LgbYj45?6U7=cmt4L+>lZ!zU zqLFekN3qKVpArR50Xq@}kyw_<>EBtjTwvfmu)7XqCD!zSclUvu$W$wKgGgXE=3WAM zmV*$ zil^~j?2?{X6-~fp5Z1t8J?dARSyy5n5*<>3xp4dr?4|YCsnVZg7XKZEV`Fp)ngD#o zig^yEWuN;O{uiEp%YUB0R0GF9fwkBc{?LeIbp&o^bYKQ|UW8+JAjicAzXOi$!hwfa zjmQ?Us(#C051=jC{}Qa+kJ+_AQ}u*4hT>mTfVR?jY7}>g2f2aY`{57i#T5frhXIFh z#@xdb4!|FwJmoUa4TsO9PDrfN@7yn0E6LG`wVw#zN)1Cn=q-?$OHUSw{G9*_^L0tR z4;V-%7_oDALt!70zXiaIL<1z|>xl;##af;+l63T(h-cUn`1a?gvkq}H>;I@j>kRYS z$*f-T42cAWA)8WwnyT*rdcllE5}XIBKadr6W-PCCpy~}=3P5$$pg^%iB`+(Pp(;?a zWPv5FEF->zYO{gO1mOA%Nq!&roP{1F&MkRO>Eq^!bdy&d=yf)^o%LwwapZRg!Ta6e zMd<}N0NvdUN-4|O;z5*xcf{}h@ITD$1r`UO=2+lr0Q&*Dj;sPyB);$``hPu9^BsCU zgI$ zQy8fc0Jg;TkQ{a@W4Q2tN$&f9+fXW_#KX_VXURP25g1$rK6gV8kD&mmM*7a|r5Y(! zkE;nB9egHQsSq?+1~`dTl7&7g!n0iAk9@$p4DZC|kT{OSx{`Pw5B~S*KF3m?agA{m zq^j(+Xs%ruaht?&_bqm@NUq*T$nSop(pdvIWdEG)_D;Lm_3f7!_UKS{%GRx0UfgLJ| zU_3dKA{QJbu#>7cfu;Bs9;~uDIIIgMs704B3M46w& zI=aH&5_K6)UUWQM-$2JhcClOVniEW2cnWC{*Lz}HbF%w;3H#7BuFyCueefTckke+oX}BK z=1`OCIwBXOdUq*Nkg3Sxk?1BlCF2iX4fv6&t_HwH{Lw%lAiIl-ac>EpeV4gP?0+9N z<_Id8IYAW~k+{xXe!t_1ubA^=aQlE6NYq90G(w9KrQVH(Th9nP$p?vNwGiq%tJ9ct zJ`zZ#a*xbC6bQcuf2DK^s16=QqnOCdh$pXM%V%R$;S7!IQ=nYQHe~Vp8*6?9c4SY> zLpW?7V@Z}(EYx^l`9S}4J=$gy^dc2X_qkT$s*y+%(IENZWRcCqb&m7r87eCmIc;JV zlGAty1xO53>P?Ol#r+Fdtm0iL5D;x}2?*U~7Q#;wACcIrL^`tQxzrEH_rzZtMl87p z^dTN$5h%1FIBW!rrL&m$K?d%S8Z4=f5^v!%<6UO1iBMeEK>0UI(^I0pKCE z9+}KR>emvPaVqP+&k7@%QePL!I#E}^^ZJ_}{Pz-&Gn$=D)0`T_Y0+#wz0vgr{K3nQd}t0zFx z0gS|d^Z*A1fo2@5&D5hpnhtUCpB_SaQZXW3(3`;3jhRPTXg3{hmiT5gbY^7~@vk1i z4PnSi*}D+B?2iHg!yRFzr z=lMjcGJ;rp9psvv@+1}7Vn<80KsvPfnrk0$_iZpDJx!yTMF!EeLcmCB38ey1BJ@U9 zDVgIEj3@p;DkF#|EgjQTT}OybF1bBBwu=D{3;=SOU|-^yXW{avKqC&xBvzhOM#XY} z0#AI!d>*i_6==V|sYI73>3yK{6&(|e6cEWNR#!RJkpoHfj+d-46$+8Om{@TBP>Cz^ ztIfO%>2WHl68j1L_;Qz6cD`I255&bX`i1T+tV@FLj4iZT6W^{qIC94de$Pv4`^?NU zC#y9<>5*{0R8dJy#P5uUKjTTnO>m<^v(iafs_!MwD%n%9b3{751CPI@#cS3gRc0b{ z#TO0*D$>7HD9Kl!pZNDV!BQ~zl4!36CS;#W4c66OuNdlqjwpb}H?yi1Kt$p$lA)4% ztSlYwIf0SHNzF)tK(6@*epp4eVg%W!vB(zbez*w!kj|M&Kv-%deYr=4uB94k6Q1Qt zxbh>baxzcB+c&hrKD>_&@M#QsGMOhyC7whDt1z>E=)vhke0n2)CF(B~MiRf1=x7PB zBfg{5Rrey_ISuX_gXSE7H0#CRK1dIV(w9U(=ldU@hoIS|_Nf-QG=WW5z75b+-FGe_Va3*v`rN5#Md^($68jPO7JvI7d@~)N=_2@$dKIZyzoo-p z0pyK=0C*QY;1GGO^x#h*SQqnUGT&q|cmG)aJs@ z6HTguo%-0&t#zt$M>|N(o7nqek6z^|_n5^!=>I)qN`=fbp1K_??+mk*y6Z=9<~1NM zF<0qGY+%JcV9Jkien%>uNM+fbkq$3EKyn-fKYN+|UN~Rk?$T{iYW2Usy(w_|OqDk-AYtU5=Y=Cx9heQqQgZ)lCSE}#?q87MCdUtk&2AVR`Gv+VV+72{I z9jj<`|-KdD5{Jr^@d4sn!xfuWXfEq zb`L+tk(yG2a0S>LVs;W`4bttsLX7f;86SZs%BjIj%IefA5p}_29bny@JDNhbje&eeDC>~g! z#_scsZ5oQ$8V7*5>J%lM_RJuU4igFT-}NH z5;>RLmP8+l^R6|YNllLhT`7IP6X1D~8rP9G8=!?n=>HkSTK4Mjwln8QR`VR$nv;>* z@a!>I!i||*0PvPQ*F{;8)X#k8jw`@06*(r}!&_ai1tKvmz;Xx~-+5>~q2=nxy()|$ z9WVQ^wq`ob^Z>qdu^-1`J&Fz3gOz(Rwpb2Q`yBQ+9JIAtIZ?E{1xv5K0&c?5Bl;YgK1tgI#Y3IcQDgGp~~SDxU3 zmbSuCvTM}BDn#Rm&X-KP1zH=d^LHEgydP_xz)v?gPwWrLGGvD;y`fF17Lbgg>>$1d zot;P1Yz1Gtbcrf4iZ5JOS=TBJbgNXJoDZx^eSQWQaRP;dNaLlfNtpP-Vw%g#=;3i^)4VD`F};rqeV#r}(af62KhUBO!@ zr}yQh4`h3K>l~)vZmRN=bD@XRT`PgTGOdxtkMZ^Uu+uXyBbH{b>>xbKdr(VhDEkmy z2Hoi}(2^b`3pqXRKTgnW#p!hy@b2rv8*jjgRIaRJmW%P;hSBxqKlJSHO!Gb@*h(NF zyB}Rxb7g2;yrFh*gVYYmX{INUb;H=h=}!NoXIccO8o0A_v@*W?OknX6nNkirXo+D9 zQM2vXXQQ!{{)N`Xx9Se3Nv5Fyu=EBOawdfAICudj#Y>VC6$Y|9`6#>GuCV+5E2kIb z=T(HgstN4LzrdKwiSadoq9vmwov0rH^9#f{Co=PONNw3&l8iL^i0*#^7YR2-qmjf5 z746m<9+4WZqDYSz_-qf*oQYR{5KfrKF06@qJVokxEYP4t1SS6_vAN%ouo`fYRNhKF zJRL12`~4)pRuIfcFBXYKOY})%Vp8{(42?=1(*8&Xythqzi>FPnN2uM1K77KYjqyqv&4oS01pYSe};y2zY}biQ-)1Gl@vaj;DB_ zDNzH7+lt*>NY_$1pcp?SbUZi_n19A2mi+-L5=r7k#kj5{enj_C9A=^eH zjeqYYZh!^l!>gb^Z^?xFvYIl;mRG=IFLbyIs+8{BTcE_Pz<39?h2)K8r{6g!=0826 ze+ejmVy>rk`5Ml;G-Byhd18Hj2QwyTfU>Gc*6+?NWiRj}C@>9gK+Z~#T{BXlCE8K& z(**fB4!JpltoJBg77v1NMX!iYG?f+fQiLL zOEqR2guauy_Dax3T{K^5R`!PI#6NJ=Y-n&Pa1ySU>Q>nWCDDp1K%^l*d61kN;qg9t z1?YGrZ+mu)S7x_xGrAA-W>4^+XuFxfcMR`Fq4y3jP6G6P4_og(kh~8(?=k*K@UfOF z{zQ*X=b5X)%zf4&a&;x|Ut=@L-Zt6CBH8{U;7%&RN70v{HxaWLU{(B^+gxLU!lV+R zDH8IyVW84ZZEWKpVK6Ig#4qGL<@MsQ*-GTxJaq2URBG|!+M!j%UmvJwhXu2rZnP(XN zCKhQL9YH(Ng|ak#bxzR}cp6>zhXAbsbQ$QusmV|2$o3HZ)t6qvr{G{8qTpBYV7j66 ztaR%OrOQbsU7teOt=*4T5BmDFp`S=k-j70}tN{-_z)uS_oEtqi{OC>gl2cy~(3#{Z zy(B83yB=T@ccMz;2=T59$cGisX**=uZMpzFWk$8=Iey-;*6|;&wRB#Jp*Nzx)`x!E zv-!5O_LL5;Bk3`n+qTl$%-Yi$VSQx_bqq7CGM2U6azCH#lUGxpZ$58)Ci_(NY3toD z`v{N8mW}FQ$NtQ(sd{Gmk zdR@(aFk55KMeh4tm%3Cotx{67h4w4fo>_}CTV+0Ht20(-e#mNU zi?ol_+zsE9XybeHUCZBYN$zf*8$3sPuJb72UdJ`oT-V4s+}cUo5m6q8r)12v>*+0`OtGe~H?N3KfXB(x7v8*}PC6DVP*8n#^w@t3gEfrl> zn|7%444bqtdnsFVmX^6K^Gs$)R#9sM+d+F9&BO3a>1*6&+F-8jGR>uzOPKkNX`QjQ zdcjc08Rl4FUu3&wt!xdjcCm(Ad)ung&)VUzARVsI0q!oRaFn#|`IJx1apb0h`Z@ba`xCn_JtSitE$DC59}i$Nof;3&-L|zlkkh!D(<^Bj9Zq|2 zO652D#71$gKfTMo+7sx|o0Hxvd*~%RfDVMSkl`)Z4|A73OQq-qUyqY=+?C^K$DN$g zu$;brnGQ1@-^N4L-H=N+3~!VI#)hVD=E^STT>>oCEoRF~ml*Ri(=p=)y5V0%B9`T> zUprlVedtQx%JIlClumjlv2%lwpS$TLJ%t{wpA?JQnA7B2s7=+vs!HeEhn(`XknUri z$^!OO^rX9Z2Q8G2izOUM_Ve~h_CfX)_Q&>4j@nutB+OD^^^eG@2a$VkN#-q zk&4^tZ!}O#p=ak7`Xy&@j@@PY%Kn3V7Mt)A`_@}m{?>NKnvpdyt5DW}toW=m)@L?PM=OmE#>k>=Y7OHY<7~QJhO3@xMP;O659juJ zXhR%|?Pbh)d$w4shPjhzIXVx^6RXxZ@z|q zTl8ab;>y&8S>Lq=rb=!*vkmoW^$lM=}J3G*FQ>^?SEYrCxY%vYIlna?s~GBYzvW(8)g&w6aF+^Z|*yJ*5{d-yGE`uIjj54_3i8R#C^SKwqtPmfW(X67Jt?fygwEG{PIiV?-PIh zosuK-yFE(@b;<7WGkZPXWWPS(cdg%3pAOj{yWg`kGM6+~P!hEs_L*7H>AO=~r6eYI zNy(p@oVq&ACB1g~==2xqyEA@e-m%tl6fg`jUUu2*w%TL0=PS?io&!8@d%SZ$;r7|m zgAEW(v|-j)>7P@)2);3y|rP6$?6i*Fqf~E2-ik#b=-cr zZg!pI+Q!x7`oeP5Qq|JarIz`q@uYIn8RckV&tofNU7GbGGhgPOj6E6kGRtP|vo5r6 z&|Vpyt1~%?BHjGT+`>G;)ZX}6X=Qk*b%jHhS^H$k119VtQeA zvD~)UEek9oUGA6;s9{WRWuo7X6FxRv+ zbWL;hb6f7Z($d;xp6QVK(NNL3%u&(a*7n*u(b~&8+B(GA+uG53(E7z%+;+>h%09u- zOX~+MRi&HVVD*&hZk%YO(>lHX+Nu>4@_E{4y47ac4%tjtBki1z)O?m!9$sEX-!nPV za#YCC$hW>vgjZs=JswqE?-*}uceCoJO-Z`-bL@{&Kd$^p{OOmJkn%CzVjb#;GE_C~ zvc$XlWMAYRl|9bG-L10A3}b@fPwlepduHqOjFcwHb&_Hd4M}U0MkjAc z$&nVC-XZf#)*9Qtjy+C)^^mEKWwu+0$9T^lo;y7+c?@??bp1b$t^zuWY>Bq__{1GZ zaCdiicXxO95Q4kAJ8W=wcRudAKmrNGGuGby@9fE8VRu8O`@MQq^{Q^&Tab6MsnlrV znHHvu5q0k`cW2kAJZoN$yjHIB?wj6H;!(MkmSrrVLf9Gn4|AC9i@liB>ns*<(3#+P zX47|mhT3-FCr4Y^8bW8ET z+hOCQywTmFH$)ByZyq`|I4r33m0eGP6M(jxd(z;1gbOEOoKsY;zCN*n97V0D|Uh)spd-c}yTRmv6U z+Ta@R?(Z4qz3p2q-jkxlRC2xM)~?8O552QCgA9x%y~Z52&E_7=6lSfuWiGJWx$ zey%&YKXX!Y?&r>Tee;YLen_n9)y^30kvCIJWp*_8l#en`x7@bI+veI>TOVr=%O6t> zz9-w4u1iK3FV)KOTA{qRkGrueFK?Txgr~nRP|8xEfJ#O)UAdyBdFEc0^Ogk56>|?$ zTkZ$VlA2aVc`Ftdf_=-pK5v9DPYjU%Dxb93Mn#gRuQNKE%}wT8@pZXb%v6daW@tl{ zEa|j(Md&Rg2p@!DVs~kw>{PF6qYNvU_ZQPOnBj~C5#k5W0yKr3X^7Olzfx-b`=Hoj@+KLV<&YCKM@JcwKC(7%IXW ze8JhXy`+7?`el6`J9;iE{V?LidF>gf9hw9T8k-O*~?7!SV(|+@7%PH$0>vpSXiMB+VInyKVBWq!^=}XjD@|7`C z3sP)SMd6HB_k?%{dfWS63v;Ac<+R#QcM{V{57m?z$U3;e+$7G~g$O-#Mo|<@CEU~5&Y$&}Stitb+M^JKksjqkxk?eyIDSi?iax1Xl z7t<7NG!$f_^(Wwt4$!>Ncj}~%GOiIZR25``%NZM6m9??YnNCbR-J2>$f_VpwtRJ-F zP-$3#EPs`@0-6{Pp`24rDK8h2nur^PC?Q%{DRdU6NO^LG>e8E&cj;$r7k;iO!2HJa zf-lUSp!X1MwL4Or@2u-)&cduJnWZw1XO+&o@5|7tavL4?u&dF-3kb0{VuutclrJu7 zN!Y}oRQpHMTRKzMB+ffLcVgz4zbR?8|D^pbop}(Esj0iFx00|P5!7r9qY7{_7Q;R- zuv17(*pIM-VQWJl2G0(B??|^kF(t80scrfyd8n_QYkGFEj9Y)^rAnywOs25NnaC_kVz$*duoJH)1EVWH}>`VF*xy4A(Rw=useZmUw zR`>6`1-ZL(`sbX_*_+$jb-^=IXe`&$Q0JqgxaX#U)+hFR&VqrD16Kuh59sPRW}Ri? z*;V8X{e;>?=`JsmK8mssE6n%(@-Fg@_8#^I`Cj>g#a&XQGC=L9H3Xw$5D`tbrKZp$ zm>TR|_A5J{{fw%`cj$T%^uN?Zu=qbl&G;F#QwpjJ!JaOA@Aw&7^vA zW2L_OTkVNFy($#BkAqwGjy?m-;n%hKVEdnrf6fG7t3%tY{sEu;EhS2G8_Be8YVHt& zW`#Y7NQ!J6>4@kXmK|InP_iGj1o8vvYsLd*zcA08mQyNgcSb_Sn#|4F#q$UcCD`S6 zYGor3dPf;_ZT1G&&6H%Ot)HxoY}IXhtUD}S%qst!JItPf{>x>kKFl-oWJLd+MoDPPN!X_{-cS*BR7TK=&Vu`DoOGri^ya9!94 z#sh}d!ej_>OwUrE%cI1qzCj+3tBq@i>#TdbR}$9AVOnK_CYMql=#uO(ZY96UbkIE4 z^4ns!ezUZ*yfHU3k2baCJ8|!r=hSs#roKUm7ms*j+&Q^hbL!=6&56&YU60(iyhaj4fZ~7(Mbr_|(wB!BqoaIhxv*oBzip)1wGl&GhZaYnatO{qvv2e_H%K zlKw99S&32mEk+w^g)E;s0SKQa@2ULd9JF zsBV@ep_BKVt7NX6wJEb+X6LN^Ih;G)_d_0}Zy?7pB+r`LS*loXTPxdE*)G^x*veYB zncwqPZXHvPrl>c_TMTHV{Lp@?la#v>E$;T6_D=Ht>%HWwE;f{3s=JMnR3+vSnD!^~ z3g6W;E@{6NMh+I8zEID2SCzcYxk%y!HRtYxAM!%Xr+KMNht^|gKj!dNzj<+ z#TMo(n%7vTI0^;64BiS0|x@vL3a=o#4lQp*KP&=o4{}EDCKviONscM~!qK)cU}BYb?{}Y30;!@)_x; zxK&&#W{X>;=D5wQRVt{XQLPxO7FMlLM(VDcOY5K4t;!CEF4A^om+fbpiCL2Fd&6 z6x^rBsTZ|iqd%dOZRtKtSN1nca!L4pl^#cB66K9UxCa(fCd)N&*Lp2)Q1+?Iw4?eZ zV+m0Y{lfZCV%bM2R3&JYO{e$Jr|3b@3OPX8sk&rYqMtEazoOMdJ?%MEB9g&5z7t%& z3vom1NOU2GQM2eFjGJk|R$;#|m7xtagqlV|$rqij+F%+?NvhEs&rHeDn*p>%2TBV>~#|KIR=19c0bhOK54bIb82TbSS_OFSIeqJ)uHMl zb%GkHo>4|B#gq&=7Rnwqpnuaz9|LunH0YD{K;Q2UYI)h{F%^XlQ@mafjMn|MS86r& zwlYUquDnvpL;bM=Gz2KH>37x#q3V?neTAJw3R#Hm&&*<{at-)D{7leb95FYv?6O2y z`&nC9KUtg>pJ_bLbBCC|bOov(IT$R>UTBo$fSWm6?T7s}BX`)Q)`jNGYJCzEpE*9Ko=|sbw3vwNan+S4AO5RPy8P=Ns>@!90x_FgQ#s6hN4G9+>;8Ra$N_V z2^M{O2?{}{q3^zp+z5q(8esEn1l^=V;IIFMI^YD-KzFkon9?qyhj0niJ2y0iCIJUJ z2fY_RBEAi}Tj!zFc8u%@EiZw_5oI4ZT$=f&A*dHWJ+NKTzqs0p9PDc$WiM(RkE>UTbai zo8U|Bg>KhlRB9!3x@w~1=I_tm1mE#L=;#$f)p8Vi+k2tQ@djFP6QB<{7W#LGP~EK# z%^)j!ha;c{p_5tE2D%st@UF;Kf30s&w&-7$`^ho+Cw6i{t^~vMt(tJQyZKf7e-KZ!k2m7)Rdtreh-V*fe zT4A+61C8!Rf7=B;rgg}47DC&t0n`S@z&3gE3s`PjVy|YSPcadFy5Hy*-a-dA9z5a2 zp;_|)N>0b1cfz2jSUgM-bk_nl>iB5XVr}yN3SGaC$z}>Bd4gWj;NdHq)n| zNz|I$i;D6IaEj;P4MyN+DcZkKO7TK#DF8dV57k-ZAkbbu4BOZS)WRp|@A$|C(36=; zuZOoON$&+y{sf>7cA?`u*0=?AlWI_mtN?{261_4%1A>1(eJ8p>lhEh%Ph_>QuXNN^dUP0kt9}5ReGEm1?&#m0 zMhDbCmsS&)ffr;Vbs8sR7&Q%w3#;)YiRkh+K=t_y{Qn4Oo;8G)%L8Zt)I?_49_loI zk!40}={Pgrpe|GoHd6x{dgIYAY=B-uX>=k60;_QjD5Ueob>yd|@#jZIJT$P9V1rRq z9Vpknpvuv=sE=et(vOSliv3W4lZey1YEjy1=;smOCY}qd$0uVX&?zs8HaPWbU~#3$ zmt+oEk@8W4>DKfdN`Zffg+@bhSXxWy;*5u$T|=;=_XRiiXsr4HeG%BX8{&DAfRXa6 z8n43BIdBIm0Iu9|=s51jiiTpw!3Jw?iYn|BbjD7gmv#wi3}3T{wsl>IKFhjR`+?aSa~zK&jTV<^tGGM?(S(b<~}Tx1bsVhymS;f93g zxu^#j)ARy*Dcs=7K=I?3-U@%O7y7M!-Uu0e(@bdnoX~4vkLRNAHwPMp|00IBN6)uA z?8&d5nN3_H6=;jMhj$$XjRpeVfF)l+^XM>n7yn!bG%RCO*RMgl=Cig7_3gR1ZREmF zo3Jb6aK3LsU#1Z`j+_S783|qR{#gBk@bG1zt56PoQ3k!w$LQ8J1EZFi2sb=9*%#3r zx&W<)3fKh_D{>zCh*ltN#-ii19=~r!4}UtmW^1S&)F%s)A>;%w-mZpED?s|-MdQ&G ztxHtLI(&eiX@PF&abqUpL;_F=myNxMevk1kFLA=+@Y7ZR*U%qmC;==+wc!c&!%L+C zh4&ksWD9ny1bY5E(Zvpdwj7DijQ~F41it?n&Q><~-v{9|`WageqZ3{c+3R{li*>Lk zkG=souQC3)Kz%);$Y3LpXn`Iz5C0cS42L(~1;#rO{rWpbJA95G%XJNww#|qFVyqdu z-YXFw(%}Ds!7-hTuIxyB?sRzVc(OZ?N5#Mm9*@sd5z`t21Jw+gber&0-+-RDj*j^f zpv9`-&3~fb{sbMX{!mOPhtqWcovN+mPBI?vu@hLi576k{1csSYdK?)3|7WZLrlmW& zzKijHE)%iP%L#>gV7d_jn_Le}%6mLnA!Kyn=;_56pY*xtuMs$z-;DCWC?p`N`QWwd z1It?#c!)tj22I3Q928hf029y^cc4eG3Fu-HbMa)cz#a@n_46s#?f|rzZW5cw=R|w* z2y&xiz)QG^9q>P!fgd^wi(HExe2H~A0?WFEb?Zh91@d7r^wlwg2uqlPSalxJ#7Pby z!^s=?^Dkr>ClM<=(3F|3Cn2Y~tQAG4`VL}7L3F~yfm)c3oZt#3Vs{y{5TzC)6L3Pc zbt14*!=U960_BT8u#=-WzrV3ge{qJ!;`i4;(e}Wb&P8W@J#6V4v5=gPwQG*9Wohh~ z&ln5q9)*b4nOKEx`!T4hEWnDtgkEF^;_T$$u#I6VWJe=GT|KWF{p~oW|Z_oh{fK==Q4Lv_^&N+1M;}L0&0R@tT zlYJ1lr!%nYbFiO@z)!3Nw&E7*&7;9KR0VjDX!QE)g9X10*7^dlHMh`LEr>nKj~pZv zXW}vZ%1G4jEW|P(EZoRi{Q6f*jj4G5qv#0UG3o(3TN-`A4Zv*_Mckf%)%gx9{tk@L z6l6E)_z89K|J^vTCC~}3OPKIBt6=xB#5>rm8#dh@*pztU7VKdp*2#gN`W4*d+Y$F$ zLAT;0YIjUKpY7I-eW(Up4Go;RU$tgCPVN-ggoYbee_)<2 zVxDL_R?^Sm+7DQzuE5-V$MdhkPPKt`Rf2uZf=b2{eA-7~H=E-9{J76m*z=12eWzdD zygf7q{Mv^p=otgyCm^U52k%Sk(rcj=jiQ z<6$`;(T&}S&#H<2ss&qkjwh)>%mL1)BtD}NxdnZ3FEBh^(HXvnJ#Pi9Q%~4KBAzw@ zS=y^@%!-1`S4^8tZp*a%0FugtSWI9t2)eI+d__e z8OW_Oz?IxYw5g1_qdeny*Z(7#H7hYG;CjzV1M zhKLYCe!zbEaaaxEqe>w*)F3)z{Yw}-kW1Y_zkVT*Od;?$5#axy0Drd~SkTMJ{$Jy) zpTv6Z#y?xb5{6-CdqNGM1HP{@;!zNhh#N*RJkb`+@;v4xla01FtGz zO~D!fe2;&6D+xJm8{neeAewGQG?)#H-6^b1XXw;r6AIqB1HLN&$YQ@%%Xi>JKEU?B z!&9t>H+=yc=!>UXja9CQ-70`gs}gi2(~NnDt_g@a!-+oF@pAaPwUGN|V#Pvn%llw# z!rmW%)po&`4Ytt*w&+LWufcb>fpv3G9Gwc@fZ~X82XSKdMi#5q|1ZAf9H!-aF!Ko&|O;hZs%fKvI9NtytfL5Tu@2d4HmXn~U|YX%no0pL=fLw$L&WM0|IrY+ z#}lGAISXFAyz%EhJKl+Gu`R4>88VJ-@J+iAQ{SOBG!T*Q9-e4A-oCYwkNioLB>fXP zyRi-*kX(P z@{roN33SAp`}GE%5M_ZuUylrHA-?A<&T4J=A_How^WkawVm108H+qK^_Ul5OhtKAr zZGIC`U@hWyENr<3Vj5~!u+1XKB5M;ZjVyg5{?007TYgQvr^v9j;E8CUR2w6*)PZM8 z!-`hJo799A)Wff>u;2Dr$Ki-<3f@Zs%Jw<-@Gd@M1kUcw|15qEvg&@QJJiKle~Hr& z5C7_qUC~g0Z;zO?6keh}(ENdjOP%0vM#1MF!D`k=ZtLfGU4WRW2K{5}Hju?lwYXJ!k6WrpAm*$dvL zBWz$V_Fxh$HWX{x1n;p8aW@3k@C-RjB68Jtc%v7Hw=Y8Iat(Jh z|3shv=Vig~_)+=Skw@Kv#T>;upMw`DjrH~Oy64A<{DCv>#eVp?3iiNG*20ebv$N0f z^Y6gp`1N+C!=sJDEJG*6WEvTEWgu|X;!&#c7H}&dFg;q82>FeE5kw$Pbz!R$hnwy+o9rjAyEZU48{y@#4PH8kK}V z{R^<2N73hNuO&b&vZodYH0KEXmmj!8F4WCfTNyWmAgE+lgz~+YY5|47{7eaEIdhkJ z%uEKRc0aS2naA{CWV$01-z!t!$bRGltV;+gKUd(vcH-AUtn6dFcMD81oJ2m{8+wo@ z$pET8aD0B-Z8s4=KqSILTi2KOeSLnU}(`2uT%0~~>e3qdG^$%*HC!mBD z3_rF5xbM=qy(9qTb_yqbF?`K$ta5Rv_qlKv{Hm{les?sqZhvVn@J<%gv-;_?P*M5~ zZLhz$vr)!LsGR=;t*<9gSUZL4r-Hs`H_f8$REw#yVpY#6r=XR*6<^Dhv&u?ko-$6U zuB6G^#xM4GS?spN98}Y#3D|YGkA} zd}DB1XFH3Bd1}Nd`Gvji)4BHSPZ`bAqto+eG|ak_d(D$Ame#__rA#7!#@y4o#kK-8 zOsnlvY$GfMO^4Y%bP>u%b|S99)8*pbn&Qp=KpFl%s)c{mg6hBWOX-2=60QoAm>~9+ zHpzdKhT32K1(8NoW&7}z%>yj2EXOS2mRe@J={nbl{fG9EcA~5PP+g+T#^3fx1Lf~> zM`aCgY)jM-D9-OibP9wAs0BRpBT^vCK}DTH2V)%2X{V^_)D`GG>&WoR5*JV#xd^1` zD9x$m(~3aLu?L=Oq}CKQp$sSmhiX-|wonpnq1D#v!s5uyRB0d-Z^QeX*LwoBV#pzKpwwL41eEVe-wfX^Uu7XlSSR+7u1SUDB66N|Sh|8I z&o9Txo8%n%tg=bHsM{z9H_}qaIXyTo>}t3ra$VGm=mRl{Fn9NGVkuC4Lw33pIVky$w81-Qk{O&luk3Pma0TPH>&EJ)wgo6v`skUtLv-dTkCTQe}yQim;70Y(+Z&8G!M7q56EBAq3(QE zU9BX^=Yhb96JG%_a7I`ozL72{)AbfqaqgEn!(KP&QfT#vkC7jvrbHEwJR4piY(_BU z+-<%{Khm;=M7N$(DQj8!sXzaw&Hhs;oya?D4)d6u=5opsn9UF?RnuEhy_MI7A&SiV?gwb;cs&ojiG?W*Yh?RI<0`Q8af zr5(yy?XWSEY)ao?3Uaah4|J(!n(vxPO9{(6bFewuRF@CnPBH!H*QApust;8k$e~h- z5GdU8?e+Zw)MF{(nQ%`0AXQYhshU;=kvW%`fvVp#RMzcOIK1LrC}@lSDX8y6T|z5tSZ$_t&)`R3&Ro&QY%BeqN2&jN>{jga%UIZR`1tWez5HEYP9jFk7^D}U3zqQj~!#NJBtUm2s1^TjF=u# zC%kEBLXg|B)3TMVL+qBzdynOg%W9QTBK_6ha_L1fGBQu(#JSBvj8e<^Onqk~fK<3- z>1e%Z{cL@OZemx=BJ>a>f+9_CMKMkLHN*tEuy_pm#V|nBWe@$O9$g#ah|LJ zbbT6GlB`15jECBIV4jM}t)*MyaWPu5%M+Eh+9Klyd5mtv4(5vR6ZsLmmG8h+hsVD_ z_AxqWE0xnym{?I*;OpiOWli#DA|sl{ zOv)c#;BJ9YaeE6*EPSir`+ViX9Jbz6Te+(zE~iZTxnKW$um2_BQ}0jLzm`vK^jFNi zE!v3<+!kBypqAm=qtE2G#WpYSBBn%S`;cnR_Ld6VW6Cf#X~mVL;v`=gZ>%@UHwHMM zQeucu)?38=Jg-Y$@4VZ216+gM>7JrOXL*Z$knUm5=UgB1E8=H#F^nq?iGCb0EwoXf zX5G!s(u0Mt+#cz5Q!`S^q#RFi{O+7qBK=xcLsxI%n7Wtz!nZDnJRQUDjnU4k>py`13g-PwVnJw@u868>)^ZW4f7uKqHwyQqdFlY; zG5MGti7AXyrhMkz=Go?@CX%OS?Z>cm8Zrq{$7TtMBUT2M_$ z6}dqX?WwX$UMoEkJBdZbKH_?5m~uhuMR4>y#MASpb7r&Uw)wkhJ->`irqT_MDoGuM zmfpgi1a~oa4fp@BLruKotR=VHE>oM#as?w5|F})YP#osPGaVpzv-#3QnVkFzxVo4{8y{=eR-U8 z(WuHMm|1(9fQ!M&VbPI~B2yzqgqebiIHy@_nRc*a>4Rhx(M7MOevwWJ9emNg`o7A( z;ogt#UwOZC(y}M#gyt=D_w|jC)~dISc=`rE&sHHYJLE!mwa7-1-+@Gl4hnNjHperg zjn;B$Z=Kv58I%8PPwkePn6~z>BlDl^MS15v&Ba#gaH1GvUucybTPoxP#U2ijYv}G|l*6srTRX2)*REyv82O!stM9hdx1FCT|nde9OIQ zo`;@T?>p~jUj%URA#!nLmD(2_SbxpCET+CD>B-tVl~&&%J6fQ4l>_jzdo-86fJ|Vc ztp@^Xg}^KJ0t=5@|q@i*Xir(`90d8#Kv z%sVLzQkxL@nFFTN_6tGL;VF^3qv9ibg-;DB75LNs0!Tp*Baj>P_i}Avl4rS#bZv3X zb*H)qy1Tl{ihV}F=fKiI=K{|KG;#K^>z4AS zB<2KBL%k?WcAv{3v+iYF&X|xnJ!^LMnVbcA13X2=+G|nj2YsmIUhv zYm#NI*~>?9Lm7%bO|~T5`cbVaX0N77Tg6G@F0p_VB-c>tsZqEsY%_|I_ox+2U9LAj zlz+joTz=Nf{Gwg}neqiF7fF65^~BdNNs?QupS2doX`(2#hJMeK`0LyNHjQpZ z?ZdfTp*2_Q0&$%S%&jOblZPoY)s2`1%0P`^EBZ9kk^hcG#jpWp+O9ymy&ftGyU=}) z#0_Y-{#C06ROxS}v+_SVR-P=Sh{MH0LVw|t&;>IOJ*3je-2)U-od~?gbW~)5s58uc zQ?A_1ENU>dqwaZ!Lf9H3GT?CIor{Yyp=YH*5th z$GCaaCoNXm>)DF_!x_1Z%l^4bZ`ZD*~6ae&GfVGu%gLRQr zMJ`ghY~MO6{xEQ@$!rZL#h%s-yqU(=OV{TDb?@ea_2{Wtvk^Ox3%n zUO7~5Dh(262!DJ5!vBQYQcI=2_FJzE%s~<+rV7ws==w}yW;J4NL-MZCTaVRjYCYs4 zDN+q-vlu8o6MhOEL@RDrNpig6P&=sy)%@CNZ7^)02WIv+=?65EHccI@c2Rq)F_?gw zh(oN<=xIs%KRr|e8$I&8<~}|`{uTDzxVOV5Mv;{mHTFD4!qNP>t8EpyK7Y} zE6wZqrff7llx&U;L=L90R;aJ!k`gJ#3%`XU;!3HXTuT|K(mFaB)LOQUsU$L-5L-Um z9P1IwOY?9O!R1ll!&R?IZH3QX%Dccb#*^g<@x2lj;#=c+_os8?W{H`XMbw>!@x}j>_AC zBFyn^^K}-&#Z>9EI+pm%OtLf!hzX64kRsnkos3S*mz_Vlz}A@3;U^rM*_B!o@u25P z-m2`$>EC_}KNEf|Oum?A&YtP{D%H`KkS4aZxve8KcxPDmi2dPJLj!{vJAc?HYbWy< z{tc5tKGh{9Nop(7VskM|^a6K&#wU30yLY>{dz$*bV&3eO6bg)wQ)^GuV>0;tmX@|p zwrw`eI?VdQvK{-ema&o5wW(5J-*orGJXh|++`zn3z$_N>n0*7q`brC(Cqpq8lFEp{ zS+8NMap$?STrnv}qfcN1Mn5?VU8#_s3ntRX*=>?$zAf+|hZfTx&c|fs!j}Uc*dvp{Fr@ z*-+p!WlrbDb9LFHbU|t|;nl}rax514DkF2&1=yi+Np*2C0WZ}q(T z6(_VO(8V1w^g*k2q7mjEUwJB;uRJrKmG57KX=i3_@5S-r< zOV*WNc^|pB+^LyA)4n9n{odpIhvb`oF60FHsw<4qm@32dw3Z4S7$!#6jSh*b6<#!C zW8faAWdCSQF;!zrlEXENd`1}S%k-Y~*7DZ(EN~^|4#_E)Qz5swtEZ^8e>bgwzTfIN)uqrg}?= z&4nwzKfq_lNQH1ePS*G0Z1!P9_82c=3UQfvzd6=yF$vrYwh5C!nMo5SLAz;Vv4Sg< zwF;qLfJdti)RAFCkRo{vsFpkU>VeyM8}d7l9+b90sSYd1FP!&&_Vo8m_muXw_MH(r zN|lu=S~XPmx8QCY%mbTNJ_wUB`>9n#Ph^5-*&>eijqwVeAn!}>UEf9FhuB<#Yu!ZNvx_K~R0ZMNME zN(^rlwKb|tWJGx5&=0|_As@EUmd1~!ZmG#aUr%#a|Gdb&_qjcDKj$pT$;;`Rm*=h` z#L3ZW5lrZ&X(#pe#09DrTY&$>zv4&p4fv;ge)9}Vkad&AW$MRuqNfu1^l0^~)JE9r zd6ic+r+?Pb%;H(+v&p<8?ghSO(q{FW(Ua!+F_upDe$KW5C_%ePcuj))J`Ka7Xn!=2$|_60M8E>5KaFHsAa_zOzB zyhe(U%u*q#m9zuI8%1S@TpTlmE9I+ls!S^7lr zGkZ?t*5FitL+JIG%!Su6{!F53Zn zSG&i$(maKWroo=1s#2gJyMN?rS>H4A(g$bc&pMrR&2`=TLj08RyV(*`V*$=BVMyacP;DCX5g!3HOC1;tc7k zd`|6Z%%YaEiI}RpWSeGBv)`~cw1?RG!(aT%^de*Q&dOIYNbq|5c)NQqcu)AE#s5iP z6}R@z7)Va0SlULh0!)Iu-nEoRSI;wP}732Gcz1I~!|g=fA4KH1k=I4|53 z%gJMvYq&>NB_@%>ffpaigmBYL6|H&pbI!T}cb$u!Wdg1Qo(b*|S}$~EaJhi-wrwVj zHRx;Pa=nur;eC{IGsFG&*x&2v3o^T9SI8~o`tDgIcG0#{Kl!${QqHvjX9DU3+;GM^ z2RmBW7h72~&&ANkkV|U%T4NGHV0QntF;1_c%~MrnkTOg@feDR~?$lgI&XDYV**kJL z#Ky&*;ER}69?2Vme14aa_aelV% zvF0!H-W0tofA3}|@L~$~Zaca-4~Y&y)-6YM`yy3=N+6E{Y4I9Jy<5Pnw~?z#`Nfu) zT!;}a39H4~(jobw@?9;c_XR$r0+5lF!FI3`82c~uZ>9tn&cEjinEILm%{wjqYypmc z9kQbyV%a_GYs)k9DU*W_Wmi+bjWt@ba!YO`SCd;|EzikamC9-}ZMhD1WXu~m$Yns_ zywUTtLfS0cmZl(E8?5}rv!Abwmbe*E2N_@T3QT0ZTw+@#BDc12Itw z$O6CqGK2o=MKH^?0B)}qY^*l$klQggmO-_pkI)`^0CScxvo%u*s7P)U*ZY(YA3aec%vJd8`Cf^^v=MmG(bPM6;K`i z&s+?|p@M#sO`osj*Ep?$HXC)3AwaBE0*0b6x^EIu2RQOZq!pf^2ReV9jXZRwyXj{A zw?^yDVF5AdYW@a>?kq5zf5=+YaB2e9ECB2qM}Rn8hu*fGJch0>0Yt<$bcu_hkMJ7I zXqV9mybmO17u@(4po{89Q`N!B%mu2)L&N}Est}pz!L9{b!Oz}Y0BFv`=t9&1D^Yh; z3RZvz>>7{|UUWUe@x`F)?(ZA7#wVD7X$%4;uoP+u_t5<~0(4p{avphHpQ4{kMm$hp87d;W^mG$V39wZA=TY-c>O?9F2fOvfh{LW&a zP44MEu>udYsoDU{0;Fg?(Piw29=8LX+;ykw3}rX3 zDhAc?aB>ka82eHEIu5k(4|Ezof!Cq~I;bCXJ9dKw2Ye9rJqbOFCR8GIlKMi)l!`v_ zZ|Z;4C2AYB6sJrjmy#7wdsvU2@OmJ%aZiC^Hmra$|NA(JeU}nr!R)@ z*@s$C5O#PkR&3>e^9DWe6UzWOc@*dl3s8wIfrefT6v||Df3M=ZO8|wE4usuw;4GdH zG$sN5qN|(*q)<8IBYtXA+`2D<@u`AV8o$b*k9kzf)M}#Yl%co4Q@P;XF2l07fMqb5 zjG!8Td7~>;22+dofTm6b*5D0rCd+Zg4`QY^NBw~t(;YP$9rr_sKb!S1APj)0AxeX5 z@E_P-G8sp;hBxU>4Zuv*1Zp(ZpBjmNxt;n>PRH6D!^xawq+kv&8-A%2a`X%86V;3v z&p+Bg^u5Oc|8|v#ASaU-NsSDpWH2D!z)nSzAAu8_2u!qJP1_7lX~l|6M84kun52`y z4iq$Ap|>Q#|0QYX(Elw79DoNHej6B~>XX+`>AP|B7X(+{#@*9bM)_f=zq02I)w<)6caKFfs*whH{$XBPqF8l;fD$XX*>)2 z@B(Oya>%F7Al9@XD+BkPAAaQ)_H`Oo>|bE-2II^p;|)6_>L1r1!3N#HuY1rfTLJ7# zIItUKaXM$iZa$+UmxeVQf=Qt?SomdfA9$7WumX>OU#I}ipm5?6aHDqMXp%4!kpw^f z5cbdrIIRSDAivhHpU3kbtm`?T&}!p}bKwC}^){GF`3uzHV%S;=A!GieHQ4}X`325M zePH#TnuLtn~^^ z6g&VL;xuC37X14_c#>5(L9Y=FM}o0yIp$7|=z9^nym~)$_D2J?;>Vb@1Tx7Atmk%c zizVx~u{zt~@$Unr)edMB^*=;YX-rxiL_YG0+zyO?BQhVE18m7ptWplJ_+8*PbIHN* zAr0u2m_2zyKcr{T6=@fygvwChN&*52cwmDv9%HTcB0lT`;^GAUxl7*)^yX20n?4>n zaEQ+9B)HMch#QGO2s8l_`3U@XTVQt-_~*GeA6cY}d`_-{7p#K)%>)9|kGJ(}{Ph7w zW)QHc1MueUfa-jWn2`u}&)Ps6-NUCQVLb_~XGQpckwB1K05ZkTs`?9f>cjZL>^G29 zeqPXM{0#$h33|mLnsWrL{vG`KxYK=c#8g5gDxhPl*D9Q%(H` z(T2XqjAt8QR`C-uuwAB1OMTSxT3Mf(4}sNYig~wXFM1-COu6iCbe9R>5jz8gw*#ob z5!yIpa(k73m8$AAl~jMqiBcRY5gmlu;!f$dJP7!e%Zf+2qK?pq5?SPDa0T5$K6-{s zA@||btzd?+H`py~JvNrD$WCEjvG>`L>;|R{oda)N3W&msIFUt-)_O%PL~V*`%M971 zxaHND8%>mIOH-wq@@(aS`du3fEbbTJ@ID<172jt!!y_Nm~UD~y3 zsQO;Hq$Fb6awPKKb-?))08>~aO+{{V2$|mk*yja33i;L%uwTxBf8K;s^$-#B1hS1p ztjHjM~EXGktg2QI%qG|N|^U9uB?&&C)uQ#Vx}+?A`}(#xzs5tKbXI|8}rbO zyo7nxxxjYpk;26SU}H&f2Y|sPL`WA0pxgBwXo6na9`q6}lOlDIK8%ytk3LN=Wlphy zd>@m|+|--d$g(HGX=qH+_ znbK3K2C90amA>k5ZJf>_ADan8+(F=$4}uZv9TkA7#M>;*8LY@kY#91KC)oftgbAb@ zP&?6ku)ak4VR)8&VFQ{jR)NDWTp}JzzH@X6iqCJNB3FNiSIvWp^z zFMtt z4WlC1Wi+}bcxjHB&RKuiZ#ot@F4}k4osM9q;mmP5Yf7D}SOkmT*=-Q}I)>nrpYTS>#@&B|MKE3&Zem{H`B)6O9OP&_z` zO0(-&g{{c7#QNM}3$pP*?%b#Lk%y7-`dMzzVy2>oF{+G=bC|9|f1r8+@H+#Q^4AA619wjN;}1>Jd@YTUFB>kUW$@_ zW0qY9=f*i{45sUUssYA3@(vThH#YSHzvFXMu4JK-JdWxXy>e{Ub(DIrxHBTbf; zN=~_@lA{jA4RRl5qqYMhTNJaWy>X8o!1m#SfEFpmm*nokTg;|- zndoG!MBZcq=0t{vD}_1cA>fY;2lL!Jd8SevJ8@V22J}NcOjl~!I#e$P5T{9!uEUgM z53osW57x`thQ16Z}#Trv4EJF}>J=m_qKQcG9!pF&cq`hG8?AN6b!UDAS#34$jp$ z<{jMvJX+^5a|qQD!=y)Pwbac@sB%S~3VSVvnf)H}Zd5e`l{m$a|HxLQDKKuGRF^td z`-vOu7Vw`G!@Y^aEvo?87u=A`Fhf@e`PEw7@zRL$WPjX)<1zJH1K8s4$XZL{lMVnq zoQ&A;0E|WTfLT<)u~$?5t_)RN@;y|3hsy17Qnty*fT_Equ-bBC6BWq)&t#b2nERS$ z@C*3ArlMBa*4(k%ao+gH)J?dHEuI_H&>FE>2P^Ab?PdoLpx=%Z(SH>-*1aAIoh*n_Oie{(cmY^)wmn%Cnkh}kEb`$BTyfiOg+bx>2&zTWke(J9E7PhWfq=%i<~a!!?`)B*zvQ+ zt5wuz%1*3XSG5h;<>sJ{*NiNN`QtD2YV@rl*$Ql3whmjAHRCK+V?NO95J@T`qOJuG zWj%5r_(T?f<>8nbrKTz(&U9t9gSrlVpbEHi4%S*~I=FF5;@(>cRhVO#)(ytZCXU)k zrBWV{-ACbPwFbjY4X}8mBEpRXBgSrIl>30vc3_@v8PNMTk*7b>#%OkJBTzG|Fopj= z)Uy-i>DY}Pay?k`K{-@GtOXCk4WKb5X}PfA3drNn5|hbrY7(`cnuS|R1eJoE^&zs; z7~HA)pe|CIVyN55y5|5h@(I(~-%+D^3C^PtTCO@(4N-S0C6!0u-M z-g%_X(*F{#(PzJAS_72dW&RGghMUa?m}gm5Sb5tMTb4c9QQaBp*l+D&s?2tTKm0<4 z(YvXGWIQHNMjBQlU3;z!mO2TweWEwO$NA=auX#p$Xm1yvAheMx%X?%|KB4?lqmU6F z0ZO|DY9E`4msBQGjN67!Tgsi|UT}-Jo9t$0Iq-Y~=x}B$DnBtyH+mU%z7*Ml;Eg?6 zd!QgYDyNl7YA?01>QlZbv54*`QD57H-{<4*`BpoH=@}I@MxOjl-ll4S=eR8`V9na& zws{iIJ)6z~+rT--!{h?#y^G#NZ6zOKUT{4oj;G@$#_9oj17y6TfX}{$?Dn&|S6!p- zfj_CGJq71O4@}ez$4%rL>MF%i=P8U^qk}F>R|e-oGAv;j?z2~LM~y+|w*l2AA2R(9 zdJA-&;(^TisV-Jasc)5}|FGASl>W+OB>@ZW+?qz? z#x??I@@%jq{=nxA!x?A>d~!T0OcLcT@^BD^~^$2U4b&ai? z-EA-LC~f~^Imqu~MpKDkZ+5}vLV(u3f*aIt{hpSmoREGCZr^is(3c~#N)Y-08Cz1! z6&C?Ru1eJqz5IUVht>|v11t0zm~XPdPu^o%u`>`Ihp;tSnw`%)K~(fnP3d%WY(F!b z7z4OYJA5!yy^K2OAzr}Q-he$Qf;z=dHBoJb(>@M6a~L&;RJACgdq1pJH{{HtG3T8| zwg*1+Hkb^?P&KJ?xZR(jvM3u)V^@%T9DKha$SLZAx8;S>5%2X{tq8u5bwJ$bLuD-xQ`=K=qc8BBj)^CVXkEb=0pB51l;taag%NVhK5+w?arWM`W=;= zwm@C<#0*6oDx&$}XDecc($62CZB)b@%u^x@J~|k=ZY$(M;iQC9Yev*;L=M8;z7Wpn zPRv^P6-p`8D6Zlw4b!GB%r!HpBhi@3^Q(Xr$8+>WHK7ZhW(Sb{UvWRXj9XJ9%pqRD zoKtIPcZ4FEIWb}Hp8)ykw7OaB3 z{nWE`#>kI~Mm(O}iCg|j%t?lm5#%R4UtiQuTB0Jd5byH@|2>W@jSA@k%#Phf{A`V? zd^DzPQc%Bqgd6G+^eDRPQK&5Wz|2AGPWZHP-9w*TJmP_<9tFz_!^#bJQ(Hc=;6S^h4)^iIwLmx0n z_a4(nV=(j72frtwKIm6&@T(aW$4UjDZX&>n{pvdeyn~8rs9#HSG4}f#s%XRh>lPfx zjGh%$%~R+zw1i*JM!oVb`bgvH<8&}{m}$g5VBPE|b|Cu=vGO!?k9mpwcsR2cF|HHX zp__v%emEx17Qkvh!B=iTeW-!%K$M$=s!*DG4z-%sQ*J?DGQ zQIph0=(Jf>RXIal=f23-7-Ct=kf~+K6@QHHKc2kGuh4cL?lN--X`KgNB3Bm1zcl(J-6jX@4riP}zw8*JlLoo6ZYE#gW4AWcJq)BE`)@ z89czPd44-$>zAc${Q(YI!x=7MFfwP8!gzCa3a_a9>_d_vB!bCn_h#1c^A|5~4`P@*NQRU+a zJeG&54buZJkSEszf)Hf-4}4Y-R9%p6D+iDtQ>nyMiu1b&WhkuS1yQhrthy-GMTSB9 zbX_NMGy!d&oB7m(tGgouKeC^%^{mjv-$dOzb2jV16YRr&9m=Oy@biCxO~{xL#BMCe z)DSdiESc_G0|R-_ApW`vdi)KFvx;xCnq~6y zMQQ{r0nH)`AIWup4}a#x!x@Hkc$2kFL7Mo;cfTk+F|7=ArD{ev_r^oiB0NnrGY570 z+6tY?3O%J=){{)dMAjGBfd2j)EMj#bG0>2%9%lpVs1&pw?Dj5JR2dtge_$V(7)#N) z?XVuc5LsVFMS>yJXerBC8g}3f_WGP27PzbX0}shW?i=`%&l+c1n0NSDv495e$$oZ& zpQ;ht4JVpaQaDDO&B#pu0~(Og^5>C-z0m)0aCw5BoBZw`=<+>efyWYWyA}xLMp@R> zJ#bP>)cYah>e20@wO#@$Im@Z%HPsD#*Rzos_ePj%T0xxR9G>P5!esDFO`-3-c=}%w zX?uW$*_d6emUmda_bUNUVgVqHdWu5Z;k z2ZHe8&7mUG9OCgE$&QyyL%>BCeMU=D9cNMCrJe?F?}LtS(XFHhw&xksLL}2VWb<*} z<3?&%Kn|=heHV^$zVTEsT8IW*!k&HztRc=^3-7j%m~a~?Y75%+88*VGz)g@bS3wUr zsnR1-b!~{4LIsLcEufdBt?fzCZmOV{*Z-t4szc}}G{ze+ib^=!ko>Wn>QppfTT^9W z4jQcoboPq+5vxJAw?d{(5K7~xZ_LUU2gZOvX$u`k2I_Kp)lBWVW71cJSWqw$Aq|au zRCgkCUs27$#@~trM(A(!w}Aq}NaB>s_&Eq&^wiXeJ7zwl&J#4=eqs!FsS|mYm_{16 zq$Hh=+nJGc9h;2SgA8slt z9tv~>_0v(`g!g7I5qVXeuXh6N69TfXZXk;OP}LbZo9GgbqTk9u3k9TV!hO7`#>2s!N^- z@rN#YVbdDU(G|$2|EhaTiNZl~ys4;OiRz&AW~1gL@(nLkYrUJP1fIU0%&#yOPk(Br zeL=nsg06jmBf=&zSU4J}!8(5ECrtnmuNO=ni{#X0H_6fu3LZfQ9KeT3yqb!bpD z4FYk*E$0gK83udu3HxFPR{S2SKYS!6p9i~g2v$-(xN$3Ykxs~#)Xa$gA6Q+>&^v>$ zk87h%yJ1@&Lwfy(T>J;C=m%Dd2dz93Tc~khId*A9?x@$m*la|*@5E1CoJt%`@tQp2 zh8_d{Di>I%Y(y>6&|JB(Y6nuqq68?R5G>GS?%zf|2@@3C4X)Y?vS0@lGF(t}2jj*8 z@?jR97=d}0Mjp*Vzg|O|7&c@YRIvhS>V}HS=fO>84 z@(%2xyVT8S4QF0q{mI-qf8qD1;gA6LwK-U`g^0hr$EWZREj0(-u$o)`b#~9drpyA@ z_Xqd9N!;&t@Yy@qTEoHUTtp*mMK(O5J6{A6;3;bv!*9P~?HGOBJ=BPKg=JcvIvRD6 z?c2aT3`YJ8L93p@T8;rXHktJpHgytK%r-2-%2>450|WR;0ME4*n|Uaf;xr_CEh^=_ zXTCC)LMJc{Ij~xs+_Nr#hjOy>3231XX#I;k@ir@-#~O^zzNgviPQ2!G`sdlhci4fa zm~m(35zc-{%+BbUUI+2qZ*9EQ-?LdqhSsdiK~&E zj1KC#nT-S7#1Z!ND}NKQVcsKc-*6ME#;6szJ9lNC1Ci$jt-i+gs*mNA&UwDir0!ps zLjpWg3XYixJ}V0Q@jG;y2m7lSws%(kZuFR!pFGPe=AjfLc^))>XyUi(`*Yb2>Sxzp|UkQ{!31 z4)lpp|7+6J}>r$oWL=E2DdHPoC&R zez=heMhES3P>7vVNaPu(m~S*A868k_L4g5gZFIhtsGhoj)BT8yc#6#Gina6!T&4rA ze~zp(sIw;W&BpHS$M2vb;X?3bk493SVDuvRGWRh$S{&*s&+27v*@dC!D(pxaqb49l z8$rockPoShZQ^~o*spZT9ezCvvw8 zD=5dlRl`%+j{8z6R`Qs-QLi{nA3vSJT8o2T4)fyv%cu|dt?`w{$ls8>8kN+HuBu+1 zdOcI`2ym3NosM9MX_Uj!e!j$ulY<$J!!UdLU(Tvd1Pn1lFV)Rt?!bK_2Pa=Gq#9uzbpSc)8V7x+{Voy9l zy?I6@o>r3II{BU-zA&U>F|>i<&CvMVRkX|%_U{Gvy~*5ZjmFKV~on^5Tj4I(G7h7 z=Va97>y3PD4?jnIi-awxj+E7I*y<}vgi}s&$f5!LZPZgP^nS*?lWsiheL9(wp6IukRia$_(UG&KbRA$3>9U8ciU%r zkBv^bRp7}MXwbI&l$+HS&eTvp9Q)4op&2`kh3ywRd!(@Ts4k+-%M6mjWJ5HFQwVj&+M8}^|1vj zHuHa@uf5R|J(XFz_+K&Tp&+sn4(|4fLPn?H|XFP-Q9T^=UDh{R(3QOnk_Mt zW}jyAw$bt0&);R9c^3(7Vx@+*t7zwyNTjOSz~#aXW#9yD{}`Qa@W<5p((n_z?1 zWu6t_Aw%l?3Qcz5w{_Y5ROa-M*Bzer5*zsxynT&%7~QiS*bM(=T84%kIe_kagof4m z%`4XO9m!pkFB`Y?%NgG^jV5W^u(G}snT%3W%6RqrJ4!-`+((%HBB{TV}J?jc*%^~odQA6F# zdoG?cR(#3)~vx}CV1#APx}m)1{gh!lfKRj z5_pHPvf}V{fEh%?UkZC$4}a7Y?0y&7SH>-EFqZNWEboVkuOCS^|uG zhqFGJ$wQamrC6}l9k8RD#ras!ihH1mCs4xw z%`uQUV>_piiybM$_b}uU!A_)%C-Xg**~MdU>>j8ri4%)q*NuHn`2Q=E;7Tha3#?0n zlD*KkVU-)z827^s=lIt^OCE&_ZonHC;D>+l4%fvhHSGQ;{QWHz>wfa79o+2qWJ>ry z@uKu+9mg5LsJ=v-|%2M)<}MyYjjN*3;v=RJXMHy73N*l`0PkL;KzwNB@!FE!2N3xcdi|H z@F$}w8Z%aTzH0Kc()^T_sP=B=I+nZbS@t%7W-vp=@7SHatksB_pJDf2;IZz2r{H%+ zpGb6N2Q`C_;^DiGSN$hf4i8b4?#K|G-F%JCT*U2bJKrax4S!0NiMG~8SrE7S* zL#Z6Qj|lZleI4=dc>Or>I5gLv zoM;(j+!Z{(dxajNCN8II=oWKPOMj57qb-9#HU2cOp}xtVRC)L!?xafMXS|F$@!Q;3 zh6kblXtHc}67QZ&j$B!?f~t`61!QERc`>T z)tD}S*@?<#=ii24x2Eeeh@>7MPX8Qqg_*p$8BpR5^z?a7^#3fAFRb|?^u2~0tO|Jc zjog%8%$pc(SZEfdx2C{5b?aWmnE} zB&%A&ZEZE2-x06YH_l@e<6R-Mu&!1P^kq8zjlZd>^zFW@-UL%NSDj4X;~Q!?830eU z4f^dsccCcR^KY$3?9-eHu1`Uaf(L}m327d(DL6IA74*Wn(=o&zW{b15H-8c52z5{;uswJ8E^jePaJ)LaOnVr($kIc2~s--45J5LGk0n{UyphF_>G@}UCyCJ&Zj5PB^Kx-$W} zRwwX<+2kO9vH`X627nm-MvlrE>Z$Hkj*ycwUzw+@1|!@~ElD4x?}7Z{ALjM8iq7dl zGeTa5ehOO|?uZDB*bp8Zu2W6tbI?<#VqZuvwjqLBk5zy3-}2q{HuAQnV|YPoJywz* z$i1j~p!%bfa!N&N7OkZ2!7g>0)>bdYea244r6Tq-f2MEcTKil_1!rGpN#{gI7rVvQ z!_r?GA&d?j)XJ+irM!O*6&23-2Kb8k%J_QtMEMByQ41=UmC^L5YR4|8X~CdXs)1o? z&poHSFj!nFtuptqe6SR-HnFy}R;4adFEVK2!PB)P-*_nz-mUZwOJrxSDr=OJN5k84ZvbOgqIs3tL~C37lG`W3(wXG{7ZbGB3Y3)=@mMh zyu&@bt28ooHb@;qvi(LrR5HCpYiZZjuHaA4a@uzFf*Pj{Cwg>7YG8e7U+mI@{h`&u zt3`;BVUf2ZazsRgPY5kTAKR6-9_GQqO}&SjCTrebl~LPs!`nuF>+hsK)7le#af!v*_1@;y=8NX>ZsX-lc-vKSZRG zgj!(Ur(2d-hthrTzU`K6j_thlu0^nfng12X3-zcyIgHNm8r3{o%K_488p@04;TMdo z%B%gVUnN?V2W0<6sjzvtd9*pm+*SHb>?E`@RRU`;TRTg~wb6=9M#O4x%ag#VfA@EV z!osOJcnZ1sJCZqAuYuOPLchIB=mVyQ;P~bGW{nuu>{*=oi~GGLUV^ViO3r% zMOBZw99b~3Ld1-)Gr>!p$<~KraA1cr*LTKUEB#37juiJ#6Sb3GrnXHV>{h*T{`1;C zlPW$kH?$76Rkr_O|6;#oUvBSgPqEdr9jE&3D9a{ulC(f7Dn&{gKy|;TKY^e(QRn-w z`@+13+*30Srwi$a(XdEW>)e$* z-#uk~CjWJ1x;7$ER)|9uy|-ksJ+YOwyX<3ao2=z6o25jdnrVq{)%Gc!{UxYI+Qs|A zbHtPES>&DWTP$zzucOLwEIBlJu@;+BjlC#&=f}XEE*3fy{r*6%%VoR*9kCMcqP05` z)lDHjw-RmKm&oS_Z5=txKh^VO))fR@>xVawg0F6`G^a0SQF_~ja@L>KBic{>tSLcU zV>xK=>dF)13F{ttIciMwtLQz^LUgsLqLIbIg^+OPVaqd9xT;g7uu)1f2mW1(-tq)ROScm4c;1(BQztVaL9eK-WEHqQ-5`nG(~t8xJ=LJ zIanhy7C2e9RuU^me zfVzy%4eJQqoHEIm<3_x6-B5{&*ytl}PfqQ?&Q`cH)?YzyK$bPhJX4 zvzuJ*kwR@D50Sx5_(Te!w>M!)E#>|+B-5sh13jt2u`a5YPXj;QlOB-{x@<<0hZRG< z#~>`Iri?v_2xfvVV&7S**%)q7Y=1gG2OXv#vNODA_%9JnBNHRPL`FpB48ItB*Ky8r zSU95Hk>`8Xre~zM=qN5FA4rNy-ut6c>VFxf^rur=ZSA&NPn}6+mu~)ientLAJ}tMFzxX=)wtJ6zcDqwDZeIRh4XvW`z<1hH*qtq7 zYx=76JQ)?-_dQ>HzbMVM?tvCUUg^5Ily#u3o&B7BpM8Q|vJbFbuqIlHP^s{`G!sqw z1nF(mMf`yswwgQa4(dFJdP!}TdS3}w7L#F8g1YXf{Y|Kn7^^PVPU`|aHD`-6rM6H^ z9N9PdtufYPmIU)v>9jZz%u8*3t(sH0P1n=zzS~g6aw-ay@?G)ml~>S{^jCGg_CI|j z5r{jcd|;}V3YUeqpir&|^XbE=B2|ijap(?$GKnnlc<_X+>DTE+S06lIYcGW{>mhBBt9tR)LUwQtZ@g3tL#!W>)(!?!J9&dhp&$K7yX(us$KMq z=si*EB5H(2y6#&yi&OMY{zaaw>BUk~lKUl1PgK6wPJEO!^5>xRg1#VafKb7*)jq=| zgxJEChKEO74OhdKg$@fo;%sG0mex_zdb2uS+3EMo?a718<@@1{q5^unudaMb{vv1zz`cM4~E^chOIe*Yav>sZwWBQ{b(k>{2K0Cx^frJ_Padi|`MK zy3b@Kw^E;X^ni*eU{NBnpvNZB=L*BnIUj^P*RPSm$iCOXhfKKpfWvCXiCBlScouT;@? zfc;$nCVV(K0jcsfd4hbF>Rpw|X?)|qt1MENXk7!cu*7oH5fidHd}d_6sQOWdqr$W7 z%(^<}UDhVi{;;1ezqN_@NZTQ=bYD;H{X2i-~Trk+zC zi7eMf-hAFJ-c)Z1`L+L+`c1!O+AFp+zp^y9ZL`00+;O&db#q;Ec5!sGC74T#mjhF^ znaVC%@y_t{b#Kn-k+C|XwR^Xxo3DYtvKmR<2NyM9+F@T!vaGO}J`3?7=jSfGA8&#PhACVhqdld1;X$RO^2mRd`@#o* z3nh*^nrUha?IN{3=HeA_fHz%>cjyg1h3UxHs(7v%Kv^UC?H1w_2k@=L;q#clZ~p>e ze^m2fg>_bLgX~`|*O$xC8K5{h$kF~4{``uW9QG;N61}ELqR!_dXX((z5rw0wM(L5i zL>-Je5S>3ui|C#a#X?FrV$4kg7yL^+`O}A_%=*zf`C?MDr0Aq`NxObzO&jA`piDF^ zHJ`OjaoU1&g^mmB6CTH_T6oT|N+Eq+`Rtd>&oZ;cBh`=o{c>&Jch3s0#m%4~){`2YsJyd8Zb+M#VVC9hgqy2*2Vy|l(VjW{?U_K^3ptJHp)u~*Q z6XEU{pO<{4jlNI5V)8_4|H}US>{TDV53zU)`8QWZi@Bp^vUR;}p1r7}vg18Dv0H45 ztg9@4n3qa9#7Ur|I%?tS6lE~IxBpVQQh)2HR^Py(>22}g?KF)B6WxqFgUdosBw`t% z8dgmQ`ge99+n^TK-b%b%4t#4y^i(I3o-J?)ou^Z)q?Z<>U8Vx%Ph~xKJ_~kmUS$j& zdfSoN^@d!i?OG{nO@wn+9l&&a29IS1@4b_|!vH*j>%fH06U+$u*>7fR5i>1Iw&*ia zxuc3jc!C?*_X!1+9`5@ohm%_+)lI6MeDp_`lqIRN)5MH@o)P{>`YGv+?W|J>S{yVb zD9P1^ipzdiM$p;d=#WamrCojP(=2(Y`bsVhHg1f%L*DB7Eq!Q8!5^07#mTvUu1al? z@t7_V6_mSLlyJ)2-@e6_CFIx8y`h&vZUmKfHnD9tHx!4NuInB(Qi+gTdqX|z-4eYA zLVOkFJpQ%*1!y=`elHisGf@@&_ExV8`l_<@(|p@H-d@CcjcnFA!L@>4xjr~{+X`9k zi`PtV^s-tUeY3mEJACte={~_fQ88&7^$cz&eW;x@8o4TO^yQ90)!FW%?Cpif8Z%*ddNUfv9`18pde5-wDeF;9N z+*aNw8yyl7S@(9oO$`eSkUH6a2~G%`6>&1+MnttpHL_lGgDk1h`yxw*op2qto)tp1 zM)C+xmkdwZy3{f$%YSr89-S=z$dabItNU~6O@xr#Wdgj5Z+g}x8I z=xX7pW;<`WZcb;VokWHHbd|Kf{Ox?*JT=_6GVW!(%J`UZKVxS`N`})D={xG*s2wyN zkZM?MRA*S{c;k>A!OnEYILEK{Bi7%|H-uV&Xsv>BU;c}wj4K?BrEBNxLlYBwrx0-cdNiH zjmR>&hu>}y9b3jx+i4c{V1B~M1CW;e@Nf3OpFGy|oa#`MuspAlop4Mzi?^zd_=otG zN@~-{-Mml!M@*m_9*BNeCp*Ys38VXnK;GsGwHejS4(p*{diUtxsetuB9mn5mA;Wu7 zae4%PhPL=1jQ(A<@g8jqtkm0b`wjRrnvsH1dcO=_4*cw`gT5?$GST0+VEGI0<=7rKCAzR?A zs{51V-f~NM0AF@l;NOn^XUZwgLp4eO=j_nxR~0m3#5O=JmNs%FTB{7 zGPTrp(3NHBG`w36B2qAvted#_k=l*&4)C+Ty?afXDq1K1Hk6 zM@_|hnN6`Ph4E)BB=^#!mQd@e^|7ReQC~lby3Ot7rt%H>m_J>ar^V@4s4Q4WbehLl zr#QlbW(IqLTL){dInJ++O^yPNwOTZIkaPCI~EodNX>r_R@4 z@x1s%{8KC^#*%rG9R#KjSWcea9XLy*R!u|toKL`;G=ZErD_;0--1@(Pj;$*GPmB{U zQjIISxQ;B>Mc|nN`fJUh&8PN4K76?EsFqPuDS$RhQkHN-FzXR|75o4OHhFQN8=b!Y z6K078rCU-4pt68vA7wug>Jv#$mnm<*%)YhAku3c7u$cN36l<_kA5KT@f;T zhJlVx$A3CNF9<%l1M!+`^f9wjH^7F~8cE!!8j@imbhrepY#;pgL-Fp!v2WSAzxE^! zHj2K1Pfac%hIm0)>b*sAue`zNjhWjcVpj$>wIQ#HWD5b&C7WB_e_< z-bIz2jmeBZm**ap2dS2b6kA2sgPqKxWHP>PknwUcGaEEfFAL^+EOgzA6Usqs<1uoq zHQ9q%i6Vt_uEmLUOrqw^2|YjOb^$y>e|9Yj%VICpz_M`jGgC>losi7>n-FCi%YCa5 zy|t_28#$F3{jCjJbS2dCle>2rp%hhqoMho>^i_I8Rm$ghLG6N@{Ft5SzxMc7W^mGT zh?X`cZtn+~V8n)v8i7WC>)(*P)4@n~=4&_+@o_}Lhl7tYdK3nM(l|*Jvq?Z9?p2I_ z6tU1?W#U$`$fNw+>VN39@Y+a3a|%&OZV$Fm0<{8;lK0aGEO;n#`Xq7n_C#?bL9rSL zs}02D8X@Jp`cwP@H`$$UaBmr+&NGR&W)STkk(u8&j9Af8ynqj=_P32#;Ye~owq@?e zVtDKcdpeIO=N@t_)5ybG$Eu>BXam7v^w+cz*Ei6M9*_W__R$rMxW#V6m*|3)nh|mA z1P2$zTRexqKLdqboZO?O#6O-Qr3zwGl*Mj4hlJ@!{jASq^>jr>BoVzlP9{)M;+I3o zSKSW{eHD_pueB7;fyFpT?99iWWaxgl`zsm4iL5Oz{JDv9D-2H>INcuLDONI$-_### z#7rHJ5lGE~;@ixu$r@0kk#SLxh;|k}agZ#)6m~toHc*?uZF39Roi1T2a~=Zy9VCNh z4ECvA+(3S1VX)4z#5=D;#m`yORCug9l=FjG*vZ{008LhbFBPf;AEDBLgPGmLX1R&% zs{(qz8kqf_RE>I1XNz5ECL>S6M?}5>+;t9}ycoan66nD|@D&Zj6LTI*Exs*8<*I?H z_?I~ALZrDJPyAc30XxC0za<+f9qrdf*g@^SclumV5hi4jfla6s$VmilF7?qmp^3&| zH?<-jJDAT~u{ZYf+`&|zjYkSwgudK&!}(he;<*bLBM;uC!qDYWJtsBX9_eL3$=oG| zUzRvtB)K^E$@5$bXZ2z($DuzD(Y*&q+kMC>CwV+|K%@T+Hq}G)b_a8@Qva+Ka@^>A zn~e0nz8ICjvLe}@dg7guyx)i7Y zN^o~35}^lws|aP}7dFF(g^~Xg$m4rR|J^Y11)mbN_zS)}V9HGv)gYu+8$5Syk&&Ch zW4zT{AU%xk-8ZP`RT?Q?13fa2=Wb+QeNma3AXndzEsI(CR4BDQ zXd5HH+{@Yw#L!;Q5=ZeCTtbhn0ejq;Jcb3#(7=bJfdeUqG+T@0xs5J3gML^`-d{Yj zJOeyP6xf3>a_f@d`MPL`1w8#7JNue5NFr-L7koONRcfG!E_2dHkmsY37b|#Q4WjwA z$v#}cbB{njx4APL`3L*(Y3Sg3sBn+h(PzpFPkS@7uO7o;5;Eu{7#;(ey$t=(ldsKu z?mTBclF@pC&UnV$_aY~pyvx93FC^NKK;HBZtddl+#qOdpOZY29|@PMn~$FpQiHevr_pswSbl#TOvf&LngJle`0 zjYi_NCByPJboF=e82do}97IbO4_w2W@B}17VRC8?GR_My=(9nITqdS^5uIV=U*Dl> zE^Sp*+e;a^V56IDHi>93*HfU*6*CmbMm+!^Z7T7G6AXI zo_W?m8zynONqp)$`(7M6k&w?d=})_vm7RrN_JHLQq1WD^n~eOoP3+kN#@i0X`%z zD^RMDSJE5ncoo&RZbKoF5F0wvUiB9^iXkI-!fr5pJ6 zfna@%3;_dg7{z|igx}^s38%mkJx4x0WL4Xd49(C0UpND!a&jXyOJ!!IkmuGFsXl{~ zy$l}kGj{6&aBE+%zHUOVKC-Q%u$WY&+zBK_bE0OcaL5yM{$AEr7JvM6>VO7k%79vU z`&PlX|HH2M0zRf7r_!C3lt=R$|BnLs5DfxCW4Dc3I2kWrnXkv8m40`xTu zI<1N{tVGq;a`YXiioxttUu?MF_~dj@Z@XF1Rz_P1r_^Vy2DbJJl4lM(6N`1Q z9H}*xFQdN5J^1AVJYq)<24N+}pbh)6o0E|XGZ}9(_w!#E!NlGQd>K?>LaG^PQ3HkA zhuKXgRO!x>{CoKhVsGNYOALS27=%A1J?dmenX1?e`E&u(MPi;Q}J+r)O>^LHk< zS7p!YfG{nJC0iIytIv9-LnGbb;Od-=8Pv&M-hYbEZD&^uWceF(Trg+$3$#{|6^&yq zi&?`Q_Mko75zME^Ku3OLVV7j^DSjx;42`?N_*UUF!$6NVf|3hy2b1tYoMb+KGV9Bn z&uzZnL^pfc!EZ=NBO}MiYR22!92en?}4ryx%6Vc{I(lkxmN8H#j}tM-i(HPGun(U76cz67$lIj=^{G&>yD z6V1^NjopUbX$TFs=X-tjtvTO|^Hdj4NQP=opywtd{U;#PM)1>2cJ2~SHPBIcS!;Rr z_ZiQB!t8>Ot!+5;O5&AI$a2_jzU3O5$qOZMOoMxT$asEze8gEQKl$q7%Pl~?>V5y~@j2GQuC2za(6r#%lTGZ#s* z6i)Ak{V|5~H*zy{MvRB6b})l2%;_EbP>wa`;j0s;8!S{|H~c2LCYX-!_r6s4pxS0& z8+$q?P(;X0t%ggiV~wK*%b!MJl@jCR(v_$_zdZ)6AYwXrps95EU-t*n4zr7W$LG27A{~Gvv5Y(_3 zyY&{FOzE%-Do^9S-yDW$Vi2LN@DXBV2>&zDem#i3!KIu z$k=*hVpZd}?XkMXVJXH_FUZIq-wfS<#QSv#55-1ia}gSS!{^?!_G`>@Ef$I!-hI!$ z4S|2t95ODI3YIyz&z297_NnOModh~o)P z=)Vp-lRJ~^?dWKc=hsEb1yLjBDyy!G^jV73|AX(7nfVz`;WAR@3>-2O>97?0?;zA? zN226pC*E@R?E_`Ta(nCp4S(kShCF)=)#X7?h+sJ#=%M<|sz3D77#?Z!yW5}#T0xnUm|Guy{?4xb z!zvAnb|&`PP^`G3+-H8p{%U~#WfUX+i)>7SCUx{nJwDZy(>88&M!w=Bo@CgWZJ^H* z?9VfJH7PTjvH<5`^f~C7d0%VH-o!xBosf2;_-r#MvK*)T7D_zI-dy8#fHfsB_Y#>F zVKKC<87rg+9)>SSDhKkcCc9}^XfirXLV83pLSgtb7(P#d>;K`TjOqbK=9rnx%-f9j zn0+_Ao4MieCcM{-wmlCutC_ie#yxx+bZTh(c;tYg<5l+BovCvl^1UkZa~k~A6|24_ z92N_u8a387XIin#kk*}fjpFaU$rkAcwY6b~XTxUM^Iy$b)Y!T+P7vT)?@Ur5tONR;EO=}V?Y)R9RinfU_bZ6~}1jkza{ z$39%fYXOvc6kBs7GJF*jaFKPNMar#%iWeiJjePp){B>~VT{Ac6h=r_pD>8Z)yM7+| zJsTQWj5HmAAMGQE@q_eAFnqYbQ9ah^z444*LoYxT`?;Sv@k}+wzV8oh9AkvpP>G?7 zk7MzVVrB)o%i05RcpH~8#(cb(Q>h-fi+qJqaO6t-i&Kd?R}=ph+eoe+O=dpdZey|?0cC|jYL|A&52S`TjGL_S-6&OfQN0* zlP;*o)o5)Zy-J$Vabz;o(U~(iompkB88M&bRB;_ zpCjBYBk`Ap3$Hnyg`A^N;ouYN8^sMh8@{St^fswaMaI@-Qw$^*q$xj7C5ETy_28Y# z+|Qz^>ok_gYobs{tWB(F0$p{^&}nEbb&Ru%kE!?PhIU%xbM6}04~G9INU@TdpBkz6 zs4RGc6`TX{^q7kGjfuf7&@NL|u>d&jyF{XvAbF~A|N0AWz*ppY5;tL+@D+d7bgYDY z%+{z4G=iEj4Z)B8#Phs^nKq!;MOCs$O7l}4ekwuT%(!UZ;_<;ncV~Pc}+b&f_wf3PZG}bUSDXqQ8<>MEiwYfX>C4bC+hR%#-=)D?6M7n_{QnR!HmdhPetWZV#Auf_k=4IxRmf4oa7RB<- zB3g4=gRQ456)msL?aa@lLDDU;uDC!rixz1_r=xoM3;3`e5fN3rigf8nJbDQ{*^&Gx zS*t@Vxj5Q8FIwO#9_cnvp_^=vvGDa}@h_?v4--Fvp(!oAgA%{uG2KHA%^UD<3h2+0 z>T~5ec|`ZgLs`j;*DJ^Pe@^mhPLTgIMB4#+?X?!F_b1w%K!!w7D8`6y?7}a(2&uji zz8^~rBPSKRw&*LUu-XnhVh=Lm8eso5Lk3(CW{88Ospj352i7;X5B58boz4la5OFA!Hti_J~h%^KEj)_U|eK4Te7ivCRYvWgT-L|(tQLwUYV}yi<1N3cV}((P3h+e;YkAY}C2$ zPetHT;08kA(Qn!?{UMoLd${cv!x|WdS7ie<-VP044h!Z2I#UX)*K_GhwF-2U{1uc< zB-LcIAf0@4Q#q(_W#x^9g5vLz$6Up_-Iij{>FnlO6f`;58WI`uJh)GAQ1JYqC$0o% zSH~RN2uo|Jkx(&EMQfnUl7oC7Jq5wv&&bH1Q6ghYMk%-7?ee;OE#-&)8EPqgS0J0v zR$MDxG@r6ew_0s==@;3+7GhgzeL=PIa^Q|`(f6mhFwk^}`2A?`)#ItU9YY8J19ViKFUQ||eHr@O{e`M7*uP6k&u(Eo_8JX`4Pw3FPn zr`k|5t`6X38i4;hjJV-sVLTZd2SJ0h04*^ApXwHDO=%be;7qF1-1j(FEc3=m68kpG) zWN`FHll`IP(x!l;EJcU+Q2#*riEp28y>F-QtFJd0%0>RlN_lm-_E>LbS})ubqs{#+ zyRFAd?*L zCRuHxIlZw&$-1$_ZY=CM?D2E-aBd;LVB?jAdX+&!fZwE&16UlpsteKVc0^F#;2Ft7 zO`uw23{NFfZk!gc=AmP6R{F~6Vd{7L;~A^-*fyw36wqGfukud$Y?={S(v}eJOP&kBNEAT`jqga~7MEZP;j) zEd?zj%_k+DJ{VVs$%i86PGkS9^lbv z{D`NuCjdDmyszK@J!UeUv?aiM=(*f$Jkh>xn1=l zdTHDY4-tt=!IG#5jVvd7sw2{=xANNG%l|~KE!LJ<0tP71u}Hz3+SG*VJ->^1^BHk~D)(OhJ|cWFb31cD6A;mj+4-D6nec3o5$4 z!XkShPUa4@2Ak{y)yOBBztD~PZ(%AOq|0h_fAP2S-SVt(N)X}#f=@evG00y?<_nsXqw%4cN~)%=6F@6p4Rs*ek#So1_n zDeFP&V{4rCqIJKuB%OwLkx!E#o))H28@Z=G5=37a^#PhPxANV8pVt%rH@}k(Yc-YO z$_e^IM3Lu|Ppzp=1xuJ0RO>S^=>qZ>Wo_%=5vj z27?P;?!4?A;@skxZGUBrHxCyd27wvUsn;C1j%uzFFMM!sbgB@1>? z5v4x4Oiz>+~KHzH1=(mQ~U*0D>tWWpx0k%-?e;HKy5)ihX?xHz(lef)`%48>X_d^`g)lwMAMRW4K9OEEEk|l>%I=ifsU^}Tr5AHQ z_dNE!@gG+w>4|}A^nP|pN9m?mm>w{zrCO3rDoBR-BXFE58hohLVQXxwY`bGEU~Oj^ zW9}t+g;}Pm0i8ZwTb14ZZ*ns*&0+FyiG74;HRg2HO%KTA^k(!H5g|VQPUf_8M)>^A^*scxmrOY71zqe{5 zGb0;vUg3El(KgWC#mfyK zM%_iHxF`OeewY8PoFMO!NBJKq+4X2)oVkR(gKJ92$nb}e@zE!K6Zz@w`Y`aSNFX39+2}Yl~of@P{?^dLkF39>Qg0N=?Tql z)$dWWJVyG@e8D={{><^j*~Ycsb;woM72#}P|86NSH8OS8*7{$0N4qDaw@)jOHU~Q_ zkRI)x>k)l_%0-pt*rCNJ9e6Wv!n9f_PB*6aVmWE0^qP)rzmtKko4b&Cm|}U!{r9EW zZ1#bGyKMqFqxaT!gQ0Fqmgg?)lXNVU#q|2Q4*mJH7^rU%mhB9@TAThu$&UuZ*xR@LRsA=}QmumZy;zGIlZvhAIvp1HdiZknLo@elSr_9*U4?hwykp0Q*w znB_bZIqk*lr|B)W-BQuK zgpP4<^_E&;GH2^4qmhoqz=0bXFpudcZlK*B&^O^0-3eOY6TZ&PB8K?VGCEci5Zg+- z%x5eotaWUE*yh>(A@_BYxeFO}AeIBU^#HlSjg>wALjITXE4dyu6-vTePxXHTr%in` zyI5xxFW_CWFzQ-7`%M`4DQ9jVG9FOBWVt??6WfHRWDj(ra#Q_?mm`6Gs*9A}%2#g3 zPq-bo0%zz1U8sPe4hI$Gqc`8*U_-0oQ#PtK8I_rCQlF+Te&usWfz?C^?16Puk7-Wd zq<+eFzr!CZhsqvb6z7{2`#X!ePU{-DF3dImw3T$F1zW=Mh7St=7FH`PHgs;t?BG&C zY0j07X7)U`?UrZKVxe`Qqh?d9C@=l<{n7rpNWF6MaCtTq^OSt;8p=+!iT)~3430Q2 z4nvymGmA*L&UDw4Ez7M5aK$Y$zjH_jh4ysViq@mGFW4dV$pbwj50t&WrM}_5-JI|q zxeg~hidrLA)HV14D?+coo7z$#XrR=S`Vr+UA!G_KHpiG_=?s%Y?1i_eF|nAx&{!t2 zUly~Ec6MS9QG?5PYer-uW~zWbs)aXR2J@Z;gzy*PSMjI#K$>iJS=v}S;ScL>sX~Xo zqtafwL7c^tcv(M-pQtf8%&i! zkw{{bz)AYjc(mu*MVL`~WD+b?PAOsP81=O}20QMKK7hDW zNuuDL!51VjXD67~C3xtUQ;}W$e^g;>A~NE!pIUfx$Zf)Ilnt@+FRIeS(cjfNTmfa z5T}+^Lj8N>I5~~`l-1wSUq`73Z-q0nbyRO|M7Qpy8?$AEg`U|(e#_9rfGZikn(Wf>CSrk!7`L*hIwF@5YocPPe zvBGPhl{>7{5{ilAsoQgyE^|xR+47>D`V{BE%+CTZuW=(>PMk|JUFCMVk~oLa zEoUk}C$W1g@c|AXXK250OZY5AF|Uq%6%cy-QIW?7vT*H z=XUc$t&NTG8`!AtfsNGvzmHcWk$Ax)LglK(BG z%T`q_yY>ZZs4hOEE?B)U)Kd6^v*9moM7+m{jrHahwgB6{Kk_()+4sfQv`z0xu4{e0 z6c$}2s5~=BKIX#A4-H~23n^$$Zb)2XkKW|kFUR? zBh<0W{L8!46`2PLcc6>t9@?%^W2+k5b?76Cu~FS_RkUhQB#n!e)KSpNaM zh7+`rc*C>dHB46}YEI3>Rx75@MDxxgQkNeo8b+MA8QLcvkHH5d>>BPlU*XH&d2L~L zyu@dga7T;;Wq+IwOgeYbeIUVe;u$iY-Iq`2B<`I8g&3%vm()eCi;rg$XvM2|9L^HC zn@pTxC3w_ec(5`rD?LJ16W^RfHrEb1MeX2qhe$>wUgbr^?~WreQ$QIA#7NhJk_z$}Aca#qE!b`bMT;A3om!q?l2G z)yT;$hyAvnb3VharGcp|KxFKza97+QRW|?6Jl8zV{Qp=w3#cmkr;l&ld%2_?*w}&H z-Cd8Z*xg_&b_aH1cPkj!E#}`=?Cw?+;l|ecSWoT9ns^IK+7JW-YP$PqN#}V7z~jEj$6mh2UF0 zq4Ss@8JYRyMcz{*_X`O+j+|6Eyn_-%rc`ddAhf!WQ)C(edU7TAusXShI_z;Q zPd>>riXvIcrCP+D$eeBk|0Qrs^SG0FAXlP^XtJD{g@|&Kue=|)_&d< z$NwXUCe;LMV;A;8J%f>U7Iu9DmZ8kPM)Pxi)O9apo>~%8`YExCfyC#|aoR&c1Ewcl z+#T%h1+vCRs7$zkgqg{dp{smcg8!2ZiHvXu?8}`X61*UKx4}nQ;iE$2!s^o}#YIlM z6c`f^NRwY!8K1B&rh#%Bj@;-1Mx!NYlWm-CbMl)N(SBRNA`~Qlww7)>CKI~aQU4T% z>~0|rB}!PB=k0+aOGAx4u(HBZxh5yboAw|fth{?pFn=$=<4i_lr@?l1!)-a?&>;NP zx~V4+i)69-#8&1v#`9SY&e2)lRc^fkI_L~Lv~t@k$q${UKFSwPYzB5P0=md(YG|7? zoWMqM=U0#k1<4^#2cHl~?6NVEWHlHas|sN~Iip78`(JYRTfke5=l3HyC(Eul$eyQh z2Npu-EBJ*_B&3DT&j}8%B35=i?7Y2j$yT}>z9LR4icT>Jyw4dB3N5)!owK$O;q zGePC%fUjR+6`o^XR)tLh+4kHZ%NK(oH~?ND3Exl!mr);znuJ`K1oh12SzS_5ASK{j z8!Jn~->|x#W#YC?@FVBpXh?9~*_Z;9f&p>hE{M`^;+=jCq&DmO&IVbs=)%`LO z>DdFGU4_)&#mx>z2TP!!i{QPCMy~G1Q~Jc|?c!&xnusoVJRwj@P5N%-=0x8UgB%ZX zpfl1lo}P}Ysc?%yu0*0Ae9#b^v9^j);dT_u>j0;?mN(P_O(0RJR0f*p4qBoL5+f9< z8v#ONJ=}2)v`rdLK%WWHCf%Q6HRNLhSeW_BGfbtd08Cf zWmb?kAK?F-AcUTAN5P=s@_Qi>N|IO04j)+v+z`-l9`5!v-slpLJx<~h*5rX)P{T|# zsD;@og4LVADI}sVTwtoKsVYUlSXpRkli6f>h-L(F^2_;t5H&lUkR9{LE;OKA_mvbK0Yk9|5e;j?^dsjSWL{giuX33q0f`eB<-P#}m2BmdK!nNRy#h zXstM#cI>$^D?7$%yrI+TZ%`=3=%cZbu0?M0x(AWdTcPC!aQ#DYI*ZT{tKs5E@RpN! zdUNRLp3%;nL9K(+JOnpJl6g#r1nNV!{~J?`ienYc#X{N2DaRvo4)NK9CzgQ+qp0)g z%>O4qQPV*zwM8!dV4B+yDt|1jd<`h8FK3*c8!th9^HBKLnrJr<$#sy*3rQ65G+p5J zJfPODq93do9EI6gPdL97H`MB<_n@7VZ3Jr95n%L(F8gg#x1RbuVRRAyrnX137E+=$jLYCKo>G|-bq+MSA=k? z;T~~TJ=lYW)Z5C548W3#MY4OigZ=E}IJ9A-F6Irp6~u*9rJRAW*hN-9guQ>@ zcSDgtwaC4dOkb#_bJ7hFB9_!m;0$A$7X(??SAwEf(D>2h%T6zddG9o2MO@mF{m>N4*Ue=P2mX( z(Y-6+sCHyP1rUBikT?V3p_j-wq(>&T`*vI5(@#;|?Dpd5)HR27x@!!_BP4-dP0ajX`3ZfWofRP4N@w zWmR$XhHhu_-DPlkb8cTGQ*WW?Ch+`y+|_#SWfJE$g0%@)1^!6G;^^V>P`x!{N5LnL zV@=_#cmoX=r7Z{`g{kyMA6x2og6rAMDuC#@2fatz|%SY)39Opt|`6osrkl?RW|i)P`8!$BZXfqwkDh5OH<5 zz0*Xk9w0+L;)`BJ3tBzEck|nG(ZlVr5xVkcH}*W6d+*Qhyy8rnWs|(%q_VB8T!pU8)r&o9{2D*yso4T<+V-?~Pxi|1bZrfFS`j15WyX^V>>! zb9qOc?V|FD9wtM@&So!tv6{;}h@8MDcUI4IPm*T^xwe1ap=vhGt7YVL_kdX{1BccE z<5F8Lp){~%v_G3nEV#U!< zr7e+KkLRejp6XI}YyHWhhk-iEFa8z}N?qk?L}#MqFnP2*PF^gp;{Rdfbl*w?r61y4 zDx7tEtDH=#+=d6$NB^iD)nbSt%b?o2gCp_9*Yg?^(6rmB((FeKyn`OfWtjrsi;f}W zOQgeiJ@=6&c4`hosa1TBPj-YDDI zY)Jg4G$_{bWE>tDHY7_Lq--;MG?OQ-#h)8X9-=cp(Hbwb5Sgm$`a$qu;oM$95KQSn zMl=8)`3|2vopBEwQ#?KWE~!E4F0xR+JX1WoJVWRt*+QReE|jj=f}C&s_66(-oER7p z_%?8B&{X;~9#69*C^}%C-#7XeKck1~IyqjPX{Oi5dw;mzliRKQ@65ji$z4;PrDSt$ zbd`4JC-31=59>|MUBWmq2RY)E@;RlvU2|-9zVw~tx53ZX@2T^IPh-bR+c4#ZluK-9 z9@d@OZtq#9)ZcL5bDwfgaogSFUD;g@R~6R{*9rGaj}uJJFg?b|#SFlla)OeXEJ7}a z+rHJF%YM?<1T57CI`bWt1LS;CL9sX!16LDa%&OC0TkYmO>hbp+cOP{Z^ZfH%@orH2 zfY!RJUpB6rZ-|NSBpRDrxuLYTJ-2nVzqaS&xlwjMdrmr&)s;I+3&kx|(cI(wE)mH- zqP8c4D3M1jPItH`>QGJBZs~`N1<=-SAx11tua))EO(~=v_qB0$fg6N@}=)L1T>;2;mOzj%(>p{9zRcB_Z3$kS5Ma!*GYF7?;Gujkk>ZG`9HtX0Z06= zGooWiK$bKm(j86rChfhTF7(W7;Cs+{!N+6YqHGbn8R^tpE=O{Kq?3OKBt89CF{P8s zbj7>Zdy>7MG|fmB#z>)Z3p$49V=DZ7TPug|Q^z-xUlqT2Ux)7lpZ1PrwnOqK5d=M$ z+0EYB9@SlnsP`6cI!@`ex0g4Y_nqgrr#1b|T&h?5r)Mx*2%E)EQf4~AOsAWDW?NxJ zmo|!3nWUN4=&nB{yOfrmd0oi87g0;`XAZLRh13S>X0?`9N*`kEG;ay7#P`x;at}3Z z+iag~S?o6YFW0s`QhF*k>4~+Q=;%*6#2iB|C!kyFkm<7%*XajeRfWIyBE#o^Pxo@V zc{$rFLP=zEEAmw>Wfj!m7BY|H>BC%JIW8yB1Lz^OAV)zVZ3QFSLocZ(gEz>c?WN<* zRqqTCHlg0aAb1uK2Omfb(GOni$}DmN57R+RB`X@g1@_WyrX9xMCyvH9TnECw2fA#I zu|%IiKdFLbNqT@`dh0nyr1=bS|0sG?eWKU+K)7OpC!gBFxGi;X-1Z9#>>sozut&hy zfTX}uX`iIKoNiZ|kbpkUlk}+kuC!Om%T>hf#w_);dtOTQfjF}oaOn`2+#xZtSbs6t<=3bs|YqTtw%*c;nt*dmo_ayO}zIL92TPghTQ`nt!_ zJGE6x?UW}e?_DCt_Xd)z?hyZm4us(>nOq+c>BV{eTU-%8)**2)tl#C{Wu#t4wG zTWr6;-Zljjb{b^*OeW9n!+sfwh1C%M^D^=Dk7PmeBN;=8i#!y{5)W)l|EeFLF_w|X zXbWQYx0p*>C8d+w$z%DbD!-PhO0&cn0`{zNAH4E;BJf4DAF4(aGr#s!y{kR~O;=z2 z z_jT`I@*@3JNgF~otp~lzhSRmg4nnvUJ-q7>KaEYr2It~bj%#$F5mKZz_Nu<${i6dO z`~UI%?riUS%dcsG9*`~Ik8fU|f3{6Z0p*MwBu9X@n@pF(0_r<&QPr(x)Z3AZn8yB> zijTxUVm)k}400p6w_Km7XAoUSHTsctR_@Aoz+z0L9?(Nny}E9KxoY8A=^p03nZLX=w0po0&cE1nV3FWnBIddcm?9mH^u2v9{CvhxVZ8{js&AvL~1GS#{w-!{7^KX zp>d{zh|LLKgwhGMpq81BZkjKs?=MCU!AtE#vN%`DC9jrExslRU$);>Xs(hs{s*hYu z4wrXgiS3XpaX*XDq2YK4mT%bzJIR`m(F6amGm*76#JZm1Io83ZJ%pblh= zmMCOJBYc{SXzh(ed2fQ~D2Gi{mz>6HJulS-m5g_K7%}kg;9kq|uMMC|vrwrJ4PGEE zc4>b6=vGMRtC|fQUMA3*bI1u!@t*O1=2VxciP~p#n!L)<)_0X(5x*PGZ_b*2H3JF< zB?XlaDjSf~Z?p55PfMQ@j!pJTwrz5VG*U=w1{!7bJzz-^wW{=>9EnXb2voAFFEBom z_xK-DEEwDPA(@$XVryzQ9!Q6z3(^&72KkEmVo^abKVr)r2P0lb`$BGQI@Fy_{Y1t& z0Yv(1uT9MlmFCb2>#dDQqAG*P53P~D(SL7;)DphSCEX_TJB=CVZ8@D9SiX&^$Z3hs zs_RubiQi1J_^6*IS~vh-p^2ah8^w`QO?f4Bl3hLnzr7XziEX6YQgv=vrK9->WNSZq z+7*pZKi%eH}uhX+YQ0` zWj9~ZDW@#)?&{c73O>kZJl0agj-O*;55p^BJ~wf-GgxJhxMz6cAMrgBh`VLRYgt5QsXN}`9paw9 z$no?b24(e-uxfSJGm$-#JJ|{bE}3p@6{JqmJm_c?y0|FvvYVKQ$K90(U=~ieCZ276 zyu|<_B+c>pk0M#4@#UxToEW+_?Ba6||8I*Q`~t7AB=ca(!;iy|or6F*_jQ`6$jb z4o~wi_IO|Dx(TQ9o@&(^M5?Q>ugdUMO}uJrveX1WR$aLzF_nVg9u)%9l)~uqGd*Q*vTljLs!-%_N9T*IV!#Z zfu6*T1yHH!4=@fRB9$)*232zqHl5v_ud@mi@&NyObPsp$~LmIThy4rwN5TugG zh%(Y6tQsdMp-X&RAOm!m_~R%}FdbfN0v^sSDzc(Ezhc#29?ZLe(PLYR%v~#|nkiddx<| z!G=PkrK#4ms$OrAg|McZ?&sfWi8_7~iqnzLs>-`eETKEN{Y`SPGF5RYBW*uzEA6!$ zPaQRU+WC}WK0`alFZ)7!3Hxi?8ry%iFZ6J`FwY$It`y7JVPDs`u50REY$_ z?WQ&w999!F(I@J0#u7m>sJxp6Z|5dsJO|E7S)DAnn{KVc$Zh^6leL)G@JjLt z^{HMy3woy+7VmX_l)z zfq2-&q?<_~_ImS#`pDV@CcGTs_w01&`^DdPuqdv8khnq3Um!@l22?T(B?hz`dMJi` zJVkuan!LD&ct|)=s~J#Re?0D)#H6fR&yz^Bg4h*SMV70y2l_1rlB;P?9&QxY zd7v~-Ql(ztrB2It1U*lXK6o11^t!1A7?l%`i} zg1kYlD?g)m+(zmEdJ~V0C4%Emq<2KB{2j{e)<%Z41+|aElK_nW~^sO!GM{U8VACJXJ%hqOJXLs@MkN6ffzzx!Ub?|M=B4iBEO=z(a%Iu8p6w?;mo6CTb~)P(Dk?J&G(1+ z%3rAXy%B(=F$`I;gztRA)`l{Xz|UF|>+@T!KH5 z+ac()1ge>vp?xOuF-^RRtr#lRr^$4fOXisoI#kK69$xC66b%Jx3gQmQRe9Hol52IUhvfhwWnFj`O z8`;=IqcQs62AK`3Khq@oX^B_`wTUZ^VLu0{?AeX9egYP6f)ooj=%MsM@<=6^GtgNc zNWb8L#5jk`)#TsObLvw+N%y41@Nio?pg-bS6FaOZ84nXQ zN-mHJov{YWVlh~ggHx!ZNJ6LFKzdt!GW_7^SyW9IC67Lau9TBGlUKx``qTM#q%@t6 zImGw+NqwbR(jHKF9_l&pgQ-x-F6ZSwN5Shc@=WmY$)MC9Ns~< z(STOuzdV&C#ld_pN2}!Jp6?P9oQIUFrYrgt?sznI=R|EV$nyf)J8JA=@OW?FqZNUB zXJ}{Oi+^acQD`z94sM8)TS`pUhm(11T9Y?tkokHDDxv{$cq=spkLf)c%rvf&tYs7Q zx`vfKB7z+#mzRsmxxlpil|Irf`?3^Hb?0~~0(mVkDz3?!>`S;m@?=-RGo=bnC7PTF}EIsUD=nr zJqOyi5;5g+=-NSG-!C%-&X*i#L%6j*I;|Z~UcmjXAwu1Pyt^G=EIZ$^W-X4#ZyV0{ z2O*uWa%(ln?OFX8RI+H+>}YT5=a1A(X-6!wO4tVys3YqKx@ia>#q>2=Zf%R&Rc*^;rbM+ORDJd&3nIn7>(xO~?ng@< zrVgwk$Z(C`HC7+0PN}*4V%Tr9=on|^7Uz@u>knmLrG_X3UAr-Lry`l%FmlY}$WxWV z&bUM_WF9hnHfPfguc|MWk!j8buh)h8+%ec24d{;hR4hd`xeB#rl)c=yhiFS4dR3p1 zYU0tWWJym7iNvGtVRgCSvFGsc0ICvNV^dW~jXC^}+UjjsQ+udt&4=c>pq(QY+LhZr z2X`;z1V;1km%0P*rT|qde#~#o4~-u{9;fhmk*L`gJ|7zy$>KKRM!zDtr+_D!#d*X) z2h-5t*^E3`-%c=f|3G=JCf=D=55a0_4<3I!XWSJ1R~g$TD}OUZ2n0$`VIZ+UC%w_L z+S)7CmC4kR9QSehR`GQ?Pl0?+aE|v)@24|AFRwF^+OlX{ZkuBBB@6Hxc|1|tDik*B z7-xx^2I#po7ZEfc&l7i3&wHk)4E1IqBAMCq+#LdXQ}YI)n+AfyOTfE~F{TlT^pZVY zPt;^8afCk1V>3ZWw^M?ZEAn_M_-#}*Rg>Gue)0+_MJy|BLk3kR53OTg?W9_|B9btf zdZ1!N+podjFV)p#NiL}Y#4vAz)&Hb7qJzW}Vq_`^PMwKO-?1%vGGXnLyiBp%#@Mde z?qVOdx0RsEdjmE6#g$}v1W}PbQV@BBqT&!?H?fG$M4&rzW`B9=Ci>c!1}l4!xrT{! zpzcGJ%P#gM;upnpqNDJ7Wa5q%dZ`|C^_zN&`SAbMRGYjNu@Qls>`5}wHnX?!04beL ze+7Q8v=&dj>S8d*^E{Qvpq%9dzfq0k^4#>Kp>}wS_mQ_Qwuq{p)}H9~gn4qf-QTB> z&nQPF`#0M@#}?o6V(pD{NH@l1Hq zh)Ir7+A4#T@5+1IQKrT%bNuIsVS>y+DwviK#YmC+%e$mFqJbaD*yqO=$zxO^|1es8 z>OJgD^oCJg?8ctEL;Por>Q!ffm0Sw8{xnlH_vlpm8Nca&D}YY8jed`UP9{MoHPKhW z(tP4zL+PBf0@?5hi>s<$Q=6#1XRgLbZ&mL;&qKHFYT$}ZnND=Jo-52X-*w-W#=XUT z(-W;8F|LaDEl~<{{Jtjo;N%kojyQ+|Auxsp|FdE~YxU9#SMb z_QplMsws%;dE07cBvhr2=$JfJ8Np=CF}CZYHx91)em*kGpq-ne2;208Nwyw5DqK*O1jww)T#j?#9t=_PE+ z3`q2BM3L8;i;bOhV2H-jJ3!Rw6crs0J@4Ip-L2h`?j`O~?lSIqE>B9Z>yYb}dz$yP zW}0=SIZWE@BbSx8%j<0eeNOs%>0~j(ua)0fzccAIf!o@`8O`{OR^8AvpG zf;Tfh?sTn%o`L*n7dm*|6()jSFO3yzzVrN^ zlEwD-FX=zp|CYb$f6c$L|7yP`zGr<#JC@sn?bo5LNy-E1f$$4j-9fy2kUG)Z%~RB! z(-oI8H)VFpn3O#!FH+v5yh}+;spAT9_w~H<79vL43m>=}v4=szH8F#nQMscuwJo#V zw3WACvX66ocgQ|p9U~pT?bGd5@iGIrCspxPzDif9Cap$HsF(Que>evFvK4(C-srJLAMWfGmQ+bHCFRAF)Le!W2RgbL3KKC}EK5mZI zg?RZ7V=OwvW46YsNfz4DY5fav_-}Zh-SHW_V=u>Akwx$dS+ip0nQw==IdDxlTcdzdtnn3*X`{}3o&Ubd<{Oyha z$0$b~M@_qEizHU|9mJ?8u%p}_!__r^Z1TvpoLrLu7IPwW^;JYnYWEa5< z9i|g-DBS@1Q#s&e-Hq@TWe~JpA|@5gHO3eGrDse<4#O+dxlxaIvv-Vl5;>&wWI^+) z8neS5QJFLXJ+gx8lkIp9<-iGSrDJz_Eb5KKIf@YrD}$B{Bht1Stl0{BVgE695$&Cb zr;-ai`%!$e{}KJJ#AJ~au?Ps5W=PGk@@;vg^2PRtX|{&FvE!g)CtWU@_$>1Y_UY(Y zX5U0_gkefAWi!>@+mu|&5bA&)(2L3+6nzAp2eX)^(X9g3Ng$v9qlP7n+|P6BDL0Z! ztBp@|Q+rHoQXy{nh8p1cq-;=TAV>E+f)jLEF4K3+`^`t3|3 zQr3+aWE6JrM{rs}%#YnkJ~ahzU?VY=K6oL;saDx$oFyW77GGvPc)F^oc!?a;^4H^B z-hu0_K`f>v2pwzA$5dhp2SFumCYD|t+j|M;lpY`DBNlUZvP34-I#he8eWWV182;Qd zGAEU|o2=MMEy!iIBL3vibI@6W{H`$`k6;K=@*G)T-Pn&6U!Pb-2hhGtpp~YeP6EUc z^!J;{Zl`clWd$d?;|QK^H)cwFW|f-SwN?x64c-Dklt3-bQ}Cq)}3sO+Ie9UJEzl}59~0L1*e=3I$LbS3qJA!aB}Aj zswSk`=4*VlS>`aYlG#SzZVVBo%Aw|A?XyCG<|}NMoMd z*~p@PQ!AM>gokQQ?W(0m5vK?^R(tgFkbzH7g81O`=1mk3?x%> zo=yw(gd{1ya9#fd(?CC0b`@K z(e!7|>Rr!Q>5w^HZ)}9BVNymhgPKbZktPYZw3bGQ7;1jfZE_W1pq1t09F4VZVZ zW}&V3gjm{PYL$#OnoF5!>*Ot|+_PbX~A>x-|=9_k7C4!!6KiZ6^8LUH4|UW|;_Fmtck)?8+9qs}ni znvwEEp_0}_>_(3htWW);;i86lmR=R>r>Y){)g?-;jd!Xw9pQqIp58sXO$RcjfL4z_ z3hl*t;PP6Fx-`H@)NDk9{z>)8+FmmHDIU3*zEV3&-E?;0hhABbY)J5;l70o-!Li3tdOPVTdl6IL-)W70SX`=9po;W>~K(V*#h1&(G8x@f< z_Q^f*HYf5-_$K8y)=^2eT?!EsyovO1jMI)v-SujCyD!Xf*qk5bHlPywX*r0KJkb9m zZoQUHWKY$2<13X{vf19CF4gR>v?5OOjOy5bNE1b~xqkEjhW8V`Qh?BYshYg4R znmFkUZ!Y1SR7`xPZr7^XgNbdfXs5dd);x=#zxu^}vW8H#f@rb!spMexv zU>p=*8;7+ALVf9)xmhbO=9Xreue4~>p;QO;@j@LURIzO|8>?%?j-VNjsSV8}`6d;< zb&XTz2B);E}%u-*uiiV6>n0Bq$jdcqW=BR~exW1JR?34^gY z=;tUzh#$p5+Do&t@(2WHF0k@5rS!rZ{VS6C9(ju}Dv(+mPt9OyjWJ(aVMa-F&30NH zqn3DDEMd&hX9yv!2de4YxMtWTuO^Bny4B6Q8Upeer4=S~a#}G#yJ7yr@>7kuW+&Fw&uB^}zm0ytTmbJPb$(6(^W_P2$xkTDt4 zi-@M!%xG`+k)GgZP2npMd=wSS2;B{9M$%5RDVd>IahsUeNT;_IMoII8A^KrtBz-pZ z{#pgIyVP0=qPNpdaX9FsD#QR5N*=t7b9J zkid08DqZ96Xl&Q4!UAyxHdq_+BRT!*<|OLy+JGXA6i$NetWWob?({(!%UxgN{?nya zMYJG?QQf?be_Io(e!#PC6ZMV+vyfXnCA2dO7~vqLy7T|L#!;c3m>o}}6SiRwa*@ZW zBfe(j#OG^=9BU$u7FrPj?g%DgKc1)6(P|^n*8}7UT9LoHZ|+U?6=Wh*$2qrqbo4Ju zMqR+NvFhjsQ+?*8Duy`=;%V|>1~>l!D|rY{Y00yigIJ(Sh1^L2Vk-xz3bbaqj3Dxx zj{V$WR~@mtU(kO!H(BsqWWmdjX|j5HO)n-&~j(6*glYdd(E?{V&!=+iNcK*{=+Z6X66yyWPE(FU`LZJt1RT> z{lV0pMR6kgiAf|9Ayw$k*`9i}B}^uL&v!Rn8{3FxydloM1HYsjr&OFhE+ao9QVHe* zS5yEgb_$QIGe1`rtKG&|dg2FGqL15ouqhd->}w2W{~8hV+#KPZ->C!l_=t7BhxV`WlxUDDBdP6q$7wDQ?vgd$M6G{&qIqA*o-Y9#@|FA1 zjOI|uYSx+vj`A28r?g~S8{->Qfm;f*`>*&a^{|;=Q;)a+wANA9AH?b8p|WfE^^}vSGpfpc*x{T2ywEhfdkD4Ob-2k6c=$GYJh$QJ-}0n+>^YR*Nd$S48JU=u zchmwMUy+=cgL7;PMZG4Y=EDwrxFh^_?)V3|ykA)Uop^p;;T0Hw!koq>d~FBZk(

azW$PQ0zq_Vd^6>V|E#OA=+OF)g5;m#7dm1dlCUr<|Cw}TT< z_b1+*pEr&}i@jvO&Ecirwn#v#?ftud&Y%gz|&evYB+A5slJ#fgrLU-<~1?Ozl&*cU)7{l7r zgY9cg?EEe9uy88b4v_=3<}0n_4!E}29XVmN2K1!Pt!q6=e?M2F$z z!$>m;YxV}6e#-Fv*Q{a_)_xLeUW1ot(Y!U&bv@55!X4*8I{pAZQJw7fI--?zkeLtR zfZObP8?tLB=Q0^Oy2knk@D!alW`(b#u|FC^Nejtm^n+?23-_^0B3S=$YG5;=$E^uH zg|Qi}irAfGuxInTR*m}?&f*vU3St)$(JE{5&K_v19_ZP=;M5lrYx;|wq0y%}3wCn^ zv@i;pb{$KxDqj9p;^@J`MdU~#H}M%sn*l2%o^wtELgOl4XWc&bf-Hkct{-7v&@sSGz_)TQ%7vyYiYW-~RN($3B z61Y!;mCWL_-*C#&{Cg$Qq|@B-YU;B8@cpYui&yN>nxPYqHpm4xA7fqhq1l`K{e-)j zfwVi%s{WGOjwdVhfxMkH2crTJ&mYL*HrVhj;rO9gNA)tm7K_$|M;^c z(&RbSse_T?``{XtKck`ApKwiQG;$0a{tLPoi^luM3deAA^N8zNNY1XT_ADHKAGuNi zJ#rEmY4w)Ll3LGS6cmXU4N(^gDa4!e@ZN&l=}$CMM`BgUXqL>}a8778mRMdg_i#P+ zcC8sViBQ@Us%1ODga1KoUC_SPtfs0w$C^Ww4<5)17N7>xWd zzK1oH;QXV+&Qw7Tm7XKZW`KH525*r@XeQnldrLV$loSyXjK_4!DGG|E0NBgy(EdLr zVCEE#&?%(|m4kh#aM(<2#+o8Ng*v)+=&2oMKH_;J;jIW_hUxLE?~^eILDOdd{rHvV zEJP=67RoSJw=dl~<}igcpSg*6p;bp)MffHb!xLG@Jw8RwZomfX0Bw$EubuHlmw~gW ziH6)yw6i=BzQ+F+OeN8Nyv-B;&C{R|asMkY*0%2h!U zPv-vepoM;+aeSzLS(93STZG+Ob9+A`+d|>WUvScIe#YvJaa4E?g5{|A5-U6-*~B4W zC(|RpC$sXE?DG)opJg5gv%QCScbt(1G~;MEXd`bdj$B<0zN8*|@^ z!5b~1X4WYl;FObu=S%`9A|8dxpP?0hQCE=(yZ->>#5mY*#~M6x?<^V@e;dd1&HN| zNRTkT-js}&g-K4q&pC-s)6i7Esi=*&CK2)bmR?OWYH zF|2JOalpSwN>GGoX7uB3rHLejSmr*0jcCWbt{P z-;MpnV3&`>TI$b!m-A;JvZ^k>Gn=2z!HtSw!I~h+dUJ~P&;{0nxvNOArD%vBSdUH6 zRrH9XV^SO%A{vc+61#FXXB|bQeKYVmEr@yth`o_mpLpjO?4ysuWP0s4CK4Wr-!%eT z_YStRj^+Jcs0I$O9XGxLx$z78G#{C;GxU?3#k2P#Z#S~8$=qXC&Py;eBW=gPQ@zj( zRgkvZxVehR#31ydH9z<_JKlw^t50ordQRjIcG4@JYR%*-i@)@n>~sh0k$zYjcaXf# zxF0)~(@kz}6JC``w7)%i-~_bQADXxgE&E^v#-nW`k;jYCB-TX1*X);$*u3K&8f-q6 z$_lK8K0MuWq#m;Ww){>zK6UuwYiLd8Ou5q^2`2KsWt+eRH&b(pv2Wh$1@33$zgm6QU{{Gel16=o1r6;=4>4pQ|qbG$mB=Sm_a3uC1^ zuyogQ>!-1`OCs?~!?(IH73}pd-mw^Z>yAxWADWKkj&5)YH?Ur_>SDkM|ki8bHUTG`-#w)H8Zjv?=OYzx&{ByvJJ{oEz}z>icxp8=j_uGTxv=|UiNjd^YQLia zm&1>V!g(T}E2x@IVH&^%GWFHzWad;}$!S3Fl%{gPlxoTEsqQ*X+-WUYn-|O%zee}J z9iSl}(v$BYnmRkU-b67knZaY=lBaTim!Vv%^Th!8eQ9)DxIk9k-U_5>Ju2_YP;XU` zKXXy#cZB>{9&(rg2#~Iy_XxvKb?$ zl^!#9d^_?bsI6B zBHZUqDkC&7nE8lP8)Qkp(|OS9Eb$4X@hEOH1l{Wrs*;Inj?Gmb9rqZoCjvRYnESX% z)#PBR-Evq}qIBs@09H#Ifk}-f z-w~|uWzNcTdV+>hd0$?Q0DqWGJFGQk3d2w#pEO1}V~_Qj<@?cZzW*!#3;w+CZMsXPM7FrVF8o#EzTb6rdAinI+$ zufzMp`K~n~?id>W3=(QKpIf=TdSG+U!z100I>FTb*g4;!oXAC^67p#eZ|H=DqEbWF zh)!Y`__#^c!W}Vzs*ZE;dKa?FtMU9^z^%RTf`5Tl%x0XX%BCAtLz#4)`tu^xCiZ3B z^RUfKy*xDm`{#XA(B7ZXb)O44_!lQaVM9I ze@1t7(A`u|UnFXh2an=4cKHCT>RD81ddVKu)9+IQc|$u17Bw5wIC`nMnSC-Ctjahh zoQ(yY6t0zqdM40Ox0&MaIO}xyF9-+;Y!LW8AbUVI|GU1GoY@_pl}}PIophg4J9`1d zOL{sHXQDD@x}F0+B-yMk#!4UL61GzIi}q9YZ1yR(5lTn7s#Jm;n`XSG7IOf-Ub|`c zw9ZuOCW9_Y#`DgI4*vulZe_*g@vq0C6H0JHSA~37730Y?PZG0=XR%8}`goQhI`WCC zlMsB9Y)HOw=+-CLP|L8C@6$ct8zIRPRiJldl^#h|&s`j@g+P=VJ zFNBvj1YMj!&Z3|sG23MVSmH@i0{U?`l5-wCZ2dtkM}r!wjCDT;D_k?exc}qWTNSaC z(oi)aT{?nL$!PqkMV zwXwB`*IAZiEF89welG_2n|N?yzu~a8;J}BnpLtmDG34~?Ayre@#RO=3BGNpA8D)F| zWjTs&4C}a!RYdi3jSp3>&H@YHp?i2eKUtjw=ZyOb?l^^<|L}hmgr7>zSf$q zyalP+SOl7@vmU3*oJa%gs)IsLQDS0N3Gk)4ls?L0>MA)O8U zwHjzsYbx3oBR!T>6gsUNXBh`QSTk5hVgJ`4D>9dE1WV*NIY~~d{F0B#-Q}I++{%gr z=+ZUKSWNHy6540g1UWSpMAcNJ_#Qevk3*7dK+;yko33b_fxFX^8#xNPY6lWA5ai4S zpDSCT_6X$3G|rN4RJX>`+BH5BU<7Z z7_GkA45pbJ0D0D4J7P4Ky4t7uCIs9KYL=#7(9pmu0Z{>E1Ah6Xbyl^VAp6r^pRKvn zAKrstk;^l4r;`?FG(t8B*wdHATe509=veAg*C&r-j_tSnj~xAY>5kYzsAy!=x_j5s zN&U5Vj`{->?g_0Wy2Xc_zlOgwn|cy!7Rv!F*Hu`N3$YcSa0aXJK*pg_Y8dUfvl7&I z&(Lx*U9Ka0$%PcVPv#*b^=mV+{;Ly9C;%Gt0eYu2S^WumZ?uX_zi!N-!`V49m9?mI zGw5xQUhXANq+%+BilF<{&OeZ@QAv|tiUqNk4V29@eC*GunyxlyIRYPN8FxDX3$#1l zj%J<}+R&eOCUtaenUAnW{>l8WXH;ILm3v8-$uZqAZ&52$M&GY31FyeKi$Y>np|@-f zrWJI?v;UtF#7Z-o2eA;IF<)&va-kG;kaxJls#x*C<}+g|nqt20r%%ydsk`}*sO)Wm zW*%mg!v6e?7iINM9f=qG2S20+)>tw&lbh&AZaNcmV!f@9fZws3WTTP(oW4#4s3M(2 z=WeI^fGJW-)bm;&vzFZ5(cbTAz~#Vs0kix9eaAXiJFhxBJ0l!!WrdVed=Bqa5L58# z^Dt|x6cs?zSZ5mJD!A&)ppFX4V{G{y;~X>W@0CLG2Wr?G6A3zKZiSy~dq%l`y0dy` zsBu~!s>z<{PC8XZVQu6yR#C-#mT3+L^!tXJ{_vUc>=)uCTT!|yMjZxVRSD^L-(ukbcu*Ho|}uli*H0+IT%1n9*_Au78Y< zXo;ctQy1|EpF!)H%?b4B`a?}XEIqS5R0P)1s!->9K`Q`ebC;eQ4y}Sud4-*}5hqZU znT2@SZqCQD@EfC@ni}b;*eOZe>^gCSBqVuv{7X04qWt6zPH-+oIgQd-+`hc)GkkuL zN|IVkal6WNl%ncr)uA^P1}T>q9Un)9z&F4BzAb&X`4;u7;3xRTJHl)gVzaTGC- zo`$3+(F?4F+E=}%{!*QAVQ&3~@ju~>6vwok%C?$H9eF98AG%RddQt32ZhSXb?AugB zbYntK1^O}!p&xi45_b`@D~!tEF~m-)W6$0e7fOokl9o!6Gz^UCRVOA2zcc8)rK4%Ri}E9RW_BfevYh zcPk)a^75vFVkta;P1Isu7LPy)O~F2A6(Z5Q9f+5%q8IoDu-}1jN{qowD!A}CmWeeH ze4u#`&od`9QWd_w#H@<@biL0;kD-I~5^sP-JQ=&QFuvz;vMFDM>EIY1LAx2n1y~r* zur&taQw5q=sGGJd>IQi5ZtRQLD?Zm#JfRR!@e$zSt#0R6sTZ4JBRvgw0BqNoz29=a^R<~ zs$OqRZ-}=(a$|y8Piv?TH2!B!rCw$RQwTEK1}jVDI64XpqvPfR@_SQ^VtP(y!n9Q@ zsiUY{>O?n&Um$`rqiL_|&F~HP5Cz^NG^A5V4AV$@Qy-B>-hdxlTwF`m<~%<4NzNmW z(H5^Sj(!%Wk!^lh(?{{0XA+^CO-kg93&McYcC1i$p3c$OFWtCs;^+5b*(5B!Nu`)@H(f6 zBAFHrYXBPN0I`B@Si%e8lofa@SBy-=Te=WkzsARXqGlSa-orj4h)~6{{*Oe!=EHw| z@rv#e2N{T^TN@b%h88`So0vvlWW*Vw7Z0%|#`D!yeAVig{n_|s_!2F<1=`jh?6fsi zIlK6lNbN~v>1JXZeYyK)=!ybV(U+$RFG`v%bpTcNkiH0i*heT4v>mLZ0y3pJ9b}5} zw*RBf^QfEUS* zr4hu(f1F%-o|YgEr`EU_^KbXygWP5E(>b)|R4k*m-Rn|gwVdt(f2sL7B$Y!K zE@1_QK|Ho4Vp|y7rww!8XY%a**qVpfeM!TO|IwL>hq2Uf1%kSdCz?=}Sl<=AEgk+W zL!|uyv9yVBy%WA44ChaQ>o3FeInnP;*~ekBekHMZ4r80$zz*n2yf!cVF#x_FfaG65 zbi4%Gu{`|nn)8{6?i0DMR>V#k<2{71zHap9tVNFFFOkZ{y!9=+9!rGyG0{ms&)-#{E}ERm*@ z*ySy$f9;7RSO@OjMukBs`sqg!3%Nlw;ye8P9UD*K+%teJdxq7%3)yyt=vg9hg40-i zHTW0{5}dOv)kY)C#gJmw~bUoSNy(2)M%Z$*Vl z_z}b&KT{<;mnwx_^v!G}f1=;vPipDYGZ)NDy?jaOF1fMN%s&W$_v?^3`NFg7a|>0t zofe!}MdI{bh(8Ks1+pU%+C#}LIJvDv6Ef4|L&ZueN!+O+69?AfNd-gg?clgDXud1) z*I43XR)q2|^6(iv?gkBHCueY&pJ@T-RAPUT>@Av1`)cylzR&x<#PNSwxF|kQuO(v3P>~IKi`8nqYaMf!4%`Hemn!GqND}J928nQ)9K~h`9L@ zRS&?|UqBSFCU#5&GeV0|DUb;Lejx&3b>oRfj&_Dl%MzDp$Xzt&Cfai!p?uWCZVJPW z9#3w~>L$Dxi>ohD^H4ZP;mO(QV0{Lw_5-J$j8s#J13%;*mUAy9k%RxCSH7Z~dT=(c z=(bS}`#6-IL2aSJPQEcu%k!~@fzv#c)dpRh_7!E^73TCc>< zUW>Mig{L2*|BAwmR($C_oZFlnc4_j=78b!yp59~!3CQ;SaA_@mvMLggIlP_6N<8nTx2S(jE;r}?&ZF|vZnjc^eg^tKwX*el3H-%o4Ktx z`j+}&FQr9WJV)Ec6G0lmbF$&xN?Vj|B&_@xVd`JvDKIRJM>hX?>MmCE|R^y z50`J^#v8MbES$qPWZ8FUDu67s)tfpPdd^M`?lIiEi8XD8a-y-Mk8!GRp_{avkP{F3 zEfjyAon7MzpUG244*t#U+o0n_GNex4nH!{m z!Je!R+D+JV9q45|IHk?p?l1P~#jB`}#nBzVy)*hMfZXUgboz1D_nS;$F0emWpzL$( z!j2vvNX~5#TCOy7Vr7{!u%~~laubxe7zr|!e?_y-<8Z_!{)|hd<_Uaf3^MTud)>i0 zucXR11uw;_%~J7gbg1-oq|tutx@*UjYi&%rVK;GP{wkkLdJ{sY%i0@|=>>@8(MTWx)o*K`-DkDCPstml)8J?NX>8&Dfe3E-HL0#5EdJSZC(~xoV z@Oqk|Zz^#@e)xWSptW_-++tRF9+~=uo%+HxtvK(#SoxK)5K3?kpO6~2S#?Hk-{PWD z$efz|Y4s*a&wi`%lT~^$+GA<-W^ZLdJW70KMZ;xA7Jucwk8{qa@H7MA z!Ry?`dhYZY=bOYk?A+OJEOcx27Ov?hloyM%x2P?KpFYDGTC07_YAuQTj+1ym4ErV$ zI3v-JVo-^SY_alsQE<&{VIfXwMDsiVNOOM9Rj%pZoZ&Wc;TOWYk)7 zv&YdA)!6-1-kZ!CR;7AeWypEeBes^8{~h4>PQkxFkQt@1ajkh2=gG~4B7-h+HhH14 z*JcgLhde(lY2u{=RqnT4CKaIP-&-9PZ^G%-GOr}N2D?@(Wu7g&xY{TGH5eX zYE`E%@81bs+mN6w!Q`CeG)+!sIy@7_`#Tdq`v%8%A@X(#&72j_dnlMo=E*@vGs)^% z@3rDIJ%lXi;r7I3YO?;9@ctQ)*9zJBG(tJ?C-^LTf&$!dLq>Sf!dzRN8U!s5C4w2r z`?}D{CK}H2F&Cnj&yoMyjrW~~-?g%LRxNdYyo+4Cr!wfMRy?;Cw#0rc$Ry$w8N~8P zfsxSR6zJwSC)W&o%~!H!i;-1DupoAjC4E6p&{o(GUyM#r&Mass!RUjWXq<|;_{+ZA zvg?XakW;+C=}trUMzh1^skn*KP`(Ov)&NNwfvuc}h*xe-tRXZJ359fq(vPy*chGM& zc%%iZoQw<+gL%`F}tgFW>P#ROd#svkqT)HwdID{_kBUDRRp&hj-Kqtd1Qwhb79#u zrF-T#xO)`#*mC4f0+Ek-XvbcxMZ^ky_`Vi*o9T$eme4K{H#<^2Af90Xjx4$_MPuQ%JWDz%e5nXW^?GNYzVlyaLVV zM!uirX`Uc`ngV^X5#$rsKaONrjt7(mS@oFTu0x7R$dmlgN<8x21%ICmSte)6ixuMJ z>wvWr%5Q}q-3-mt9sgq}xN3^b?7#^$@;rr*Tl4wrEO7Haau-9O;7BZDv9oLvIQ|RE zMr@iMjTQ4JbwC-pV;?YF4XUlrXA79>4{h&5QpNDTL_CR!K<5h@;0QY5BNR0U>2n5Y z@C5oyK>FW?3+F*0@o1}8JWFQIZ}9YK(W$Mdzdgfq+(qyIgD%O&J32ErP;7@9h7=43 zd&1-1Ok^lEKGtQfw}Rh}$10=C4obd4ja(hxVZhyKz++*mw;$necEx@^NERi`x)OIndx?G-i_h)3f+}~+A0Cj2EgDM_pS&{_T@U8;H@T5Nj2c#0$Z{JK1f$^A}SGvzz_Yv^Khir z2ejF5q--6i?@uW5FKmpmz`H22u^!l}4Gz{q-Sd%=!+4%Gd_4*rjE0xfp|L;(Ri|6g zTMwbl!+71o1093S+6H?ojuX`5N$5?6(ypU}l41Ehgo|waFChy*aB50@fZ;TB#bUT+ zGfz7g`f0&;pLs?DZ8aOY&=f0pFgoZNS8-yK)a98)Z_i*TqgSXdEgXH)gEOuK&JVD? zS8&>m$iYcK{#U5}V?18g0^lg%-5-yj5;RZ%P6_9kyYNH}d8R0yx+eH85AGK7>__<6 zf{YdO2)5I$J_cwHM)UqgzvjY+Peyd?9CeZFh>%!_EB&IbYYEb@Auw%@+!NE=DgfCF zSRrE5%rIzk5ByzHO6o;MyPT4FY$S z(JqyNZYLyKK0K%5R072k`S%mS{ltXN>v&Kr$*Y{D*2_*5;Rn&tN6d6J11Y#;7Pzts z(b-a5MQmuP#FYfE_Qm$j$wymgFC~$vEa=qx#FAV@1D#|;4-!9b%^ZnhWC>NG5b5#M z<9Lbcql(1fuLZI}qjM~stgWSSa%1Hrd&&0o9IdtGQPh8|J(OB1>QS{1$`BIs|gJx67kGLw#80V^bVQK$J|Aw z-e4P%pWjSBI*%v+2f4rc=!a!cd^>6+WMUJm0v)IjS`AjF0K-4&tt`d1%;m&4(qP%u z1P|$fcn-L=9h@R&c+P-ZyF>Gfp_P98JeEAk5i;{bsT!WolOLfHVjfwtKY5iQ4iwL% z!M6NP7n|=9Dg@hO9nwvmP;v zu8BV%xwuSx6lP*&V`^jS1*X!8)K{t^_h1jqI;OX7SJta5*ftYwUuxfKA7anXmh29; z47OuhoYqfW!Cbhk%>KO36v?{O*%|Otd~i7Pb0!i=FVAGVy7ag2FlUhCeL_~c7;)Y7 z%uy^yY_}58=OuV-z0qGA;oYOm@vKUP^<1(~>zN)fl2~+6vL!#z7OSvy9`Kx%sl`fU zHbw$)84&0Ur2k-I?i(iNo?`m#PyZ)s1HSrG@r?6<`E+8WR|6LKpaoFALCC)Hoa-=gk3(>7 zHMr{`a|kz+`!<=wl?-}mNX>Xn_-_bR2}xL95)&hn6Z04XrR^ZDa2|g#0nc>{x+ZY$1y4;^9!uKCbe&C#{D@K1tN`q!W* zb^uZ6hSd4=V?Jp&vW~@xwtgXEvKK8e9jjv!+F=lo8h|fS15J<)-}D7?coDIRKhZb6 z;g!dDdpWTS(_?KH<*RsX+)CJg1CTvK`Rm@mu{A$Qg;gHL)5T+_w1*m6BK3=7rHlB| zE2!lY9=-4@-}3(tXzvjA#yuiAYtVe-d5+D<{N704(ddS8;G;Ov-zR|^(8V#N2Q$?0 z1YS}DcN`2|t1d*Z?8S}{eX5I~mZf~2 ziFBF*-i5yX8)?y(|4-t%6vCSqaRnVs?&qn)h_n4fW)4S3lqS<=5^?uZ`}G4F3?tGo z1bSl@z9j?jokA~moV6rXCB>v?QV*uhRb_VUQ>JkCl<%|8{e`@mU1II!Hq2zb%@&G( zrL|03-XJxjLi81SJQ?>b4c!KL0vGaaCfvH0IR8FsAEwiVeT=V`@%3Evnh)8ZlIpte zp$W**=(E4DmD0dXsewdeq{0ELBp;E5ti-2tK^aZ3U+zH_VZ^`!9ti#>EvE`P7g(6fwl}CQoL!y?(lKl%PZbLp_LNDyb(m4WTMgCAmHmpQD zR7NlKC1272jnj_#(Cg^7-pp3wK12qyAW3vG&0n$h=3yD6L>FCwW0!H&hD7=7!1ORx zxMj(QBxA2|BGpM{$*fPH?&t)%cME)Z4xC@6PWUFe_5~bJ7r*Hbq__?3aE-q{%sbwr zy%aL}1&J~ifkPT$6)(er5cTT0vBk<`M~x>AzXok^1YG$YR&8u?fq>9wf^d8wYV10yjpJUnUWx9V2{`v^2txF6~O#QGK<66P27e#DDS9v zsm^os5A31RIKgbs43-b{cub}aQ1=f6yIbHM5#K0*T-!qwVjK2e4*a<(_&75-c>=t% z4%}<_T66H$o}n3nfgB-SSe(xlv6=?LT`RC6GeB=?==WIy4n_fyMCiW++Uf~Vxd#1L zK$DjOQ<3mQ3>N(@IQ9{oY+{cd;_4z+cpTdx6ZV~mfvkg`7!HCp=%E6o9#xYA=&;Cw z90j4E-jDmX54*CVsO5T1kr+Pz7oz zf!)&|Eg?2m=7ZPbky>?tp4gr|0G+!Fi@6=S@D)&d4*Z-foGA$_a3|Ti@W54SAtnD_ zHog5|1J`CWYfqvssjQ3SIqC?_s!dTAGi$KCmch}InRL6ICzwaq(e};O-TuN>Q_H0^ zry~C!AVLHh?R61*v`}yh^QqbxRrEeoR=qNZV|@%^!_Xe{ga20`UvLBU+2@$ox|~SJ z8en}592E!iE%04yLX~&Xgav@lbM#|=G{fsa2fVhdSauWXwmKRRJ5zTe%f$ZpD`4jV zHexhU^0e0D$n8q_PFq{uc;&bo5^c#{A;o4vM|=eSnF+T7B+e8X0CmAq9?`KCE!B}yoZM7B9q5sFFfX2 zI!`hmS>F~Za}aEN;kjV?f;vEJuesMDv_v_+JB*hs z_6*+yMsu+=TRC7K`7Gt>i%l`x^*AG1I3(O7n!`G@0#6Ez>h2VHD=PV9yrlk5a97%Bnn_vadQYmt*5&~X)rQaQdX^SQaC*PcvuSam| z8tD21RN^I3|1y)B(3w<^7C_zz?X!>?DGgiu8`Ep5sC#V>?XB%oHBJ4d%-8;QoC@0= zz9np~V~Q-{NG<~+8= zL1rrS1ac{Zov6jO1PUSPocR6cz*vRgG~}GvlTi*&trRsPso{iWfwJg&=Bp6Ju~3>1TA=|2@u{N(4f;n z=*?qLRDL*lFTThCuHKipjg6e`Og0LfpnA3?9W7m$A-|WJg3MU^rRnH(v3=n4x=>OzK zSJ-<~@$}f^Mm>x{MOYMG(@6ZApZ;)aloV=Jeg5S5fxUU(YNTQYtk%3})#liqeYoCT z?1S%tlAPld)clL*-i*e%8nSl$$fDWEx!‎B@%(x7he3Lf!S<|1CFR{A70S5Zio zLHMy1=y%DAxA;9U)N)Entv=EQ+a7A?)OJj*-e{ZUcqmtk;nSa{+So%~&p5g}60k~2v%TpORjgI0{Lh3; z*g+M5J@^F+uwdW^VU9vX9rECtMDWD>1Fd+{E#&VPVpV-4Ryh+3EhoN~1$kT+EAI|k zZ7JE#9ef^$MLPsf~;ppcq;A1#Z1A1N1=0Ax| zW`)P+vq$12F@+xReOBsr@^FtbY=?M9EJ!8y+6@gb6&vsmDmn&Y6_iEGDnz>PV1e() z{=W?kj{(*#;Jl{T=lM8GUt&SGkji(V6`|AC(}^__?f)3mNB2r~=ESeZcgn#0$G_qGWk{~o_-H4v%z zGZfJUui!LZ$Z^io3%-9FxP_IK20OPY+VmIFZ#dj#G6!`IGPW=r{}WvuMYZSmzyv1R z%#f}q%e00ztIf&GE!x3l!jNxr6lsQ;-Q!#33%xZLtG;){FhJl6sJQ=)Q{R zhpupZTI`*6*#7I`b5WDljjRb19MQNZsCj!yT(vSjUu7))1n^LwXkHw4*JpI^B_g*n z_WoO--HvLpPUymo_|Kc+ftAR554n@&L6)s}S@mToc9kTW%IZ-)qO`fR?9{m;c*~@^6Ez=+G@(_N_NMwCkB1%=z5n==NQGU7# zi8+tojDa&t1>bPrDrlA8ey=|*eLDy7HIl$VJAV2mP}ovbDz7|KcWNEAnQE*$QQfRn zw6Ad#aXxU=bL_B3*n8OyvRC3gbNXjt>1UCOT8E(fMLvB7867|IpA1wpZ%4L_qfaP@ ze~Ld1)O(xQgA|yDH8aY8lzG`1i9tLK6ogY^sRz6OZ2Zu|O+3NAROwz1RKmJ>K@`~! zM(1PW%tkxELbH{?POAa$?t~6Zy8iAHk6DJ7kO!DBXzyO~Oj~g;kkTrvdC5}b{9p?UR!2T%gsTstd#z7GY*ngj~kni)f3qx_yOxR_& zkwR0jhL#YWGLhH$z=4U!PYsC0w04rKD5I5l_ElSy`N{%yl9t2P)mG7#)%IL#uU%8q zs&ke0N^#{Q`*r#=^FJ+plefS^LG}zN!Lj&}w!j1I$lX+Md;E3KStHOXkKp)ANSqsV zb^Jn`HQ^aIK-I(XU^amNt$aTN8$rXD8HOEp1nj>?b0!g45xe(9O;lwfGJWC00x47EZo%e!uz#C4|6M5I4)UP{ai$zp$N%JxVdw@69)&>^Fp&<`r%0tS z(1MUU@9-s-gMS5ScoC`loVedZm%{9g3x|5K5CGmsDocR1`oCi^qm^-Y#|=$G|ssSo?64_X+WqpoY)xJ*$3B5;3;b{ zZ~D0PB|B#NNLl5(avNngouUhw%Un`vL09h^;)2=KXUbsZyj)s-&t`x(QW|O?AG2BM zHgTd;*lS{|+G?=28+gpe!pV$I7d@yg_&ScR+$HpRbS1{{2n*yCwc#`TYv6&(KxqdQ zx)_cb%#)WV8{LiAM{1zf9xRIZ*#cy_i`oE@zqJ#W6W9xb8@oUqTZsA`fEH#V2O9Il zm9Ud)LlZU76Y;$2!n6I5+<(Ehefg+K^fUuiBGKs6Fya6b^iT^4vkDrxMpWq?_%CW5 zk4?CM=?o*W2wPzZw8A&ch)pQ!`UWFqMD1Kg&hi*O90)WEqnXoT?FZQla2$;F!TPKX z)epiqV$vjfvMHPri={1N;U(jh3G1sie%S)F!V+xpf!JXg$X|%fip`MOC$aqQVCkJf znodJr{|CjBns`fQB-|;hK@a|PI>0hg<@}2-?s3e7{~M0)$Mo|P%1b4nY*1<| zigJ-1^1Yc>9!0!wkn|Fmyanbhz=;bhzfSNea;zZF=fS!xMYJoDXz)5bjrDXpT;pRI z6RQ{am-4fl#KH3uhwgzTGL`!81zFe+p<DdYC?;o6XL}SsVIw+0$G(@X zUy!5a}|iBL}_UfTfl_$>7D3LtwH55tH4j|Pt({(QR{qZl&DZ3dMfr<#$Vn&uXMhf)%+Pol^g{`#ycLx!^Uz=SiJrO8 zY|%{S$%)s~oet_ROr6Tc)|eALpqbqa9xdp5X)d0#W$SMbSVFv1lsc ztBQ!}6s-0qaBn`&niu(6hUfVcj7;Q?Bfv{DvTc*F*I$tPw-TGVi5)(H=jnjd8-YDH z6bz2T8tBM-TOpwfA=kfC4{`{|PT>0E@YW~snu`Y9#^*6W`2bKBuNxs>*+ncb3LeP; zq`GpwE#Utk+GQ;#m%5 zRBvQcKh7`=${b1@s0~zH6k2YGbS#YT(vB0($9q}q0xR*qY_R%aWQx& z97|Tr2~w?>LbV@T(3>0iiyeGk!@KujTaO3k!lFu#OwGfGm>U`keHACJoFb%opF{n7 z@UvEcvzgcwtB9@4$NHQCbmyaMW}~6j;~}r(ce~)P9pqPzK-Eim=O!fBF>rDPd(VRO zD$E`7K~YV>+(fdgo3X&oV2ORVq=K(YP)Q%)>-$u@{f5I|l8ZgaQ%(jF&7kWH(E1yo zb{lQDoog@Rz6*fp2z2sX&Nh~lts&;SnRAIcjD4ZAEr%XPQx`Olcdj8`DrN$@sKLn$ z-DLp#f6x*6kci#^qJuAxv%j(PuJQLXc!pJ6X*Za;&FAgpX^--pSCGapc+YG8f5LaW zfXo_xlYrH-7q5H|+;WP)xC<;q9k1}u#NS@v`~ARTIoIq5C5!-uVg}+ z_kkTdB{Q!mxZev+hoJ2Vv!EDE4kb0A}qA!|PKJR*Bk*V2rr ze+SDCEZ9g<5eFSkAf|bnmm6#T6ZxKnM3(aL>WFn4g*1E!_rD8dB!iWj*u=}gAo4li z;lDyWe^V&AJsNfq_%90Nt02J|Qh!qlI(-bKPXgy%$>XP|?=%l_xLQ^(7TOP{3N#=- zSJ-+R9b6e0h)uldk-cNW};9m%tx98urB3_El<^m_>ToJ9PkEV6$EuLaoFi+IloB9FZ} z>%ZjUQ)3mTg*xY;f1Gq$Wh2s53R`I-yjY0rbt&s7{Fh$H=<`sls3)I=Bs@l(zcPN} zDy)Ii_{EQb*-_#LZO|a$XvgJ9nOsQO9?yC5h0~9%yktPtga7yAn#%@r->?n2je+jkKMBth;NGkPgq0txM1o zVZ>NP=lf=!Y$#V2mCq00lpNR-$ACc-?8+5kO`I_=a_=LSsze;L6}l}q7HcK& zoS*M|1M3aI^$OB=1(^TNby9=zlE~^VNY*0gvsg}?A9@oxb~}eT z8&8m%&!0opcvm^qb!7T2PPH0dToaO9$Do3rRA)N5`!Bfn1F!F)Ji48q1UcbX^o7VJ z7T`|dz%T$L74GmHIU&6C**xz$t~Uvo9|%dnc~HzMUWd5eXPzS&=V%V~pCT$HUScw= zs6-eD7tTj6kAge9bAof=?IIH4BvL<2v+NW&sTZ0w4t*2ND;yu>9kZYQ zL)Shde#j;cthDN2qdXd@Bv|Uf_oeV!yYrQ(7XBMqnZT1Zrj~9Gm^bP2y-(&VoAlNC z!U-J6)*jp~P(9OVs&^Nw^2lg+jlHd6HqAs2Co{ zL{2>f&Rc`Ok_FtXf;R@kFEgq}VmYMEg^eWkkBa*0^n4u$ z?n`3(SBEk(LT$g0=MRZHi8}i5kfe3uH=W0`JBJJw)NzU{@8g;`@w8L(H>I&m>O-rg zxK26j!?s{-Fqo4a1r=XWd zz;As>OJ3w8KZsi<5y^SZ>ps%{Ysk;ZOXWi*PF@sQR|2Y;fhD(;j}^!P;k*0;wG4yy zN8{yofZ{u%$D4DKEb~x}M(%?DwVJbCr_yMoU#~n3d8a}kxE+9D@ zoQbJ*lhEgDIHjQ3m%!&KpC9w_lk>gdCHkP)6oVW*f}YF&{Df<0G*$2E+3OLl$iMP(0=yC;G)n{zIBBgPINjk(F@I0_1qrP`!reQx#LA zxAOmZY_AP`y`5)yf-HN_-=;=>MKj?EeM8b1>hFn%OgaFM2_aLZzG0v;DwBJX8>e9J#?s zt^?Ec*cG+N&ZWTai-8Niq7yTO=vMUN*Wk>X!8e zaPpk1T?VE{f$up^cNq>9HpByXNObO8M;2TPc?b$NOEKU&0hlgE&%242|3IP~N8=yj zv|D(Jsn}LC_^*q&NVv@`F=`ysyS0WL@zY={>8KBu}_=i2fzv#Z22Q^&AUOj=_ zJPP#1*0A}Ux;?t67~EO`2@%IhQ{gjR0y@H4*vsn-TK^yH&*{hjQPDGx&-3w7=7e;Q zurR;zdIy~dJp9KQY|zUyq``ftKtw+dqRl@;wHD%@3R*Fd=Q#uK9_IfiK;R1RI0H-` z@Gd*2?#5Zy1IvlL#uG&E2 zh(6ziz|=Jd2oPIss`frlv07`yHI)GM`sK6LXOgqVMPg^m7y_ zFXLG^AwfiyhNz+W2(20Lh3M@SlY+}3zq%mHgof|MnfoKdg+;g`1mQktklxU}@c&xy zsskkz!}?1KLFpHKoDUd(H&P|Wa_5rB<1fIY7 z`}@e~gPe9CPct0qILtGMF3mk)PRuv|23`#82rrP02E*mypDO%c3SCeW3nm7-%n$B6 z!)MEZ?Ov$k5GU^pRpjD#!tRNODuh*E3GLAr|E?~SEV>hAUSGhu$r(jN>p#8<0{`bo zu=m{iEU?=F+(cjLYT$W;_dG)az67qb;X%<6x0BzE10VZ&9RTw4IQL^9Vv?&W!X4WK zE79#&4t^KD-XC~+nK^GmeqILuQ&?do`MwONYzq9F^L)wRoK*O0^^q4vd5U8EzBGT8 zoTo|7ncdK(s7C${)qaCoUxQV#;atQp#CuXfPpQF&L|*rBNTaTT24-8t*&7RRFwb|8^StAFuc5|F zoS%FfP!}~?xkGV|cy#wbXk-i&UWT8=03{LK@ql{|^l3+Ki0(w8+rrQiU!l@RSlptc zZ!|a-{_UiY{8)iD+r!!ShV+Ec6t_5+m{WWh*|vwzVlIDrB%ll0O%1JyUQ8XT5_0_` zSKSLeZGpZM@qrwn_q_!EU(l>p_(b%~Ch?ciSkVIdO`)O={NDq4SQ4Gv0SWUbFcWcu zilHc51u!R`vQS9B(=31on=@qRzXm3Pd%gn6XM7yv+`FM^As61mtyb=y5!%YZokX8( zC!~FDAn+Sl`@pQ=AQ8P6TMZ5Y^;N+CE>G|rNZ-O9SqX=nNAjEm`X0{g0qY(iP_%mEed}$kPu{+oO=SO62bXA)7(Sfj3+$64{a$2)uy)zCg=` zp_j6puLrRHo6}W5V)jBZihOJ{{_le&GnLl>o-ZdfBWV5=*t`tq3S5uFBN&E$UJi6b z7w#nF^LF&oN$|cA{kjA?n$G8iy!(H>P#N$OQ=(JKaOxJotvPpZ&Gkjrq!M(M0j%m^ zMRdZeaJZPFa}*dIe`28&4JraL%Fgj=l&^X6)iVZutxN2SWN-s{o4lTGW6uavL3`+pZ+}uAu)KnRI zF2<<@rv88)it@~5`HkqLjzyk`y%ZX_5j&XN)bADI@9RLDO|Ss|2CvzHcMoW0CK3N7 zc+kzcN?xv;3F;KOQ`G2xf#w{=vdrA)W?W>Wd@#YkYx4YuDU=jBXEi1%BhjDBD&(? zT_3P}T;Mwj+BNvghdj{3B3Y7x~QjECDAYN-guGXeebP(fvML?)i%J`g?yC!Pf#SNZNX-|gf3pYTLF zt`!N44X8kPnNB2nHn=p1PWXV%_{mR7b01NQoSpZDhdeSdv*0b~7ZZ}&g2%s!!S$f> zW*XRB1r4nRN+0=KlV^GZHdX%4PKU)4AeTrzdJ`ySIKItjPCX*zMQ*{56g{M(zDdkj zC;){uMN*3$5el^Q44JhN%Gm%kw!(3nfyhP9kcbCTh!a-fd52>!G=RRv+yy%@7nASG z^4l_8ry5eZDpwTNXj$+sxH%nHcJTb7LP8><@*MaI3qwYF{o?E@(2GQdr{aH^cW33g zKhar*;jflZOkF6aHfI&`KRNf4m`ig6+FlpJm#}u`LG|m=U9X`FKQ@K1Dt;pU1*9c- zRwK^#0F5SM>!o z>7W7;$M}G(N(GcEgy<@kCoK#=3dvImIA#Yv9%SlH@FOIvnE8E&Cpv>3eFUw&XUM5bcUyYsm8o$(sfI2rIlW=ZJYLTqgXM zB(Nkrt=mxZ6L6LYXI$Y~#JuDK$N;hNKtzZhLHEb`$ton;OyslJ__iMJK*XlEgQd-2 zXAkeW!kHg&rJKl*8=PEp)TH8>1@3=By+Rhe=lrLkxXZk*@$LY5(c$1i&{Yz8(XgE-I{=@|48<3h zgPjY&>mpZv1}%yhOTmz(F&wy;gyV!Al^2Z0fl;AdM3rt?zD`6wJ%UP3!|m6BxA3kv z0JklC{~US|@cG71S7L9U0!JR=!r9P{<>8Tdo-;Gwi8!|CK#t}4#NMGC{G=&xstTum z1nOsjsjyN`bLJ&Tlpc5q!;#tI)K`(&8uXrp_hx}Is_~ZtkQzOC4M6ud1o9owYAum| zHK3NXJV#C7FQ8kUCy9Z}M8@zoXL|^*iAYxhw6F=;^$*eBJVfRTkb|s-92>|_#&d!* zP@9e)P!vwX!&^YefD}=LTK>Qn56W-VvC`nA&5%%IqsOUWU;Sjtpbp8ym z@f3_nSXo(-LoK0_sl>>ZW2dY`-;V(XS+OL<{1*#4U&KVRLlGL+_wv)r@YN)=abN0H z2Y@%Rf8!ESmE%au<7D%5k^OOFql;Nxvx!jjfwsPLu4kO%9p|~i-9LbnL|_;LK1bkh zpTZR_Z~+6Czh`>;&t{zPyMFKb}5mNhh&%BY-7*I252=26iwk?Hfr$bV&` z4kQhAPxq*Eq7#amh}!;A%s-q@mBcK*xBAbTPJgFBHS0I!yKSrEyW^BSoo#?N#rD`< z#Ie%;MLVg~k#`Y&FCq1}=CRBQ?4+Kep!r$9<}2cx>XQwt|8?LYJ6i5pep%1U4b@zl zSA9XHPCn+;ZIBnrubAxnCsF!6;PfMgdCkCHe-<{4?lYe0Df9^anqH2r^WMNgc%?Jl zZ1b#Tq#e>hsiE|R?us3BhHA{D4NxQWf*Gvg)+oz+>Qt9d({qg(o>lx0>H2+Zj`vrg zvpfzvumG`t3zcp?kg!wfswhX*$RBbf)4m5v!>KemAr+B^TaUt%%)J5H8}PbBFMcod z!8{`RF`+KlN?a#5xuvDRU37!q0{X>*kEoQ{OihIms2!r;&G3{2KYSn;*p#ebLnfxS zAV((CNtj6W!9eP3d{iaQ@XzL~`>1?hOKoy1e+z1GE>H`bf?9hERq>|32)*L1sne<$ z*ygY7Uu7OQB8}sEL4B2Pw=bD)_p$lo`*lDNGH}A z9?>i6V)VzT72$;(gQ%PQq?OX@DaR~^X)#jhIenGAhdoU_k>1L>=D!%IM%4#3xxvEL zX!O=HS`Z;~LQIG!l zPv&s5I8#h#`m+YNTjo&3zLV*$8>FN16y+xK+ULk8rOeVi>r=G;1|l6TiAT>2bn}-m zHyH1jdA-c9&;hoKs`HEf>w&XO5^88QtwW_nRQNoj=X0htOj;#KH*{vk zUTu2e3b50oAy|4%70WZ`6Z{8OA_9Hu|;|(`%+-M&@+?0P4jv&{Lk%+-0;jiWmir z8b&>%xba*sqVMpH_Vr*seuA%zK2)EhXE%Ns2h8PxtI`u&{_szcEu#8HEOkzE6blPP zG>SeQy)CkA*b4h@+a6oMHs6*_ZEmd`=uDT=dfy{&TW@-=->d68%$oirbG?~pwhRoi z#7n8=Bh)muw$_(ME334NwoSII+BNwCd~}TXbRA34;C=rgwsf>NCoiTz^*{JEC=pPY`V0LCh>p6NvEz&UgU*$R#bw}iCa&5T=b3}hLZT%3_*544RKOS69 zb@l^#{SMKQSBj4t&}JrTXfq&JzR->N1Y05=O_N}0M|a8v>m+c|$~q3L|FAx#FEOk2 z0#;^2VqF=7*#j;8*Uh%(e?||((D&){_2v2keX+hoKcQ!3mq?s>-R#B$QU%N2!gh@~ z=^r-m3}A}oFDR}NId&g;>s4r*RF;2&N2uYv08V0g&8fwxm*fT33c;TKTV@cbmS7{wBXgC7TXhomotq#r;9L_8MBIjoWN^ekQqM4(sBVvpV>7HAF5LlxW)Pq!Prj4#G?o^U8sAZ8~w#8#__-W!0#EQQV= zgKoTlb~}fL&1;!O1^y=>-=4|hCCpf6T%R)jG3FRojN0ZUxM>{yem;L0s)}C)+TlM= zp)RBVdE1WGLQ-jYqOw)(t1Z>qY2&n_wlen8%sqZ!KToGmd0S`O3foiLY}*g5n6^jF zpw41r%xE?Rbdw%h^I5B~t!orMTrDcyHU?h#)6hpc-5kSahD&f!GWr-ze`o3#%46|l zz@kY)lJ5jEk(~d(P*ub?vO6=OvO3s}$*CAF5Il|coJ}WLChAj~QGq{-UK}g)8j3Pq zH)yRX4VC&cm$*9As>luCp*hk{I#GUMIaCY==pr8v9-bqC^3iv^)*Qx;gt_KlW}NxX z*l27ABI(VMOdrWa6=F&tRuOGr_|pemNUUkprXKb`W_QwZdX&1NAwA}KbAXu}h-Nn% zn*EWqtC>-L!#@a#SCnX9FZ}LFd^E!&TnLU;vaJc&4e9739M0^>dC*;7WaJ{MH$DcU zsdX#OJKJGBfDHKQGmk_sQScq7aa4DEF0t4DD~B#cMIz z4>d{6uC>(4YWvh*%3sP5X0iNI?kdBTXyvqgocY90Ph|eIr>Vnp?7N%z4(aTKO5_3 zCwaFur~_|B*ZdGT@CY4si>TYN zS=ZsK-X}`Ykm+t`s9*O}|NWK|oMIQ%aAam$ro&u9W0W!zjcdjY;~>~tXv{Q58bkR1 znDO5DZ4@ylnTO4Ay2Gn6brVQH5^Tt z9o#fy{!MxHi#k!eqm{I6wxzM}wbyr?cDNj$9Ge_%9d^eCdrJFCTPZr|8fo*@@5&#_ zGG?c2k=nECWCC-9IuHY$f`vN-tN9=5g>wX7QrX>>>i$&hK{yI_viLu;*<`TU73!P7 z#*7V2%ZWt>Hp7ZJNUG$uuVm`+x=%gAQsaMg5 zBg~p+fUO%H45zVGZ?5OobLoHRQ*@s`&PXz9anes_a(^Wz2<-MJ`YZA*TY>s^p#KU! z@daM;)jEE<5dD7$yojx6`*_C@IN%+%Vn3;L?nk|U8tAYwm~aI|y+{N66w%o~37@Md z^&mQX3$9wLf`vq>8hb{*$_ zEHc;78Gp+B!sHSw+okHkH^Y(2{rnT5+xgVBFQnS}D0p~9S9Kz?Bn=f@hXUED#yO45 z*NDhWVG2?Sb}lGVJX~2+O2+1#E=&;bBV|O2&ZN6=82!r`sC(Ir&r+Mez`n+p8jXZ0 zLj`d^YO4GDqy6X6e{D>kG2Zxt9RyePqxwO83)W9HJ(Hea|5Kl-f6yx#OAH%SbHt2* zj_#qA3PIOB>8RKfaFSJwfDW~GVRPqSM24H^r#i0GvNsm z;wYA(2l+UZTIIS(?;=zb^s=UrHcHjy6LNAk0+nFtbb{8}CfmE&N3tz$k$tiKxc#2p zMgRB?zAnL_gjcrRwm!CWwrg4?ZMEuChAMXDk$hGjLl4P$sS;3_N?*kn^jmt%80cXt zU4dumkBVnz(Ia+&WH28Xi_whvjV!!M7;#2kBfXKsXlAT15{+`^zsP9?pQt5r`#Q5L zND@^yjRfj3(w@Hdp0BGfrl|(IkA3Nl?*tcseUxkb{vr z1<>TTn3w(-xU2Mq8_-lb?7$k};6A;yHiR`LFd!-GxeX%vP z&$J)2r*h19Bst1Dn>m|1OE}$*v+OZw&nBBc9oZar?Dg$SY;U#Z+AnpzT3LOi^iX!n zS0xv6UC`iJVigy8(h7ldc&nNGAJ}<38LQ%{vCilY>YmyZh`fl`}6C(rzYm?C!6 zY+-&irW%Et@9oBE$~hB4e$-aTUhLS zUl*8B4NTf3nl$#hNbA1Oh$~LH&S~J-sni`Xj?KFqN;RV z>LM3X(yOVo6IvtN30qqGFnf^R$@0!^bk81kZgBqX%;)^*xaYXzSm)^M@F9^d+fvwO zXib@&qN{9&QpzfQ*#JiM?`c)@D$#_0JNg&f6qj%1!ih<7MlQnLs4G60PwG8 zWCZqK^l+mv8|S7Pv)L6m%9wBLXUj}9w)ro!H60cg*>9E`4JWn|4<%;hCMMT{>J$r8 z%KM`O7IDXe%sHzmZDD_7q7=p)m;Ulfws0~)O;(f-@&#mVWj46|knTzA&>+9y)nuG; z3GtVr%w0_u9EtzDiH&eykWLqjGR9?eM3V24Z-}qFFT2n0ec`?3ed0aueeKQdYX%+; zf|v8YPrkDHHr>?ILr;6O6WdoE&j?=rF_z+=DELWV%~W*s*TDLI?Qcxy z$2I!3^I>~zAd;AZ`0rkHT4Ato%9;*)ZG+^I8p*Td8}f7ZquH>tb1J2jib{4x$Lbg* z*9z?!*e{J|{+cG8fG-s5D&npQMDq-+w&nzzSNh}p33z1tk&vhK?s|2-upXn|@Xh!2 z^u_yf_)_^CzDQp>zV74e=Iibo{-ub$M-+g{rDJ2ERD~>UaVUBH%Gmf#26ZSK4#^CVgiGqWs8PS3omy})z;yLOJrqm(XcK&__vd5!|= zPi*RU%4VeqTYU1%t>hna6D6gRQNCeaObqlQam%)3p$g&?uV5qSBB(y6{|TN!b~DCI zZx+RWn*%-jp^IYrHeUf>k~arYiv7MvzRAG9g?FW=lV_ACn|GV{uy>_5);rP@aG!9i zp33}mfv37>k2{|`gS(99lCP%!Z_91#2Fq6eLo|E2z%iwDSjwonk;$De)V9hlZDiQ) zsOV%p`QQydBqRDtVtJ!PS3GeNadD+;g$LmqXb#qprNN_}8 zrzsn)^!IuN^RWL;pe@n8VsxT+#A@!39np{Y@H67Ud&o?DV?OdrOEz|J)Mn$jm@oH| zC|7U!9hPSXYm#NDbqLcWCAR#gz!u9xWKg5C?3lEKct|O{y04N4nZF5%{~rHV#}2%T z99Dx~dL$Ex&22HuViQ!sU%qLsG&4gZ3yBNeAvX8R=kYbsPwIE{F8WZt1S2nj-%nnBC16^3cnT6D(Y9%y2x8$SsdH!H=VsBE=RjsCa*1UIwo%mW^C)rYojIO7tPBag7~)8>!u1&vesI?0{*^?34>s&^@(?jWv0! zmtIeH=r=oYM{@UNp(#JY7Xf?YbiT?9{R^B>)L?neDa@^H! zs#`S0e%D^Y;c!+CZxD4YIw}fzt&Wuj%av@eoiSmh>@$?wQi3!VC|8#K!Fp_6Iulqx zheC#6UvsAM&8!n#WvLW2jIzG>-rC0fz*oGQSAlX&oG5_iz6Cz=U>Am2Mo7Ex!{`BK z6HWhMA@&@(iDvhtdwexfzT9k7m6#1qj@0_i`W^ePG1-domP*9hdRY6giElMD{s%tp z1mL!sn4XEBC0Y6dewD~PZG9%Sw*Kv}uE**{{NJU<_GV!h!$vtrIr=#YvA?;cZIRa1HpzJ^ zvRSe{(c{Atwe|9DIZ7L3pKTwdHBg$#vSLy1$*U~I{d0_=CXq%aqCYc}=}mpPjf?)C z!3BX>)9btIi!*F^?1TNfdC7k>_}W@Qs%6>aFJs2|?*zLs5Acq(oSu>O)>CwjH;`hi z4S>*8DXr2@VQ-Fgr=>2D>6Y>Zsf)ERnZ~o0>()ACG>#KZ5Sv>ftX?MC%nD2kma`ht zLV1T|v-S;6CnAv^Pv{bGziH`$W}eE#{7LwV1M$xP#1_jMyh*ogCp7gnEQO-}H$-Xb z6CNWJH*y*W(?_>R`?~OMq?GY=(`-LrVjI?dlzT3KmO^f=KtVptB z5z`zuG#isrGio{2+{!a~mePrRu1VIafeA(w+eANOiJdSEZ(+|q&t;#@yk$N&#^{%Q zVR~IX==Sde*YgT2x-GNNNp5l1(-srNL^BlD4rc@3!<=?#2G}zES~eFR1V_k$^VV7R1~K zS{DpT}eC+hEH)sh{K+!(gR5$`DH z9OO*2uheeiF@`DkPDzyc66nU62v#MkLWq*?{q`^V#{95R+0e;N@`;8}gV-im#K_k1UP zH+&y_E1^i2r=cf@r>7^acbE5$cc1q!Z)@))Z!&LnPkVQ)d#q=WK0R>CdO|vBRhZ=e zBKXTXO&P6~v6pbx4KEYfA?j6R;fUE`eZ$s=w~o9Xl^8WQ@?7}CuwP+0!{3H&cOJ9% zw0+SAXzA4*(vzT=LNGb_ANG8^z!amP&*fR^`NK2bozZ>FUC$e%Kh#a%1CP(O&z0TN z-B-*w$#&~F_kDzP|5%67^N+SnA-^RmRHs;P$*HyD zwr}=v&W7QcB6~#*i<%g@J>o>fiAXj2Y4m_-Uu4&arQy%RD@1$>pC6Xs=wyGy&evG& zpnTo3p4hXEdaH54*g#=(8rEq(-(2rD&sX;&cXv+(?>cXocf7l>tEg+6`;9k2Z_4)I zhI(23IQgsg#v=9}U-usgHlzo8IQrN@-rGhz_^JF^zAS%|dy_p1Q_m{hm6l3^GD20+GTWGA)XS=>Emjq^m$FaxV@aR4u4bCAN~cyVnbuiE*1rq|6!H31L9wNE zP~cxSjt?Yvdx$(;T{8hoTQk^diS7Euw+H{=tGAZ-v*&g-zQn&TSbs_V+;+Uy$T>942q-wJdms&qEMEClOtIZ27s7TcCMl7%agVlbZ{svS8b@>S%z$b^Wo;rGIBhP4m7>A0sgkT+q|%%R`2f|bs? zzy@Sx8soSxjW@yF-`&P@$y?jk#5=|vOxl{Xz*WKXulIxZk@u~4n6ES*S!<-~J)hP1 z&zwwW>c7lcI^cgFcxf4nmslQ8cAlJ4NK$s+mSn=gQnj${uC0e{jFzB|Q8%kk)fehE zb)Ra}st|LpuVzqMlC7Hp4*p?>c|B`$EP^mH6(^YAn4frQYce|d0$KcZ*_7MTR70L( z9kZFSMX#;PcmQ>LvA#UMqCPuxaLW_z8Q@Oi4!Z8U7P-p19InSnTapeY?M%9o^dRYO z(ut%AN#BzGcAxZCFvbRx*k*%W&MYoJD8C5W1Sy+y+@nOTD%thK-ITw3q%g9CK+1A)#ZhuSiXOj6w zFYAl;hU1TS^WFAl@w9iXP3q>#>{;f`=)3A|>Ye4C@5;4RB!qGU6$kRC|WTmULZ3nQ- zrM6T{D4se}xAN79L zf1AIFUVig854;HGm3k`qw4Jsn$5rR+@ZOP^B0ERsk6asBIyznQv@uJPhbP+@RWRyQ z)QRXv(TAdnM;46O7M?!*va_r`sGOFXN)_eY%3b+^wP-Mdzaji`-`J$5@j1Mbce;0h zx07dr%bR#9F|R9^r?)qqH_4qI4b;!G#G4hb+wVQ&t8aWXr;$^VP2vOoC}i$Msw?U< zTc)m5UyeZ*=7qN-)VFFMZL-!{%c7;%W@+_oM{OHz>22LK6;Ju7_Dox$eOJ4xG3qB~ za}1XMlol`rTWtM~wYDG=m_+T#zf9C<9e9av+RMBhiraV4*ZHbaEi%-%(O1e>!s~UH zb*FNdbJuj+-CbR;puz-T_dRh$VztB?iQ5yyk|L8565|qg{LY(L!!_Ew)L20*ueASf zv$lU>Fj{J$q|nCMaybTuEr~c8xh*nzx zWZ$C7MrMu}9o{@F(f&YPEN7Q1Gi(01`cmF%+3eSemwY!aWA_g7{o|Fr7d&M>CEUN0 z{z&?fG~1outFk#em1jTLPU~*uneQFvOQnw?*3zGR-g07i3bFYn0lG8sV_s7S(vrMl zQ%P0Et3@>v3F%ZTsRz`G+7iuS8(_<1Uv3}4Wc!0!O6{}SPy4O;$Q`s(3n_WziZ?U=3II35LCadaIberDZH^w^!%`(>+-dxWUEEh6ZN&R)2aj7aGS4>?vmNQgwA7Ur z$nzAhVp9()HI+o!C!bPGwXW?SduGRPdzk&Dc1ta)PE+e?-?b_>U8_kvr-U*}&Ln3; zw~r=n)kkV;HG&(MgLa#lH|?;Jlt3weelx;ItxqRPmnh+%?aJXQ>?-1F=b8n_mvwDSDw7nSv^lA*Yq>kxn~&O!_kjvjg*FQ& zQ1x@k(n0BF-x?MlF%ZkLR(QSeb`eU{8R+qQ@;Axjlh;nxBl=v_$f)vBFYxn|!uN%Z zaCUTLvX9gflv46cX(D;p3({Ka+F&ADJcVzdceA&audx1^Xh?s(kZ+ZzihHBW>2Bs( z>mBWTPpor>Z=`RVPxmz@x}2X_c(hsAY-)}(Uzv@Gzz)O5PsZGe^?30om{{;1z6?7; zr8;t9<)<=C-K$wSf`{~bY45K z=qnK!c}tANW_?7~$VXPQdtf6m(2QuZ+G4*q?UYPdsocnTv0F4Dqx*Ms_pGUoF67IZHB5 z-io;673&!(tFlS`sD;}v*uUEk+LzcT**DoEspE)p)^j#;mT{hNv~dLOx9y2`FTXi& zo2SL9*W`3;ww@(N5!+lN@1d5B+U?+8GP7l<;>%0z>r&!8N?<8ksj{pos}#GcsuQvITgnm4GfSXKZH|;d z8fQIE4QB$q4b6x{B{Avt5k7knvoDrX6FsjUsekp2^_kxO-mjh&o_U^|K(n`ZKQYNN zzQMkqzW!u|Dq=MkB`Y_BjG~o1)j%>M37oozf3E)pr=ElFWub#&8uh|M=?~mU?L%QI zhe}g}bXo4N6jHCMKcJD->N54QT1e}!4b!S>n)X4xiM{e1i>0>ug!ya^Ws6)-PA+?; zXt|+0L@p;EC8{wDdU*@=c`Rv|<51d~+4_WPyE){mazso-l1d`r!iso0s7|x z^(x7kr#u~c-@v4%L(FC9M;`RMr809?4Kktk@I%v+U+YT_t|C#Xr{q1>QjaToTOU%# zyqWBHak~6hlimK}-$9;i1%Bfdyy%mBHIrO(Z{qSZ$xrSl|GI@rzHek-Gc#MQ0~7PE zGTpK_v*o)%Id|AO^N1MsKSZ!}qR1tUoBANVvfh%as6j@4b1{+5{pLBdFm)ZL1Gn%F zi{c;eBWpeg8@jNRQVz1_|6p1cg&BSM#ldKzC%+p%zR}$MS~y3 z56eN+WhxblgRmbL60_Rmi|{SQrYwW)mew2R9Yma?moFFc#YG+3SftB!bF9B(pb?YU zzgqf1Eor1z(iM5Ha#%fvHfdw4Y%5`_VmoQ;ZO`FIaO8HTL$;K4+_W#VuOiFV%F)MR z+T-nGY>%`*iM~{%Rwe~|h+bpiKbH*Xt0^(Lu}nRhZDBhOlSqpbKOPNb5AyrX&gKCl ztFcQTtdG(!>FL-6-}V1kIt#ccwzh%K%zdc@ zR!r;;#Kdk86*gzS|1*B>4_tO<&YbgfoMZw?=W#kS6VLBP{$Z}`0nyK< z_)D9}Hivj>k!|rI>#~v^`of4qy)yo0Z@B!#Qjd{MJF6A5Ot-wZ)L}2pE7o^p75r?4 zZB&dx51Wzkg`lM*YpV6L^^$cBxu-)!!6$0>*y(6H6<$HgVQRJKvm5&gBCzQ_FWqa9 zcn4frU3;B9iENK>u5(^>ej$5y%-P)ehU{izM`=f>~r<8A}RvjZ=??RHU{lFtva{zdL%o~?tIMhKMg!2`hN8t<7@Hl=rhzi+`FGw7B7FVE4Jab zb5@t7qGcr6%+*wFJ)|nWnb8XhKR`|?#dU`aWj~kh{KL5sy}!~vhxo>MyXJ`CFN0j+ zM|)Yv21kD9C1+XJG}kEdX}iqXcBEFfJ<|6d_}hXH``MU{Je!6Ey~oU9X4BS_M_x&U zMj>nYpJfH-bSGHSlihw~ZDkv0>%b1C@2D6Fv<7UD9yIMb`|7lY_{$@(7PgalF64-{Z?!KY3NZ-Jr7F3IMfMVoP^aPS@6Mw4q)tDx zkxKTzqVZ5G;r*B2CjZ5L^L?g!SMq7*Hzr_N;MBnX@IWdBJPl|TXkf>5@_*vD6X<{P z>EzuOJFC2RX&*nI5ni_~RLN0GzmA=I7SWee7_C|Z%Qc^8H&SIhbqWctG`O)1^&tn` z2a%&+-K(MdhEUUM=Vi`lNcC)?Mn|V6ZWI!+nWq&My>bG{N4+8S2E)i7JSIxsi0X0K z4<#L1RUz+vUyIOEIAKsu8()dPzCj(2v@eRTwW@pRaw!W8w{q= z0m0Wp+#yFo0zy}Xj>6j>p00G@1pjM()BLLXPVlN}i6nM?gveH??Sz`q)5%dYZA#i# zd!nO+vybzoQ)j-Rj#FtdX?g7t&IH#?cNVu)f|T=2HoT|>TW93aN3&z%bN!ZaR9Q#` z_({zm@6c2a@YM6{(`yiSY)M?KA60~T$*T*5UW`L0-IbA0`KWmsreL=OE`zs*Qe;Gd>0<7sC>| ze}XdK@QGkpJ~5uf9gnZHlt$Xmh%-6xt`Pm7>;_*iBygab#@_k?1yfcg6*`3 zT=ZY|A!!xTV(fh#FPvjMU6p^eMz(rhC2b>#(T3Za`|e0rFT7*K(eSXa$-#NkJxNz4 z>B%#cs99Tc#dFA<=4+S%cZ^iIrC?5YHHdj z$028k>o&80;)rnsGoR=9wadu1<;R}MZn%}{SX`4eP6*@dny^s9ujzx*Vk zFvG~$+z9P#1J6pFI*vGOzQ!)Bg<|~0z*s4UF6ncB4Q>+|$ z_de_caDi?;4a|Bg>&X_nsU-GsM`0EGaNi_f9*b;Ug!bw~XNTRav8YPKHnVG;bDHyt zvp4>7XMB?_u8ppjt`=yrw?wU+?&h4=^?(YCL{6JgsX{8GFQ68_Di+g6;;TCuYl`Ee zy|{g@{f8sUmBTYh8Ed&s?CiL$vL#Hdt!4Fo9+)Y7az;zUiqJhlSp&QS(gL5S8x~wW z_-eYPLCXW4``+>jw64->k-;c$ly&!Z_}eQu7Py|f@45Qg)BViwy>D`!lutirrgd_3 za_)8>rJ}92qo8A;^NTyt=mJG&wrrw4rK)v}t+!WBuT9okmh(hpT3Y*YMnQkuaBD6L zn%>Au3@NLzmfTV-XEg*GpE+qP7^{dAys7IN3p|grd-GH*>&IG@Wj%3Hji|w3b)ELW z(wN=UmrTPh(>^b!HQkvWAPguD|p{Mm2R2wm^1r3k}t&RFdVS=dA{D?wM5igwO?5 zfiA_D)C3P>k9^%d)N@|v>{YU|{n)1|3K`goy);{JzO48$R<$F&Z+o#DhfsUm!gxj& z%t{<#4pk=Sz||nv4(CY6W&4S=$@q9@Q@f_#`&BtLDD7|i9M?oWz1r52&vI4Wjg{Zc zQr0&%cuROl26t$OptXJ#ecSt02q+%diL7kAe`UXT?<3ZpU}`Y7={Zj&cL`^*y%zkO z8!Kdxqe$wJA2WXR{x!rt)s^CzM^|cbvZJ}^i<+by(W=|pc-QyI=u_M~hxbzN?mmxv z2K#)VqJIss!aG_R(SDKsoG)gK@zf-u^3nCyotbz@H@%OimivOMsJp*smoBw?$EcDW zz~VBDh_?Dr-fTVKg^(tYmwW}UBWfkdEU{Ds;^Iu zP0m`bj#S`TTp7XE4|f5whE=HI`kOrgqSY;0Kg(09ZN8ZGHKQ%5mJe2|*E+8SWTmHC z3qx^X)IT+{HYTIzZ#kjP<;3J`WN>fm0qDm1dNa0cW!RB$lirzAUbeb-p=CEwU7F3k0f||e?xMSnxef<9E5e?V zIw`eoS`m9Xdm;N8`zw2-W3}^`$43pdzNSWHmL*XguQlA zMgR2ye*`WH3=Ev)ALx76Hc5ML9ObNuXd*40$c$&PkFeiyd~m*Tj&sycd;2R}>f^Kn zPP?aza!mbBJ#a>~9lGM6YPW3mTH{m1x2;dS*KAuP=Ro}Jwbbh)v7ZH&o$Ls^Ukz1< z(BYcicR?q~3D?yG388S0Dxr#)&7Ma_%MQ$huS=Xb{_GF|I}GTp(F5o6D$iXt?74LZFzx>vC_JT{i>&1Kl4c;%Q@`uqSl9G znO<|AT|=VRcR1y<6BVbi)brfI2kV9nbCAe>I$~ZKj9v7D48k8ih9q7`@5?OeAh)?2 zyPr}WRo-=x{K(&qFh_l&J|7%*yqQw;(AgcS9?HI){b_H1nD2?eE*z|9(K_ zz}EpD|K`4BZBLZP)bqOa6WDk8^-a!E_DlA^T?IKs>x&R3@&I)?c_>0rICJsxJ`B{%6sVdjpEdOHsNHzQOX>ke8MQKCTbmtEbg;j zB}UkXZk7AFp=L6K+V6(M4Q64JS*fZ&Pd}sd3Jj%lt_TwDE`4k-sQqq^7g-MvIL>|FmBH23 zImofi{)$MN9sL#R_EO5*7IS=!iaJWUhugw~A7>aAaX9p?|52|owtu~T`rPvO z4qBQnG`M}x9N)>7MnLVVa>%m7I#b)EuSZ^WbQ_+6dZ@dRqhp$qmc#CFd~sd&tTL?R zP2!Bfo>#7|?&eBWTTb76e$#vo+ZtQyX~VQicyXb&MB5MewTf2Fc-k8Zr@e zJBlQfbF6pL(|bm7s3}?l%U*4aS{qL#8-2F>)lo#`zFG=Fm46VC%R|rN80`RA;Bi_N zt&`T5ZqadSL#3th9~}g~^f6aqzpv&cRuhmQeOg(hDxlbsQbIif=s=uJk*L4l~2&Z zlwzK)Q1w&yM*S4Jv#`-ZU+CHBrY6`~1i93Qj?tg)w)8vMJ+0ZNy@jVXQKnh$s`@IW zFT4J~!Ix`*{kfRx;25=ymT0MB`-qp^+4FJ` zr*2Ts?pFSy!)BV+K}*0gh`C&1O$2`mUr)t1$L@W6oXiCW7_TQ<~Fxy~*4z;O}8n zG{vLAJ5ifagdWyB=%V%1yhRaFenrG#4y(yZx6)QB+A|W18BJt*F44ze>i^%kmwL(> zYt;X&-@NX7b+V=@E`6G^)8-X0Ke$RryP))bJH2MuvU=_DPWCNC^uA!I zyj~e#*+O=iJwrWvU4>k4>Ejqcb#60q}w0MZwoVLbm3zbOtqNP$+TL~}ivP{!_)oDfvy_;vbXRq#0EbtL> zv@IQo4$m|ytIN?97_a{{Rv;%WoZ?cK>WbRz65Eby{p(5^QT}&Y4XU-4u@gaIdc9oA ze`hCtIka-$Z;e zgebZ~-+z0oxHzgLT2N8PDGuQIJ~ilv=?ut2{oH7FAXu;D;grRebf9ln->?ftq>-f8 zp`vaTdmJyPW2d^YO8-W^*9z)thM)!Q)VRB-@JYq08s*;Zw!3@Mb^Hgl;*06Z+(eIh zM`FjFb(Os(uF^?+8_V)O7;miK)Hl(6cZQDtzGP8O(E~aSS-%PG^-!V7J4{xsUE*Evr)i#AqytylCsb|pCd zT_I#!{q#6A?kwUR4e{MZVd1WK?|_pR(dBYq&jKWB<2|%OD*nmyz33r)Nnc0`9g}mg zZT?h#!k>AyZkk1_Pu<-@_L{h2RK_~}muP$*Y`M+sn$nc>=%^w$R&3KssU9aQ|jbYuJc|Fq==tV&|2GA~LJ@ ziRFB!8!{Kq2~*Q^VuL|l@NVRW?D{?x&$u-;R6FTEOJEm?hwPn_h24pU1Bn`JU+&CP8c`_cCI3Og2^6aF|H6D z4@MV+VXOB>zx6;awZcx{?QfE)%u3rO+Usz{W|QcnZm6qo;8W zUAG;GOC3{c5}&?I*Q|q@&Jk4KUL$60L&M#nD|5F>oJ=^QGwJknqQA@b3sCOXM@#TE>*dA)9>68sucWck6 zKuc#ihtC&c+^1f22IuOg14{KCusK>p&Zvy*Et@zk}hi)1pkkoRjteykZ8vOqc( zhI8s}U84Xssw?QdoPyQ8op|0E!-t-gADr?XtYuaIhiw%?hf`rS7>~FDR+T}|Q!65U z>y!whc^lMqSj*k07VNI>C(twxpu$hc zmVfbecY*WC^rx!y=)UsYf#=K9RneKu;8@O5`5!g-pU^Bf+1+6PQGJ8cYj@LYQH(CC zPn@K0m5blxu~%72xgUBuIT*>yrrAE{3j#(u7BPLyE?l&rEF zi$Z6m>|b;W{u->m0)M%&l*^(ME1@z}&It}IYgN4KdMj&Ccfs<-rB`^Dr zEYr7Q!@lAinX6QVl+(|%14bTnaJ z(TeO(J#{HGe*$0Y9Fp6cDAW$D^j7SP(3l?J8{}AX(ye$N36aDuGDX+}usP=dT!9nk zb52!O`0G#dfg1UY?Cc7!K{54_C~FH1xv^$5vBSlF))r1B%r59{9HSQ})^Uf} z@&C|0fxs|;-E^LiUyL+%lkI9xU0_{SQyL3>0$41GjBMi>wZZC$Trl5c0*l zsQZ5B3BV>eOWpMWVhly-Y#U66>O;;UNhUHh8OgI8=@|u0|1frumv0A_U7Rfv1Wr4U z30tS8Q7K-59HsRCZzGTPUcFA-G!M0+z3`lGvKzx%c%%;WaSe(00XU8$^ORYg#{M*C zsRYkLl&1uImO#DjRQ5J%guEJxrdfz((w_4WauelPZ|-_D1eqs$r=%i{5i(jngE z!J-q5c+w}y?1mC2 zN=LM7DeI7ZQ5VuVimEC+CP}gZB!5UtZxNizO z-pRhk6Um=8fl{mDcZA~+JRrKGq06HAtQ0i77-+Y{${5bO-ROLky1njneGfth%C2?w z=|vBMCVvoHuMbW#64%|1#(BZWe!Lb_M{tNPzh>%HqBrSzRl(m}f-IZCX$}@L9s96a zHUMt}dAShnUxX8BW&nZ0@X%?_6yC@Ey{M#l&#os;*~#WVxZ_W-J=dfQD?JzC#z7=P zyfK8k8Z-VSuKZ#wRW7kJ!UA^BZO5Dv$V%rY&+r2eye1s5i2Xpeady`z>Wo{WDR04R z_sG;7VGp2*WR@kSVqrhFPh?fQLM7jUSWBK50sL!n&v4}$lJzv}JIL;y5|u0fzG|?$ zU`I4TM=1LS@kd`e!fSKlZWwur`)K!haO_xY#V(AP1r4#ADwajOR^n-|Cj(0Qj$N$Q z(yuM2BT)~{r}?NQ$Oz`jL$R{oE>`?MB5LQb(RD0yhkg;80DLR z4@hQ}x%XDy;5(gHu0RicS>r?KZk9Whk;KJGSuU+XYH8;>64fU4zdpGsVc#YST%qk~h>8&|YO`+~*I-clDrLfVKlZN9w z-HkLX+z3`P+-O49cr#TcZeuXgx;LNr8@JI3HI?^h@&1a6&iZazem#_4?04m@ref1Z z8{_o^JyEHs)m8IArBmpPd4gY?4E;>OMu;TK*#+zSEuDscped;uR2Jd?)CStmm8L*7 zK>5Hqkn_-_OW1SeCf2PJ+MfVzdc!Fe{NwRR@ab4P>x}nwan><>}GcZ85~VF)mV0e_*Y~NG0%6pO&z3NMB2QT zo#g01`wLyT5~z*CYwn_FR)(P8`eRvsHU7k>=9EIl@I#iQr#E=MaY!!>pFGnC8V8h$ z&}@CMh*ydHzQOM7m{Fdd?{9PPOCG%Z(3F0lid} z{exPtL;gNw$Vf2%AFCSz=j~+1+xR{|S*0Ox=5naC9M*PKvOGK39q=L0ughL>Zlpyl z*`O2Xg+TOd7qsAMLjmHe=(IC*oxD*ZT(Az?^EtZ+w3C;brxMYhI(( z8Y&I6hiV0-DLaH(IcMp;Qb?<&uEE=RN~P2d{fu&)9M?3?Sh}JgH$2KAwXk{%8{>z* zfV~vSRTEJer^rcU-|=(yE4z>n^U%;a@Ttba*&X4k+vp)1P&|+AJqC=NM>9?}2BT|Y z=%g9xX{6`FrthX~r~m#M7FLYWiroknD;J0>ef7-M^D0N_>avn8S#HSwnb)Dan`n!p z%>Pd`Oi!f=cwCA^UaFTdb`vMOh`fJAMN}4~Zx-?33l)?i&#)4cHD_z2eK0A)&AHvw+tORsDPj83#=Pqy=+%n8$tMo+lbdi5w(^bpEV;J zyTr^x%T)nKlNHWuA+E6=+h!V zYh|2-KQ_=MxfD6IpS#B+Rf=H|*1!hp!;U|f^ruD-Wb9uGCzT>M$&l*4Wb-UgRU8&$ z9V#0J1Gl-*b0i)>6};WC#yqrPCb;mcc0o1pW-c1vz+Eh{;vs6FQjsgZB3LIvs3GskPEYs-*akLzF|Gg!qTkF)4Z|5lCenILq~&*zH}IE zM1E`Zb?%0a^3r*Jm$mv6qwGdK_(dbuvj=%6yW=mXR>GprfOBhtm1Fckzd|!MQ!f+E zD$Mz2k2%Mz4D|jDUuY;6*EDcG98OQcx5`Z3>xVvt&!!O>zYm_uq9tab2Q#y~td$O( zx8!*zA~E_Zvb*d^D545dJdF6tdZ0ZF%XOqFi7aU1sz~XTjC2zGj>is+2gmi`rm@Cm zq}fd3sqcX7LH#fGx}OE-)ga`T z0zVwYa@s}wBaJ;@hQY^2vBw{vLr$^!N5FC&^d=`xy)qhLRYsxzu0Z3J(bYwfq@CgN z)r>X)eLn)+_=5SZP-Ri%Z%+J!$yC0?>xZGk6?ijam1w%JqKPWyMRqoaM~{Q$7GzQ* zpomh$5uYGwPw6Y@gC3%u!7_|dda`@sO62`D^@`$ewAb6RCfN&k1=v21H9s0ktWOl^ zE^@mwPpyFcJ%{X6FXT?Jnp+8lW3r=Z4jZGWg5AdMzt@aP=;7V0?ixGicEd`w1B2>N zb}Dy8VhPG_d0W|$^f0ogAbWIdLOYHklhuSu*IufM>_{Z@n~I&Yi%8r8tjS4a#E$a! zNGh`)V3|LMiZ- z6;P8?W__6XNls}FA-7Ur`A%%3Gpmomj_}7*DX$bWYEu`GsAr%iY!UMOe~Lk5izo+{ z%2Oi*=<inQ7kQLr! z1dijcK2|o8x4wez<=h&gl0%iE$n()aItp6$r3!8_`qG2^oq_($X*~BdfX6Ok2M-rJ9$cjJZfbI{$lkc$&OE9nf zXy);7XJsO*7l;=(fLfx#?JcB9XDWM7@Z=bLhO34*vOXVN?qZMeT~Nt=?k$bRO2!7h z0Bl=>%TRMi>$b?=-pIGSbn+)F!>I4?2^3TG4aPY5Kqsy~1FNAQGu;PY*ChH>AC2$~ zZY=@T9b!MW75LtqOv#C}Uyxi!sq|J^*)*)Td`P-PXren*S`W);fzg!QTzhyh9xnZd zmCS>ZWUs#LXuBE6U?)EKbF_b5IO#I|1`EjXJm%Rw@L>m`)n9|b&EV+>lJy2wnv4AF zGc51U$^)WLr_hpl!S$b5g_<#u{QYcf#3S?)B*5wZtXBgMRq-y*;(^L3fO)X=w=zOA zYMu%aHF<&u+y%cO1Ue`~JkF+&v(;kk!kXUj^7vWe4!?04LP~7yHd z(ycS=U4%s)hlELlrhYN=z37BX_?5+oS<6~JfR|wG4{vbX6??7@nkE)Hd5`Dei!9bz zXHz7Yobz^*S)3;}(SX>=Q2rl+7vY1IxD%Qmfwl69_`*UoQCsLs_ElL;g?TAr9{&;* zcn-&=7=56=KiOAk2`iA3w|g=7XIRCuhx$^a*?261%B*n)dLYK!#d-?yk^S&%eK6Ju zEl?Gl>cFZaKKes6*m>Yl1l`gKd%iFC&t;sha7}L{&T^<+C?*AMz8o!74!nn9kynDU z{{!z+Sl2)Bx;LL>Bku45X)RIBc=%e*)s>U1JKz7m^sEsB=`zLk( zjrcrO-APP$l$w_aX9|&$h4``)cy%PIlgzy5Kav8EQIKURRZb!fAT zSeh%L`JH@Hi^yn3t`3;;I$@V6QwVG__nCDG;$f$wy%7{wJ;u(^t( zRSw`$%9&GDkR5x0>pEaG70iA|Q=Nm0KQnO1ojZ zG=iV{KJ_472KUk6Dv{3Lr>TxZUyeuTE`_eza7Q($Cl9)_C_8fbV6Ei9cg;yY zHa~i+1&|!f&noCRgBjb|#@Zx8t1B(k#p(rYi-}}=*5hM+q@p+mn(3*PBr}u1Cx`j{ ziN3X&MCv6gauSZ|iD$4959b##uqDKD2SeYp@!I5c>#SHr-{}Za(a8UUMi=1k42H(# zM40PHp5ms3{1P1!4#l-br$3{%d@Q4`G$FeTDEH?j9`8ixWfr_YoF@(<3e*89Jqnr9 z1DF>BuGhe!1L-Fx+ni#3bFtqRLc6V?l4E#DQ<0N(=!uD84x@-Y_5j{}vAdch3%WvG zn}~`Y1|K(&nj4@Ui71Am^@^h}(!gCUW^@VfNg*OSkWcd>O}gOYZABVYLR#HH@*PJr z-a!IAH@UwZP?!iu+~N~C<8}l%zKOqakH6%ksO8YXaXQrGsqt{lzr;k2Kp$&?QGUF3 zFXBL{WwusW zOH$L}`OK#NCYLr*{f6B&mWtI!^b!TCZ(09GG>#k3JQq5pIXk7zMuYwV73{^ryU2>h zvuZh$Y$-e@=MQ}VHZOtnM6UW1y;Xr#w1az7$w&IKp7Lau#{>U0;HW!mTn)6c5rZg5 z-se5gSx7|jIM9m)(h@0p$pn0dj=!T$HS*< zp`?-g-H;bm_55Jv@&q~mLr$=$g{}`lQdEb3D`OAkL~Eo+Bjm%L^FyA=NnKMqX`a*M~p_kiWU8GGK za}H((<&Zf!c(#J2>dnlmF`qtY#v9n5XRsSyp$R_`54*=Zm0WrrJhdozu_#%xnq*7c zl54L{6sJD>CFJ3AAL{7dP|vUuDK&)EM6$9BNM<=jTu$dZir07zJ+liL*c)mc4dhp$ zZzKcM6YH%5c#@hoIlX)W+*1tf^x(?Bz+MUDikIO6Ygu`P@Dog=T&go-sm}TVEY;U=Uu~pfEYL{> z)(zmWD6)}7$#(9gR=yeg+DxY3q>!b9WeeS#r!4)+RbQt^^C>&J<+OxQQC6D1?Tgfq zbVIwvsdP_bIZI3^7T(&!y5-ED^GK_f#8;{yZ4aCDv>zTC2eqz12hT@_$w`WGiqmxT z;5g{C783P?eg~~r5=~qONm9mW3tmUT*DJ7&CwK2L~giYw`CY?(Gge`~$b_2M1f3iJboN2CvGFeIe((r-Len!moSKjz`gvOL(n- zf;VFkJ*Qh>8tamCFh^kL$w|=-xV{kDPtHY=Q(!YAmos507KS&!Akp5Knn=zxmeb*) zp=kpiN?_(kv6bY++TF}#9=dlRGaZTaEQic#%Dk)MBjka~TQKVx@Zl6@yOF!z!C`XR zV-lmtxs$)}>ifcFtN0m-EioP4J`LWRiG6pT*BSmE$*kw%i!33ERtei68$WUu-5sRH zCFI5xFtUM{oHo1)xps)>90tbIdFD($Kgdk)@N*vi`G!O}2$jmIdp+R87-}oRu@gRE zv78~I6Nfx}%j*=>)}5-4p=3JxArFVJ_f`Qt8rak~V?;8QUWbPihJ5>Sm+A_6Rmrb*+?EgqnrUSXueCWV+CLKy$^^ z%5)q0lQa87OnEetMe3d8)SoG6udH}6a(eO&C{oV58h|XU3Vcff@rKw^9q`J^!cSS@ z!~U#JEXT#XmO)K3pxpz|*JAi|7PKR0yxxUhgaY4khKmpKN=~qO$)|G4%sXBmku=4y zu1le5vmj@RVQFQ+(H}di4EHrhYfr!nSOe|Gq32_f!Q)Lk zejaqY1H0-Pmh0b0ywDBbS528-t6iNbVlU!e01xOW{p9V{->kxyMf&Hsy9Ou>knr#Bvm!aR#nl3dG-F zU)_V7Zh?;oyzFP>PK)uXgjY0_y-HOq^9$NGdTe`OVg1D((5u+bdXn}(syfSSs&*AC z>pXRDTgjXGQ$2ME-e1r8g!6&=A4sx0Jaet-k&FWx&6#ZpY=+`UXz~JR)?Uz0T`a-s zaK!(htGmf^}tUTeit+CnnqxF0N9v{ZzdEXCxM3} zGraI56X3)#jMxP#T!2;eH?WF?Zu>!*^U!VoLvje^Mq$CUhGREFN3t(|Gv@FQ7Qr!S zRCdHW$v4Ze(!@S;z-tyXs#s3CAMQ(J0cd9YZdeB-O zxX1xzCjomoEAR($T=LHUAYCV8(S4&IMI#cOVEXz7t0)0&7G@=yBIhB?*_;vu_{ekL z62TMe|2wluW+oCy65Pk=ucrO2Zwcpr#hAr4MrJF!W<%zLCgqIXCKm$hf-5m)vlDd!E)3{;dV3@`0&$ zKv}S*V&kMI;$jdv&c>b9u;P0nE1Kdh6@rU=7^^yxr7>6)>-!rvU{5UnAy}VX8MPZ! zASb}q;iov<@ELyl2=3*KE~%dB44oIl{>sDes^F(77^(`LRz)J@06X>IloEJJInWeV ztd?x7sQ_zg43+lAPuT#@L$Qd!=dN-@Ygh`Mm)GK@b@6zOGIKh95;|vwl^^{97(Y&yj0w1NO5mnucWFEj8N)b7F00)Z&@zUh#BS`2WV4xk4tPTb$W9_bjPbMJ+ zX7PKhNp+HOUI&~P!&NWfKo{|g0ANuTZBde!Si-eT3D6v>YKUf$Q}J5xJ0tKqX4*^t zfs?aHl>JDtM{wwWjI{}vU51Aa^M5Q*y~^k3Og$KjWww!PFQF3?fr^|yn`J5}x zF~$=xy$-IPjbxKEWaonajZnc{W2}yYZACBEQz++g_m}HVKY~ ze06~4dzjo*2y0E^;4#2En(y<%KR$3z3ern{?FwX|hbqgnYf@*`)+cN_)=Ct2YV;=K~LKaS-p zC)QjfXL$*@tVGHW1mFJX8mTgndNT1*zqT=>cf%km}_lTHws8BB-$h=`!xaES>cC9z@<7E zkAzl}z_n292OxA0{+6>rx0w1#vcP+gBWYOQf`#r#(-H9Kd|*D2cSEtiS7Vb;f+h#@ zoEhM!10zdzR!*EP0^L=C%8K(zQ)ssm9M_QViy_JKLVFoapEn8lEtNwaVn;cUcDc-Y zOfP2U1B_kNDwJccHaztQ!0r@~odb1kL6!=2J%UTFBO_egof}TBi8N}5jj@m!4Fr>& zncHk^!L@M36lOXcKVb;bsY=W$2ehFvvO%=og`|y!4iqY<fZyiG&0p$u=f0kO(!5fV|o5>e@o8K`~d}gXXZ(~l zdr?+Zh_UN1!>-7U;?P7_IDWjz=Msyq4n}Q^EFNDVI@`lI&lye5VHPYOff9cE++)$* zUD4Ttp_wUYZaG191r#K;BFCW-q1sURC?i&m4`ccPgGe~Q4lkYN>f6{w$;|aVKTb5e zhg!-U$dF=Sw=jI43o5C?yAbA)o}V({el(mfar0*U$Vsvt(R&S8UlXkSuE+rCx+u#C zC0NCKV0oT-zX1+6dB4{5jK!~b%rg_2#a(8+4!T$g9|2=It?rI# z9f`(~6ME(B<}=9hE4+UIY~`d{KWbY}zJFUq;~v5bAm96g1R#3xbkKOeE0 zH{5SG&tVS*#?6>{2XHC=OHME=@#3N;73YLva>506qAdmZrYb+hfO9d^@~FohmHFHY zYxNbgNB|;olD(X_9?casq3p{1F2$!-B<(RY%@Q=_5%4<{SvMECH3(@p3@NgR-~X6& zu@wkkLe3m!Mc?5qIT88`bAJNY?L^aVgxZg=I`LIEA|0=o(!4A)&A}@>no#2JDR5*- zo-G-z^n6+wsu*C#>SjY3lhMKRiP}xVI(>$vdLKDBk=V^RQ-=Rd#<@*F#pEg zqNQ&Gg#aw%$KY2^y~)i8rP0I987qa^O@hltVEN60V+^zwAL zP&Ce3utCiVV>;lAQ~Z{5g5?zXcdYdi8v6>*FrcQKz$QJCQ7BjX7QR8Xa-MTVe&oFH zRFh&$FkgvBlmiA)&~hYnBeAkBL_nIcuC`#Q9Ix7@yr~Mb^CB@TK{s;ByHv`@nD>j` zkh9J&LmgYtn=_FntC85#u)&UC+gyToud;rD=ozTT3)~ln3ms_hcr1+z@bd$(lgujO zpszz*|GP@uf)ugw3M41-6Z!uU=^sKIXq%~FB@ZQiV7s6^u?22&g+!KKLYWF!4L5C* zFesb~cVHh3GzuX98zMV}k21seF1`_rOE%{QbI1>ER)IpJ$lkn#Bj5A;qd6NpToeqw zWjATA99S>Kyxb;b1u#c9-or;KqFSL-{(+YA z2R1tI*!~}i@fiBR=MwB|NbKW;(S%z>)6LOb8h{aXYtnV_^h+))+jkb@O}gonO^i*L+C;=m1< zMNU3{2;E!+du|}}9-JrfzgQ@~K)y8)j{(cM7$*mF%gIRHSIOaOP**0PWaa8KxH>O% zI>d}q59D2LM$mw6ebz5$B#L}UWXwF;JuMa6M>KqUI9yC~V5qZzj#WLPj`%=Uv8m!EEw#XKgSNZAwI)jA4GR zFAiMOGhYP>mkjJe`MUrdk{#}|aAgkek}8Dq%vvZT4P4Ta!Z(?@)?n7j%Jo!3Y z{+`z_W+XYH2h3aYWBGwxZf56?H7fq`CzD$O&;*kIDgnHEnchWV#tZ-^HhAg_vk|E+ z|1_Rf2o4DW^EuH9bxnE+=I0-Xoj1$AGBo2|vQyo+*RDcQ^g2Uf*ot3MLB3J73 zQ`F4ozUQjk%)kTZ#{sd&Cgn*KU55h2lDhx}I^imbu7`t}EMTcF{_y{>(Aq&0RhUa- z-buWhx=ehkCD=V*Ud%-4a8#n8*Md+XhmQC9|Lb(Vm1XC zzXZ6=&U%V7%gU^(7!VQ45#5`DSDppTM1YN^d|MZ~$ptJjLL={)|4W`H9`-@-`2acf znNJ>JO(a9zr@_%Z^QzNeMxse);fQ$nPq_FETzwiYNJN6z;ftTl`yQV?XT+mG;u?QD z!P^sf_W*aEgkIv|?s#TxVK&v!hrRge#JsA3iN?I^j+}0WJ^2T^usl*_7`DJTUR}_h z^}s?U)+u>_ToZRu(L0hRQC1kvORHES$x5FU;o`+@G5#c>zTq=)}YNUP4C& z(Z6Encjt-4kth$4782`}`i_0bowLmODZi!aNurKI-#-~SmMfnlD@8us14?gM^>@BY z;_pO8{=#=RxJsavZ@LJOVr#H8_Q79=8222X9|!gyxbuc7-2|@1;AhEW zyhI!MGP@c;p*`~z52_6m-yIm_BQk!MxgCUGcQD?6rY$MwbA&vgR5^TX=`IyNkaQYa?C9&>v z@at3Fc`nd7jEv=nta}NJUxSb1tm`rN11!ECac+$yWr;AhD{Wak;ppo4aBOeOS2 zc1E%?+k&htgujGK3h{qI{ule9t(jqI2JaUH0tFebEF(lSvN!WB#^+U-a|T8z$ve>= zViguKM~T2fl6tyWG}RZdQGi!wW+l~s8k$w?yH9ZcPv-js9EeYO3wRy_Q{uNPTpPl5 zqI2HBVb@uoFVe}MpJZ^K3U;1?aiPydM)&}9^RT9x&__KijVj0eik@O;Nghc&T8Y6(zPk|jYxKQHenGzuI6kj6fpwH08KNu+ssJ-}%a7;8&z?Zmx7P zN704_d7Aw7iy25pDKm5QK$W+F=s~QMGeApx+V|jlH-9IZGRp}szTlb<$SJYSen)d& z!gFGUSkS}ofW>k2%uaaWEGxK#jeZIo9pbJRP?Th{Zt>J0ASEj;#N2{ek?c}nU~Pz< zT%Ea=1oQRb#F{1@Wiolf%2nB!sd&!<%eO$|1?!P0NIq~WaD0gzIAhAV6+n6l{Pz@I zPho~{!2VIzasw{B&YVu0lH&uTyaj{57)kQ6Vi(6V`ZvbD$zShScSYtC1Glt*S}VbU zcE0__cVa0@HOhNa8(N`;to-Iw}NAB}|@tN98% zZy>=|F{dPCN;KT@3ORlo*`Py6eoMp$q(-U1(kdP+Y&{zvbHBcTrAix_!1de zyF`w}MhpTU!u3CqDiV3j0}jiAH4oRl0yCSLr$omjcPg3Gqu8T+S@AaTmH@@rxk~JC zvGeIhfWIUTnuZKuPgv6*EN5z{T#Q^E$k&AC^DvfFWPW5$;zb0p&TPzB;#6U*Ac}V( zz&nL8GV*x_a8VJO%E@PmVAaO_rB0?llAtO4kehY4B0+{?1ApeLs_+WzC@xbfLF0LyJG9~KnwK4((DU{TQkeb;JGaH zCz>c5xOxC=S7W8z=b0k0#P2hRj0PbQe1Pj4=u091vXWqI%e&C5a8hBO*nsHKXyQn{ zp!agXEid!^&aAF+MFgWO$l6C-zlYaxAnDIaBv(_O`4!>nEQ}Zg^uv+L5(khfeGkyK zgWZGB`Xy5aNk&?FA(mnPHYI)*$G4Yw##!k82v_arbqe@OOg;f=FV&YRNRgM&;B~Hg z0L=t~UvDg)Ptd?uDE~FHlz8VWt_@?Id6~W7H9eTD2yW{Fr~J@{)QxxJulmT8HAK@I zBMHQ=ba0paFP_tN#!2QW5_LaE6i9MlQyI?%wF*s%CwiO+&I;@`so{Es9=KrIRFVg9 z@J@0t;sc2M78~Fm>x<*B6JX_=iF1jN8oW;iI^sz^gM&{1+57PH1EA#MyCk9;S%IV* z-jV2n6&jS7t3)Hxz?cWh4})8y;I<%EmYwfEnh@FpwBvw@#CTFo%k~Gee!?tHG9MqL zo^n}E+7 z)7}&xULpb(v}779F95ZQc5RGAkqW{}KtC7jaFT&63yt+A_9NXhBBlF5Z&RQ*sj7X$ zh;xbZ9fflJ7*BXOnfZq@hrF!xE6{$+HIj7`YyLGb-H#+W3gm9^msGikhxwi7RD|n8 zkx%KFlWykB0+{s=zP%5=j+p%Mj!~qtKq5e*y~Hk&xa}`AWkGnW1^m{T=;|amtr8Fu z@6?SqSOp3yhfK+4O85i7^&^xKfjkUn2H{{d8eXc+s(qBbHh98w68L?SI}4%rO92g^*m=Z+yTGS8!F_dRCEiv6_(Npykei8Gq&}d*&2p9ofNS7{5h6H8eFrF{q{r z-w3>=cBvlJBl}>9$5k15-iWIj1A{O)R(!!XP{~O+VgovJCOTBQjWz*W$29LSw zChreJRY##Gv6J@$^9lTHz&2w~2e|PvKHn0q77rmEBj-hubVj4r0A9gBQD84UC5?bh zZ9eUZoT<$^Y(P{j9S^)%nD5#mDIIXGa7hI)QNvtG7!bF!a?t^Ap}rV&(?G_{%jXJX zNbLBQNdpoeJ`RmYHd<`+N6>@;RAo*I&ymip22gHQ?z+u22bsNad^8f|3$o=L>wCv~ ze#Z%0pzox2PGXpnIh0Bcv5GACdNLEi{tGA}3|`E^b)`*lDwVN z=sf|C5kTTQ(nD;?jNqg&>le#NJ># zyu}v%7cJ3%j*9WD_yOybY6yv!mW8&Zu12y<5@V4H)OV(>dXM>Jh6bX6Zv*C=4Go)! zB&)!z8$dfz{ImwPHB5U!_|c#B2Qr$9q!T{82p@g|3z9uif#NZ!K=KnRoF4S{~r|#SFnZ`@J^yGHTWhJPO|_%iB%^7 zr8w??%@4aaARQ0#bg3B-4W1J!4aTmM{7oEmewz6w!2gm97N1mN`|W{`^zwiCl`|GyeE%Lg6#MNXSoZ;%kAT&GKu!4SAy1Ha-eX<|(D$q0 zB;oo~+#xx3r%7E2$OFl8T;WN_`1UgYKZf$WkU=U`BkK==8ze(5RU#4#_2>B#DZYzd zutB$Vfly{(`W2dehs;W5MiSL6XUYk&fpfx5p>49Z#URB`rM%*oabO0RsfVDni#SghI5PV3N+;y-Z5s7!q z@I2JB0ef*3(n#uPZ-JvEo+h>a)u7CFjF_IaO18xpPG*lt_+MsMg`YvxtQ@3fWjR($ zJ7!%K9`HftX6D=4(7O1;;lMy7jteZLH*EtSFenkl%s`+T@aaoLs22B$|Mvnu%LP;= z=6sD=ON{(7us;tLq~72jv?}@|)ErmhvN{qb0DA3$-8>3CQ-iUcrq&3Aa$mu}lAX_t z?#z!Yj$kGqfq|@^-G`tm$?7Kplk;FqtlzJEbI#L$WGn=0L16zoQtcVD zdW76a2kg3I@eV~A6f|v(_uL`$`WX0JVs0n+Ejin}aJ0msZh_ys%uoDDA2S*i4jqd{0CQ^M^Y^_`R*vQ`wsmTW^`Z17bz=sczMm|mIS{Un3>4KM~o?FMaV7=vDi2L z(X5Y{$2-;|m2tbk%_sgkjaPV-S)F1g3D|Vej8q5689@Ir5EWbfGL-B9Mj=qgBOoFf zX^G~hXO1O+npA(q!*!DDl>B)+q+uI0Mk{1yI_UT-5?O}^B_n$gScy~|fxg0*lBc*gN;h4r@v>JovGYNWwX zrBvG001GXErc{ai0yk3GRtK5g8~QE;?i|d-pOGc*n4LL10aHJKnTG%A;;A2*gZR;M z>Z>ntSE;YP%UT?)L}dn|g~alcO#FNz?^BtzMC!hQOX)FoA;lzueuLE)M+$a^$3Am) zEKm)Ct~HaRax+7ra)TKP?j-}68A%idkM<)kKb`x^Lur>^%**-bJ@b|FqOz z?1wgff!8SJFFBJUNC2sr7XPRWIFTwd7uJJ#29l%y6W!GhI*Q@_5Tw)qR#O=Gh&+tv zc_opxB7cIp^CN2!{1s$W$^25%4mB9ex{OI^_@l&Zs*-To+Z(L zH_|FElqB(TsUAs(3`<4hh$bun7NwiD7b_ZsB__4)?V!VM)R{?7S8JqcWA2mug+w64 zL;b;Oa=>F!GcJ*E(NV#yPK9<;;oU?aBDCfOjwK#k3>b^0BhgN&zl?+OM1xqsk!0Pa z%i%4*C31I!d9MXa*TJY{1herBi9tm%`y60J^l@2ePCDnVG15tRQmO=w1D6yNTWPFU z;^zfftK=nr_j|X3R;BhRGg9^z_kQK76q7?m&lX2Q)Hc7B7_-Ft5`pqp_`4vSS(`f} zkpE(Lz2O`2W$b3OGK$Y7hLI0A)&?FGd0GqXbcugRCL4glEh{0KwT;h%0Piqhn*Lwc!L+o*%jhx z>G0O2``{IlA~R5H2v5r0QfFEgNzxP#H5%#`JLVhDO5o=zQdhcR-B9{3@biQjTmuh@ zP={n;T&y?_=&pxak1~6SC`l)>#Ei;A?~*OZ#XKT-Vj=k94^}U}i)7J1pf`%cOWt7q z8?clZu;A%55RkZ(U_+`!rC%cwotwn?H?Y#A?&>XC@E)IhS7LwK4 ziN$h+@!y#~fKY+ZrTEz5r*1||jixqdGCV53|6@+C8BOX2e*r(~7m|8`j3z83k0d^! z7v8Y+Hi;)4%v0|$n~WviFrAHNo;$AYb6q!ALvUT`U6&b0e+G`BZ*`N@Dk|AiA|D~ zlq}d6M&1qto};^^YC!hAj)u}oLuuu~b|a`Mk~@-2nt2ZfL!f*sR4tOk1rO-VL%Jb< zXIB;MY4HRlGMB=N@320pv&_!QWT)6pyc)Cinn=@u)Ui&%b6>*$LZjh4-w(<0i*-uR zi$qYRf9(U5`4FsU;>n+Qb^z;_OlKNaWH5iFXSS`8(}U3PQG6%fm-u!+xmGH!ivZK~ zjGh6m(b4sh{4IS3W#EnStmiY-B|e}p;~UVA73^C1T(~!tHAZ6nl;Fw=&}}%QiGQDq z=g5Ark?6+=c%v{}QNy&hC9Wvd$l0Jzv3mbRdsKuzr2bW6*#;Dl6KqHoc@$UFV6-Zx zwVTWIM#T26%}N>or7G~RNN}mIkm!MmmRFhOYxv{Z^*T0Hr+=#c4X%gyMR zm1u)gTqD)f(m$|v?o{xSih+N`V` z|NEGJ;uWAOF`7H@vsh^o*M1Kt2wg~ZgIG}FCB5f!sh^YBs|!4c2X&9x%tKEvMuQ&! zzp|UyROH@8o@Rq0zd}RLxatgg>_5hTY0g!ArhTSA^#q7(&`2m8AUohnO^9^Mx%gDH ztn6=L1+pT8zOuq7AlQ`o*8>ASwEmV;ZdPX$q{Dc*^)8%!QGEp-)Xq=4xILqF`hC?5>zDByP~(Gk|mmXilrgh z!EfL%m=Pqm_uCtgzG&$pkS=(s;W>i!E3u|uK;#ZoX@ydnVY^D_R9^0h1S9!)FQW)e zXM|RM?^WB1byncpPvGW-$uF|Ys^}A`g-!#$C4fdp=qfiO6bGN#8Lc6azvcMhO}JC6 z^AIFzZT@P_6D2pBgJ+e2M*CBjE3PmABOn@lQZog&(CVTe@rCGS|DNZ2in!zd|Rgx$Xwgl*ox> zwZfT6L7pwYqmW;sljC`U_;|U2v}A6?E-nkaMp6&`577hJg+=y-sKgjj(<2fx3D`<) zxKs#Au1B)(79b&(Kz>H5$}0yml1jddaMn~}VslvMRnuydn!D|+LOf>aIG3nGGLq({ zX^}|X>lG8~&$&W$t8{T*fbSlXrIGq^KkoVMyMKqGB7kr)X6D6uGC?zivB^gg_mF+7 zCATG+!oRStXP7_IkKY_2EZsb^qeKI+{Fhm&Q=1W`>P<9> zblg{GRH+}9n4;_rn2Gs_w7d`O&j9h+RJKXKR!4p&nNnpnl4Kq+#W|)8F8kJq4Y!k* zbiNB`Nbd6sx@XFOvSZD7 zxO1{;r!;3~;_IvE{D-{0!_yJ0v?ftS>AGxh%G2JgRIaNDEF=OVxmtO$SldbPQ3^aL z*!BUFInl==B@)4=WMrnG$!D>iMNs4cR(;c?-Xq{m;zTd_OCr>_q3aurD}8#R6Tk9= zROl;&(F(JUiYBbfLp51}p$GU~;wAF-4wNmKD2csDUM>?f6$!_SCHI=^oox)Qe!$T=ZT-jCT8W0o%skSHx^%Mt|QkN?BlEmgD z_9Hr7;*MgOyMW;X@bCyv@fP#TitLg&mmhJ0+kEnwwThMq0gI(sudKzJk?hQIDVpgF zT>lCPKI8w7JX5S@9T`*)sUp1%qM;>*`x%}&g8n+l^S%S`Oh8K@@EPb7N74#>h4Unv zFMe`;e5;P|tp}>wgY_w0R@-^41#^;Vybc5d_Un_h68#1F;NQ^1!63DOGgYRr73`uq)0w4>uToBr$>~%tPu-^7DOrbY2zK_yO7x4^+4;GuV*sPbacmtb=H1Njw{YgasQxG6%0% zuhd$8W$hn*BeO$!tyyO@P>2HJ;tM@S3f}-uVt<@RQ*MTat^*OVhE$#<^eMf4L99i(4sSE_ za$r$|PUM+V&n}eU%`?1^NeTQFiKh7*DEI@RhrsA3Gt-gXg`tfSjQEJxM_>{Jbx9@s z0qmXwXhqpO>OZpzeG&86%T@8rNV4e}fm2Sn!UxI;hq`)zD~Vk@fY?=N`ah(Z@N{-) z;2a#V9o;FBsn0xLqBqaY8A$Y7x?sPsIt_{x3-Ua_r3=7=y<&k^r1Iw!T6zrpyTY6q zyZbUH>07&x{3?Y`6o0EeD@hOhu5$Ge<}aR`?BNoJl$W}W%FHhsT98_BiOmH=^Ef}jcM8ZQm~{I>n2+F5e*+2AP2yh}#*9`G*D=cR#& z$MpB|@V*W(uSSfp4|vYa3ey3p2FzEwz@!#WtkjH*S`%$2{XEinQ4!vixPE=pF3bZK zrRz=lDkP61JyQu#gLu3lj39oc*jYi0E}dCff!Teei)01eP^axDtf!qSuh?L&qRMthhNV?H&TgQ8@}t$3K~IU5?hftQD)XEwd9iX zmX4DCSl&IMfCf;7*w-2OG>R2h0A{U-LP^X*s{2|HNoY?zv?DxRpYdGqx>#O!;4Z0- zlw6^BFcR83u;k!J*HZwn>dw+sc;S2G3 zr9SpBYhhsieLyB8@!c8xjdeU%_T81;6|Ql|JEW2g3FOBZR#tig3t}C#D|xgZz-c#k zy?`dgK9`(rJs=}J3K9bosVseBVhxL@wv%VPX3Q_F>nl(E#(h3W6zPf)yDy)~>uEgG z&Kh!o@AhcLVffB1fMR3QE0>I{cmt=BW=W3VK5 zrv_fm!kuyd$I@AVTTy*&eCqTqx&>5Px?AaPX=!N?6cm(BK|s2t1?iHMR6rU@r9(g( z?&aPSGqY#D-{${4=ecm>#LV7nuXxwH)?&3}>@9Vk>{EGzQ@E4I??~KcR&^n^Uv{^X zaqE2Ep&;)krBk9BGIo=wfsE~PASW+b@ylHELEbTptg0Y%GD?+kjb!&edMD)}?os+S zl0zSR=XFc%7v%~hW-i&nvcgj4OJ!yGFyfeT$YalUCsE=wH&`=?B}isKVRo$s>rs{e zWgj+)CP*D2C!u_Y=lBzP`!DuPdJ*!9?{g(`-DPF8%xXR1>lN>nozMJ`SC`_tWN!kA zKuMO_|6(zTXKa2K;oe8@yyYDvH|iq0XJX+c!cdeQl=UgHK0~rcWldxzWTFszAsOsm zzJH(l&ckmdA44KJ23M4WU2nmi%IXh=)iAjd$z-mK-&dIDN!&z6+0qZn#rsMuuQ8t{ zagsPBLNb?RXPl+@oRVj{0WBbVnry*l{mS<%u+l4#UfDfzF23p_WO)XAxt;5m*<$Gx zN;F7XsMGKK7Flm6xhs<0EHmy8SqIsHM6!-cqHSe2i8yvD8{bP!EtyQR3-D)Xf>vC4 zB_0x^5qKk6xQF!IgRJC`JiF7J)-=xK3-)jmSGkGbO1w~dcg2vZOzerwWyu;SnMGU6 z{zXJN1a@#cJH&$^M zYkvhPl3ZW|ZFG$*liB!Z?B{=6-7R+S4ktNf{zse?SqPG7lM#aDdlDLz$p4SCFJ zNbbXZcI6cJB{N(ydwiDjDy3GUuaCLo2(omD6_D7#8U8NW7cyFr_E}<0zIU&ZT~_WQ zS5jMK#tzhFcVtFK)}>4TLdM2rxtrqH%sO17jKrHEU7fHNlJ6>!lYHohLhNKE?kC7T zN(R5w3O-()_uXFR#LN+B9FLB=UoRPI$wSqiI zzfoq0Qjys`?7%(NTxu056R-GQcB_?j?2>OTtF0u`9YZen@{pZ=WtVHorjj*0Qj^H) zwDQ=;#@v^z+z)VT%gRendubmk za!+fSrEn0Ot0gD+5T49t-v2V6G#jsRC12l>V;cx^CF?k3tR?+u$&-=HmKSvK9E7!K zguX1m70Z0dU3Q=t8O}M#Pe!`GpX9ptzB_3S*FKVd9kt;v*Q5JT3%t^+FxeV%3M5jL zhy+RWIq98+mI#K-jLIxL887xZ& zaFZXo0@=wx@{Ob=lE|dY0LhGj#93$KX>8$6Wew6(cH$4BtWD^OJ*>Hu!3}&jhVODK zvWIijNk{jo4eV#iJ643CdrU3*cXZ-68+fJUvE2ia$V-v} zL)q7R2C~(LtCKS-x1cM=BT-lAyK#iOt4nTi0yykExOz*$sF)4IUVe-YlvNaxtN8?} z%E`_aL!VmI|Hv$l%tOxQJwIWcWM?2*DfpJFlsIcX^o`7LRpAwd(fX3vznPttjFd-6 z?F>4nWTCfKW;zm$MP?)HQy=bPEB?q)EZJ#xQgRtxv}z{eqx*Ozley2F^ru~=rgFZA zgR=A>vMEDfm0a{Sy~k?K=WI!=Bb7D(loB%#Y z4kA7Ao2QZAmGdhk1MeQsma)u1&OsOOj@Inl13qUQY-@#8+J(nli2Z)WnOV#U6I{FO zAv6bh{T9i}$fGiM^(FVa0#9lr(mR*WkrV0e;#IZa{Fq#&tPqpl>NCz*CJ>*0h@_-$ z&x%FOic~j6H%TdMh}^6uE|Y!7F5s2O9_G1;4p*baPVwnwI6ZPkS{ZOEIgjHZJCzwb zu!Sz5f790|GbiSK?z}3Wl7*c<#P4?z+0=M_ZOCF*p7)s!>)B-dC*^-tJ68R4E~ z7s{g9hjRroe=MudWS{(eT;C&Bei$ChK0IW}nA^+GQ>ixSitg~E>mu0C&*_CHXS2*@ z-5zo>GN47)Ap5cg@B=(A$-B9YCn_ubrA73DEB(Qz%ebIAtNIzAn*rX!4t8W99G6Y( zg%<|;SU#m8JqUZVyJykKCAoizNtb@-*U26dWm(zc#Aa{MInqy-HUB^1d!6JS_G;sgv>@shCx}hysVM-<0Z&EU?NgnjXSTvT2_7M5y$g>9ngBeu}72nPR8KuBkjR-2d+-`v`w90`6*;`dXG}u&=W-giaQfxD+(H6xvP&{&EPIddz(UHbrOhYJ z;#^2HK<36K^8H{U*D+k%2&z)QhHv@RyPjm-(a(+Pf6|D@WX_iKqJCpPuk-Iw^uN>z zlErbCQ!t9(o6OXx!yYAYj`nd5$M9J9PQFTVO1Ar3*6A6t z^BgTap9gcl&|W%nF4+q*Ll9v)g6vs)NJ(MN##`kc(^CpE)9xgvMq=Y*(4vRvTy}ze zJHXBj#|N2#M%sd{Zj4Nv<%-X-N0ON)5lR`y%KW5?oHanls>t9A?xrsBkL*6Z0;b)3 zyqJMl$4&Ia-N{_S{e04Nw9broy15e)R-V0+JC?Z_*@1EfXGKo6 ztU*`9{CstR#afXQH3sRB-9{Yr>|zk=Y45l#1L*x!ihg`@T5neR!e6H+uH{rmi&@O2 zD?mTvtb7+oceuLrb$;b!;N2SYuCkMAe$GxFx+M1J%ns)nvIAZTAgXk9V9o#k@Arx+ zj8C2S`CD^-m!6I`Md-p?40}Hkdz>4~m76(xJDJM-)G3M$+eZiJPnn-pmj0`|nG$=Q zZqa|xIr_HK79ZoX(v-QSub48vg+9B3bg!|%=;|r#{oebN_m%gB_k?$yx3>3<=cMPn zXT7JLC%b32QPMb1AM+CWI?bcaU~b<$R`5F7q%FN2w=f6zff&u)_G9*Td$ir#ZfbvO z_qUhY7j4VVC>n_`m?wNzBr;R>FUO!;ep}Y=9MUEIL&+4E-R*|qbNtE7oE-FIy@5PD zW+w0*WMVyE=jrixm|d0nwK%&aYab0xv#i(8$~la34#Q}aAF0ol9q48fG06^t>(Fnq znmUFSlt{#5p7XzskAvLKtaB^+QdVR|b8=0k=p>q%PPdm>v9-+eoyLrwip*l@#XRoK z%xAd4Y~Oz!mFIrorqatZ;=JW|TbQ`Ml|G~jUWUPJstbJYu+CBX9Cx5AuEPAJ`}F;M z&eWm(PKdqk0Sl@*-iN~68sTK6OHmf)e*8`s@A33a{@nSD$vtbBv$==P-Gk}w{R^L} z@T~So#{%TOD4h<=)4jI4wn59JPtvm)7mQ|}Up&d4F5au&g1)T2pzpQ!y7w1vJMVj5 zkM}>%N>5i$sWD-k z?99R^l5N+H7wMV$TUHbm<@mp@=q}cZ6!9@rfw!|>7r9^4DTXAr!xqfJ4$G==8GmI& zC%T;1yX-;)%_*}95<8H5_ZN7)FVSaL(D3p+%iMh`v2rc;cmO`(R;E|(=KMZj%2saX zn!iz8tgP(sn8u`l^Gt#%!ad@jxG_C+NrN=iW%m!E}j* z%qF~w*Igc;>n0j`J$j=glMR?s&K>sV-t#i;e3MwnZ11L`hR7yT?7Q{}d#}C4zF;Sa z%AyeaZwUR}yz>T31)nIEh~Jo(8e!^lMy9cMp#S`4*5C`{sTc+*M_G-UrJGD*Pc^>DqquM@n(j@KMcXL$AYbG-@ zrqM$+QMHhUMa1t}nSZtnPjWE+OmpIpimY{T;(b^ zGk<=U_(z-2nY<3lH&=j;_b#6EFMoDuhgPRH_3n5SKplV6JI4&#~a zH1FMGH$C^?FqOFuUiSbjs`ON4NBLo#$rXGr=SQ!_&K|-(NM?}ijwEwGz3B>CmeX4b zja?X@_H#V3YhV&f6^~j^9j9(pU9|`O&Z}}V_v=B<=p^Hg5%lEo4D@VbcaM4&dbW7h zdiHr9dQv^tIp<$^YIA&co>5o~-fHq4lrCuhot;5-pvpYL-qW92s-4!>v z_rqeD_*C4o=i7trj&^IihF#9CV3)E#;k$x%KD(G*$*#_CYTIq>zV<+SlKqo?(=LEc zKO`*i8IuLhF?B)5!j;EPt>I)>#G@IBw{?%xn1dNNwbXv<0_Fi+R_`-~w~6*O{kF$z zz3JaRjZWMfwN=_y?YQ<@d&Mk)IZURn%!Hhew2WE`nrtC6&aTU5>q-Tz1ey1e6+nZDWiBkhPY3;ON7I16&)(_;o&qInYabi8Jk7bn_ircD;uFy`HMqHU2b8dcOBu^eEoa-m2d6XzTvoUf$;3B3Rzto*z8DJY_r& zjip9M<1b$GL>r+cX@{9qSwX#o#~ViKn$m}VIQ{kCbDkpQ4Mi>yKwH1D|FO5(%k8h( z(Y$tg_BX%%zFiZGJkXwEFR_2K57?LOWIJXT6XV21_BY${eqy1L7*6)#cP~(hk!xdk?+Wr_pJ> zg;s}-*;V;BxAusB>#OK=oS;6y*ZUT4GV41`{eM%34q#zBvm<3WkM}sAXT=|AqyEg5 zA1R`?Pb7)*;zN;6lo3@$4|ct)XePd4=evtOVkTPa2hQ&mv65NzS4B)b6?a8J^m;SA zl0xW$Z`jqXP9`jGG5o5{L>t@K)z3jk?%;jMT5?&_qqD0m-|_V>FfB7L69zJ?1`~hF z&{O_BaF~Mpd<#$HFgEBKbLWKeiaxu;u~VBtWXj{2zQ*QdA#!<%94y7V=zs?)dr%qZ z^u>6k*O>MfSopVoFpZ|%$yuybcIUJh!_Ire8oQTW(Qd}+Zen+`zp|&>htT0! zL>t!dYfi%f@j|@k48>k=$4~ql@8mnK@;cWnyH|I?v-%6m8sH?CQ0t?a8uM>0R-qL- zYCcno`m2rTV?6-vJxT4%;A)&_alv)=}5YPxa%S|`4RIN)?vYACHq&bYza!f$jan?>VuSA}?zp=@Uut>L9@H#r;2aHzY5;9tRz-&#Kzpcb88gWmu?x+W` zWFo%sPsBLwiF9u=Y4ilUxZz!Y+>YE!Z~QEov1`R^BwpN%&&rR^zQp@nfn73!sBQ#t z(-dAKBf$?@0on5=D?2xlD{sIj$!~=-E|@GkXEdt%z9VGoAACvO~$-K z3jQK0U5ySIiHA{~NTwq@Qw@tTo!^xvn)2f}7w7*w5C8cLvXDzzf!5x@6yKd-m=Dp{#qm^!(bs-5 z@AxtI^@7-79GJ*D_haWN6MXj}qfhZTo;c;*p-d94#6ERop5aoW^=5pkt$a`PdzsUa z4ga8wD?OU?x*rfx%DlxCu^L>QkDXXe!c>PYu4SCe0p`LAqRGxaz-8&dzqN>v8)>7)Fp zn!LwoJZm{2aUyKC4OqaX#M=%(y^rm)l`n~chA2%>Bt}hOE4BR=Uik zVOJfzp)SnA&7qDa4z}H&o#k#TXEbQpLQdub$6{LeGv{Bl@(JZDq`emrVGgD#4`r6- zJurl9-05p3nm#seMQP6L_Xg@gC!;dmt)eb)SBpNJ z`SIc-x3@Zp88$VXU(_YeM?{5Z(B_SmI_^~Q9eC0iqWRv;JY^!Jn#K9ceW{*clICx2 z8Fd>ceS!9bwb1p-%xym7F2=(x9$f&>!~C< zeYCCWW4IkJYsS97tQdF5G%MqUzwTy{QET44h$C%d{xDPxaE z5;i#r$`?*i@cAchb*Gd&%ZRw2*$!`um;>BA&RTtzb5v~Cnkq9zadb#){gU0v zX{bI>cRGKFyDB_Bt)^px6D?sEy0~XWA?-49UypNETAwBQvtP@!4DL;_-0cLiH<&pz z--{;NWOb!;%zjVps^Psm!_@)GcA;aNR=eq)@!A5nn)(v2ZlN*VUQ4Vs#2Df}FdM6T zwVg@_vC6rj{H#~ApEA99tCphtZ1&R|DtX;D&K9?HeYpDi&a` z`WWB1Rqfy0`g&Ha0*yX08ke=@)&ys{zFaYh16OFB-DXaD{XccA-BdZEZqpqRwa$2M zC^zgj+WYQ*oTy2nhiYIQuc{raVtR9JjHuzXVY>BpW~>)?RL!eD6#MNY?Y6!bAE+^N zByN} ztk`0|c31RLx9bt>KWDAl+f&6JB7V`Dt5IU)wM?m4uc+dhTR`uuU9^ka1-*;(vEoLo zp|;=GSLCvEx1Fbx_N)2AF6&vTPIVgCJ2a1HsP)Q9RI7T2IqRYwm9Xcco5|V(nAJnC zV@-oH@c=7w(%PuT>qkUkd%B*a{Uj2c|J)*)S4nX5i_g?`MoITeQ&GOx^Qk?QTUIXh zd$qSRU9DnQ)OTq`Vr{gD&Qt|ENi_DKa#zPdnLUqO&6z9~Dl_!q;!kI#G6#KI&YgnA zzG1Ia{CX0R;Vh@J(pQ_N3=o$^Q0=X66JJ>+^o!bHJ16@1vVKFPnH`LZdKSkKm7QK5 zdgxmP_1}fe7dI1^+>pqQN9(h)GGHh>)K^9~tB@1#S*6q#7wrMY0=<*H#Q9YHLY=5w z5S@t+|1}=DA6Y)7k@u~z$> zF+x^b>$3VnziVHyk2(!J9wpICQ~p8HZBfqcuGvV~=i)bYtR5AMOk00wv=a}Vi{!&f zDu%v6-DcKRr}&SWPo2ivLo{`M)z#{Wm*SOPQH|OqH0HjzUtsaCVLMK6-#xX_AnA&| z-mR(E1|_icf$masfRWj#Z?7}^7^A$~&8hYf-&!lZc0pC`549tDcPA)@DTR%3&fb`2 z#Or08J>(_+a&j;!^-Sysb)QkfogRy+kG(Cd)aY8zPEVugF+8e7wYq2_cI&V40xGKq zt-NkIUq5>c9&ABFGZM{C?jmEP8?bjNz14J{s?pzFzh|-&wTCH9j8mNPeEMAFnpsRO z?)g(}0hjzmI$6_}QJNo^ZOLPpdh(w65+)B7?RLq`kcO(fGvp+3IVr1ydWY z_OVL1MSZ3{*8WC)q5k4*K&O{ge^Cdxj@`|^;kl#6Tg&ZuUwv&z^lK-lx1|0PYRHu}E~Y zS2JIGnf|3a$L=Nm(T4i=ih@>iwV8H6%)@#WP)mrF?oB3)H+5!PU6rlcf7%o@O-c2E z`;R(GtXB$Zxs_xqznJPjqR&kIJ9;&+Gx&4lw}|Sj^H%q~Fek*mP_B6nsL!K=?JJ(@ zt`MK#LEKkbsQKJ}G3p{iGt^gaGngOy_5|*lxuXy4b;dWoHFk4zt3BBNtEXJ}3%kGP zp?A5tAza44!(6LC z^`+5Si<&Y0xpvaNYVS}!^DI`^IAzV3%1rINXOr_HS`*)Vx;G-aShn(&^Obws)6vSqom&7`Ci<&n(E7xk@ipO9AbpVJpZ(QT|E|? z;EdLr>c_+q=bZMQ`@1#XXz3{y@re4Ky81#Sbgy~S`ZRdYTRNH}TvcBZ-^Cmf#as7% z>TWR~JB7T})mCZy_27bX?iA@ENU!s~uNE-x3Fbt3vgr$=?Zr;*j5n9~HkQ-a>nSQ4ngi8VezGLi&rW~s zjP_C~CR(_ks`IrWZeQygHBmogDWa37vys~#9LuLI^^8?2TVZpq{;^(7ly@e%J(PM* zQDucU=xmP8byumI^l9oH>$p=9oZ+f6#5}D`^>kOp+v}BeT6yPo9 zee15UHj3}nQAPpnvh|TQSzV}IbC2oiwHczg8Sgv~6n5uCO!r&;gfUdyuq=@Nrh0L0 zy4hB(;3;l4G+iy|ed272%~5XYT|9a1snPeGsIk^N(8*%Xbx)|@C{;oEo*H3$o>^Bv z>RaH93=dVmF>b3Fjk8LF@NF^Ods=VeZn67W+tpRZY|kodNhC$>QBqP8*vO>iJGB2D($-4eQ6#St6G<)tD)= z2(Q+~-KC9iYv9+S@w7y3mowh2s+PqUO;vMiaZZ}GR(WRhBO}nw?c}5>UDa~>K>Kge zS=pvnclU{$T1m7u)A*c=#3yntTP~uP1nr=><9tUR=T~)}Q(ru1qVZkv3wn5-3MUiK z`>JxlKID9~(9B4N$N> z*_4C)CQ;2nOghSGuU1sOc;=1N&Q2}&k&|EFsnij9$aZ|eDYKdVeTV6mpAtFWR%U58 zz%r9nkFF9GGrLNE%cNWSnz{z^S5B=$E@FaOSUoFrH=^z)Tc6W8?=DflBd7RMJ)#!T zrig3ApLdGR%b)|$m#_6r-L`CrTL}~Yh(n#&6oFihqNv!&@vdUfV>{L3bR4su+f1#dLy19cu zS<1T$m7eY{XPz=l8|jvGt`VgqX>`(dcDg%=O)HS28}D9M@3;+}H)0bA&vGZ5`<}B% z8S8F==`qNmVo4pOlyRoHr<9OpazfyJf&LA2zHwVCrL^yrChYb==Y2J|!u%;vqY-X* z=Hw)P8Yy zDNVtx2P(Uq#Y6(7wU%m4vS4xQb(PLeP7|=bnaT*Fh?Yc*J@sSi67Z58pe-+*=1P(_ z1srL=)7PzuoKJN6ie+F6)zL;%#1|kaDva*TAPz^FO-$9Vx)c6FUiG(k+1Fm=zCQwa zT*uCR3_@3$tm-pzLv!5*#0b~5TS{3$jT||FkCdOBwV*2llqU8OU~(k9T1@6Z}+ zL9k9aYsia9*3(<(7xz2xuaYpAj&S-5sri+*ZeudBeK>8uDi=W^mnq}aN$h)Lcy4FN z1<9VzP3Y=V5UDKARV^X=^ds7`8u%VuU*;d$Nb_j0vfs%T^dk$DhrF1M?wq2&CX09p z&%Cbs1E0Q~-1jF)^*k`JKk2l6PAQ=tQx>@^#awqSSjQ@Izj4Y$H(69i^R$NVqp=!I zoW^9VGdaJzbJcoUGi9RiIzNIR$m~x5uf4XrgS<^q@(@KiA^X4pzjexjrfo(-JE_ay zuWx3 z71(fJFviaAW>Bu;{C%!k7W@1IXEIUSrp{5bsxyezFMv&EP^W{M{J^Kg+yf5md!?+p z0o45qw>8=0apdj0f;{8_Mg82p?`#kS$dO(m<88v3sL20^$)z?RTe^%)>nkGi-JloC z!4@AA#a$7$nCv8yYw70R1OK=~#^+bDl6ma+oOy83j)O*p?Y?5GqbXmLp>6AAK_)+U zE{Y9!^a1fq{6Z$MG`aa-ow-cuJSirSM<_;Sum~EpubYz$LnY_6m@LNGx9zFU3MaR7 zN;Kt}Tf_+`2sXYG?#YkveNHML>3?|Qypud@j9$ho{gILCJ>_fRuODa`huz3h3Etbt75i4Ei2@ zy8gR1LvyrtdMkaJ-diuGAJO3tXnD2F%sVyUbhvPIhO@u_qFp9}j^%OwB)?XHyy#U{ zproi~-?qkDQ>^CX3~$;a?FH6M>x7kTh3wqowmsRtW(Do{M0TRj0%TfiI7{utb{?*5 z3CK|mXCYafl4NvBlC{Yxa@mutOI-a(tB&=nsaQwM5@t`crS+XS;0!_rb2=SGnw<%6 zqOoz)TiLhGBYmXh`beX@??B*0a6oWepk$z>|G58opi*#mphw`-z=ptme_j9I{$Kp# zeD%DJF;_pWcQO_lPmB}#Pue1-k&^_P(~|n1+He6v&K3JFE6u!NwzHDRiw(1CSgr9E z>sfcqAIn>)y4n*g)oN-jwO&}wt<~mNW^J>FnazA2+Z-Ej?lkM0narwY*gR{FF#DK`&C_Nf zYlKz8&SPJ<{7{kh`qf%)HKgU~>)6KFadU(<%Ip_=9{s|sXD=7&VcED& zIcvN*z)n*8dh-WA2`2cb8?&`JdP!fY(4X;r67I)miz^Vk61W;X5^5dSHLiKwM{#FD z^+HR6&4LNRHi5Rjw%+T;N~5?Zy|=Hore}pV%*`&w*nbEEAE|+2xaaNH=H%G4XpLBT zbCBh+_L&9DAu%x z1kZk|Ow#j{3vcVGv@S_7J~WTg*DvD_bMeTU`vWevh^b zze=kSsUF*DCUXKWn#s`vkztWoBv0&T^OdF9m#w_^QKY&p8UEgOH*uY4vj@DAI^<@n zX(h-ZZ&ADJ=RL2z^L)8|0dFCX&y#Fq_au3n`fB-e`AhkBcno8Q{-M#>xT~MjJLt*U z0x-iL@y#lLD^GUHIs3`D+xB`stB5tz{Ln0A&N7p%^ww~5bgVd!&M^<`dok9*tZx1o z`!n`gY)-7GIm{em{$l#97iIzLEHb^{Y-0Xyer?`0^O##>C!$xQWn=fF$D-fGrd#L4 zd$6sxv!V;ly;e^ppZDKjjku433p~fPQ`&6r>rl0%uhX4LjKp6HRSaDU^^VIJKPmph zgk8w*uW?O6a|2%gcwcGXMs)9evZ;@l1KvWN?ndohX4zPSSgN_fUS|>yQ7lVrLNr5kaimhDe} zViRJoV;Rhvu}eta9OQJK8E-vj*GF0p%~)(iY(?z9n9powCYk|rL##G?vB#`!9~PtV zXEkS^ebSCP1NEx@exXUBFa0Y#U5xXdM!}(p^U@bge>iDNLMZ-tT}<2|TPv(d))utr6Z2whRn#AC75Oim7OoeW8rctK)G1OSVuc0QRW>px zk}vvIbXPP-tPxr%2ioAP=-6l^`X<)jJQYie=8ydmtr2Y(2}PzvMn;xLGguz75hI*+ zLKFSbwjD$hwSsSL=u-UeaY?j%ENS%h`r~${%bfXP=A#*_q&pcuI4(zg-^5Jm*Jb!2 zW62EfrF#&6FsKK5_!}T8`TftlV?Fcq(%L#DE6DR^Gc_tAL!uL6|Dq}9gg;7slw9Xc zr8iBJjkhyWC#OA1%bqs>?W<%@^1kHRZ>xqsiz?=C=%{nC3b8}6pw$B$U(s6YDt6}p z`tX`@++%q=`ZEPPhkC?)5;rvTUvPNv=U~Rr7ops7hvL%Wl0yrFBLmg^G4DiAHqVEi za-Ii9dSe!tZ;JD?SZoJu9j!UuDurC;jm?kz9%$CqHaK-4TKk?3uTbjONmY=hh&$2y3G_hd9i})#t;dBX^ zW@KrQWoO2$NsB}C{jTpwAQC(hJndiQIir88-2?ZSYF9NUMz2LWMBj3X7Dm)?$<#b4 z?ca2Nz2o&iZsWRxi(@GR z^q-7z##!Cc=Ihfu$-YT}^}*|*nsM#1hlS%3;~s=^#C;uiFD_ktpZLM?;kZkoV6eUa zh(Sgr9}?M9M^4MgNI>64@Kx7!D%0DXEd0Y2UxCk-Xxy@_O5wb8l0_H)GYU z2G&RB^61b=QgoO3sncIgZ+z~l=b2{Q!K%GA_IZuKtzdlI?6@9r+2Y=W>`;xk&T*yU z9*53{I>cR$`zx+;+^gUV|5$GdOqguOJ);)B@(F#gI>=dPCCBE)x|?gPUtu}rwPr<| zgma}mOx==po2FfxqpPD=BBR1p()y+@Nga?jBRno*MBhYaN4iH|g?ohEG=DfQ+&?@o zydc~rJUQ)V>a4fdQm&>%Q@%-k7Cs+KwjY9erI`oA8Pc*vo4EJ<4U#UV-=CN#_>a+A z@8(S(H#EJOrEQM$IT~iGlxahv87dGrC@CRJQqH=$24#z-KN%OnhMx|U3Vj~x7JTA= z>`iCPS9*v}*t&}5G4#tPku_=CQ}3m=NxPhSJ0Qb4RLi zl1GQHrxi+Dnc6Y+!_-`(q`#G{-3p#a zESGdLK491#QpS@d-v)Q&}8l2QC{$~8`beA))&OR>twaf|W&&1~n zoe8uL^bV8?{OW)1JLyT)Zo4%_R{M#y6;G^QR1J4dO?!JJ^-$X8v?-~hQ*OP^{%Xz3 z1F!bJDf6~`TJiAFG;ivddye@nXU&0U4YNSO|r=;F}oBnN~l8lD|u7_O?J;h*d1@JhLyw zJ_&O#1=9bX zsX>;OnKxwcrfZ%!AiiX1Q(&BbuTS-L^<2}a1SLZLjF`+7VY6N&dusg@D|yJcgLSC_*JNJ-0HZ}ag{@LgEIq@0(pY}2DgTehu#GH1g80Z_go=z z*k;^?>vmlKMBA<80oTYZ=GfEhiSSbUTCL2svEQRJqCKPEMrTF8jOK_Qj0}(TjC>LK zDzYQ;cO)fJE?PEvBGM&Nhu=37!v351bE72whD$pMFB7_cM3QXs2tD zG$`rsbTcyK&G=D;h5X08 z89Wz3A^O<2>?zC-^3?LuanLtHcPk{Hz#y3m?79QaMM57f7RF1JHcqG4OJ==moJ9>_qph6SF^I1 znPP3C3DFOunWFn6MI#NviOF+^$D7)rcz1p#@z$z+D)0{u5=1JgJ7Ztg#&|PfKbI)`;gV2h^N1G&MIuj ztgozX<^c0cv%ERp{MXE7rL&^=XNAo}vAwZytcUrxxfK6=R4iBQgIKB9l-RXcX?%{< z*!WnsSi9I!V$UUJE3{L25WSV;fJdtv)k0dLKFTmX&3qI6-2&Bu{!so*)BqF~eLCVCG5f> zO|No-nv8>Z(R0bQe+lyDah8kP;t(;=8v72I*Ij$H-O5&phCa1t*=hX7gt7di-Olbn zCVhcD2M+Ton663o^LMyMz#d}%O07zBxZv^ZPY%&mtP=G=7MsIJly%NG;ky2>dh{#h zuF{0+r#0Ne3GG{bw%$?SO%AM%_MVw^2=VR-gCYf(JjFx}G?_j^tuC5AgVQf@#!r4YC;%ogy%RzazVsEb7CM{nDw$YW^eEgTgn*-VLGNAqnREr&PF9 zfGzBIESL{DoJ5$kmElOQ0tuZ9N4h%ebMd5&@`AMa&6xHra*FR7&h7qp_6A$$)ztgx~8t7pjvL&f6Q|;2__DNuOcmKXekPDe1x84x@&sFKa4M>fE5$lfdssQ46vF z8T^5I+c~hu-;gmK4O3WF+V52|!n*!Zy{O(+H<2B@|E{`t4XoR0NJ~!j0<|xL;n_YU zi*bwEi23057085I&LmEcD>ib4wLn!12-n_Wm$bu{1xoyx-H)>}pK6cy><{hnFcb2? zogYr6Kmzx1gsP^eWWP3n-nUlIs*Y;$=^m}WX6WnnfKkB6Z&c<{#E3Jp80A>kTcDZ` zjoYlTJk{Iv>zs`P`V76R-bT-^|ILZGq^?z`bGBxyBh)(T3+nZ{qdDcQt8y?$53{#D zU<2oHGr;{l!Q%z|-{>{H>c z*H*K$rkAO0NTVw79CIX|k3L?R1dyT*Jxi1gM;m{P|wU z?f@r(Ua&-U%^#fKMV#h5T*G7M1}uwaZUyp#F>|u1o(%l%oY;9iGlKQmXtkh?`|3E4tI1d5umA z!O{7GM;}(Q0Suac=nM0-dVE7x;m=Q6ZyOF=;BiQs zT%T(l!j&zfPUjfR{}A;(qtqqjYe%9Rm%~0n0sS9WFXDos2Pcoa_jO@NIXGH&lB+A~l0xu&Qx%5=-=@8G5{rEO=FK6EQaJQ-0 zl3lW8Kh7s;nJKL0Ao6)rc;-spRrculms;bi@T6rguqV871)6;)(zt|&?8~+gE4+qh z&xb3#f_?p#{cQ=~^pkg|WH;7eGT(KCAMhy>Fc0rw6CEx-Lkl&ds&6i@`Z}Mm-GHj3O}rCqUTQf?Qr**-^;-Z-;3%<0 zMp!&W(c?q0PP5T5BhV^Es5Z+^wVY6VoETlLOx4vgb(gvlsh_O2=NXOFj@a0$ShKP` zGO1~3=~vXIrIGjDj0{eu6HQ-wHpq_qDXhwQ>QN8#I6_rCwFBhRO?2i3WPhPE!>!{iWWl4cgD+WORjhnGSC{P!xB!xR(#uQ z@4753zYUB=oYJqAh1|=(yo*jhiV4X0 z8|3{M_jCZBP%$)56Zl~Q&P*=q7$4HvrHncdKVl}`QTAbHwjh^>IT3QjXQK(^9FKV3 zwJz&rz%Kg?{pZC}SEH_H7o7__V^KH55X!2R(Wm&}m# z?BiT?^94smuMC0(lY{*8NzU{N_o9-l6;TJ^zl~?VCa@o4l(XospOsP6&3@ywhq)Z! zPB*#aIJ0YL$-_8Iv5PTGpV)fXviqC32<$OBAuYidS|{d@hsx;bZ%I(HN_p zhYk|Mobm9AGqdXFs5#n)Z4NtMqcJDoJzU{k2C+xkTv>J9&z;2;PlQv_6`j)rEqOsX zrk>Vv>DRQyRCaIC8tGq?JL^l_(_NpauOU17L?3AE#A6?!-$4fjwPe*)t7(JK`s=8` zd@{@gOyZW=XmgE|T-IGnmG5Y$8#d90{2Zb4 z!Xu{<+VW3W@a@r4y|KWfs9x!aP3p^jR-!&@7W}LUc(5JSooX>Y`3BxvUTut45)N5D za`sE9bo__<8x&#Z|l&{pfu0 zG3dcTk&*bUrYI;(@!B3PV)lKJCPvtU@Ng%ADD<%B+57E-R!t(^?Nsniv?^N()TGE)Dgz*`%NN0$knaWi9@CrWL&$5Yu;*88XTxwp7)w(m1v zAzvroPrl;5=Dr)gV*We6+TO2Vn>bWrx6mKyPxZIjr?B=W<4b%@&1x>!>r_N8z7#XW z9PwNs9JEl|E2e##|DkkjjDQXEutL7!`p`4IOx0-Z}{}P zW)gYugXY&()LKvPlo$3mI~7FcjJ*RKrW~vlM>KRYx~0hF&!z^uuzHBv&3|EgO$Aw) ztA^E+>L4|YUw#$d)@A&WGRn``-xgTN^hmz^sj|Dl$>^wf+TY<-e8k_H<6-}T6`p{G zzQ-hqJ=prr#Dkr+CmNYvy|kXF-_tVc#dS^Jr%lnu5_MPPR6M2TKaLu*cx3@z$sfeL zA?mmH(i_AldQ)*;n63hepg6a!qgIM#!}X|1EczXq<$zt9j96VB)vf$i25Y-niCpau z;9wi06UjT|js6hX6#g%*JsHm@slTKaPJ0u+5c7(j@ENah2j96f)Pcq#-{?ToV9Q|n z(4)A;35OG>C(TKEnRqK9l%U1`6Dl5>9(oyC7&;LADDaK1ptp%?pJcfVL* zuY?m;9em@o*vXk|ZB4gQEf3c<&yL!~#asJdd!xAI(7RrVQT18~7U^_H;iM$t^=Pnn z`k;xXpn-bfu@`W!fw8<1AK|x6NADL>HsMpehtKdgXP`RO#$)Makcto23lF9%@!Bjr zvyFJ|Kl1aZ_-B2HsHQ1zm5FL??YdS&Z>`s5y-Tt7TlD%yU!$v$)%Z_OBVv4_-y{02 zqPONw`cYpjJBM5XlS!evJ4Lh?SBXSx*ypU0)?XmJ3NoC{tYr={*P2_+mF76}WB3>y zVHP!n#WLIc0sci-b1J{DZeER5jO~wBj5ds#k+bA+UxYskKTRt|mh4Vy35mLz38E-A zdL*mxzS>rAG9*n%T%6D)enH&exIX0dR)ls3 zI|MrTU-%mM29eYA>F=pGh)&AUZ|?>X{Zx9*WED*~qkZTy(#>9P4Yq!?Hj$OuL-(MW zu*o)o=oJ;Esb-%HDlkqYz?vV&nf(l$tqnfFW%%@s@xHZ@ty7bTGJ%r}7(>?vKHemZMD$V9zBsc?3%p54I(zZ|@{Z>cn4e@~le4Dt)xu z+9T}}-F7DE8}wONuHNiUK3&kaqbG4mi0&Ij)s0xF1;pJypy|Jd?RL_hNi9%!yDMMa zVc~SJTiV~#n<1xYDe{S3Fms+-tE_LW&#fHR8nY}p@M>mB^JZ*oY<+BGY$&{q%(3m! z>SRAMN3W9q?G+R?>{QY6V zeI2L~>=8O0mz3~#!rO$;6E?&r#y^Uy6u&wCczp5r__!Uxt%2eJBM|m)^{4vMygNM8 zjS1ATeMLW!`aMC9{{Jb%T14ZwG9LvFma_>HUS2#YVTeZ|@apKCT>{jEk;7MN#8%u(FcZWu}p z$t}lX&0y5rGP7D`tpL2WZ_OIyZimBADg)=TE&0RM)?F(MfAKqbZ9kbUO%v?=eoVw_ zo4d?^tOSvTPB^Xb_@nN0Wt#ede$A-wz2w{N*8{;o^T6Z4-r(KP!MF$vj6!iYLwiE6 zVP{N&WtR}wKeQS)<0XGZf12-*uNJ4Ju_w2YLvN{_#^p(HntTRn&{V>o-$_-FZ#a z*h1@$fAm~Ez&@_U%F9UPIbAf$(S=Y>fs%8IFM&cdgYPZlqkpl?*O32z(Xz|!bM|w4 z89gg9+rM!Vc32Usy}b@wx5oYj;F#O(zipikAOpo`@KRgQ(_#u0 z61m`H+2jb~$yeOB9s4yp;~P7hecI|qrfZM62<>LEtBYduV>4q>K#nKf|}h z`=R%aXP2juXFnaa4paB|7SHA%FqhNdBFE`XGXhM$C;H5^N7*6!4%WOl*0318SYC3U zbFha`tU6rjKlUcHLRPy7)~F4bb1QK0L&T2P-dTzi80n7E3pVc~wJ`kAH(E*TK_>kn zSm0XvB|Xyq0vQ~}shOqyq|FDn>cnnn+IoEE{aE`V_+{78XcIsSGGm`tQ}w-=9mwVU zj=wnuboHuuDE{JcSlocod;m25wUe3XFDJ-U9ybPZvL7^V4^`u*z$BhHHPPBL(f`AU z&u?OP{zA&TVCnLLV};zypoi!2XQnyj@h_s{iFibQa}Dg_iExy!i!dl(DjA8j_?PwB z!Di%Z>WJK81KP2u9kDKOvL;%kIg!WA<7U+Ci559&9}xdi$)q%%WTcYEsji=Abvx5`FR>l8v|sCiD#`S39y1wo?hq-VXn$7=CeKI>>fJj*FvH z?!3ci&hd=;;0c1xi8Jw1HOxh5QVg?!I_dMiA@A6pDo8evD; zupfWoPn2LM%D^d{&I4LJJ;^+?(V`C6$J$q14Q8V%zeLS{_oxoF5EkJ=A7p|=gHsmoX_AKE+JGbn973O!NqGlJPnNp37?XY6iW9st~=|kG+-%Pu!BHATRgwIxH`t z+e~#ik?6rFthEkOBzAQsW9?0xMMNb(Yu!;2(RvNAPj*3#e)uXD)uEK716Dz@HFwA^ zbwNYxmG|xoU`i{yzk@Drg0j*j|{UXD!15oc9b2RaP( z0L$-7^=}-0@KWiT{Ga}ep_Xw6U9F~&hkasdXIbPm)l2ePX-Pyr-mu)YEVZ<=477Bz z_*vSTkD2<=8!4YD$GFhA%dl0SjrG|X4^fhrNG0eXIRi^8fw9+RC4VQf{2LjWH|U#G z`Wo#~7APB(y>PZKbdJSde@DK)0=c|>M$oKlk+ z;Sd>@W`?)=kyJ%Kl6n2DaQi0yq1 zG=4{9O|Uy1D{7#tItX2hD*-*-+F1Yu`Jc{~*o0pkHypbh>&QOWb$kbfcFR8AKGr^$ zym|}!2>We&eMcGROxGj#bLAbi7&nQ5oYvb6uZ>$xBh8yEJH6_8cL!xu!sn6CM&Cfc z@_svf-9Gz$7W;Jb$?&f1y%4)%xaEL(kLf1e$_5$?NXnu5+42SULs#bT8sAU$OrmOi zKiRdR_-Q}LU7dF)(Z9-#)mH$E?-&wMB@>juie@Ugkt!DaBr5oo^7OrWP9(>Vy*ZGa z+E~pBAB%Nyi#Y8XyvUVAyu>Lud$6$^QDxW2ix$?GOB@>(sxOlO zTvbHb1Y*0BJC;sP>8=yt37WXdyM8*;ovF?T&Wq0DX!fR>_1??b%K1BgtwZl8q4j?{ zGn_KG_9CF$$AVSQ<4VF7S?KHzDmx#(>{&4Q&D~A$;NPk1s9ReuO_L|sae?k}n?Y8O!Q!fe57@xnh#c7_R<=Hr-9yEW9~r1S z1cL^$6BnN$l%9(90J; z?K)4I0@b?k?`C-D3hQ~#t;0q*q2wXE?Ms9p4{~>(&YwKN1R~lyrQ^gTE+I8@p%b>s zNpiG)g8mk0_eqBRhTCL1Ul^_#wi;#{`WreKq7AtWA*^6yek6mU_oElVZE~dD^be>< z-i=*$9N!@YuB+_1Pj)Sg9P(8vT^dqrdjah~o%P7&x`vGz>+FKX+?qA1^?QcIk&1RKt4XY-*2QSk`$(dN=p(?|sa>rB8rwGv6$qu|B>& zPH)va#e0u;l=pP6Z6C zHpk{DO}1qZm91^0d(s%$jHlc{@4ypyAY1gKLD$zdj5f?QEF@c9(Kr_r?l9AA({R&B z(|%qhOdpL~>4q9?+-8V0EYp|N=hgopBIzdQ-WYGEx@R;PpLq94wDB=l4OanZ-N4nJ zDw}PtbFSU!7FRfT^$@P z>@oJ!_FQ1!V{I>U^5#sIW2M)=G3z0x4pLc*bmz( z*aB@!Y%lEtougcPUAU-b;0iyy+1pON_J5R5$siEGv^98>R2oBs5G(I>osGnc9CCM;V>hCeAYu$EdVMi<5 zpq%GfSATu|RV%xS)nz+p*V`{!i&D7|>Kv>%JUg)a_RxQ$sOdMaJiZ(Kviv6b#rmG` z?&mee5@D%hX=^!W_A|ZIUzDaI0rwJj9iY~6J+=kr?9LjRbvdh8c8lyb+1;~eWLt7v zIa6)j91?Y8N$8y{`d(C2273BSbMOK~h(x{9zeD@iGX%-=r~=AeuuS;dQ(MHFno8#T;FSq zPdUGeeiMD3U|L>ro;xmcRY_2i5lHV~q(DciXk=YIxw^5r z*LJ@{f!~5Q1o;M)2|5^DHFvkri=j31oCsMF^dqoe(9@uTL2vyh`y`vM7;?+2$XuOu zHgL?aTR~56%JIpro^>W`an1-^P5VLHM9`I0tP%D^XK$sZXQQr!>}$v|wlOzgN6tep zulAO`4_ac)ai;C2)#g?BI5*8_Vro~|cot|y>=mw_W$i3a|JwN?uMr6juUjq;zG z;5jUTvNDvV{^X;<$5`K(+qBX2$aKs!)l}Z}&^U%p;sM6a#zw{&+;bS~JBVD&YJ8zl z>K^vodDkQ7UGS&%sogjRQhFT`uO#br>s4wXrrFYM#qC`{1Mjgb_CXH2V~les6+}_= zp+10KIEu|aAN#B=nbN+}Cp@i=#74J+iO5j9s|%Eq?j+>EF6^d!T3u9Lke7PrFGoe} zq~-MGsYYj<6>=WqP16+f-{yPftCn%zn|;gs-|=7NALbwL7wx~r|BQcQ|A~J4sZ|*3 zTgdm8_e@J!(=@{~eWL!R-Xq6JN9mSv0ShP-t)J`+a?WuKB%*Z79!(9(C)+1z|IAr| zxNH^II9Dh!4j*z7$;xeN`!DIXfQIk_>rj_C*lKwp5q4jB8TsGd^l!UHu3DT4(UHD} znV`ygscFg}tcx4iR8z399=m#@k2+I7*_;(=h1V0W6at@pL(R{3rjhearUG2}i>syL zM3)EXPttj`h5ik(gC6oM@I_1Ts=kq1UMrm=*5EDM$$bq0iB<*w^(~p(yC8oeh@So+ zX0jVOa$WZkf80xw>DJy-HWO4z99i?q ztj7XyKDWWF6{N0tI=QY;YK#`RYN9u4<5>o{C%a!0(UIvnU6x47K6>{CfLHp7PxV|W zN=8b?qHl$79!ajvhfH=J?mCCeTQFFMSS;(NaKaPD_lOL0Z9KUm_?{i0qz^q5#ktm0 zt>NuDz*l?&UvUJK<4h{=pQw$%t?Z(6poN@es%{!_$6oSzSq5Qk1K%28Tx7gwd~Up9 zd}OqkLO^NPCJvN>mTYG%WV~hg&ETe+>v6jKM(MB6-FT)HB++S(F{OYSh{3yF2XZC} z-|!9o8F_4XoO=M#ukvnR;#JLv09?X%tcI`M5byGv`x@G*7}iid^|%(B@FMykx=kRZGqcMunRMMkb0HP=&>`O0lNc2@?W+MV9b<@tAmYGUTc z@$YOPL&UlF0car~qK;8y^K%jRve2WU2tIvlx>O9}|5fqe0{FZu9`s)F0uMm0WssBi z=PZf}aB(pynr^=z$l)($_5US@e~D|hKv{8Gr4DcHJ~C!D@u0f&F)fJaT^f3H#IF}9 zp)hhid5MAv+}Yp!E6##w$|{Ld&c)et;v~q0?3xhJ8}Fq?;LT3RZ{+XPJ$Sukz^$$VOC1BI zH;J7ROa!DWk&tf01SYU@vZ{~pxorJ^9;I^I;`I^XQ z>14_6(A7(N0Y6U0KBxlTA(HMzHsZl=h_?K}er?VU9zYCwAIQxK?B)9G$Y#*8EKz|J z@O%%6RxV-3>|%$e5t%a(uc^s3_1OjDw3%68cW!_fNg%E=1>b8ssGqMyvr_QZcav@F z1lp)RR%Hirn_XBlfw-zkw4f6!G?AFkQReoO40a6j8V5S?GRCkRiO7bAq$4U@7|z>Y;-TFj->C3B#0q4Xr&+97Iw&Gs4<7kKz-y_9__50*Wj1d z!tZ6k3#S@KP5ztQV247T`)-^Lul4Xfd$5gV0n-;57FTJzlQKuplJ0!W`cd zIjs%#)__PkOAPr7uiIdL#A!_5*vr*;{qLO77Epcw2vdP$>&u)UA=l57tGGbdn{#*& ziR8KC`E4SIsxs^d9lNb4*H$GiXe3T1&cl7md01b8R)fwSe)(99P_z%3eP!>2P|+(OF;$w4F+K%KcgAfWT?}W)fH#;iZkK` zPmG5bCvo?A%uSrbD9-VG&AgTp%~+4#5$B2-kxfxhRh+xsnH@C)$?%_ssH=e7a#Cdx zMD8RLt-Ti%VpH~sI1h0h8T@8Y-mUt>FXf4oG^IQ7cJ_ccU;9I>_(L09t>bopqsa|fcc^Vr!Z`OX6J(33%GG$(`F zP{TU5=ALoPcOsvOE=q@ykZtIJ{gdobHnckfb;pnepj!^{EMMvp1V+^YQqfK9bOk7b z3HVq0kS;sWvg<%qZ-FCb@%IJfObPhBDttZy+AT%rJOf8Umn&`mh|}#&JV~5>TMj8A z&My=>r624DLDzUX3?JonjNg8twTrXzL*U+{tk(kQ@E3b-5+fhTZ(~@yjUcYJF~`NA zGw*|s$Yym9GV{@7-+M5}zI2@sCvM$@YgUmX_#JK^h_s18ZbYyzRnF~+fPV&J$@Js1 z&V1I081hKgdj}kp0mf5Dcjmh6p~5^>bP68C-`&|oZq6<9pd<644aDqn<2#)J6TA^= z^$?kOleneIr)}V_qu?(JV?AVouF-*OwPDN1$W<4yV;i*<&%xR~fbTC7)7r#S>e08k zJe_!op!@7ZKn1pPE!@}_o#e#I`Kb!5bQSm}3cXYVIoJu#>Vh=wN`*yn-G8j!R!^d| zfNXZUv=@r^=c-Ti^L?hvEybfVBdOWh1G0AXd$jma)fwkbqnjjwIq`~A!dTN;b=E$*@pc)&24=&DJ z6S;O0JERKy83#u2cWBvONx@yIGv1`4R--1gz#Q z)NnoJogZ-4YP7~EDM-bJ@PkvGa0@b&c7@0Qy2119abSf@-l>-Y~_jfkZ4!o&~5BhamsWf zBAuQ2{}4_8wP7rSu{aZu3on_+1FjY4*8I*h`|#9R%wQzH_kxa{p#5$*tTDTFJkMJR zO%GtBT!SC-qNhd?*Zh}%yMPWIgEm-+tlvz8%r4acVOR`|!WYh9I8W|j0haneGC>W{ z;uX+y)FF`-vv9tH4;{u{Q9m&n>_kmC_8HQ7K6BrQ1QX|t^+9VzlO3#${oRxO@fUlx z7i(RfU2i5Y=7WEtB4hKTzarq0GHBfz*hV?jQ+y*{e~$X7YrJlO9V`awNX&3Qvp$dY zKAm~CC95%(j7(Q{*AOJ>0=Q@@?^dO6_gnnVv&4)qAW0_ErK|zfH{q&*YNuSpZZg0x z#wuy267>mktFBbK6UDpE5JA1|%5hb9mjHWIlx*5$_fRs5bEQFs1?KNw(LU>Z z3i=%I?&ICqJB{kPJwAPWi{tH0@elN0;M>nTza@s_a`PKn>1)#4N_6L~Oig8$^MqrX z{jKext)=}v{^&IOK3k%-vvr`=YE86Naa?g8abH#s(;q#LydQ3kMJ^STmdMo%Z;h4A z7RzeO2I|Dum}i*&HLf=l<7}-Z`rq{HEFtuIEj_&4Ik<)qzcU%EoV6Hyw{_Ah0f`?V{# zYmu|KhC&+XJPcCgp_a!O=B(wc#7|G>9OrY86+K*y=n2!;X>|;7IPBN#gY6~kIkp^I zYx_V)Syw^zn>62);a$;xQowuv2*0g9q4Z&B?Y}ATeej7~XY>3P7MNEFJ(25DKws~? zrq}vLpi0+r0#YuCdMh=-{ma?eK04>quQi!lGQ0jVWnKKWH*@pP{y$e{CS-lFUU8In zXDKs1*L4GsOdiJGL!INtR+2EE{o{G&&qdR;jU`szGc!tai6jx@(JI#11|%TaE}CoSp{ zfz?cUZwob2VM@zlcz*z zvCxINPX(I-I`~Yrd^cs9>RTRpUGuKwt(q?wib!vj;;!nBeRLyfYi(zpY#ndaTNASX z%v$(s^DqDGM%E#8b6E_+ttTCEE<5+Tez|X|<)ki#o94&f8-3^ce)1k~X>Y1+9Aunp zx@cA{>0V>K2YS!&5Xf0J#Yq;mR9Ikk}ob7aXb}e-z*nYQG%^8&aE$e(%barG; zPis%xQ|i)7*{)fWt#@q09X(tnltt%%jjuS+q<1t zX-iY{B~z-YwfU6!68Kyn(_F(L`H9ZQGfnC3zTx`n8t(o<1R%q;+?ir;XB%amZtZA$ zV4v!2=-#QERX5Pfa|N=$m()Q%pdV%IWmYU9-mSb7ITH7}d9&%Bu?hH;sm5u>qV)Hv zt52qr@bW|ZvP|8-!`VR=dIXMy5kJi1m{Pv#IR`;O-ye<{izK$^G z6j!*iM^!l2qBs`GD{u@Q@b}Kje{$~JT0?^2F_qOeeIHOT-f}N#J7}Lo^^vlLUPMiu zWHamyY|E@V>ysQG>niI5+ZjhscW;kbey?}YanxBYnQ8_dm^0yn<;tKp&q+#pRA|9!7Rf^y;UAe-lMPnv7wf!sd=ZlzSaXO&CrLA za2@ptNKkQR(s`~rBOTUt^yE^D(xt2qxy&|1hlaV^yPJ~LJVHdzm&&T6ROHnmda@Ub za}V7s^U8&_>_g%gLfu zMPvR&XP;-F0^*VGZ=4;7_m3jl^uwJ++|5n|inEvKIxNnJxrIen#PbF;QaT;k%hN$H zMb_(A%U7@#RPd3@v45_Ev3vt+wU_*soM9f$)F?quj2=pFe8d;z&l_WBb>^IhD@t*q z6y=CcTp^w`8^0i3{fqd`RNXPU+!o>VBP*xe?a0cY_o2r>7nzKz`jhhUA z==aD~sr)JhLU5woSAL1b(nc3gL}n3I%2xcLTxyuo-Te`5|JAjK9$k6xJG!a`)Y*#5 zoj`AZ<5Yxask87O3gTfUVRgB&O`GCd)WC0OgH3mVs6~D0H~2grui*zi;{&1)rx!Yw*lb=%<8`1&W+2;YYu@-w_^SkVF1>k)bM5DKnCRL~VyO;j^ACRY5&S$6 z3sH!wbSHL{O+NbzC(3Mrlf9^68H%6$Rc(XKGYcy*0J}X)tB3B1{pz8+t(n-$QLMLS zlAEftIQ-o{_-h8TDoeGPYQTGgfXGdp zXfqhBmx_^UgmpxIGMHsOMq%UJtP$ko#aSnH){ay(TDt%(2}gG$lZv)i%1WANW~QX9}worww` zA-XUV2^Npekm%TVo;+b+V#b}M{dgOj@v@3woj1jbGr4NXbd+7ZR}Z^;GtZ#fB9zW^pojX*{a^#F(r^j@}UI z*@@@*8g3eiM;3(MYsk!c>CjlHIDszOWOjlxKSBbKQ z^8_~;w2F+Z8_^*V!F1!lt;7pB$<8Q3mDOf^uy52l?8k%ZhaDM-cUF^V!2@Dc4q_pL z@Q02d5sFbWA4^PS6LEtr$bn>Hd!^JnoPQSw7V!XByvBIlqOx`myw;s)O{gkjg*~a~ zsf4Bur=Rr(e2V=%aXoQ?O?Wh$hy$I1f+d(wLwto`DT#<)BvlJ7iM)s4HHBj1S0$2F zgE(*}ysB_I@$LozeM$|+Lm0+hiNiaa%l$uq1g(tx66Z!u!vmj*&)uKV_lM3cc(*i? zEuI+3N}>RB*wwS~sITMSmB-_&NCfsXF%CWUTc z0IlrvHk$tvM=WRu-sv*?z|22W=?KH(xh>yCdEj+d3fsYqc&v8oUuFjDdL z1i3bw`hd;MxHSH6BmDd(#I?JDS z7W8I*kS(pi*7c_XzBe@<4aovj!-K0qM(P%^fOK^@^KJugK4<+pvF^LrB^_Di)~Eji&)1XVnoSm87R^RY@~yF!D-O(1D@bdD&P)d)x%`@tqDDmFP}69_zN5-X?D$THb~oC#3EbB=AfkU3b_;R(9)D zB2@W3@zjY$sAlyg`d|^WsYCX%9C*HI;LIYifaAb_E(8htiDdpfBwl=@wZ zoNhy^d-05ma!36N{ak%NeGHgrAAOGefzw)7Q%$}LyyXxrX50?mi{Ol?c65eLReI4g zFpYfaVColEgAL9DV%b5Dl{)Ou{vd1q<^0hG#Ju0D_27$PXo&HkF!z)DXhbGY;M|5G ze@Y{XUJ$ikL7$2K?BfYk@SnglKF#Ts5$Y?jG*`gcmc)Y$P`7JnKp*b*BDXcw_4JTV(9MzC7~<&XGRrWCI+V$V(&kw2 zNMA4C_tet#GWnY0EhoGu`i%EJWGQFfZz^s+VSZx%Wol%ssgIF-&;ngF-Sin=zpgx| zqtkry>5IUJym1Y34R!wQ@B=T}m2NjbTnXT7s)C1Zs_ayk5JRu*Sw;<4wpvEFU3ww+ z(~qLN#3eA!L$N-d$uat)`b_H9o{}Aj(^u1frXGBe^p$w6IAwe?(X$S^^W+%ws6#b8 zhlN^;(uzg7i+1P$vaTD5fN`M7UGA}1b#15}xu*4?xCsZhAcrs$O5o|v2R7YB(YahJtFVck@#YFv_TW1 zT^Zz)qNq7N1g96LU&C4=jg8sq71{5(kyfAB6JLn^Tu1J&gi8Zh;VD#+7(hoRve%}A zd2bFXr4c<{n!q_l!IgX5K_IASgGPO+7+C8FwFx*?9XXG&?6YZb$|T}8Da5S$BhN>& zGxLK`-^0#}Axb0a%Fm!Bmy!D{1!BFFvJC`lEogL*oboa7$n)Hv-MvAGH-Q`cq-FH@ z`6OQ@X8c^rXXro&w>jQxyzZG7nbw+&mi_caoawy@yys%`eDeddgZ^|wObWFm>%co! zl55i?=)Uxy?vwh(JH^F))y2d$U!JbxjJaG)5ZbWq)aeShu?Jrk8WfL6x z6ckPuF#ko>DV}juHtVG8MB`I*1F4$b0+PsE@6Eo8#3I>CH|rsKKm8Fh+1sgG5EVL) zz?xn}lHNf>&qAwpr`Ja`HJZ_&NZ->9ZoV>#n#*xm_B%oKy5NRHaC`Ce?P$$@3dQzl z3FdnQ67vIf!e_{4R?zC^w`1LfYL@$W>Ou=+_kNTI>aTEmZzuf$F!;lWw}p{cKZ%4| zBA1poV(o~N=@c@RJGft6&qp$L4Y0_blK=Np63BKgrw-s5_IDyH(1O=^teY_Eo!TRv z%HU%+rP|>%RiYh`4_8>bx$OHrNQHO2a*!93k+zS}Y@*goyjmdhhGH*>JjDbuC^g8a zgpqgMLd-7G8JjhN$1tss~8aq)Qqu+X_tA2DiJmCl z$Zu+zv8xPZZAxQ<{f=ey0z04~ws#ru6Fubw&M@}nB(O1blhPAc`;Qu`Q*tr#x0i{% z&L!_sL`p@^z98dpfQrrDj35p>=`+;B#npoSz6Gv5M{V|F>Z)2`c}yUa*b(k3h`trI^~H5J$QbQMo`oW(#Mz`D z(dVh;a>KzxBoa&3Q^k-MX>bO6_6`=B38}jT>(EXt=P3w{_QY8KK|-v99(J;3$Fb|S zdWJ|X=(E&O-%mPCj`Nnh#MIqugVzT0YlDm)j5Av4s5ru_wD=RN0DS5G3iOI_=TMh1hf{DLoc z4#{n%GBe#BspcbZ;i6y0IQ*1I?1U)x*d#fF-ab)S45hFOVmTS%??EB(mDG#fCS(&2f#O}z* zljJz2)1zo1a-lo6_EoLsFpiwSRP5y4WF^1qs?m=j0xj`Cs*iNoM~zN2XC(ZA?)ZxR z;!9rkFjG@91riNsopG{ix0xLB)_SxtsxTX=kX@3H$3VbzOb%c-ph}HOW){4bKka zrx|~XtX>)NXdlU%iSs>&5O42}?e4^5h@w8HC6PdHcE)QmyP^+MYotUtIw^`1e6GlQ z^t0qhP7j-+u91?Br!3bk<4tw-BiRS*3|-B3i($mo|hOQ;iRe4-7T+ zwW;V^kC&%nbuaYX!F~;A#1)8jzf+Q_ph$NW!y?(O6jLWCx!oI_pNW$`a+QJ8^C^0F zUDs%06!Vq3SSLf-mjzjsBz3c=JGyu%tLTHQD-VKdG}SIIso?L8rFcm0sozL%stZ($ zY{2V0FSU@b$?u4a?50BS4%(zX8oiWshz>13@ifF4!7H%Yzakm$A!Y9%U-uvjdLtV% z6)&EfgQpb6PFSYCR+n+YYzkgMI2vX=h?g4VUpDiUhGbFlkb}9#InVWoR_uZ&5<%y* zmk!~-H$V%9Q)L>D4ZIu=sgmbCyR0#GS3aI`pOfgvq2*tJV}Iao&R^H~+W<1|9nqzO zXegDl-1jNF=#N#19z4zP-|NwzVJ<#=9By2ChSy);PZ@N+zpX%>vjD&JF+XM2R_ji!GCWDop8z|~f%Jy^Rj{Pi0Wpc@i%0uhsJqA!s|l7jgu&)vg3-_$U6X;rdy zjp5eO*j0DPEmpzGoS~r-IGYDw_JpQi3KI8TgNE9QZ*Iqre2>jA5zXHOuUDMG{0F+Y zKX%^_5L3m8A=ML0P`mUM&G!oo*aR$v9ry=N zs9xElC`kUl@wxuSiYQ1#Z31zmJdAT2{?&b|9mL7Y)948xGWj(?pWSEWH{w6k!+U7N zem?~gVFmzY14Y+ zSba3TAG;X(nbORCEHBK{Ox2AkhIxkE#vY~!v)%Nk@jg|!ML|UF)-RD~&}U89bt8P?QQ85+gZi5uttsHj`a80FRHO5tiiqGoQ6*Q)6`*mlOE6cUp-BK9{9&C_% zzPkaQQE677KKaF1GUav9z(>)gUhJq?a-PSemS}&Yem=dTet~h^Cr9X;6Q{Zf7j&Vf zH3~bFhLW5Ue2)m!dSVLgu?U}V4{@IML2TvqXv=Nr_#fop3(&V~G}MYA<60OFX+~C^ zb7b&V_Yj?13H_4kBQg=IIvTXVLwMp2F(DI@y*w5|K2EYZg{8Hbd#=KhYX>L22FEmt z(`JUDTlMtyx*P4{XagCkw)9un zLoVMRPrnHq8BGKx7!DeW*B~MaovD6r%ATIXUvsbpHZtOm==Ve-FD>D2FV^4?dGncA zVymzS_9N}~l6OuA3A2Iqk0kfo8EWdJDfDn`AbloErpPO>tY?B1&8^Qvjdef6eQHZi zbIM6|>L(g=QpG{K_;#Yga?#N~50OfVh}#w79QlZ6PGya3 ztn>?FZPUpwH(|d_g}aIn9aza;@AIx2=7ug$9`D}IQW;2t73uaLYEU_bn!ba`SrA;bX=vhVC@W~-)m7b3%Uu`3H8 zcMD;?&A=K{u{XD2)B7QnukqAcRLoQayVo13+z)-f40~)p*WAZS`-$J0LblzH?06~m za(krL6V7rn<4@kk8_=`&J0NfB!tJrdf12`_I7hxQc61HUVdL;i+akYi>sCqqiCS!- z16Xk?MU#lI{E!E8+RkWJJym`rzm*%INxy*XegL|)BlR{7^r3iOKe4q7$j9(;i-Pj} z1hytO{dv|fkCvc-cB4lc;KM#ZbH!8fRh8Z)bLpb!Aai{XD>i`sIIZw)x>EUb7~d<5 z$jlO~nR3`evFP50@Xiu;=1kTp5+u)lqPT}aZI%QzGMiY;UeFDR(j7X%ZI@orL7)s{ zc~8{m44VG4+ClF;qqiHPMLR(bx%ZVeM0~xzF*=Mr`yY*y3&I>-QSjcn=H>*)uq8B-ZvbIL(Y* zZYMI44Cj3XgIn7Zhh*x3>@LUd_d$=$W~CkY`LDUB%Tt^RR+a7wQ;>}%utkE1R=m-* zMe@xic5;EkU{RktrlkA)@1zRy!D5?7tCmD zgSyFPhn*!td>m=El}Ou5W}3tdbC_RIESMNpqcb+?UtmuTlA|fa?&*RQo~zqI{B;qM z|2#OrcgTTnbgM}vz8_9p!bMaz8{X)Me6PtaH-MXZ!;asGg?K`D72f=d7)VuSpb$yE ziWHxQ*F2a!!)I*$HN;My^147iW(8i^4J@FQ=)!&cCWD-+j4dEgvNGe(#gEY4>&YIPfxEikm&C!JH^Dju@!cG>&{uGNr8R3}j)qpfi|x?>|I(M($Z^m> zZ;3nI1mAoZJ48hCH!=T}*d?p^>jCvtN$mG{qThu<{|p0fCGvPytTH2Ub8qO@iB7aS zeAT+SMdj`sH1BIX)56g1cMWB@icxQ14$rWl{zDh7 z#ljYy<~ovV`-i;IP^4KL(Y`_C4(c(UFl5|7c6S%-jFQCjD-a=Q2$jNhSSCT5KP5EDk^w(qQ7EM__;2tI}~|5kNtTHUJ)3sE3CqMqSKjJ zoVPV=N$lmPP+&crv`uFKDA3bddb zFFJ@b@m`{h3NrTm#K13VojL>1bYtP!&iHDDxlbQYjy(4He|J1!n@1JV2JLBQ=n-p zq7gIU?bCeg0n+;&KNrZ6>}6&bpy&haCp#4>wLo>6$aLuO;5uve)L=AJXYAz3n&mte zO*N2}tjKi{U>i#_lMakD4C<96uh4||OW@h$f`SiNi-)YyCoTIDOg8B!&l2)gATvMm z?D^=EdF=be?7MDAqHcUD*6)gr z7!ogm{c@J~WTase__{YEUd-AqB`!V$&h8KYjl!dvLyq$wq)Z#K*wy)FVb-=Fd-xtB zyALlW^E7X441q^CGNatc(H!1OfWO|r0cYUB+05u2^zuOS&uChaZ}UfU{lK>0MNaMj zT)T;}--975(Q{`6rKfz&t*MSj)R);0~>`xSQ&X9de}jfLwSa6l^e4QG{lBEh10*UA(0{QrNe zoWZzr_zB@Vq1@jHH;BJ;Sf5Aio0H7*6*A-l`aXyhU#411&mfts6PM>G7}?zz+Fvz)&;3D25)#M<4fo6FY$mn zvOdFEmsZGCGpi~3u&T(bvhZSgC}M_>#fZxxe`@khNiDOH2rV<9ZwB|i%?M617SKUR z(z9p|(W%uAg`V??8SAMTTC)oG&&B8kk~)Q5_l%uj&^?5^g#5Y7XrA(&AVyq+r@e;H z9zjtRNz#?^jo{zr@XS|MD;b}s4SOLs+-D+poB}m6pg{r@;_z+unBZI!`__j@$Q8!- zg3o`lI#%wO!yZ2a&tBqr4pt~YtNM^w!M9xd0Ui|nLGtizU)C;(xm@P`&z=J8x!+mY zPH>YS>k!JnMxGE1jf4kz5Nm%B5^@{!zt3Gvy!%1p-N%eijLglB_CvR(!_Vowgca$B z#o`ZzW#mdWGZ9|P6aJqB4jI zD!-j&e{Mj_EF?cJs(kkH`5jG$e}U(PPy2zpCNTPBIO{G?6a4s;Z@z+xFM0n1yWf}j z*J7sm*m(wIi17D9;G3%OXntr_0FG?|w~HPJg;}L6zT@JXLM|k6e;-ysf*L7soTx2p z2zSPCbu)gtA+!22s~9A4V{EY&?2zC2tvY;P0*=dMRebnPRjv>|pvdtBu-X#O^@d{p zJi9F`(URRF^p}v$Ay7YzH4xJ5D04mwt&c;Smr(pBe~BK(@$lapMwbOYM6u5*bN}{8 z)GqASG3?~#yc3P=6#USg-O!Csy0J5sAy>!pp4gqG;D;<$Ok$KG>-dO!Ca}|~%uEdEb0Mux zpksuMkj^d>5$Jx*R#-AVNR@ZIp9CMs>{VewZAZ_ZhRcqzzGAkIS%W=T#Fya05I8s- z%6P+@c0T>c_X41nz-@@QuZ7=BKntPa65+$|%qkq+8G+=j$O_be9-WBqjppYMXd-GQ z+Olp97*!+a(+)}&WEP1$Z7n|GQsyRXzFUlD4_a;^x^4$Nc7r=V)7BsjTR~XjZy242 zbq<2Z^09iM%-w;s7D$yGM(M?fB&37rN*xa+Zz1bm!x=g7zmOiGjIj=DRtGLE#VkY> zl+c)FW@Fp z_^pg)lUSjKot<3|+BW2Vc{DEea5te3tMF5V=Y%uEhU}Q;P+9bNuE6uXn4O1t-sdR? z(a1ZoCib%WyIFtHomx~qt-zXGuW{gQxa<}DW#GO-ZVT(A0=dkZtY35P+yQA@mHjN} zUkXZA(j-|p*9Z;UijhP?IWfLSzR`gFQUV^##R$AuEf4bM0^GMoL*0$V<{yoZFbSVv zF7IvD?1Xh_pGVN-2z(mPZg|7YGc^ttt1Kj7Bs>_vjB`P8JMZb4V<_V&2IWdaF@K(; z=kF@W=%T!$8AnHEP!wFU*V~2c8~`v*&AJ#%+;ck zyEoL!$J~ujrzGQUh6OnU5AaWSRKVWPY-smgvELiu>P#&qckV*r#bo-_um1Zi5Ri z@jRK)sz`}szG>y@1>u=c-g6*xiZFh`=MomuNA7Y5DsF}=*W%+WXT>i=&r2Gulc4Ku z?z@LQB;>4vdu6loZhrp>bqhnIM#$qBR#%)dHRPhx#VM(H~8@jU-NXXb&dlNIc+un0ZO@h58`Y}{b3&&M3TLW7IQ zc+sc-9<)y6=}-AB33<|lJ=>L)okOzb}$dh`V}xXb8XAXmS_k*|4PA{0+YKRwr;eHR&&OfIY(_iv0`ZN*Pr zp4S?w6{)SmMJ=+i5!wie8U~+4Ks&)XHMzDu@}~mxQnhvS;i*YbVIQk-k0-kLjSw4cyp3|qB9OKC$%;65{~Mg}oN+#cCTY#xoOH|Mgf9N8!19tZWwRQW~E9P1`kX`7MUIHe^0F zM&RUgCp>0iw~G~5c4xoEP6yqbiRmjN?@Q{+I@d&WBUk|UoeUwW>SwChVzW-JV|Kou4un5 z$dk_OhdPWb2L7!HkDK6jKgLml>nduJrXG}O%bw|g#OjJRtjsP>XSTw&7QW>-DD#|A z?!*S$&8i7&J(G9SnV%iL7P4M&PC>Z;2P1mV%D-g)??Y3rhp?aCPJf%ecgb+g>(#t(*>RXXV>^cc|Uel zCY+kg{hq?rg7*}(XaFMG(B#E8mC#Ohscd=o^pl1Z$Td~##COrdmgw;P(_A|^25K~p}mNF2nyHX z@3QE6vFkpvdf&Jz4`T^{D+HE$3Kn2XW>thAVQ)u30kNY?!&kzmtiz7(gsn7ytJ*=0 zddNVlwtv5{t1|d4O&gb)nPo8k2h1iO86x^`zkxEs3wg%A71jFr;Fqe*J0E&XL`8&t z6xHZwS@Aopt&_F=!Tbf9Tn7yV<)WZeG&2?Qr84VKj#p_YC#>HPeu7wiApwM^C?W?2 zXzGuyOM#xDSV5(rQBh|99gYcu{#m@Amw6RJ&nH3?QH>>d>Jxtn31Y{d`rkT-a(6d> z+qh3jMk8!_QKxR+tAt?(~jl8sQ%gtDVJsLf|G9n#A*rThLr+5+Tn%X?~Ef zxJ8$da_p`5+~pyokm0<7Joyt(IR}MbF+w4$g)bZmb;M~gM*bC6HINVP?tq&_1xOCh z4q#?QxJsN~AhX(`+&O@m3ce`}Jxem`q8c4k=4v21`x?pg1ddE$ya|lq2^wM&)zp8H zN!$qM-bPnkWDh=J%tGGVSg%)%^bzxU2oH$d&_`(hoVlw=T!~Tn!?&H$I!#%t8az`V zLxmMxA3kgWABqYU;UP%GG-a+1VvXN0*9^ulUV*H=_(lo1O+*!3P*UQzNWR+|E-uPn zpILV!R1jw}72pmPGAGqQ{rp~3F#`{t%sG>73olm6)p>v3P2qjYhi#^ zg1^f0eo>zG7Jfd=jNe0df36AU9v^t`5cCih{w4U}E1!LaCI@(Z#x@9qzFzQWBGTkC z*5_`x;xarb>`EhA@iWx<0QZKmCyKHLA|m^WyDh_3+6PxXz|Qt!U7b)@4~N>I>0PM% z4O=k+YKU$DQP8a_G|0>D&&_B{Fy{YR4If##MCcL3E-MTz^^Ce0*A{2@m1CX}tU+O} zHtXK8pA+$5eW3hHURiLKUbE~(CBNuXAbfC<^{;_^X~`kc+W=hbHlSU*yd^ zaGRK&j=6-h-m&O|mRgLe473oj7X#6d@9ff7jP5f2^I_&Itd|VtlmR_%X*)+?wuF_P z3LlC72OFWtR(8uHo+IMKIb3-c?o^?2IP3h2_l0#O;$4DLI(Et@s3tNL!ZPz<7ZhNo zVxI|mhePi?%%B#W7X#PH#B9>|uE+%>YT8>^VdXV$D*@m1Msi0p=V)|DWvEaI>ih8% z1I7J#dIop((KLXFJA7eJc%WfnMrLFzKcLV%cGLxSO%iwi&J4ub-(qH|a6)dlDI4y| z(d4bL;zT^5AhKRqXhzn~%y`A=K?N9-@PUMf@`VvQd5*9WenHKTj6F@GyqH%L=w1hk z3tEXM1aKdfXT-C2kMnu~hX?X`F2-1r8CB=%(%LvAc7c$wA~q!QJPM-;(Vid6{t+EV za>G9&y4sNc=Vx@EdA`_96&PCu_(k+4Qdm{t0c7%YUwA14uJS+!fy(*8eLt{IuCdnQ zgqxjcL%{>jk!`EVsVvnpQjZy{(2J*d3CrdrSDr$;{6LtEPOc&yS)OtCBG(5{opB)yAYnuch>wKqd(3Z zQ`j@_7|CU3y`OhP@QzQOL6g^T?g@}J+p%oV!Yw};dp6$@mhxHbpl#6N8Q*`& zJFocp#J?iLCb+Nw8b6IE3a{iD6u8E|dIn8| zcU=Jwp*d@xmv0IkBO+M8ut0kGrn`o#o5~$}^Y=jV!Tms9 zxxfsZ1&eV5e86RD2>r<`Phm_;wA|xMsMQce$5Wk?+SB{sI$u&3@)tEg)#(ua7PMbG zPJC?(=I;TBpj0pnNkl-4vvSYiEHR$ja9tXbPs9rs6)L+Ue22RsdfGN z1G*N#D_PG<)?rq)8GAvp*#>ZdY2>jTU>sTyGdhTjF~VCBJR^eD>w=xRk|&P9I<3Ke zD#SjQS@&=3+x1X&8WGl2{P22bUC{rLY*_JN4Wi>#T*Du})8jV2NC=n|TpRh=C?7>}Yo z93ko)`|#HYBH<%hyVYQl4uNd=mwel7Xg`@Rw#w&n9xLedI{da&OFUJb_1D} zjO>^~{o5D1MP^gQVxbnyhaQ9C>vT-1O3(U#jD?LG48exp^sL_nw)i>gG6jxZ$6lRAJ+)oYgW>!HB1`OwIAq@eIK2XE zc^#?bL*0q!j#->H+$B zf2Bfu9Ei^$^x?_xeoIg6`*aES=4^`^obj%>w<(puQi_u@FR+eQ&r0dKA&fXD!w&pES$&L$bgY38-&8Aczus^^(9Pi>+Lq94CqN~D8EI=#~; zxvwg1sa%R@|LAp9J!?VtEuw}&oF?8#SB=U8c#nF#;!-u;0kyyK!d;vCg1pGyx}Gf> z>TJ$7BI%CQufwjbOHGMM>Q8QbB~-7a^(QUq zc?gG$1f{(cTw@}+em%EV9AM$Zh?I_hx02Es9b!9W()^2W}xea0d8uBu09JN`mnOy&={@B z@~=f2KcZjy52~JrVr3*E3y*MJJurO78DD)mDOr^>%35mfu7f`Rt~eNFDtkw!AIvo> z!G2I-Sb|j_210WNXxQB#_fCO^eF(2LLK3>^A|}w4Rq!oDzT_G8%AZk+9iK*%2u>Av^_g($6(oSj7tDaWb5Vy855~1Ff0sq;?Lhi%0jcDq z&SoXZ-*@18>u~ywOa6`ezed#le&%$XWBNAWk*^v4H58^xzn7t;p`0O~Ar;G@9UZJY z(aCiaeUC!)o8P zRrbme{N$mm#yzlDTbb2KDzbgS&_+-vcaq(Cos&n0Q>iqYGs}7~zL9iF>n)9^#_SNC z1g_G}@iKJ{N9ikJ<({SS!1toH7IWWIVE8Sbe?hKV)Mt!t76|%&)R3G~@^X58a}c=8 z=`LFm)NO6*k>`Q7{-u1R##q!rccI2&Ib1N0s>s3oKUtCJ2=STuKIA0GDCT?&v~eUl zQ)ER~BXRGu{^-2u?Q$30w^XTsd}pd$gMuJ8mI*;sW@(=hO#^im*TE*%2?9>5fyL)3SO|#XJxc zZh!W5VQSg~sZ8^tH``a%wkq`_8~FZw*0d{@#2u(N1%zrWPx%E#_6SlpKNa|UsUDvV zAC6%Siz#+ay)sZq9z<8DKY3T|;+b@3`iJVF2UIoYqsm|~h~lLn$JbJYaDeK9WBh*~ z6ZuN zT)z`tY#~M=I^-V4(i71X(e*Vy-o+$z*-y0C3#z4lz~y=923!f0?g_Bl-|0>VlSzS` z0x+05&bm;g4!Qk|;Px}ekBaEeG5D5}sqW*pT~qG#nNjcQ%dny0aM1ah!4R^4v2 zb%tgMi$0B!V5djY6T1^NW)bw*D@A|d0@woI;EYvpdPTggYuGYdpy(20&R<|AMJ-l7 zbj@ul4J%TE8-@;6*_|(`3fjWn6gA)#sR|6IIl--r)a!&6)cw*D&AdJ2B{i!LAf zZ8CL>JE*ak$X@&p?X!IZ zs=$kha~S^uf$}ftzKKYP8bmpV;He6PSvS1yao|rwsmm$_rCRZ<6>x?Q4fqG=S?!k3 zflogpKc%v^I+c9YS?TYbd-fPCdpcaPl09{Y9X5gbjtO{q`J^+fL~i8aEiA&L*d+x# zPvH6y=&p{)@>u$Bhp4ydezThuKZ4G?M$O$Z>P==U|52l0<ANqs`z$j?%n9P z6sjRB!{z_L@elACt6@Pc)v6J0fv46}@8Zs|%eXnqm!HMiMN?niJip zn-VAaf^Hm+rmKN1TY<`fw%pZ2f27Z7@$0Nx6LzmA>@s-Qrt1pxfXdR{u)FC)Hb?^gqeChHHPKF#L*OFuT$t$POrM@>@7)@6W z6ZK^~czz_2>)Y(6aYPgvA+rk8PdP<>OXbEc>K7|w`=_x-PO_8tQ%8S;*CVXi^~llr z?5?-eRo26T-bBsgTXdX4t=SMp{T+;(@ZpBSue({zTSQ%)xQ1=ZPM<9E^ z#4boff5pSi_t;05q1p&6?2<$eE+7-{obX0pg`zua5Iq@#-V}ZH+~{3_2+YEBdyEaKL+=`~1w_<- z5>oFJ6(9-xh<^RMu%AY=cCmam8oT2raw;C|_;_~VNU*p)m}6hf8<5#M=h!)CuuEUy z|4C%DFQM(*VOtu}mZh*7hv2KN!;agFHMdKP+etiIWT(RLN&kkcrVt13#EEyA!IDxPFaXW@Vs)NXvl@-brhbY%zMpei$j`jmWhRVYfecnB4H53qO6Ak9x} zTKqV5DJys_!~*<>&Q~pws%|``XK?jcc<>J}#A}%AZ}5YNdK!?j_nFU0#yJ_^OL!!q zX#Nt^SBPHFCTbJ{u+iQlCo_3H#Hw9~7TgKnBv7mH1Pe473+^ob)k6AZ*zg#-YMnJ! zW4SD*f@2ITyB!CTN#(gx;Ay@?@sdQ&MTN-!B6Y!BZ$frlW=&4RO9?zhAV~z`;wC%lA)g4W-gln) zzZ`);gp5bJRKzRD#73#X%wv)MWteRi9VFA>sC42?d3c3-()nFwUc-s;2zz5P_WWE{ zcQafoY`4?w<`-CbBHvS$z1sopC3-3khRUspKh49=-+`Q2OeAPBKMVM69`79B=QI@t zJ2id~vCCt`J5IxUFYuRc@X0nJ9&_Q;t#IfvWYBo_*-&B?v$VC0A(HG1O)~Ko1bV6- zaz>zcO2cOo`do#o?})*QIF-mR2sGI@{_1c6k0bg(o0lB z2)yKGM)3|=c!iN|g(F|lzw#?S*mpGGH?;XXq`Tm)%h+=#;MrYtN;-`-^O-%qA8ogl z@vld+iuV`c{Zz&(y1?GtOpNM(m0gS2IWx4_*>E)cFr>ap-Ch!PMlY!=e8TQeg1^%7 zEsLQ0MbCFHVguRK#~6u!6lHyiu!eg0wi!0UZa5%;*&SzQ=ZMf`LW5$&3k3e6IXb4L zhN?e}6uJj(=c9E;@%oEB@duiqGZ~74$Y>p-v}v&&k!KZ{ur${9DnFtho~T|E(Ue2j z_e+_{3bfuxy!k)i^ld!HeVVF5a_LSCb0ZS8#Gm zB9nKp+`ACV*v2k=jV!!FmDWpqurbhOCOTU5cMC*U-S7w;L@M_E3N%g!c1|GQHNgAB z*-fvI2}jv2ugFeJWDkYX`=Sq4(0=^a_w)_dNps=M@o2+o#BxOzOvEL>AdQ#tI;gGH zY+|X)&?J+P$u*EWmyni8tXnT6k)CG{L5J4mwGP>_5N=vZ)plmn+(lnspi|B^v_Ll2fd_A2bPJn~C6=UQ5F`0W6d*@g z%icajRpWB5Uyi5sUHu3C)UgMC!ef^>b=aS7&>N{td?j_!4>o);6f#B_A8~ff9>aE{ zVk%_0Y*}gXvXr7X>~Ygs(=O8pQ=n-H9aKi+=Z?bP-LIb@SC!5)qPfJ@sxtOl>Skq{ zJKJ@D6ZuZM3Q$-5+nSXS%67#HC=dmy2sa-#ogWA-2w!6cX$0D zxCVE32@>2B+@0X=&hEm>SnK?6^FRBLAS>PX)~#E0>eMOit5z1}kZ_);1ZTG<8Bqj^ zY+LY&cPx#V3s8;uC$HG4R;+YI-ggJf3%@W9sV{ru#*qteuUCj^)rF z{+C(UmDC+-A=GNFFt2ATD9Cu!$QFYsEn|)P;Tt*-1)z_*W-R_lIE#mV;GT$7`0_kIU zCv37V7X9dwZIv3@ZraBYucb zT35Zotj=0WI8#^KGN-dI6XklUUDaONW@EIaE_za4I+j;i*GaPNjeUb7*7*_LQ->qo z9!uyO9Kmo1LOnp zTe-Wkm!C9J4M#y}kzR)ELWb5mgf83Dp~l=@qwulCi5{O4o1ghr*PRGnwF$+S{wU29 z2Mx^%weptwUinvX=R`CA9F{eQ74|QtWxJ`o%Ot-BbG9} z#~pEwZLXt~r)^|v^y`?6F^40+GQn_6Z0>ZcGjz#NC2gUY;}KoMB0YQDaqdlyIno9p z#aO1TW_D>y<#O=3KRGpBa-W}Der8Qxn_9@fC~zQ9J}}PT!|xB+)hMsqk zCEM#ecQOC(mV2XnkUQ2r%T>`e$koAh&pF#Mo1GqIX>DB9&Z0LKrKmwyuuve;cgXuL zbx*3~ox)W$&X>!dB~UEbRDP-S(q8Jh;Dd163w6XIwiw3^=Pg%Zw}7fy(AnFW6}^*N z%tq^GFKK%tZm?b=C%#G7aovzr&!T2!rfwUxyXFE@^J)84S#dCR@S+w?$8b4(+f`25 zR?8s4MuyYKI!-u4oi^SwPbf?O{08P9CRqLr{ew4Yjeqge1zy%@sUJj*?WS6lIegod z{EAQBB`=mQ$}RcR66KgO6;iJz4^W;d|HF#5Qqx#%I6^zX+EYL(78ymj4+o)wSInq_ z=HN0dlfF_D)V^}fz()Tf|F}RD6ALde3(*^lk_#$(v?i9LlInUH5g+{|W`6XR$g+`1 zQP#Me8AoS{$T}ob=5)nk%SJtlNDTLTsyqJ><3b;lb;17v0|KA?6Md;EYky`;`2OvW zZ*}7r{m7Eq)&C|KFI$yDN|IVDG(s%mxZ|qt`M^xsCgJ~!ut)WXeivOmdVb{Fuqy5c zXvUmDWQgXHpS=9>+POWEBRGY-=x^2w?9w+eEaioQrVOR-a>(O@;p^Q*>G^E zr%+AW<2c~{5mq2#enh^A$>I6Kg>WP6N?2IfaCb9j4!d7WV{K?zXk6CjD#e3$ealiq z$*q&8B$rAFPaU2*B(*{6#?&ZZwcs6fj&aJe)cQjD*BrLyw78oZRN?UEM9x|p{ za$3)b1#LB$Tz$fJTyiiU?y-1LTrYknbKPZ`92!ln;VwFapO_+gN*Sdj%BT5Ixl71R)|o-CWn;? zyXcwbRvq8OV@7-Vlh>Y7J~>TFrqtP~n^PVo{rnL~oc`lL(w@`<{%^rS%6hef9&Y(8 zR&zf0AnSguc5|xOMdHR zv9PU){g$Jb>$JOvXSpZUo$Na7EbiECdnRrWdWY)iJ=LkQ8b}`)TBF&j%=KEo_VNsm3zU%fv*0> zzUkh@UXSm!Z&!dRjMifA`Z2NT7Nxbt6c4N8`p^V{d?H4@XL|I zV;-l`;)cbwkF6ecDXg6PwzIqQmNPS{#XiX%3Jc~)ei^^!Td(hh5N5$t#tdf+R`e$H+az=Zm-!e*D23vo#op;*8u0|}68Xi3{ zI&<{usGE^DBASQS^Z1+_>~3kK&?)q{-c38AWR$N2n)z>f52Th$ZIv49-QYVFcqSiJ zo9nTmPnHC0NRsUroX=eSnNvNE$O46y@RA9sbA6w^ zyS(qbBYkcBKL3G0&!AhLD$i5uYj$I4C`>2`rc~CNfSPMOZ&Rd{70LCIvnMY~`aNk%%JE!9#YY3F3DmabS-0e2NwLd22uS#svd z=g6BadzJK?qW6cN_ZY5I_9B8^$)5T?u}FNT_=v=`Nh4D7r^Y9%KZeIQ`&RG!pdWwv zG`)l^uls&j=7=Vd8>4ro$(L?O`mgE6#vP6x9G>hta6MVJFnRO3JnUR{CM^C z!sp+Iq}7Ws*?T`du93=n?iHx^mh@>8qvR7B@8d zOISsh*Zz@u_F2hqjStziqCuPQN=o(Q2R{q`sF$$!`0%8sVmhyid}vYxZ^vT+CTZXB;)7spT(2DC0Fnkm1hW-J&WUNW-Oh4 zZS+^yczY}N(Aalbj_3X(&&TY8Gqj3n6VW_ugnO`KmDQzn_T@`zo46{W;*adf-%~4a zX169cP2Bt~?3g2kuqQc>4j{ zbtzgJV6A1zWqeeR$U6f+dJiZo2@o8 zWMsz}XE&$KS=*7>?w1aUxvfJj_l*wvdNo{m#ME3@uuO1UP!1N7wctjkw>%G&2!0Rl zmnSNH)k;LtW6}JnMohe4f5lXb?y5x{u6&mJ$*+R;V0fUkzql_UwQ=g?)VaP#N-l9} z*l%gxq%DxfAHLst%26*YTbgW{vt;j>twE;MaUG+Ug%=46_q=nKv5mL1RR8fmO)Z%6 zzmyr?_kLS2=%1Dv^|M0!(C^g~^_21Qd*i-vo@t9$#P;^Q?o;6#qmD;^h?*a9)zi&Y z$$7;Y?s*%&H}Y%50MAIP(NuqGC*Lmr;K1*J+WsHj9NzifTK?2vf?5@y`u0=96+eCGmJz_o$QXtO>Zd$*{2i2`QI$-()-8O)*H&K&A^rWP+&(rLr>zl|TE z1j`Pgo7DvqZW{IRSLpB_CNp^olJWvP=Mxo&l4KIGuoAve^UbBNMt^O!`i-d;d6@z< zQCY*e%cQK3M+LhFhWiWp+Iu&oT2oJ@984*l`Z@KZx3Ygou&>@y%;YW|IVkE*L_JSs zR}Igd$R%m7XZo5YLzeFu+NAwE_EU61RJX`#;SRTEJ1yMQ-Rj@6EdNesH$}Y}9OD&| zCM8UdFOcZ{Su(Yd&&C9o8~z%>Rmv>Aw`Gg9I&*{vNSDQx;%aG-W2xtALPqeLll~A%pQ9xT(^#s2H0;ntj=%t_tG-!KkxwhTK*Nfh`psZ{PfadKVe;{ z3loZ$Qd8ZjKh|5p+_}MYfe~=~sQ(&8^q$%yCd406@2K@r@!f!iW(VUVmAc~E8#ND; zHJ<4e$vG2216o=NP+407I|$|>lQItLx#0(lv%D3~!DX0_X6+=)6wr|3pb?$nZ7c=Z zX#%?OhzvSX*a>!-74^hkp#;>w{LG6OOP>85eXtu;v96Nm-2lz~P6qIp8m6LUChsqf zf>pB7hHB&gBP#PiF}W?kb4|poFnZUr@Ce1eN7PnIgQs)>1?UfoI8-PGJE{V6Ni$Ph zy@2BR3Fbc}GXJ9_b=TU|*FEA1Dl(U;;#h=tp!q>ipSmc^pF?LapS6K?BTCHm#7W{p zv9b7=>URotp6h6~B@5S>Q1+Dka|LMUTxwk3sKMMr1MDkUN-|Zr*Wf8{^h_WLN6_Os zO$9F>lRWytaXAqx37**!{O1uhxl~lqwvwlAMX`DY*uh7#){e}CD5m?FiIi76u8txH ze#9)DI251r@_lxLnjE8=UIDxZ6j!Kby=o0wJ+MVDh@+&_(rD=k{E+rmD|O@!R3CjP z%qLK# z+FW1`VF>h&%HwYRx$c2+(~akD&F%G&3T_Hhz8Yvnv}vfZPk>)F2;S>@v^`U?wwX+B z2KOBZ<=sPwAQ>vkL;&8!;G=8wHq zl9ih3H+4C>fc2@|6{4cIh%=F7`JKA-QeisPAd7XOH4E?42Npokddqqi_U}tn3pML2 zcow_C+E#;6##6H^1g|$z$VR2<4te+NUlX>wq0pM&S`NkTjn+r7hO>+D{EKKEz@&>a zR0|G(6DGlv*bK97KeeIhc+AHzYKrKFmc;atKDq_G%L;P;8WrOh7>A!>O2wh%`wGU* zYGxPr0NE<9dzs8tlF2k@)py*4>$TEYdIzeFAJIf>2BRW5)SS9VUf2RH1SdFc041(E z_@)WMAN*aEa2&+*3N@VcR07O-%a7os=LbRSgaD+2rS$;$1~=z#Qih`eOa2-WDHN7gTi_Z(-_FnkrWdU_By>0rLxo zGt)yvdF&V**nwDOIXsU_qW%64EFs5tjDo)A=<+hSv{i(jw zj_M6iud`w&CAi~nhx*_Vb_(I>a;9VDt}y}JP1e(&2^tMpFOxym4(~le7>^BQ6ZUbp zEZ}B2!}l;%5O#kiyRi3_mPcEPaP#B8nfT8tLX^z+)Nf3DVUXpz{; zVpJm6P_54ZBk8-*lb?|d%Xmz^;14jZtf4=tg*;#<47j5y%5iyH@IjzbV5EPQFO9FX zFV%O~|1?lq*`ZTWF>fmCdrMnkfHc{;DJ&w&9rHS7Oia`0*r;xsKDMHnt2<*` zCq`RybJH$jRkp*tTB48DI;q{2yYe78gM26$85|R65;z~Y6)dWpSL^7FL(Q10xE0nC z6O}@cXLUnPIGU>8NvhJ|9eLzuXgwnm>6l=jU`v*k@KY;F-^7pB-onjLJ;O`gVjp_OkCh?H1^H#LRL}_6gHo_c zFb@`WJTN5CDzH6JFIW%c?{7s_9uj$1Qo1l5V*v5uBV!}g>!P6%Ao7Q)j%3iB;1WZW z<#GXebnv0Soxh8}iod4+B)P~D{bFbvD#i=-256-JYiVd}7HPO6PslN&kckBoTkil>TR{XaC#2)!xb;X}@k;$UV1R+JM$` zfcw*&h+b4!Xqg*IrGC0xS;+Lt;c^b;s^VAXD+}b_!M*`|pdPq?JGrZ3Q!}cUmEW1^ z*B8XKg<6^UJR9Ln`q=r!Fof3Xr?tB3Hsy(&Pd*h41}n&Kf}4XKf-?j2{rCN!f|<2D z#y3lSATv8JNqHf+llRCK)Dyah`uRC{!UHXpLc6KsMq6$O7etqBw5_4-3g~k^QG-1>UUW#E zq~?+;j$>xZPB@k^+?K6CQIcSEn4R|Tsh*A^zGdbUKHcmfG&P;d5~DZg*8E}VC(MC) zvjEmjdSNphyYtlCD-j(yg+v$|DR2u%8n=jf9@8ICh3K^~Qy|Oh$C%u3Puke^(t*s-UtGj!O`#AULeaAD$TW4$c70;Eh zn&Fqi+I#N1+BnZTBp|42{@|>AQHyIswEM)f&EP>c)3c$#r8`REU;w=(hB0c%0(r<0@`RJ~_Qm8Dp$QIhIPo1|;fQ|SPCN*?Krc$$yJAQn?t zhbrPvX6P&@6X}blcdK9X6rK^UY$Jw?g*##D0hm?$$;3Nb$tn_9jRf+9CRkqrtnJZo zWPjt0YBRyCHT>D-)abJjRlKLKV+1~E6u-v6VyTRBZau8%2R#M1;n|DAS-1s_g^j{D zAqf`xZ`k-wn5qNe(mWzI&q5rW4%S*Ge&wNhcbF_=J-K>T5Xu9Cs@n*#FU@NoCdHDUA1~ls4{cer^K9Lx{C^a zW{`v`?ALB~APmOAN^VHSsK(uV3ARmsYU~@}EcK%znVD&Zd#z5+cy_AMsboa^tw*hs zm_(upKgoQyk>`EnyxJ@~=y_{QzgrQMdSi%z7cmi#Da+*F8OcmH!H6^Un=9j^bHk%u zN{#p(zNJ1qEVHZJ)T9!L$zBls+2DjugrAXwW_%-9&i7!I9D~&ow6v#R;el|UnsA5{ z8iPW28<_lKIA<;3`AJrR?D!`$G9U6>Wtr16jhv=E9DzeHnD)aM?gY=HI2^(;;1x&7 zs;|KQc*0I>;?GN9HVxo;OkI%Pa0{Lz?(hRrCDICEz3# zsEF>OqoXk&L)nROSc=`)16~k|=Uqw$zJS?RXSI0N{WgM%uINM9Ng8v@qUf4>1*>!w-_g`xF3(Dv z3MG-S^lYr~aq3@vVU~|1w>ySanmop|aJ8Pn^jXF!mMA@&=emmI0t#k{;xxYhkXt$j|b@)bWFL$wofdE~f6_Cp?U)W>f)ASxw$= zH7MCAEYoCHno2e~`MF`ltnXl{{l(8+hAmuWE$0z)jNzyM!^*yd+nblKse(0!$avH7 z-CAOC!&tNa+<0?|&i~+hkA9Lv8FGu(>>VZ4OrlXgp$M^ zHgKSQ@F0itnQi#`8LaoXU*EGmIZ`8jMj=+~KP+?>%%mA`0=u)PX2-=wxY9@A+)bc| zx;b%o51!jp^fDDiPP1w!S=9^h?PSyqEAZ~Q@PR$y(5xeWUV{A}gPHiy65R=HRwr~GP*rrV!$bF&p}dTljxFd}8Ui*sn?G&ibZmr;yc(?I z4P2oKFu~4nc8>nq!!2NMuesC8@{x&7-W=GBsaoNpyZk%+u^{)2sX361b?nYBvk$!< zyyBLuXiw~X469q86W))Gr}M1h0n1u;tsQvdC|0!}ziZ4}$?&B9`qdS2lJ9EjsXb)R zH}N-3v6Z64Co_nPcd=RrVXm0Ua&|KQrl8uh_|+HAU{kWqzws0w$@lkjqVBOSU+AU{ zz)CyHT6O)!H=N7o>}Pjc^VK$x(6iXbM!xSBZj(uH9#{P0srBbRo5o%(XE#oxWl)%> z>;>wwoOL}4j%sRUw}YWLgLAZxU04D)wkzwC#P`~Y&tJ}WssoQKHyBYI{y+V%lPbf& z)!=B9gPA)7&t$6Qm|3)`4Ql0SOuZ+Kh|JWrvcS>t6OY};8rJYEW>5EOScLn)Ff4cp zQ%ma|oVWX+DKFpvdpLIuIQiXq%c}6al78`$P0du3A8smum?|fxB9oi_E67e(;PjW` znSbY0*N2T+jDP)|m7EIWZU+CaO|%F6z!N2Nny+wnOl^wW*upcO+En5CO2laLke_nu zO*OpEoX1tz-8}ZZH#U$1bjQ>g`%F}6a@4ExIhnAP0DMzZKQIpKe9OL@+BqlLiKAHk zDZZyd3|gM&YskC&#uH}a);AT3O)hA6cFwdvlb2lR7j|T-2A${QCcW+d@tqE1yI-is zo4j;WAATykGXVy1Sx|eE_iE}72l?+AZfA4P$8%3Fgaf?^do%Snj$>n+vBi1#v=P7V zlgF%KI-Z~u>l$FS@AEd{d^abnY_hLS=Bud~`dMAIgHHqJntLw^$DD~9h^Il(1x}LCarVSo}D|>>dV=O7h&r! zf8Bx}?jS2&woUM3%wq#jxkeY?W%|z}c((L#ffj>yP_GJZBY3eSV}>D(qsCtSI`xBh={r>`(MfOo+(}9xraZ4p5Ea7|A#j( zU};15#Sz|YF>jxYCyl56l?i_RTq4;X*us2lU&Qj?a!<5@quVpop6qr6Z)wiGF}skn z;jIr@h6&YK$DDW#2dv@qOhhgK0(}h@@KU-uOX1^YaSvo9BmZXPr{`r&s0ROz~7mQR)%E?{MnAeUzWe{_>C;FOh~;!Z_{k-D-#u?^_If)t9-U>AO`xA zle)vQ5O#9~?xP_*Q+Cem9(1`2Fq?EQyI7A(%1h$xdsuN>nE9F6?OmLTQkvcR*AxlQOuhxQ;T*4yua7UiB6r+djCBA;A(L2gOa$Du>?|E|JkT_tnU;YoCkEq?=|`w4t(r0c0I~+0;}{8N!(>BdVS7p zLB7J&0zQc+zQiQTR~8>}=O3JsFPuD)GniyVee zkLSq;aF=D^zDeNrb#f-muI|K8FYf6?e0w?UOk*ahIhXJVH{L0{ zXb;xOR7NjJRI8C4naTkhc&6IeSXpdsFZuLUIwikgYi6u(s;-`6=S$$*be_VTW0{Zj ztIIRhy=*K~I(y-V=|gIV2MoT(V9#xtqh&mcM}!JDMT*BSVdteg@L{?b(L>&Fe3 z$gTGQ5Br|m{3O4A5NTe*>dJ6WKHwhOiZ3zyX=Y=KZ@DGT;Pnf#KW%v$kVjM}>T`#V zV_&)wv5mz$W#j~x!rOq-5>NEOBmc?mUYA?&_s}xBu>;0IGMgw?EzWX`6MY62-V|;< zb4pY+cG;6VeJQtmZ+v1ZZ%~k|vkqOS*I2nKc<@%7rF~4DtR{rGu}0uqnzDusg?QNI zr`WZDaQ#hw|5K_VtA#jn(XZr9|Kqt6EhFJ0bP@14HXAyiAv9_I6Cl1QPG+y#wbGk2t$ zWfm3G*;Lu`Qu`SP5_6q8(onq#m7bMgGYzSw?gY0w&)?pqCHy;`_XmIdejWAv%zVX8 zy0LdK19z!5nVkHDHi(LwIUjnO+DSd9=AdRgU-c;ylmlR*fI43&yfD8_E_IB*E*`#dZex}Mb z6;SDCE^ZJ;{5h?$mRXyu-cqV5wPZc`MsBaAf&jJto z1wP{*sL~VRuJwsHR`N-$ZIx_gZ0l|Dwp{j6_I386_B}SqR$Q7TTEs74TsP<)zDiuz zitdxqupKs1XZ(x0SUD}I%Jg3Nn3`P#R(LH?yEpVhEYeTY8}5P`(Sw{hA9lT&s3M#b z^$sgO%zbj4+hQ8lH;5jx&iJWq%#<9aJ3!crYe&?)>S*P#+(Z5nY!j?Xbz(-aNbspY zgMXNBvhRkkwm)}ZUZ6!_jsL4J#rN8uS-z(<)ci(c>ltZ~G!HF}3{nn9F;6X6{zYRO z$3CE@b~i$ZY#TK(CQq6vY4*jAiCz+sFYJVS9*q3M&X8k{{i0M{EDqoAt2S5tAC-&` z!T3OtK>fhNz+L}zUvY2Q)Mlxo_m+3LZ?S(vU~sUAyivX**HoIQceNHq7Rz%XQYjI{4!u18*q*bpVhmQN>U$d>+bnpsh|!erMcc)i(Oqnv`{p!7*# z4zw<550wAprojRJw!Read+&bV5dR>53!k3)C8cA^my})J+x~~a4@wngFy@Cr(odVM z4-P#R3Q8sHrySXx*Br5q0rnO4$&MY)Gp@DnOYUs$fb)gpv)y5@M4cs%G)8>QjKx#< z3p};4Mr)&fSN2mq$qjPfNlj5QDtY7;!2!X9;CH#EI!kLtpZ!s6Ro1&1PG+Z`6v~OE zq?yurdY&do)1^mJKHE~;CEH`$pSDj@RjHJi%NilnhkcQav*!i39i+8VML33@;QYX@ zfIHYISS#oav<~d`m-dJId;1Fo>I5^(>*cF*qWndkEuWJQDy`_T>7q_l{YrMFt};lO z1Ji!E5hkPeETozQX zcFAy(F*z({LfgqJ%7qSEez#r_&$GS}wntJ|DNecpZ)Y#sC{4wE;%c!tr=yx6Smqmb z^c&h7?G7xdOIjJNtNK&k9DE#j8)zC#Ls!&dMN}iy<;r_`iTp(VLz$>f(Edj!R&jcY zHsYUW!4GIj)L)AD#Sywn$H`l2Ir9`RpFdd6Bo~(BgC|)Fi{2pAfUIYa-kQBRWBe^X zcJ>cz6@I~Ecg?nMvVFAAa!m^hi>wm$CNf7vbI)#PMaL>fxa%3CG~!%29o1~Pq+z0! z>iJvCZ=vz}pH!2!YxVf~RrPe*bme)lUtqgGGSE8ML}{$efbn%vYpoVj3M-PjRC{R@ z6H=@VmOEHfsCx^h3ChwJ7QN^=Y4 z7dybb$p>3*C+s$P1oN^_!i zs8`hkYB6z zVBq$Ud#Y>oGGzMS=|;<-i^e=l9x2h`bq75wJgeMeT#~D(E0=q+r%d?sh?kMkQGFtZ zM-&OK5w^Lh|q@Fa}$I8U0BVzMBZPFmc`uBDnwY^=6OoT#yVAy{G<$ z!kyZk8*&NiFwclXHZyfOR{w%7*nW6HB|u7Z!|_@U17aLJzM*I{#4{zbs_`F{nL|{> zj>Afsh6+z(IO%DaZ~q_4W}>!|{=eVVKh+C#N^YWZS%`Zm7S3Wis(0iXFeVgXDK(p> z;#BdKm>-`QVcTSDVYfQEIYv2ZJK`MQ>^4VrM}J3qM}%XiJ*WMYZ4}k@^0pt+eyN>g zXN_u!|6#E*HLM)MTw?Kh=z%>n0N04eS{UnK+bp8?T1~4?b*Vi*f0#C&C}KV}r`_}i z#!xBS2EXSicb|(n!<+faDP#tvz}mWzGrS?j%}PZ5#Zn5sP(QR9b_nl<3{;iMS<9o? zQ-JE;Wa}I%^iw!55$NwMhr8BCXolyCBA+}5@=%4i*3{LFz&|}tx&j|oh(G;7ym$?k-AVRiq&|gO;89etn$eFvg}>>c_e3FSwmyj({~BVVV|p*D zwWhM$Bk+L}Sk42>4YqIo-l0I(4YosR z{uG8*(o6hRWn#!VaIoGn^L(oHrZrlu#CbYR*I{;Rl91I}4-Rf86sW3Dk+NAI!7hvy z_Jh`DVP{f7#~#!3s~HQZig}D<*nU;Lu$~uN&PLpnO6=W8%R;@_snx`e7hprpw6t15 z;?s#*xLzH$@jT`@SJobB^Yon5Z#uzP%?gX|5AB7vSsw_C_dSY4hp0O|GG3G2Kj&|r zlPzQ;PbiPB)haqqGwDyXGxQ2%Vy{l=8;npWBUZG^(kFDz7!YbmZ|P;*Zu<^zd_?cNTRncg&;1R^ZrqP&_Qr-4U5PHwFHRN_>dnqc;us7L8`?x`JNuUf)h&CGMJ0f9(^7ScYl zB@K+qjYpbiT_F~dIx_>?A}tXMh=w&i`!7m!@m~3*q2g|;g7UBUu^8ti8un#l;W<^# zvT(ZZp*JxVv}HP0Kad^158tvYIB$@wc?}u@Gth%=%gvA{bcJce)zPs`hXs{{Kj$&F z;LERRC$(={E-a~zUO@kcy|}JkrUSee)#F=mFRyYUrfA(!T};&4;jyy8JT8I1Y6DYX zF}|`jej*DBN&mvZZpR+j;pU!0U8=g)Mcu%CazVZ>OG<0ysPaa+qg+?ffebAN-`Qe} zrcNvfF}Cf_IqsqEJ+Al8)y{oR&3V*y(e3wi3GWuMH{x0NZ(&YPZnxc?-F?Be#hKQT zYFlHwX>;4V+K!21gfyXUdL8YQIu*8PoLXItREH`>nxL`s+ z3~r^X*{MuYTYd*l+ze6xR5_dkL7l{Lm->oxI$_rWkTgjyK$ z^|UawTkyVr5#Q8Ex_Jx6Ay`(PplncE>l;IhspIdloHP26<({>=om0YwMf?|@F>EAU!-?*`p8H|#A_hka zQR$@%cA^rFh}*v?96tnH0#YJ^-mI4`(VZl%0Z4k?@E z-GThRT&b}s+f(Lwz5aK>6LKNBNN`c$eZVbeR%GRtI#a6(KmQHcZE@is0z{jnZqj%( zQkz>BiW_Z*9eG`YT|=A)>;_!8;?m#JN?UXL9(!|pMcY2HrnNr!+!e5(J?Lg$0ADWz z9#fHgcbC!2%$a}b*EPiITX7piQ|a!;1o0RwRwesOfEk<*^sb=gFbHy`Us!Mk5REv? z8P3aB(1l2&fxZoYw2i*$+-e6hfaYov9ru6BZDH;#l|^NzQc9hJ@~BOd)d%pESHabN zr;g(c9fr%jm%SgW#gjE;Qa{qMugN{-$HDQzRl!^GC~b;mo;X)}CoI%EDczM6V~=f6 zSdORyQQyPkJgLqZ&c?0+o~Gf&B3DQGqS{Ak5#wMTZ4P@JRz9qWyQ!m()KF|AeYJa? zbDZmJX@vz^HTiKMSMZv=MI8=ikf3!^y#7O}ktxknI(Xv(4cL=6vQzFB>>sQNL+~~+ zY(d`Vks7V5Mm3?bI87QZS;Yj)B=DE^!dmHH$9q>Ew?>cGF{vsn#7&~xHpX7xQPYvv zenpxnp2GjXg5_VtQX9UR!|$#{oGy6uf9`Km7Z!mS_{LtJ3gy@)Z0o{WrnN)$$Pw+}A!U zj$mb9@6;Bl=e#@o)q`(?ql41}@BE4Mj`j`;un1Sm59K;aakYyU2EN+_70xK(VrU&| zQI#!T>ug&O#}kKcA85NQo`mgHMLa0wwCi@sQPLhj9U?}2PPfHbJXIc{23YGD%LJh# z+IBUp`z(`;XJj&pHqa4AFOHcS1`etijD5pKP+@g7};vmk}$RHx6O(p1TdS*{YZ zmgS7-A;oeLj#6vEVOe0*$9hL7(3`NAUJ8>UdSD0JZ_?%m5p`6xnYJ_Tv`l<~wvua?{Ck zl6#?zep#yugK)IkPc_sTS~3}SRy`wYH&y!|C}vs|9pklE+7~Sw+Ce#g>H777-!sFQ zNVN2Xcqs(3*N2ZDR5^=Miz*CyJd?~tK)G}>Rg&9AaU!?ps7dS;k~x_tjn&4E&^V!{ zI9Uowmr$_oOaE3=c>MlREY_(CEvc&@HIyIN@=vMOpbz&}ow? zMYRSLx*_QE9$^L;NJYUe_)*WP4<6T_E;Ea;le$DdSQuIPntz0qLT+I`sPq+b!d+nW zZ>i>&KqGk}7_`TD51Kq1jJcGa41zq2JRwzU%nf`OO!p79jk=3$_B1ilzv$gwhQ&IF z__H^+c}vzeKML~`seib^)7|u?v?u!OgWlvS;Vj&>2-sdz=^!eHitAHhE1fH&$dGPw$yO7oE>;HDO=GC&YQN^^oF}7Mi1(QQg0X^JIv@HUd3f&AR3FSa zTUN5Kx3K6A3jra&wH=z6UFgfw_)~QhQdM}pUr@;0MAp^S+7oTr_Iy;~GwQRQjeCl0|o-W?75-^b_^DKVft=fOAleiqQyIN*a~*9#k$y zQEh#Rn%Z5ebq=a(*TL02WNI_Xs~1t}iXnE~$=_w8X7UHztVvXeSF^)2u#VeAm=lOA zXHjP|GYW&Xoq^(N7qYJz@tZ`3C$S!%qY%jO<--kG?(KUsEeBKQ&X-1Nr_G@>e$iRwrq$6VrOXX z<4H`7sbX*)>SNu5$v#b;x@uJW%;}5?RJ?282~0-e3#yZ0)X*cq2h7Pp5}129r>QAf z@FuEZjbT5T3U+1Sa%|z}4dwK|;_RHFzP*&$TC%!kexJ{_#uBDDPkeYf=^o@Jt18NE7_?)?%xJIb5nc8+U@Nuj8T_4VxIY0gl zx&3YU1}DM(Zc=eHXV-jzMP$GRGpDm|1rzz5D(fx~t*o#G%zEutkbuX0{@-}f`}qD? zzLPmcC&XIF)Gdy)LtVHf%!;%*hd6+;UzEQzdSsP$Yeiws+Zf3q}JUoNt*uY{=&K_7m*Qg7-;E~m(_G`|V7!I;_9F4?t zuoc$xu>^bA0w3Ty%+VOw(r!HZEZ9Hh#D?+M|01l$oODxNh;^ zXqX$OvY*GS9K-XF>9btM`2)`l{l~p@5_ZW%GLe6HyMH;SwfVUoD)Me@>?pU7m$#}) zm3SDctrf8aZ>TPo8({V4QZpY!jV&*>cncffVzI*&h_Mb7TozML(@KrMi|~gf9ev}$ z&_Zh^v9WLwt%~)AD9omUwA0cVp20(-5qrN0Z@CH1^%~z`wK-RfVOYK3xtnroeqs$J zdE@2ShB=k7BP?j2QJ!7xjUP9sCY#e~cA_O|&P>)=-$b(F2r3pOU|avqoBRorbrWlI zn7J0kP~A;0941FtLMC?{3}Y|R=|GUYDSUMgta}{yauT@ecAoq*%##CDvU|bq`iG~e z$!WC0>6p&nnry-wtf@I|{Q=MZ6F%I0x&z~lTiEq1Y^@}IR3#T|%}r1Xwqzep_dlR? z8cdx7#vrWnFIaRc{GFAYlgeP0R_fkm+21>`rAolHyT=pG#CBfbC1U7R$%%6H7;f&y z%X%Dt9iR5)+8V2sv@f$jo10aiCV(C4#0k|z^}W7T~=luNCc5y4BqSs+<^A@ zlriMg3yD|f!HL?=bJ+RXY^=p^ustK_)fq%hz68E-D~yNZ`0&p#9H5{2NFwb{*qM!=wFulO3k;py_?H-N z)WYoBIPl#GFdsU?cbbf!y9L)FBj;&t6>8dE6tzHGu8-RjSnAqm9w6nJ7YHQI-7{42j@OJ?6B;d(e=D_6K(-Bnq2!U zSIUCNaNrTV*x@bSygVynX9rraKjy5k=P)=n;)|E!lV0FYdk~r0@lI1=MES^Vzf(83 z#*CSnFsZWOFWwn#xJkx?S5APL72vj*2)EM$7o~?~7nZS#*utDvpPwC0%Z@A{lQ1WH z6z6U4!ZR5`-D5KtZX0+Y!}0d9Jf#^Mz|bdZsLAI|#7{M4RX1VtE;@0=P&IDrG-xEY zfbI5^nK32U;V0Zu;V|f$ff#<|sWX#{m*wZT##_GT-ygzHd4_Mw!;_B0$Kl+6@luO( ziuZ5_$Fj@cS?|xVJ5)<`;*j|$cMgQTUI|r(f8a!&hBfku2^){WnOhKDjYkQgyY(17 z^X)+To6(Ka8Flt?M3{%TU19y9){|Uk2N-HMaQJ;-tQqu^S~pOV zIL%4D;Bu&wkdKvkiXBfD%2;2Ct87o~2OKvX*Bl!h>m5Cu+nkS_FVV}1a})!u&SxJD z7Bkh6&!O5nOKHVQeC1mq9bMS}5Kl0*nYS!qv?aF8WK2c9Vjw()ORzp>qNm`dmUNI> z#~n@Nna-f%Sb#e)4i7g3Zt#4jDEQf{8}!%|q!!bj3gbp5C$zMl<7D~Zgk8j@Yg2(K z$?f(Wf7Fqli#z0wIZ-N*$VROEt`?c@M)Hp(#76(ZUU_4r<<@HpwzPo`fF1DdE@53> z?gn$#(t0fU8u3*Ryvk?zZX>}jUHtk4n`a0yem-g#hf(d^&Qt$|IzWBxH*GXsWtX*^ zuyztqx`-zOlZgD?dPOu6n(8)C`5oE;(EkPKwEU*MP-meKvrOHmZUoW3PiJ^KDsgW5 zy9S4vqe#$u&?FAeS;XzzVUi|Z6DNpu>FjHS8bv*HB=(a--^1bt z!c1F@g*|3oLmd$1oO%vwfN5YAZAA0JtmOywne^=4MpxkmJw0W}yvl=0pXXyNelQ1q zv=6&oi@QDvUt0{{e~%|JyQR|_gY`G8-2gf)3u{$iLcKtXriGeMb*X98lDx?o^{)Ct zeWRvf*3tl~k`1VE?k4{{LO+>>xFTx$#;ur1SQ6&Iiuyu5kAa_hENE_h9!Z)_sS&pu3i9J5`v0_L4B1l5Nv$ zDd4~d(Bk+)B(k5Ac`QWz2ThE8bRJd(8UMhO9R%C_s4Q0Els-xk^{qM#9K1T+iih>F z^h!*ETlm^CoN9VWJkWY;G4ZRoSW={$QXQuJL`zBH4>2bi7iXC7Q;K?5P-x1%ev~_K zH}gr3p*0dqM_^sFN0Q0BXM^u&V*RJk+8_a(vwHFBm2|v}N8Jqz{ zE6mS}qXTmxNY_uj5jmtn=ihqru0dp76{!YxrV_9Ob=|jgz$PmBKqPviBU+N)*%Rt} zwLjJ3hN$VyK$E8ndNmEjkT}V<&feXT+gaSDvkO_>b=iZj?na(ZbijV37S_$V*4fS( zk2k92xQOo8EUB4T)cOw9lw(9hjp4lBr~0{-s#`JCUI%HCb`V6iq54#JtLc2 z$F188jzk9dR_RctE>BLBIb>E6H}J8ay!<5fMl*M>X;fgBZ?Vhf46AtR_IuE_I;`i# z&ljPRtfMJZ0dF;dj|<#6JHdxWa{u(k^KCLFVJQlny8FC!tnn2Fz(+WVcfj#3Q6p}_ ztzh=OoAtH^tXwu?(4{cf=fjk^_ zoI@NU`ZIe2+v7%TY-z3Fs6#zcxwd>BGbYenlI(6^D`OB+FCw$dx!gTCfL z;BJ!jE$S2h@cCn?y&s|1)h9d$XS)wW;td_e+sFe=MVqN$1r@*pdZ4y42xeDd(9Ef< zo`v}Mz^{+PtX^v}o*&!*%eamAV%MqE$J6|p3!ob|av=>icnTizIsaneUn-MN4JNKP z-?SWOBM!hdNVsQqojm1KY$u8u%=?Zc!kPgKcMacnIXlwWsDvjor@5`cmNtM4edRl* zVnUH1bAyGXq- zmAL!zFHe*MUQBklbX~w{UeS##S)a30Ds#CU*0k2lV1{X}PV^&g5X0W5kNOOn3cEqp z%r5A$kFVX1r};@0pepBK43ozGV+}lXY{ar= zy?Mt=LeM&0sd|;QT00hdyAc1%TAu* zW}L~>4CGcc=PWG8`kI4|l))1zSZ5}3z`EFF1!9yu>}`8|&@#UE1)e{Wh_x?~*Acu< zB>luza)o$KoQIP!5KXUj%$Pa`x_cPr+-J_tOW__E`w`&;9G5BFF8||{{R^Y|BsBpO zY3oQPlb($A5pTDHbr?u|l!koc6*q@DOYmRbVH@k#2juHHnZ;?m{ACy@7r=7=1!Gx& zj>0&+&R{+|ga7siC0&p9%>{CbjQG}UtbA5%auj*|K~_bC`;Z1-Ymf3@QV; zO=0dfb9P1+P?2$1UwQmc5%ReRkbmUg$>r>Pl{sPAPLG4xrR*m6`^i&o!&A*8mOjDy zzXv0DPu6L638Z4p2SG7Dfmykzni%-*x8%SH>_dp0v=EY~~J#gjv!atGUQmZsq>{huvF6Ccl6kZokW9HF_IZx4T)};Gh~Lj*6^CKtjqpE{ z=uzlnR;0lNvk^tO*n>FMTVMrEOLg!p71)7lym12%kILK*Bl&4_$-k%b-<_P)Rp1aa zIHBeE9J6=S#F4Lp)2!wDrpJ#Bz>>1l!M&gJ^E+!~PEWo=JkX9?Ypwni6^spN0X$^N zL5%K%k9<%sN=;xgsQ;g!T)jcF%|I}C9JBw{28Qx4_f-d~c3oM?5PfC0h!^S_ z8d1q{@*`Dq=)0*tUH%o77lL&=Py3fBFK8UKpwj@;ydSK`Q`RZcXj?Y>aeH>Se>d&5 zV7IJsoOfJw%yi7NPqz(}x}sV4(pp`d4KKEzwTDpB(h&Uc3bD)vqK0}}N0dVAp}6uw z?T1oXK{!L*wJ7ZZ>Lx3wyH3;Hc$)J@4EOg?GQ|nR*zrVj8R-Iv6cz~r)unJz5dUTh zW2E&i_uLJt51q-MYFpOfuR9Pkm;67L&H^l|?Q7#FrWhJT5$x{n>$U6JEn>G`Td&=% zf4jQ_ySp2^yE_0ya%Rq)_cZ~u`Umhl%?!zOJfoGuRK0OTm0-S z`WaYgZ|QY7jr{a6^&NbtqiR#`paIcjDADjjGH3IN+SlQ&mE}z51!XA@rmz8fcNUz} zqbKn1U_G7epPzOXHd=i;(9HABQbu{M!hOl%-cBF;z3F$;YNd66mA^Nws!Lbq7?OO) zn$6Z2j^z$(PoH*v0fEbdUI!HnJ{`0$cu>fPkX505Lhpyf(b4C!bDllkI)J=tFq!^R zafj&w!#q}EHxlAJX;Uy3VLbUmhW#9#U0S?Fo#@uOPhhPJ|}0{^p# z9AdFrJJ~+k%lOoGj&@%4Y35jAi-xsS&g!-HvgNk5vVM`nr87(*PJ+iZMXRNzD{l8r zm)%v!Rl*(W$)SuFlJ!VJ10UJVJrmdcTm@iS8Dv_q{Yp&(4y)J?3 zhP&?kRKE08a>8AFL8q*P`XcJ~&VZQb#0v_Lj*^EjNsUM~S!16Q>5b4BeEJo+@)BBO zb-Z^G6AcEk{)w(1>6Y~Wr7liMO75QgZ*t${EGeT?PNd{d{gYbA^^els=wuNcPkn-% zlbr{goBXZ>&JKyq^fT;s=5Lw%WGu{!Ll>9%V23-%Pd==cgAG~7PM zzS2J0oD`QbRfJ>dm8*M?v*bDqFi}Zl`gd4CzEsYiA?t`>q z)U2F`1Cr!vMGZk|_gt5+t8;q2^!n*z)6b-TPVeek>*@^m=77>jU7(FOUWz#^zidg4 zVa|fSgM7#Owe&w9U=KPGR53U#czp1l;EloUgKGsf2u$<;=J&vNw6n6$H2Z1hJ4}#j zi@w5pIHe=#rj=RA>B-~H;wqHhG|iTlBW+?@>vVT|BUhAblq4NTxsuhfD;iV z=x>bo)M+k-BR-EwFRM6Zv#j;4<*cQxHLbO+onh|Vt=nK)E+iM;3=LIS>`HxdAP8R> zBDfXQNGN3X_oBgX(W@~RF?TWi>L@IDJ&=`itW*j7*uLaZKZ(A~Ak0TMNiRKWA0x-2 zI2uc_67=v18PqmJj^(L&D5IAke$TC61}QA7ZDE@A5o(93d!y+S^^x-sL3Py<`k0M` zsr(U)Ue~gBlIrvk&@8X zE68lL6RY8~40QDd4^>ST`3LnSZ(GhjdSfhbK@Q9@`MEeJrus8d%hq$%mXLJKv2cAd;#|51)Mz z52p{_^B;V*TlmN^>_AtdlH%BTCjQ`|>(u5}<53o0v=-jR3T)#ea^P0>MPgQHG2X2c zaY$Y4-a$N{K>YAa_-AME!Mfr-e_+yJGcdhO%&hc)%svEpn4W=2Ki9&risRrMtYH=J z;%oQEV{gD6JQw`H;G2MYO~yZV!^{t6?(sr==FR*+kzT-m({U-6^oIY}#^=7ldeS!? zZ@d5=RdKGiKK@%2+1$c-Qsue!*4*bDytFMm*N&_-&d6v-QC*ssKB1NHy*GgKd>~3% zl)*i4il2#wHsYn-Cf9X}9D8p1U+=>*cVSP8!yZ4V&Qgc)7z$2y1AhBgsyH>Z3_Eof zjzM)Wm>X1#nwjS7vwx zG8g`P1W5b`;>aI-zYhkx8oaAJ5kNljXK}>URX`zT^0g3Y-pEgjK+`YaiKlSQk@%~9 z@jXZIy$_$?kM-L_cK9i4_!xZUG$_h8qMvo(DKY$P&R}YQk8V!5o5rd8ACcNrUG$Bz*+9O8QnzUNvpb&okQ_qd8!ViJ={aFoBAGcVWT zm$t@-Z-`t?=1$gd&FjcZj={g}z!RWf8JnrYd<;R^BFvjNKUKS^EsM`X(-578zQL5$n-d*dJVF)itn?C z#=0QsW)Gs~NLD#^v)|DP^M-X;o{=ltz-L}Ui@gLdyu{C|*v+fxwM4ji zCO@Y+`k)igx7mHJGbd{RC#(}I(VlzE&Q4g_^H;3#BCv?T;0Mpy!izwHup1H{<9zX)# zW$f2I?)eaBVLqr?YtF_%*fHmkl#5*RI!<;!_NOpdb2#6Np^=90e*1ZB;bh(5a||-K zWw?$}oD;LV%`(pNV=6h`qXEzI^GVif7U!fU`oAf6(t>-c#kF?8LPzsTGZ*@SQ+t)G zScDci%;P5Z?nMUMsVo}HWN?{X3FscfPbJt5bDncs-gPP1({6Tp4m__Y-ZdBgNgzBT zCt2=$T-|Bp@da{p0u*l76#GNcf`;12) znG<`<@JT&+|Gr$eolZ99` zW1Bo8h$slq~sUXP7PS@q+_uw~2xr&c?9_BeQC)k(c?rI{FgOK{^d@W#&R%T3r zozI?6V-FVcn1k-BkM%Lx8fL$ja88HH8r|pmZz4@*pRVJauG9SHD_`d4Z$c`~llF$^ zeP@q<;}yQ(=WATSIk+ZfSC<$x$8r8O@t{vgoRfDodn|qARW5KSv-gF<-(F;}$L{f} zco;io_bE4bV6d___P029QH`}Sc~=^zKasCu*!Es%iP{-bX7=tY!1J7-9^ZJkJDis@ zpo{_RT~YoLjwgBs$zRL*T|;K$xz?xb*c-mTW)D3)-jmTu<1U_Xy@}wjbV&^*yR zI$_UxVF|mUhcY8Ge|X(vKFN>wH#_y^CD-*% z;iUeD^!Ed+9E6Qrfwb>Ie;wd2%Q?NK71@W+YIb!v%>4zhueI6PES!CF_I4!i>dT#G ze_i|m(h3?7ucuQ{3Zz`&Xm$PWI*OBEMSuN(ZPE; z1A94pmyj8gKj7i-O2&Q^W!*M#b?rD~ZL#nJxVDXG(=GTzjoIbA+(9He76|^C0CJj_ z>of1u+(Xl!tH~>eu`@N%tUb|&3yFWs`QO{QyH-eHPp-coS8wvKFK|u_r0F&1@;y>) z_Q&~+mfVazTENOr=7e6ylZk=zHk%kK9zU)g+2bvo;XN?2DzMrX;V94DiVkwIpE=lX z6VJ{M3on;6om|lrVxCV(+(7o>2KRUqT~-(GD<^nORw4v{G61neJu}F=T-Tjo_zk86JHGIFyeLiO|%uIqgf83vYsLqPEL3+bD zCGXg?g**u#c55Lfj zXIXcK`lJLt@s#M zSjqXwTU`)#fgDyTWZ)WiSDTeHyH@(c2>(i6>MgnSsYHqG;Z`*OKc9-Odz-;w&Vl!s zhy1s%@tQ2gepsY;@UZjY_q<2@_U3fQ;19cDE5F4j`38ezFHGFotXKeA=sWV#7@WTm zC}~#mGoA4e#v(=EIk|W7lb#?eZHQ(icxZ3QKM7>ns*r=7&Z?|q_c!r%1#9Kv(TK?4 z2YInNJgO0^wImA74A!uk%Hl3Wqm{^=9l^iLj{O}2tLqVw>qM}OdvH8I!e%=TpW>1p z%#QCu9t+}!yr#N&Jq$EK$}Q!kvb;YG_<>SyYBo#4W~v5y^@u!YBIs@%cv4Pc18m zMzEjloXLN|T`sZ~rk{L-GrXMK-6x(kj119YFp7t|ANlPfUr;_d40K?!I=tK-ZjR(&T(+k6<7*7`PK1^#j_g)tdvoYDUo~(Lt@R18> zhffngf+xllgQ+b05w=ku6-lx+8Pag zShysfmCnl-EHAB_ZT0MKdwpiu?xefOZreTUKx<~}G`js4mwQWO-H`33oWDGL#uD^+ zV>D1Mw;=fe~~=8(t$qT*ULwl9{{Bx<1Ls@D9gg`hnHA zk>PQ0syd(nHe&5_!;2Wts}7RIZb+r_O)C6XlQ+Gs*{D{?3_GwUnY@Wao=*I^C1`Hb z|6PHNjpY9!W@FcvqwpxAg(#CL|>S@hjK1~CQJqtpb2Ya|5zQ_;#C6a%HHEszD za*?j-P0&sKk(Ju;B08hh7h%06e1Hi2$hpYEb)H)UF4;vmK7Y|C{R<4fwIECl$nZRZ zzqi>t(>v5V%DdEi)oY{vItC`yOSOkqNq=QzmNHv++cWtt2&@`9A*^QR&6)FMo|dUX zs4MtWU>5%kKF6&MM1O6yX9)ACA0J(W#-UsWT_KcC0MVgRaY-5|MO&{cgriV^zn4^Dz1(oE3i_# zrH2b;ncEpakAi)cY}P#1)s{2zW@$f2;WjY1_4sLC*k|dS@~p^f5qKNTSnIpGqW8iB zu;H7{0$F;16>h}t9tDvzCk8x3>(Jq!DcJ(eDXTEuz@Gw89)9D9)La)wxo89S5?{PawO%j}lU*L{o@yBL? zzb@tc{eUHP3_PzbJMjSoytL|-2<7eNvDEP?H!y@3pMT*?|Nmr zvW)2t%)C%5YpQ-%JZl}|Ob&Pv5*L=7WqbI?@ZRASv$PI77xGV_k8d?wF|oS3)b)4D z^~9;aMko9)LHX77Peh8ICV48U6OEnHW$S;A4bC!tmHcP=xAY(BSI2j=Pn!Ld?GiQb z=i~)a6LGU~M$4p*P>#8Or}s}Qn>sP2Bl8Szr7oq**>d+;C0gq$Y?MdajyO6x=ld4+ zE9lqPH^SMBsnWSD6{Oz6S-q-ICAA?n ztCN_EQ(9T;$>LeeJkLaTYtL8DR4?2)NwXIXcolj$%iFA1BI-x9%DN?dYL>xaw}Yen z@7ni@3%u*o+WmPHAM<17_bES)$F2Sq`zI{5mdoOeG*-x`?DKus1{@4J8T==>QgHLY z$4qVhW$$U5WDT_TviQqUVgsY4HpY9{^TSmp{Zi@|@_`+BEK6yTIy`NL>$`ac+NG{fDU_lmhoyK@dZvAJZBoBWot)tz$HV=z z-^`IeXZD=kbL7ggAlv(_*~3Z(R(8A>zIckK{PnBHkH%k%eF^$H>U*xZvk6@iJ0#~$ z-=Y}Cd&@GP8vg$VZ4G%C8j`7Q=&|7JL7xKb0h-@c-vi8RYAatb+NySsniib0<&~w-G)RW+epfdlJGJF|%B_ z>PURFp|A;-z~?;=nD;XI;Y#l7t{SdW=|$6DF$KL< zdNKN}O{2G0WA(i@$tX=Ux={3W+MPMr=}UA&I3mUwx%5fuXF5UbfN|k< zKl7AhzHoPa12sVl#RR67__JOgi9#nCbMzV7M!FCj@Cxc4tWS*o&8RD`llIdkZyd9| zj>rypE*r$<)bPaWc734M3%gPpKe#3_@jqC@#^g>)7|rx(ZIHUeo2cwluA-q+z47WR z&80UN&NJKED{rSaiJ#?*R8h1WQ`Fm@dakT#z5A2>&&|Y#i7Wq9p;uy%yNPO( zUOILJR>{0H@?4JfIUPAIIhJH=9oa2w)6DyVRo}1HO~x0`sMM*62jUZd9RL37dy}6# z;~vIW`c)*+nVcy-UioZ1v#jyi=^qjNBJ@PqsLV|=CxtBx(=s*8v^->XU_sxSwjyF( zb)l>QXa za7w^@|M$MhKKtyYtw$wrPko&FMH%f0a$irMkk&kPUCN}Ct0{w1SEfBl|J$A9q1%ZD z-Yi~}PgvL3Tl&!5&9|zb;hWv}qEB1LDBBs!O?pwB(BsuL-uX(L=NhLmL>bP+rP=gK z(dai7(tp|fcV`fjGtlkBzBP5fjGU>9bn zfjox5+?l2o!IxeMi#Hkn{0mV-R;?ht9rq}$l)Ch2w0nPf3(~vdjZsxxA{~;aGJkgz z9sfirSh%T;_c}eh(`Tf9Og@|R>QC7}1^&GH(<9lM*4h1!79vOblnZQ`sYrO~2>-~w zk(nX`BfCT#4!;|AGq|3A9!G?nO?SIjrM~{t{&z}3n}pc-%<((puEc$cANo5sX+zpf z&nqoJs%b0ajP~Chm^s)Q@+Txc^i`&?uzZ`IYHcw%^YZ%O*chZS6Ye30KGJsX{M#r}c#Wj8Ba3ME_F(jRNg~e+6XnpX2MH zGtMoWYM~E>SWk%6HC6UTdbTiK@Nru4v~j6#Q@W>=OZhLQbn23{7Opv-Y-%y1mo&&) z(NW2H)%UbtFaKBm%l&`)9rO)$c5-aD1zEewtHobN1N{{|vUkc;&tCURSI_ieXULO#Hp6z_AWl3ol|_r`}+A7a-MbkvOTnHk!A?g@KApc zpq``ZX1sTXcRdzpKC{SfDdm*i@PFLyfDHfjz7nDy(pnmBVVx9WX=V+z4Y1|2m9xgm zN#Y};pJw+ScCSyjr+rEpo^mVYZfaP1KDVZ{(6dXAt-(IMd?o*=fWCqCg7yb343dNV z10w?N`7QMgch-0Gx6QRE(mruM{kRWmkGyw0Uf1aKvS~F^kET3K8IgJ>ZKi9YXO?%V zw%xcUo|ZRTt#*fFz2k(Vt7C`#rfoAF*j&MH*zi|E2qUfrgZFfI!fq=|G4wpmMA3tCf{ zl@%}NmTODn#n+%{or%eMgPiRn^ZgQJXCz4Ea;WT^$7?k_nVf5z0%G zz+cd)EZlB3FL875z_1 z@D9|z>otTq;ufi|?2)4^b1cU#S1kXtgwsd$Dbq(E!0GKSmZmcNJ3JGo5KXVwt1tzQ z($_C5Q`hRl_Nzm+gFii@T8h2-2rK#j+Ax@P!F#iA=XD3frL{{XwRI z7(^S>^zB+KK2#y^Q{_HAt6HGFW~yDZwt62Tay$5vLNxqK+yxWkFt6BxmzE2jZ)Wm+ zwLxJ9f@>53V_M9NwG~ur1<}#vB(~@R9^^s1?LuIa2ZeUJp>@H8Fmu6OZtMpF%LmncR5w2I$x~ZG#BWm-O_Kv|G?osDxll4|)4`PHe zOt7jY?Pdz{Z~Wr(;BG}=?VVwbcEN&g2A5|O_~R8ApErpki_%YJF?D_&`Q*aX87!dt zdc1H(94%#*XUfOvrE^i#H(~erf(ab$7L_JMe z?(DdhqV)t9oB~cijJ`0LK@anR+nxvMH~Uy5g5H0@bKU|bP|WxVcDM!3P(@;!+r0J` zoZ3~q-vWAA)`sC744!t1spR@>HDXt~p2UMr?*(UX4f^W>ML!IZ z*B$iFtV*3kmzK&P5X(SURb<9RucmnB_zj}Nj$7#xnFJ43w`@)%FNx z&~T!JJoMEm4fE$IwRx(Rjhexg;8CN&#A<@0m4^Rvj9kNO(9Kk2_#;)ir(w?j1&`%B z8PHtJ*YFT8<_7870YbKfZl%kR9e+4i$HCX;Q$KC8151fjsobg!y4;P5l)tHLxD6Yw zC|J-_kn~C5@q6e4sw3|s(V(NiR}L^+uRLcWgmvvLgo_8@2IrT`(~rdLYZyxPL;|wX z7HsJ>nH;lULlSI*cVNX^K_RAtm6iju{J@&7hk-Q!NpdLo#6twGM?uwe2sj@0Q6!p zaIRtO^?BCFpV|(EHQxl1o|~&{PX3*VC0s#swDRqY-mMp@1E`ln)V2#UgRdea(??Gw~!32w@ z&LIR&***~MSmr>+z>XWnU0S$`Dnuk}h#Yen=ehdU@Ct2u8nfZcfW~y7cIum68SMEX zT>tK%Kri5r6^5_Wm5!>p;lAACca6x#F$Dt#OCDC%VkCjQ-Ultt3d>{{2zE(yP!tm0 z7K_mkE%X%;uCLQ@EIu=B2-allkQ@;X%aknK_73x1dWSU#?M*zp@+=<`7e$0CE< z!3p!DM^eG6=P*~PA#BHQoClkf4txCx46#mhde}=YI1XgPoIf`e`7GI;-M_*?l|uSwvcoxt5raX*)^v8&-HH6Y6O@E&HZQej@#fh^H5(8!_0 z<#*s2rIRgsLGHyvSDs5yioDf5LF-1y1No zm~N^xF#aaEgnzk_fqC*yHN zb8gDwDI8}->Kg@#E*qk2%x*ZFzy-b`#R?f*f7UV@xv#@Il;SSXb&}5lJBU7p5Ca>@~!AaZ? zX7q*?HGBPhLc?C?w8xOuI)>&thb+cta3?-+$N8~JoycDHAiLX#>`O1SL3w1xWSkCT zcidF@yYaA!u*TE4m&;s5DC=bEiprdC9mzS0&Dg+--{qZ6{>g9d$-#-JN5--oclLul zJIdEJzAxrXpWzyAv*S0}A9FHaV?MJrpWmMSZwJF@5$k%DJKRGyxHAld{N#B|CX7ml z=>quJ3fb)^JbrLWZjgD{LiXl3?|p=KzJ)FJNB*jlMVf=`nEm5o$@q^%3QBUqV!1Dq z2YHYzSr_sK3s~`M?7bI0PXPB-1x;n{?hH_DlNnHnoV&S0zd+))v7i4#A6Fy`Qip&0 z^A7Dn=7ykwOonGJ^nf|3vl=To3l7$4vcHG9kBywn5v*_-cBv9sDu3?hH9N7Aoidqq z=DZh^S7J_4Jc$2J6VpWxD1x#Bl5P}_}@fMM1HdK=h@k%6 zW{{n;keeO0b7FI&)5~yY9W!E!9_&+P)+LgYZsohl)w+~%?YGG2`SAQgoSC9X`QKcl zoqX~~_IeA_a}fXMArk38|7GXzCKGlC?CL}^1sBP_j^{IK@=4~jK9k?Rm$i9_v`l4{ z8=-$X!GT%9Jsd`U-y*%)_@vhClX;5ckOk&Nu|vzz3BAd{`hv9nfJbo-Hg_<&Y_t1f zPxQkoR{1$nTpc@Cnb#`htqgRm4_8|mZ?7=xUL83Yj@BrgaWYL_PagK84u1*Y&P^tr z$*Q&RY_qnw1y?bEJNZQAlFTHpOK`f!Vk?@#l{Rak%??e>dqQuWVRaKZCC@mME3obp z(ZKVOg=ctGcR3HgU_F{u*Cv;}2k+dFHO$LuR%XpxaLzkr*z4hN6gDB%llaupSb$zU z!>l(n*)wsRfg8w`f=qikGcQ@k{ha;X?9my1GWCGj`OIXE?qV$#po?2!wIVs=MaasU zY_~zIQ%|%GnE~#xH0#up)tCU=Vi4yvH9R@0Wh+=^6U+)iI?6|v(So% zk@>A?{Ow4s$!-Ev5b?xo!^?h=!f(E*;ozJPMqd+o#J8k zrmxA_Gpkz5b3XI1t7ea`OYF!oG{8zM$zblnoZ`8VeYwn4-9uVCupTv#yu$c7PU77> z$RN_dk)@>4|C&Ix_s_jb##i1l44Tp0dhD<)flA#UzDJ*mJ#Yxtee~ zudgBA-=dd7?l%cz#SWlxrReAY@m|uvByUB+*hR?{btTn#p6&{gX&Ds=P?EE3e^=|@ZCR=DOf@^ zWJ|dVYg$){1~=ME8^xw^yNQ;r?Cj%bun1ySMut)>DY3Z zHCaOZbBKJb5Al#$Q;?tavtrMdaMlCKJzPLO@3501_R2|T>ouIax5U17_-F5s-oK5< zWLtZ|^O{C2e;2AO_7ma#qm`n@;sBX%k(oYa=m0Z;nIQ$}8uXT0iz@VDo~WIs=V)~@ zd=>O>MBFv$SpF0oVK<0wJ9-)|p@OM5GJVP0#@mQ|d1vt2soFF>3=Tsgk1|qzR^Mwm zX3cHeVEbjuY_DjKwa;->^6BlpDGX{Hs~)bhy5W+Wz^n$_I& zfw@D^BXc5gFEm^{UHqO3&0$HIy0@X~%r)JE zPAP6Q!vbz0-#*Z|Lc{{bgltU_S{vWB_IhhNn!MLqa5~!5!}wB7n-t&my+cF=cncc-;F#QJlM_Vn?2tA!dT;6%uJqPB;K{<*dvWUhsgMIWlY z#Il&3!&>P!`U)T88PoKorl$+rVWU0^pC=jL;}BWIZSVmf2)D#p(jo5lA2h$mGSBj_ zWwkY%eWd*#`yE?Ddzd5LUVy0-g~&ATvIW};STBM4jb_$UEc~`+FtJ|a8|*a>=o9D> zU0o~6M2BzQJ#|qYp(|K<_(C6<0TRtb`ts6GQKB}mkn{>%%uVz>j=ml}K{i(4kCYU0 zh+~N>%^oV*nXtGWk7^Lsa}XLV7VWVDlwpp(lH5)=Fvr<&OdnAZ-vs329`*A3)MJ`x z{08A}r$&O&6kt|FES!)H#O~{u%rX;Pc&)yg9xy-DD18W4HY=UV=c8w`2>n2@%LoqX zywR5^Kby2&+(_h?$9N_T6>H&%?lqnYm&J+98_jLR(2KdLa154_!Os;12NA2GTkup` zfL(py$`(>3w^}c5CrTL~t})nM5 zY{|A@Uazr=$)Kx-VX{PjfXpspenFsk2|uYb5%@PE5zVMVG;tD{H#x;Gkn}TnWiqy) zGLmohNlGQBJPqHh5qmoYM(1K;w|HYHzRqMA;l1ENePo4ZkRkttk7f4ScHo^Y)VpY8 z8}MtEf#0pt=D-L(NB1JEgkjf5srl9YY8`#JR#_e4t)O)@oO*MZKCARiaLI1diTx51 z-4I6KZGENC!HCf((=)UQ2tqXY-haXdP}DrqGZ2?-%uZe`u951&A-qVntX;B*bD6VV zp8Blj+}RyowUfBED)+TdaKdRU!_Ds1c1{wRzsq06t#^c7OyMuLVF#VcTDpfMUI*J`aV+}+`7LAru}W~YX04r7$QNBu(9 zI~1F}nBVOpPqUx@_k-_sjaO8{J7uz}kq>s@S7OPUAc!OJH|L-ut%9GCgqQFIw6HpS zh0N?vFQyAu7w;Q4=~BCsy*bP8Mf*T&-KR}_XAZ?u7WKD6;YdPTn_^FK$Q4mT-3S)&qq3LP@48KxDy zs`k-p3){t0%q5_6v=~LFhY7Gpw@I5gYa1ogLoII^VzF9sScr>eH`ByH6M#@SV4FB{9xX9 zBSkRL&KMiJXL}It2;UWIgi@~fN$nInVbv(|MM}d&FB2I0IA5aS% ztr+p2Rh*ALWV$lQtcoVQkKP-AL@Xv^{E5ug6Q2=#{)eVijLpQ&Ex{U2qD!BFnVGEq zQFsoc*td%GzB&qSV5O(lAF^>dVSNvW^Sg{*FJ-}(`|HclI;~)(`@n`P1vY;Mglxaw z3@zIct23LGz75O%Cmg?iCQi-$9tCfyDMX07h@;1H4rhQm6a^V-E(VDCz%X3g$4~BW zDoBbO?`;SY=np>IiU^=0{YodnMcxj7D1q}|SGz$~^e!g*b->zpV)E;1uba*-)9@1g z&^Q59nfwQv_ZE|PHsd?2$0NUN9cim?TW@P?e`61HJhS`S8`^xBkk!KW(Yo6DuXPW7 zDb1OkCzw+33vTFjB9nCp5F_6zxUq1=>2FTia1AK$~IWd zrXT>};8K^V&21lQc9@?T1NFub|z6Qq(|RgIxJL&1C&o6pD|TqF8$;V5UDmNf2Wb(kHR=vK>5{+;Ysl1{4B_co$l?gKGg0Q&0%~FlMg7 z=X~d^=-bZMaL#hJ@yY7wU@vOVW*6)m@dA5VBP?n3!TBE<9 zmA%RiWulUYxw6ANEtq@4%v|Q-zElbkHRV+k)tp#`l1TnOcBUpZ53j|FoWu@f>62wY zO9`^=d0;dQ#;=(xd-2NepfKptJ2d13P|wF?c`DQ8_Z-nzUi#s6;;R|&dzUO?anX%-E<|_lo}B-~hX)>jy6P26>o3W?`J6X0R)l(!b<^+5suu;k~G2S3Y{K!vCx19^fkN%Im5}kD4*= zcd*lbD1|kva97%DS!4CHouN2;j87Hc0e*}9iuy+eWDlGcSRgPiz!7K-ydL27@8H+R zdD?Nt-p_8>qHN`@IV^vud)Q74a6~%};`2f|=b7i;u}ol<>AtE5TZ9YRsWe>|JS=Y_-(0%(qOYE96qk2g_KiW^H2opKUHD^csCk zHdy*vPB0~Io4AE~q3Yb!(qi!>2%p)FmfSFN z%CdO(GvVzWk~M(IIwhHGwMvNtW2(Z`%8lM)Fun5ApKb~^?wHY!+(a_bftfq@0XG^7 zLYgn5a;bF&mVOhoFb8(89MOG#Z7FP`wO-LXmw8TElzpC`Ofp;Tx|seoZBN>YwAE?f z($=Nda@}=ZarahgtC7ZQ`I_UR^R3T!df`-e4E5U(+&a_4(A**Gf_+1-gftBO8FDgs zLD0HDOTa+i+xFp>5_EfgW|?UfEX#y1D^F9UJS^d! zbgF8C9vX+eo+VZx>bgbmjP_!8>8o7V8o(^&56mun0Gnl`C6gt^RX2DpYy}L z$Yi-Khs&*{oiHc{Ar;3!T077iz7RD_`#|v0=wK#lbEy?+!mOw1@a`7C^7Bzr;CE(W z;+3zf54*C$bv^w}+Q76fX~)xMq%UvEwv;1W(VtrxhXxT%Y zQ42O#gwO+|xDLLciTsO38q=G8(qHC2XwY|hTd#)?TpVh6z z6K;zerAWCZGa~cJIl&OO$i0}mR6yEHKC~a0brQL?g?I#)^!Kp9k9bvOi85BX1{3;; za!y&wx!j;^R}Lx{m2Zk$c}lnBSyT<|Q4*9WCe{r^F3NhdDqr3C-7Z&l_b6sUcU8VC zfogAkjabim+_Bl2*%?J2t5|z|=RN=b1#Jml9o#9n5xtwX1dR^d?El!egR`4Y4o4B& z8F`0zl{{?`X|D8BxU20^&bXhr#<<++<u>FC#6=l(w%5}+7(HduYFHe&m34@IV z_=ZlH95wL7D(bVf1I<rc{7=*g|Qq6jeSm)w=@s`O3Q=*6B9QOSDlQWFZS4WEj&W z_KVComH&r6m_z^VBJwy;m{MXKwkm?k$-3m^s)LMF!z-&z)D2S2{F?+iZI0Kcq0{zd zeR)$XFb~H|ax1;yC-`p zgRs}f!%Xm=RqKMBKElVCskfw#)B;OOlpac*`&0+-_0cBN-|Lxo5i`OH zc_nXNW=bAmwRW?cN4)*Hmmc2F)Q22pZtNWIPbPeZ(m%c=UBJV&)p(*ML6RE~T`bT$ zQeE0dcp+YtZpf{yyKIB)hP|u9OLuO;C!ep2DV-CYPw^Kcd_MX-_u1`eW&dt%X$ipg z8~`)&7j_UC#TmZBJYtvwROggYJ;ZPVXT_=B^KMitf~6$rWr-@1iEfr5$-7~h97KA% zOZDkl*MbOOI@9hxO08MpiJX#S(i)~0kyd%&qKb4hKD)nG4`6O!Cx`9y}^awwP*L~Q;zO~kRLFS5_f>qQ4K4xn$$j_o# zf4h$p6i&bVpHi6IM}8oevXo&~@>ekV{^b7_!DTDTuCBzd{0cVTpDd&Y-bWNXi4sic zI-x%#V(7@-oBciS5E}$jt=Jn+(H{@w4Cu>Fs-ydn&3#Wdm6GVso_HXpM#%-DA5CBI zS0LB5wU^BL4dZ_1qp$C)smzUUn9-Ac6EVZTVEH3Bv3=Vj%nVD?8WU@@dUdwnV_V#A{9_eEwwF%e1hrGJ0uq!uC}AO z5{QJ%dF8EO5^tn}ZzFMHPtai(4B6%M)w-%aXGZZTyfj^_fV^eV-9+|3I9n#ZR)?5p z6el2nID(qM^gKTV6VOVz119OsD?@XVD z?)-b64C@u@ef?l5RW&}7Wxd3y7y=ekjq9vVrS(DbToJ7ACNlPuS>yZUm%D*fMS`I% z1qF#fy3_Egg3#i3VdwUvKWjA7x&wcC1^<>Kua!z|+b}v`ULbb+k11v?(FxzQje2D= zyMyT0TLctk7&VL2!HACuF|b27;>{ZH3myv(iEBo1e}kzPFT-6_7E^?NW2-;OwOnqR^lv@mk$J|A~TE3X-)HyzClF0*^N%1E=9N>E3r~f5Ysng{yQPp8{th> zrN5je_Las;Kk&@nApH%@E^W+s%R#IW3%0tA|G%X6-!2{{zYbBV4LW+#0FZh#-#V}z4C!iQPnW^FoaGo6? z*7fO=(T!JL1OratO6M?3rllCmeKx>5`e@7(R-#vSLYFZkz$Xk@+To0gU zQ){7sv7U_9HF!DGS(CZK2)*=KwwH`#?>_cYYQB?qYadT&P;VvX2?I-C$ zXcPDF30@GuwjgIuVEHhW2Cik0P=>0&zrbqhl7C!;{xSOwOd?vZuSdaV_Yrba%l(Qo zv6U12O5Z?D#(MmeI3lzfU@$w7#gEv$Q({))%i(0f{a~-mq1%5!NuWYun^Zw;#yaOF z;^-pg=KS`Ams(JqD%=BYb{Qj(%u?caVURId%T7Jr9Cjg<9!N8d?Vtqz)4Lcq@N?G- zZv>0c1S>w=sA?kktVT!T(P+BJGB1=J@!=$AXC7WJp%4k&p2y9M3<5mac8 zaa{L*-VgW$ zwJ02ckI0ToFM;+Vw?duS5upm$@eTT%R{*_D<27V!1V1U7sloNI&W+GV^fQwFh*hZ4 zy#+q_nYeT%c~O{NLVZzUl3XcRQ~$DuEyZg>RoIzpI7gAjT*)QR6o=8NpdjdbLD2uF zoZoNcAcKr``ZBJ58yTaYT)|7@6l{SN)II!xUmZ{W)n;}M2X9=Yy)a_v5gUkIb zI%KTDo9?eK*KX)ZcpsXvp56P66#tK`;clj_RL9fXBMdMi^fK6)C&njX6E-qUJZ@Yh z!`#HkC7vJ;To9eoS}^-TzTo}$8BCF+>U9wvFLXV{c!(BFf}gvb$!kT(kQwkwo6|Qi zA8V8cF3VT&SG%!P7paI(q2FnL!JmvvTdWMd9fir@w70+-$B?soWOP6W-eZ#PV9w+U z@&%=ZJW?NNh_H+Ew2VG@CLin=QaRS}YCrTfMyC* z8HP+Y0F}FrE;~bpavJZdBPk!X^;#|C1X{Q+S+pX;Mk9zE$1f1&vTz{ZAsa*WY@n&x z>DpaDx=r-9i_keJ2J8kFWyOjS0-PI7}^J z5iv$j(Sq_lgGsctw$#Ol<{~tcGd92jkg~L@&?AW{wt0Nkzr(MCpgg?sZ3!Uy)<} zA~Y2fgj3{UB$(Jw$gK`x6~3Z(g2Yx>;%8J5^~1A$C)OorI2=~=6rnp6U zeD|K%=SUFyENG%xLKjY0ML7LU(Z&hjAn1 z^$%BtWUPCAW~spz#4fBLSDK*r6xxb!kcdb0FAg(~!EBfTYwfsBu8}GXc&4y=rlCI% zP&NBdJkA8ll4MzO3)88ExQG7xg5G}5nO-5Z7DJ_z$X6ku6{oHWE8L%JzpFPUp03II z&Li_*jGcm^C}tO`>Nku76|#b+OytxJ$7;94D`=0`Rg>IY6ZYJk@Oc_=ngVi#qbIjfN;C@`C6KOLiot;+d=3v3BRzE4j~)t z$S|haZ6iZ@A5Ury?7Gh6Ur*p6?xISqBR%6xZs%B-5gPm01l@59-7pzl8X&#FCQV>Q z(LZR!+t|3HWT-ab+3ps={IyWM2YY&r{=jF&Wx`_i{+vFY>okPZ=q80&haO{8BNb)+ zj|)feTpzOrQ#h@q$Uwe?0Xj-J$8NtvgLEc3Yfc7WHksvy$pitpip z!93mMw3B;nO9j$tb|)t}cmt~!#ZTdAy>{$=X8MVYBR3R9)U^z&^$&cYKJe$F!9#1X zey6b5kA-!d4`yrQ&A!1#mM4m=Eta95O(^x2m4v#~$;^V;J)VlFqG+54*yl8C$!B^@ z<-@+LCv&?F4ZG8rh;LGuIIb9eSuf(8`{X(P;P)QDlfQ_k-Gvp8L1OYz196&sdL!i7 zqhB#ZqJ+;dir=B3E`lG@BIW{^ro+=e>&LaI3EP`p+f$~VmWUk=^Jq`-!4Uxd|a2HI@_F!W;QPUvoW-z+noO?G4ROA3> z;-GMs4p8vEVL*e%o|7Tt<({2G2pHoTS2WN20k zeT_2wJqq?pVeHlt&iNtyv$yEo-k=fT*rX;r`#Rn;z2fw~ ztjTuXFAsQ~jtmxqS>2V)-rsO+bgn)U&+Y)ya5!V99+EA&%)8&ke%@l*MxywED|np2 zEU%2F+(jq9I{1bS*r!0&#+)FU&PwLzPEWX^rD*#t>hg zBg^}en6(PBX7=iQ$!R%&);WmxIh8o`HlH+vt5@*j<t9fS@(wU76;qWc?Q6_3NSs0%M-J#}jn!CC%bg*%WFHKz>K z1b_L779C4AI}UHltQY)&KU4z`Hy0k`79{2fYZAj*yuy2yABeSC?w!1QriO`D^_gD^pM$78R-}kdRl?|4x#MNAh=K~&?WcLG5OH08#!a& zIB8vwqaY&OqN*a%Ls-2@ z#%MYbEymLA#dq3=Hy6n@6(%RU5~k}tYE)CSOzil3IDw_fQdFecUlxiGaW+K6JNu%u?Th7oSwNDV0t3Ds1{p>O5~JreieF5_OZMx{l7ZuxwfT&B6gO znq6)qU8Yuc0ri_-sBkuGn)kA=?XYQ0(E^9i2_KjR-3Co?6l}5$9`rWFXS`&WTYvl2Yr4UX0gZPW+m8&h{#aTfJvOaew?>O$lqjQ$N8X-OF6w`LBYC!QdfhW^#c}dG(47U zWF35%N1F%Cx*R;-t6b2wuxAAsfAl-lQ?hEi$3ZfzVV_O%Zn|5NeuVN)yz-kXb z0~?(4Pt<-^fQ@$oebbGq^dS8KoWcoIbI*rAu#%rg(~mET{yA17L|iTZv8pgqf-DuK z8KO&a&~av)-($ZQ&O7#c);X4W*0J{Mj%kjKwpa2ru@hG0iTGafldDU|nC;S1o2Pb$ z(cyrFT~s@#_GbETb}-p=Dy!FPx!`;?AT#RK-h;`8lU*o4%smyhSs#3PAH6MEz2*8Y zV-h~OEQG_ryZ{Qf9FHp>sM{1{3XJO*ke>bcArqOW7*7oL4d10Zds2uf*_Swd0?e>l z#PgBjK{R$E_wV6cucTu0E53GK;-_NRs%YxFCKCM?=dA3)GCgD+%_)7GkcMvD(^*!t z9?^qAR_ZK#t2|;F`K5E5_gSp*V`9TZ9yf?)OiQp8l?8``U^L@y_v;x6i%Stt; z&%;2tPKUHNO#kS>6q%k_QVUE^1s~}lK0>4aBk3%_qsW#hJT0S9NFc;;cW3e7?ykG| z;_kAzyTjt{Zi~y}4vV{6Jd=^OdH>{n^AY0d?t5?5sj5?_!1GC1t}-C)li-B|T+cDC zwJE*Nt?9+_Vhanc$&OB_q;51dj@7IFa z79g#ZZ<@O?!#5R%W*NDzIj{8}+f4gYo6TC=TutsImoV3`9E8d6oqDkiMsgq(*GIw= zvc8?ko7K?U!Sa4-^ad>qAp7$hyR9fZ+l}C$+P-I;f#Ljn!S#h;r;1>|a)HoN$!9bL zvlk)nyAD)4kqF=o2&))%h7PG86Du8${k{p-xXt=!1N*+9(v%Fa?If|f)LcGmzD#ASlckC|M&1wm`i49Vr0`f?Eaei*WORnS zgD(~${w1sdPy3mG9!_~^LZH4*h06uFZSJ)riQl8jzFv;+e*v@CFlD&A&DrJThH<&a0jKAdX7B558)Qh}!8TLg1;<9dJ z^PY0<;!U?emJ3+*fjsX$)T?UZX;#pGQH~1I8oqLfDESZ=y+17Y7_KOmx7mp?kK?WD z<87M5t(*WJYse`L##?`3J+FXNW^vlvP*H5kdly)d$#7FEzze%h^{)}Uy1{yXs(?+X zSng)d&NW_XdK9~V6R3fpe#O)3-o3uk(D1C)_|4jHp+=0GnPO~bPmwQOr=}i2DCQx^{AkFD}EC9l* zC6;I2;W;L8l|_#tRJh3%d5M<>v4{OVs5P-*X7Z4MoT8VYrZ;5u$50J@MQ>s(`{%7u zgD07r)tk?`K82@@0u^LtKa0d5KBF{<{vj5`Ms$^(Rfxof26J8}a8iB}%Pysd)IZzO z&f8Gp4iEl+4$;~a_Wo0L*9-1&0J!5BxV-|*;&VhlJNb^TL@?9&_!oNR_1Pr_;QGEM zekws1>ly4(8+Z&0(KTsBWu^fiwdcKp*r>O7jYTkq>tMlKQP~SbQKT9+FCWjjG3?+- z{?bysb}h1$**NhbXcfN#?>wPH>9gd#Yhc;pSf!2FSrg|=zzgcI*NcMH24N}vvt~cw zwQhlwbz=EeWC-RnFRUPJL{ZeyD{2cjS(fTbJ*5t)2K^-s;s4I`37RL#-p7y*q_#_D_~*z%ze_eXUL&>JqWc8$3!DxE%iwX*C3YH>M7HnTXvB zIxNh&uEl;G!hKA~vfX4=*I-pnu*W?_pjEK?{^;A^=U$ZGJpcyKTK>`q_Q^)pYXmuk z8Ze3SgD-yKeO_@E{^p4s1ZT{~OATYkZsPyStYj9f(=eXOVgAlaVvW1R7?r6wjsSt4 zqzVv*Mok~Gc+;4swvT^*3XO>MOM+1!;|E8AQq#x@`1McfQdjs%l;^k1oA8>=i3z5# z!Yj$i7Xvwl(Ki!Il{>Q`@O}}aBwQ*D9)oPq5~PcKEdnhxm+sVq_*y$K=TSVz5-evp z`(gyPwSR_QNH1*f9-c%B_4RzjH<9G6igL}BK-B(;Ks7iqDa2;kL2ZBYTqaZ3@5>I3 z05AFHyX3&ijE5r-#d|sNdwGbCD`HJ5;jue&=l*+N3dXv@tk1jbtBH8iT3CxB+^fIC zuLLXl)>N6A>ug~Hm76JWLXHdf;h}95HS~0IOaH(&ZU}EhL$NcJ-p@Yth?L0aY)e4Z zD}Z?94!P@HSo|XFyGYLSdwfbg?CMSSMRxAC13gH0hya^X86N^V|7ny1|F>gjB!Uvn z?2MaaYVHz6#bGz@g3~IJLEVZTz!~<*Jl67G&N{tS{Qj2gk4F5Jc=pdz_K$)O^6N7A zt45jdJ+H7h*YUqsu=N$NUzzYO&5159a#C*cFPZZaOm43?-AF6&PX|GqcgW%-gEBvX zDc^xUE|Fh6#IqXBbIea}sTgZf5U$!wqWbwDwRxP$`grl8?6@M}O&@tpoeW(OY+nLB zRS!WtE4kk|utNkpA`Qj!vmpMTx{19~i^zNg(TP7YSx;YIlHuTY#PYK?pNA{ex#z0B;?| zDo2wiNhPzBjlCVrN2S>TeucE+#4Wut?9D_xZvCpWko)nLZ*Ts)8Zeo~*`xYjAYbF4EMFYx?1t|dEXak&- z>*O^X^B0Ta&9lHssY85liG2_N#%#hr5nQ&Bnnp98%wFRixvwyCIxZ~BV{Cs6e!4t1 zuL#z<2;CcgW&Xd}F`=xuFT+3J~DYW&J)ETvz?BbmHUH_q4+eDfJ1jC*iSj$r%Vus8mL$99{i zy_V0M=b889$@Sp7n{r}{;=9xES`Wdpb+>UxZ*4O*#o=h5WP9W=*_7@{E>s+ z*Eb-~FK`&QqtJaAAELosOQJ&S)}vvg7pFI-A$w>vKlKgknv;BZ6;91x;K+Hr_HYM& zYZ%NEZGzvM%Pv^U)l>)T_|aE?u5A{X?iJwaDSW*N7NIQmq%W3mA~vK8451PHgugR7 znF?__F~B~)cNf0dOI7|0NXxHv=$}G&2Mh6@w}-Ga{^?}X*b__m->)$M;=ohd&0kmWSRC8)sLs5uopXg8;i4_&d_<} zn(_-~#!qzwx32U~EhxQru0( z_%7IL7oXpZ2f0o*!fgc6xm5_P)sj0O#=Wd!kFNqZO~+o|z+3p|78bytF=4}CHO7EE9>Au(FTCXK3zXLLi;cxjVn1f< zHif%&i7Zf@P*o^G&e_M$ZsS_2W3h{6?Dj0!f#p2yPCO(3+fl?h1KBxkc=iRbI3_Gi z82C03D{}zkwuLACfPBW4CdN@=j~|rz;Qg>MQqSK?8+MM zWgxGS#4<~;Fjujg@fl}R#A{c=8<)ZM76UW+IRUxx(&f0yTAZG)Ji%?8rFlF{Kd!2S zg^b3Vsd%YNJTw2iQ@^UyJuKHwo}PamP%FGn4SZ^IBB`#t+Gp&c{8aYBxT|`6tvHq^ zl2;}?R1J2TUstIw7O6M)u!rkCz$y&jd32%*(2b|Egq8S#9mz#4%RgN(j=eCCIBGU4 zev0#d2|G8RXm%0aVGWVjLU2+)zNZ6ws0T4v+YHNIga7?H1MTrG<$3uhn*M*zsefiy z5uQM9FiRFtN=d#F&U=0Zw!G|{Kt9St2Fghe`YgNm13SWxbd%T>*NKrA5%C-XlXc;K zs^-3}af`it zf_?T3M70+ivJ^`)m%7y7*p!*72tEd`*?DCN{e%`_x}Y+DUd|IG*Y}cH;$Xi$Qp!IaD1I$Rt#OMbRCs)05b- z4IGQ&#E;|gSIenS)T8>54VJ-ip7IRN#USFf2_UnA?B?n`g?FHiXWZR+?(G4-!aw<` zA*XaGr_aBSrgILOsTP|X4{2CFzSX=*$KaulgX4h2Z{P`83X0lVBQn|Z~_F$}FqFVC}R)bmC zf`{6G?;gh$SHxQO> z47MhNG6(xng-YdpYF<6DllzI3K9MPyMhv}$Y`|6U&QcKmPrl;@R<%4i1%LiXH)c^Y z*bI`{g5A%`GfQCaS2lH_($fJ;9s|<6z`gi+WgEF3{|v3|`0eJrn!$)W#cA_TPnNrdlG%W354eb$ppg@!h>%k0*E3iNYo49@%=Ncwr~bi zuDvt&`_oj23XcPI*d?q+EBx<6QT|4m9l* zmP;MX<1LM>Dmp#4tP5;?>@V#e`&Rmc<$!~Z%#KZ{DLu1|ww<+ZvE)JDy^L5{=!il^ z2j6AG$=^Dxokf$q0UB1jylcHHz1_XPd*^zedSmDhy{CTHZtMLxy-(Tk+3?eK!Owez z0ir=)XfM>ix{+I%hCap_l(Wl<`N;#`!-Hz<*fe%!v~LQUMH^8%n@7)XNiB|^@MrXv zZK9)PKQ-Hy`e(f>aX?G5BA0vdZ)1dghj9L%t)Qk^g2| z)m(WIHN!cQi~fLiLN|C+3aj;zI>#@qExk1>=rg@WPf&Jk0y^9G(Nd!Yv0E8=P|)-JCMIhu+xQbYHcif|f>q@eZ#1 zIREycCKb;V_*Ctu7N8fP_hSz6O-x2E{V?l!#7HEA*dEK#4HVLsostQgSQYyh12*&P zE-gSKpf$CVvPNBah>NJ5Jj8o$hmBNNY$pwnOIi}Fb?hx28yx){iFT{wrDJk{H83q; zZa`yJ_JjSa?Ywo6MK+t|V$yS=h-sgSw-cd?1 zHBJ4bUDg}3e{xdCJS3D8|DoUF2I|xA(H-xFvRhaAImMvQauaza@oq_aK4XLy)MLj` z9Szo}X=OB<_Krzd8<~R@q0UrfGbfJwR!X^zhKAsjN`DgBu=3YC-bqG z7p=ILQfv9O{F}L!*rBJop&T@Se`~o8bFOc*DZ5um%$? zO*Y)*Hj`irSm73qc z`OBu+Hw5es%o5Zp=ty9Tzy*O>gN6o4yniyF5YxUkFtf;NzAcRr&%>p=rEjIaJwv(e zZSQT0qTD_2Ki;X{(%xuqH*Y2-NDa{LX}k3K#$+FQu0mrJ`u0c_=*wTt*%&D2rgt{=MzMw!5$DE(lOv+lML@MV|$m{O?!#mS^(YwVv+&jj5%v()S zl}GA+tvWV*J=xyv)X_Yq(?So?D~>|Jvn=XK9Z|$fX9mwmT=NWWyFX7;mcOBBEzucOtZtM4W*t`he$S!OvrsBl`fZN*!jP~ zVJ0wjKJvWxv2#c81$D5q=dr3C$iD^|@2PRtN0q-Sd%d3eOF60>X9t#1*QtT5=|OrV zJ#@_FqDEMRidRw2?<%w%auapF#-8UTd!}QD8^b+K(5>)*TVSh~V26+4-OH23T8A(5 z+wyK?JQMVKFeHkBVbk!(Il)CWgjV7yDW|!NrH-|kt%<#jqfEe|fPDd70*VCuaySBd z2lNVf>iB5?%jU4&Gk=HUFpc?o+k`(%Xyl?(^}AL>6SXs_7lf(1l|d*LURPu~|8}dt z)DG;!hx#sK5WIsmFzhDL>sC?RE#5;VeusEWyeKA+x$J`7twzV4AQlpO!;b7igftnR z-fOKUGp6RFADLT~n74JDKHtBTxyl~p0nM(W+V_^_6N%VW5FOQNddcdvrIR{QpX&0eI9>pxMzxSO0;W zs{1~{eXTd$JII zz3TzwYFwa=5_AE!rCVe(^H2YQ?XwAV&;u4g1e#Lu)LRokl6yfTW5EXb{*j1mE@xv@L?y-w-!EWl!V>QI-RbjLyiTW#V@iCce)MclA5D zr*2G)4I(xxglga!5Ox^71|CxoxO)f~+$!uO6H$TRPyxic0balvG7_!G0M8<$69A^m z49aLkRxlMb*OjbUD=KnRz!-jp+;ifxgY4;%pn~`8&_0~i{TX?{w=m7Z$%Zy2kK@-i zo(y(wKrUn%sQD3h?bkk9Pn2*7oB4qZ*c&>uer99?FA{fc2U+>g@EH)_5_X|K`?im2 z^*QQ!$B5PZ4Atpi@xEl?RuQwUAX?}_TpUeS)}OB~PHg9|PJSRmxqv&F1Fqi0Q(VYt zuLLTPSf`3ahegOxyuw$W1NB}fpBPN$t0hsuaQ4d**6}i#g8Sqgzk;DZv8oC=%yjO| zN^BiVKJo#3=NxzJpMRE2)Er4XIw`orp0XKarK6tDKSD=}jh} z0PF4VzrI6U8p8^66>%Y%y6l{bAAI&Q_T1k;`-?1aZK}44ZwP-s zCoGBo>xdV^D_@`%8zWkZy0mT0kGa~$h^j){(u6*2SZz7y(8I`i; z?!ZJ>UVpOaes#Y8SaBydV=9*B05NGGRf3QFtpi+lQM~_VcC<~9 zg}3bHV7dUFa8k3gzU#?NBoJX2B+Bc`d7lU(Gt+hZ7{**?a7Q~nentF*QXTuw$sTz{ zHtz>a)yDizzZ#T3O7Szu;$dqq=E=2Z=iTD${eT+~$yHm}`OmoGmE^IQK1jBtG`o5i zul4Lzf7ky>suoe$j4qtcndBMA;v?IW1@K2d#j!FmTz`9>WJPxJQBY$X@lPq##ujlJ z_G1q;_R$!2&0Kicso2Wxppaoy;>NL_nK*9>%%|$~lUxuxpgi$jx(BavJg*I)f;1^e zz9p5BR*6p7O*$12n-EEkoc>r}5>*PFN={)S4F^8A7cqP+=hd%zG>j8bfc>}`3voIF zYdruz-{u6I##Z>%?Vn;J2D5tk$jR*HD#npH9LuW(&-Vr%wmLh10MTg-`Re@SGt9m> zocpu*}j$c zo3K8e$+D&Le1~H}Dv&+S56%uEPw3Yc9f?oKjaTtkJJ0j?PV=6?-%h25x0%xyj&_4b z59jJebB)8XvPBJrdX|^zj`^vL_Q&(w0o@lz$>;$pvQ@RMYP326ji6D?U@hQX#k|eB zs!Qwa`&+zd-eOzps1xwRamv0Ay_c`H%8q>jGXpyXS_0B+Gb~NyXH<|PVHckh>xd6c zUybA1GiJHo@+|f|@^tiG@Y>L3As$n=qn|K=S%5e64@Pc!=*&`CCZd^8z+KOLplV_x zWhiig5NO|c!@6Dw+l@IDVEe34+NiWGg z9%r9K5t-bA)&Cxa&nn_8s$9k3-9AOHB#Sf<-+zYM`A4ocP)Mh4x1VVAlaY^gDo=M) zIb$aAz+G)4^NHUQ$>k?T8V1ICs@KBCmEb()#e3w$*DA28E5fCI3Wuc#IfT!2Bozi3 z*20D@Bhv4OmwV1>%#WYT1*>fX*t!xCZz=dH2MrUEQY-ArG|u@m^0@i1?ajzbU0`Bt zO*p)5sdlfRALKL{bSF8`tfC7Y$pM0bS8B%DdB~m`gs%z5_V%Sh>*p*lCR@`0tMUWB zeOJ80e2~Ip82w$S5U)gsY&msv1O4YEuQok2zr>4@#r(TvoAqy7 z1N-0h7WQklSlcM;ZHvXy!#q>&CHaK?^aa+XgC_!vJDrKQCA92XCE}o+Xt^B*-M%LB zTt;90pD1ubo|SF7j|?@MFl&vGc}wHw+BK~iV&3Qt=lGwAQF z_=C)3V@^ds-Qb$~fjTRo^wC9IM1-6ZeqlIkR|D&LAH~KAXhK{;#cY9mOg=6Tkb{UV zMQJ7#)Ez=4GC_6eWr^|~hP(I^Abq!z2SK{2WP%sx)dF&KetvZ} zEbC<0p7YSL9SN#_hfc^rl%J=g=b6kjug1b_{8Cdghxf^7c7$tr9gnjQ&-^d!LN8g& z2s}t*JjEHXNG5zueo$~B&`$z;rI7Csck+a1&<)j)T1?8dqfGr>Q_(wa3e&2D{z{v~ z%-tBRqZX<6GAfvU7k9|ZEQa-kt+RcJeV%=!eVpBEt7uCnU-MZUW-4gp(gr9+y@NfH zCkAD(j%e2eqX{*h{KGVTjIkCy+aPqr&q_Ju#`5pvNPO}f6ysi6Dx(PzXw6}HB)5?c z3bJX4A?vTve|qfAhVED)&nI`XyOPK5&8hTP-)J?Ap>#O?#q$+G|9&mT&hlkB$lT7{ z)SO60V59UIcEl!nrFW2P-U?IhA!;FO)E$b;d&PUhYiFXZgzk*0F4lVLeYvhwQxzh{ z;c_waaq}bd2{T${a$kH;K52!R2>N}+O57kbvYRLNG-HnHTw?DRx zUtJ=_zsabrhqF76^ZXMGnHOK)jjUh}a^Z7fb06VtW3c&u3{qRjLb^j7h1gLb_} z8K^eVD(iC%3;os~$*4HYJIqdVQB;HGn>Bfy947ad?lPr%3!M3y^wqD0fzSz+fiGN3 zUov6|(t_lKQ&xGvX#ZzR^(nSAUFPrpQoI~d90iI+D zm6r}gf_vC&4*CwW`L==nb$VEiF=Jsa7W@KJ(5e&vbOZH_r;gE#>YhwpXe+-@mK;Rg zE2!=b5UvuvtfQ84nm&+cOc?8gD$*2~72ov=__F{ipf9K~_&b`0k;&`>vTlL@iUrO8 zpi*t7g3}C4wGoc7e}{a^V01TQkNsjV_Gf>dr@A+Z*HtPsOToNdK;qT#hCAqvaSKJk zD#eKYkK?uPkz0$TUvLc3d<%Z|FHr6p5XTB~JCU5{zU+=lL{p_W14Y5H{&_D3T@Sw# z)s6&-ug2Rx11YZOd$WU+mx0&($}I=Uxy%E@jt7(71>e^rZn}?st;*eunS9OU~7=2U{Kt-H&;1w9N!wMVE`ZdT{2 zyO@_)gIwlCCKAYc74(E&=*7wP-@@8yVESMXE6g%_@^*;3$pwd)&zd_}>RXH1p4+P1 z8`%G|-LWoW@@svmjnIfGsaMnnis=36{_ZO5s_BY!|LMuAB&ZK{-Iq(uF8^yzwcNJ0 zv@NqOuvN89wbo-cZ?H5-u+bA7%=G!=T6OZe$?7N7rN)9adTVF2XtZ!9ae^0OJ74Oh zS(SoB#mzx)2Qtv&4DyYqzz4a}gj|m@_IdF%_dCwl&*;lM)3)kJlAfZva8Ngp z^+{Mm?Om);=$YeaQ8fNGDAYx+~1Ne-(u81Nxo76Y(u#eG|0E{B`6-^;--hi*DO`hd3w!1O4`*kSjU&bS>=C}2w=ATFKcZ|&8 zRDH!K{7u9=j>xq)e|HEOvuQ+Ho5-g5YqDSPESc~v!T7XTswkbQ&SaN55&s<{58N89 zz-TdC7)QtJSYsO9K#!R`^MXuxpyos=@hnqK6ZF50nN*}NP*+<_#_S0!mGvO@%hc;W z!V(ySr*6O`fvTDdO~t|L?`mJ(E>>TtQB1)-1E#6Ws|ULLJBeLhW8Z!dT}7ZEG#!ke z-_%w3ERK?InLpD7mf!Z$cHTal%Ek$Y-SOJi)B4$5RBk5LFx52v)GjEky>?HUE3fNb zdJR_(x9+*D%+OjGaQy_I9;&vaZyKb<3OX(8bsPxNYRj1OCFB;X+rEI~u&BF?|VuH5Hl-?NZXdIZK zHnuGiuW^}}D6?1_K6EbPuPtC5ldzCn)_X8Tpl>#D?rl!j40QH-f+^~dQ!I&v^!a9y zpBXF|#1&7(Brz3~{XpC&P8BP_bX$WReu*tVNfk6RbutA^e2(6;anw8)!2~RdMSqKp ztp!G`1CJ#N_V$|$X2lsY2$g-W!3`scrpDmO028OC>J0kxN2$rj5B%QX~zSEJPX z(;RNjA!m{jh4XZ_cQG2H*L;b&OtsXKsP=`Tt2j$7OI7izK8>vFe`JBan>q;BP;8ze z{!1kNM7&02ZL!!H9@j6>S6#AoL&*yr;{JETs$Nbl@uId2o$UE2`+d1}w(XfiPU;qgA8O1qtY zx(|9bK7MFaz@C&Rqmv6%8c+S9GMdbHz=0NFFnQOJFyjkTol9YINQ@Ssxz(TO_f^oQ zP+1tS7sV?6Moum}SY$n!(&aGmrV{1dqUJdU>k*0$ac<^|4^peD&3LA9Y6t2ziKvoC zaV{Ghl~GKIlrETcCX2tf*LO4xh!6Z6ydtz?XoKKE0Ri?t*7BB?4ONH6 zhDWt{YrB>>>p9;#*QW=#dbpaqeV#&UC~@^4%*2QkYs;~gua+b9FrG3;SduM`t&c2! zn=8mU(7&2Pufb!ydqrab_3h=_E_Hx1!t2B~qyG^jB3y=zl<90MSOd4?=VkcPj}B0&j`;$Pc`psI0LT74ZE2r|&H`{tI=t}5^(D$J$Ge>8A8Mz>8Th<>TH|&RbetXRYrQh`e zu4l;~6L%&(PWC1jPwt&mEcr}ocGqXmKBb3t9y&}>b583R>kZ2TOL<#g$B%$P0gddJ zEQQPs%`uh$%kT0jA=xObkJ2P9R4wZL)1BGXF#V@MvfS3%KFq$)UfGf0$QIDmam{wj@>s4Qofmw*n|g}sLnFAf67N0fUF3BrtJD>G z34HQjru@VS<0XM8c$N8I`4!W?!da2u#9~4gQ(e}hBk|WV@@N&Ya_hk-b;vQqfkFN; z?V;1{E&4XE$ZY%PL=L7iav!cGL0G^ePKoy}9?lGi4MC*=I`{mb4jL%$DA=#e_zRY!^O z)i75INC=skc}iIQ@XuL#XN!#Nnl(LihGVBWR%#}CO*!;D?gJ@D5|1YyNPdtSpSCA$ zR$4!&&D~MyqVM!o6`qPkTpVhba9L{7M;@>WY%KL zoy@nR!}RdDQYRb2MFg+>f6Uqw+SJB>nt4;j|fkQ z{p$O=8y$31>shC9UJY$`pJ%8yGrH*4)QYfF*6GWQc2vE+3yM)~FG;GUb_ zKCNKNhonPEamm9{3Oi?eChE<^SC%{h{X@dT)yN__59HPIEzUb9*UId*VoXs5vQ7?Z zV_BdTcTPPCe{loh(Iyer0YKF(K+;O#bX^vrmbA z99cHJd~hZE5VKqOq#y8HOWmLF;^&8-Q{wX{=1SU^v>}oU2JP^D`-tL*XD#RF+0rVq(-LF`gCQcC))ic{SRka z>hqLgDaTVzrS@{pauxS>(AN8wiXrA6*0Xj?V2-sjDwi3mU5PhOcN|BeCYnp@P)}kbA zpgxvKsA{@x=-O$j;-#2fX9v43Wcor^eTmV5nvK6_>ld9=QLyk{ksq#3XUki3YL}_g zl)*|hwI#aiqx5k7mNrbkX|#sbaFL1(6W#Pbw0&x0rHJQN`kb^KDJeQxN}c#i-+up8 z?p^iQS6?fw`)iGG-uVc zL8*6Br#MHs+IZ?J@oGK&u+hh4f^X=Qw!-?ED!&BNTo#Uyp? z@n+;XW{?M{W>2>0@IxW(1DIxLnyxf(c1W5KKlj&wUyfe`e~LeY zez_AGq&{|yQF80wjSZ$JVt?}w>w0@*ht&~juVcGqyudp+Kn_$`W~6akd3KBtz;i8;i@oQ z%lPse@3ceeDrKLyr8krJr{{)eu4fU`H&1xWs8_U?#5#kBpH-tVu|PZD0AsawUa9G= z;5p#F<<8@6p-RSiQ!{C+xu@-~z|hRrYzOn!{AT#zXvAlO+J-&(A`LXE_HBB2?+=rnWbsO?QE9FCJ}kV8-#ui z%yqZBYo*3$B~znmeOF-`PCg+{D~M9wAmS6*UfFU82-X%3#k5SHASG&Lioi z+y^{oy&IH?s!4CKF2Lbqk!`;g|6E2T3$%;!Wz?B-)}}~&8kfF z+;NX||8QUT^!C2>)>Cxls=Alfsd`iyDieL@1L@t+ufRYXpbypNGRfnTGGD!}J=F)m zk?l!uwqIjE1g_mf;@NdlI{EtEMDAu&Ro@l;shU^udX{;bc}jXNyW?G2`kVAqu1ua@ z-s;Q&xUH087Q{`a?L`w|ZAMe$C|tsi@P#_*H!-F7?p zc;?69RkOB@D4FeeL`rz8(3Sz`ti8>5#kIz1&&kwji4zi(#P7*7Q*I}FlO86AIpaOE z_3@^^(Qug{I^=N6QENWiSWd`W`3W-*lEgx=7N=#=fh1g!WVE zs+gHLvdDANebkknUcpt-eat=FZFj$Sy>oSRkMT4l7ar)X#!QuYicLFa3=q#*mST(Z zg?tU&7xpvaQB3cgmva7!jf=Vz@g!^6tRus^2d$8gt7X!!r6@@Q6MFvG{(0ty$sZnn z7JoVuZE4?J>FP@Hg#Bqqx$vS9OC!HVH;+w>$rUvu%i7Q@fhX+cEH#9)iZAVT;>h^l z;`heyjQ{ql=C7FeQc1VchIkBhkukzFPISuKtuO4M0lx>NIFjsFZHKI%Ez2#_Eh8=4 z%w6Q0;(ByM`oKe2Pd=`P(N2@qPD&o-y4UPI?EcgBAiaZYp}Uaxm(my8@VjrUkXim> zS!iDvur6><;O~J_;O2mbjy?9r*6QZA;!IyjJ&W4f`;WVxD>!|;bBeQEdYo&%`xEnC zV!;93sIP7j7K+)KR&tW;+YM}&6K3%qy#u+V+QcjCJ@dVbz{lxI9k9S=y^BvrdqN{) zcLGj)DROqjgzvtktm7zcICE^$y{(xYyv{S;Q^MnO_w*Ekg{gRdz?ppIneI8^dBx0w zHlDo9ff2k*)hE6_@@;E#$E?7TAzj1%$!5*|IOpt~d9(kDeigMPYEWeUEK7qn$(i(= z9*q@WI&dW4sVNQ~?log->b)@qq& z1>XqBZ=Y_CGnH1ZJJ+X-CDPfQG&Av9LY2h0l|kB3vV^@hC6qq%}KL4 z-Odcz+$C$1Q`^4O!=oK)Z=OjoibM!n45E@#;s zw%GpByQA_%{Tb0Y(|@*!rq)VHS7_R_q@6#ze33rp{22A+%lEPIKa$MO_gKF};Cv+hD=n7KlZkpO&ZEk@#kaQ)2Fzs?l$g7Pkw54J(NGmxF)K3eJiE&)*_DO z0eON}g#?9vj+~c0EN6@CGowF8ZjTI#nxAcOSoeUh;zVt`=V5y1)G7%xzjgmq_G8rN zY~Nyj+7eDC$GJ-BCFS?_px{H9wq^MdStZsL`zAUu!V^|IQ=yQ^z$X@)DagAuZDn$3 z(z68TuO~ldeNXsa@K?UXA}L?fF1RepXnn5fskmJpZs}?p>DU=CFJOeDqdlj6yj`Ouq>0)q!{LEU6x9UAANh6?OT~_OkM9U_Xt;=^!d)V&dJW)=>uJ@JhPNbT0UP3 zVTIJ>pu@@JjH)weFC%nhF`2HP!2K+g-h!!_!)%xzl{k z>aN4?q26+;rfq}MGf7xZgc&bwllF_<1<7=s?vFuQxH{jP-Se9}ultJoiRZodp>jz5 zpv^ain>vbjrG@fiCM7nOw@J6f1;S@vu%W3(yaU}^iL4r=tw}AKYE3PZIx00VEt|6w z`0$JSDJ!~K*`#LIulr_8W^01|aKO-@`XSXbKgnu|?i^b`wrljD$O+jtWcxGQ;w%qB zI@lHq2ejY4BV2t_YbUJx{{D;Y>#uJ$evXfSo_Hj+f#;MlUp{PWANV39D!g{K6Hy1F zN@nwf&&gae)B2D+fqgA2d=tIn(hH}xO)({f#lQJc;YXRDVezqvLb5saqjQL-kmf|0 zdXhLrF3q(p3D_2Jlo?&`?He880W%y|Y+Eh)%=6@3a%(xCR6+>y&DR%dx7Dpm18=fh zcCT>#K@9jbJs~~SW%f>0ujy-ic?Fj^-Mr1VG+mF~Rzu z+(CW_a?qKf&#Q`@XUXMk+dI-JT=~QetnW$v?nj6J* zMU7|D*b;9kEY)w={;kRub%(Z1zinu~hH$`>h-dDT6<KY=4&5mdt{V6so``4IR z(MzLFMiq%%pQU-I&wgHRWQx^id*?fEB)$37?q`*smwyWJx8tWI-b!ieB2(cDlFC^| z*y{%wq07VM@F$r&W$F|9A*5JHbkJz~9r>gwS%1Yev05HA{bpL7)U=eMsT)%>r#(!& z@3gp9yDi=f)Nhyq`8?z}HfTSO+hm zwKi09Ylrm8bRC?c^DNOA3@i5pYD&+jO8)^XV5g{y$>J933KPXy;v_L#oKHUfG<8Ar zpW(NTq?@uARng|;2}P&bkJ@PlmDh#)cr~gI74&16PY8W5tocp?kKgX7GKil>c#lE z6k{uT?~z1iaJ(l^ltM=870Ri0t>jm8pIuw)`_&p#Sh;WMImVKV> zwWSFYjlN2U;Ig(6r_cfF%1~+!5dNW?+b}hTAD9ZyX9qn6>)?>@ptHH6F$GSYKo7Dpc}`V*}YT1Z{lRgd90W`fzkd~j0 zk<>)4yC5Syn4e}YQXIJ&SDQ@gq3vH9v5O@LYkOD-kYv~ z%}j}DMYru6YDa-^A+J(%UQ1Q`J+;^NFgKQ9gKki%?qMWRPyJJSqW%SA?~PJZ*~nDF znbZlopoaNe$qScY0TD*M3^veTD(2u%Bb=u|ob|G-~M?@xaD3ye*l zR-4I0*Y%6cZD_!&58Sj^eHOg5Xy*9U;`FuRy}amb{>jXZ()7i}!Whpj6cUTVI?Zjq zXpXa-vShVN))$t4E#EEGt&^-vnMR+_GSxhR9Jh&?^hYdHER`%Sb13tO{;))&^?Jd4 z+C0OY#oSbG$;9S@VDv83qWk$4(bX{tX5}6J@^SiLTB!}y%4#LGIkn|oNd)FzWYor--3y`w%jh}&Tk|7U8?WQ1(g zY~G6&qJzTBgPt#Kg+ta0F4}!me|N$E+rdPs`tZ`O@;WXoV&(I(IG11Ah+^ntK;+cI0Kr~Va2r5Bb+9y*({tG(5mY9sb* zF_gNt8l&KL&!S868PVrWaXEYk8D>srF;QF~EtXa>*K9bfjO9!++-M20ezG27YOG*C zW7}k#YFh;VqnT}ywV&mHInMl%NuIwmE%HBUr?iNC(=TzF*Z^JhvA!z zqN}pDwvkSyBb?P(#YtQ{ovV7FPSvc`uixsEsrjD5R`r9Y)0JK9SEeWk)2WE@A4+0v zVHt$eg*A%GZgDEzc331WVBM6b>--_y;HgYVd`&bw7WURr>dephXgk&BComeTNKK_i zQd=VYtMn$$kOZQF6x382!l>FyU(j~gwyW8tYgv!ZN+V?o^WdteE!aIh)jMiFxL<*K zp#D?)z-%)UPj$1_L(9o5xhRx_i)pX$GojjIttxfqujq&d!l(At!bOK8Hj< zqeDC|){?$USLJ`q{VcyMYhdv#wdJuNu-~vZw;!^Vv|X|Gw3f5V*148z=3=NEPC^T_ zp`2CLr1MfKX*IRq=~%LXD8Wr3M;%VRs;PEToj^ZV7qydGn=0@@H6erD_ED=v-P}x1 z?K0}@evQ_$)PA+PJJrN=@{-wL+cgxNbTRCt^D7@cN--#54Q0<$<5Z+ED{U|n)1tMP>O{309$^&|Pd}?6ti}NCKjuC@)hp7;&=18J zzn14D&OoQhrb&(*OJlcpAE+LXZkK%(rfda zzUua1fkp7_M(RuX&C#r{+gQo``p2dPlzy$z=;y_dx0?u`=@S#y%F=!M6xMZdII3^3#F=3UufdPrKs_yl z-o#FHOs%AfR1J-vWH?8$)Xj_VszpDF4c?j?M&>*??}429EwBx))3vu&TqBN031l)p zZ~%<&_wYEFy+;p(LAOOTT^tQz>y3x;au18MnvcTSNylNFRYS*ZHN6d&w8>g`Iul!K zjkG!_?+(|-z`PDbbM-b)t|fgE?WvBm=PVU=^6478UEpFi%>dPWvZW<=yZ*o?$h!(CJr< zu8XOlpf7OYI?&-|q2s0*KJ}Gp5F87)ctYAGkEHw3Wf^WwwpO;SwWU$#ILt|!YE8DZ zwZxlOn@eKf4$1w|6g`KXpGF_!4p>l+uytJ|r?>=0%z2oxsbHrWzCFe_^e?M`Q!e50 zCUNgm=?gx?jx5TmeMc#EC0a$}cvdygV6Vz6+<30vf-x~opTG_~MIU<@b)yx=J3P)Q z`qOHF3%1jlvXT2gK)>8xIAtB_$FpUq@t1=?(~Hkcq#ti9HTI!c&Of=sHhi`kYB4Y9 zcIX9f(E&31jupBGJ8~>1Io2qRf9p#RLpju33&5$W#AhqB&k8Z8FEbq8WO(jTTwf2) z|3!Els*x4%=vVexPd8ajkVQ9;K@G67Uw!{Ayc`3T{sT@B#Tig!1sFRb-8&Mz$4hiv zRmKh;N7pVE1kqdn0AsP2mS0oJ6rW(q=M<(m&QO=D2dI8#)rM)w+7!-ZZ7hw`SPIu` zBl^8C zgA9AL`86F4J@HWS(tUhDR{EH8QwJ(ePUtO+(5|T1Oydb{Wg=!0de5U!yRHE@^BG*f z8QN_4HCwckoG>TcC=vS|&MxnY6&ispnas4%73{f3Sp7JdOZQ;-eS)9up@%erlQNlm z9}n+3tM40XAN_dB(Wpa6FqsR%Vm^VcMSr;DSLsM-j7G|2_GV9>Y-#LI671QRps*)+ zkw18T%}}xMYu?SImvkr$)K&PG8*p;r^3Y4w12$F`v`u2E5r5!kKG9)(pPJ-e)+P=d z{SrN3|72`0Q%@4$s@;Lv)r>?mGr1*h92hU+H$xdtFZG4tQ-PIr)v@yoP`v z{Yn&38J+Z-=pa$Bw@1|hYB6d_+0`a$BiJ|X)Me^9RU<1t483iq){lzf(hR+MR}g3 zzU2pKudMkQz2d#)Qe;@}QW;h%M>E?)(Q#WzFbuKfqT`;5k?F z^{rUxIdHxgVs);9Bmc!-9_6DhSkOvzxP;OBeG!{IMjOvMcEExU;uIW%4-|_3yQBZk zncU~=Zi=FF;kejdx&>cxi7c2mnj2d#TI|*#sE0hYp0)0`p0NIFy~}$itTU~htTyXC z%NEOc%T&u`OKVFOi;oJJg}RuX9F&P^vJZ%Y4q!Eoz})>z7xo$Y6v8s1tjwUMJm9aV z+Ip_4yq3-`+oE2kCh{HxmJcnE;#v-7JU2#5BMBC?pu6dv$^=@Tfgh*?he_d_MWAyT zg+H%BAJ-P}ekvW;UixcAPD~OVs6SZ4qjZlK#8(~ybJXR@Y4k@l#vA{Kwtaq1Q%QOh zm!NjBLHti_iJJ6sR487dL_Xir-_pmD%d*z|NbbtS^byo$`irmVQ(K6I+>sGk1j5+$ zYjZ3kwy22xABA|eI&36#1;6NK<5;o=;JlOJD2oc*lCi*r5qjd!7 z%usKs1<7ehAmY6sA|Gl5b@6jm`JGvf`=FGD^a`oq0a5=3s*TnTfcy)p9wiT!cE9=r z^^Q3Bp0#*dF<7uS`XaO$O0&zM9GTjP{p1*HLwl=$1A!fb#s>uioew-27#P$!s9umB zm_M*$KwU>2djnezYbMJ}6q7?m=9dr+cLr7ciOrtN$yvm#iR#?nV0?Wg>|8l`nvdy4 zUeB3WX9}VBxEV3_6rOr3p7ji_zBhQ@OP|P6?465#R;%eI{g-#?NNB{HmHNlZ`}!8J|g zX)oZl5sxtz%u0 z^f3+1g61HaJm9kL)M=Z7iH4KiISZd_BE1^L(BHS19??q>M|2zPJHlT61ZMw_uBA!|1` zYj0)CZtZI6YHlFkBmdSKwqZg237p|8caAOe&Z5O( zL}r^{)tkhILL=rLzSlRvg!U@^lpy7V_lQ?e8Y=6R_ev!3dT(_)sw!3Zy%F&BXBs^? zcQSd7doai!^ZTj_Cs5*g0_!vn%;8Wh@+rDbCer~w7NkFv_s$rR*ldw55s_@md!ZQ- z`D$?yS20l>PJiB1rnjtOvd0GTj`&DCLf_v&u>%?y%g}kc&C@&%YyB}gd99dv=7x6~ zgra*5Ja8qDr(f5-6|r+O&{jiwyjsyMH5|35nQ-juvPbTk%90OT$=_arFJI?7WgLUu zHHgThC2yOPecVV4*_fW205tP+fB-+y!!r<_?D^Q}Z{Xl>Snds2fJRK*NYIXeB#&z; z*t0>L)t2P*R)F4_yb11`%-Syk6}r%Yh~w|ip|7PK89Tc$8lO0tZqse(P-%Kz^d5GT zD=-ZIOyeHpn3{nhmhygmauPXVWcK6CE&&N%($}#!ExvI=Y5BaRwarbWA7R^PYiPe~ zZ|1lU!+wqMfRn71i7uN<_{53$!|L?Ow8ajt!T(L71HU>L z>?~RyozVk`Aye6(&iCmcqqe5^^ldFb-_Xa|4r5yUAE2FVzISwZAH>=iaOrlDSILat zU?N$te#Ajv>3H^_=H;btYYM)k8olo4z{xrtn5Ru;=q*g5QkhBGBxROQO9SYYuOd|y zJ7W)qqPjbcKDj4Y#PLi|c!pOUq;&>it|e<&2JieB^fj0h`;@6ib2ypVu%6|~e`F)B zxWrSLO5bpd{!p7nK6?WDVh8?iDJ)}!KICnv7hNO{KSlq@cp|6r?1XW`IqchZqP${) zmo+G5x(J^8k0|~#`zRax;3~b#amX0_CIru7fTN;A@*vQIEB+EJFB2sSgjcJlk)$?e7 zE2rt;8LvE62kFP?!dy$otKpkN9&jO;=n&mucZHnfrJE90<%AuX%*rl>cU8q`j-3dG z33P>Sx#@VcAw-JRux0Pa-jx9(tz`nWnNID$#WG|#a`2N!$^ZOhI@B}zxX!cBBZ*pP zl3Ur0pS?wNvs}07XNgFwU<3WVNL{&>2Jm4^3&ZKLD?)btIA?bXpRvM8+8}&m=4(k| z;QvTE2k812Sszpp@GPP^{r0-aN$KmIXSlg34m(UBk_DMUVU1cJeN`H0~ z_S$E(R|In=o>F002;VOW>b=Z8g1qPPr7xpNo-<2qH<^y@LjrQ6HEh7SBa=DW3NRPpf`oySCy`&&fYm#K>hWycyAHJfg2*tvhrM! zbUIx`@5e&p%ixf#*w3ZV-;qo!>L-;319OSPFJlUb*d@FIh!!H&JrQqx3A+kr5tSGQ z9!H=@b0If=5?NnG450`coQ8taf^1-k@y+iL^TD68<$5HM{cO}3w#TM_0xkTc>w7M7 zs8z)JuJXEzPQFZ+Vpsg(5O{o@rxu-tk;tdnOlrDeoQnh*NNo8p8I~eo=>uFm3rrBv4t)*Ax35pn&Mf4{dE*PBs{gRLuApR>K2dqI4sNZ1ynTyy z+{u-VY-~QtmBZ*p6MJ}{6CoOlpL7{-FM>X`t=P0}nCbGDZqPX3n9&nYcgbF&Zp_%@ z{IBSL?+RZgv$OFcTwDk4=*yIj5VD}N=*MeJX7o4~$vyG_X{JofzN$tP_Zkx>gH4Cz zn(|4ePo^3Fg2#Hso8(RXymg3NR7Gz0hCBS}N$pL1GKP3QSy&=r^YO+PKnLmR^R#fc zMp&V;>?&=OIVGGqq9%^DTU82lmz^atPay3;WR#w~@Jp@fbz4^dXk+O}K0u{kG+( z>OTb~AI4tr(1BN)YNJ4;%v7FuD|So)u9hD-i_X&`+@ZapE*YSS*fUGfff`j&5Adt| z5GRXd64i2G|CKp2Q^Cr1ViF11#SXHZpXgLxML$(5!+%iGKhyyhBbs&`7!~opT${U(gBB zk=m<=#N(*fHWc#?A#Phx-;4ixh)DudJd2ov)Pow+b4;q~Okec|WPWM5>Le8XgNp3z z%p`bAB<~;^Zx}PeF5nqIC8BzSc_oAJ?e{RpqbR(8&yav+X+UPyMj9E}^w3ReOo!G0 z?mGdmd^Wy&Z=apj5I?3ZpPdQp=V5mg^ty-;eIhzklkCQKCPo>Ekln&JnuLtn1%x*f zF=7K5TFFXeXA)1_oXJsQ2G9azZVYjubZDOT&`K}#Pb7DH%#)RZ-qzrGEyKzSfNl#w z`Ok>U&L+keMHJ>Fxz)z>R~I#8L|cw!R^wW1;KS4fr(^!x7`dLD$kvK^R6mw8?!c$$ z?;Vd9T2|kM|38O}(KBW-9wjon9LfskiSA?1MPv2$qdV{)CN3qxQ#QjqZvs}!Q983G zYr6WLczj*@($bhdQ-w`1aYPh{!H+r7V@clraM1+v0txg721rktgiwnaLYp~rB=aw3 zz>$H(%&uaI?M1I_CmuE17dP$9ITfP04&Y#d`&u(E;~ev6Qh?1|Z%-sjPvqDw^jBG| zw$*U;6LPU%=_I>^A8{Wnn~c|?#}ahWg<#`6HY7@I%|)#1t&=PRO|pEQTAHh-+2*EZ zt7!rCH0PnXxlm0qc2EmX5j|Q$H@nDuZL3w%D>WH8#q#*Vt`z=eIu z_!KH~ri!659&#<_=*-gY5eYsBKW#-KQiBf0GxIEBPFg2o(>;jP-KJAYGCqcm{xU%> zl}_$!d|d*wBa!+WnG@U`Ep`vd(G>{Y1fCg)3O-}Y&nhI-2C`?hkV?_?re|R$#wcR` zpV8Nah%;OT>P9+$R}#l<27UJ9wV2<=@p{S?A0pw7V?~}uLeUvO#H0XEu$@e@uz=H` z)`D#EYlp2?g&v6nX_g$!4Ask~UCiQmMh3JP61kwX#~5bpX<+jZ^pi#3qK$`suHm`< zVe8&DrUk|_$8RF|Ngz_}W(s0arlJ)h)9@VY`n6{Q8G#9WE{+^qN39W`KE&MBFg)gT z$fu@cm)0z zvq!5lXPHj!Ra^y~eKU4a6G&xdin83(q5T`YWg{J#)Fb&`rLFX?o#gG@Emk#@OQ(@V{aW6Y0a~ zFSVDxGUuck64##zhxM5T5KCReT4NRC3Eo?h=S(D4vlz@&$M0^9jvftP*W+H#@%K0L za^m&$g+gnftIzYhjr6BA#iur5LvO`fx=buL13wofv(X)Y>Y=APGWaOA`&IN!91^59 zJhK(s<2jaORl^IUZfow=f(nej=-%;o5C0KCzl^sZiDxPH4n2h*P7#rvN_K1nk!NB5 zJ|I)r5}hAKj^iC3fY9{Uu^5`cbA33$N~#_{VuOp>8LgQrzLz+zWXy%XdH^jYWF}i) zyi<*RW!<#PT+JLWpCB7QSz1Swyn(rbxgVSBZb`YMM0%!680#7Od3SQc5lmS5tnb1@ z4Pd&P2My8yuAT?xwowUUHwFO76~NR6{SCsucnnT1!VN2lkBp-?cPv`%vnQTCTm~ky zWyk8Mh^`vJvxXAW5E+Bt%n{jzo{8Y()j97rqS$Zw{RQZ1kZ_{a7$smx&-{u=2%Z(B1gjk32?l&Xu9DT-bUZ@OBosCqA$kx<3vt-Q@h2u+nM)Md5ucAws$anY5L^{)KaD!Oyvw68RiE z?lWg-1KnT4f<6l!MdP1@;Ypa`zU6#66pz*gP3FJ`FA5c1fJ$G3p-R|;V&7gvY6H9; z3-6rE)qX=A3BYANvhNoWg#;w_boel{*UCHBfg3yeE)MA`_McrN8(9L&S8Q(j!rwBG zD_!lIy?F!5K92MaVmsn!X1hK@`p$xDMq(kJ#YVYL$GVy9St`2c4VL03PEa3>xD89K z7g3aGG7Ba&PAq!*GIm#bq-0^N_QOP}t0H~6kn>%G9um{^C9fMAi^m2uGMj~71E}a2 zPdJ(LpM*=^ko8L@!fZy`hz%F*ku}S)+57UH*niR-3tnt+DvUG_CWgKlywyNz_2$(G z-iyLl$OaWP$M(L6^?i#uk0bFMGLv0s$bD{MLH;3Adycz_{NPURD|U5l<|#Z(pAnn0 z7I2?x*iX5USPPlHmu#FzoOBu5T|{(SFrla#mGyhcWaVZ%;z(+q#!@@97!GWMA5s~M zdL~{`HGJaBaO*`VzJYJ+!*lTT1lfCud!51dSp@vX`S|4smP)wyF4z`xOxE+F^LgG z;>3pDhEUd6vLw@qGc82_U4#Pf5rI<3QI>`px)BLjjYQpvm-!WF#6Yndp@);ee-nH< z6R)E$CvQ!S$3pP26G^laNp>9mUBnHT|8;*aEmA3-8bSG{zpDgGi<7>85@1`24r94aim#H!p%ghSiDQwR z%c1Qa*e8>T#CP(+D;Rof3j8*K-$_7DXbQ10wkbJ(k;5NAM1Kn0E^@rziJd*6hAIJR zAnwWoB-uWw{c^Pz{E z0*9_Zq!w_ginZGoT=vEz$p{sNLP6|4U~)+bq~LEKg{0zF1Ov??SSLB~;nVo-E3AkO zP>Zm@2Li7(@Zwpp`Upw03`=S{|89m8Cs5hd04SD2E-l9U7rWnnA~(Jv6|12q7NJ?M zLKmX)U=fz>1|;J{Y{EaxsuX(n3$eERY)Q$E-F=?jT6>Y!)9}3KQ3+ELD~W6v8tMBE}&0$Q4HlL)Cmz8wZzXO&i}S1_Qv3q$`Axj;8eB0NZfT6Y(FDEFhdT~~errL$LZ8P{ z%f1%w-44B8fs?mjq4Y=cErXwT@U;a?E)w`8p)J~RlJQI-uSAT;F7;++%@S&M2l3r% zDM{)hUy)zPXQ}0?BD*_;ARk8ePLOI!e z8>`;&u2??#l+WHIW_Amh#zU13B$|*)=kY)KqVEc#;~E3kxk&oOP|`epzn%A+uq5(< zaTkzF<#Q@iMnaK(GG4L;$rlKfh7lQ0;VJCsTk#Byu;>~AjqX01WCpr-89IIxx_1ti z=w6~}&+wTyAjuXXorb{|ZHU}eVdhi`p1cG2=>naO1folc^mf9^7zrI7L5FU{Qm*FH z^@Vw7OWxZYZ#5nE+IzU;Ae3~NuT?$@T7ox!80v{f*T)0Nh0xzX=&v`l(}@!o$G!@{ zw#^Br)aAE>`CCz}%codqUpz7vZzOhQAvA0Tuu~m7t`m}a8t>}Jt2aMYMoy{tvk%c7 zkI6Y1!C-l)vn5vF7$oCHbYeKVq98WeC~W!@#0QK-VVjc`7*F2q0`ou*(i8fgu8Y6S z&@@YlxN*@ZQ# zL1p>)K0owP2x)i=Op9rd2eE|2X1^Cu_Y*kbFs~SNaXP5eOb+Z8zdZ#t?B;9=a!{iB z{QwejEfQ!bc$|Vv5!1s*@l!AOZYf%O3lJF!_lg}%<*}Tr@+yUl@k72div_zp2R2?- z@O~X#xB$B=njDS6qv`eW8Mk{U5&29qHlQZtFLU)?7$-|Ln6Z_W8ta4P^9Iu+Q;La7 zrKm478OLEUq^DLvY&C2H@4UypS^^I?LbtX9!eY0x6Mwr8(%MFj)|eD)EHR<%Kh|0znsY`ZHG((<*0yVPW6hKqh`q3Z@{Tp`8tLHX679pQUc!wy`H9x91{xe8963oiSh#|A>#m$5)< z5ZA1TC0hXNHUqP3=3pDuLk={9e>=m^HSkzoB2)e&_EVYX%;n?00DL3CHQkXpSE1fJ zXoN#>Z7_5&l03q7bbTUv{uz;l1a?AfK?1Y}0$=gy{_^?={@3ul>8Ml`Pu~=3?*lH| zp(olweFDcP!TLJ5;scZ$VmaAok`ZF>W z?F@7}29BPAj9v~O&jg~Qfq;ofQ5<=?>R@y{dbBrmyoA$T#Y$M?o85Q^4R9XMZaY@- zBjj~fsP7Nj^($AWw-u=7;k=WOwHrC1@R)YuoxLQ^a~D}XkW-Z69mSy7C{9y?(-H5ZF%DrcR5f4mYt=>$#n z0=G@Ty@_e`e|efGaP52GC3Y#_FSKx+kP;y={)CjAy8XHL}0h=(MxG*r!g}%@@p~6gJv9qNb z&r^g`rbkM6xKlxXDgiE{pwj%%;76calAoKPYXxm~gDY!s&g`66P|RN-JPvM~4&nYf1s+=@)WJ+P@0Qx`D%3uPSP=ij{ZEf_w?2^@SzSPu&K&d77RfQ*r=m&S%^ z3P*H-`^xfE9$>Wr8s5z*g*+2-Ttu}uapsT6iYU%0rVxJy8V>ON8Ojoz^NcGSeXtW- z&c!Z6C;yB0gu&|-d6fWrIk|T*pZ4$|Xrub&C!fy4 zyJ~~)ra-4AHcu^hHk8vBX74PF&_3V-Z*5jP3v_lex^J^t$s z7^W2fps~_i8f#e_Hc?uf1 zj#PZc{ZH|X!O*nC-)kUQyTFHysJ9u9-OvEM^o9RM@zZ$dTvR;%pHrN0(LJP}h$8Ry z*-FhP#d{$U+-;f>>PZfo%=>)H~;$E%r zzr~zxAs+*|e+|wR$*IKd*qzwPvB>2tK>szm<|$Igg(fh;50P9)NS>TLeP*aCKR=88 zv>FznfKwuO%IM>`ugI-X&i{f_q;ii4Bw+?VZ|ADteRkv{;PRXAUjy+t@Er+UEAgrW z97ltxj>wlha9|;zC92lFJVSo25K0W^B@`CI6aL|oKfz5|@YxyNoEe;o_+$*6q4=od z91272AaR2>2pxA8wf|D z8)-1(x(+1OZfpmk$G`IaYe=1W{8sp6p>R@WEU*yH`-C~9*?lK|4h#xIL-~+9PAuN~ z==_FU@hg&B$oNR$DkC4xL5U7%Cl##ub3Pfk6~{6t09CyPZt1~RF>vu67>GLm6ySS} zC-{m)6a4a-GrmAxrsKRipAH8~g^>USfn6X{>KxF1#aU1AzVARnL>5Fg;w?B98;s+D zL;}AT`Y(xRNCmet++>Ge#MR7D{{KC`7u;Xxt~ua~yx`ZtPcrZchCi$DJ8_;|$e#wB zRmjdl{3ejA*nIRYG;tub;s@RQ;HjNR&l-3E^RNTPLa76w^WNaJET1e$grPE5Y0PyZ zfkY5bkt+4TabLBiXbmi z`3iDPOn4W2gCl@>9-c8h&t3wWE)Nzvfbq_p__5FK5VmGPs3C%vh*MXAd#m$48=PAxctg?Kec|qgz&IF4X95c8p$B2N zNKpQFa4R(MQ)J{m@c)%_{o+o#j{^dLsgRqcp|ubwO>8L^v8XP{r`-HL0*;U19)jB9 zptqYyM`4v;!S=d|%%(=R@#T!(67ILmdO{5r7rf)BCzZYG-i zG4e*|Z-;#Th_K_HLSeDcs-3H)2fFFOV=yO?kvJi6M*-+U#Gd?+3PS#+p%1b`{ozO^ zvFW`zCu+!7Px#A%t_tMoJEKGE!hbWx9;d-q>v*0sTI&z}&2OwgL(p0{ z6!`;M^as9gIq@ZAS{S$%w%{i)BlaGC1ZO5>={K(SnfC~d`2~2SK(7`~E+|%bCwq{R zvx%FCen7EXaTffv7AtEd*nS4hi{3S-ufA?1R__0+nP&Lndo!Te2GC?1=;$BLBs5q_ zs4fcr&ks#i;jdAivR}EEo3>FA13b;*MW|n-R=Z0M|9J2eR>GX5^aK#A)XY0eB{Y_dWx|FwSuc z$=KHy)on%0y$vxBQF9?Gv*zJ-j6wT|2sstoNt|yu6YEaD!WcoEEjEJY);MqsOH#dRKUpy62 zGqM@k@f4g1FXld&e$IE{=)|+kOwVNeh}`Z=T7(R_25acj6z9 zbG@^iEe{;fo==YB`8QE3^MKb*?65*W_7>L_F_aJRg3$T+kI2M_iI0?pwwKcwGEv}GRRy(-nz7pX`&LLTM}l^KVr&va0SwT7$z0G{iS4E?F1 z??|Nh5qS`imzn74L>Ef|YU=Yc7w87v`M>poO+9aCy`?r*eZi!ZZ^|@vEE6;>^bWS9BI}J|p|K=0 zKUOg{sSlGc2Ae9I66A*Rf6P1|LazN7^funR!$a?zUZ1HK&D9XKFuNFR%q z6vK{q;_XS^b`bgPC?=2SQZ{ngo0x+8lsP@m!P`Y*w}V4 zS}X+pxqzyKdt}<18<(-SO4ESCoQWP@Eh1Im2j9@0{FdbDu@nmIAP`#KyC9UYjZcj#J39->u zRPYssS_7!Gi6d8dkJ`XJ)OwvDdpMrzi6-=8&Zj@DnEsgl&=BpT8b)PqM(X5;vpXZ$ zkk{BoikJJF(=5eo1?Wsqwx`#6IrtD^g*`>c|n2GSjIfvJf(p&!Y#-&Df9%lz3aTdtTZnTMG!%deO%^ah(U z%v;RUllfW!+88#}d}e1}S*D@!WC8*5($hLw;WV-4=i`$q3Jqk^* zgxZ@~OhF1m8pJX=;T^LR>QUj675Q@=9onC~|7S3N6{>HFj(rH-7W2M^&Z~G*^+Vup zB0J#*Q2%(A?&rpI>+hwqsyKd%h_Q%G@AuK8_mD{Y(G3N>SAElv8nP*+rS<@hFQr~o z{!uh$g%x80QC2mp)=Iza>1U`UwKT{#fEP~CcrVTgR! zywQ5WR@XkjKG-+aH)e#7kzZLyZ#=3H#^JZRi)IPN`5=V^Yf zlfVR^NT#!Wr_Z=O)2DW^@!>qvH$HjRvb|&&6KXWn#-jPWA%*8d>-gLoK`gTRq~^xUP+gd z@}$&HOL8)MPPZE*w9OOKT5}JJzjcz`~PE1^Bz!aYqaP%Cy6&|o#{H>u1HJFp=yf1IeLP+chbp=PsOin>6qZSyoS=lXji9 zSKseVC)c-r^0%k!96C2VbB2}Swy^ghi9!AScUwQnOAOET)#`8ed6&VJ-5KeqoK`1w zcuM7z*D2}JrZGAHn|qDgQZM99Fcgz|%cD(Q%*`x4nbjNN*UZ0fz^{N~0h9e(*=Jaa z%7?wt+AY_-G*e2Gq|JZN{=Jw~E5$!8(Q(4nRJpFYwQzcOB&?7rhBnej zwzzI)g7bIN2a{$hYo5xUjiPJ}(k*$|KICV4Xlf>}Gk*8x_6%WTO_0*h?dJ}3w{bsm z4^;B0h-odSr-^sF;fiswq)1uVM>(51y!&{9`Mp!=(hys5U3yh&rEif}{(x1unkwm* zaC0@LHZ-UHWfPkB7aFx6-pzGv^k_83GkPGaA$R)gF-&MVtxjO_&r!8C)5hu}F|HUC zW3c>A?rqw}W{EnclXOs&kVYD}V_#n-auY@VI}02xqd__$-QQqw?ZDRbLzYdVGr2t0 z$TT{gCgJg}Wh>t-c20_Fi$?4`drzNt2IF3=uMBS_%)3}wIwq3U6w@2Eq+P+zrTCSRo zN*fH_fc<+V&ixM)sT;ZvyQ?b?m8R-m^&;~RJ|eA-sh63iBk2ZD4ewM#g0a55&lJN% z?l5a!t7M&reca8whFQaDQduUu#o>)DB#U2+=vFK04Ko3$r`X@!>HCzK_wWO|I|o*y zA5hz9=m>1OQn`DL?S5_P@9bzi0QU^12WODS>ei#W?>6w_P(G?{v;K2H$4`jftTh%+o zapT!JyqemxAJ|j%**RPXj*2jDKz?nb0>kD#hNoopB;pYqz{1JLWTMOTbRTE?cT47R zZTGbIR-<-rHB*G5sUTWL)!GB59=_uBmG2%if#flDY?J6a%OJg^W?jQu%|>i|AeEnk zk+%!MOyWQ!T5=qm?OTbf(Aq}M0T(gHje1r z5_1Xf5s90Hr8ABCkP%#CGj@>3v5Gu<5V0K*P0WJDG>?ey3A!3~QJdZu-KY>D>5uNe zh=h(~7i({(wPt7bj6V}apD_XJG5Z65F{iIJ6NGLtAvGKGgt~Ep6L?o0@J-VbSD4Aw z9-^O8@b4}Xi6}+X;|1rsL@ZGBW|bwLP#%xI0@5cB^*@E7g&x#%JR&yKjHuvhUQ6h> zdSS?qM?4Onat1ZcBd82-j*bs8CUEjO#4beFdJ;K^HALL;ys_^Jrxu)q1+dOlbKbSTU4Ok)Tbo6bf zzhMX%Tu41-HYx<8fa`d?k8#vERl_em099m%E@JhI^t}FM66I1VL(5P@yOMi~-k=z$ zVKICpe0c|abb-(4_|H3tN@gWSe;?h|n{KbmKCT!;4?zif3V!2P-@_7~3nZHmXRAf@ zDT+vwOvG~?^=&=jkjlW_$h?7g{OU+(YcEl*5aNQB;ayS1^at2o0j{Ia5LMBliD>Q^ zYG?;}Dp2G0k@=+(ahiHm&d(siaNYCGlOAp^2^I!|;r__)!#w+2;_{Vz^(T3W-}w>q zbP`)kA%^DW`9%~_s_=C9#V=m1%pg z`Ach{6pB`jC)#!sx_!)b*$Cr{HD`%joP4g55HewI^U+x|N7i8k@ z75G=^)z8RY(Qh;dEhsX!kBGVEBnn&yde}(Z=p;IBEcdL9eOwMXli!B{5z!X0eNkuq zjL6@5FgXAm%z{rVB1zLzy&XxdY&7&x6TO`SshJZxZbtpJ=*<~Vm0eXTJK9o1TbREk zQ)5cYE8qWv{8|BpUG;fkC4Djd#!zNk;N0Ao$1TmX^deGP9odwMt-hc4iwV~)xgR@x ziCazPs1eyhY@k1N za3Z3*g$U?#bbV1`*ejUB;blHSA?j(xhVKk~<$)4f`sQzo`5aY{wC|ycO+-igk|Ptf zTyv1Rd*Qxc%zg=Bb5%w2|YQOgp5!+2a zLq=p@B`EV>@-!kpyBnBa;Qtl8WX4xBO??B~X}u7q@*!}O!$L<-lC4J?by+XtQW z=1BsH_}!xpJ&_K=(LA{un?xcf@er7AA^JQBEDqruJAiU3)>&R~)*UR0Ns_7XwE`!c z;F-Jf?EmrI9k^M;>&`~rp%hQi4T`G|#(Qy=MMTT*aGE4+t+P~-wuS}`WF8(vv9sXz zHc(YxEQF@e{lDn3BXI2jo^v)j;y*0QAy8Q(=uTjAJDI54_zj}Y@FYKPr0-$~T;GMS zM$oXBVO1MCDupDQja3+nhN|!DhFk|{#=+NTe0}v3@ISjiO(N6W4jncWUTp`@h)OB} z3k_*@2kGMA^kUoaK=RPHz<+6IWil4#H1tz@`XxTX=hZp0fm-x~R0k|Z>jeS-YIr8g zkh^EGpl&c*@eVRS1bO<+=3qM0i~l|6u!m=*p3NtuDW<^TrYxZf9^Mf4inAc}A1_uYxP zipq^q+{sF1+dZ!HgUm@u&NCK{naDlH;?s90F7Du21*Q%_>zj}-yEvn$#}pZ3(OXaw z308nte(qKfogiwPe?Upg$jZ-xu3Ga{b&*MZc+y8u=pmxOG3-Kqj2v}wpA}H_8~7$G z(kCO-dLTQ1E;19S5<0UM_!~eR-RtAss_?{l?(1O2 zc1_?aI=|;Z3&qI(JR~M5>e!ATpZ}j*9PP{FWWb^_^SOuI>o`ynbpbiB$nx|12TAGutFXB!A67xHYIkSqUSeTjOl!a%Yq zFHv1KjO^E9?2gXZq4|J-A6K*Ro+(gc1AaRTD#?eo$;b6wz(DkNifJcp!SGa|)E~~R z4zGq|%V}sio#@bCe6DKX>@KtuhX!el&6~^ygA8)yT3ttKc$QABYfBcY(EBC zMTkX&5{t}=3=#D$qB7wvvhfk{x(Yr-T~uatO#z@Fx=@&jF--x)F zsM9NsZ78ZGM0ef{p11?_*3&0XbSf*l!^LgESa$S>s1V=J>j*To7z<+-bbAPh{AJ4F zW2EUwq^799yo;8wBXJDiIs_VBj)bU>H=PGfl@Tk@!7D#BkP*AIK78Ab?C5mlQF&2nuD)MC@hla>jV_bLreXTDgF7>7$ju_?8h2FxsuPq|4BTu7EhT8S{3;K zQ5PvH>TaPyMq~LN1b-iZ(?{gfDW1L`I=LS9%w(*il2B}4DEBgP<3M06YS*05n+dG8 zMJ@$#@0>`N?eOt^@Ue<&u!sD$F!cZ$mC*%xo}qmHBBy8q1gl`14&t+&Ia_tEq`0RDy??#GmO(z$yaA7DhXHc48wSxfv5IxB8Rsn zSIEIRMUH4}J3*j;@I{YxBOrtLR5pTxTe` z2Uly3MLeG84uc9iamKIQdpPtn4_Xja1QF=f7vNR$b;*SCw`fjU0C_ZlzyC&p93VGj z<9^lQA5!NSc29X=@dD^hMn0@Yx8w#&eUWCnp<92Xo~Xzz#ML9XUQMvr3|gKD zg?l!VWI z0Tp3^-$fQip|J)7SHTMd!AKNzmy4^m2Xj^7%H`y1r-H2rvb&q1p%q9`K`nx(a`XJ@ zq3LZ%+ICQWJadF*;UUyO+iZch#nUi@5znInyqt-%_vSSa9vI3MMcu54snFfP?R_Nu z|ECg^rGoY{7UNW4R~fFoPVIgjsus2&&NsjvHJ4YHs;wvGe5JVGPo zzVzP^?^jsi4E;JDoZHbFrJ<}#&|Ep5x+Fh~ zo=P#Db19TG0l%m?cF|qrh^Uurf$Z)9%%}0`9-J`@T&55KsRs-jbL}2rW&lzsJNOax zJjamIqO!j|{>B=t=_g#L1b65QRLsOlrvS5NK&%YexzqSN=b^;u$Q9vXihj}4)Z1?- z(l8U=?1P0l(EEirRwVgmJ6L+ko$b^@Bw?|)#ir~Gtd5{}MnS=kfld^D;SIDvEqHDr z_S-cuQ5dhJ4G@08ndV`mA45l9Vk7fYWNA9&l7htDiBv8IJaQOB&vP<(O5pzoz@s-` zSNQn__v!>(Rv}jv=3O;GU*K;NoecySk!xHmmk$GER#7(vXVED@UGyu$%i z4n!85!j8%aUbk?)u4slk=p+0JLoViZe!=@(jr6HUM7GoN%nSyF8iW8+k;UX5Z{RPwF=pJ8hJYdoQ?s)Lpe`D==mxsVUv-&@8SJpNVHO5Iu=Uoh9+|1*$sn_?n2qEq3^c9DHZux5NIz08v#Ub z>vOk6&fE<+L^2z52!DNzei6Oxzr5_pK#%6)#3PxrSC!}AgYFbFl0;o#PJB)a61WDE ztq+vc27NvS3#lhiT8ku{V33Uq&>ri#|3>uZUMS}%JGyT8s#~M5td=A7ejynpB4rv| z+vAW+L57n^$1cEdg|{lFZvmeaLCzc^j{Sk0TOw6VLC6x(Au$Z=s|7q*p5Hn=TabO# z&>wT)lCn^O@YynR{>ISE9ANntsy~F*&j~&GBiBVQXFm8@g?ioqwav(82`xPyZ|MLU zX$tc96M8^&aV_MNCwQ&|bV_}wxdfkE1@&IQ9+-;cx&c*%!$%{bxHfR7=wq4fn**>F zO_hO9M1cXY&KzlDdg6xwbMAEv>T_u!gdSUaoX-&1Ih3RK4TMs{=nC!(|eG51>r zwg(eW>H%!OanDeACJiaRpXYswE-4J0H~QqVgZXLixPD8ZbO`({#@_WH`NNP8lc1eI zEDh1Cl!Rp5jwXl!_C?`ZF>&)TG}#aCG$JobA(QezBc0(VQ6btEe7r_;2^}o#mz_Mx zW9;ZpP?R01_?hd)LQP$Gtus%vqntA`^yuCA$z-`2mO%EDcH^eUki|1Z;8mS z2AhAN-LE{$edJzMw33)ZxfG5TJsl^2_cdVX^+ux^W3fy=;ekxzM6=|pE+&-n)Gm3^?7fs8qh4{WAxMl9c@%WQ- z_(~_m0c2k>Ah{n8C=;|ENJhti?fj6Lc2}sq5;bR)u(4ZHZ5E8zRt*06jm5AYJh-8> zzC^v&Q+sn1F3HB3!hp#|sHzuKd7TKKLbk6Lu+f=wM;4irh)BX)G{qF^KpG;)cEZ

^eD2KE6C?O@`Ui|+t?-&5^N3{t_7SO zg`{nR_Hx0m{qXt%m`gc;nJMkrm^Y6}XgAsTvxH1GbE(!$ zXy?f!9?{>kovaBtmLcRAE)o|!$rOk)_^V%my%Xq*dG$%23`9J-f~nlhBS<4RmO_@r zpIB@XRFMWmO}+?g1hz{s?|;K5{(_yK{LRBVa$+Ae08%A@WE<@3u}JrqaKH_CaU7Y= z6t4J^%*{F~-p+%iQAG45wo%{Gda|`Fj;!)GwFueWZcIG+NSsRXiat6qB_tzx;1kAU zY=|vuK4&>%Eoz@D5c$l7pC1gJJ|w>~m02!**p#x<6NZf!i)1T<46)-oh`MXxJBqm3 zQ8>n7kht1&B;t4Iu?#V%L~4~Bz(0W6qSIt^22x-2fl0=T^eK8TvfESmZWUEa6Ud&` zrZ#juln_t$v>2Jy(quoYlMC!Ye0V$40nbr?d5Vc8mzZ^vAN#2`G}V(=TX5~hUz*CN z-c#i^nOWy8*nIMfEkyaWZE99^vQk*7!#0S@>O$?Oj*c>PVN%&$=IS4#LUWCDTOMm3 zVC`ZL@h=jvG@x)m5r578&Q{D8U|VN9V*lxvBj8)Wy8xY?#glC%E$K{srD4R$rZQb; z6@2jBIN7k-vsXK#6mr*P2lh?ZUUwhmC0ovyDsSBPUH6>j>2LM@y@44I6oVP3bLimNj+FPeT?SEZoRK+ZZ;-Vr*60$(q=xDoM(t- zS45^SVQQqqI7j*<<&i^WS$-#dkc>=|dM(#6jWJDNyYVjOO4+4m#FCaF&Bh^tdm_(| z!&_F5pI$+`qOM@uhLJ5d8-VgOEt@u8eXjIUiYcv?U&=Lg8Pi%Gl{%Qid&Y3RaYn>%2oFS_fq!* zcUEPl@`F9)iP{j)H*aPp7anI*aD5>0TsmedXkBD~=07~JMo`0`-hnv-Huz1qm$qNE z*Ygh#JP@=ycvf(9&>w%vp2u=sE-ali<|J$VTrN+YOjXHhsI0eB(wy5IBOMM$dDj5< zKT0>{kbAN#$uTx0CeD1F2JWuutL_Ip1qQ)}Jbg2Zm14Zh5DvqIoE_ z4h_sv=1%O`{D<9--`O#Kot-`9Eb-=LX4zbyT=-MwZkvs=p_=y~ng2G_0pEpED`^eX zery%>xNf)}xk|WiP|q?7icPOvai4SlbvIF5%2oA-*39Dvr6jV^VI&)nW=Zd*DooJ$ zYznkQSbtg@v%jmNwWuZ1T+Osq?jpC5$I92`RQZe?E6wJfLp=4_`(B-WtD7`ME2y{9 zAAH3*hErK&8YrmGiriTP#NT&;Tq?>>R9D?UvQa_4WN|DOIj#u59^bX=d$XEzTZ> zt(O1TOmxMP-a66x$(m-}YxP((^GQ>7kkW?-9WfRX2GXBJ*$bOGQFUL_kGFtBk$9mWqW!C1YPnG=2c=sb$4_AFxU)Me;^pWeZ zYXU!2bN%D`*LBs^f{nzAy4mx==rHB8F19wd6gP#)PvyOqNdL^~u7vgt4-Bsy`Ze9U zpi2QS{k{HO1A7PW3$cbCP9L4FY(Q&kLAgKEG^-mLF$p@0^wTic(_YQ!%9!R&HYevz z4ovBrx-Kosk=@ZWt#E2|%G2b$DLGSDriD27xdtgkv?qFbHY)rzbY@~uXQ{Qk*py(- zZ5?VGVK3}A6AHR$-)Q^Sy4}*uvXlzG1=i)(KbCdo*YX)7`C{fQ`)i+73)SrRpw`vy zqOKi|c4;S5C#OD7J)E}JvD8`1HPz^(f}M1vBSA8}%N` z@CF(X+lr+g?Upe+(~>{3&3G(+&3a{G__mAi;?U5SP+ z5~!7{fQ?Z=`Q=vK{guM%aq8E5usO6pdo;>XlVfAU(pYBL*QXvy!otpKSYe!D>SSBs ze=DH6e?_ZT>TjGbSGFevJHwu2s1RN}q(#6eo5Ng|n)|QTt$wWoKLn)&Zu6^c-tTRp zPIi}7`e;s%n8q37Nm1Xs$~g|D{z}P~`Z6^st()_MD_Y6UKD^D!VzYEr}9gN@Dyt*iZ*{f(`JRWVJGrWi(Ifj2a)H{Ox_SM8w zz*3jEMyzS8<(GAl?I@dB)>?O4ezTp_O!Z(3TW$MBdv$wO+X%}%(?_P3KW39#A$c>E zZCj|BXkl=8Hn4BasI5|Gvx{nx`co~$Mv=Z+C+(3Mpk7cuDfd-B>JndhHsI-tsnt2L zq+Vl1Ei$xW&ebqxTiqe|_8Y%F1AfT}{OK~@Pt=8;V^^2TOz;fYH04x-8lx;x)+>+M z1U^G)hwdFfMe$=LgW6p+Yb%-j9mfWsqiFnUL;y$PD{nUxWG3ryHkf~qx=QhEaokD_ z>ms&iR-!toc*FU;%)`_-Y3UN#VQlM2dZ=@c4kO!kiOMCse8y4=Xlr&>O{a)ADuIp_{` zh-y@SD3h^Dte)EJJ~mRL@Et$YNQ82|A;wr+zGC`f?rrI3d2S9cH#Nn{=h%&qVp?U6 zHLo|nHtoeqP{~_HdS790Kl2Vkn&vU2dP?YLX)f=s=4C&2B-_b?*aEeU>c9EaARf^F z=?b9o0+NBZc(!y3U>Mc=BRf6(%|)e13ms zr*5YPG#6H!Ohw~bJi5wMaP&o2x8`nEx@bf#_aZ&LK9Eg$ozxG?GG!Vx6sVL{Rx4BJ z?MTs&8rsOYsjW}8j5U?#IpgK$wjV)bLvw_m39X*4Z@?XUylt@kpx?RxOVG=pxS+tm zz4ra)6UMim{?tPD#E(7XX{^^%pSZ3%il==`$&+#=c~$a_WU^?5rczHeI!NY|Z^f`z^9RwSnK_LpS2 zkGz0cx|OAPwqZ@x->F*^en_VAbAYJ{ zv;Hs0W!T`en4RZ-lAGE6Kbf;0tMAvN*sbJL$Dy|k>MwXYr!vZY#5K@0&b7&vjxMRb z*wuOYYmoX{IjFQyt7<3p+-wUf#a1^bdGSd^HeVY<{x6{AMj6DNannmJBNJ^46!vgJ4`by|Jo}0&GP@` zzu9ktwSe5z6QTsW9ytq9-#bVRRIj*yI-?v)YL3)ZDN1tFm|e4cH>H!O8INFj zHY2-ShzMi2H=F0Bwp*>HymP&APH?7!!rm$p+_@5Zc%zh8qLol}wKm2x%ivGsqA;kZ>r~5NIyOGZ%aB~9s9;qxG(yyPmi7&P9-e-eA~etjxx$2l6db4)YlEHFLNnpT%Z5 zY<@|Hek%(@M{wHDlvsDf-Dis+-jV%3=2lS6J@ljMZdf;}2n=sm2;aDbok7Sk!VM4goS8l#EO z!x1WhPIxZr8oRLL*=`?-%pa!R&|YaZ_1tmY%8Y1IRB8 z_xwOUUBWkac*c0^P;qeF_>8@ImyLboU*^pAZ2lYlo?F9A6{YjCWZMu}Go)8Yr=a=v z5c5wqb`>+1w>Gu4ppR;`DMDgTfNrB3bv;sM4nBxk9p&!gy6*hs9Oc^Ks^MzwyyB>e z)*hkydybIx%HqApK0oNyliM&xlFf%L1=uFW4!xSPNy(}e^%S9J%fa+X zH}UiYqGUG>4~>aLbo@<;c;j8@|2qh+l$0F4$&*`*yQso#$3*G}#DSOK8&9KGH#_yq zLx{BvAX2r8iSWnS+5A_luTS={ft!BLPDDsn6Pw%*E@m0_7_UpYS{<#0Hid0=UGN0fqVfKr3-hDC zfu5j-Z0a4xKF@0CgyuxI8~XItT@Us>@zI`CJ9-R-+18VXu7G3ITnr+kl*O0>&u$@+ z#!SQ@m++|!#33{D+3`eu|HDf^O=YV}A4Gzlor-nEW2d_H6B{w#X*0F!S|I&MwTMGh z@$ErckM!T5`*~8mxf#%?Yfv^3pTjR0ns#w?C-q|MF zqOIrY(abFSODh>=dL2BJ zacT!m*DZ#4V>vmFJ>#MBG^va9OnN5E=3DTbhi&9m^C{D4HX!!J8?GX4U>f5i;_}tV zL@lN+z8X=Dr`Z2Hl@NIAnbwhd|0&us^|~@iiR3qTv^9Eb__`1gi#AjOv?2R>7mvkd zIBlFSRbra*2Bs?4k)I<^1`=E7k6#{OdPPiSujHYU_BL^H8}ameD`X+?PZ(Pb6Q zJ~183p)&RLG5QXC$_}2NY&)0OQMTEWgPbaR)rbt=$4Wa7=guXYu!l%_J+znCn33(r zb%_COB!>}#?bZ_fPA77H3ftiikUOJZRZD6y+6T6%Pt~euztCTG)eczx3+VoK>vfoc zKFM<*EgeBjbv~J&lUP*mkystsqEVV?ZeFPjGgrGY0r(|RUX5AxSJ~4vnGDbm@|2>| zDhf$*g6(7-sN~n#JTqBa&Thpy_*AXc2g+U|Hr?n{T1Hn+Gd5e#v!3Ir2i_v_4ZD-K`#WHsL1(KXSz$64A{+&xi=(n35F z$>$gh%g}vnG{A2wLEJ7#PLOk$^U>E;#*)n(C%+|vohU7pdzjvu{LNQPPWg(I*LcGF zlE`8x`M0;k&BwwMy*y|`{g9SfyQc1=XSfX=0J-VE6C1L}=^^?OBA*n|D>6@%Ff+@JlS|1qWDEjvB- z@w$QZh$QdO(U2J*!c5FyDUo9{w#OA?d(I=t33SyAgO~l?yLvg}3+(2Bp1QIhd^Z3URZI7W>;T>__xt#4JQQo#dd=Q$d61k~o zf})^+ zL6=B3NW<>V{`WKf*IWy;v+um`InQ}I6QgLv--G26v9+E0CRCTD@vLaT8kX2fTK%zN zrjPNWFA+ytD)(ZG_(?kv*xPZ;N8tROu>A%!7(E7(iP>VI7l>&+Dki=iSKlNbXH%++w6X5!}q*(}k8=@k+0E$YX<(4U}+6+8eE zx5=jHC(1cV)Ub^2UEry^vX0jiw`O9NsP5(N$!p;${}TajYu~lyT5ObS@`DJ~g><@u z^5&b{;Y(tE=c*lgUKHVB(TD?f6?GV&sieFPM&Avdqv7?ewo9W9;ZsIevaoGUXS zy3qUtHxjaPa?ocjwp@3S#n0*9c`A@DK<}f?2_2;u`hBJ~{9;DiM!LczxzJ~`zq*R4 zb(cS19b}ivcv$N-FOwbgZ;eS2ysXMMUTF7&>0Gr$^lHdzxKwPi70R6=`Y{#1J|66^ z66b3qPIHMYiIrklvqkiW_|`k*(jKthE*d&cMCb;w#8Fn<8OFXKmt}`5u2H8@8-DZz z;kWRHQJ`Dj$>q|PMu<>G7s!zMcaw61aE}{(ex^LsXWjW+alO$f;!?D9vin`>^xjdy zag{2x@7S3asvFqLC)`LC(Tirb9%lLW&+2Pt*5$OKX*kKJ=%9Rhw^_|Y)RtYT!fYem z;aHS0LqCr4D6yeximfz^JJDlFPZ`R~HAYM|_H-h8XlkSbWzEDiQTqO} z1inCrvz+XAavs`~Qrq}izd;Sx+QpCPCF&1`c|U-p{19%u3r1g{hk2kd0W98iMf6dP zJk7IV~$|oO&HxxT;#Ky&|i>i>r1N+rL@d?_9ijw;1U@(ykYt+el657@TUa%9_Xt zE$^@OU||#2RrdRXFyl@+V)giq8{wTdiOTlH**p37cCSuybshuF4OVa~I{X-am{igA zu)aFQO6fgfmN`WO3QzDF#oJWIxz;A(+M~xWB2W6&wxO-9qItSlYHSEG=H_*oL(jfIP4%=36x^tWPa6<^<1>bU00XUO3_ol8DH zkocW_Q?2*`QPzeml}V~^#;acYNbT~s<_7*zxK4zqrOpyhh)|BC%il{wJ#4meNpafm zikhg6E=ae-5l+rT**D8bzkz%?9d16z>dF&icz|4JiOyy@_en6}9+{dg#c7}L!~<$m zFZI-1XZ|JKqesyg*5`{lV7$&VdaR1;!9`g`&lUDD_3_4{RYhl#oA;#8EUuaun))N> zqud8`b|zQZ`J<^(d7qYAUFOR&i%NZ1qIG^c@0}1jmRdow1_=wBr z-j~xayHaW|nyj6;$b95?by50LUP|lY9_e?B$`lPRJh`w?&yTZBwEwnnlfD{H6>g=O z?>t(jU~Iulg^w2XMRg(fKEe~n;E3CbS7wb*+)Q&!oBeYjc_i62HCA+ZU+Snyl3lYW zXD`csKD#WRPg54iXQG3*@c3Lo|M`Nfxj5?yn!pw5Dmo7=R$+UJdgVoC)gDyM{FHj; zm4#JU$4{8px|3DFkn(?0Z=yHxshdcFH%Q*cVD?7*VjB9KM@uWF50;?IEfdo}CB0Se zip6S!8>xIf(Qm_5q~FUgxwELczg|YmyVp)e(sQQa@SA+ME1hMl?1?QTUi2(12R_xw z+0p7)zm(y-g?DzmI)mwA!`oojI#T}$c{o42SDyQvo>dnV7AMV&Ogx#~mbxluN6vLD zhu-;2)OT3leDF5TUOY;hIE9Fkg&PuHzogX#pc9ATY z;pw?WvkIpdOg>tNANqWe-&6DzxL$vleuqy!-2d>T!$pT%AGz(wx+7zczEIG`jPU7& zn+v;vR#W=-+r?jGeWlOKo5^Rv`vxAE%hbL$)$3(q_7`S!*UO%o>MNpOhA(cNNln9P z_p_bJD7s~#45+JczR0o|p6)~oFJ`shZpw6CQL^Y9p5#O3z8)(}Cao7o0w*|{9wa_- zDjqeGe)5S-qiYjelX=-W+0|1g@cO)%Y?^&Z&Y+xzIX$w^Pu-ZDX(Dz9mPV!2*-3r{ z627x(m~}j>70dnPqUYh@ou;4viib7j*P36jn$M?aVR@F!2Ws38@hp5$Sc@Z@P zx+ZO8zg^EvFBiwWQLfqVGP^2>DXt?!r;(-i!it~phA!mSM|jvL&g~IXfySnnp_~Tt zhSPlFGu&f3`LhgvuZT|!1&N<&vh&%;zq-#**?PlZ$DQ`rU)E*JYyXpsyiw1pLB#{@ zx@>Yh`~FFsZEe;giLt5oazD%eJO4jv-GRrd~?@k?N5>B6~>oliBBH*U#Q& zN>{1WGce;6am}w)#!aXH?jZXreKqOCaK-1MbjREK1FUxrNy+GcB2wGXkwZb;5c_0K*ryMsKeYNGE@M<`pFL@$zPlIKM|}%z5e8Jh2~p^)(gjZHxLA-Br}9s9RCvqNzMf ztMxLvQICfisJbtzo+B%x13vLa(E`4xl#G+kbnb;RcDO`k>*bzoR#$zdlZ{U6Q@p0}ZJfxG7)>IEPx26H{ycqgSmG7?S|anYkIcL` z@Pb$P8#d85y3s!JNsKML@cRZ+!mF{8#h$<=vN8I&X1qkK9jk_GQ1EeY&1rFW}uKeJ1+QvN{(Zn^whN*ig5t z-u$9-#aQk?`pJ=oN3xGJKJvD5<@}R9Df{y5ulSOyrD~+QnHQa%deUV2`%NnD%^yF87IcYy zy_7DZ5BK5=DhvKkIf222-xTJO!JijxmLs>E?{#L;vP^|!9*w=SoY`*jvRlXr-jVff z;(y77smrsw@sn)ktvNH*oFAiH&a&)*=BvD!YR-rKEWg((+G=|>0iAL4@5$8Db@gjp z^qPs~MFnRS4j>W6+WkT~XLl8hD_B}^S>gAj!OfiEzj9iDlU+bmtfbl#7oNf+vT*u7v%@G zlD{@b{OT5Ykk`|pAI9NwWpTgkR4-stG?H;uR#stu`+Zhs>s2z5-(_(=&s+Hzd#E2- z^|_Onf{Qegty@!nkkvHgnSy*XQ{}VwrJj=y^LOgL?8Di$aL5LF6ST-VKc{BSFWI%S z=kmh;keV<1>Q@~4I}-Z<4lzd8hxggg7m3Y1Ci7wf|J7B6)eFDlh5w*nVL?e5dHIDE zebTw`8QRgsx>$_lo9Rb-zXC6>V$~jm*LSguO7TxTDsysb@)uFRuaX}mKjf{LksOhH zn%uZluft?=BKt7S{vS!hKLuAkBHQ^rzJxZ#^HA)QI<(xMJ}tcg*XSu0{JcI1CDTpP zH_4APp`8YIzAW}Ja=Rw7PL5{@9II2zg~_Tq(9KRgoLbA%bX>9}A5C}9%St}QM>B~{ zc!Ou<$-h|3Li@}5ALK!w?AwJ@{X1(ps_#CyRY8ENG+a-_N2Cn|t&NoY|fGG`QUSeT& z<1;IUeQ(HTo-LbtAn!)EL=AD9zOtuRBo-vQq>j>y_9u?h_vJ-=@;9^0KIGZ|AFg>x zs#(F_>ZfGa=hTJ)>4e!&?$FT?GgqW&ni zbx{Mf)S>7OJz$2(x-02qX2Jiibm7tbC-c}s%}Jhb?JrGh-^4E#^&FqGXlsfjEWrD& z$E$bCWjQKFT>`C^6}6j2UyTVVedtFWMCxi%-4Cr->&topJjgUb@%xsc+78^h>~0KU7X9V>$Xbq z-%cjHjX$!sf9IbYrvt$pexusFM!n_VM^*ebIr&jdvsE3yvFe0I!RGJy7XEN{b@{J) zqU*a@;Gggg?AA>uU&ca1y>1EQ6#mKbxM%H5t;$+{xajzO5^q^UbhMh~ z6*ZS>dCW1(Fs*M%Q+}zdSz--%?$+`S#jJxLyxZbu3XXX$Tn>!s!hX+}NqvycR*+6Q zi>sXZJFqpn2S(4G^LTn<9!nd~Yr+pvlb#;anU3ZA>BT#G3y%B}f5TW&q&fKAL!?>< z+HE`EIhBv`bXP^SY@yZt+s9-%yo^fQn6I=P$1ae))lp4OD_6zjsWPe@>XU0RFD9mq z)K*#4ilnQJzE8~5z21dqMK#e`uBmTjS2^2z@XRirSe-pl-KWQZK}@ht^5WICkF%^d zrUMMYi*Dd`?Cws{bNB(=;RMn@CL$-SGFwFGAKKl=u)MX?CdxyPT{*7ydXhhGft?nsjcOp))&#`PwCYDx)%;gp|9j-y z2jtk@>Q}m=?rTtYm(07ncpe|Nn>*q2K=}J2eg6V`k9kTJ)Hhm4Acpug*QRvyZ{ zqBF^0H1i-XaF3NfK>v=q%yKXwI(n|LmXiF(LHpm5(8H`RY9?YfX>ITdziSECmUafw zPd#RVOj4!t4$s7EIL$ja#s{pnF``#7Pj7_!yp?JmVm96=uj%f!%E~G_lUmLo*FAFZ z;}SS~oLnbo(}H(1AiZ9!shm@e{_?FnJNhYfV)0#^!RaPEC)4Q>AJ7(`Cr|FcD zkTaQJ1u>uGTbP`3!ZAg468$GfMo)bahZA|gYI|QR^R5xk?s9&SRpd@CEw6@NNoUZT zvlBbWrpfrp|M+RngdNdKASO;k<@pEniRg_I6YXnfs@*G~ktS+APQyKh+jn%m*o`-A z6De2^#xX}BXs)@vWUDTzWXEmosT)1!-`Noz;c{yj(2`VN#Y6N3Ub$AMhJ<;v57A>9 zpvI{@6W!(3_GbC7MvX-x?+fVtKl1}j;Hm2h79%|WEijns91F$$TjG1in3%j7KY112 z#I*1NFs}sjH@dn9?7bXTJj{oDto6sd;wN!{LVm@)PVGPJ`&GrAQN@IePIV%Bna6AO zK3Fe>3$2~nN$BN161u#+fs33|)Ky-C9`ihlG40)5@bw{d9u?Mu=!$hY&%Uxnl zVNBRrB7$)zDHT(CAB5#01Ea=z9QnG#`L^aI!lNaG9N5V{i5BDrSeQaYCbU=9qKw5GXwe>Oa8sFYW0yaQy~!_TjX7Fg>PD z{qCj1*nqm(P`IU-asaU^N)$Gh?&XaBl24sd=Qt)?ykDlo0pp|2wloSkqDD3w(?G3H?6n^_lKr5z!MQ^t{;86}_c4w${stq9{-|ywz7U*ft$5N1xn#}da||KtZ=<{YmS`YP z=)`1!+Jp^>M6zr$k=&S=#v}b9oAz0n!o#ff`&3qyrFXw5mLEOl-vObRNb@Z`JBe-n zJB?(x{(uAdyI$x0dym%Ni)Q{Djb*Lg9qZ^R?~38h61UvJl4?c2>_GaQZ(UE&Bu3(o z-@ub2YQ@hdpI@OpJ`F;b`*#p(n@0cq%9#&x>hFr?y)TA&3*LK;Gdu|tV?xxgurK^E zb7AurG`xH8r0cx8kZ~8%5PH)b&Y>yQ6I19)FYf8n3)o+m;4|;QkeN>RM^wMj_djs2 zznnx!qBOo!CliT!2CsV1lcU4Jw|Mc-xbze{#J^sxw@Ap_P-6wNn!doHo^wvqVahu4 zC1SIMI6>5*o(qz9ZZGw${5Z@x0H-*Rd!g<`}GS zbp!RBt*tlPUCX)qiEz3B2_18lYLF6pNb^&y_;i|1U7y5!ielU&<~#j6%WVRhjmbQJ zg557~)XXs52_bSwMWdcX00A{a%DX>{$0`DzW^vx!rRjxt@HdmiY=WXDRLUfaK$VcDxR z=!a0|m-ZDC=2n1p%+L$2oDX`zpE{C4SNXXdzlfRf@zXb>CwGMt9dN)4;ZrMDpXRBp zaE^{nzXv=!!?$DZVZNQ_f_5u9X&w8FUURxx!;Z#aaY}|y{RP%BJ1I1nufTf=Ns*23 z9lJ+#i8T`s8- z*4t8N^bHL7#(meqhB%K6{$GPLeP|V5XEcwuKsYAlzG82&rZ22Ew8D>3-9q#py&1lA zkI)5UdR!@bQ1n`ChGLp|VoU^Ub*xp!kbDwVl+984MUep4z>Zc6keuMY3y}qIi zya4J`Kzf}uW@U8MnATDnd}6LrCsHIPmpzMj-x2LG4i4~@}mNT{gyUIAuN%U#xfEZ_8MNMV^v?n@vEr zH8|#=pc7E*&3>*1yXaDN7AT+R44T<#$lK4XH#{;wSp5?3UjwCAocgDv5=G3(5TD^zD3~ zp2(Jq$mXTA<9Ati^-1NAMR8xGQ=N^H?jb>U$>sW+$NEHhIo0IIRFjWzsp#%)yj71T zo{uX>i`zem8AQlR{pThUV*(MN&2_Qxaj^k2Y)G#D|WN{M{wnSAn-J~{|O6cI!-W3 zKG8?c;Tw|n8(9;RNZAqK^9XqNLw#+?jTLzAD17E4ueWgNsn!^}bM$AM0yn+@x2b$S zF}ppm{RR5kBl24(+fU4p`vYFAg>&J1sBWicz~WOuzZ0$VO&K@eTgf!fjT-pSdD}b3 z`#t|k^d7xJV|roC(Y+?ajjLwl=Qcbf@-n94Ibq{Qr-W{7!RzUsH_}4-umT>jqUb^v zSUJ<$M_B!*Fy{A+eUy#1qa#3ymn19GZ|QejYKv3YZ^x@(R7?PjImfj!89p0u-~%L1 zfom4S_|M!e9`(Jv?uY{RDB z=J_E%*2Cvb?i8J4BWtKU8jQI}+x%@CTPeB+M$e?^V3+G&p@XNbdcSYY$LoK_{etI3 z&xr&c8WT-JN|i;`Ej_WizomV9F|IHN_t^%bG50Lw+A#Y12R@BCTg6s&5Hy##<7P6y z25O6*sWF2tW>K}YhoJbEQP9JrZATpL7W6rQy>}ljRL9xmfkIea`JNs9@ly1gl=~Es zA7P8coQmj?cEHn;KCcFz&G57;R$oAaiM}0?r5Cv04;AzQwf4SM&9%Sd%U{5cZ}5{f z_P!J!o1WpQ%RoKm7thH2Wkbd{HWR*weIGq8Dmb}{UgdC*8g>$SX=R-6i5cG6%%{!# z)*J;L1J5kY{dRzFOucR7+4Z~{xVnzNt#!{8Ud!D#%PEHL7+TX#d)njV7lUkMyal!W zjMuHQuISmD>kjAGT~8QwYUVr-`21f_#c#0gGt%~Jds|>%G0QUiIa@NTsDk51&#L46 zud>oAV_MuL;L{ECVwPF-NGS#0DOeI+Q35k!GW50#eHOL1;#dPbeE>^qzw3$&-@z`62C_EgVA)*>@(sT%mXe-{=xH z(?YK~>A(0*miz5be%%?m9B%VkDhf4?74=^+s}XWc$I2XhE`6oROM`V4SjfBbi@z`A zU5UBJPvI*spzPJ6Hdo;8XUnfDpFB%+riR$bkmS$FjmdxH)vQY%PF7D9ZRMq|lNmNy)a4pJsP!_@`lG`2&MA6rPa{V=^5Xt3@-`zA|6VRCxx-G5kc+b-?L??)#P#5CxiS$aKAGXMYxX??vFx}`a8u;`VYyH0sKO@;jfqJi&0+o z)n3_^hvXY91hHo`*5)GcERq{m(+)yX)?jsZ;OBgqoQOG)`%rxN80$Fi$8r4mPW%h8 zjp1m25Lx^CM)DmrsWB8b1h6V(#X-R&xSvrGhoZ%-=okw*w}AhnJ5hmj;tI zcksg8VTD(E_2$ERE#p5JOHRcs+IL+!0fc`=S26pxP<*rxX|q^_V71kJsV7D$nXLy! zma2lzb-JBjpeJ<`Uc;L5F^Xj{mhu07U1+AWj&GnDT>y_O;es2{X` zAgMJDuZ;ZGk!Z58_!>C6Q_tmj_|gyX;}zMe`>gI^zMAJmQNQy3T^^hrUYpIP`$SFU zw{i?_6m!}Slh*U;{fCY|3T^}rK3052@ob#sGN(52y zx#>!>qWZ}PKS#%l{xBwq>U#31w58qLg46y+)80gehnH^$`{Uove!cMW611nb_;pB< z-~Eh73tj0!BTz>p5HAlqBd$|cHuzB0IN9RG@8Nj;RRZi+KeS0E$xs||tFC1|JWGu( zd#Q_B+LL&&R?Dk7S(ljEy8X0G{YPzDD*K%5Yjhv!EW2!1svy-e`vw#K&dmNNHCiUa z5&Uqt=yk_r1JUa*Wy5|?6R%F@ej?&{k{sTTc(S_5??2$l`-=9%lI=y~*&fnk=N(fziomCUQdmmPfuY-4;!Eq$%7-YK)chUx^pLY@(GDnkbWT za^kVX4bE+em6sE<{hzKZ4~xp(Am&zDT<{VY_CFQ#OL&+Pazs;!&uL^41q)BfR@^lD z48I_P6ME&tIP}x>%F4wnNs;yFc$F1QL|NbHoiY*JwxnA+hmYRF!(e5F;-kEcO?)rEcpi9t z=NUW5vBf0BLj3<%cmGMw#6di{q>8KQB-u?of*tgGX#=}v$lU8C>fBlk`nmM%tgf=Y zt|;DJ@Xpa$g~uev=grTbkaK+2-Gz4-O-NpmKe6OP`5m*rObks9%Ux7rMTs_fKd7tu zIQvjus$_{0zvc`}o~D;TBU6vQ$?lrUOTMC>buZmui;9e6dGSp~E*@A^QQc^wa9`mG z=0Nq70dZB~Plf+A7prN(nxlgYJ}tajzQ|DZMiUEK7w#^+MNZqL`qJE=?#*+sTpr@a zqS5Jt#Z3}Vt01`)hKp*Ws|B*v7O2gcoctzvYVtwncU7XK$uFH!6;e;|rsk8uX%u;l zPD`VBTVI#=x|hU?ob@&Ipnjst^}w+Z=eh~+97%?C0_$5yj8^i}l4RGlPP;OFWf{w> zj_Qy1beFt{cGVgUJS?xIKZ$e)Yo$@57e8%@#M>m+J*fPU_|X9|lIO{+o5wy7qh42 z=I8&EcSr8u*=w_($*qw8q};g%IbBn)CZA39%vqJwE@x`0M)G!c>>znG?XzamDz6Yx z(5bnoQsFr&%MKLoHHGY*!ix&tRFB-WV0ghGHL*G@sW02Fva?FTl%ofZj!=;`$sDF? zAkY#Fwwf8*MeR~Mc^`M!$#wYmb@1wF))=!Rzg7+3%|xZ266Ivce3Yt|T{3%|8oFCk zZ>K(2k6kS_H(6RPORwx>^@7MvUQIjuk4o?rD%JYvnD7g&qP4z#Z?Qx=)4rcCUMRQz zIN8JRlfpOSx25o+*18P+N_Jf@hq{_PxI6XASmv}IFumn0S=O7;`2aaTmzsF_8`)lq zSD+~zdcx}*(z^n=_9&|;dbfXowwvJ=kCET&(`S=R)r&un7k3ICR-b&|O@5!6zE#!6 zKWcnVm23JzQDc~L7+v>L1#vvxDn}go8d`3W?C(r(I941vJOI(rzb0Mm58A~_c=4pw z=ca3U)_nTHv*{CbQ2vno`JQ(8s0ntzq>jyLl9Qc1Ud7&w)YhDxc_Z@=s%zHj}zKqD{@@nRF&Fzu%nBG#KW?u#;en4GOw>3o%-(O^%)+aq$qz4q; zT-dMRpQGuc&pOrL^wK)#=+8&yA9?&}vY>9kE}bV%P*Xio&HW%5WBp9g{K;(DtPDpV zOzP!|W8~1*)`@LCLmxhxRgtgc>%^_eDspaKNVQA-LxVmq^=qnA_O0@;zLl$XAoWr9 zhV19E$E%M%lzbs|y^P9a_N?R}GbNrhA9Ri^ySb`2tMkMC}QevqjAGIx{1A6 z^rWoyXZ>7na#7N(()n_%lPcRzQ`a_FmD>ka_LH32JT-NnIOEUA;#2XApVBAFV2$dK zH))^cGV#cr?5c8Xrl;AgjdkPvCKDTpe&e687%n4qOQ<26AX;;syIcX6O6nL_DqTjk z>>3oZ#>B+ou;C-wtBrN>_%vOYw7;CJZ$oxp&6DvM?cq*$+dw8w_4{q~@i{c}pS`A_ z^DJ6uigg~7zdMQAw38>f$PBUr;!K|>swP(^|4D7mzB6ZVPVJll*)8Q84$qmJJ0(|v3^Fkd4AEKUyX7YzOT4I)VS9L2qWDue$fI$j zsfC{vzFByp>5sKl%`edx>Lqz!-E<1L@92X%P~1|mOK-7qg+Ds;fwG3Xs2gvb;cw@G zZhi2qjf358?HhT{o|NtML}ICK6s`0&c?3tlCbcScs+p>@^>Da9RX2NF_H0_lx2cO{ z=oO^S&0eK<)f(2;G?}$!QoE8Dn7dggu}t3UPW?A0;@7`}Q%bkKA5ivSQ}g!8IZdhX zUR_v1SGCS21b-*9wQu1Ag+o-OPAdG;Uk<>5%68Yk=xZ63`7&$IrKkNDZDl*f6VcwS z&h{T#SNJ6Muw~kyuA5oUF^{?_-_#X2{yG-V&A34ym~w|zbmOnP97m{xdK=L@8`CA; z$#9AyH2!_MDhoPhl1FdS`+1YLlXf@D`nZa{(~dm7%lk7(pRj|^WF@7uTATa(AH6)o z&OV>jT;K1v>7e_q=~R>1rB~8EOC}VWyg-u)1z);1WTtQpGMWJH|Zd+;+Iuv zsq2fzfaKo7$%Vt=!VogPeqnLJa$KNlVOvtahu&a^Gw<&zoLktW=puT^K-4wNbi)VD zE*wc>Y^A5J=M@_vzwO?{Y%^`siDTr}-IVMnOZGwY^NveRlrR56sz<7_J{9??8}!Fm zh{KO#TaTex)>Vc4xccJ@k_*(?--qJUazuOc9Ui9Nzv>KsLC1$=NUt)HZ7e-wec=;2 zoIR^Tcz0o1CGG=-9STn?yrXa`soP0T`w$xESh@8ts82}hiq``FxIL5Cmrh@W;x6Jj z9l>Av0o&kfQLlkKS39#h(N+4&D0`6nP3wd^Oy`k5_IulQva zz|!;^bogJaatdz!C;jkNCt8an|Aq&(55BcX>?Zu#eNlDQjGo_zulRME?t?UpQBHog zRlFq1d;y)O0W8Q$RAIrEpw-@|3hhr-t$k#=9H-yN_4MGOblq#^!+*@LGMl!W=i1RQ zr(MR!Q=9MSUi!^QYY*Hjr1!V=q$#}A(XsR*e)|)6v(C2PRk-w8@;O&8fMxpSyaU(1 zv(8%Lu@iZ&*P9vEmoK5c$l;r`l;_dS0d`7BQUA|<@*0bI1HKtP%Y&}|lBalx&%f9C z?=DY@?(5&Oa#FDFHh#fzAUy$}xP@MIncv@~i(SSWxQC}JboEUvooSx&j#zX|8@YiT zt|2q4A|38@x?N>9b3#qq-z4MDycT)nb_*QYWRh|_!56fbgU11Z#jQkbZEL# z?0W!PY7mWnIKJ>NcRv@O@8vlW?F=9FCv?xqJNUppzVQ9*y?^ioK;u$QwzVd~M4rM~aSdf#_ulhNBo zSrOqut`5t)rpx@u z7dO&9V|LGsj6Y$#(;LRhi&#RRMbEM3NuKx|47`hV)X$3B!h?!pVO!8p%37PC zpL$k&9xuU#Fyves>Hyyv#m^R*PT!Mfvz*Z`a`F($uj~Hp$dO*2`UE>_7@jfNn%@Aw z2mSsI+u>!9d>o8l^X>`n2W7bEaBF-)RPZuCcf+L<*j3?Qo=*pgIQ~@B5}AtI!DJIp zV`K(J_RvSx@)1n@%Bt?q=ut1*Rb-p3M!7%ZwJ{STGEEOy;R>`n36~z~-(SJFrqe%% z+%3m9AJZn<;Y3&9i|y>BDSzY*Xy$oR=5Bnh2hLcV4p7RgB3n4-P37Z_ku`Z7zjY%k zuH{}4XFrB-Jz~;jNvbXMxe9#RN30;`*F-&UL|3CadlSFIbdumtYs;f&p2}+(xt;4> zTh*Ogz|o?N4|KNEnnllz2~`z9W*f*%1DkF9fES^evvBuyDEfDIjwu3$vy(EF)Ws}B zp}WLPPqNcvML=Ud&-b9S1rA5XzfBXYp2M=$!Mbo4dgg5 zIYL^T?u>f+w5k2pvEGwKc5V<)I13#$_HRFUX1cP|9!gif#$O}nBH+;k-eBKmurO;ab;z7c0#=2sfMM$G+3{ly8R-pNi;IYbR3EZg*S9uV`QsK6lVlN8>aDt>!cGWwI5{LNDKf zV|3AvsVbXYy~x@k79X+8`Xpc;yZnG>L?8PNPU(A5=FuoG^74vAe5&Fi+g<;U*Dl;J zB9TYIq=Gfn2a{NH_(o0yxlNw5%vDEWToQiAHzGeZ-$_KacV$m2@Yx1e)bQQ9@IGQ` zhr}i$F0}?;FSfpzx)qam7LvGo?XAF_R@z_G^e?l<`PLCLN|wWfxS~Yn^h<*CF8aed zpKNz-|9yI@nkGj4(-MOI;L>knMZv&QZy zC*d0taG>SRr2)O^*v#tl=nE@BzYwI3f>BAB9W6wRg?FE2 z703(S$Ok)=ygAqY|8QPEIkCIwIp>OTf8i-R(EGJ8b~0J;8QJtb4%Hd70#3`($Vj@; zE2v`}-Wan6OWQ|KV?WQTMO)s4Cyu9eyo}@DM3z2EOZyZgimksbtUCq{9dBZ&5N{Mb+q>NHd$~t`iuD+>zwC`4A*|1 z1pgKWZYH}MIg=W2FbyLr;*pE3@GZN#haBrb>Rv+PoE`Ro~;N`#rNP_(ev~Dtjw|+Z~_b3l&`* zG4&SCv<3cmhF2Z@KC*OU8re=)S9jJ|;3((8nTwoxXI!QOtc%!iJ@_3_=dJd&j+GxZ z56xgmb9V{8QqQ}}c*|ezwHG!PxNfZ-ZLp@O_t>4u`nU*IT!_0>CYiEHhjp$B*;LE< zlo$Vwh=0uPtLM&<3)b9Ij)Q~sK(QRCEJZtOedjQ)@g<%3Mpo^kejcZ_hO~b>6BmuS zSHXW8!_JCudwyd>9EItN zXe<-R`s2XqZ)ZBwRo~+F5q19sG$Nb14T~shPC9^Ge>%g}pm+#o-$9RR>wKTEy1`a? zAAFt#I*0MDE5SLYP;3U3TKG@Y!bIl677!`MTO#)_ax~k3!EqVMbUcc!X+Qg%)aneE zDuIemCyUQ!XH>r@2T8cq)O`u4BzeF&Gefm!24UY+Kw%< zKv$)kd4xuSL=qhC!+Ud4`*O1=dWbvrrT?}Ujr@jnS&J{@DfUW<;&pV^55@X!<*Hb8bz^<@ZqD4{8|ZqyD$~qx;xLu1-c%Q2Z5i+&(-=ub{2L&Tp9izr@uy!S6vnY$H+Q^IlKF*^-{I7=K@Yr}Sra zy$Igl(Rb%sSIEGN>2?EDW(_3mJ38lz^tPayiCzP(tS>w3Su#GdccY(P)Z;XUtueXh zN&AQjm%;w;iSi<&EOKUN+sQzBz_TpIF*w=pFs~?+4SSn)-Ub?1(lUF(kK^I*c9a|X z-W<;uXLav-dgR!d$C(z#_e9EMFiJvie?w0 zp{JdHaIfvKbuIec%C0O=zBhu^OJH7P)XjEsPs4{aqPECvk7;Hd zts-p=<9#RQq{WVQ!-L4&`U))t#m*w5yW`M5vKW%Etg*_MHE8Y$v5ihRS4|S>M<*1s zM$2a;byZLak3i)3gzQX!>q`6k8dN`b=g=MA!L{GEo0xmq#fn;4TSt5D?R7CYpYKjb zX-Q#QdMYmyxb`n2nNq<;=5N{)rIuz-t@9I;9P>fwFiu% zQYo?%p7O21?4_%DTOXrWO~s4XfLi3zRK+n%gFw`nM4y}zXf?8&4&aW3{@vuY!)m5v zZ0=>G;<=vlG&}Egc>X_h`w<&zDhi7^jpyN`wP8j`z&*Yf)UzJ8&jhIyuGbaD_UEOz zmkl!}BiBZ`*QIu`+)f{7>6I6?oGAwMocQ2WQU7UZdWKkD9xiaZYgW@Ant8Q$4t?mJ z=i)cVtp+V173Uwep{Vt#DurNpB?UU0=&D^9^S+e7T`f; zbRya#dtnRtJWi~%1De{7qkiKA9wi-bC2?;f6UTy8W!mUBz5Q8 zb7YY%MMtxo{9EAPmt4KxTBf1mJXZzn_W*}m!R#tnb(dG?jQ$WcPHo`*F?Q9>na0_+ zv(K1^+SRU_cxowrot2)m#%m;6kDQZh@bi4tjF(CGLM$;|{Lt^Ip2~Oa3TGOxUTfg>N zOuGJq=2wtTAHnShU|3jwr;}yvt-7W6F;%q;O?oX#9m*3?wsNXY@1xw z^^Nq&sF?1;yA&Os9s{{1u>BMHL???8KOhTXN8+DEVPdh)N?(hX-k<5DH-{wXh9m9+ zzu|BxI)--PsjLf={+3m+%pRtRQ!f&|UCBGVh{TV6c5PgBDM|Y@44I5?rRa(G| z`0r@+(OaHFfu1nYuPvs{-GIMb$M1Cu&tT-oE=IK*$&#ztDh+hznxQ{N#pGqWM807H z(J~$SyOL1#2e^2D$F+GgYVF%Zm`1hK`J3~w3}}B z4vM?c9UgYyLGpD5d%_2zS2K9QYSL$;vZ)8Yw@8j@FS24^)@kyAYFd50)R?L*DQ8U1q@1tKcsV~Ok@I?XGm)$|CITE5^EiM9{wlV#NJdpl{g)mSg)n|J@)xfA$w$CQD~7$~g{?egpi<;4rW70GD+pBN9nde}?F4`BieMnLX?EkQk8c zA>U;#Z|dcVqpYXudR4UXl%eT9U~#f8yj8`DEBQ%@cy|zmj@dt@P-S&o?J*I?w{g3u z;!rE!1(e}*dti5hRfd%L6c`TFn-6EnU=%&S3B zi=rl`$KFRy2`$qvplD3?ylS?^Csjyn0I+zDsQ@(10&(gC8=YQ-4jFg>zkH- zsHnN=UqyQT&pP_9cx#uVy-Wcpql<10-MVtbunUWx7TM^HKU~4CYUG4^iwks<`T2dK zNpf)V2c4!S`}s?huUu-U_d}AMldC~%veWpJ>{*S|eI+wxs3>m@-Af+TwfB$0Y=8dcRS$W zQ*oPW#Vh36J}0VPT1@$pqNBRm4AF=93;l?5bd-4##&nT0aR-ae0INf@ja~K-Fft=_l`d;3vu`_*A z#>eb*F3$7`9ilDiTi@NjPy8*X=mc3N*CZd-wQH0ZcoSW5BIoa15tGg2=pedKzRnDh z33e^L=T`sTLO;2hd&NuT0+asGl;c-;=Ik^5ICR|1zHy*gAITBz6b=aT+~loX!oSNSga;4R_E|PDewF$mf}~p}8c*|HzJqNV2eBBIiDO zp@y$*B^^0t6_8<2wdUJ+DvUbg||>cB*I0+VBuIK12ay{0jJCOvr;M&tXjy=gK zsh6{x=e&?}Fy}HE67S^>lO?f}&hSL`Td5b4eG{i;l`bxq?o{-4VL?G}c`(12#C37O zr}{vCR(M@eYneF{iie2U&oa5_{^WvWiPR~2JoQYqH+^9pN<1z3C!L^+o&>4H3fM3n zydR`JU*Ko{W0qSnR%0v z4di9Msr%m;UE|Nt@`!mp3lCG2StUp5kTFPd; z2yV3^F~Wnk7p7H#H_fc+X&%V!B;3>TuQusV`G-v0VRG58WkLSKKhRs{_$_qdg=FO! zG6a_EKNOSflCoHzldE19g?!D+c03Ar5#DbmFAvJt{L2}>f@8O0AFjrKXVPu%rHNcb zC+tne-iniswX$ubUUk}O1uOd<4a|XS;k^obt`rM%2>(Y?#=}`Ud=8;AJWa3uuO2~{ z>2p+0M~AoJ?MWy%y5rnWVr+K)uVgwS4R`Vr=&2{529cPrZp}m zo5M?AG2^9)cvUZSK9aBIE0S^{N}J%_6rR(`Jf|rC+Brq(b;5vS90kNQfL6lbC%Bf?Me77+W1s8G1GCFSBrm=p}EJD!8@a1F^0uy{ zeSD4<*MnT;;+gE+gYr=)>;5;9C3-cxWS^bA$yV-$jxsOC z!R_!bOk_ozRlFZh{vA)rMx(uOn}K#V3(f`Qm!)Nw!Yhkd-|J!Gmv%GV_eP+}@PGv8 zHQEYSi!N9-aDh|6UCy@icdue1$<7kJ!vK`Fx^jR z^fU0q!&cSaihA-wJr8ppMbj}!H!2~c*GNp-*nlRFWFn;%@bTI9H{WO5e7DH+OTzf7 z?jN0~N{SQiM(?vd=hKYOV;gEI$7*Qr9Ir>Y&*74fl55Z4vnQa!^S$nY4G-cS*OKbd z&1R;qWE0`Y?|A`o?_OlbGI*Qi>H%+0~6k98zBx^xsDa+_PeC>5u@SguCTGwLNm*)|0 ziaPJbWykUp)FW@(>aTWJqQ5Ggp0v`n56kq5x%R8uRh|pE7#DcNxwbeFdiYWR;a~u-*kE z!5?1V^C^AkZ{gAY4F0|8_mxh)uBTk;g!;m+_IPGwA=J0ue|+~#_&H5{s~elGnLL-W z{XL$&OHN#G zdc+vtS%luV%h_75?(AXGZaugTqmx`qYiPhH7Slub@ws)SH>{-FwS!Ahwf+VfJexOn z8F_gNSpI+)UnxuLIM1q-d9@;Yd*SP|NZO8kD>26)tc_35{&18KxOkbrKayaZYX#QDJ@d>9J z_2y9*m8ARq3H#=efv@37cRQJgT6~Nr&B1RP;VWlb_hTUT9*gHCr+fjdyU?e{;t(@% zujg>I&}&+$q`Da$q*Yzi!YS&5_8=7oyJgmulvlTcui`@36tS15;lz{p-`V`8$Drz` zVC7DFSSw!WCo@>So|Ig|L%Rz7%%EwXDUgdxBs8 zMk9#MRS)4n4S9H*!ueO>8p?dX>}o7=N4Pbc>3Qv!}~ZdDQtww56F< zSEnI0!iA$dNgex(ysACUKl-=ix_|twj(eY$x&KLUt&MAs`0F3AX1x7GCf)1K>{gHr z|IBY7f7mNBX12nsnLf$FRT^gOic%okmKIWrme&w=*Coqq`?oR}uxpReP{0~pzS3v`9~{?Y3Qr)rtjdw4W#%W954E{EQFE2 z<5~^T+0!WQcbM>7#xL-ae2#wj(=qN8wSJ4(<#*G$`Z(K#C}UG*2WxQbs#blWGkKJB zi0`xmxv1*T6O&v8;_rH9WDvfAUVbFO!|S;t<6SRdmmSe==-vPJnfei)e~vD~hxHy_ z_yZWNfUS{@aXz}b-tJF_H&OTAzz(C2$O-K#7M)K1uHXZK<1QmqHWY`Q8>Vn}%n6 zK+k@g#c?}IdD|)viG`o)DVIB&`*E+)xYd2M<6-diaeC~TXyimZ=M+0_36mmnpMXD+ zS$BxMuM38?J*6~xc%gmX2qq`F{w$b&0o**z=Q*y7-h}yZaVOp!6DM|%fF)_pzmrKJ zk=D>iqKCs#8d3r`S`62p0r}`9_AX3();T;08~d@9hS@mS`hH;ksqPi|8h^l+m>{&oexe)P9P~X3#gFlM)HzVkt!!xqY89-im9>p3Rm9;%W}zv$dg=Q&(Q;GqSZHp?H6Vs zRTrd+aEVf&Ro=VM1*1ztbk8W@F$pTz=$UWe^pUkY+HMv)rAh8K+bL~uc5zz2_%u3L zeEe4!2&aGg;h22wdSA@9RZ_LYjmYL2X&SB{KM` zd1`HUh@MRq{Vm4{9_IwB_}fWdr@@=^P}ng(ISMZ};%PfUBr;Zi0*_^Q=U1e4^u`Em z&U0ntK=yE+F>~u4di7)YaSxajk;|}1q6X`W45x@0Owk212WKn6$~Xj;`>e4pKG4WF z>R402AtL9$+S6*>&v`^Y*NBauXca9okjh5oTde+&XGTxhlGar} zLt)MA_k6f}wsXryo4e3X(#eE>qq98(K1N5r$+V!){fyu-d6s=G!8a8rATzA-_O0jW}+)6-fzSX0A{&e;b3UqLE`9did-I|rw@hLpJ%U+ta2 zjHLO;=E^4L%j% z>TdM?JG}c5XZQogj&X7i!o62Wg$elP&;B|KH+$RZPP3XVAhnldDFDHMTV&uySK{Ta zev8EZ3>`*Swy3RHj&8p7nr1KSJt=xAugh@oz@TDx*auI$_!W}8ymeIcTM~AbcJ>8eyAf2kxjIfI&vWZ~MNg0=8679O zoG!Abbs1^W9X0hsOZTCm3tUwMV$m14ntT22n&`Yb$1D1ESM%3@DYGT`oNaXxiNBL} zcRM_a3Z(s3AKjgz^Hfa82~DhW23{xOv7u|Vz-jB_!6MNRxI zGJ2!K)IRsfx33D|Ru0z;stpfw^cvddx3GNkKq{agm0ZRCj~*RX+ZD6^5|bU9ez*4V{-0KvPoi6SRnOU-;eJ&-^9VS$bUIBvZHE(&7<&cR#rx71_W`U~ptKy-O4S3g3CpzM>oWGEj(9h+e1Bvn~2|#dNVGz7-lsXpz-H zH!2m1WQnvzH-R6;RuvMq11t-Cj~>Q-*fj%Ky7$7u=Dr!6FKVS0;5Wa5@*(FLJ=P+# zwt0qbcatemOHj%kb6kJK$=7h2!8^k4$ieN`c}4ZzR+#da_uGAEe}Ivb@# z*YwDyzlqen+UW;w9P2*Y;O<7yDFxyYZ#V>|g`TsE{3&Nw(F-hUGV4VH~ug7PlnkuK|g9I_GK_RBvA=JQ5h8Y827CX*K6W?C;LCwE2?&) zPje-&GWHyl9JX!rvv23@^Hl46LW_Re>AX$5e2e|}w$~Um7QNY`cUyG)oQ;ET1>3ZL zmxJtdTr}#UqGwWQ7r~?d0`+(;#Vr^3iOPdL;J6z1S?C*|`-u$M4gMcbqnU!wu6E+l z32Yzegypyny&RXpr|9Kg0~Un67a8Q8KsWYy#MxD{@~D&v-*77stYMuIq2Liw zmFVTXAMXrVQEVN#846zvW-sypyp1D;-zYSqAN+jh-3quCHDGau-=m!HJnePvt1=W3 z-Df9fs6Kj_{!C8&;@ShI@r7sOE|h)@tPQK}I2iYD3?{m*hPB)U2A&HB!DGwXTP?e) zo7qJpJB%#ByYTdW@H2An%2;m#G{ee>9PKI97NB#)j^? z$oXv|OM@dvC*|Ocp;?uI?Z;YCbP5hmQQPlPHPi~1Z0Bswbpj_z>r`-glP4)T>My^0# zDO4Ms7QXfOAMHFU@T#M%@EldM%N};r&buO~wbvaZEBi+$xybL0;9E%XTJ{~jiI^pE zitmTU8nZI;{B|+eC9Nkk)Zj>=w{Aii;Ug~zzB!&0`dw+%7d$Akfp@~Bcm+l8_5WVd z>@fM(68Emo25bHG{7aSBg5EHeO-noa!7);jx>nBa1{iVkco~ z6?ooSyNsC>$HVz-JpCVgoQMDY3Ett?IRHw*@5B0N;BL`tI;y~<`Z6rOkg_x2bl~ZH zt6C3s(LuSA6KUy8Pw>4Ou&PPstRgG7MkWp#y*#6yJoMC#v2?9S+tp~uELe?nW~1zPD$wu9KeJ)Q$sB9<7Pg})?8-i329OT_$oG9n`ItKsuL@D8gqvg|jK z{%hg$0?&ymxqxTj?myNOUEZU%G%5sEx?{xf=J|GH8-(5(dVcir+>MedIqATwm=Y4+ zx?}tV<}E@ypYn$eV&ly7q~-8!k!No3Zl%+GmuK!n6cSzABReK|R#o3_L$f#!?5o&s zR1NHc%Q5dMET=W@utoM%BiMF6*>nj=)yi<1(%=-68e&FCXsH3!x+o_+MTfvTUc2nH zj_Z2y0YqhD^fM31-3VMudG>KIGO`kmimZzmbLfH5KRqfw0>cYIY#%&soS}$jR$I;v!iy7?e@(m# zTPnO~p~ZwxR;MeUMtAnm@`H0%@qbjA#&^TFSk)&d+G%Gp`9#0{3o8o_JHMXaEg0Xx2pmtGY2P)x`FYuhFNf9lU0W=J0@=g9XBJD`my4k zWCcD+_TMb3J0O$YP&sP`8|f+5QdkwCk^KbwOb}&vEJl^J&`wS3*zBn*o#<+C2#;(U z&sk3%{_E+=b>`9ecPpHZs-&2Q5tePp=g?}8I>nvdRnJhT~wO-hV(d~8{P8_oef;%(;`Ov*W_ddZ-=#jNyd*lH{_x6-)j?4JF zy0a86!HK)Th%WH_Vp{ESzFk7JYrZER0kg1>SKtmGl1VS2(a)`EVaA%7N*4U!=`Fz} z^5~j_Ncd=iN+Mh5WcO-e1?^x~_|r}Vy{>*g$5UfYN_+SgHPWFAMepDae%ivV;IPq& zAnLEfYT81lI6H%hhn#jq(q@BHR2EkPkFa3ED;X!B>pa6_9oow_kPRG;s^(VK6xBp! z(E1**TLN#tb)Tp<4NvSL-`b}W!fDpm9o9w`#<3ZAwZIoTXMDxctNSGH!yA8}>%ylR zp10_nTqsH&HPm5G$0>xrDghc*VQ9=*{}JVcK6MyZJmjf6Ks2gtcA%!xB-Wvf3~%lp zVe^;BoO4a@wqj%$6>on*uh@dy5By+M;9LRkahVWAN@G~1fPg6jPvgbKMU;i zcTX&Eu@!=5+0(MwGp1J&Uj^CSbs4d7+Pd# zv5_MZKEnlg&s5T5B&?m9IjK0Exo{=AD}+uMYYWLS#Z#l_<1VKX(UPz&qMOV{Fxc+O zDxMoU{~@>@Q-8v@R>vwr&KIM@7UbP2;NJiamj?HIoMsDt85(fVd&EE^pC{H4l}q3I zZH%biyI>IdPx$Lr($7AnQ@ulj`Wjqf!oygfjPps%RSJ3yoo<)&`W?h-!r1U6UBvcz z2>0p;ck9EY^H}-e*^U`&p^=^mn|r{UsM!zxQQ4Yf=0Gdh+zz$Z^{to()yI9pm)gNk z^vDP)9hTHt_7?Z4zb0-bYPz8K)i-=fX!=)xOVh z?%`WIhh1?#%x_>94Z%93Wmqf!&Wnhc!ykS-3b*$-&m_!P;~7(}WUW&x4}*gHMrKu5 z4WZA(M2$sG z+@0`+g@(4r4!^;Xwy?=gbsmvJ)6kxhcy}9_AXhrS%DfA6z-N=w-R7x z{~uMn!T-IyI^s;h^INz}eRA@UHH2JUgIoWEE+c<1I+kw%?T8)z4I9g5I7~#qB61W~ z#?lObiD^_Z8S5{5D@1)Nl6AMfW(ZwI?e zcMC|TAkwY)BBCOo*t~**s2JEHCMLcr7ziRDiU=y*(kUU*-E2(HoO9-Wf3x>q=d-vw zbLO1q`SAyXXWi!wc?=dfWBq{NWUub zsS42os6-K#X23w6!aR0aHFc7?32tuAE;Wbi9Q3>b4p#P*=E$htUimVzJM)37qU*hh zD?Nck)x4bt;jT7(F8_GBRjyNT07{W9AnCji*{aDCe~``fA^%qgoTXS?YuT-Q#5z&w zR1^aSvUo*z0pc&pV!g^5j_}%=vYZbA+aPptn$N@RU(k}DBM5po7Vurs-c{fs zOUz_WiQia&ph#QYP?s3VNLyRy3ES#rSv)uH}t+-E)a zKg()$qRmh7lF4<_2O%KZiC1N;`AghEklurq`7C9zZekTd_)U{FR8QKAj6 zUCY;{)4n8Yb)cvoFj|{(qczJ<8qf_~WiM%RRW@)_w`{Wz2xDKDFBrK8+e{ojV7{~k-7jO0sD-<1rw;88M`-$Kht1~fyi zG){dM0AjLMuJc{dpYoq7@wsGyV(5|p^0)_qz9w*L>Y98W$^wuy^K+#?B~es4eFLjf zRJ|W-Xoh6;^G+Aue+Y=U9J|r(;uW1^7l8G*umLth>8nyav;^K<0AJ-n+m9fh46-=3 z!C_0eLj7&>`Fk~gDFUgvaUNt_1E5nKnK&ALy^H5O&i-y7w{A6D`L1TWG=$z-vvU{ptl3C9d&@iJ1AEG}>d&)PL8fd+3n^3t zt#(&FwjB zXkM%r|FLXRO?2za{S;$-mCVPN(F;$Jt?)Qeo9D@dG8(deU*)O4hb$_g-$Zfpl8ho-NQPCDNPM?(%I+&R~4~Fojv#SEOlbq zo{~oD>?QA_Dz0?CRTb1u*77f_J^`&On^Cm{%2s`m*o98fmym*$peo6&UC^D*oJW91 z5j58m>}XzHMI>$zD(()1l&vO8R|bfpDcNwJg}1aFJM**G;o$jMVAzfCb!L^XviFC1 z-v-{w{g$so=dBIMj%EC=Jl-=Y_^#zI16x*h$QkHwBk!(Y#qz7v0FIZS#5M5KQKY#t z6&4}04{<+Dfyx3)`_TfbYtczjb!HEeZSp))vm25}`4KVl)(hdlP25%4h^53`Zs41j z+4n-?I6uNqic3n*RA5))u98HS;DcHRazAy z)pvxSCP2-~;T#DS{TIK?cjUyq$={XG(RI0AzGd0k$`{wvF!>l`Xc(QiYe995x#NG~ z_Db}{+0QyE0^cgEVgnhdFF^ClpltPq{3&G_AI5fh3aUTKnW{2bYs09QKW}&BJX0R~ zQ1*xX?-o$~hHozbzO}%PvW@e2H2~Y1-X`0*7JMfQ@41wXqMeLjhdL$d6nhhUYsIb= zFYjWVU*Q#bgM4;nAZZ?%ra>vrpARM0A&YYo6-4*&`6pbv9bKxE zRDGT&jkKS?HM6@SnxiS+m$6u*1Np1m>-jHyB>kWKzq-2=+Mf<=RziglC`>0{O|I>M z-0cmUpp5{0R7RC=e|Vz_?^l5qq(dq~^L>F&IbZ7S*$YvZ=Z%vnxXA`^KN7ADu1ssapFV*RgWK-!Lh~`z>sa-Fby&ks@Qg`RqAv0@$~V+w zbRz4N_o04@T3Q2v+I+U3r!U9qJe9)7DJX9uQKHGIeCN-gu07Dkue^3b7g^+)ZsN&5 zBKhSDQcocHnlyj0Iv6s*@)c-8Ik5xLgTqqR%SdSHE^I03<3arYZuWke`|L*UDPKgM z14)7_RCwG2WUjMzRc&JHFI0eJ5REtkR@7OPbD-HpJi8&dbb-0f9PTQ9+B|6y;X zcsxIcr_|d+C$e2YLaTiXjg=06R%Ep$aBhgFYYJ>Xa-660y_4+fd#;(wzps+}_aq!P zf=pxeQu>zfuLkllR`p%#9ZeFI7wiz&ya~9s<-T!tP?7Z&FztUCxEab=zGC$7P!(3GR?X^M(ybB>~9F)(X_Tp@KbXju@#!w zgoaR#-8v{l85@^Vn9+%?9q{YUdGQl6!4;|3w6dRwe>_Z1@Jp;+IdXsUuk6NCDF_T? zoj<3lVm2ZtTB6r#rcTj{#f+sIV+Oo10LVTKHkBzmh}{;!s~3=@jnHljfrtF$I{PR; zQTggYBzhNcCfhz8dVGVr4!xdWZ;GciWtTSVKga6B>|_o5U!3Bl!>n&A>)Zs@C6EV- zm?#J85}ef=$)G8Y|#deEnHiTnv)ly_)R>gB06C{Fnp0} zrnb=jCi01t$)}T*GPJXy*y_mIuYs`U!0v%>Jy1+-cxNrEnux6Wm=(VQ7EZx?5omY| zSH@Yzb3pGYV5i*AF;u$G1}=(f=0R-%?l_vP&2?bB8S?98WQDwO7I#?z9*e-vXXKxc z1ODxim-#@k6_l`_9iHJCS2#Jg#BR75Jt3b(2ktR~>)b%HC$!uSIiAhS#rbL}nAie^ zdf})QK+R0v@A#JL(REyT2zXBh4$84qX1=_9n(C-LHuZ67L(Sk+=s+iGO{iAxat&yD z89eeSyLk(^jQ|(D;QKpyW(W@L(Lsa!S67t`bV-CFL!7HMYTk3sH3F;b{u@)X0_^H z8%x=fzhh0@O@D%?;m_NE?o*s(ZU!T>_-!D+H$wV}RtE#It*J_dT;z6F_&))6yoMzp zJ+3^=6X=t{MA!SmeV5SNUHQg#bjv|t))%R;5WPDB8+#sf{0*}2-qe{pPF>Q?{QfCb zIU9hD`dYpOMkm1;zX05`X-p4@?U{0!OE!m$}^U;-;U&Zp&*L?7q0^$XT% zE3~ER=T2Z7G~fiUeE4V3DQ_c5D`O8xQf)_?oMOlAz|gHorH{}Lk~;}3Z=H66$fv|Ds_zE<8LTS$M$4ZPa`IgT89VYE&1?K4WaVy2$Cm(!pIO~GFe)3NGBS7?T=ggX=t82c27~i~*?FXkD|r(YNNJ_H@AE9GK-QyzC2a8`^p%I{m*`waH24Eiu9mXBE1;`% z{e4J8+2U2yrR>z1R+0$>pYa3ER6N|?4dI1f;GX5kR>5}LD1>V%{omXeFgD*Ot~KMhJxKzY@X$4;nA`3+~l z?|!7jE>!Jr^?5s zNg+{A@zuyd+Xn70@VWfTvR@^%BxTnH^Pm=Nd9r-JNSd zKdM9Aj~(+3+`kp-xWfG!f|Gk#mm*clf>o!JUPuoc=xC~dWcyR}#j8&jzh|Ih%_{gF zIBACNOz^I`1)SP<){4l5|+FGZmSVAHGnneBoKqP%k!{`~>`UQBsLROKzdgcr;z`}i1i{R7f72p?snViJXX z@(f&)3;JE(|!mK+#ONEycOlhLOqxr2PG;_fVV(i$kI z0fFtT@hb9I8L7&AQpS|>v{m0X61z!t?ho<04|$^8^XI{DcW~1Vx>eSN>NNWB?&r|* zyJ$?EaoPiq>gX`#G@s$gE_kT{+OQ@PzZCwI&r(^5niZl-AoajfmtFZmb+T40O*QYT zk(H;_$vO|?^-&epa$uqw07WWp_5 z+yKQ_Nm2UJRSZyQmG+ z26ERTq>g5ND_>v@)U_GvmK-<$4=n*#s`Akpy&B)k1g1xU|0kSF?uSR`VlgS=IExj? zQ=tC-pCiGP@3jqD{{y_KZ|2YZrR+B453b^A(vgaLCE zllhxxoyOwT2|_tQG3Y`Wnu@F2tXTR^_PgevE6-Qcm1eQ~wa9Sw&`)C>sTg{>3u!HQRbFKk zcKjfE@Bw}%0ozxwF=r6dy9L>x8uwzbtPFK^sSv*&;hE>ymE@#!RVmV`F1TzAPL-ph zd@#kB)KzC0Pf)&!Y8{omsZR2Wt#stx8Q@2ib!DtqJf*tOOzza4&mV;2R0XC7%F(y7#X6ctvcuzKi91L__+ohtw2U!Hyh`l*5Rz7bkc)~YgtJ$OUq zRa=X+*SSFPamn<>aGCnToZNXgI~s5O8uMfDxBALN--cj}v2hbBL%YF@qM(%=`J zZStYG8o)pq-YcPGO-xycZaf3M)q%>#AQc~AXWiMQWVGt>6%!AocHWKqss8kKV5!L_ z%J%KYUjyMN)g!jz|EebL$o^!1{J`B6-TV-$FJlIPa>C9KgH&r98 zSfpaEQ#rkUfQDDL!ZiMRpU+fFuG~e{p}FyyDfeC#>`CsaDoC9!1a-|gS%D<^f;-7e zpNqzGvj_FUQ)Rzk-V%Azj=!UD(=-m1uv7SP6%NNnY;jRcD0IafZ2 zmH05!Gzw|0***6G!G=&_B`E0@?4EKjuj&fg@r)K=MAoGtYM z=)skZ*@Joo9R-40`M>niIpBYocYo!ZJAtaQ{}-~>?Pzt?ag^sph{>Y(^1bfj&JLdMh7S}imKM}mH#21gS3xRfg4rTC{E?I`a1YQ@ z9lT<~sxeipNI9_Twa^cm6(6boQ8g*)JgGJER8eeMtg0kE0k+lgO+DvUBVE)7?q4*? z4Bk>JzWreNacjdsTMy)=- zD^{QhFg^LLH`o~pMxH8A?p>mvA4UdleMxd!Z1 zeWpyJgZw3GD9;sMHP~++P>ZD}?Wrb9IStJNQ(Rs58*utbv61dO8d#XwpRn5s;F8ZELTt1b@5!1NJl zTD3?!*oUkSWtGSxlpH<9@4L9}6dWk5T;aL{ybh&ghkVQivP9XafAQ46_`fn}oOl5S?qP)h)=4r=du7 zLKNk+1`7lDzK7GU;?oacDZK#KDvMZBOx1(i*|{=%G&ih#=9%)Qa-m%1hjxPUbe=f{ z+~mhmJa`xIUk`oOOYLzWGDX!{O`$_+zphxonvtWrQf2owWaSOPN{r_yGe>Y&zQF-_ zss~)ti6^O}g{IGq#$Fu@u4}Ru)u0wbPnxKv83KFY;uXlGxp4SqFtU+Z7tT`wb(21HZp_~$Ba_-~$Ev)1ius94o8c6Nq=orlt z(v%$4_};?`)o-COx~U>ikw+*qrPJlnG1zrKaMl!FbRzAfzm;QWa|hWo%8^iyNmZZ+ zu~Rxg@#?`NF0R2HHGL?Tf7O5BB+t}L5!HhC0A|u6%DC3VIeDYhg>5Qck`LM85_YGo zuywqvmn7XTbc!-z^T0tJ)}YEYRV`-$OVx**KwfLc$~t}v@~kFkVolJy8Eo}quT|Nl z`oy&39uPO{s>SzJ;bdZoO78p7Um@g<{N0*+CNEJXD6Bi&uB(*ERh8?6O+`19b*5}P z`QQ|TNQe86uufG*=&Y@Npo-3`%3IQ)CimB8PFB*EU0i0Z%5FXjR6TINw3579tAMF0 z#d6u3`roz!Vr{@sU3RT3_OsB6&MD>Dxym6@j9p$L#eAfTRD%%!;?hpaUm66>j>6V_ z9bOy?OqxNB9e_&L6fb8XjgF(OZ{SLKRUb#f_k&+N?8*m(m7V4V`!PcM zV<%~dGzSPMp2xKfQEhoDcA8S=Dftk*-1Ce%5u$sX@RS^R9Fp03E3s9)K zxU5Bs9e|qLK&Wp@W2k~84Z5tucOK>$s$WazJLg%CvZZBjD?j=y-&QuBYQ@xLNO`N$ z29kHD_*>G=3)QLmTN&*ac+PI1t^6$2iYV_&wczR-md>h`8?20MSvi^nca=|7g?2S{ zUrj5V3!an(DlIEXrRcmYH2MC-OWCYc8bZ4GBu|hA-oWpw8CI@OdCur6zOC$H)!QmU zD4Sl@2eKL(z(dLxR;Iq<$Ex5~#$yAXqS^pe#6^I#mzn6_r%olJedUd{LL!S=G*eYm zDph5DjNR@AhpII@%WhS1qPSE(JCwgoU0)Rs%H-RsLh@oIYf8c)RxjIG@t>v0D6M-D zv~?P}rAmKwP5KeMZDyVFXlmM>I+!Rvr@1HRcxEwomG}Q1C`LS6ft_f+n6e_(GeXt! z4S82{1(osJ8Tz=H>+k0^I(4qU1N;v|kLX-kGerS9zm|gCzj?w{{#^|Orf^#RhUY7X z>v-z_>KrCJtUY#lTkfyw-tEYVh3rHVJWm1-)rHq%Md}MA8>X1=#NeQdz)P7^;wt&Z zb&60OoX%<5*=8iwPe>ij&Q)!;DoK?;C(BATzRCdCZvEg<=R;XUq7eBWRnK}2dXYWZ zfzL%{(yV%^N>N!B@+|xKMn3COG_fY~Nco$xxANd~c`(X5D&_NcI+9SC`ic?P=WliL zQ6~aL{DyLBd6FKO>M7WZJJsfS>OY}OYSp^Q8u=0oDV{AAtIg(DVstNWGJ`0VPf8QAZr*iK%*CJ&BV1dkhZJ8CM=N z`GS=ft4N>xDbj@Mc`Lvyvgq`FQ6tCDqCK=Ru3LcxzZU)F%)^UmH(l0nQ~*R z0X=1+8r)xgxjNiS`P5NXwt@97WZkOZRYai#e4XZ=e(+z1Zwx{)~JdBX)fhpism&PPE^u^v!gm(_QpEUnXr3`qm?h#01i{NXpB4$@wjS&+w!B{ zS?U#_3S8l&HY?D-gJ`8bcw1m4zc%As3aW>X_7}hb{XfIf4H|Y+eI(MT%{^K)v2h8 zqgsmIYV#b$)64r>=uT(2I>7f_DcQY)?*z~W1wdLBsOr~hv3}LhYig$Eys0YZBr8$X zg*!$4ML<&XJT)6vJ#SU{ptGrZOY8+2f5NRxxwAUhm?__UJ0y*&*~`%I>H4rduiBR6XY+@!A_@0)$r=ns0ngEKn2o?F(jqUk7`)jV`r#ikE)SXr*nqY zDa%aunXEqbN@)t0_kwckb6*G6fuPuwR}<*99kxq%wAWpHUWr)iB4RB|fQY(~se6sG zj#YQumQ&FiST()DlDbbQYcvZ=D@0OB@}>g;_4d4r^~g7H9X#tqt(gc7flqnw2lW6{ zHvr}GtJkMsF5lA^oQ0kvO86bT_$7QP?5I*lbzHhX7@lH(x>vGO55msE6!K%40SwGr%UNS zFZWb+dja287Y)g4aiKVFPs%P*+$MqqZpf-sG2J8&1{zS8_3|i3SD-8%t-L|` zy%n2l&Q&^%c7kG5by0%cQ5Ax^fOX>k(oT|@%IGfQKB~``GOKCxDGCI(cC?5@DDL<@~}s8<3W=Y*qsC z_4v>>bEP;#zP}nNd@46qc`u4asvb_$OPla{Rn|R--Kg`w&b{h^rV6I=3LNDKNQyUR zHL{=;FVg8qF*`{-L0vW8nNZ9??pmE4#95pCH0qkCI-m^hA#J1jB~>zN;+JaAR7RPgaVr`%!2q-=!8(+13A>gAr5M7ZCjqC#stB_yn|ERu( z>iMiFeF(}+hi_`aABT}0v#|U$t6IIjM|wDjX_tN(Nmf)t+Rv=ij_Th252n9R#V1`s%7V~ z-W{pc%F0)V8GRzEmldYD^g3fJN~z8m(!&|hjpo1?@I1|}RyO8FpuCpv{L8v_a9{Of z{uJr4oV%?82jzXb)a^m`^Ec>M^=w(o_m03ls!>h{FXy2t%`{d`hx*BCBDi`!sj}iE zkP!uzR~1INLR6)EC1t4I0`;obo-90d-dC@ZM!-U~dZM>hTr&)g>WNHOhZ^Zm^?_{$ zM06HXzP+-vPasL)#l5~sP0nAm%IWcNv}J2 zf^yumSX(o&S1olv6KHzS2eKn1rFA#S(h5+0dv=-)c9dnjg;hj}Cnz4Ku17itE2gG? zoU*%fcy4)jDAoCOPkB=ZrKG4bTxBaN2e>*bQ^t%c6VyRR(Ur$J0S$vzRN1J>z!!kG z&ULDb_#gQi)6q3Lb5c%Ijt!jvMH+pQFT*S*55_G zp-zjk|CIfyiJ0o7qe)7_uucLsxK9@#-wiug^`h!RB%4QflK)N}GgNny1B5iyLy}cp zWZJ{meX)-vEz~DOezm?lSv^-I@ionJ1N%7#%_ehI*aG1 zKk1FoPTv%N>vVRW-7DMhG}k4eHg#xAlKt8US=0jx%HbMSrO)L$aYlkCoM*SPYIGJ; zPYDZ3Rn4ZV*5wI2#ubXt=={)_mEMBBz5`lR?{8%bN+RnFsS{#3&Sag1xwB-FXmUF| zA`i1V0$+d+RY9kIP&%utn!mipP?q+Vln3K5Qsg>(FV0hr#$k9;ku&*$l~1YKO!c)^ zhb2||$v)F`+?{+wU5o;(MEwgD1yZHY2JZ3~-@3*pn$EixsAK~ZO~h3PxrJB=Gq4zb zV|}XjJc16?n$;Cwo!6vWck`K|JGzTJ2&$!1c807E0~OVMZA@8hsyJ5PB1skXR+K!_$zD~o%6-!*KzV*zr95x4Ci3{b8avh`Z}low zW|pi~d6_g%G#|Mu|B`G$Me*dbQl^ULoyc?Ig(mbp$r?>k7v0BFc>xXJx?7P2Iyb0e zSR1ZV7bW?;G+jqFdKlk*SGcL{XvFvNP=5z4EkZNR zq$BnFDeHF$+Cq8yVfH3_KveU0O5?~nUX-Fab>~u7)y2H8J{H7M6}Fc$M*uB!_7`GIN?GO-%JNtcv& z`B84F?4L|lqb^@r(7p7TqNYv3ntJ~zcBhFGGgzg%Eh(#F1=N3q9g8KCfjYCu0#=@maMQ3C~Kh|oOBxy zk-ig!DXUEt=US`uud=Jldzh#*fubd=*zZcHNfqbir#5-RRo(P2@KV-Ld2XrvkeXt6 z4Y{VC3kmLIFilRCS5wI+dY#O$i9~8YLXxfF+OzDT3KY?sHMM2s9r+o6-JlqyY}DFF zAN3wrk1FYNRXv{Jd#Y|$lvz1lTD^yOiLxl<7tyrsi>!1nw4^%AhER%nPs=x;igD%k zsbb+C^h`r&p$T_Vk2IY^LMo^DGZny577(Zge5!M2$x?N_*J)gGSKY^Z0@YURPG>0fvi0*E z_4!hcbUUD+s8?IQtA1#jV9g-UO<7RBGaVh;(m%FM9hV-jW!aAGP zXOA~R-?Hx{19bKo0CaTjm31=|cs!D#ik?8O2sp^TQVe@7{Qf7fn1#H2o=h(F%brfw z;VZnO{x|dSqWuCzuK-T!pC-+r`f~O5QPg@a@^yAff}Y}ZpsYbz-16yHOUalfXjhuv!Ki0b{%gB8IlMhXCjFxchjvswp(9MUjl^P@Xb_Byn0{0o)Aaly##nDC(B16QE|K`~)Pn zu}67q)PMaT)S)xCy5?L24}LtPTiCa{tuEyg^$s`;Cgl&7&Ju-4va5H-M(((pt55Qh z$Lj?5Q;$-MYjlPUvd$e`p~(=MKqAk7HC9;OTlf?^RyLseSKihH!)Y&qhi%o2*G0$>&&cl5M*9b@XbI(eiHfZeMo<0Imu5_EoOtyNQT@VR zFrr#}^_maDE$XSFu3hR}s~)WKYoAO>5@iLI#~hV!C&{Z5hE9X|?C0MU)LO$i-JlI0 z9v97!m5*^RnA!?o&Vl>pL5pj6s{HoK0%*am)VHV+I~@QANp4p~3aEZncA&ies@qpw zu1*RPihb(bp;~8k9j*oxR3|$sMFX9nN@<8R)~>D$2C`iqCi&!R0NFY~Nm^Z15Z@y~ zmmvpdBO7+IO34awq~b5?sHC|y%lY(sWbJh1hAKnU6LKn+*K)Y-3g1xf&3lX{XW-z>oAg}gHaoDN4;s$)-g_O88VWBHzd zcU6_Bjw6aDs=Jab=L*294bOR$*AOTt24zZj*W|A1VA>LzzXhsq4Fu&CR6o>gev?#F z=0aW8qVu`|{FLRV4l&A0mJg+V3YV+dt)>zjV4uo=QO~oV@#g)Cl$}QP&kRMBybtKg@2=jA%Xyx1?9TyBt{{Vdl#n2MlI!q%UHCL05ejE%p1W z0ZjL?HrcoT!p*W_bqbQDq6mmQO6p3w5}IENT+|Ey7~j|ioOEJRZ2daiu@TDBY(hyz z#h)Y_RO2p>OOXAzsY=@prG5<+Dq=bfTKpJIwj9`q@&tF;Ldu=exl%TO=F9F+;p=x` zHy6D92z|;LS5FOPgQ?50(oN zHJ-{rlfO{bPX>@yXAgBels1*MrkM-X&}*vPQWsgp=){%hQ`D>zM|t-qbzSo#1w3Fw zRIkXUDojO3>K-Xw;7{FE(`}ShBYQ&;Bze`s(2%AF`Y^AR`<*Q4J{Ck=fyBO-$ z#7Wsb<#^O-M%|#6r*yMylAnOTyos70q+9|;jZ`xh$9ti8u6)VLt5#27QLlWxnpmP* zH0e{>_J`QvQJx`xm*(y$)*E>a3y+{u;>MLBQ)Cw9HtvT);rrc|}RCK>2-r}LM( z<}ZQwHFHIAJoV94&u(=yzs$vO&L$J)tB+D^4NkTk94EXlgcT*!WHsm z$bwRrKV^|`X2s%T#jGR+&vN$|_ty#FAD*uK@a?SUXL6uE2kO70VHQCbsyY9eXDRFV z0M}@u)lb|{XAb!!WW%erOT8bI$)d_KWkMcgotm$5glFV(l>?2c9DMPYIylyV15|6J z{L{)nwi}ew2`s8BbYuSN2{zP+Pq`U4aGi2D4gPkr1I;UIh6H(#e7rl6cb)ltGIcDI zuuWR>eRUbD4ZJ0*+VWDi&=9nVa;t}dtG?)byw3a!C@x1jqY$Mhs0>oqcBl zXPsM*Bhyqz|2tSO&u>%i`bVtoDEHKfQN76a@eG{>R99Z!#Y4Fh>aMJe9d$=n_GTt` zmd9RE9reTLg1#KUza7ERNG!y6kn0Zs;pT9T`ob#rNiidJ>QsmFT&z#!^=ckQAHJ_{ z)Z%uXFjYOF2t{M`xoYQ|LIJJ$t0Grv7F=7NbPrg*3mz-)8mrtf^`Dd%MzyH-a;A8d zD5kpgDX&4bAO>}t%D+)Pg=Tvwe^0rM3xUT9cCJ3(oB2yJNtr}Zd?V_Es;ME%(F_aW z8g&9x?}id!_6>Br5lbkS-ANxPJ3&3JWqT^uLm8!i!!2(CV`b1wU#)^Nq@}j-zGerj zUTZgZ*#m4fB}o1D)agpy*7vZ6ZG7`bYz0M5HD&k{yfVs_+zeeP^II7M@^GH#p6ckM z3fv0{@wLwBK@_ z`jdw`>Q$!H4sb@r=wbdHk#Ro;wrsx*=Er{LH4KTLxp5svJzcmnkMTqxwA{MC$BElY~RkO zw^G%v4jT8+MdfZfB+NkjF9r*WqiO=oYVQ9fXW-Ak^BYt;-J0yoywL%v3OxA|$5V~g zmieB&c-C0z7w==H(or|4YoadbfoS!-e zI!lcm#uj6vvDSE(S;AML^iJ@U>HyR^a~D;KO_G1xKij|9zuTLcclCsv4TfK9BNhLG?ms5$eH@k8eSq$pU}F&+_cO4cOjm|J zyg!Oh?_zyznAQECWN$EZD?Jp3^IN-QLuSf0WFGA;;GjpcQPR!)=tfXNr({cJw0Gn? zEt0O}aeJq|$G&2x^X^SxcO==`BfyrXiH;>t;TdS_1MC{jTX>Tve*{%Xwil;7ShZ8u zoib;VRGsRSsM=G>e`SBkA1}XMJ#?J1JoDkm32@#8CbjPeR^qtb%!R+jxXXChxYKx^ zx#`~-0pmvJ0OvE#cbq>s7dl@8+q0Zop@}-K>MoCKmvfPGzH^fEPUo%8-p(%0Z0By{ zZR1hyQDArt3vQl)%Ql|4Mb?wco#sU^W^-jk%M~JD_rH(R3)Dea@O6W)MFH?d07{iQi zMjfMwm(w`rILW8BBgrKC19W3i9OMq<`Vchuh?E6s!bOS!&qE$8r!L}F@Ya+SR!E++ z|6*r9L%Cz^yLsJV4}qGi+J=4II%XZWu3JItw6&h;@PF`k$ckG&JB`m9*-h+j_7nCq z_7tG;k-gkrVQ;jz+57FQcF@jb_H-3`s-!XRx+zrlG%xjP(d^RS;D*)EjwZG1Zu3KiQaMd}1Vxr=7K3 zi(GDZEw|TgyZr7u-P_!)Jx_aH@pSgAb>HDGau>JYj+QVK4+xN25oz{P>JgdNZ(AsS|8Ktt{`pUZ9s%F); z?zfg&7pxdRd#$zBQLB#qHuTaTe(KL!A7D=|c=QcwgTJNw-z(^puj#VV+<3wGke@+D zbtCAgYIHPuLXElnzLTA;aYP&qj2po0k5I+)bYzb8qx3b6Ev+OzcLVE+W&>ZT23+s*&jrh zJ_iqXB5|rRy?i;)eA#}|{sM}ciFfNYrj=iGRDtW;I}^@XuFmeI?o3Z@kJppudB}6f zGs1h)Th~|Yo#c&p=6SyG{N~x|x!{@OY3+I2y~owbb=A4S`LlDfv$k`Ak!QS)<*Ck{ zn(FuklI?q>e9&5A&9UaOug|RetQyvz=6mJ`=5OY9^9--Q&HtH?ns=FZo6nf@&C}*? za})o%t-;n0mTh&kU$x(}f3h#yZRxw9JPw<_2qUPVeFX`#2D#-h>Vns*Mi^;{l-^LpDWGP$(iB&+W5$rYrKJkt8cu7P8b74HwL@Y?90}hP;q6e9dsKq zKQbRM`&`3S!H z*6L^-Fn5~g&4g*22h6b9$*OCAk&GCBxsQ3veEWR$eH*+JeIwIWXY9#J&uNy^HajmX zn9(C6ls++ieA;B+?Vb^?KF$bGxXC%tSfBjb`YF*P{#I;WtZ%$Hep}+ML~f!|;=Y8N zdw*|b*co=Ib%bZEus0>oIc8u93^Cq!e1(ik+QXnr^&sn)eAoU5>YjD2pG9yAx>%f8u2PRQ$L2x%eZA(!`4-8sEp- zN7+a1$DwU?T=P3#O&+r!v%^?2`Sx)8W_!O?$?9xAm$(#P#A|B&k@#Kl7vsLfti-dX z-+JC!Y<*z$vWl#ytcqZJIM$`IGiq{=w!Av z%zDupZQ15*Xe!@ynM2JF%^%^H$=2sduWNyCWk!wcqgfwhxYN%1Zcn=*YkS_V0=poV z*EK6U?QL%d?+))5-Y4A?9MQy*s5`nYHrxE3RXfZr(e~kGp;eJv6AAlCqmJ`KW3R(y zyyZOVKJWGUKle58KJRV>ta`aZuCDIiTy>qBlKrha5(DBL;_KsgCfX%N#dBguB2yym zqTj|EC90d%&36;^6K^Drn5A}g;}d7p`LgpJV-FU=7^5YY#0Spf#^3Y@yK27yZ%;)p z9J11^Ugj5xF^N@)x6Mzjr|o~S6eroA*v;skc+u$QvRxhBRorddTixqC2fdSg!+rC- zhdskR%RJwBTlzlr)$k=eU%Gp{p1~6FJAX7TIlfEwwCh_P%_WJ+i6;^r5?{t0@x`(8 zvG3x4CDxgf~leGsb=+ZSCE z{VqC#*C)~UqjRGzVwK}JC7QrrBdmSa!}e)=6q>cXlSfNVg!8d6Mj#89U_s7FwuZWw zLfc*JZ;*ZUt(&as)?4P=iIMS7V{KxSVvoh&P4q$fH8HcyzsyzkHs?M5mf5W;Y%i=* z_-9_{tWVM(&X|$&pTZ%PODnZ4crUYphZ&j4;f~tI`^i7z4+ry#pSku{$#>y*%&p0o z-7wKOGCPzN-DFL6pHAzWxjJJ*T33I2|FN`F85=U^WK8h)b5C}3w!gL-+b5vI_SWQh z@5l$iYJm=c`N2)$HPM^ng^96=V~JztU-m7=RM#>0-|ktiG0xS-Fy~d*Z=TKGq28+Q zETb~Pr6Gx4iRp>Y6Goy}d{XQ}ba%9E%oneksABFl@3elk2HAHfPdMf}Z*q6{wDdgg zp6m)Z-L6kuH@T;{eeUY6X3lqwb;kdkldv-`yB=~Ka_)6bcWrZj>?!vA;MwCo;5z8+ z>AYl2HCh<=JNhPb?Q78fn8bj@1Bs!DMv2PEBQvqpJZ`m4KIE8SxjEl(z_Hc7!{Il4BXpQL1NM2-BxHSAhGCNA4s+~ znmK=W?r~0Z9>NxU2Jg=bWbI2pcAGujHmw%c93=djM8(7h@xu6)Sk?H;@z3J##5cwp zCGJSdH}kA-g73qO-2AQ;msfl?e`MO$ zWY5?m5l`&>#EQh+XvM(pYn87)QM@7CGda#P&i8=lWn-Rw&K~NV<9j)CarS|%b{W6< zXZt$&N2Gn6_Ox%JtFL``>_n(VaCmT8XhLL7^s(5;*qP{W(Sy;=vEGSc)-TCT#?P+v z?){!uy&ZkI{-6BA{XcjM+?J!iJ;O{l2bc@Zx#sD_OYz;&?;=H!J0n(PZ}eR3WPEGl zdvm@OwcmC0a;|oL=l(HYH>YwR4mfpx2STVgEFwPLH{ zA16LA^O2^e*%FH@U~RIB%@-1X#b!slM*oWb8Cw^>oQRt9?P>DSgtOxf ztw-RQ>Ba%0Hk7!;^{x9B&mEo!9P_>VHqWizS>882P^i5pz9%v$+&t`vREwUE-Wi(~ zyD@%WV!heczL8URm*i?rPT$&}BwfZl=QY6+@?h>djt|I=Z+uQfl>B7Qd7FWNNv zdbD}0b$oE5jrkUq(M0n#a~AgAv(`=6wIAR~y2G)}5k~{k6*Jk{u4gU59;}WmX=AUm zBlb1>Q*4Pp%;H4H#4GWf_@nXX6W^L?Xy)@+)w|60iNUdHkw?Ntcy_o)Y_B!T-6v~8 z(ZcGdYyDQkS*a*zpRb$eobUClVfi@~24=tRi6usd+6L|kt%&{*{~`KpVC}VvS8}dT z312YY%xqj?TEV8g8?zRt{q3)wekH?`IXZ2u+mrk_aU#Ceq>HvO%Q#}!j9(3|E_vem zXV*8B%n7!LIqdJ9?|KKPZAjmoo|m4NcEs1s>vE4UcG-K(wq_%%Uh*DeB09>Ed^}Mh zx-v93xIcI`^mwFQY)Im|wZd_;>t;`5?>VgNC!EBb?d#aZQM0-|f&Pm4_>L49_mtOk-j4 zA^RhA&lK~`M60+r_8zj@i*EdMcI@_eBEBxM5}ak*PoYDev*VoJKD3^*8e2`Qq1K<)?^uIN9hH#UA38Xl z+U@N7I1js$pV?cjmFBucL1IF@T6{t5-dIkocdRb5GnD8Ebk-*xO=Kim#wWz4#M&jQ zB%kth%l@h2^%`H*o?D}F#e(cj-UjZf-nY}g%&M3<+;b!`B2cBcddb#czj#k8#~cyK zD%)8+zid|gG0(`HeT6qw++MIh_v@^QnX@v-W!6lu;HhPQ7HJl!UKR+9kDQNZT908N z+!5~(eLZ|2loNhAa!Y)OJ=!(HeFR<{(SjBdZG1wJ#PxIt@X1V?_<|W^yw z)-^saG0nOwIo(m$Si>1MJK4B9jqMrH0CvH+4US{#%r$EkaSyI4>`|ZjrPK`)G}$|vk6+wtpA!na2DtnpA##L z{So~m+Ay|1wjo~4j9bGUZr6LB*L`FBBm7_ap7r+dY`Wx4sL$i{4}I-p+cCSM5iv#pXb>lIbyzCVZTH z9rnF+Z~Y9v*@Wac`)|v#zOmm*PH^l+pEXWivUZp!uzdcT_&zZ^@oZvB;=RQBL|3!W zy1{-ZIn^=FC^ITM*Kwjbj~t3YtI0GqqIjnyUef3MqYMb z7=P(}afirR^vsv>#!+YVa^g!@qpTMT+f;e9%Ci-lRrogNxvYVikERE`3yuDXaiM2Q z3rc=1X;!u+kQqt~-yE@{cO|x4?;stXH6C!TaqaVX{g0=uN!#jw*Eh!dqo;wV$XyNX zb~1i%q;Ke8aB1j25hFG+{#l~H>X)qTO!M5}JL?NU$AwmgSuOD{ zXOduSe5^P+9XtJ>=mW8CoLE0M>)DMQOP#lPdicisulWD*5B9(5yVv`R`)%hvj_2$F z$gC??0nv`9@C;=;X3!;L71kd(_^SEZcq?z-a5Nt=^))qkIFrYGV0!r8_6U-AvBX`({x z(?~EpDAFX_Dt0jTPP}RS>)7bnhS=%&LNhCQ#;D^S>GAU4Bi`qHTm7!|w&@rAZN2w9 z587+ZvP3O&mD$8HtUl&_aO%y8VdhACkzQ?3(^ zL)MFly79fSM)6;fS9KEo&^Y>U3o>q^-hm*eNRUYvdH zrLsODGkBmZy|hhPhscEFRsYr88HJIe^M%6;)>TMU7+P>X|5Ell{!5N{tSB_LY;ozT z(xs)}mo6@Sv+Rjrm&l-azmwd@E+zrSHgWoOLW?I({9Wcd+MOcY*5($5t~E z{U>xh&@S*{V0>_Y=#NM;cG+y?c+NG%`>20G+O6s1((N>F+F0L>o;RH9lDAsJN#t)8 z8_3QogiC`Rf;qwWgA+o{BA>-xGG9z)J43D;Jm);Kz0do)_~-hc_s{c<_ty0kx(B&_ zbuKrEeA_RXTjDRo=0+PvZ;Bp@hGSC`Ut8ysH#u)}7kiT4mweCr!rqo%k7tc*v@;3y zkFYygwau!Ddhvd-WznaiXQEBx`DQn}iDSF5g6P!qSSr(vTE^3k|0EY#2NS*HLt{Il zH^f|668D+Qt(N#`F5x{I;+UG8Y%j5*=6G{v;!xZh|0vce_HVRuv_<66@YZ0jz%OMV z1|AGAH3xZr%{x$aXq}05(`tQHrF+rW1#c8~DZ0JFyBRLWia^)PE6yD|f5VmUic3pp zmbNJQrnpJji3q*Y{FSm>=h=DZ^Zv-+RB%_tzbb95xU#~%S-rfi97_|gMo)!|!0zHh zR~KF`y8O_Uw%5;<{u5dr8)IE^yx~6U^Jgr``Zwpjyx;QMRCp-goqI0hE$?N=4JId$ z@UT#|;AdsNQg_KiCEt}b3cnCvXg753b8qtA?_ZI&DgB3xewmMF{+n@Y`Yzx5ZjbS# z)i=I4ayj%~@P)wavQ}j;maPdChgL?vO?d5i@{p0|{;zkte{uTxj3+a@WPXzIZ2EP7 zd*3sjDXuxj_sN#lGx2vLfnbHe23WJ zyp=rN-1oW;8?PqcFu#e7j*JRFA08Ds8ofKdIdR&YVzoxPJx)|=Dspfu*8CsIfZfMF zW^J&_@aLA{1v_XiOMHg(Y#jeQo`yGZYUlD=agpT`ycU~aV>FfcU-q_if;)&608-tr|iYj z0VUqzDc3g@FDg47el+m}`@O}p#5X8CH)~RkKktsb!MR&382M>7EM-aP@V3 zV|Iwm2(J#_5;$Mlw)Ck|cfbyvj8(GJou7DOesAW-+1a@te++|||jrsGrls`*{w_4wb& z;FGb1u?J(@q6Z^a!rjA1LeoO`h8&^qf{TLn!*9mcB=7e>kbiI0TWdF|JG%Cm8gr^8 ztM#q1uKMvxOLI25x%&K^^{zks4YH=?9WESG zc}&&Es?M*xx9IT-mvhc$)bst7-{VY8_`&!N$xnpu~$*qw)C1+*UqV(UrPdO{t?)ZpsysUr8#OocdA1R(! z<_uqs)=6}+HYYDS$9jjRm8O@aZ@~)@@_yny;*E0}`rR|by~Iz9Q_2xmM#m<`+TR)pcVGX6jJny4bFJKWbAQO*mNCx%zI%xy%bFOwKm2#V zT^1-=Qu0yhq(GbSU9lHUV(`Xe&W^4}T?1S$*H=!bGu^nJ9B((ay5YMBCz_gV@U^5T zs>gd_q3nvT#P`u9_I2z;?1gx(#L~og^ES@oCx{N*XSR>ejPwju2uvs)ShBKsdhwsd zJxflMZVCP!a~n<4Cl<7?KC14U^^^5>)IC)P@-B+`q&R2lth3D1PB; z*2Valg{P06ZE^XPlDgsh;t8vhbFpt`*7f|`i>6lWS#ep>jYaPk78Vrc)z13Bzs6nL zIoz?-u4eu(dVg?J$=K@?u4P_(@@gx}P)=Ujd*w>;gP~LLrO7W{J$(PoD9U}LpkBo# z74I(ETVZ$Z|FSaDW1hQ>WoB+{UN|!}EikwAP;uh=$>J$x>ESzKx8mK*vbJJDMBS%- z-P5|HeTt4Kr1a&r^y0LK{nNeuJk#7qTyva1Iv%q-nX9m>8pp;)4@EXbKExaOqd7JC zfU~yeC*ReycQRhe+?Dxa=KUF8rv2k>;5u%fj?a#K7@5nE) zw&qm3wc~ap(>dJvFwyI&&Y8}yoWqoRd06Mlj(EP)6$z zALVc2yVKLhH57k=!|rcxkAKezeP3*E{JTV5JP`#}H~UQTf5sYTKQcnLxxDWB?&Gc+ zu6*Zd$N!Q&>@{Y)#Ln2SQ8V&Eqd; zrr3+D(dNZ?v)Jd6ufjis`USUP`}8QjaINRHv}@Vd?zr~s^$$z3gMY-@Iv(>4%^qBE zW91grAE|w<{?W#Zn?BfdccX=Mo~-&_!Asen`hPU)$9k8XztH^jkQ3h?w~zmIB5`W< zx!{#9!MWxr_c!THb9WXFs(Mqc1$FG&Q)+mt#0pFE8|FTkS;=?Uk&~zq=@U8?{5be) z;8NMAWe*4J;PCMN@Ezf$q0d9{(Dd*F;s1ti3dRFdL(8I9&6wkJx7R-{V`|Ri3a=L} zspzkmSvWrb{hU^rU-?cMV-mwd3rc2Q`{&A%%Udsvzcljl*{c^zCWbb~KDA889qu1} zU!>2?oSwBY>$R+gS;sQFW@copOnb}k^Y`$7<$uh7uWzeoi2H3~tG{vjZl0Wvk79cC zxyU==1);o9)zFbpr-&mqJ3igqWDhc0x-WY3(w<1)kUlcKXWF~I)}H5_W0PIYW6{q; z-<9Q+^t}GbwLh-?dHtD^n0oh)AWUA`);^c|6;8v)ecs?E&o9J zBx7^rw&Jyy8l0bhX2_`zPfR@V@~MjFFJGBg>WvhbInIc`R&G{NLFLPp7gWAj`GqPw zt1PWtv*=3hUl~>WS3I@d8;t&T@A!nsP^f-vBqQ>9=;y%NvR!2p1HT4#l)YN|RLPem zr%Ja5hJ`*37f1TUS6PRRe>^v*{gW{!`<}cp1#62wuDH8sQ$el#zB%hN2KcCOWd(K^IoJ=;sUp$$!cFn{S@w~VdYZC9zt8v^JkHl)m2gW~+w@KV-?zToH z=Q$P|)17a)K5&2G8Ry;XZR@?{uI6fJMC^HHwfJk1W})hVPf912%!Gp{7k4lDsr2^1 zxnQO6|oP4~rG|(~9GVy(~mwRYhjhr!so2u5R^JjzS z8lP@#G=95bVg2uF-&w73VTG(&?#|Yi;VmWIuQWVA`gGRG9Vfm$S?TmsXBS+Uc=h43 z=CNm!uelHVXJ^*PD=sXnJfYgOYM)jusDmfWLU6f zS&P!_(#JSUl1Z@Ho~sI-c2C zWP>(sYBRN&nyGc`)V6hN+qP{xwQbzGO&jg*SpVt&?9<-dD!Vh^yg2VU=iQSHY^Io(z2MmT8@WdCztlf}PAARzb?#Rn>EWLa zsa=rORV~WSPTgd)yDE76VQ0cOMIMUU8?`9%bNI%v%H9d$Fn$=*j2vsfG}dY9N+RmP zw(=(6Jy(<`YCe6Xal>2zw0Xb%1!!Up@TdORE5I;_v~z(y6Ky}V23j3}KvuKf0#kXe z579h;Qw)33Kmp%XRd=YsbXTO|zRMf_>WunOxcGA?vSJ~rqW=z)? zqdRG3la|k%V2m{4jeYtCt+m=+*(gWKZNQzUk0u$|K4RIEb zZ|LgWQQ^8c(X-K8!*}2J*0<7k+q=r+cV}{aXLK7QjO*MNNthoQr+qvHKdI) z?&8cQ+83PN#C`G?HH{vMdE5}H5LFD5lLuh@MbcI1I69urNnfSnfdxyXNuI&{{Xd{A z)2+v5PxFECO+TScP}eD$6+`|dAClL|ro2XJqV7|VtNqjxYOGpSZLAhnk3mitCqI;i zNgbrT(!x;okUw-I^iXP}4AUwaxzJH-55v-Q7L=Jm1_wpdh5U&NV{_<6T^Lb`)EaUB#@WcY(!Li*C=nWoBWz7)giGJ?KgF zPs|Au9n#)mjxY{sdzIo++hDoCsPs>1<%vd|7bmg3*&vg^w4{qtzo5lU#_1%i&c-Qin0i22 zuAEoiD<73himKdJziXEM%Gh9zvZmSFot;EHd6CQy-d!Ww$INA#Go$IH)LXJD*&7(( zRBM&-N?WZSRvyZqBqNkFv?I7GxI8!}7z&IDGz`oOgayk6GXxU@L@+kgR(_|x))$(J zRUSypL!S5C^?mp6idYs^Gx~Jq&shp)t&^op441hxs)!L$Z1^cpoGT3skTFEMIbJIw z&j`&89uLefZFM!R(=4p}tZ@WsDYK?y__ne*eUFiebvK|FW&YvtT%x zi|74(B`yp5n2BW5*r8l=&SdYg?YKI8Jwd{2kC0hd$!pwowmMUVg14Vt+pMF1P{vD@ zg2U7Mrb($LaQy~psc9JlSA&P8i^^;5qaoVZkVaKtVz{S#ch`FHi96a`)z>V{Fzea(pmCSQYoP6z16^b)!%y%kgML}I9u%f4y;WAOS>b(L~P)}>ie zHR({ONa$#=OK@cHZg5&?fpkcYQAcWdjW$*^(SzQ^O>t%M4EG)O|A~;ImS!v)T`T%& zMn2=KsN9kH!e{$rcVE|Ct}fG!1h>uj0!-qpvPL$S@u0i9 z_q1=CzhlIi$SzS6qk2TWjFcnJhu;b7=DjEm6q<1jm@L$2M==BXZta_zRV}K-%B!VA z(kywtQeK^^eo`N49SpxU!|n@ZL|dW}=G@Kb*{*VGL@V~E+x7-J?saVbWOLK zfbrEHbK^ZWlqkeTat3vSZqHg=L%|lRx?){*Ts>WFTpxv-d{M3)vl=lVzY}YP8DUy` zB}^_YjSES^vB7b8no3H0Q_HIyLB z$`Xw>lFa%}035P>d|6jW+~=9)tK#1pUMr${L~{6&@C@Ni{n^8scw4zQxJV#%kk`aoKocB%_1OVGXt!VhVYK@-cnbJDgXrghj5Gu1~Hjt_iM-!V$g) z_l{{neCsrgJtSgXz$iq|@ z+Rr31Wq{oj;pTC-xS3oJE|Po74rX((_n25_6nfLs;I`Y~9!J?(t>)%#qnNQnkH@rO zzji`QysI>EMWEiwr*MjEDN2}b>PYKz(N|S3ECF5n_3TC zgO+L=bucCsN42Z^d1HlH&-#k%B0KZR{PZg35lip|gbA(`(QzO3l<@BMR`jj)&GVi2 zmh>cuw-G^?b0yjF^dR!Mv%$8l+15uB{j;$EoV7vVR~|D@;N2wROxJ@_Lj>DwIyld< zU`B4iXXyh@%NAs%QdU`WAWYET>x!PuC@Wb?zV%y)=AA6Mel_UDMR8+9ZwBZ{b?cG*4O|Y`DFX zV3X79@TtCVL-_;zasFSvF8_y{${k~mFuUkJR5M`k3!Eq5GyZ4o2k$iz*X|~+$aBlE zYJoF65UkZ9c0;?2t$|J05cNnmOjqU_^^HhF)=%kU(9!?YZsQ(r(WYn#S^?bulj>%5 zhPoF&pH&~KNopU>tBaUazA&F#VVH}SqTbNu*go76{-^M-_}P8UQ`p%XNrBvGR=vm$J~MWT{mMfcuQ-|9#%a&4vfdSIM<))<4kk*7Mqnz z;6`wbxhLS?wgHbKnaV(U$PZ9almahp7b@H$mIsJ-Q|qi%1YGg4_6&Qwonl9V9iAP^ zf?m+~{M~Ll7*BWz%-2p}bH5~Q@+Y3L3FKOGJ=q_;=$hce&cyF5BZrWsFfTlUsbdRp zHAjHAIRV#qHoiw+^D=l&bM%0AT{{b;YPGgPJEl>3O?{Id(({AwwAuJ*kmhe=jZw+C zsdv_YXic?!Dz83NvZx=_Pue3)fj*h5>`lZsDgx2*fv`|)>#@8Y!dm-3`|E@k4o~z8 z{;$5K-X88TuFd=%HWO0^NYpvIhE)tzak9QZAE?jQpX#NJ9Y$&M6PSB#K+2yFzW*mE zmUbXs*!C*hYxf1KI6vO+ddx+;7=w)SMpH8rp2BxPD@=l;JhY3+z{>1St^`oo_532f z4Sx-%SO(7W(+Q+gkD zlI(>Dwa%(!KG7R$FO`Gxd#Q~SCDoS(Nav&)vLOeRr|L{i)LZG*^_=<}ZMv3K+oaZ3 zGpU_aQOkrGM|blZW~iIVbb2IrPk1MO_ej1Aet*Q1h?vNhk&Pn%MC6aS=V!xyd)AA! z1uqw*Q%J?hV=psX8NI+k z(Di=$X^q$FsSA~Ja*C8oiVJNDmI*RJEszO#c&ktusiAyPIiP(uGT4QQ43wLx!Oama zi}O94uXQ_;!ZU?;_ID1;>pSJC>E7@13H7)tObH4*>g{#r zRq&xsDcfXG-Xfg=I#Wo#Di2dys9tT2mJQs%m%6UsL*8?vbDFP>)Glio^yPX*BgOb? zmb0fgJBTIZztjl26tj%k4n;*{=qfrhLz&k2YBckhX@+yVjH}GQ;F}6th2z3>p}4RO zbA`6-Bf32mL0)&p+fBgB%4^m#mzlbmU@`U)yA?Eoy62oC5$%uQZ1x*S3jeBZmSeg3Mw&5ZpD^& z%O&MqQgP{6r~&%9-N8e_x}g%%b~&3`TyJYG24k=obpV{hoLoD;hfvD3&s9}CDpo)p zGt)EHyUcej?3ce-M5oBusAW-oQKeOkoE0%Eobi|OmGxv19exe_i|S95wTGE+^`V-j zOp?ZaEyB#^d z9P38kGKu*}SD;T&9jSa!1AO~G|Gcns0O#~OcCH zn*u568`BfhhX-y4dr4cB`uZXBv<(LqvNiRVzRNz~pSZTWX>TrHB&J(U{j&dL_@s!q z$g7cQk*gzjMsy7C<^K>i5bK`X`MQ81bzZFGYQvXeGt&dfT0|3PIC}UZ=6U^JZJPQ_ z$*#1Q=Sz1&r9u;f_XAY}Ptrr_I|BEE;fO)Gl_QErjaOHx;o5So9oWt#%!O}K%n!A@o4*aKkv z&O!7nNWY>c09$ARoxmDrGcsvEtFhI|>SO(5`K*;@nlTj-e7JGX$Y@qJ<1jT5&DX{$ z#Q(QOU0laUK#~80W~7Um8?2~@#&KhdQQkPDM}akVS19QecQrX`dj;J_`CUA`Tz6( z34a{XG%_V(P57IzJH9NwsosL#1W!r#71t=CEg#K|XI?_{5=J&B;+&;cO)%@%$it=I z=%K!cDoV4Z4N_Yv4c)R8{4bb46fISkljXeN*#>2wvQim>YB*g_Fuz-k&t^ zy`iw?5gj5=M^yJao+;v3S2kCIE1S4loGi|C?S+}y9dd)+**s!Y08e=%B7F*SS6_8E zzSAnX8K%YCywi&VQQ2YdaJpj>odtY+6Vs3B zR1c;#_krIa92VLMfB2RBEdCImSNJIGaODtRx^4(jd;?ZwrqIpkigYIW2b3gLsgY!5 z;e$uWlaOvhE-bKeGN{s&d-)LEE+gs@;%X*zRTRMn+qWy0!*T z*gRS%^^me%IitKqA2m?zk1pmQ`1zZl3_5CkH52t4D?E?}+tKC*Z5J4{ zH>K(5+y}|)mHFxg)X)`GLM??FdY9Hy_Zm8Ij-Pf$Vl}jK%b+3KO5`MegGc&?4r7~f zUHE-`XMQ`Xf*{)ynahW1ct`MsTJVjzvTQM?3i^gWR3TuE*Qxbn9MQ?&s!dIaSDv)q&DZY*l{uC;_M#gWj_OI$gG5bzeGpZ$( zdU97OhcpIg#RB=0JX(pxXANMMJk@HAJ~rB1V_&D7D_%IyALBm@N5oFv zto}3MJ;V91PVNW%XEr;xkWX+e5Z8$PT~+y+%y{yhUCjDordzX}!BF3JwQXalwoI7~ z=EN=eu(Dr$tGw2N+lXGJtJY7SVr(%JEgD;tQ;2ZrWoAS5lndH~-SlH7#0Iz< zd@G?8ke5}Qo2$a*;!6tY!b_oqP?X=pexTjdL}CM!o{w>Dh7%`&6)E6cT-94*h7m-+ z`V|w6s;KLGX_ER_eWn)FwredA3XnttB@CgO2qg~()og13?DF}IMr$M<*j6(72u^Bz_uvpe(6vzXCL#;kI-+1IRP ztkI4s|Hzl6CsH~2fxK1e0|st!eXAZs_af^_dN-qxImDuzr^IDyEOU`9%N64`vKsS; zxy0t@PYHf8Nvz>sjJetg@W|S-nbg!;KfZOMj%E z!X)!QX_J&$rj-a>*<`IHG$QM8)>`5WC7F?^qid5iJ(T%}y~Jv)iyO$LbN%>4V0EVi zUAQNR!eOojn-wvr)&KM59w=b85G|cI)?9Ncc!y(AOK#Cm>uvOv=nI}I)0LjUHy$dy znos?uR8acj*_|qNM@$S!adI^|R(>U|mi9_x1N9zMS#QW|&O&>JV^ej6y54Hx zzamOUi2kPDM(&ZKDH7h=eiAj>*NFCE^Tlv3Bb|#{3S~|OXnq+wK*ZYBjAiNx*#dsH zLRq8o+5;dOKa@ktzsd+@j&fI-t>y1cV-s*9u0#97G0AK*;gZm_`jYBPBXd&0}B`4>K!(FO<#NX3Wfd(~$;2`qXHv6I|RO`+=m&H0C$$0rJ}T-(Kp?k(xonL70j(^y`uI#993sl<|piXE522HflR{y50$K?CE(pu?{d-76EH2OLtsb82e%p>YU z@7#`f%YJoV@JB_Kj4T{3_)^58g3YD!8%4%<(tp$c)mzmynAuBQv!B=}(QjrXtCQ!6 zHV!&*Jyy-53|Ee-mKF&F;X z+g(@Gg#G+vt{dBl8AA`DN|9@vcUE?DvYtuHsV-33DHD|`N=N0h+*E!dotDbrIbqZo ztsIz^m7rW5VBN4{pmlERlptOZjiJ0<03FFaxX{?P$L6i4#%=wk)?7`Hr-Vv@!Igx% zby6TNc=?3U$X@F-u&)`#5I@0dYJ3cTX!}->1o;r zbX5N;Q8m!^x0#l`ml%s&R+x++Pm|f{jOySqNldk zmymk>VSmTHr=&~1)*Wr_cA@`rqm^3v@^$^Y%Mb<8sqgE+6JYw zv@;Ns7D|r(^D=2qQrz#6f3l=fL0YYA$wWBynAi;!^E4unTi|<=;XzC&Yt3vQve=ol zs4f16-XHFY-s1lC5o;p$hAH9~b^%$#?q)nycVoWUUV0_%k_LyG2VSJjNL!meCm1jN zD`&uU38>-5R4Xs>kZQxO4idcGL%F!yX%E&e;^NAPp8 zN|>+o37VizkV{F$Lk)xF1E10krAjG1Q{SY`2zHX6s_XPK#sKq&Il^iHtx|Pr9h(D?>%|l!^ArtA!1Laf!RyvZkV@5-cPy8W$T)8r!UCAr)yG6C?C{5YwVZ$N@YVR zn7%nRCOPbP`=nb*`Tmqi{*?SErFeR6slSfBrgT59Jx>bPgeR^E?rq)*{>qVcGfv7> zJ9EuU12YT`@8#X*n!pK6dFnLLz}ai9FkY%_B`&Zi<UH#qyxWeCNrsD(-H%# zL;dBUDr4lekCOS=GC~jWn0uiozgP5?4qNX}3-?FPKqqDTuX!_y4LO!xMg;7))=%@C zF-~W+f572-AAA;w4^$6?0-3-r{wNPrOX)SuG4=uC4i(2#VUM%X+!d(rdJ4^5UBqVY z8}3!^rlQHeV^)wi?KkF~|Ep0OLKV7DAEg~uE=t8P8?78#D|J*#Y0vc^#w2sOIlvrf zu0#){V_r55x|j2&VSLtSYP`zH$HBMTnEEj#L)y$>1+6}jg`eR$@ALZddM>%n3(sA) zr%rh13`v=u#5{?qo2gHv?kT`wueuqb$7y%fjZn9}QsR{E;O<6*zM!LC8kik?ge5Lx zlvUbw^AJ&j?dePV1eTA$4Zzp|rIDL7J`nr+qVqTg&YcK(9wrec2MOvfi!!;}Hpw!z0G{ z-*|cVC}A}>kX^xCqlbavDOnYbA?iKpO0Z4f9N5d3fG^An)|YB23$*O!K0BK1N6%%W z`22z)ymj3a&$(xNC~vrTzbD!KTx{j4#2d^`Y8~;(jKA(fHrPy{CXv0!FU zz!9^e8{CJUB*yGun?!l=%F@`JT!2krr!h0=gP5Q(U>g0g_dv-sP9LOZlivpSrguv{ z{O9&B?#Jt|$zO7RyY=&KN@pdJSmjz3mObKZ_~NiOK;pOvf5vN>!?F#?@jU08od0Bt ziSFjx&K`mKv9)#*)7Ys}x!|Vs&1pMR^QFxDv-@}G&)BrAlA-3n8(k>kt0z%d#eHNCuvOWL%zkPsk=g#L%W_&^Zfe=&f`9J(4*kyg z=h&Yw$*#1R;AdIUlkA>U3w8^f6NZRxcMbP2tecAQp7I%C+x%)+9^WkYLP2D!lFhBf z+ElrD$O3i+laTyJ0(e;KEW*H)(eb#Hu&EI!h_-U!cT^c z@V*me{tMHJDn^8C5vs3PbCdpB&7iE8wujZ#<4e?}*b{CWNRjBiO_ zt9_sGtA1*lVv?;qVNs(p9myP@$;kLLBRGng7i8hHoz51X{ZckhmO2>@c=FOAJ-fU+ zG#gk|p5TMD=P9d_ndH)c_W%Cy+f3F1SGB=}%#7jAuv?hQ>})=_JLszuVMR5Ho}RgN zOtnlAk1hKKo|d&;=txTVZkdJ&b4?1;?T*(_)r&>kx5q#eOVfmi9Vfe(S5 z!K0zRazX96=_gCF*M-~eiN16GIuR!#j-d+Zgh@kg5iC?;GxTBE=@H-nZ?+6QQ8^Y$ z2vkU~m`(+%1n-6tK|^*NbB7)XY%l{oojOi#Bj!7! zZJ#yHn5ZpLG&~olLg}IUQWq&ZnA7KieFK5C5~&Hv^M1EXdj0FuFVF8+$s+@kwc1ox z&)}&1F*~!?$oeGaWadtpYh*r@X-@Qm41XeJj2hW85l}*eT>S+CSpLLL^%Z%XmaE*W_FQs2F=eRMLIbL(^cMWh| z6WsiDdaBdTsI1tvAwosD;!7HCa8btK$#X-rsNm2N(}03RzW@$;t%Os{+2- zspM3)%NfDfY#d6&9etLnrfg0Lq%=*t5IC%yv8(ZS!pdjN6>}lxOy+5sPDHni9-r}k zRAj_)-$6I+s>6Ay4^SKS50(uq3s#dmtGCqS%3JAsC|3F@chgLxv9p)HDwOdxjHnYe zGm;2z;rqw)#ht;^)wAC{NNgvZU{6y$ob!gD-VYrQ1k>99t6mVQh8a&MbY#Qivg&Bv zFl#zp$%bH6>|u|xMc8;|5Z#Wh$N0E#VYw?pEaZyfBbj@|B5RReUOg)}mOtZ;wbUBu z=kR>)x15qk1zhS(*FE4;JnVd0jJHCVJw+;K0n`X45T+<?gItnGUVyH)*Go zD)&-zYcs$fq2%Av8l|#6#@bA5rq6ROk@hwS%j91hmh8#YzheUC?Nikq;CKwTTBEm`0I!IjRDdqS4(1B+XZUE~ z*JWK|83dlg-q$OKN^K1?u{hYC|okb;BfQihe;KZp1 zu|WKWJ7a*G%*XW00&4mj%;;2j65+{<{$b8~<3$bDrcRzPP z%pg7y{`Qx>d7G~;yyZFW8FkX`qigb0OcE}IzDf0zkLnD)usH*MIse$ptik3Ubn<7- z_BKi6Ah(l;$p>UM>J9aPKFoAy&$A(RJ^PGlMK>p}+p5uDTcF&RE21AF^i{@FbG`Kv z4m*+dS}0s6gNbBO8JP-fW3CzR@O6d4!e@RHTs0~IYy6izLboGz`#WZw1N8R54a3bw z))4S{a=?wO2ek7g;H+B_xPNUpcmCZO$6_|0?BvITch)%n#{T0qEb@dS%shMg^ zOuKsl75iz71m?I1d|*lcqSx z+2-FKu{xq@con}KHrD^vf6s4)weZ#OB)P8hec27v3+JA>Ucam@(chT^ow>mER*H%$g}Fs-b84DCElfEi?^G6Q z*NiVvY9+v_?j2mQzCn4M-MR&2f4DK)+=p869UP^0*{|(s&QT&Kg-A`Wqd!yc$-_jv z^T3KX^BCKK>}Q09tLDrmt5P%-MLvLM+jhGUI>kxkMd~(vg&EBz1KZ@el`IW@`fzYW zvs0bnnKK583f+t}-x~eQsTS*Cvn%-q?iF!VC#nUNgStd^Co991V>Gx22gnBGIHJ4L z+U{oUHLIhqdTvZM@&fbS4fXC9?KE^q6)GRd2nN#4 zv;t|I|Z>5dJK@u79+zvv(M#jH|=C_)mw;@%`t?>Tc$m$i>sm zoW906^_@~*i!_Hi)qoN8pvyyRm=_K`Vep&U48E5IZT)9_&Sh*rF2udz?sCnz(p(*` zGG^Ot=;p*N^MU38yI~TlliSu{rv;|lfBh3{6CS7#OPGSOKtHQjF{0p&wbJ@z$yPR8 z`O?HiRB3U{NNCN+Q8s}Mjc|ZFWlglEV8T+`xkU`47J_ZKic&EzNr%(ITC2Cc&5?s7n4&QB*^6FV6WZ zIG(q8Co!Ua%=ud{g1*I2rf^ER3 z^Vx+%{9`UJ*MXhN1gRCo4y%KnrfkBj?33y@BH=Uh)V_yPoCKGG{P2|c1~;`QW>u?% zJ;9lT3C}S+)1&a2Cpa~UN~BJ9q++QNyzoUjzU%-t!=p@){tX^<#Yy=)hUMI|1YUi_`fW`I9 z`eLQQLF7Gr&j#5sh`5J{K5&+B(Ycw1Y&eC#D`4}ePv*icZJ=`>%4-%=G(RS0E$pdaJuHTw>{akXGQnl8 zj@8*}gxR|rtj|qA409{x#ghB4JA?OqTPdUQAPOfwXlEKt%yfd zEL(stC_Lb2bDNoZ^c^s`A2GYQ&O$GjUyKnOy7u$?nTJG2vzls1Y0^98oL<-Z24}-H z_7zJ2gJ};q1;0!ht}A8jnwScgCSQ{gR986eE}{p5vp$o_Ll-B5b{4Cg*%|q{jxz%u zu5}3BIbp3a?-|XFvf%JkF&97q{h83n%G4I{A$Op*Xij`3Q|MH-3G(JQJ|6GzC7X|} zkGgX%b%!iR?t};SY-b;4W0|4PEd&g9Hs+Dx&PnJh&cUfO8~Cm#z#r{{sFxAWiDk(} z1U&NXT~<0!@U?I*t7ARDyRL^n{RG&3FI|A9tFU}7lmI6E@N|Fre`&AN!qOL|4-9k) zR+eJb9;TP-#m{vco)zvIu0LE7*BD5K7=9U@+F5@iUwzL7*GFzF^}?#8w?HmFYb>?f zgK0I+iASES8q+tx>3>5Vrk6A4+0MKq48oeisp39iEPIapWxdynYMeG) zpJQIJ8-wSTjW`6<+kqQgNo|JqL%(M}vbzwea2qU0oPnEbCR8Z}s23DPpQo}?W#DM3 z!uK;0Q_lM2D6qj>JGHDb#!YRWwnHBZZre=a9{hZYP_vNtp>N`5oW;;>|Su5 zZieqU4sj<7JO^4M;^hH;G8(UVt2AO?7VEXy+uUb-)AK@GpQ@Gwqht*D(%VC1Xlt-~ zFncg5up=-j@K2ygATl^fTA-D6y0Hbsa#-QeK%lq<>~nsc`;2dd|0MYH_rfB=vU-QX z->9b33(P9TE8ntsys9q(Qr)?z(pGXR?}pA7ZBFIU^F%$7TFKYUWN^Rsbgk# z>XIGld@LBi+*nGU5 z@oc<>Z)ho;tgmn+?11Q99)7)XV5OD8eW_?|2Y1JVKIgu1*jNE~$Y%O}F!BbeGn6}W zKG`c*mwUrirJFoQnh|Oe>>c0(odc1fc;%5%hK%7(2&;u$Tq2ybmoUwR?4DcTPH*xb z^Y05w@NRUaFtx~!&SY{H3D-+N3rv|~%bqiS* z^RqB8o#)V{=uSBM#lS%ug?D+`?nNvl3y=q$EOrlMy9V%gZI4c2FZ_mSdzMwxwDi6D zR7_c)+ppo1oDmL|x!{xjcV`wh>X5H+x3AINsiH(C`#LxZFU>R7E_)F?O;q9$_CU9@ zufm;agOOmawZ7XE34vNb--9E_Ft$6}gWbj6VfUj($-xYSKXeYd1np46s7FBkn!;PB zq;mnD&IRFBS^}QqbKvsc6dpzO9TuQ5&&$De#|`>~*FXoK8;JEiJx zoQ6#I-MWYK)c~HbruhI-{e#&bOoi*6J}18;rZGCPUD%t zRG5IM@Doq^ZT!7BtEOEI`+knWPcj!=O-o`PJde)H7|c=j3EPZq&$Ooxp~fzXT1O-I z!|^r&nfNz+w42+z?f&QpI>M7V8P3+-h{edsrd=8C$<5)*P!)A#E8;8E>F162sQud- z0o>bmrU~}vIHS681w4TyOp|*m)s?Q8XD(DuD3g>f_&-M3hl%n%sh>1ZDgnmLdF=&o z%_YdW)xi?|<*cFJvO9%mqVBHe8R5Q!8P#G`(KPtcr=N2Rj9cag2j`Upd1a^eWJM! zzAQnz9pMGjbfe=~lfjosfY#x%dB?hDw}3kEUwfi88Y+a1<_+r~XD=~_+zV&mILy-f z+KG5J8rVafcf@OWMKa_Dr>Z?0yz;j8IA;ZF_O(Q7?|3 zLfr$`Y8TwV%Ym2i9$Jx+WJzEr`-w~NJgY-?hVyg<_^5A(Zlng-SsrRQc@cN@l0D68 zk6GYpqZ#Vf7vKQJTQlG#ncJ8Rp6@}e*x%_Ou=Lj8uJ^)^ZrTOZ>E-lfcpg5&q&the zSsElQl_H=XwUoiyNxd_8I4$8DbX-z1OQ zdk*vVPvS3EbJr)~wonLp>0j4N;W0m#OJKJ$<>39-5?8}(?{e!AOhelgPKUxv4 zm!GXSVB_2C%rS6dIJz6W8uxy*v<%7tt8+? zFRbO(C}?K>Sd(zQTf(PzHg;-kaY{MSaL~_1JSAq3BK+kQxKd;T=dw8}?t0Mc|A11j z0DS-zA#^@eeq@IGG5ARRqH@x;=)rVvIv0J3Dh|GIXE@F_ zft#z14yh=f@sgdWjc`634gJV{ZZh9h z80uOn-gYN@&Un9ghkAE-j=0_K6tR^1oI8)FnTPYlx<9yz2sfb;oW{;zr?XwzZ%h#G zr!7hB+khtH1){bWb$4?+FLVRBt$x5BW*NJTFmU%8SzB<`$H1ZXG?)jJdCFL1oJG&s z2HdnvV8Hi66}Z-@W#&XB(H831beC@z-d*3-ZKi3`WwDJ z1!vy>sKRtPx(?kBET1@91V^hiYWt<|gKvow-Gf|1YGh|bp;S14o`gPe9Mz0!fXI^v zp6{zs6P71mfa#qXEbW=-!=iEC8`@{Ag4R-#fx_fB++N0@>+G%1(}x4Y6!oi^Rrk=Q zYD>XFAEH%&*P0u%>p{5kHIzNTJnPE6pulep?fn4xrrchs3zy4|`h2KZ4_e*OwPCj@ zag|(7k6`a|*ZI!ETcL&P45E5&_b2ypPgCzP?`iL9Z(Z*@&p1z65AC_{ZsFbq72FL$ z0&kt;7ji#Y6C72N{zj%^x7bwpZFhFMITAKcY_MLTbK}jLm>J5r<^#Y(>0>#VA6^FI z?-$teONzy4%ug%W5jqUFoNVa@&kF7 zd4>+Ua_0 zy@|dETy#m#ZDcpT>TQ9zRM7UQt>I2MOTD9hg%0k7Izp|X#(?ejT3N5GQm!eiT16cL z_DdN}(nO;J*e0C444W$EIgN>8`fLrja%W>Q(I3HZ`T$I{8FpRtbaJC-ziKIF zthpcl&5ObO<;*W|9bW>>jDlC-IQ1T8L<_Yft*G7uufAYl{ilzCaw#hmL$SzzU%}G2 z=ClUp+7TJ$AbA|ld2;PHQNdTIhrEch2gU!63V(zt%tCML~P>&iH7$?cC6HB?51IOfI2@!ljO6 z74|l_jBg@*1^d30YaMtZ-NY|q8Fy~CC0-Kuh{wdOa9(K)BzK>yuIrc3Q+R}aP~#H0 z0i1`M!JcDIK=*o-bd$@lPv9(YrdIGN++cpieEpZcU!SA*)SK(EP+CpXifZ8+yas?M z)qw&oN$26_l*=e$G=vT(i+R|rjw|6u2B~j%#ywdI)qYKE1+bj5II&-d667lK2icQ) zN@WK>tRvWN1rc>9+5&&V08aFc@G^f}5}3x(`SV*(uCib8?+L*I<(Q&+F2=f_oF zr**;kFRKlMR`Q1SN3*qxxFfIh?5HCS8K2-$-`l(h1<-QT&~1Q0OLlGS<$LGU!Zs0s zYz4OIKyZ`FLqS!IY>GNzJhIVSJmZ_mGUR7$m^nwhLf_t0I$E;xHK{+17B+`g&bOX9J-4|M;v-CEcJA&SgxZ8m$ z3taUPGGQDkVt3Lj^yHOM!z@Qle+n_oK;=3WIR5qWjFl3a`_#Bt8 zZRQnpYM1c(ioFBBv5_Tc=R~C*htGEry(f*`Kn>wR|97`e0+c(05ZU8_E5>6!)EjQ~ zW3Z3uBu?G~)I8hJr%uQAovpw_FJY5T1=Kp{uxDsEI=yYEH?KioauZuh;+&U=tPZe+ zop?8!tgX<`rsJgpmwN?tVIi0?!@xqXZdHQDEjO6wZSnsrthZKPbZmRzw09mj_TTLY zAML`}E|TID0K0WDzT;f{JP+=CJE3{|0nJ-HIT82Z4621OsD10>?li%Z+K=pjT@e9d z1DL@1;I#jDPx#+0#Ubq78V7CX8)VBtc7JT1sf%pMV6(wjeD9}tci$}*{_Q=msb)R6 z+_&MF9Er^-3vm)k;2j)<%H$`$YZGwQwjt6-Qe~*#n5dn>3HG2anM=RIovq501md|4 z?Ae*9;fLS^_n=1s|LjJW#)-c{?Vy$+r;est;Us58HUEfQ2TpT4+~GXfZV`<*e-77q z5O!P@!F&I^9egxUfr^+WJp;qMFHopT=p;P0fh$U5e^NBGJ`aE;Cs-|^_ldJw;j3ZT zIMNS0N1Ea5_PElS5o5oY&%pQJgKGLIwr*6#?=Hf9`2r15lwBNH=Po!aXGLZ_25q^D z?RL@FZ#En~+*qaMa{t8pgI&o{Y2J_pvqIZfm`py zo$!-Su&3eg7UjR&h@ydGd$1i<#=bBf_)}^8%HO;32eEWMlo`FS;U@vRmHzI1DdY%F zsx9O7!M=+V@D_KV06zOoyPf?DsL3Q?W~;GfI~p8kCBxJ{eiv zOJ2u(H6Naivv?ZjV0+77<<$u|r~iQxcck4GU2+Zl7>-l61&V=qoI=5VY|R7ymc@#; z!mRwbmyN($YJ%r^DAfP|;+Z)J6vM&}3LY431Zv#oc5| zvmuocTYgTV{vLwA*A_p|K>nPC7}x|)Sr$a!}ZF*7Nvm7%h;N;9@~m) zJNckFz6H(MI^eXU@tpnL^;Ou`!K!?NZ9lg46EP|kRg;OYz9BAT1%_M%ah$=uyn{P? z6?e8MG_Tij4zfYHo&l49f3V%=J`}GdfRuH>uAWL@`TyO=oDX~9s$frC3B;q8$O?0a z5%_OBqFOWjdQNQNdklX2OKcnLge^Ze5xeFA$N&4M3&FEI2LJ7XtJDquVgvZ`&cL6b z$cl(81<-rsBa2{P&Q$UR`5yJ$YjQGG9Bl2@)H_6?8OX)gsSLM~>r&R2s1VfJp z7t6vXf}F@B+nnOaeDle|PFAQ*D_FCfx=`Mhc6@Mz)lh4`w%=pV$yo3Xf1-|fjT*8T zo`%X!V<0J?6!H2IIClp zFcZ&0Mf_U0vjv#OL16sPu#4d(&Ujz+H9v9oGa&}t#s(Q19Z$Md-9Br-x34104M&YS z3|k@A|9{`e2y7z1N+!`Y*fwlVCV)s)p5DPU;fnLs_?6rPsQM;)$$_TyX$h z_GKX19mql0brcDN>NKACmB@URp$1)zGwnyc{r9yHch9sl;Mo+>E#E}HTm@G$8aU_% zJZCErQ4T>vHXX6%Kg@O(0IQmA6@dEvH}s7zM57MqL}l}gNL##xgc1uhTuiClJ) zS-?&u8@%2Z=&GPRhpj0fAXW&;h~gZN(-TF17CifypB><>=)4m>Hz*zk1;h>G8iK^#1Q zD}NlUnrU_{Dve3VN$0W2uQoXP8?fsu0s9BvJDo7M{EY1~M^Np=1A8iif3F*I=_llp z9*FRT9FLP?yWs<%V*}j=+`%yW7;36M$oY%0J*Xr$RT0<^H3j;$VNe79gCD1`K_Fyy zFpHS7p#h7ph^-)^x!u^R&(|Wfnp%6*#_P2|VGS)?U zv$)Zxc?od}`kAk<;n1z4Zz*JK(xdgM+IzK~ngNQpF4_V;%q#$% zMG~AQ${U&Bai5*|K)yzn{|ZECZeu<8zk>A`oE2AN>X(W+!$N~IN*R}MO7mE2tOJOiv0zy( z!a0e9p9pUswES=rd1vm#dnJr1+7k5beW1NLt7eorN#5c`-t=o;&L6Lkahx#7(=960NAa|Nzd zHoGVvo90=c$qCljvwEz{!d}O~*=yl!z zqdyLxgEqu7>}MN94d^AF86iJEV50>3^W_t zL$DEW1>Si+q9vHf#~6iu&8^^L;VN(u8-fN4orP(_FJZAOr`Sbg#PY5j!Z$9NEzcff zy23NGC*6d4O4M?$1AX*hBS=YO0~jTfwZCk=Gs-==CAbXXP^s2Xhhc95@z;%Co2-`t z)AJlqdCjI!Nj*ll6oqNU7ED$eGcDN=m(EXvVyPD^G1Z`lE6vvy1__<`?Ce@=I#!3)W?0Gp0cu^{Pfs=)gakgRu!_jFA<4sQhMm@JKh<`%y>RR!-!x^FT)` z0MF}H}Ca0=EUW}7moNf&qN~SZj{kh}(abY%mp%#g0Vh~yHgR8tNB$RfI zcAaw7b9EJ>`6Fy^_)lIUH)4LMB6kn42AHwNA8n_)O%cFbn#vOi*ha9&BH$>K&nXT?`f>2k)-!LA;f^x5n7d2~Fmo3|LHrsn6!r1F z>Z9siMz?0Fv!k)Egr&d0+2oW}40*p2xH=E;JgQ(Nlz@7CxpCAyZT*j%#bG z9Vbni+O}=mw%vbiq_(Zpwym#LnmToAH(2lN%=b&5JWbnRcV_Or2k&{$dpc_4ndaIM zK1qL6Tw=)c&T)c5@ru(y60TA~IR>&)Rd`Bn+ZTS#6lT3P)H?gGsck`HWpy^M)BzR$ zj`WkhL@({A!{lg(_V0+)?5W|YInjw&mRj0jaqseVMa!+Vkm5h5j@D``T?2N9?}&UE zaV$8_+R>6VV0h?*$oS}(=);j}*x2A8cuTcyooq8~Tj}*|WDYd!5ua%n)JDEXp1ky# zg*evOUG~?o#gZo`$}@+6ys3d2hUl|GCf1 zy}`zdrVEzmw$Xttf{F#T4*X-AZ(WBrSVhZV%VJ9>%Otbk=ul2ci-fCMBY%68OKzhL zw7{Fr)6~_|k<-2$g|K(2YwQypwVflJ<(!8d6CKYT23I;ysPD6y3qDlZTu9V~z?cH1 z5%PDXl2J8^risRRh7{#3?8?%{By>0`n`#?RQ`4M8wepzsRvJV^ve!x-8G0`4mADsFnWnIdO z)I`T=_hfc^VQfQly&D)&4)K`rYhdGu`cc=?)eG$x(A!!npnYhzsFxY4WvrCGQ~0W& zfi~SDfoCqV4MmS3&X^>{X{~)W&r{gnb(}36bL={b(3Mi>rc_Vf`gho$d4EbJ1*G%*{GX z4+gs#W@=B)M_cOk9jU?(M?+wvdR#T)gPMW>ob+cU&$+LKfduVgT4Fn~5fy?0)XjcL z7r~#Zf+5D!ubo2)QGUytWUrJNhTufH&>u4w_8ZtpdVg-}rnR`2O3cX1 z(cfBkM)wuhc=TUpx;~*hFbyvJK6j#Ph;y60a%zs$r}mq!IzEU0yPlWHsBd6av?GU` zESI$XFMNG;WQO2$H3Dato*B>BW`>(HEXq_W!{dmHK^{w>Nk-#pf$120_2#B$vP*mH z@xs>cW6$R3=SZ->OD&ojn3_5DK+2rt>`BA^Zb%xJ8t!V~%>{>U3tiUV=nK27<&@?c zdN7x(uXP78KMEdbLQb^}4jdUYGH7AoXIr#&mN~{`H(oM2jDL-ljq8*yQW>Em^Rzzt zGWwc&r+eav_&>RNxfa3PIAYIhZ(@Igw*Cd@c4h-^a6fb#sZDM5mQb0Qrf*{wOf7PL zO-w=Q|C&3KeXEXXj zzra=2nF2dXTaV}Rg0%1V<^YSog92bnPZV}>w|la?mpeObiSqnABbBvtAoLkM39Mg7 zH0Nr-?i5{nsbgNHFYdm#iGPm}&&;Z=@-1Pz_FFG*oF1Gh{kV+j(^n07W)3%Iv@{Eu z7L_fdFS=v8Ho;L=moXL|aDCGgb8$<1Q;ghRo9r3u%;oHeqRCs=8dU4&CVxmqL*4Nf zm7w6%_9>%NKRO<{d%)DWM0S1!2G9uDXusr+#=YiZ*1guY)`ON_mPeL&t7_{NkUd~6 zwbBOYk#{k7N7ZkI;UZ78khDPfjec+p>N%k(5sh+WV8`6CJM9G=6&<VLs_Rxm&2q(!W-tkFW^*NC5Bk9?I+9Kz{I*wsA9fi=G|!W{PfJvyQ00N z!k1Gl3bV@q(i|%mr5=@9ys&#k%E3^>v)Ho#UashohkDj;Dz_ zQ8S1c<&Wf;`GnP45#gM%dr0T#8kz0U%R(Dl;teOw3qu~KuaUWDromAULXxc|Oe3f` z-{QG{vlOtHjHiTG-oDPK_EnB>*IK3tR<@r>*_2WlMboI%amiJa?j_AjjYf4lj*KLn zO6L||cTW2nshwesDbzC0Vzvx6molfAJ6pfnegs6Ky*k>q+WO5>-_q4Q#&pn_-*}bh zx=udIj1f`u<6Xx3s(2IKE6_+t?>z5l=J;lxL`T^ydz{@&$5dO#A4fbos>40=VStCD z`q-A;QVfi0i?~C&EI(0>8zdNBrwupgc&leTNAE^QQ*G00-f1y>-788zbV$E~(yRxM zd_2JnH`>N7xE0>ryi4^9atDzQ_fzH&5t z?KUU`|B%iz&Hfoa@F5zY1L5v{BL}+xf^n0$a}3kC8v5=rl`_)P*nP+K%Qe<*@kDrL zphEQ0xzTB)o4F3BG15JSzNz%iLXQ8@36$a->)GpzA{uTb>QZ4@L!GIQ&|R4p5R$jB3p^jF1QrcwK@U?574q`tAhTfZXi|(0y4d6OrCJP~9GQLBQ5pTo z&RpyK&k=2}nrbBW3b(hnce4+%ud{b^jCRg(4RE(c!Q>3{m9jCvv;(+XMsb!DFP}%1 zp|K&x@WD`pX&i!Svgy9*j%l#zE6S3+jcttum{hnEP03o+yA!0rO!BNI4kAv8B16xu zouCT0j8*(i4gS0D3EB{o;Hc(@+uB>t3>H^bPtubF1F^|Us>^#2 z?tow)hJ~{S&Tt+071Nl;Z6b%sp|3PPYv^OjCoHF$95-F{>DmYodcttqC2S;`eIgF9#D(3CUXx*Cy0DhHOXqns^j;R8?kP)ba(R!I0UlxDp(<>=E{lsWy8$ z`}@>YsWVbrWE&EHmifqK$iwV-yGUWkEIP!q)4;(hTX>I9vLlx(miT+$&_1z%7}=m=_@ ztUUyA4baX~-3?doqx&@z#e>Q2qjYM;xh6WBIQH4c&{5xjj@d@`g{kjS#-*G}@ulW) z#=4vPCTM?9*iP2nu$M%^YRVl_DJm&_=5#}YhFMBlW&~!BXd7KH z8P*;azbV$(#PD4nDj7uwT&ZEieipTtcZEA6+QnBKs~i&?MVJXx(7qlO>cy#6`%C*i z$2jM4=T~Q4Iw3QAT6;(N+N&k~&9HIc_u?mUK9=pOQpT7OepnaNHq&17XUl5qKsB#LQG%6oay@w$eB8t6CJokF`xl{kRY4uaM^12(3*L<0YaWyL zDKoV@F!8wowTdcK`A>in57UmJF0)MNf699&3du%Hj+Bn_k$RUw`BT9r13B4SgAt-%d*TBO;1A^NIt+!#%I1TYs zR}U}&D%kfQvkiuNUwM6=eXe!(1}VLh8>Bo3D!LKSQf4RFS-L7W~g%V=q%W^DOf(b8E{(OBrh=@aG$r<9wX3)U;GIzcBVU#K0J= zz&yHkVlg3$uKGW#&o}`h6R)1Q%evds-%_3#2QSDME|UA#a^y!_yr*-5Yp6RN?DY4Z z#>`$Qiq_R;dJbLm9Dane^&ZWRkMc4@OVcKJe1$BP$S32#YqBx<^(bihWlJn7e8J|| z2xE3JTtdn41$ukksRqrZ;$1{ffS0n&7wCHp9^M&t%~+y;iTM9MD|rVNN5?cw9{#U^ z+T(B#{Hat=qhUn1WKGKv5!FE@td@TwII=^XMg=m$UrFumTMuKjtLM1;FLPgFQD$!NA1eXCZAv`UT$( zS{RfksBT~nkgi7m*h9aqv&}P%D^Y0Nj>hO9X_`KiIQle^*JHT2AJNblgN;1os=}1^ zZ|3OY0dHOBmO4;6%i+mAYEL9rKldp^0lvX(geoU%h% zjsE6tWvn3qHd8xOF%%0s%O$08V4?j)pO9Pp21?OO?+y~wU9Asd-bO9MX-FlC1L5%2 z@{C7ixP>ReTbpXeC*LMmw8`G1?69-GN_2Ug)m1vN-s`;t1+LK$X*1P{R&q8amtl$F ztf7zL5$u*Qr5B3-o0-lt69$M$$uIvU2OJ8sD?)S&xj;1LQ8U{|?KdN~^9ohc|4_;? z>AS%CCzB7#V9V9?&&(>U3ZG5Y|H8Xi%Vg6Z_{o!cL-Om2LM5!RnN{(O-hh+Rc;0OhUHJQ@ zu~K_#)fuIA@Lw(phhQeOqqf^0HhC89j(?#4GIcmJmAE)~lIc)sy5c$IaihsK9;Qz# zZy4xhDDlxGZ#R&NKHhiUzOc<|f;M$RBk`Gf$Um4J6HA@57WX?&bc*$*NzxS9sqxZq zI=1~xshonoWd%bP!*3cDRV2288R4}6U!`s@o`c9AKg+fq#E4)H@E;x&}Y~w z^rudK5ccqG>egH6w)y}ECw&@%LZ$N-b)eDsxImQ9Yf^i9O_y6CEf(uB98O#Vc3w^P zUtY8^H*t+;)CFn;F_G$<1}AqC9Edaq#B<`@l_-&C;gnp4$G;n%Of}dRSBR6$^uu1G z%J_k+XaF;O5moe&FcB0g-$V6i`g+cTZm)xxKL>=V97x?+^sI|h`+ZISMFK1I7A~kF zO{dP?iJ2i9{W7L69wQediW>~4>3^~g`tV0S4~y$NjF?wUNZZBSsBJL4 z2Ew%~34e43tGkzT79eecL$HnRfOPOcj8sjw=w zYFmyOz5jXJdoOyAc~unHzN78!psTSQ^N7o%7F`w{@i@338MI>5hd*oG^&N0n>T-@h z3n4`FJ7EPHnABELnh#s!11y|NaETAWWEmZ&B^(ix%n@PUi@C9M$Lzk6|5JgN1(oS97R^a{n(l z3l+dgkLVxuQZTPR2pXNJgJIQ#FmrJRGt{5M7m9(w=LBIr!3wvAGq46u)_m&q8)2R_ zrAAyw$g5{18qT2kwIqEcHN_HiS6zjF@f!}pP`Zx_3zzxnOEBIplOr`}eG+-^2`Dw3 zhud^ZDlR$L2RGn9XMx{z30~wVD%5S^%w&e)q;XDkn8D-44&+tmIs1LZb8sv6axITh z$6f$SB!MgYt-YlF9|8ZY9hmPRy&<#a<6wFHBKzA2Bds&FO_8e573$=HFd@2Pg+ldf z)JbEg^k;!5G?_a5docePm~S#a(NAy7=W4`@^FaE4=|*;x1k2(%|E>YbEQmU~$m@Ax zV>#Eo$jwv0c0SXW5zn63569({_zu=YC}(yqD;!Qg;%|Zc0v69t7;ja1uCZhuW6|tv z0E4Fn{EcF$ujl2uerbs?#L96emwEO}h=2dWS$M8VuvNyg8|LwQ4Y*f<`+mw#hQcLl zz)yw4kGl>_`>1vb^!x>PyPP;`Ih>{SC^Og8^1|^t0$(Xuck+2Dd%BDE72ZGu9<3`3 zwt4y)6dxaB^PjROHsdqOq9dl#+jJBzVx+iIjDz{Hf?fp?Rm`^R)+un}3UFs}?3X*( zgEw@+tre;ZemHxF^q=rFv%&zK#MABw1~QX<7!I#;9&tbdcT<#(l5;3{r%hwZ!2M*U z?rh~rY+w@k3b+huJM9$w{@(1z1>jaaz{6T$E$)Mq( z?9x7TS|23Vybe$8E0(Vc6SdcCSM>WrHSsq6N1HVV6H{_=p9k0>bz%Q)g#$T|r??Gn z-B*|xSLyG)Mt$!c{G~rA26qR8y-2MuqmUPtdqy~TlgUjpveTCEv#I=S7JV@t6AyVd zZCU$Np1>>iXuMXDU9w5P%g)|Lg=#48*;Q}C`?S?1*3FK0IRa0s6I0yt(QoyReK`fS z^Wr?Ud_0vAu;b?POjqI?*TPuNEl#0wUPrHt{!eN2sTYZjUBuJxc;-MF^RAjBR{M``xQJ-on5-oH9Mz5?GJMkOX2oky?f-3wzSH=}S@4uK;I@tl3v7)HWEuKp>zd>A|E zsgOmyDy-(tJs?zdI87~xX+{XM(SV=96KToyZ3JI2i6aH0ewQ4-G+HppAm!hL$$DmL z^9uZ?P`F2Tv0jC!S`>q~brMf`oQn8iEd~50T3F8+ysb@5>sW(wXLCE6v~$ex{!+&@;iDzf83$i+DqPsF#3qRDoTZAZ&-<<t(>8LF+H=2qgn`;<|0nI3Fcya1 z8W1qIwp1U3KR77VCC(}gPwXhG?_uZRFJOX=5X1CM{>NGl_S_BqES!o}bVIzr66s*4 zKlE8tYzxzkIUQaJdxYQrp)wdGtmCQJwGOOP9QHjc_u7{gNsBe6>d&;kcZj@>In715+uHc6 zo9I04qOrfNlnV$wTUpHt{tJwjcj2L0~U z=?Z8s#$vT55?5ZUH^%c`PN>=$f{$Q(^PAEukPNLQji&IgmAvcsBYu(kceho#E64=(6 z(mru7=jgkjgTsvxev8>fYWOP5Bk3o7SaZeGs0+1Fi)lxMI9PGRK=}>}4`3vI(y!2+ z|54kC$NZUQIoTg6ehbg4K0Ve`(H{Lu&9jNWiGBm5CmVaOCswi(Hs&-{F@vy^+zw3x z_`Uh{|M1Y`scYS4ddU@Syqev=O>aTWFN!Ouafe}#hf*PH#m)^wp`kUi6lP#cdUF!< zP#b**=Pymu@-Q8J-NX*CeeMcJ^ib~VhrW{Dno?pNJrbRO|MYY)V55XSSi)F3fU~e) zGfTg)Fx#;zgS2njI57VDSfMbX6HLtxR24D_#X%OIU^TuHbBrfX7>yk}O|`BaPvZbt zS#y0Y@%>GBu=#{tL{WbImVQClFFp}+!REZ8&BhmegN4yXf|agcq)#_13ZS!5K(H_i zDTE$89glv5PO2xGz)1~7MQ<9{n(e<2<#3x4*9 z*psS2R!-PeJu)o@69TBaA0hsIPqg@!d#$fk=49Q*kCh^NxvifN3P}HnFSVoUZZ%RL zEVL5Wl8HTlBeRKJe2E=8O{gh85>Mh=v%m{{skNb>uK_XpcsjZtYD?*(KTQ6m2~EYr z_ze%UvEuPc!Pa3Bin`PFzBBMZFSIM;;6I;I5T-p5q|TnR*sDEHyyckc&>G{HJq2Ka3;UQ0RIkK zcsEmr9%>qx&q&Tldph6+AtS3@8SnE`NFr~YA{=G^)fbYq!Z3-^J`~4ewR^xA+=OR( zFFs{H(lWgiG5$PlxK6i_oJHWdJC~aI=F#9d7?6G-bO@W z+0X_U#%XDV%0of4Oy;vg>gl>rOPoh`oKvicvTil{wm#pzes>sNWr z)~Iky^5@_cRta~ppuxgWZLn6Am0k`D+79Qv8&hq%fw7+cpm$S#5RKt&ZjbZin?eB5l3F4zt2=`!u)n$od38m z20QkL$o#rCR88^6!DtPFL;f3vNH@;J1w7w3&d@>B-}eePwVdcfb;OSD6k_2y90nx` zAvXJ~l^0ISC*^tiJm&c1B@QY;?XkUhn)>M|tvGvbDM}WB?3VI)mWE<^sigRZ{>A6` z?&kQagMswyx5f4u#0A)$ z8q$BlD%AN;;X``U8NwP0ue7z=ML4cE#bqEdv&cp#6ZzVRdT)@=jL7UlEr`P|y(66*>?N|*ImFfRpg1#x3v|Urp+_-+Q>*KX1y~8| z6k&>ZMEK?3q8`+8q2gVD?>h8S)GxbpQq~HkuyYeo-I)tsa10JeL$C#l7%42kla_`t z-WarMJGIx|cK9+K!<0H zm|w~wcA={-4u*Rq`RN91*JR-N!)0wzA7zdLrk24>@^7a@h{Ve;;8VMKUq90%tE1RfhohE*G%Jx3xq1 z04!E_qMwm?wCUP8wBthXZ0CfU(n-PX&!ep&BDjLz8BE7!Td=i-Xr!#9cVGi~#&f#y z*ApKm)5Wk&kK_GU(V5m>zpJg#1~8$fD=~9-BH9RgV5Y%FJE9x$YSpo3YxGZ?$jo@R zOk{Dr!9oUOqnGOq=-OzDeY#5aDH59})AxOaUD-jOudQcAvf=w46Bp{7>>#uka&mSD z5+COvQk;(;ok0XShKz18x!d40H82z!Cg_!QA6exrGKizZ?%AnPZzaR5E0kuZj3=%> zteJ_*W|NK76jQOIXSD-hjNgP)AOg4bL&Re-y#7+*r%+KWi7m*WHx`P5?#;yST>|lR zkrjMIh3XHnWP-kdpL!;&ARaD{C5{4>K8V_2OMQ>l6$ONGtj8d*4NzM8j!qKiT_fB1 z%-(sdjiZM@ncVHU{uHDmD;dNYPESUjR}$KAO|Xv{goh$rGVqL-WQt9(rcu0?Q7^CU z*Nd>Kx#*D1#VbVVAJ{R?gmvts?m|Db@b@w&Y_f2M`0+V+wwhSi`zBe`J}dINP< zGbi!ELcZd|{uUOxbAFrBpBgASK{*o0@aE`Mz~x#Ao%Lqe=$6DkM~K@?3!&`Vdql(C zKnGXANEiU>6CivdbDap=^fj4I5qxQGEP5h6Oh%@Wm51H4hkT?bxQLBaI7%F`k!-Fz z`DJ&a*m-&=^|kWE-!-|nk9h65JoSw9W4XBRYG6Yj=<%3AR`m|6(~9-U$NLSzzJ?1o zgzca<;XH{V@S4A}cP-?_)&FIQ)tIW2pw;1PIXHcN*>AVGmkpec+r)7Z^w(78^p4ZL z`b#qZjoi_5GB6+K^Dfyy90t zdWnZQ6Jzua#O7=1+={0A(GNzgViPCu8~2Dct(=>*D;dW*SP0&;nXCTBUhYa4&;zoU zKzxTwyG!h_pVvK%a#{iKm@jyfso)b6c_RBjLfv|A(9eEkcAa^bZ+Zprg*IU9QCQ>c zp!y;{?-|#!l06y0-sw(^R+4qv!M<&&mnDZA&Yh*zLd(KNErD;p&tCt-nM>P`1$gR} z$ob2&dxr8_jf6Z{vI?NIgZb)YGMEb3lX&jHBuwI(^NIUF&ZdL!jp2$vV(I>4?TYX_ z1-NTF+2nmRi?(1RZnBTo(z}<9*G?cim;ox49h~AGlR1CD|MGuuVpy8$TKA zRIYUk=tXT_dp1^N2e0sopZSAzTuD#VZ(h%b--;tD+0XAqlKZYDKAQBeqpu>@QGu)O z#FLyy#{Q1APNDK~kSMJd`?n8Yo%;_RYDA`(fz`;%SB?0z_w)pX6YH*}%jYAx*CWkN zzPT7wZzDZIJ2=yg@e==W7sucfbYVYD;TaqN;nCp-GYyYg@pZE8gY;m2#s{X82GX}o0$K7y zcHSh;eH7V!Ghzsp=ev+QY0Z^a=8RtEnkw$^KqJX ze>vyL*Z>1xZA~pTO-loHC!XpPu--xJy9wY+)p`1hKvWB24ccM{I`i)id^Lbv`x6*t z0`YHJA65lD5$r3iD{nLzzn6IT?mw-)({xB)fGZuZwL;-+9Sr8?_%Sp*$ckiQ@Za!6 zXH#W34-0CTzXY8Ls6c|aPuF}z$anQ2+*@0Gjsm`rES{9M$z>Feyc2%LW_q2ADG!wn z)F<6aA4O7}a(TFO$LO6oEeFW;sRT6r*O`9`9zYi{j-FEo-L}b`^l>P7EhbLZIp_U3 zd44)&R$x(jkqds(Y^*~~tX3yJhtt;-Ni`#hd#KHK9}?k3f-rO-dSAxMB=DOD$PpLt z#D=oIThU7V%(|$06t>IHy1n@KQ@^>Fb$s_e>$;vZwVuCkVTI@M#HX;v|Knt*A%=y( zb0RrInL*u4vY+a}KRSfJE(A9$w`gE4S5{c;CB#U&d9BPdjHB9R1=C5yCLb0?<2zcj z3%YWuw}Aj|;OR{z2Puj_cJPjC`B=yr)+76`!-@uQr(^Kpb#xm(=n!@HA0T5Duy+sW zuB=3T;S8EBadbswhHqVl?uJTK9loK0IGs+}jr1}M^|$u_fk9u1iB|2@%2bo;Q1{BM zT446)qxyA8?c^^ zDm&r zed|+%9X!i0tk0c)Co{d!o^w@>Yu|z8dj$&FkiC+IT}R<7?y}ouDhEgYVO_sL=(cMg zh}hDzCuC0EWOi^{R1vv{0sJE z1F=N`Jm?UZ508k{->_qDz^0B8UU6L>tU)DxC*5^%DEgk|%%zYIwxDMu!M_1r(B1xR zd~QU))>~?OY4gTnsKRDYclib|LD1p7?mgh$g<|SD=A%wVL87HMH$6)`(1l;_pP+vw z@;t?!*#zGhT~Nar_&LW-9gQ}_VP&$xYFck@Y58E@$}GA4N>}Bna?UW#c+nWm#2B}f zAkLw(cNOK)NZBP$6NZER-_i%M?ycFe?}-H^?FKB50CxOOtX*M!4STBq7P=%@e;ifB zM3i=V@)V1*HV-%zw>WF9$iy<}136uJL66(>CoAVfA+lM>lj=wmyn*+rPBd_uXeE*u zbv{V+St<>C$SMP=l%(;jbgE~+>Hl3#Z2DT;ru9M*PosyjJyuF!KNP@|wdRB@$2&}- zo)IF}rMe-|__K4BwfMf-^+z)ctnHqF2X5(JOL(C^Xx=-BF z3YJQ5&fs=@Yz@>VALF&E(<|8&Ov*wwvjr7GSu^tSkt%u&ib#3=3VT`f-&GeeWugEz z=bgS7-*VqF>gu^*SbRl+Z3?UQ5*3flXlO7%0_~XARBiA0GlPMeiS&Ph(k2OZahY7( zaL`zv`N$cJjSY1S>5RdqC#Fp19i}J7yUe9_D+LT!>6J-_k#zx0jFR$fI?eM_k1K)R z-2|}nSggo(>MVBR;30Un)A)q^RF00K46%gX3=zLD9=q0yy5c@`odV&455?maz$?44 zn3>q2hyG0xE=I(5owJm-Hsi2bUh-QnvF~AF3lT|qEM;0J@N+)iv%+`ScQuHC&_bk- zy*=HHzgV>|e%&7d{$gSkQ_yO9ph4ZK zLR=7i^t$Fm0jeaLH&v(_tN)aA?t&=$IfV)0J@DQ8{}dnYp%GS<>ehMkkwv`9Ve<0% zyk0fVVI(nk3K3>5BGg#Al#L)n!Bl|`QiCmu5<)C}x__Cjv=&Ckb|(9kr^0-X6V{&Y zs+pX#rd|W{2aBM!nAdaDbI!ZdHxzbNfL1~;OiyMiSXM{zKiJ_T4GWCVj0=rF;RQT2 zj5AhXPTXoVSohOi9L)@YUvTK+QNA8-c)-Ml{_;y{E;<2)rTgr|P_$G(@k(}7u?7+a zYD7-&h%BP@!|)cP;f>GHerq+UBd#VE&j)5OlD_NSSdvsKJ7qa%HX@RJWCz{JJq8jh zScyz6#0XQ_58JuU#rPTe&8TmwtgeJTT8STRgU`%M|F1#~?-{4LCw<)MP!4&DPFV_l z+vn&-+)lT_Lpnor6fxeRZyW~OWQCT4vwD_z(m}?#i`>33z2`1C8kwc^aM@+*r=LYH z=j}BLNAE=k)tTMsa%?7B9*QmPLghRi`+gKL!Z;M7>)<6%Q`y+d)sM$gq}5rs<7Imj zC+eUIzp&7)z{5Y#@7Dqrud3>5it1J~!U!6#zGw1t3$%_V`u_7ZVS4l=bfzTlZO?MF z)GB%^c=CIOc>d?f>RszCHvc3R z%?EnfnCE#B54N8ZlE|r=irQgCtW_>*#c|p>s=gmVm|b9q4Y4w3v7=A$W^qKG<;Z-Z zu;st;Mm4ZT2YBszgM@=a+0e2m|yvE@WtwGZ5y zdz`M1bezXi0W2txf2oN9jw6xx{|~EH<^cNRG*-1HUxE^XUw$g<;{vF)m+xm9fCJXvR?)WHmeV%X z($X}9IrVfIDu3j=(g3jw2=M`8<33srCUBdCkR=OHgHV5_M{+zNjMHKHpZ@!J3X&7;0Q!SxmlFqpcF-Y@)#VPCGY`%phi*(j?HmtF}eVG;H_s950W`M ziD;9^X)?3c%rK`;Vk6s3)HS^=S9%glxg9&XU06fRxe<$z4sE6?gr0zH_QD|AxD6Gm|ik zF3`oEkKTctwp03I@tIuSD49Q+lgyLN@#g*3>Vd0*SA-l3P6+hdO4w%Glz=tpEc;El zP=w5Ca4CM+@$uA-I}-;q(g$dB)l=T|D3-5>4;hOu$&F&D>TjvH12KO?#it;c?Jlti ztP^^YKo-}C!(fu%gc+8^*)$W&)dH(1PX=Ve;x#04-bmJSS!=?2cO~D>$KGv&`a%d3 z;qQ?Bx4?7I3C#)If{n~ZRFXoTl$+UAr%|_lJ*u0f>MCix$t&bJPY2fJDq&6ble zumX63B305f^z8wDy)v3a9iRCQf10`98)hmR}4&ZYw zs;n;Tv__RCGnPE9Ds&can~QuXC%EJ+@|xP@rE|z$(sJtx*qh!U*PqCid!j)ejGeqk ztl!073|->faE#`tDm@FSOc5F9&Ej3{v3RD@<6evTuGie(>8&<+YNCJ8iSCQB>Scch zFr*>UJ^8Fs+EBx=+_27A$(&-T0cZJwt-q}>nqnDkKds%ZV=M{gOZ0>eHlH+2Ag?MExoSZ1udIiWy zX^`p5_~Kk*5BNZlC{xy_6RRv7g4NPaX&P!dThWBcB<9C&gFu3U^u}Jd!`IFtldnpK z6G?6_kO3?P4@#TwahsTU3z66)yzVvf%Or4>ga7i$7sNitiIuzKhwXTUE9B&3Vcmvn zf7wf;*k^)&57UR|!OM(Mt1#`|4a>B=S`L=xII@KOXpH=a4?Kf9z-x3!K43i}m_NP@ z%kjZqkNlz;*v2f>Qs;7OK7Bt8T2|7_8=I*0m!Uz#6Jl%drH9$fB--8GRxL?oNjOjQnyY6(TE{H$1oE02^n%Z!wF_C3H`tE)aqf8)6KS-8Yg!7jjye5wLV<@>x z8UyD7(PI~r8b9zST{F3PewFc^u>N&YBCtf`&J zwttits3Poz>pWB`uRM^q(qCQ(rQ1~Ltkjb9<$s*1ttiQKL=VV^e; z;9@h!oBkaq{auyNml%eYMIuUpt3Z)15m#o^y0VthJn<#u5+X?04l;rHuyfv`Wh|oe z5kT#Dl=MG%nRVo`D5yYXZ zYl}5N0OP2LHpkj)0486+$z5Drfc(9NYu^y&fgOS18gw;D4)l zc0J{?vXZ8fgZ5ko>6lnqyo@ESL#`ABhO!FuI#Rz!)wm25-9%39Dl{;P;_D`=&G~4n z_MmTkFH_^2gHI%(;gb#xnYzRX?TKXu(J8V|{S9Yh7-&=k?1MwZ@L8#>#*(8CAg;O& zmu3L_?J9YJL~llOX$V(8mKbJ+v>R^u5R~+WF?)D7inZfe$uTG%m1R~{A#86=__t+H z1`b0l?;U+ZM|hRJ;J-D<`7h%~dr|w!3Fq?+SWY2b2FE-KYjUbK9LrLXkGk-1i-C=0 z=gz$3pkE0#qNr-O#&>om%YZ8Y>UjcN{F2!46{=GgP#^pTOS>#Pr55pfDOR!wYt{#Q zaGfgPady);qV}Jh(lqs+hWLUWpu8=Kkh_CIb?1u8gA2Kd=iMNe0qltYd|VhST!pwk z11I}8KEXp&>0}370$F^6I-W>;T>=cMGxyY;SItjmSc6v{0ov7<|L6Uu2>1y5v=wW0 zfY|#acIzFdCy<&;IU<^#Y1}0+jV}EBNV511d zUxS~VBU`+O4ZREl^9ujJ2d2=0D~-adF_4tqu@k>GjyQiZXLT|5VHKWjC?}~QQOHpC z^nCXFIneR%TnSx(Tz6%j$RKuc46)}Sst4PNN#_$Q$AGRK;op7WQj`Jn%nL#!<2eo7 z`!77@e*Ez)PEssS`ZP}_jpwnGOzJNG{tB1QKo$NPA6LQJUa$tQ_|6#+)0KQ|;W<9X zKcTCRMcK>w8pXA(!b_~e50B!f_H+HqSiQT%h!=VNw9}Ww4$bfnGblvNoP+4gil6&H zj&qj^ayTnu#5=s@9S?I&tNvZxK`ImX`0aOOYiW0!g_?pwj{S$c@4>(GT$*QqY70D= z^uz}fM3V|HcXSwg(SZ1_8Sk^2dszMN`Zw~< zSGl|I+*>#iA`Fy&_*n&F$jM~-)A0|(SoLb`o|2q}2!0}wXYqvR@PwzK5t*i^u9wDi zUCwF^#Tuo}A=|(+Tf=&;z~?;%tMuv-tb1p!b2#zUVE$EwJvEwqbO&~H5qB_@{T0JX zj^Lf^;V}mD|Ec^$A5Qr!er6q!)O7wn$xgZt!tt72wVl3}Q zDfQHN;=E(ny0N@sR-VZhcJyQJ^(681bgDqj;1|{4?A7NCdZ`WlqFxkF1n@=+!57!y zdCg^I$8%EQxc|%7#{S!#1BsD)u>J>mc8A$d$EnxuV~?KWd4FVAXJhXb zu7@qG#U0flcj&^CEXh|paj%QH(j7eI#+>L9WC)o#$s)D4@9e!@ScFn|?5tRc>UizS zoQPV)2sN?phlx!-vyWb~%BQ%Bw0hAt?&>QR;Rp8@%i8Y#hovU5&kXDzf&KE3*Nxzv z`|#|maXmHg$;G+Oa?}haP$gOcGH{a5Q>o*o@fkjXAI0$$4|1h*K*7#oX)o~1m*PLt z&hvfn>Q^8a*V%t*+OFeSukk$Vp;QtlU{QjIyFc(mF0i9daw4v9-p>3x?~{lC+VKp# z5eIa`vUcL$isLuiP+6G?29OA6&CT9QvtDO7q36kxpHU;ZidTD$%`@1fOEtA8{scVlhtQy%%C#j`7vE+=oiGkY?kD zaH==reL8Ywzc`mc*bNu2lvbO~#=oPn-4(fqK#;f9oHf*q{@KbS*oc?BpGy6^7(bf@ zM5#J|=ll0N`Ty<8DE3)bY;axdk`o(}=O3!6vr^aD&HLC%OS!gtJo)>e7CCvZ;D4Wk zs2v)3#auk2jKUWz)NQQ58CVBrdCv#fmsgzfa9+JRYd@OxslsWU4ombcd;B_R+*Zyy z^N6_W)1Yxp^bq|wp797D$HCtJfJ0>>J6^*Xea{(+03qtbRZiuMeB^9DWT;&ngr7~+W8e8)kc1jc(wj|!e7L~?( zO(pKC#P76X@2+Bxe*Kp-?BUEvqR!Q&vHhnLlVzcTc#pUxjm7^TcfXmtbWzEu%||gl zvU7D8II-KXn)BiDTw|B}=rS3Gy*owTpA1hk4?A%KyBYm`SP3(*N7;1`n5_p~t?4Cxf_z%|SH0ya2i+_q${fSS?%SQ>Uz!=U@BWyt{JZ42!CT*VK z4|d``R`xx6s0e$kHYX}8`#6~$ewsb;fPKA^Yg~lS9>{%sfmPm!6N4HMR^cWd{wk-g z2Pn{JVzX7~8vi1@Fwt+Z8vgAqKCgsP-kWH%0`-!a#M48l3$=nV)e&`)Qhc-^`U-<_ zvxeQ1=IKo24QqL?%G49=#7+kG%~$fJGOSK>aF_}_hcII9l2r7}*zJti&4O6(uAGWR zSjRi`>m=bh6L`99_EFZZlrHKGk0r$mjtg zxOH$i-|)_9eGL8i7==ApjGr3>W;dF5S%|%k<@GP)19uP=q%peoV!1|PyX$~DzlR6* zgqZ#~eBBrjg-KLPs*!yz;j7`iZ()4b9BP4+P$M5l7Lr>TLJ#K`C5pOD6KViEL5_>k z9r#2!rA$|HDWB<+yvTg`9P$RVJm-oi<$>jtfsYzPrnt(V&VP|SzBJkdQN+WH0O<2{*D0LbYba_F*L$4jbgi-J`yi?`$FTWd{;%QK*f`3}T!45)QkI0ffX6>LG@qf9O- zQJoGUt8Ay9R_lW#B9Mw zg|+TPwH}r^5&c7=_=5PHZ2Sqt+Puc*7J&=70uK6eznfgo1Ge~sN}0{O!_yoE%~9xn zT}HvCi>t7!q^pDLfvdN>lV^jsnYut@-kLnqu+6l`(v=x-wSpQ1hlK11NeOupaww#A z$jjht!O=lsfgf!fQ37g$R>?f$O_Y0vDwpLH>AUy|9#@byRsH52;0bZRa=vr~Iab-1 z*!SE0=x;qkORs{fqPvc#yLX{)h&q{Bv^FBW++;9c>DJf=mbV{mtOR+k(%lec+;6OD z+F|-_%5RP~|2EAxg_>fFN169@hZ%6Fu)`y4s*PeIQ#tj%Zx$+`xvA#9_nxB$KOY>m zG1d0~?^n+o&qI$N<&#(5K0cfJRNe2NrFDhZl}j|zEj$3uOK~NZ*-hioPpV~{gN}!Y z`cPX_MbmHNW@CMJW+%fw^|uZ^UU!S_x|x7 z^|iq!jHeH%6=>TmdIT51%o##dvxq+8UPNCdxYrWo#M!CftR#c(ApIdG(y{Dm+=F4v zKMF&ia;EROx48F&r>^IndoEKz3@Bn{3p5574>5!e58W0zKeSQkr;s`!4T7yfy8?3B&ZFHFVR9LMDuPm4ZZA!u zJLuEr5Ltgs3`_QMKWax_KP+`*|(^pd&a}#rM^CMFk({$rG!)0Zg z+)MJ&E!kBw!C(Iky1s%*Lj64_JZ-#g??c}SbuJzB4lRc0>pb?grv4Wz*^E>69CR>| zF1K1lQ&sTwVZ>j%;a@%=5-5s(3Voxfe4aq zC+a7)-OpVuT&d2(&dvO>xxTtGdn)>FFyTExeq;P<`CmX-uqkv^RFg4|W$|=K5<0rJjc37%e zS6VMxk6UMmnORJPrO3lr6{_=(QEMm9ZAJJT}!co`JnHN<5=m z=#l#Djq$v8HFUmt+;Hu${@??SEazr&!QSs>qgfJq-8N`tWaJw~9Gztz@ z=A9%e7DE!+*&oq;sws64`@?Q8rkjWaCy<@wA&$NShi(z*<`;0_qV$q2CewX_mG~VFadT-KVetx33}yEVCPICrdm(+={Q)f z#UJSp0?(YUhN_d8U6bIAWwn~2D{zNv4L4Ki4f*yI~iN$RQfe4Ww8PNZgb*wxGx z>T)_Wx;DGcxkq_-sT*~_G~Z~qJPJq-Y7+7`#1y(eBx^|9;0;0Mpbvrdf)ax!2Hy zS?n9`&j%a4FS~FPy5^hZ($sDOP|Iy-E^m2i$zqMRzOr<)T*HH9HT7Y>=Uk-+D%4e?{z4`l+p5L|RT4$rm^;#mOinDl$hwdIhPPSVH(k zzf(hXgzt%WxVIhGJq%6elhh1@h?i1`jbdObF87sUww0TYT)vaOhD3$kh{|%1A;G1m zN4T2*uut?Za<_KPbQWZWURP&`Ym-aD_OC-px}{fEQ63eWC}m8?E!Ax$0&WE~4tyNg zGRPJ5JGfft>adsL$q}{Eb&JRvwjpS_b*r(eyqqq9-Bjd9`;Sn$iSobo8NEAQ0ge+C z+OsE>N~)DyDRr3Rlq=qI%{R<{4xWWZ&!`3E4~H~Bk&FXPQ_NK?&n+db^H4C|YOQFU zVp(ZkXXV zKAhnwxQ(${nSzvR8fsFYG%#&=z_ouTmYl!i1RtVg)+Z zu5lJJsfT?F;9o?kx7CSQ)U4niXWQ<1s%xCKG{o_QBedv(tZ9L>Ms~Lc4k$zMA<(YrTMYMhh>P~om74dDF64T zTK-=3t2xl&>W?;97g$)`m`PKY`}svZ!K^O!eT3B*;<37Gx?Ikk&KAxf=V`||M-Jye z*ACA@wV;qenPCdEwznO(^|fV1!MAU~+@O?@y5S?zB}Pt)+8sGRB6p}1m}uT@NJ4?8 zHCNx#ut+&4MGGfX)jiQMB4t5Rp``aoqf)xsgPiqIU(~T%H(Z(BztPPbtWMOrpzR(j zdlbcZ6iuKfrY+_@mQB_oHiNCFwT301xrk}9(PE46R#d11G{BtOdFkM+OP775d(p~`9E z!(7Z|?5;%0pWq{EFbCGdb92F>+71J!2VEA4@I_z4;vI|P%Qj{!bR_et4LY@)KE*Ha z(5^|Bschb+{<#?BN1~c}URW*mBkFxZWK)zWHv8nFZYMQ!GhuOH0Qg|OfndPkv?(;dB;b!r${Lmx8Meri+Z;x_ffxX-%6 z(3w8PMBJ-AdNcr9 zH2g&H4cid&L1Ub8viXg*HwsWgEoBXLg}%N~u7~z*sqa%|#}=oI?)x&=MaQhvZOO+t zZ52~>|4*+}g`}#upZL~PP#{Vo_c4i zQSfkFbV~_vPQKFR^PcYFjnWV~3RU@Jd9nPPv-X{A`?B~H?xKy3^WHq;+xh|KF%(44 ze-|A2mU@0rsaxP+i>L~@L7=*UOtaY!5TKB7QLHwKTc)OE*7CHJz2`k46xer zA1Yw77%Y_5muQN11vcdjtWhiYIoH%?=-X~)t%mw$_~!WLqhFtc3A($z^H9mr$V|5R zPWVPMH9DQQ4wLkrx^lZVIZHS{I{rH5J2$#ykI%c%zf8z2M;UG#+A(1^QT8hp&D8=I zhOLj>n7%;z|D`J#_Auz3&1V_QeDpm5MT4(}j1IXF=rs3|>uV3axjkRp3q0$*#dvDx zJda#89J5mfB=!G$<8Qa*W2tt>eb-HQNzZuC3+5`U@&)_PYg>f<(jH|sD)2ka@69*N zvF7#WM00h^WXo(~`;(SSmKm0_=Juwsh8S6rLd5OVk9vT9MGMpQ*&6z;z6zc@t^ijN zREn*hR^Dzt5v-uIZ?t!k=Qa6zPtRI!5mnSS(gofV{gbo~kY#jPjG|llzUY)(@@1t2 zI;D-2=ENV>#X;a!1wa9Jitka?ZXvIf>WcsA2mSp~Y^?yd=ZNN~d+85d8h^Fz{y23Z z6<`be=c2q`VX+KTc%OotnyIAkLT@h>MWlZ6XsM%E9u1O-AR=dBK}d8`OHt$AL(8&CN7v^JTyXmB{mf;5qT@%D9 zpsmwj>xb$sv||3RCi+>ffR$}O3iSbutK2bX~9IC*3Mk@vE*$lEU9pp3^Ot&|B zOKYeEw=GI;)T?{k%Ny>%u!v$(pGH-B|Cz($nxrdOZ( zr~d0)3lNV;4GoRWA-0wQvjU#m3faPJ7j2$^E(i-joYf*KVfH#u{kbZB()t!lAFP zHcd2rG+r?bREo>h=oF48pL)kBxyJ;mRR1Wonr|&u|1QzhcxKS{KtpYwH<8RXyQi(k z?9GN&;CMK1FV*5Oxh)`sH|XP_Iwdxc{=l*+BDZ8p{5d&9>8wlzbt{C|c|$B{6SuxT~AjHa_f}WUvO1`_!?7nui*vD08K$bTIdVM>d@Z zcc*$AwaSH_nr^={!nwzB*U_7K4rkHaEXtWpaz!warI@FrcZTnxdW+qbrkOVjbpIp0 z-8JOlN^!$H!yH2qLp+GaY%q;k%niO|w3==iOBvf5S}A>Gv_qs1=nbcrdclL}EV-B- z*I!sfPV^8q!ZR@0FVrKR!069SfR*acZ0al{=#jfl7fWR<)gZoZpoZ269P>3b!==>U4^q*43KMP;EbyW*S9LZxwcqCgMa-dQQKOkSn-gwYXV__de7oTCHYC5DiYDSc z?`Ujid$ks;v`zit|E>Q-_aat4B|noNDJP8E%{MI_tTn9#tdp$eZAWaTfc61>nNDLv zQM!h;q$Sqe)jZ5xl9@4IK__O&vCL}^7fT5v>35o`Jwg@#mG1>??=PMvs4u^BO>)h1 zy~Km}ad&aoXMSx%xWvJnk__G(-j%*u%sc2sR<}`u8NytwvUJ&Jmrh6(P^w#~lr zLw-i|@Jh_U+AoBcbr=lKfSr#P4}sIype~r7i0%?N-WmAAw^@;5OosD=PFEpLo6W@8 zUcN1?N*CshIn*D-a}CwUT*DaOKBCb!Fs|aUt`F(=Itr!~jf(se-wW05UqBu=n)*z7 zAuCM$2J}(<5CfDk#@R4-Cz}Ujn`Bde(@bYp@slPR-FHTE@L zH)!oi|LtYf&cyF@9lq#Ciy$xG+!fM z8J~seyf?kCy$^_Pecnu<0g)(1zO{TJa2z{4})KMashqw%;{B_9z zSAI1%uAefQD=;WGU|V&U6Un?j(d#fy?8(Y)gAcNw%K0zmaX;bo`~-s-3je(cs)j|- zbX-9;6iqac8=mJqIGx>KPn{q?>O+?K33h5fe`)`7xSzw}hS!8m7{GkJ1Ly~alV5%i zR`{gpMxgmygbtKk#(~Vd-ocetRSGlvdM65< zz0qA+LZ{S55d9oPlufig_`NmMWZt1$lE`%q<9%PN6^Js&F@}GdI|2I5&0xh;PpIkZj+;hdnAUMxa&s@jky@pa6 zW`Yi2{%tDeqvn<0qq`W#OxusF>q9Vt2zp^^iCd|g(}ib0!e6h&5+%XG`(l(d#^~ww zm!RyXCW051CBki~SK_-qeLES9O-5_DT$8CvH-U4I119Gwp)z-B8TFDO@>H}b2grZP zH-0iB2f%|or(D4b$Dq~Ul(`M@Jl&t%IuEzIBfXhz!1z$2;WW+zVV}X?RRH@s58Lk) z`C1on2#d^lBm3AIKQ#>&?mtXW*@(X?LDu&x{gmC|^S>l7pG7^Z4LsL)Do4#&+svFo z8x4#WFfxX*Vt)`@%@C_dU(q(bC#%dDT?k7&2NS5%D2J4HOeYwkmB`H`Ka`U(*t&r z%JC+=_;!593^2Ls)P;^v?QMnskm!;7+d58la4WrCov899p}-Qxlel4DRKn7v;RJu6 zH|#4Dhgwn#yNXRrVeYh}4tpKGLOHlYN13ZT16=42a@q{=S&R_q8xCD!`?RBpQf-3sMBuQdO8UU*QZkhs*qd z=|1@#(?0&zO*9W{!HBWw zfEfoWl)|_OpbyiCj-l1?arSYqFM~Af0pa^XAJ}Q^ryv~{x3aTCm?T?|UCSeWrt@nT zr|CLAc@q4%Ogvi->W$I#t6k$vz5(OC499U9^^+WIe_YipHpGeMW~=Y$))B zV*=-(0*sg^aB=>}9lQ(1@eF&h38sh}3pD{7RUhueMQqe0YV-rCzW2o*qqq$VT>v3C zg0+rD*Q_L-uM4d5$y5!u!=eb|z72x`y%w&;Mc79_c;*_cKzE*WAvpUa_+!iAkG<#C zJJEm3O|+iO(>)|V-Uu%LlBbT~EM^lk@jEYhvIl4zYD*AN&{^WjES z{Arb*!8iHrfC_0K!j8#^_Ke|s#vm3BWFww1VfBY5^X*sP`4sNdP!1$?bwmq)?bOZjs} z$P-i8Q7JvzlX=$}SeusYmk-->ihcaf{>b#(9B03mv%>>9S1Anf!g#x^)E$b$X=zJN zJBjBW&C_+|G%mn)p5bp?;Jw1&-jw6BR>G`#!a99NRpueg?cJ=yMAo+_)&Tuo)*~J4 zEGPWc6vpZ*bU=IYIaR4Eb%X`ehff#|OQp+C&Rsn|u?xAxpFi)wBtEgJU4>Q33m0!5 ztFoK5i=&Hn2^^N4oXvOW-fG-Q37%YWPVsN7({L(RQ~C5xJVSq0Xd&#bQ`GG~|Ktqb zVgFL*$=~269_DHHvu~@h?(4DjySbMMoP>CInHTvR>-iU2;k&y0ydm{ixkaqRU1~Oc z;kQ>~y{f^KuFKi1K#eRt+^i};IXDw|%40m=R!&^X1nf9CN%vvi-Qg@axm7{#WO?pe zHmqh>*iCggfsNrZwZt2@Bu^Q^j_%-VF~8IMr)}*Ht7;ivUpNsBxLE@^HShS>ieuwl z!Zz%|dutfawv^R73vcc%Cw(O<57E?XogkyvxTBS^g=;wtk5M){!JBpBE&t`+8xlcu z$989=3%UU3cQ~skP|dE-=}Cw7RU|j?3m$8?-3{fskwh$=cst8#NL4Tl{!BLfLTC6z zXRr$No5F33XD{<$YkIK@4%VLOhVZ2Rvxo30W3kS?u;72Q9{Z?7-{Uh=c)clnz`j^> zgZ|Qi;9uFW+sCoAtyr0vJl|(@ANTNUci^xMWrF5obgNpiPCcCap~IC$7{c9)M_ z)Tdi;YO`^I$MB4w(RECOUpA464~sbWrFkMh&wL*YBqQ%whnTPCHH3Yjt6ubN)(hpR8e!*IlRD@Zc=_l|#kEH%lO zSRavH%#3!N4!`aw?5EbSkl$IC?ay%E2Gb#!n6k>8opGG`Tmmz8;lYjcXtgaR za*KuQxiofW@7CLaI^ll`&%u@-`Hzw0Sa@o#8g6MDb+~N`e2~~Y(iZKa>`0s`Oqhu%o8^^-=G(Wt}T|JANJ}D9%maX z@(}A&&e|&G;?#W=^+%RDWe(^`~85XOy_yA{d8hT~ZiQ}GI8&VYFSjV$=PbbYQ{a`rX;dTweMwxH`i-E?C6=tKh zI*e+M0}u8e``Zg;V3TQslQ}!I*P40{}q{tbs2rt&UPf~CMUSl{~BqBjz`tB88_#?kYFt{vx$YJc4idOLT`M>4J#9xj15!lEV>CzQ2@;V zYxt&Bm1t=zYKvX)7aNJ<2J)$=VOvxZo>*}rdhb?u^9>>N*1NUUTYw4ZRd{N@4_46CAj`NV1sR$d9!NyVBbzLSQKZO#Ty z;WN!Dc0Kqz={Ohh#5iN%I~*6Y5mnt4U)o2^2r0d2T5ANf=H-$00qZ@H{SvVhI+QJm zCAwPe#i@9xxpFZu)NR%f@vFVlTqx}kQi1qBuo2B9SNZOlz32SgI|@h&`D;+)nsMmW1u*4SY;HsjPIKJ-A`l zMV-K7PO;LG-*y)(T7!+2@&_pq4d|U#GpVUm-~7w!C1jSST4~JDa*Fp_Z&edwrR``m zbmg8t1yQ(cml119*{sc|A?y&|OM|WLb{q1LkHQbrDO{Jk5{*>D`uoI=*x`{@yj?~v z&;4Nfq8%>{w{u#Bm^Su8>_~28ibw1V)>b0ail{+9BZhGbzftE|C2p4PTBG#)LbP1T zeqh$HcX3V&f?wno(n$}PKo&zB_SuRR$sU*=K2xHn!%8EZvKEPL=mOA$F4k>vBwU); z=yNXMZnUv|OqY=HgX`_D?9FEDqtqJS*k>_{yss4#%`!?Oh}pV;(D>0vE+E9~%k4Hw zMdFcD=2z<4-OyG$N>n{vdO~esz4RZqYL~FdN=qj33vt0Gq5|8BCi02oq<*qQ?#Fqs zm`&_OVZ7U2GtxdoU7?mZTTdmtchnSqwW}H1WRJSf(9Bq2hoVT=49R{gwNbVUul3vZ zOu4??!QN}U5t5|i%;4N;_LhE=9-FVNJ>V2)gl^^}tGImE{v^EM%$ApvInnj4!Gcd} zB{=K~rXtReTtYo7t@Ty1;q&)rT5Nea2MCIWO3WSmu-@FR<9IHlK{xy_@wt4|K4l!Y zThra!z@A~OBm+*ux0}Wap^~&s9A}G0IpLIa%j!udZ5g^{`d};STMx~3=+1P3MdG(V zN=1YtdTwigcv|jj#TXB*i(;IT)##*efz?^x>a4XEybjgQrjHR$$n>Y^ojG@v#6Q6l zmSI`v2p7fnW)|zJv_skeCiV!9Q){CCv8d$73WKGWXey7eL4L78D~M`7+fSvCG|37X zwtYYuAfz@ciQlQgl@ovAOno#D(62nu>aF|i0**!Y8GQ0}aTVS#R@}wy9B4B|MrdtD z3k7WrZqZ|76gjLP^lG)4Ls&y4=$d)fC=2E_-L7p8L^1k4R{J8^K}P)c93u26bFp<* zjuz*c1KH(0;%YLjXd$;$glcms5r4%hfe_nIh&Ij>>7^CsS)+~L?9TEFA;A2qf5ahb zWy1_ElMc!A(6FCoCCbF8Rz|$rGNG0@4NmcJtB5pF+^#>i+e?yoAKY_|TuUkr;#J1X z0<-2R+BIQznD|CsXf-mAODXeEtcB??cl5DZK{?U!pEWQTArx>l z7gv~THLrY3*KwY`oj7w#&4P{z5^rzGa0CtrD~-ICD!&v(E1wS5 zrM48lYrV9fl-nuWLG7jeRk~X0`amx?>iW|C37C)J25$@+rYXztbG* z_|H(0xCTGCguM(Dvy+&GnFg)7agVWBMFp=_!4$FaOHjstZZ*d5jS{lh#jR3g&aJUJ z|Jv!L3f#jd<{rC>%;Zn|wNb~uC9U8jIgO3>NV&9--7I2d6BiQ`RJBgoW2`@gPtr$A zHM-iBl~uw_V}zA~SvuFOTlO1k9Jk`IZGzHem#0ZHz*Os6yfJFV`M~m*iK+1{v8c-h z1hnp{Wb|SZ=5zbKkzgO?e*VvXX8Ns2DuFqKH|7MZlcb7&8q2H?@B^OHCpVuH-bdVx z^1>>iD}1nC)+Urfj}vFbg4Y(3R!bMr@u-Kd{y^5=6zf1ihh>KXKN%*kKVnrh3l)j~7%$Ar-Tvu7#$Z0JA zMs^z8wv}pvVc#V8xN3RrJ5&{>!bPpluU6yUPD3Gnx_A*b&sI7{s|n%O9D6HV57|Cv zI;d{^PUle`JlGhqG783x(S?5rZnBb`_AN1HV_3ks!Dt4GF7N@}o)5Zq$J)fZ+W6M& zC@fS31si~dr{5ll`p*BXy+qEBiRtHo5tRZpsV`*4&Mp>zkc*$P7Jyqjh!3mNe>RM` z*D0K^rZcJI1@YtdjjWCze0Q>Il3;FL;;lz-HR==CjCI@(Ag{15SZ-7UoZvLI3a!PcZ-l;wjva zkW~>M^bdd2wob4o=h^E*!W@u=eXK$?@UXo^Phn*GC+s@lNKA*}t=E(Be`V!*(1$q- z1osTNhKg10%oCLXMfih1^9i-#mE6uhIU%vsDn_HNR)!fj*QsD0rZUr#Ji9YkZC!GL zTKqVOIHU_q>HEBKK9HUQcRh+1E2UWkUJeLkz z;2zyvY3Q=;YyD20WH0KC4XnpRdOcABOzFkzM`aiNIVJrzZWTZ!KEikvm?EA`P5!Ii zh;D($dOJNoGdaVJ6?%0ohc+Glk4I}5x)nOD9n$h^-$K#)T;sVBZFRFdi1X!!N@d4J z^^EH!yvB>3Zr%!EbHWCP-S!UiPW6uR#(0Z);@uhC@yyt4tN!LFs5oUqgg=MHJ!Nh; zcIoSw#yel5aVr=W${DH{Y5pK#!7i= zlHZtlzd+efx0~wNs_d8hQCU48y@ERwMa-5@fB|J6q-L5IUZMd*rL!?YpRXU(2k0Z{ zqIw8#XtHq?-q0g+CEWa-a4NGA_lWFFB$HX%(Y>)tUa!=0JX4-3y_t7B&@oqOPKU?F`!xRw%5xcZ_GL`=u+_$1gpjK6z651Qw z6Ff#=NV-sESYDU4!g{Q((77<33S2L0ZxgU5-RN&_M*X;`JP>YuXJwgkABCTeOgF8h zT!IHNhKea{d~qUqS__oFml27-vBsk^REUntf0+h41rBK`Gr^F#M{Ut1+J*AtH72Me zTa~cFPO$MZ>}pM{;S#XAV{}Q^q7%3yHQ0+NJgSmH-ToQc+kM2mRI-kOyW9aQ-b6Lv zJazMieDy#@W&*Y#rEhmL)!tqBmTmYu2N`AlGwqe#_abYQwtS->Q>!5RKr=lsAW0!gr&CctNu8tRuhUy(h zEPb6G$4I3Hv*9Z$ZlxQY4C9$(`3OzY1H>AMczv&!NT#QNMC8XN=0GcU9zE+n$Xz>P zVGn|ltR^?zM-^*2CvG}_c0Bds(PZL7sP@djBVK~-cn#0eAA3<6oUSPUy?}aGb$r5O zZb&97#1}*lYCT89SxmumijTpHHnBft@pA8|J?8|Qze5dbH>+R5K1P?>d)Q7d=**~x zouo$qmi+|k?ir|Wce4hgf|Hp}&)IbP#?o7#L@)1GI(F_cZDKdxv>G#<=acJAq?+3R zwY8bdWzS5tue_K~^nq{pgQwg@%ulb#D%h{N#68r!2Y?_4xo1UqtL9W*8{nr_@HA(s z;ilC6p0Lwtm<3gaT33o{@89$=ej(#X5AW;&yHSDY^(b7oSty=fpbNMN`g-Y6AKU^@ z<~;j*2L4id_<_05-pd2O$U)CQ2!>!AW;A~=7h2g_>m)q$Sg_95;yijzdci~tXA-R? zPf+G5HDSMOQf4VlmBC7^GEC{G^ibB(N$YUbS7yk4(3Nt@JMaYGs9on5pHb0N>E5l6 zKPXBBbCug>;y=q1oz^DaX-=g*kyUR`^?E$9OFwE;^TF_Up|tUk2&Ff1(?RMi(Xb8t z_`wzAQ%#Aw_E1^x%QH9S?G722&~95pzxP0XyZ~q9qfrrc_1$oN{xkD}M~r~KQ4C*m zi0bZW{*8>BoO;}@$3%8+FR7rB$o%{4rIU%ENEFxB)fT!JTNL3GV(She}{ zDSF$JIeR@(Z<_+Yb`tO9qw6z*o0FHi=Wyz(6RD}c2N%1{iGGZQ-pJQYqQVM9Da(ks zKfy)Gg`(Fs7$EJXG4Q;q^0iEQC%uBBs*shrS1Qrp8t&nI>a$hs6#dA7 z_;`_X`j$TY_pHNRbjwz;vl*>`>A<$!r4Mi_%6;Qmi8xg5&R~6R!~YZM`F_b7{e>1@ zDJw5ug@}vNQJb1cbeD=aHkP=o4S7i}v~j=E8PyYotO@kK9Y;SmyJXQ*5F?e6BBcu` zqQx+yRFG0wA(LSVw5EcjGp9vjy1~C>M(3$D58|C_5PxQaO<{s&r=ce2;vo=j2x z8Hd%0BdQ(BZ7jvxwWK~*f&P+zVB{QR7DLJu(iSiYQgr)D;x97N|DW5cVhtq%oQ*vn zMg+E$SdR<_Hq0H6wbn$OsXxkXn6ucr_58ez{B8-AvJ?fG{=9ix zYW~B>j3%?k^RbDyh*R@%x4QB4o3VP=s7aI-3yH6}Pv3+dC>@l<^Gu;LLY30d=Q0H* z-T~$dUx&YyqF-B`DZS<4KG%|RNH0;3uFY3}w8Uyq(K!!lI~W_o(k4kx=DvsatIpt8{CyJ%h_rrZe^=nNM0AHKaz^6SnTj3cU> z%q`u(IXH_AO@&R*NdB>dw|RrT6R9_spgOxA)Fy=$@K%t;vP{&;57VO%9H3_W_jq=C zH@9~MUNsJVhF-7-Cz5Zj=D+%ho$$B$@UE5NVVE#RP0q$O>K^;3R!jmV3xnbG9nby; zC+PrtI}5zCHs5XN(aXEJe1poUqV9nH^4oJ7W*MEoG~7UekCXW{3?NFBfx`=MJg1TDJFbfk_&%l3b8 z1jqAr6y9JhJYGe<&-`Qz^<%Ayl5Jlg9@#;bGYf0G0kpFPypv&A+^*z;X~>XobI!(7 zEe`*=t2gj1i{Zy~hPl*|UF%EDtS??K9cXo1eBdvf?D9nOb+ApddGoKl`**7p_o58B zd|f;H>hPI+|&pvZ~I|zea0f~W?wH-%Xr0(_JpN1gWO~<`}u%KqX6oB zPe79np{aiuox!|6J8qvzKB*ac#DAfxT~vO;pLi?1!WNH}lE@2+!Dgz(DgQ;hfLGkd zUH$;eVg|8ZYb;b%5Q3EXDWkEhL%289!LS7T6)cJh)O#C{cb4MxUE%q!a8^J4^dW!o z+;7;^H=IR6ZSr>2Hi<7UVwINj>$zc`6e7aN%i5$&7oE;mf9z8%)-FY9D~`|GO*Z@D zC*LQDs7a#w-Q*{K?I3Z?R5J7J?BW5`^XjrQDNLaS@KLsaakRxd^uy0D<-Ac_{h3+T zrMA+T6VQUU8G!9AN3HS^^OQ>CO;U8bQZkOKSc1yjh;l>@x#4uAFlL%lo zXwPn9;#KU{URWGkxDn65F5LJs8K0aonQ<1o)Q|f<5C&w*6zdcQ(^q=2UQ+MfLax#R zyIGK^sFApdUh*^i>Jjjw*06R?;A@=l0`6et7Gk-pvPK3U;y}PS^nH|?#?V$_9Y4dU%6#-;Y(JwM9_{?DDLJq@6i=j*SJVmZdE;9%;N|DdaP zoH5SStT3{*?qU&FE_^uRaSOHk-iBy&n z;Kk_N{^#6~4ftxe-3|8hYr0Lp5ZRchNe@K>^|g5!7I-~Q?FDj_=oGd9or((j&keBx z)8)$YL-a_Op;*&Vdco~y1~1Xg9AdZ*!Xd1gX{Agk`i_dwJW!F_Xf;oyCpeqwA}Vf% zpFI!%_LEtRSuGujBG&LU`>8lb5%G@WY2R_8VzJEixiO8&5pSRloDwMyB0|ZCl8q1b zoD=AeW`aH6-^hZ3OMCqwS$faV+MpWTK-b{5$dh-+rjjMk#w*2)VsqzkAmae9~AVa*h#*wxK)K-<`J$ZuA=Vp z?vifFJsOpV63#|yM#p}+w=|iqmM8d>1oohSv5XFp-Sm$R)G}-Th3=toQydk6=UN^8 znLY_Na7Q@pL&3bFQld?GzNVNPMdcaNF?@D!?0yZ{1;s!!TT;vJVBJSKVhO7MjliBt zXb(c8L&ZWD(H|HQc%3{VIp`nYPv!sY8|QQQjwj7Y+Llz`P6=N^bC;#qm?N9P|3w#fHwTCF{d^3KB12tVb zq3l)LUA=oN7KX5ZSZSrXUWnWF-i=-Y&PZEnJPEIJE&?`Yr3?z2) zbxbZCDsPmqPl>8>U9IOyh{ziAFnw6&rdj4?Nt?Aqw$<6=a=gzTk@a+j-_o3qz80~~ zyUm$J-fI1$8G$YSrb&S#59 z%kWaX!%mEoCB;@&s99Xi=mD(DF0}A|_LdJTPUmI?Z);C8_giNvb*yrfS?~*NuT|5i zqKyvb4&-F5BKff=qp^OCF6(mO4gXTz8D!_SBs0pWpx4v-hfW2ff|~*j16FeXmeymN%@4M^oNsr?<|LkOE@L1@f){1VnTt+)H+?L>?j8p2W16<2J^THevWg`nm z1*4Qyhg024^*TBtN)Ny4-RFMkeC-$|#|in&OxmizUVmrbw4_Igr4qL$OiQ?tFgNjD zQeJoGC6#gRHHx>Dlk*+?VTC&hpt8WxkkxM4G0ljz)Mq zmy~4NW9UJZu^sV0ntn6BWd8c$Yv*rSzpeke;p^^iFMjk&tm^adxt8yL{;7c)p&^n;!7V+|Y`Kt_K-x$GBbSLa1%Dc2y^PM7AYL09F0;Md6SV!V`CCFxXBgzurRTJqCC`OqltN*Xk!3R!KLHoAnFYVDMqs_C5U zKIeJtmBXt>G>*&=wKnQNRF$atkrN|YgsWjwJUd+@R8?6cGJ(|iphbpe1(f8uz7a{A zu)r1Q$*+q)tl?jj{4G!=v{VZ-wwu2)Pxm-{i5lq3wN*YUH65)S9UZ?rRywwzdDRi! zvJ;MQ;>gTucD0(?P#vrGqnEX^qpLC-b-}_eLQdsTpEX$5xah-4tcX?c`oSoF+j?v0c?%-Fd zz}>7S=I=%#Q_os!n?o;9TDuwOA1D#notzp=Sp~oKkME@K6#F#SH`#a3*TVnAzbW~2 zU{Gkc9?Sk;kZvkB)%EVC;hCd*#S}?fB;A*EkJC@hsAfKx`Fy6W8GcE(GiGV3?oso? zvwLQ!S>-Qw9rKdr3AXS@B;82pmQW?(Zv2ZM`uEk}^Z(coUo){|(*C5&N&S2){OJO5 z!L6Y|T6w*)QP(VlmplWL_-`={mZpp&zxtng%=w?Isb`0Gepq~1&hSFv>BHslM^x1w zc~^N%S0-mI$0TN$r_6>=G}7wzLqhOda^2*I#4WA;hx|>G(+6G!<^=nrIpH??nX}<3 z`Qf7nVHB^IqUBpC+Gm$^5Ih4e?QKx?mCQ9Lj$X(H>5WuP{#)*(TxT-oYNeR6Np4K{ zsu!L#Q+m;AxJ-xh!%#%1P;gKnl>9b1GB7AmFqjxT722U)MK$jUjFX$N1-6QZsGhBt zXVE*jSYCrtR3-T&a}$b*Bfu5rf^n@QPsw4frs|mAxQ+f#BRz_~`3u@2w7d?Xekq5d z_?+9pNkJc4US|S1gS&zfoz*U_mtMe315fw4m?#ZXJkA21y5Vgin?>D?8W>$9b<;GS zbW74LPWv*Zdg?T(ibwB`Y#(07Q^b*b)7uo|bOSZH7RX?;vw2Qyd_Ec*1xIFm%QJ}y@VIE{92YXNbV7A=>RP`#zO-k?b zEM|G_LYHU19ZC2u=NQXj^~gd~>7EknEa;x(S?WCy zHZGz?)QRY}snVoc6&;M48D&S_il`V~&s*2s#ktBMDPQQ6cuii}Q$MN6=r3gm9t#`| z3UJqksj?O6>anW=h3Dr*nXefUltGLacNZKKJVdHX~Lj z3Igkt8SdopQB!N{eB!L@TH(4)SNUz%0#`X^QQlOaGv+Frl1I)gdBqP@CZnuo<{)F9 z{xABPGKv#-P&zyux)S;ks><}D|A?vuv?!`Gr|$qWMKrwz({KKy`qzw^b@kC%Ji)Ci zOeLcXIK~6{nKIvzQ(d60RS&9{)CcN2b&c9i-S7CJJ@c@j`YG!3)fPO}c&_;yL1*PEFK(2rqNFAsf7!f!gm>N7B%B)v6V$DWy zDjo}2zd&BF5P4uL9|8PE2;~dB3 zXK2f`0pFZytwW*iD>doz+I?m?wL{~rH)?jWzFT)20(itG{XTE;o_Q>p_4`_~)(@QE zDf(a2`1L79dsL)E5PQphF1Uz?YbgbpRd|+L_QF}wHPAKPwb<3y)x!1J+0;2e?FJ{~ zA2~*@DAf~d2nB4x%FT|QLs8&_R!}<~8iGPd?NBbhs)x2y8Skx~&>VU*eZQU$g~e!U z@{uVlTRw3-Y?TG{bL0l&K1B6@7MNiNCI#nH=A)sj!v4ta$O;R@#RSve6qhnY&dAin z&O~Rb*pZ3uIay=WVClu~YkVRHuC9B~x=YX$rlod9h2e?b!}!l=NbZ#je9TRCDFLm( zt7u2Ipk`8^o%;r^*N|?KZZLtfSUuoIeAG+mi?rOR0e430mBB5+zk=6;-q6&L zmz$iCVJzP1fdO zv4%16^&S@F7ru%zf9;sw-pE5;w-$AZU+F|Ti|wCFBoGZ_VwS^p^i}^+e|2tges;ch zPJ;t7L4CkV&Q`vnkI)#6hW0`gyPdV%d<;)w7V+&O@Qb&hZJ|-2>7i4h@91{aVq)nv zEk8aYQLkjI<-VskYcZkeyV-!9dk?-k8#dHPx(6G>8H}akHU{<0icFR5iP~l>qK$cK z8#R+EtDha$P+!}wB+A8LDt-d@{*|t(e%4iLxKlVauTVDq61t9!357CgxwMv8nI@=c zOlQ_k3lN~m)P%p&XEqIV{crj}qNp^d7Vl9{iGgR=-rj1xU{+p#u#wp?Xx5za2Iw zd~Nt_zEeeXjVK#22YuA3-gTZU?(41z&Zmy5%2z2KMfbCId21_r1ZTBT@H8%F|H(@7m5Efu1aX_5_D$uiWK&Vtu$l*(}5 z3Mh+|L}irYfun>v5=;47eT0u4qq@{}aC|DECpkenDl$(OK2&$k&EcOqTi?MohiaYK zu@2fgb}bSmE5Cljh%@t0N4P^*+)Sz%WkD}fW`y+Ob3OE?b;dGUAljo*nZAsQU1@6g ze=!ltr>_Ige8suTqCE<22~7!g0wD?ummtw2w9``DH=MC>Ep(X}8&w zbDvMw(J7qD=e-O)4CUg^ZP5<0ZrMR^59lFuzh4lp<$+N)&Dex`=s5gLg3%gO@uQgz ztkKCHc0uF+26=h7RI3uSM*81GL>G*fa8(w-DA@`}u^3f| zOmx*ZK@FiGIO`HPD?^n?M|Z~|hpKi_2XXUus#A#jA34rBRydlW+E`gx2M=hMR8!i8 zw`+=TDrC>MKA1JlIMiXPqSv;UbKFgIwO>{50Y(MLVNrUo-ZyWg#ILQy%>BLe929We;+%xgI=mSRFgMKQAEx@xjqcH%#KBlFOCY# zGM}v0S8qAGINZ$4?yP)da$`BEFg=@!Eum=E$(XBO)?Tr5*HOSN4`ve{%owa6T#hVh zb1cCiEa+#nxymv1Z3sK~+>R3*FfEpeA7N}QmTYN+ya%+S2SYp&J7@=ZilsVH{0f}t4)v8>JLExaOl zrGMmhj;E?Xp7=j^gm*`nH$sb87`ZfZTIA};osoY;K8ko6elKjR_Ye~#pF4}F4V3rH z)a@k{ve%l84N(t-hH)c~1S5jm0>1|i1*&4-tY9H@oSfQHtvFg?CJ3}ZENsEz+D`?k z5LMYr@Rp`g3GPE@`VqJ*L*T+ZWTi#;JR^zQ&%zM7%lyV|(qHff>M`@~xcD3O*KNdW zv#5OZgWFlqoNT-&lG&-1(_HNAIU<w!#7){7ePJ@h8f|0O5Y$SQG z-T9SHR8QNbL})CK0E!rw?m&H4|x;VBbn(!&rish?fLVhyH>{DHV(I(4h(_=bwu z-!n`cXu^chQ`o)9_^p}5i~l2XtV6u^+4zmSl9~$XT#)O7;yNi@Y3FFC4q*CjlFQ|Z z_5Kw$GW>IRh6n+* zgu+87gHwaUgZG04LVZK~LJ6U2+GZ@za^1yDgtzD~pGP^SEVb!F=)(*q6M6}s;uY~u zE|@?&gxByOGQglHM@&(KT3;MF^+D7|&itHv(UA(}d;Zi~DwHX6<)g`d+tQ76k?9gX zEO}pYvq;ot)1w5n8pYEXvpp=2QC1`jiW*cAH(+CL(b0Al_5Q`mb0wZ(W0J#1j=m8^ zp3G=HO(YuK0&^h?{KrT#&Q|EpBnloX{0rey4WZ*}2h6yOWChG0#^3ft*TV;6L81Gx zHvQ$3m_*(kZrftC?B2kPjU`hR(d_)7ebVB!D7_}toh=}I?eIPcMn;&A9u$ODkXw}{ zm#;+)dl}!usqEI}?LyQe&%&lXL>+$!T&r|calR2h-9f`9g#&mKzU{yCA3h|%ea^2x zLD^;+5lJZ+l=-mU^>}~7+zl7&ccSF_LT-_FQw};VsrQ^SUESP=!4>m_e*syX7||^v zE@ERuvxrUMtHQQ;R}kAaaGh1FI!?+>C3=C-R)1`C)~hlf*d5x#oY`3Ptq&zT0;>Y0 zg7br~P|4dBs;hm}rs<#bZiWGCbFjG(*4S;BQ7L@46|fUl!q-@Xr7A!@`6)_ih0!4R zOHkk(ddL8_OYu@Rxf<-g_3}C91Tab7qe4DAlZ-y=L8&&)cSkOkWmM+r2l@2~xRk#uj>GS^%ucHGs|L5#g z2cD@H)yd-47kuhgbDjAIJl1vQM(o%eZ1V}`pWecv6|$=Fr^>;ZEx{B@A9dHi%`5Pj z*HA?&Kqr%hEkA;qUVG+z=R{dK2KBuRMDlIH8~*0^ied%+BI0WfdNGV@^(;65TUpCK zoQf)NLe^42o(JEoJr(D!gQLXJP=@Q&EJa@e1!XwCFzDL**RU=167Kof5 zu{L~NSOf0|_eNK|TF_BW?oHR6ZFVp=YtMp10{xOt`D^+`|8c4meL?v8_?sn11nvjc z2G@knX*>1R#z5w}zOPd4^gFY!ui%qhMceoac621l^{4Qnk74?ZLR(j~x}Zon&L~8t`&GB7Q?xeL8Mj%7 z&*qt*9O;MDC#y;yq(yRN<${tHecNeFbr|G`cAQn(GVLS3JW0ApR?!!=h1Vz_ZZ)fs z8;u|;4I|1lP$ z;Xcow5WAaS)-~& zTH#y5N_kgN_1fzg#9lA8Zcy4ea6OSyqsUiIj)q(E7ZY4-!rb$~$GZyxdJ0>85sUo+e&Z-+ z^xlN|^<4_F-#32FGqgvRjh#yn~THggB>x_9?V3G@Hm$#t**Gdcb)2WY}gl zbU$W`)1>io2c3m$<8j$fZAL=gpOkiWi;8$Z}g#gY=>0@)aM8afW5V^ zp$(zA_<1AL7B7@Tzet1;izm$o2jV210YE}glWUE=3zSGC@7HX;0-D#M-lP)r2^te=& zQ{PIRKc-L2w3yGSZ>Q=Q{a2(vJi&X-UBOvSnJR|UliO9x9?X^O^fgSZlCVF1M11S` z-{Wt`_e^M)xG^cee|2&QjqfkoAIy0#X7?9*izVQIj6nJI9(cer`8e5Sefc&T))}cg zuLd7$0{d+Rdgke1g&g61Mxf=GOX;9=MVqh-sN5S=9RIV>qT zq0vEi@K9hzU~=GIpcymE_lA~h=X9sp!_w`c;yLNF>?C?7UUs&3opybM<*<@TEwi1Q z)m`+djFihu&(KxqN?%GcUbX-k!}s8J^g&k#Z!-yIJM4zf*s9IO7FZ26!D7D%YsEUS zi6<~qxrg$XatSRyAGuaCXW*E!nQCqwkV{+MC-;=AqR2N3#?%F@+5%YkW59qNRz-81 zaaB(ys~Qq|8yrJ+@Cwv#a-eab4pWh~1PTWq1mi*(h}5U)V~jy&AFDat+bGtEW6?Zr zF5i(Gq2j25-IQRil#eMw4X7CZD#uCZQIbnX2WnSqp?S{m81?jd+==F)E5XXBM?VgH z31k2beim#IN=v`WOFfUd3N4olVgvbqR0&hN!aYyDQ^LPRl#J>f-6WNfYJBSDsSBlU zn5t5=8nrayW7tQ}0asgUg?Gitc8p1m9L$%z$X6`sbHYoM^RN84_QMlDBR+qEl$biH zjBleqCU89XoAyrcY(B7lMR&0Vdh_X-Iz13PBhlehMfE*8s+$~tISx7|g6LgV{zQp* z2D+KUiJVKL>%13z^JB!o7eM7EqM0t+=`4D?jG6jfV&3ziN}+vZRvCkZf^&o6M2kJR zqtEn*hLbuz)1AbE@)%{VBNm3sET`eD>}u`m>`L$2>P$;Da))D+GMo;ZTquA1gI32F zu$yD#{mYo3lOrUC3K5Tf3w72W!Ji7F*Ps!R(^78!K6(&$f+nvhn>s~!c3u)1MNq9~tf7Uj366c`7fY&HVJe3+A zUJPhwG7-;5rYohDr=VQwa`aJqI>TM-TsOEclifExm%Q)2$=>VUbl#($9iBftN8MFi zH`L}18+GCcI@>*F0evZWSL?uk{w|z|ZHY@0FD4dCnv%3WDVr~!e?xM!pdFf{+r|>B zh;Uz=E89wLH78i)NmmAUGk1Nr(|yG?%~iv7-kHYPoP55cGL3pyCaHlq43(EJ^rVb7 zHIRos%zjH_yg;{pw|)XF?Yf?zXD3f-fzI$K<0x7D@4QkVyNt8y>t#Jm{=}WkxHaxdMaF`WlHe@sJqV&5JrP01b3T>Eg zvkE@oa-yHV$;dkp^Jk@swF}PhCVMPA`q!wT&XEr&Zylf2@6KDUMedcb&cyJ0;aMW8 zM|=w(9iBb>PFSw6$DUg5vCjRDzvXNA{>tVXZ7((A4E|+F*%QAq>*V&28$a^Ke~Q1J z@K@rSq#6EIfz_dn`rqbN`?R=D9^!~`_I8bSH}ZV*H1T%zHeg1_a?d&UIi_a$)kMcl zWsTfV8YE7mzxNJIy+-CvGO@j!<9y)Tg_&F61!dUF?7jWy@+OiA71KLWtMIV*Q^*U4 zp%`(N&hc7;OT0xs+7|n~M;YWOtZr6)YA&bKc^xF|qoXAgpZ<^+f|d*cajtIVF;g2Z zJ-s#(TihpDGU!6JJ8y7eFn{PnXb)AhxyBmviuD#ni*Mo&>81RqQXgyORLg^^Z2*hi z0jAwR&8VhPKQWE0yyKBlTY1N{sp9f<__1@vf9Up(voAB(>kTN=68))`3bx_BV7Fj_ zpcU|`HxR`kj+XlTAL7|HBvVp!Tku zvRXUr5@Kg!WCLa5-HyiU3U!$}1T5=Uh{?+2OE8t9Ii}OUMRVl zsy7_uF1Oa!b#`?N-Ckk+@>R~rRm zv5-7d84BO!U$vIAt#h1nm9w~WuNtWqbu?$T-QQvl;>YvMnrp7dfx-KNBbmciJ=m3q zA}zRMX{JbxW-8H3qYs?C*<^`>Kwn~DyJw`rdjc+cURaU$P+T2O*FnI1Kv&5&zTd#u zECZ)_9PIp9t08kDr;@wDS|RFRK=(psx~QL$y~|{gOu<=WZctK`)FB}soJ_D^h(SURz9;*-QR@FsUBZwhW;K28xdj<$)d<@1UQ zOBi&na0RKLWOpxu{}b)Zrq*^0MOXQxv=WQnml|Cq>mN>V8gn1YMq9w$0yX;^Qk5=M0!UzCLG z1e^U3{ZrLI=EKm^ElHnECu)N|P#)e#Z)+?&6MJhrMw_`ay-c9wHyb>3Essf`__m2lZlC$VL(uo{|q4Tqjh z8yLC(FYaLALZC|UX>bo!n#^Fc$wqB!wf$O179UB|!PJ{7XO%q6#4P~-d=Qb-X1Xjp z(YyAY`|vOClZifnBGxf;7(Jaqn8mM+=X`(s+1XhFwe~6IaWf6LRx`8@eh2mI%Ves? zcEY4Kd*H}a!QKQtJMQV-U{$M zp3&9u4Ey>44;zUN(Hs!8bWA9Vq06=fcB&a2DZ4=uFVUUZmh*H5_Q(k|tz*O`ps}6p zUDg*mB;UiFC}Mo4PPmg38`7&9T{yKRu>~&hn-f+osy-TeRs-l~xX#mN;qRwM8MqJV zQWn(Zvw)zaqB3_IO}5_jujC^4I6@c2JMf;4VEBi?n1*pSI%7+xQ+;elZaIhUwOsg$ zXUt7{WS1wx9WO4C?$WcqUOnTS@9OT(@44cc=dBgCHmrTvd~avZBj#PTa&(Z3h{vp2 zhOGSx4p7DKPr8{{j;|MqrQjrP35*W))CZYy_7U-u{K?VJ8Al9I+Ov_Vyg5D1-NjtF znBj02+;pszQ@m;qu*NgBs02Ku{(6Ld6W;nx6!@cw0kZMkh&}sHA8x!in$mYS#)>5p z4-+l%8(KwUl{$`7j*@C$wUe4b9p`vMg=-+^DGw?z?W`Zh1>MqmP<cpc8wTmFgx-J&8r=iWO*0|J_q&qHKq+ zoff5=i&)lCFmqnf>CzdE&6FA5SLvX8N(WYHqU)2?=Mqs7X$}@Mk`DIn>_;K6i|zdG zD>xbj>7jUpjo(QhdVdu58!`=`52(&AxJR$RY0opkV+2TO6SE_`H5bqKlxoc;`Xi#~ zidls^NGBqmLG*r~q5ttUy{xyuPv(Gt-=!CBF#Tx%g7`P5x_a(sFIPwW!)I{bf9aY( z#n%(?-FM)N*+CZT(an<5x!asRxZ&s{q=OmuKU0A9G>MrhEvN}y6tYWcmC9;gS2fQ# zZ>6wY;R)f-BhE!CQS~BEgg>Pk*WUSu(m=dn*4A1D%KC35jZZ9_P%-}dk4f=w6BOUv zTov3y-InXEa}=rvn_+5YB5K}>hDth=0e+*`bc%Tu z>}vwGrYNT1tYKbeZ)Qg%2S=01p4Z0e3yp8)Y`dxGAm3Z#Sfx&8BJ6ut3HV<{4CICfQVP<(_4V;i6niFP4er*P|-MernJ{$AQ%6Q+lAj?I;mAAku_yc~~ z6zLy2#)}aZZIJ(wKT%t~f~rCc^^0R5_c3U{*0ffD!G6Pv3}-gNJ|k!>G+QwP>MI(7 zhnc>XUS7f9d@f&LCiFIR`u~$!OV{b!oQ^U}9dz@$(*t~ho7e$Orc~w)CNQ@prmM@3 z&EYXk0@Zy8SD-g6fkIU5Z?p4F=&M{x57b7wqMG9+eh~ZK#bbEs5Ys^P&^JP7bRxP7 zrcjL<$P#!+&G1fZh#oJ2Lli=JWGJ=vk6?h$K!vLiEo`D|xi)d(8|rm3m7yTou>$iV z2h#zZg}DJk$uGW|1L1O&qDQ@fxfk?yu9blrP!f848Q{-;aLx0K_T~+{81^FUG(S!V ztLGidbhXEh)pBuhzV%Gc6nd6C%J1--zIb0@|8f6}uj&%?782-BTA-Ylv&)<4I2$7l6zbbetut^0(!%4Lq7NlWkHOkzcJ$HOVL|rtrzo{1tFm}MXMbJHi$O1QHpSI#nHq$Q?xfqFL2lY|X@m*I_p>q!H zbDAthxO|R|)wf8n!BkHjBOlQmifBY`U=^G{0!#C~UK~5}I`wVU$n$psg(HjzJ*d#k=Xcui z&fe8_#nOgpwX5XE#!+pmx4!$AtC_16S)o+dvGmREd(;~L(OTihYtlh@sk5myT>vWH z`y4#2a4Lv)G27)P9p9sro=OR27IC6X#uaUyy37086YLq|wo(Jo&HdKB#Z$z)9q-y- zs}9yk5v<5wrt(buXl&VJd22amiL$IQ=fHP6!(@;zN-Ji4zXjuDqe0&&^2bX}V=Phk zHtJ8JvK^T)a=_b6%|+HXUf%=u-YI#h(hxr_&|J|x*t~-nRzCAe;(t@9?~Rr}&`a0N zm_TjIP^~IF{vDsZ204U4@~aQf^E>#n6Q0{;ZKmFwZop8vC};&)&2P>PUMqb3B+xMGMbm&o=K^wLMRm10)FgVf6fHBE#IJ9BhXRK zQNZ!sUe=yyonxtHF2M}(L1bkz8;iB#YBuU!O`ce4L4&E&IqVH!j^TVW#yjWl9`PvL`|=7CbRKA)={m$2aD^V z_7{~!r$LJPEtSOL+rr%EztIQpsTXLWe3R#5aTTZTa2Nf&->F@>PM&x%9jzzHGsb}k zHH+TG;?(B0#9paR2X+~{VZ-Qg&ItdNr}n^X%0*{%ag)PzmNibHf-NiAE1gQIDntwx zQ5}|@31<%5V$MiaG}|sD zN+8p}z%eIh7=i3h#k=l}$GIN+CNI^I{XmRfPCu?j2ksxL3KpX)+H<0_dG9;E)i@B& z5AnLp{D2AY(tAD;WDErr>?WRkAD-_GcE@NcHRgkWw*}INBLGdnVcs_P|2F|zln)Lq_#y&og?5u|TjYA@%asyGuK#T`qu)nOZC8^e66KGyw~ zM6=cGFg?e@u3;o<&*8ZC-XzajIvD?9w%|eUWMb@{^+NQkmgLG8=n39K4)l~1h-5V6 zIf_Cr`(P&Qd|Eg7PiC_ zBDJHi%+^z*w~id?4A33Sa#Lb8oAFRRTrq?mvDHMzU*L24gNis7`9Fr5{y9{6G^C@t zFTJs8NX}`@Bn<{#pcY6yuR(lU%C)BO1HLr%%GKymZ%uv7JaA(MVx7m2JKLx+9W;P^Es;Ao1!7?pgcC_o^`<{=?=!nJa|`h+VAlp;ngDt z{0NUH8rlklBI10Q4egLH^+0Hlsc3k^gr1XBF1*4bnM_UEV5&iC!DS%e;Te6WWB)j` z*pWW>T7n+sS4y(!7xiV^hzs^7ic`vVkW7TrQq0`N)QWDR%E*=?)arjA^Cs#+ierQM z(}|pm9`EYDXUmWDbD(dNh%jGA!yS|w5kdGzDU6L6!tAj+=8yDQz?t$XP{R}9zX@77 zx{kABCzjWWGHbFFHq;I}3VINmD~N7=3sT?+&>_Af|9Vm@a+>LHIq4p#j7|$yn7J&+ zP_a^l_;gPF9KP>RXk#(fx$xNzpn)HP^H2tB@B~+~gC5u!Pop_JSBFmVaY*sTAc>?g zHOYh27%i8i!f6f?s~4H=wV*21LE{ev|Di73^n=K8)kg*|C3<-P+U*bW&nd1ZQ-5>; zN?gKOyoA5aT)iJUpe+7nP7o1yaze~j9UV$z)6m)^c=$6^6a~hZdQS4Y1upR^^VXFQFe26PZuZdW@Be7=} z@tTGF8OFam>&5VldV_Ei$US~Zy_LJBW|pC;-%prBwA@1{ta+XgiD1v+c3_L&!qk2hHXP-1-O4 zcQSTDD7!r$WT*Uc7i^W)Xu0Y53nS#ekgOZv-%t=PcOmPCag})dimg;A|KJq|CPpiC z#4(~&R$|io;OKYQnQg$*Fyle(q+%)y@sXTFByxbx5(DOZUNGAqf}0Re>@SL_2nDj( zsdMo}UuyxxOuoYt0$*nltIUeT_>T@4iJFORAmsR?mzolB3E?@rQ_oWjOXLS0%LL>~ zA&@arkxdqGLE7=W_wl=vhzwc5KaE7PG{yt33rDo$&r)dXW*}~qVl~;J-Y^i5vQT5v zlvT zAu2-s=6KeglSwM)L2lX#B1b;rSM`Z7e)RQ`2z2-Vd_LbFeAP$T0$<^b3!t?`;nyD_ zHoh94z9#ZDNiWZNxu`%|jV1IMe5^-Ai$=k#Of7^z7DJi;^4dj=;xi~Dd61AR(SPgF z0Oj!fZsHA;L|Ut0o2uB8HK5`h^siM1&+H`@YIV?CqNuaWESso)zX0mcOeo_jNK+4> zjd#*3@Xn?qf1bgIZ`iv+^iot}=axdrU@^1DKR|tJgO_>$3n88={-@yg#Gre-VWIwp zE~m2_kzmwXsC%@M`+dc8?jW+A6HR#(|MMd8{u(@KAT_HJwL*i@Hsh!z+J_bS5!-hz zyEu+lPo75xOW|61v91&;J1pYDfyrw;p-+rFB);0l0;|1h|i=ETgrhwn*bN@ zfSv|}J5zwXLMymnDzaq-FM&!HMV#^_*eboK?WjRyKAJV2!qQudWZA>o7W2EgNa!=@ zrQY0gyR<>xr<^psME@MK_OqR*V|F96G#6NBSo)dUnDSFeEJ-28JW$`4Fkd!-==VmV z#c^n$!JKSMY>Q@Kbyb67-_vI?j(XOgaz-rKD6mW0D{Yn5N;UcfFTf|U;47VGg>lH) z2~@I9M2dR#>_~wUP*7jEYdkx#mRh=*XthV|LM}99C(eHsv7^0M3UjE!stx7p#4=*} z>}o!}8v9}-nxhjqU4Ik5Xu{tuc(n%eV;M2^)AT_d649H>lt{+G+0i1XNY?v}eRce}MaM!h9zy0vd>#K;x z(gobCJJ8G>YI-*iRg0iDU^@0VI`USN1$)rMpx`* zr3*Mgfsy}}xRyYf%!H4YpE*tjXiJIE%2h0&IDQv|$J3L2=!Xmuh<7>~_BvAOkFR$2 zDU-*7kue34yydavLg3F^oKFIuN`i`lcqcn_l+G2GBN?+%OP#LgXTPGM#cO;!omoc( z*@GNpE$*>%M_B0$>NiL7iB8ae0#VzM*a^Fc-ETqqW}{AK4H=NDrmE&nc$*%Oa0e2p zOa#yFGPP9Guru>g)$d2FIRi+ZF%miVDG;t%Ht$**#v&u1n0TQ z1BzTn8En$;?CW4^?!4rVc7dtV9hy;*C|`8&W3b^1!3P6a&2@Gm6>A`#sL?|tN+s}e zEL2U~k!1C;1J`54A7;m&an`SVQl>Dmg9b?1@zBF??8E*Kw#o=K&<=oUr<^zfC z3BNA}_U9t}R1e&Any8lt+I6yj6|oAepz?%qw|Yp3rclfp*7*ZG?wp_~RYR`lXD=@! z*G}>08E}1Ol0&&bo%2jQ&W6xQWuoC7psdOKHVK`o^X+3;%Q2kXDJVAq*${zdqr8_r z_ye5=@hXi?s~{2LcqV~}{0!RpjE?!r(_8V01#M@-9%_#Las*lX24CU~p2RY6i8`>$ z|3Imek$3Zuu}9ePLvUC(XhtE*7L9xjV3&*FZxtloCJ<~!u!AFzJe_#P`qloutP*nv&p}xNS+fSa)dL%8H$3=~D4qkpkf2wAytHeeg-3A6H~68^zg zDh(6Ba5~DaF%JnTxe$5M2g%T$I^e;LF}f}C$*n&ZmWDUCmJq32yaFa!w}ZrBeYrq(taw`B0Lv? z$F&a2Fa~=553N@h$rTT^kHe!t+MAu5j~6I( zq0a7kpx7iafk zQqxw3s`e)6nW}h}HKC|s%=g?vW@I%UMsK1eLqTTjNz|zeQl%i7R10$D6PEs7{H4vD zSZTQH1^YW3Zj1zLF#?(qh=FU^n`hMY-({_{+1WnqcN;W86w$`g(D5#C_Qbrt%5ZrA zS2znlPKOJ&a}EnQt&x1A0l9=yaV;Bo=pQ-zm)K!@*olLzVlDK2k~4V-)n`J^|HY{YJF+EyR&gX%05;Vjsw##< z`FW|deTJ{`iwc_(=(3)?KOafBgKMng1m<)0F;LALC?pkMt%Of=h!qv`S+oMD@EZ~- zfzQOC3C8pE`FN6c{Cg*N5%Kvad^?F}QjtDiI7=pcuqNh_pruB^uS0mQASj}|&&tgM zAEZE$H+Y)+=+O(%-$^vUeXjKwI@`tSR-zNHvWH?W(j%UGB2u*@aC{| zv9EZ&MUMO;hg=8BJdTZTBWm*ixmE$54TpEUV8hnpcP)9QO6+ZM{w&W^7Gl3gK~1yp z+ZMt3QTY6&IfuERR>nZl%eck}o?$GKQq060$vaDVr!RCUq)%TW6V>3PvRr!s)c7Cr za}<%j_V@=?xz0SOYC9NwGq^`x&hv-w3{N9H1iI)f_CA`ETtu9t7rb8{S-FfIxl2_1 z6FR^PErsJ_e}MKrVhaYKlXD}{BhcSD(9jV+9S{ixwPuC=@WKZ0gyq=xNT{(mpLSxy zq#~aZkhuHV)dT#j5pcAw33hEE?v*6qN~+Q$ir z8Ih;q#XWp$IP{+v8D>QSg`*$pV|o399XgRK|H~c3{LFv(=4IBM8;v3~RY&$_Etc>% zGW$WzV{>qkY-S|DlCD@DiCxf6mat83yvq^?7d$*O&o43A_3b z8to;TYZUL-M8{Tw;xf}GeF=@JqnGONX)z_X8Yi8R9VrgosqF1Vo?s!IIg6)X!`_^S zif^#bUZi|sKIO#PzX`3MV|M{V)2Ap&!C}ah8!ziqm zI?!4TxF`h96}Y^@?@Q+8;`(ME4|Kp6Z3-udslD&us%Km)oHb_mX`TO{$orLh{lq3Z zg{82CegBF~Os3~W2lcKn->twC*M|x+amMf9kViblT2Ag5vgbHIH=(23@ZxvqCKFE- zz_*$~DOHgA$*ljPk9!iSU@64aav`(xvVXm}W)`0WFUu2T@Y#|1p{V>&9wmQ7Rlji6 zE8J@jRCAnte&^daKcCMU#Cn>+FGHcluJBSIdiga^cY%G}3Qupr-Wi9Dvk-~1hW`o7 z8~L5kKTmzQq6s|1fBc-_b%VP-;0iDJ^FF$b8Dh}nY2@BR-Vq3-mw57@>`6hM zqX=scg3I1=I?v&m3~)m&_#g<5C<8~<O_ z4u+HZ@~@I;G6f2>vdh`orDFW}L1QI-pDoF0{$|xzSl3fdNZ9SkSTw(ogc9GeGeg40 zE;u-8J2dR!Uf)^mZ=|e+h;jlPcbT&~%byRh%`NLqsdR?AAecPE7J@j$JttIWIKV8_wt#_MqLjl6+7=BwAWy zwYS+lAz$A^;osOzF@-!M&nV&%CAq_IsOKlYF>#m5+@mUYD#jYaeCx>zt+ap&%dmn1 z?0+71#toH+Lp6E1PB71{Qq`BqcM3I$bo}j$A9?qp@BJtI`w@4DgZFQ+Lx;KN3(oN` zc&;G4@dF+82ubdu-zyaA5W6PgHNQA}FDtPU9e&6Pz94y1;rNgI`zd$bi98fKJAvV`vzf!bV8yOVy~NEQH_R|2BJ?|uye)H&S~r}Go9F5H&;1_ zZ6&mAMyR>~l$o9N*pPp5+~qj3U<3MOA<}p)ycWZ|u{_OdWchiX^gR1`8cy*L$u9sM zi?~xBPE6Q-$a+aC7gA;Df26Z&x-5@A5o!C?<_NOU4TnE}Lz~@5w?ho$rnJYTDqt1FFpx~1H zM(6!^d@BRoVCN1FuAHCy)kH&=gDc*$7Ut*pc;=^Xr6=Jj5u;fKFP+9ObMPHI+#3Sj z*Wi0X7XQVaDsQ-eRJT zf#lD{e%avUpM2^D*E!DX3{?1-)!pM?`>+QN^Z&D4IgxKz*p(7+U?tAB5mZ@&T^9Uz zhbNEayI1+vPxM&=^63E7_Y*EIz}^P)90BahPp*8Gb$;Xi;&TRa(8W3m;)Ay3e1!b0 z$T<{euM6}1={~;J`1BiA_ybA_{@No6;^P<;ZcKY~578$GcXs`?6_35xTF zVn8e4+I_Ho`auW6tNF=Zona+HXNLMLXn%IW#@UFy{pq9VWY(CB{EUFt@^il|P*pJd zpAFg(G%ol!6pko~e5=d#Tc8_q^ZRgCmw{6}1wRY>c{lqaW}}OFKeN$qqp;~lVA1`{ zJF%S7c2>NH-!0)4jeRtcxb;|YNtVH#$9e9|KIs<#CF-m$l$8{Lo~!U@E~u(8T4*qF z%+YAnZmgvQzlnm@t6}N%U{A}ko>#2p9M4pq-?V^^nn05^*x$13x;RPU?UlwW7ZIT6 z&|Vnt3)>_+Cnn;Jx!8j$+^G+7)NRDb*Zc0(of%NQIIDcD$-@bxuv-i&;C~TFpdh$h zgNlW1^M|t#*HbtzL5Z38eFpyTWM8wfi$X&_BWLx1 z^AM=h#aUGb?okfyQi1h}>*V(Fsz4kUzCb)2^#Ar(sP75#@aZB@Qx5L`hpYLsmmVnD z5Bey9&Tq#)3I6JeT|W+Aq&qfBWvIIq6eOmpiIpXyA9F)_G9Iyr41MF-{_wRt~>~C4? zNvK^0%NZ4eYwkrT6^51B2?&Yxs`Hh9JZwS@7s5y-u~Jo5v%S;(t5 z{CN*4ah9jO!c$&DPaH(@Z9s!uEW=ut#oobX{XcZy{73e^39 z=T2caPO&G+$Wwt!?#JGhL26cK&r9-T4&+=Vv~C?%R+`_42^=9jdud+f+3Ww;$b~qm zdU#s(`G0x%vpUq4jn$?j-?Q_bEYO<9^~FjQ&P@0!E?#2NjYO@*Rpj>xR`Q9bdye(; z5Q^FceVxSyPvAKo^4={xh0E-T0T=tTOL%iZ5_@?P$0;hI^I zyCTy35RN~?szr_aRV3F#ytLd5B6a}RORH_LJ3!ee`ENf<7NY4Hlj} zlHK^sbx!krA!$xQ8@u7Yk6d5m|An1-3d(rTv;1Ub4xYCRt0=}Pi`g_Cc@0FSbmkmO zvL~gnqw4S~#cH!bKgD>80Pbr;YJ~DUIk;zDc30R0A6Vfrq`^A&E|!0v#0r^%9$U^k z0tNq~?*u<`zK^kbws5wwJnJ_&K!MV~`=0&@6nzvPje&}VoP5U53%=pvX~W@o9cuo> zUOeGl;d_8L%jx;Cb|=3NXB{ ziGW`MxQp<^gm0*7KgbU19fEBOir5c>#aqDDS*s1){>%;iC}~D@eTt1<)jwDtjEVWlYx)*WYQpg7@+?~ieg@?(cV0O{qzK?wT^Ni2_L_P`sQ}{wp zdHy7>^@aD7eA+G_=N`nWi$HB9IOEUk)?HS68!7-aoYkj5-NOG9(dwUkS0Qe4gB`pC z7u@AvUw8r=E6oUhSXh5LpAx$k0+$x$jBMCzDeT{M{Ec<+*kN|&HhlerpU+&=gNDw? zPO83rU{*D2DbH$byr*+QANll8bhP08FrM)bD-l+{&|DeVYmMuO{NfE>x452_E0p0( zQh4qn$V}!~bDkor`;({1#2WvwUnbvvt9&mOs!1gGE3{=KJXsQMWO^z;*^wjlp$cKk zWkDvTamC_D-eSC0p3et!ccBqQ<}8vGtDJEvc2r5OByz{4xPqWSvEuj8+ZUcyP_LPF zSAx1HN#$j$X{l+gQc6moF0z;rEx%+k(JW~{NS?i!8+wp4-%T}U8LF$k(T4$Vkx5=d z4y=+f40N7f`YmG**^hs@M|pO0uXK%UO(9Nuy7Z0Ny>Y}qkAMyEn$;C&mb;aSrtjr> zWOvf2g$iO$&I#7|+gMCRm`Ps6dEF*+F@ikWAblN(8AU-mUCJb%D^#4UAZtB?T(?fG z$O%1=%B1&Lt4-kj65M?*87z?xvygGgP95Y#sv((yO$086{rLmW314g^c5^nk?SDiq z!>9wuMwP=YC~6ux#hm1K%Q3UDIGO!Z)F6G+M(XQ1wf#g)Lx|A!rGKg>93wIvF5?js zaev`;j3(xMmi&GP&gBc(V>Y@SZi1}x2%a25%=&@R9*dz8RVi1Iio=XtU}2u)nhWp* zdXcv*#0VaF6#?LoA1ysuCsW#*RxoV@vr4 z`}-H%C1Tny$ddL$9;`r4w}#3ERYWn@_b|JD5_xcfSuc~Zf!9Mx)u~^Qq1%FZpJUmx zoNzz|bml&Iz z<+T;Q_(I-wGV$PA$eD*!VEOa!mr$K%G>0axQ4iGt|0mTLM~>A)ZN^;g(ZDBdBcaS5 zOpTI3MT(GzAV)q@&(fdi44t7Z5hEXr)T&H>K{dRo0I2;b@#YrzS|^yyTpQi|2AT5$ z$#4_RdV+mh$zEkaau-7{WI$$@;7*H))khO$>5L{^O@+unxb-r!s~@LY5}P`hwY23Y zA9mP#uJizzv4I_3gT%T3ANlbNb9uU|Jl}opJC?jaD{PH=cweJ73YzT#3gdI_m>JA0zzli*ymCRKUs+@v2*S2u@eReK8vLskq zOtfn(bhwC|UT*T;Ezqwbf=~$0;2$y%l5?8#8cKm)5N1i>-w@VO*zw} z(4L6nnRsU&X9^F&F^BP3&LXYman`+|lh0Jol;D(xk&ivgc{GJv>chEb=vBIiWl){> zPQlH0`0Z{go#Oa=E!yoQ>wScdxd?|oX3snzMHYd}I~tjh65XMWFiu{~f2+f+zSh(W zxIuxgD%YnMuPfNCQT&u4Ln8wdHaC?`3c^E}0U=v_=P>qG`-1e|)5r@RFHFCo93N^W}}h?Y*nMve7C zDm12(U%pLGOgUs(Yid57`X}bWP1WO(<;)1D!m=xs9MARKueSjtKXuuu+Ax_nC#{L})!N%6DmJa5+ z%0J9kFUX2sQ=OGt+w5KBNdozDu4gU1@0FQVSemY+;dH3g(yJl8nnE37UiD#coKJy( zuvwX;G^Y}IGpFzyRE!z)m7JpE;0?aC1=@B}nR6W*pbvC?ji>3(YYcUD_t~j`>1Gb4 zdwDrsku}MW%|^oQM&75;#eWugo0G1BH{=GVAcqelwZ_7|SE!8mLND6|_G}kBnhs9) z6}7e+0Q#sMOv2{;?4f&b7C4j)6G2uH0bE`N$$^{nd)mT19Jb9)B(h#56U~!ck^h=G>d9( zX3h&PPKvpKCDih-*+xG^9>q^qq>{!&C{JhR>ECK^?;)z&pMjaXo~~*;SYV~pK6D(n z)q3bm^Oi<3F84o=As5HGucK)6TAqLh&bQ9;p*%)Dg8 zaP_&`CwkmQQMb4aK@DacBR^H6SbI?;I7f|SXOXvCcz)= z^nCgcy7(JV)i#nHEx@YUfxI~s%=O&rXL>v9(+^!-uMEFtL)Q3l+I#U!+~~p^RIB}_ zJNh{tB^5QBwp6vK8N4gpbJ(#*bPIKN^>yWTJxDv7b~o)W*H70G_fqwRO zq$H)(XM)R{l=~?;|1?Q$=<@f(s^LZrxwUDw<)tmkZ>)cMz}&ztfpY?CfWE!lp2_yb zVrE|KR8tLQr1Y2m#rx9z*i|9z`k$*QEmP!_@+p5(vZo$oibgk&0!qmVtu9nwSUMvw zGBvU!Sj*e%I}As4zv6x;9qIOmw*J;f=Ax!^@))YFCzGkJghdvvM}kuH&XdJ+)14hG zfhF|%G^T3yiB8>v)CFA2;^2T5P}1ec&`dF9G96~0m2t{8c|91r*XW3Es9m7%=sq~< zq4dN*1wFStDAW7ttL^FS#-H)bhw`eswMl5NFI1%0MC;_nDsqy)TZ8Q+=-?Z(1P>zl zHc$)OjULenbh{l_CxS`V3iPs^bn-hux7rE9(Fo5SW(aveUA+LZpV_;Kck42@pd1Lg z^|kwYcd46cimj`E){v!PlQJ&JI3T=mSiKD4U{0+LRYFe%9}4{F+-dJ)si15%eye3Y zfv){2NxwWly8j6O`QTUalxeAZTz$Zj`{dQtS^8?p-_*}i*A{Ot>NnK++P`Vw;-Jvr z!$BniuR5>VS6TWg1mwX17~yT3ZX z7^l>*6t}grI~{!;>m0F;Gmblsc*i$K95YQe*iYDIT2suKOv%zdy}EkSecjbJ?MrHK z+K05D^w#c5AaLfVqx6oNM_**fV1dS%ewy8uch>*e;%wRMe{8jES*_#E2b2#|R%5be zSFd<>ySJrZapiMOXZn~ktwGwev=y%ZrAL8%9nAdlcG@)kz7Yg^!#!{_s(@*@ivF~+ zN_OQ1T2PW>u&(^+GnfpOB!B~RjLwDbYIS5^Cgk2LaEX7@>;0XnQ$y(}?LakY20Hbk z=^+>ldi`rUfvV8`_`n;@6tP54F_0^!xSzZCddjLV^aDz5TQ2_&!TmDq1`lj}hM%E9 z8LDL%li_mc>)`BxNq)ihx|ZEa7HO8Y*;ChbC}qg6updXh&H8rrd*aVw$+!P#Y31CL zy?Jzp9BIyJtK(SZ*WCHexx;^dK%StT!TUo#g?tG<8o1HvXK!piD=pRbdzz&;NIUXp zddi{X*~ylaw<&l2^h!-iEtu9QZDraBS4Q_b?=t<1oNV4;19{MS#=m92%z(ZDG5%Yf z1^mv~i`X7n&YNSI!hIeYyGU=UK6iI@bxKW2$&%uqvN=UgjY(UOKFu>$ov4qJ$|(0t z9`ivrTqfUr&C-{b=_qa|+l~>1#aeyrt2SGxViWMpFliZuPfs1DENF z!|dGZUn#IkkS%C=z$U){TYJ+JBT2pFS?1P3b~uyXE!{tTFOx^+xy})K{N>u~+U!bU z&ecv&8LgHSZK`N($lQXaezp7_Ix0J2?QQHq_B~7rI&S>~-q!$gH&duGRBFs@wh8W* zuFp|v!oNW+=1F_Gz_#nDWu^vmu6i+PY`s6{uIFlRQc zk$)Qb^qy*hXRrGah|4|GXMxQBBfW>)>e=VX!yLS)-kKoe%m7WR9oBLraR2 zZp8?8b~qMyS**bgL^W2>t-1g|VUY1Z(DMe;b^g&CK|j`Wbqqc837|wcu{qw-_nBF5 zO4m$YeDZNrdoR(G^(RD=S5y7v(A#U%=o5_vXSphfPagLw&rt7H?Yp$ea?-DUVD;cv zL6-ymb1w3G;CvX+FnC(X)8I)#ul&pU9klhf)H7X>D(i~3t?OvYhF|@E+JDte&YHTz zb<_RDGsk<+Tb(Yz4|=*}0coWYh|q6rZ|#Hq`huryV;bFY|HqED)*w@TX^p;_zQc(i ziO$uwtE;_rm{-*&eK%5OVp`6$zG)>~=hL5hdTDQs!iwEo&2rqbkqJRPz`)D|vQeb}(*~@o z*&tkLAjP!DK4^i>ItUa6kG4@SL8Y>&Z;2zKA0tK6FH0+Mwg_!8r?fylsTQDf zqbho8482VU@Rh3)Z#;%S5l!vrRaPYCn@3UWD$zC5iiqG=<2#(jbUtuY_NdFifXhj* z{%f$?PqEJcWNt`tWs1q)(#<;E_CNb6`+j?I`$gL=TcWMF{gF+zz6CFF zGN>@aNv$?eu7j7@Q0hysMO`{KT>4UMo`(4Jqp6eIN8Q{fDYqO+FMbDQ7U(Z1WcVqmp?|LL>*a%%P9WQhl%|&@3>>Eu)GSUQBGH)j8FV~*rFeO{ z63REi%>&HS&0Wl)=JDXD9|DPK89#lw&c9#*FH+utf6@uu-pcfv?xx?!4OYx6&bA&= z(E@m;v&r?oBC3!HeH=;rEP{BSpIiZqjKAnTs7lY$JSur_;dyIXGF>+}K#Zz|PjenE zeVM+VWpoh6a@}Hhj~j{VEacRq@#yapaZZMsP0UlM#C+DrV8QePuX#8NC$iivEhmdKO&;^n!o+tCA@$ZAE8;_JP5YZ|}jIIR{xO+s53&AT}h!@o$O4NqP;A`S@b>Y?5OqMCg z@2b-$+LE~4DY8Q@k(4$_ONZDxvTXH;PG)0H-Ez<>Y8gw2xs*g3JcEK$n5PrVJk(!mGRU7P#0xS|OB_pw zY%es^5hTT7x;!e$ZK%f&M1G52kF7)li^Jbvk+AuQvDG#XL2a|S`$u8~9_m$evM>W! zZ9eHSPxKT7#z=64dU85$dVA7jl}N`D=_FMIlZmVC0uAsKw%-MM{jM9g>7Vt7rcRQB z7|MAUB?=n~O;_bQEy!eSM7EqI_BRQcS%rw>NooxYqFP<4w9N~~%n^D8A7Zfx{GbQ= zO!lXl(Vd+*iZ*N`jh0^_4--KG*iK*HV<}x;%k}q3E_y}I5CPYiSiG4Hzciw)+!F+9~W01Hu_?hd5p$H(*=e}2idOJp|yL)9riauo(kfA3py32 zNEV`t`G~t;My4+@{*fNYhA~cWL53(E>dT}bLRv|jLN2KWK6MZj7|#CmVz)*S4gO8d z`50`}3ds2z^zdW={moAG{1DJ6I}urnhCiN@k%=NZ;zvx;kBY%>$e;22y&Hf_gT2EfVWZXlN)Y44IlI_M?y|wXJ`lI&*A$_S4B3pXivZ-mnSIRDdu>tbiG3kG%6gkXjt_MgpOy`tDDMs(3^+oqyqyOs_{+REFD%hNn`OS`zX_u$NEQ3U?IIb zAz;zSauM@O*{-eDH%N)tCW+pP`c^rcDbk2g%Nli*${@RCH6rz+^jK!mqqH}ADJ4JW z-Os4Y&el`rL6O@)+0JVA2ZNysy-90~%4!hko{PZ2T9t)k|1d>?e)GxrrtFc*crPkm=?XgPt?^r`s8a=xO}9mdKJ8mzZDlI#q(cC=U0DU%C#XqzlrInsv7V&$R=)gzomxqr}O3~aeRymJUn=f@T zQ5B>;#*@*cXrgE@CB=M0*7S$o-_lO&UBl$vukFVQ8w)aRQF*dSHom#b$PJXzdKP1@ zmWJ22URsXzvQ{!Nk7Ra_oi{sQ0vc9kcZ(xg6GQKNz~ z0NZVv^h3l1^m@h_WuCM{y@*ZG-Q3P7ixeIrT~Wpegb4P00@LyeNhhJMhoa+^Y();D zO^ubI@*nL2Ua+5Xj`PjQ+iLl)otcAEQL4%e z!q0qn7PfX4X|U4NSgNJyr@$EBPo{5;z7mS+%hc2PdTIFp`J|8XIenEeNdBp}1W~+} zv`}efLyRAwq*cXAt1tZr zvddW`2O9i0wreoi#D;t>BQ|6M^wlGMzL8H}iVtnZM=dTL*7xFp>@xb1)i)uDOX$cYnTq+#{}}po5`d;V-LOLBhMIJq_yx&B&zlkMZMvva-n{-&nqDq21}Pxt~a={1`BI(p?e(HTE>`zCW$Yhk9d}_YRPr(8pMknukl+W}C_IY{n+`N0Mjbjz4&kTzJMAsoaZ4#%<*e zPpI;hq)t55VK7f_lO3EwPG~gUYMGG)^`O9E_(e&^Le?IR-*kujG?nUO!_49Dk5~h( z@Zl`n_Y5|B0pdSbvGrC$Ctc{$eFa+NEGRORCp!Z-+<-nlAajdBS%X2ONTzzE8X4(4 zMAk#7!Vz75qCadWw#N(d?z=hp4N!uCf4>gfYZkb%p;Q5MVUHu&^P6NnuMly%$TMu_ zboNpYwUv_!1%vXfzJy)x!nZm?!)FO{a5BZ1Q<}zSml12&O+9^8Ji`ZM-^y|l_u;CdJbiPnDLS77Qp^`9Gn(3h z&zzXZo34V#Hj%6EjUN_ErY=8EyN>#|Ts(!BJ(&-EOvWN~LU9d;gD%29zBN4LnBrEnw%Wz{QU^%lgpG7Oq(n&%Y;;#I1OrD>(5O zp3uRS^57fvhHl2O!U;&m1*|8Ub9is;gI*`VeHp22%Pt+DM&mL&Eij{B`)V)~xq|4g zTg)oDVtX8;{-+4nXz8n}ld0HQjs!c+bI*iU1J1rS(k)DiV}C_& zQhv_)4!iz?s+T43QW*1xrr;rnDs~g6Z04y8@LYBHDF@vYg+rR~1mB>q2x!jA=OtDv zqC{7qR|gUEoKVGS_*2wM8<MXlFHiahI#!32jDVWYRMCDR; z_|y-{l9@kq!LRw)U9)f9iR`D4rlKPAD{JfmZB8H)osnF2Jaru_;oZkjz-4O9P9h!r zq3Y}WZvnL+3%K(*Xs;X3y@AT6O;CCYPj(i0bQG^$AZH9^Jlbgusc+5Ha^bv?FT7WCt7Xebxwa13r3N-Sjz-^z-N>J1N9;vCl^ z)f3U27l;zvM_!harr;wiVl|I>f;Ut~$jIef&~{5|b2<`p^5@L{uu3PAIS#a+JnU6A zb~a2J5B=srYDG$-3uX(n;TBa##18l4#dV@~PhfavL@vKa-VB7x^C9<#;?FjwmhTaD zh-a~dcM%VWrpN9UI08M0kc47y7DIw|L)PUazO;cjYB?lI0sfzhij5rDPj3hu*-sosb5PKv8@|O=309DHXXq6gmy3PD|os>QgH;lAU?R`947Q z=9h}0Z*HI=>XV_l$u2KNPV9re5Y{WWyOH&7_r5w(gZ&R^O{0#~*(wN69HxO^w3e{MBs`?5ra&h06ade*AzpS#vLH?2=kOX0=J#T73>y%=41p(`Vgar~{v6}aMZW67g2-h9O*033d9<%a^CDQi zXFwLbNKXGc`1mKt1msuidv`FW)W*c{`<@@*Tz+Rpc?6S^dZo8cUq^{|b9YAfuXHo> z*mHTZsUb#`Vq&6WqV<7gfw`qQr**93OhAK>v!NwIX9S-O+!HV+V01vYfSdu<0)G12 z{SWx5_J+15)_a!s=4z&3d9tpkHJLEh(NmnsvWq=U+>2c)srUX&{u7cqDNRbR?as~Q z+04vMHBph&UVW~8*K<<`xJ&*YlR3Vd;>-z50V!q;w4Sw0vb44AvUIX$vo*C{vEHz( zGUqY*DG~A`mdxQRz=XVh>I$&b9;$uFqqG5EY!81Pz+Wp4 zANny7AR4T_(xx;ft$tJjO!+|!R?Ly+>!wW1%z8&$ZYMQ@53#AP6UTOf=l6=rx|>wS zd?6$G#y!v7n+h%2Q_8cHtu)n!_q&x>bv67_NSdrE6v<8DZQ%unYWdZ*>s(3soByR z#QJ4QHQN!YVjN+^!y+@Z3F#K}Dj?o}F4@PFz+u6Mf(Hd9`yX{Yvj$iSnwy#|__|BU zyCjeo`@uBJQy!hnYMd)MZDZP;wDGRd?q%K=~+QI{})coOU2I-D(@on5=_h9$aK0D zYAx3@$yrK+}hA=BQ{i3d+612F;IxMZqW%b^9= zaoy``HKx~m@#d$d>>&}Ew`3RVt1c!VOrbKVp(<=BurLim{h~yUKdMVI!O;EhrQ~#M4 zd)tY&3ZUNdHrBxoys^gEE!Xk7t`G;lOaAl{8Po>&$Q#K~lp&+qj2g`&%+(t~j_o7! zJ{pqwD@P{ijQo*o-UH4gKpur1bKJ-eYUC+uQDx#uqp`*|5=F13_XUgmrMDEbXLou3 zFn?hiHGfOJF&>BK2)N5ln3EIDq}oZ|X`I&wZwT`?H!>|fvo{}n(vmr=i^*lzqB<PD&pA&sHCEp#pEf5_gVMsD(|*+Ma@2AT z@_!xBD=1gUOs3LVLvsa(1cv&j`t|fH?dNsWcJyPOUvH~y>1Vo6+;#!p=qhS7T%3Px z=2LI>Hc<0J|G&wzTD5lMBYT1azD(VyP0(#bUL|8Pod-L}A$I{&Y$8_P0knEN)v0Z% za?6TUFFIjHVaL=XPgaqxi277SPbLl*z`k9>uH8r{fSDQ1EkMHV&P?CF?Co;qF+3$_ zR$l3@u^EMCPQm{@h~>JH zJVFTPe+)#-Z{Q^t)*h&<)xylj=zPVUzshy^mS?#`Z0SnS@1M7hMRD1 zcfEhB{j`Tft8-B&nrP}|e$DiwX4cWRy>{Kv-g(M@U%;DyIsx7N=Q>~co%M_N%i(P1 zv^b0S#o2APQI_WBC8i>#e9W1@&or6wM7+T#)?Xnr=TXm)f!UN3ne27I+h2Xf%qqKf zMlGRE_PRWAo~qE%UUdpJ7-x_+dyqP9u^B3o>k)l_Y1lWV@QEv-4dxTSBG)T-h95ex z^L^y@Oo8pkzMABfWGi>TU+d*$IV;HeOF*{Uj6TkSFS}3*2HQ4@8QY_n`*R*lJu+{y z8*X?ar!aXmRsJGdm0XHTUMQzv=_`1q{qZFFV__OtFFVl^C7`9rpg!02mV-M?-Y3+r z4D+;Rx_nP6T4I@H9P0hc#{qwM#vSTsByUM3DlcV{gQn#Gkx|rq)dNq=Y%Bn|cp%T& z5Z(1R)ehP9Yt-hQXMXJk(8%9WX;zy0=6LThY9209+uT;Yrryyq(F@cV%SSU`wG_8r zvmUgau-|n&_G{04wQ%P(zcI|}+wEs|I-J}6J~~vpVGFdiVy41b%Nk2RtfGddTTG|Z zkRCBm+<4<7lD#MMN&BKzhhdvE)Bc7whEoHziZzTw!-|<=MbHPq*k?WUG~)m7@b#W! z?H|yo5GOOPP(yzgpY;;FG!j2-8(F(roI?P3U^A(rC_omVEcFykkx`T6-CztSQLTEF zi8`67f~f`q-Azs*2JU%*7gCl?+bBGT|EP2{$Ba4}CNgW1Lu3Y)#1Ba>fNza>=o9<;x<59WXK{B|%2 zC#QoYTDw{MTTffRF=gqhWw1Hi^pmQm*6hm&Bts!7+(_5zV+$__>G~!=T|kCkNR7}9 zwYm0%TEU*Wf<3zsi+LWojI(&Ulkgx;QD@VFx$bcw61OFm(UjK=&buygm!I&)ed3YZ z@LVFmdrkn|{sdXNlH}G)vhyv$E}uwmg@;`1PQ1!qL|%^Kixxmr2s8!{URiy7&8Ku~ z2^{GS@aivW6JoKb*J=jZJPw-Afz;Bm(ArRWTo}u0ARP-a>OQcwH>z&631@fJHz{C0 zlfF~Pd`{K9IFXP_o6DcGj#Auy`g0`Hc6U%oym$jl>17osT?)f z$L&WP2OZ@d2kk5DKkfS+XZ*%HhdS^19dQh^_qN4bW2}#?sn)gDeU>WbcT98M#5}N1 z@>e8JMX9V&i7pK)tf>R~Oifu;aDJOGWw|ExcR|=K89AFp+6k=yJqI<(Yz?9JP2jRl z1dp&cUei@L#R{Fd$$`xzCKrK!dJms{2-bd*v6ATntwGz9u~p}h^^Qct$5YAE0^e~W zQZOGepy~K*WANXvaqfM2Z9{JfM9O6J*(6SW81aa?_^&mHJ01pKxhIt<{<_3O+N>b+ zhTs9IRJpvR-X)0@=c3!BJvAm%spjvA?Nyn-N5C8Runw+($0@L>Px7xsPAgu&#{c*G zerxCj@%n@DkyG?9RLCsmP7AS#=Of#qkQ~{_hkWORCZd4~;JIX>kL3iXlY_|wYpD;v zh@3H~FdLzc*4F7ysU~;_N@Z`5O_ze1+tj+pw$8rEKGEKfKLZ@Am`pz1udd%ej{n(5 zf#TlC8pWi-!q!EWvE~e>uk-{ppu(gb6(f7ebo)~g(20CVVWKhH=)ie}U)q8b_@H-S zZ+eq|c#7Y!hvzcWGuH|`;t0>)9*v)lwN@Cf_W~2%S8#&i{C*AIdIGO>yyGX*AZl$s z!aXDDrAw0jP_Ho9H%H+*r*iGu*qf&u6)+ zM%~c`K79=@Je>GW4xaQ0@7&u>KNZ#yO@-Sr&z-1AtOPP5Tliy0^-vBPCzkr}8f>!a*6WwDI2 zKCz9nN85|pL+lgmB^)70xw(Fi9Z&6VY(chF);)02Rcm`|XG;e2HMDOxwCr`H{zf^S za|$32dI+@lY3%M5w8=reDCp?7z~7&a96JrZd{b(420(HCXyXWE!7^%?YOo_~sr9)` z#n(J)teRj=Td<&>5Cix#A#3#4Gt&Qmz zEyKHgIgPA*$Aq1liE}H$sTr*90&86l4{pboSVV74PW>rbX(kpO`B*hCJsz99|6`hB z1J3IUmf}C2_MS1=B$d5px&m72vq2Kx1KpOOXXKV?F%!VU?G^0lwjK10eX#AY&*aR) z{Wd!;*W}m&O*N+lE8;2~ zx|6$KfzxB4>~-*JB=YtRUdb@(%ub@29y96I-|L32c4Ei><$2~_;~wXp=|13oiif(* z)5F_Ejnr==jJ36^+Y%S7vETOY@BKh3$+f0TcS|3+sAXKUwZXFum7<}Ie zQXl!9Q@TWE+$w3M3-6&kG9f>9MiQE?BC?&SC1i1(#61^cBcw}x$?rdwFXH1j#IHGpt972U z-r}#^MPw7Lu3*504qooWjv^6u0-ksXgF z7Gc5{o=24Ly!;#QD~y7S@s_i6>P9sHm(86UW~(m+F|*-zctU)f%ctso8JpE#WPTm)e((lyq-v zJbgc=khRn5Lf1~>hUcgiU&*@$oy0q}zp&@Fk^|{2H&@1)meO^ag=qR`YbKj)`(#~d z&BO$~q2{WljEYlcW`;4As>I@S%Wk2QbE-OoUeN5!oLi{3AhUf^YEQSuQ6-9r*tdDz zG2JkYF}+k;Du<}4??R44gHAVbUemAuuX^V*)B7)P9wyh9XZmk%?>2gOe&JJn^nPYG z-%hld%d^T8=Go-V=Z;ICp58p&mVVAP!1Xb^NXIgd9 z^x;hYKFzG^B&{KpAw!8F_l9OqFmX7_vVr+h7V9kQ|8aB{U`}0I6wZ}+CK+P}cZcHc zPKy?&xD=Nb_u{U_9`074P#lUB*S0vt3&mw5o-0Y-AHMmZ6lRj#d(YWt@3q%j`v-G- zi^Zy023aPW`xBr1}gzt)PpWhv56FQ}I(-Oi8%%N22 zv7F2F%+#5Qgs(VH4;TxhgPJ4l#j*byov}R8AE`4NQCU{kS88|A4pmV)hkg#045kO3 z2A&1X!Rf)$p^8dh^^(>;oDo@K_#i~ZchVx+FPAhX8Pnx;vM%*uZfONjr{N$XQv^-e zES8ftNG~P3{7ITGRb(b^5Aiezp~X-sI*UmYqr=~U2E~|(<>+)BH0)+-b_*u|E|)9P zKj~_;QIBqw9)jT(5Qf8U?hZpU1%%=UYOL(x3?`KAVh+1eU#3|>_{yur)ZA(tFsYlW zsx|?MDo!01W-i?{vZ;|&MXS-jIkYP3RApi4PB0J%`fdJxzWlztzWKh9{)FHHB{AGn zh&Ro&Wp%!e&ytjorF-g#Y%j7qbM($OAmwvne^;_&spXvf&`>DcT`k2V|4-!pHvsr;81ClrXLgwxb_AzQGC|GoFV$Kl!O{?Yx=ebQ6h=kvb{n$=W2 zf25${u`obdEDtgUjh#$OP18&TOjC@9<%|h{ z9A*NKhGj7pbh8Br>EG0&8=2%Og&%{m7vL+PchvWjF%CnGo`t@VuHgXu~oy?3OV;TIu8 zOkyy^HRC!{0)49f=00fL_L`m=H_N@z;wK0e!`;YF;evWdO;Oh>vz2{H8@0K%LywD$ zXR_2Zv5>sY__t}RxsauxC1Ad0?q>eo^oMaHlcjcu2Za;x8J2odP^(F zI%Cdj-_no00pghlEzw&}^J#FnrZM5;4NvigUQ}P8y;O6mtCY%0G?YbYuPjzh(c{~o zcGvO{-wUFNTTW~qPma7nzok{so~!fe6RuU-D!W3ff-3_@{p)>myvICeJT1IkF?T$x zr3-&p&O6&D{hXynO1snr*=ptNlDlW_yE*2j&PpumoM0&?rgo6%|dV#N~t-FMbg%H1SW%q*MvFw^W=?CtNL6kMeIqOAf`8zC&GQ}){Y)H>b1 z%#j3i{fooqC~h~|zP5;_o)Wr)$TBTe`5WbZRc}$xF82s`H+L`haraoydGAoaE4VXM zR{cwx6K=(P*rE6;6b6}_W*%huU~yP4TdG<%n2VTKn_e65Fb&QtY=tkdH*yA?;xZ@O zX|SY5;97gbJ0i!T4-Hp^;^F`?Rk|WAk@Fj;87~-j8k-Y?e`GG{N~x1nl>BVDm`_~B zJnn07AM=9awT4L(qKlgn9Td6Av}cWz^BesY-IMBCGG7T=m>#2`72&ii&(!KAXzzBZ z2`H%Yf^2R<$=Vnt-$-T)Eu`-60-~}#>;{Fr$*H&_v?#baP}Bd1w~J?(yNY{}`<17u z|8!_{I7!NFt?v9M;ZTw}%d(W=S;uA1ms8JCB)cPZb<+L#Mb5sCKdq}x|C9bUl!<`< zhd%i}y48$E8Cx=pnI$tTWnRkc>iO)81j}iuku?UVblTY0a@;n@aV74f^R{b9{QP*; z^|!Nj+-iH8HEjN5TrUk0sA|Lew986TXk%a!=iyAE`*h!F-ylDF`QSh$P3@pR2seax zok=$`K~9r*8FQMdo9ZxGKC9`Xu@AEwOUgZ@Z&6rA;E9w9U(trDWtFm_hQU>VLIGEx zc3?DT;)39?&_rc{`m1(MN243Lgi0kMRF)RX$BgGqKQm>zhSg@RmdydcOW%z%m2)83e*oA z4h#)`hd)4XwV&1i%r*y4FAhZLCpwrnKmvb+9Wah;tgLR(eo$rY&R*1{vxMAyW&1z1P&l$Qz z_Ub=s-P9zdZg8Hzs?Y7M==;fM^6&FE3A_qi3T`9I{a2l*uZXw}Nzy-Z$oP+Gg1MyS zoTZWVmbH|vldZJvxwWx%jAglbhv|^&cVp3`v|`~a zS;^0I^?J%G#iNW=i-O`640nJTxxmng9OOH30aIocQ3G6{M(aeL-x}VG1q`Y$-G|%o zzHU&HEddYNNNqh0*QHHgs=-L)@Rw<)$8oe+$?VK>^poDB_RAs8AO=@Cd}I6aBEy z^jbDkB~JpW$OV$Mk~6{pwlRdR$!lhVbrOGuACm9{S6ur~?`fO17=aFhKd&Yp8=Eo^$X`~cc|8kV&bKwP? z#bY8pYz6fn1hx}%Fw#Nh3cz3KN_Fjxevi9ZeeO*~@q;*0dLy}H1Jkqrr2fkRFQC78 zi9F>tpL#UR!!e-P_vn+>0YPX2^3xLpw+_9CGR%-^OHH~0)O|XAiTQN(&I=c*FWb=v zFUD%>4CZo)c<86kI|$yzU+A8nFypo$ynwg*1v1dh^hy#yADY62XcBHi7qmN{{!X|W z=u8wX&NpCgiL3%IUEi``pnuVcGeu5<0Zs$Ozrkd}k<`d{LEmzM@~j36Dg{>3mo=Ch z?p#(dscEdbXYh#4!e@gYb#*_PBMJ0WY&QNTId~15T`_M1ur*l+?uiB_{JCW^=;o57l&NKDv%sKuQ zmQy+!@uAEQyc!OMtAM*a1tp&VBemog9@rt&v8~BG-wF=Zilul`xSS;~46| z;pk@X!ABjy-PlCl*@|A@T)zJx=)oWK_iDo}`2=1O5Fdz}#Ic|N%fxX^H}$cOHPF1r z;A?eZuV)1-Ov4#!E{JAf_)<}_-emj~Lhzyn^7%`K8-jA0=rr~Rujov_ZWV~P8@#?4 zJcx=QruD$>hjY3<1$*^^)YbpeJ^vZZeE=Ma_=p9hv>Dj@U3&d!KEC8uKKS}|;plc^ z^-Y7x*#Pd}yDz$DBMjch;N$;;g%$J8enRJQ5B%1~eAXyf^La4)wXjaVBCFmHy1NNp z?_t*Z1k{{=3No{qFN(TYUiwa&$GU$b{UgnkvNI9#9&><=(hH^lS3~Xek~MOjdB!ui zZ$-f$-+|av0KvKsgYFEy)Vut5B~dXp-K8p@vn=euheXQ8%#u4x|Dra`xP#$-%$L=0 z?{QI)<>fv#rBgW~ydeBP7&D7NK^KB#&1K^A$8Zg>?b={!C16!epbwbNCte5wzl>i? zg(dV`NAUaasIhOc$5zvUtd3ImKQN?C;G#dkI!S{e`!_%T0~LN4Uet79>H z`WJd8=fV8$;PqAkR`E>Y-U-mD)?CGXrdWLjv#&@sI|l!^!l0qW`Mne@#?o*XM{yq9 zLOqa3bd9-5zknCAoUc8cEl;VhrtzHb!eaUZ=0-l^(SDv^?9O#!;#GG#b=h!K?8x7o z6z;+&NC7Y3#a^!>HInj4|KghVBO3Rb)MOqe{VpQPRiYvFo&6C7*SjCn1B0;dL(997d$5-k{hIstEb@vO&FA=++jQrXiNd{5 zP_zJ3Yswz`nWq*+HP8|~Zz$+&%&o65okj`%QOx=EGrtP5AAf_7TY#tDmCtrkH~@2E zB)#Zi#DLr&@j*0{fAbk*UEvkiZl^9Z+NE&nctj#l< zPLB!oDE~4U)U`Qhd@fE&qU#y8&oG4l7TvebG z(h!dJX0GEdRZ+}ax*a{BJH&w8?B~uzf!3&+`*O82=&1a`74PRg2k>kw(0f}+AK)z5 z@_umdokWGs_?0!o`K~p8`XLgG{K^iy#7d9xL<+(2IVp_9v7;cn_71(`Bxx$_!GqE| zX`?hsN|MfrRXFKaFpYbo7-hPA2T+wp@Y`nNRx%gn!dtpaed!AI<@Gf9s;gNMxj<0v zbAHHl@e09x4HEBu<+|j z`BDbCOLxdaerNB*bAF9tjW6Whc7&lFQzpbFwj5&p#^#I_02}BIH+lh2?Fd@+zv%wF zf;C)#te_0PClh(ROo~1ZSK}vKG_#|(c!0`lD_KQMM{u6^-OZ~#g^wR&zZc}x{f|?y z7rXEb=X`8-n}JpO2!F6|_`ldLKFPDYNoKn$Qk_p5+{a?)Twh_zuQ;1?4!SPSC&qKrXUbn+Pk@s%5Cp)kkW3t%ANH{1)Bt zI*`iNVpnm4*h=~uC!49J*5*;>_2!VdKYjep*59p(wvctN)oLwcDQNC&%46~ye=~yR z$(Lb(P7*fKX>Uze&Jk%8Zl?FwG%$~`>Sk4hWwl6KjO(9IeWp5K279z+dTC}nS0%Q1 zBb|r}czFtahkZwiN!+t(^aaB(x~~{=@CpLwX3XhgsMwvE z@^z^$jp7X=c6nZTJ}a#>dYY}#>U41XQeTcB`%yr`ijwa%0rPE-m*X5zxYO_^GQ+8K zMjo-g(jsMvGu!C`htTA;qpRrx8+^z01?a@Zrr%s7N|q;I57J*<%1)6ZBVcY8*UxI% zwYlnNP|E*>9tLZ|Y%LSW7pMXnn%uZWfw+rl1m8oL?inkrf1Y>AE; zaXp={olRZWT-JCY{pIRlyBvA!<*e<^`AoCOhjSQb$sMHqLNGcdQZ-zcismP^ zm(o2nGI%L4HBceYBw)hpZ%3#AT;^&@qEbQGtPD_BYR7e3qyXB7h~ctuMw~6R!ZGSM z<0s>P#=nfmj8}{)rpc!BrgYP5+>LGVvl+dwxBu-b*{H7EpI6!;~&cM`eMsNx7iJ(XTy; zQ{Fjh#k+9PtCNw9pig&z&dd%@*xQ`VCx|C=;kuO}8$3bIkOvi4`RFO)My|*^&iqHb z_GnmX)zs5URmB}z8;Ut}wF~YGw8Ud>Zy+@|JXj($G-OnIs+063(WPQPV-53W^GhmR zi?y1)Nu1BM8_wmM#I(eOBq3>E;@62A6Hdi1a(Uz4+k03O%sq`GaiIL4v_gD}O1Q8d zueJ$Y3(WJ6@Llj0^&avJ_I&MG=c(@P=G)Rl(fXk_BBCUcfv2L>@IlC#tUj0!tywLUU!a-K?4v z(?6z~rXt2ok}R%68N3s3*JWV+%kXCXQM*D%V43orQdUV-UWYb@`h_}%c7<9i1yvX9 z;1BvzD##|B%%R9&RB*EmZRv2kF%H-Dy|Gw9xDyh8)_rmxQ^?S!o4ER&;n_O3&LVC zMOq}CmQF}3a8!IOju(@}bL6kNgqNs;D#NSxQ)OI)AG;Am=DOAwx4t8=IvawD|AL+( z8y&>oLxVyop_9P_!Ct}g!6U)9!Lp%Rp+2EIA$MpCo^SiK(cy#9lj1{T5^S<0TQl3Q zw!3zxvsQf8#3zaG6Ne^BiAuueghmN#*;93#(;Ulf-&p=Nevqb$@2H@DN2xk3+*#|R zJPckBy!E&8KlOF>{q61R{T9c{oxaEZzQLEF;%WtYW9{@$`mFH&NIdQqjX@(98S9%a znzEYbno}(&ELN+@ddOn5)MhSyYg27gE|b}mZd^>y;b*Cf_?3{;&>9S?e&j&d7;dDG z(5`WvigFbOLPPv%wa}T+523E1nIR?gm$FNpN^j+;UIUgrJ^cvz70xjqabP$i<^%~X zB_EVO$T^Hnjpac?7s!QVx3pVoFNxA7F-o307wu===tg$%$Z)zooSy7B&Jay4sm8XYmAz;J4l28qB)I4!iR{zZ z(d#rDy{adXtin2JxltgCcx*mN*Jpq=WLx046F12@%~=!Y*YVD|&Xvv{&V6x59i!|o zt%;Tvrb5P^@@sO#`cf0|yg>*1IRuK79SkX_(jxRA*gaS@*pm1(C{#^J#r<)xHlO%a z8usu>ZNGjyTolFi5TO}3)d0yLFOidteT};~ul5_iH9nNrgAF)%YLAIs9bmY%m2!(# zp@?B7s;OmU5sSl#;h8YFe+RaApQUncCed&<&K}weW^b5O#wdo)i--#vRL5BsGc#1o3!L24QdcV?SdNJvsUn3_RNw%FEKGi=kL|TAw5_|cVW7HpWsLyAS;jV!(_J}&FKeLx& zQ%Sp1Np#@seFLK_JN3Ym$Y&~?GvvR8BCkPU9@AT%LoAl`N7`}i48NS{U5@D{I@!HI zH zVJ~>X9;%gZxW3nDKVws^R+71GL)+Vo+UNlhM-u+#1bBl#i488!DNws{oB-bNRM<*& z$;-xbUjGC;Jm%863ZC*9u6BT43WskKr+5MM6DxFG?;KvkY4(v=`3|&GgL&5#&f8z? z!q#9NH_+n7cxWnDI#bw2PCA>@M-w{0Fc74^(yTAe&W&MQcyN&ID1HMj)t3A*6DDbQ zDgYO!)N6QAyW#p4Kxgae=A&ZY`(&AUuG4 ze|h+RxCNDLGwQ~m73?jGbV{7ZeX56sAyIUJ)*J#Q>4ML^Bs>H?8N=$$OV{;R(2By* z7qGi}P)}xwJfpWd4n(d<*iX;q44s?F@Q{O+$Hlb2g*_kO`%P&3#D;D8wS!agxq2r*~V}9VqS+ckHZ>q zF&8WB6S+nvD94=mLW=?F%@2}L|J*eJFQHNipExf>qRn z9CZJeiq-}KQ4vmQN4~mHm3N{F{FWzk9?fzVR#5`TL(H+@0AI&gL-(lI_kj5hoa!o!Pk0zc!WQ4Q2$I6CF;QJPibsX9@bU;2W}wWlf`z-qkB*IoEuFTSkZ z+8}!?$-Azx|6Y=zUFW|!$eY)LZp;KpUO*N-nLn)%)=|Z^;qE$!auGC&Ik|VQVNU&z z`!f+n;B>q|hJ$5|!Dp=w@9W`xE_07e==Eb$7EAHp6n2M!PjsKPc$zC&Oa(WC>v_PH zR0U0)2qv(Lb@&6HzttBk?=c9)&ph$dypqIgn!z7qb55^wCDXa*llZro{ya9j&qCeZ zj1^an-}~~3MpMr(-=U!p&2BOx=Ep3hrkxml~{5b5wy< z7sI(+)SqrD^q5QHzAuPT9DiGf=d_!rUW%*SjYnN3c+VF0P(LuKPSmc|`P?z}dpkZy z32*{4D5=a2xJ^aBot@hg#_~u$`Fzlx5&XFaf76j(z)-lXwb_}mDF>(Egzf@CeHl?< zB^TyB+k$fqzc4E;a>k z1+U+iC_e(vbQWYL&W!8Pn%OVf!r7>TXxR%;nK^LwkHkZ&!MV^m; zE5cJ%So?Qap|^>2IY5!mu_O0@|NRDTae_4(n*$Y_MVp7$sm1F27EgxW?4jbU)7Vt6 z?5u$zykj|@(J+{i+d*6IfVcc>Nc@8Hl>k|*i3YC;y{S&<#m2CU`>>lQgAR2jzbQd? z{~oKO8NLKD4}|r+`+D9zHn({WNJV$hpXJo}i@3u-!AqS&tk})xjm_(4qGIy{kF(?4>})3v2G9BVn3Ls6khT6`0nK=)N32^P z&te95cpuMlF6jCyP`sIZPjODSPM|^W*?F;PX?NMpSLr^z;uBp56JNpW)-ucya2Ww@ zo=?Z*Rip;K5ruj6BGIOxm^WZkuZ?bx%mTZA7CFWHZQ@gma}+v_E$Eny<&2z1pQ15Jy$Fgh ziOT&XXKD*joOn>PY;a{Ca*vyH`uN$k0xRe`@A(5UXdi2EJol;t(XKFj=xD@6XUQ6U zPgar&TA2w#8(aH-!cq4{DsqJ-_}(Tw+wZx`#q7S=jGh^+fttkVqWoT)Q+X>G&{?qI zn18`J!wDSda?u6+hut}xRali>(E(g{@|S1cnxAnHH*53xDiJkg{CW1WFJ{rp$iSuW zD|SF-7`0W2I0txQr+A_Z=_+(!g~j&oM6RL%C#eJz<1)^7p zsWSP<*9_5@sHSAPXAyG3$JE06`HAP?smc8OYhud>(9O4?xUV8bz@`clW&Q?zO^&<* zO`C(7sxzFT6Y$nAp{1&!T~=p-f0RajeTMGqC}phjN^xn|_1@9;Vw$|j^un~wl!MIe zuKA=jpW|iReP^6&uG8#%9CssbbzDkZy1l5)ZN6kYCG`^*k=Ll4(_nlNyM9*@L&o6C zKwPkSutxBHU|L{*pi;0@=$7li1+we<6Gdb1V#?sxgp9wZ!n{QjJ_Hl9bT|#_Q z*Sxrj_7j$Ern<(?az|+o{$>yL!)hg^LC6QIcUW+0!0a#J9p|2&8OWUBx$o;8oS|r{ zk(wb9zU$BMiug&>Efih_;bf_n-nQ}fY>vzJfwnQ0W~TD;1<;0OboHAU3Px{-2kRZR z;a}!k6j25!Be(}8;QhukuVJ2^GZKl+B*MPs#BUA@Aye>)|4KvUX7YO~;ca|flD0_I zrTyY7w2*aBmJT7})ui9tk>`;Ewao`qa~;DIP`P&rhx8`YJkO|i4(i#-6_ZdlXGe#) z1b6dV!BK<$-(D=(}7EYDS@c} ztlt&*KF~cln{_`)n-a+_?XeVhu1fqi`BsuKQH_6@urRr6)>1i6=E%%8JY{rZvTL1V zvn|8iU%nkJr(F#U^Y(H-&Me~gxUHUL?$Vj%(+_-}|G9Yj2KQe7dF848J~GR2M%-wG zcM~VY&rMjEFeSd8>$o%3RUy7k!kC1z@k`@wS${M37KBKD?I>LAlY#BNuRIqr>ZM;x zYoFdQqhn^0dx`s#d#$IFuTn5UYZvV$9Wq(1HElNg8T$&yy0|&cUaniNF0RJTtBy|W zcbj#)d8hH7*f+XO>l}LPuk4%PmAsog4Lx(*hck0!reuaPXM39Zg8o*))uCqU*Lr4T zleo>4YM_KB8{@)hAsw0PtfeS`WqSkeE=Q`fW0 zbJP3O|99{!MOHPXEOmAn{pZLQp_3eMikhmJP3DuP!KNLkasOvJX%yrgxKGx==U@a+ zb98u?9P%eQ;ch(mgDw%(9E=k^zbyn829*k-Yh2M>^SZ`6Mr{heZrFXAUcOQ$8WZ5R?SpE{>|`S z@2V^boc8tdCVS>*zDci=miDRhr+c3&q!q~+=|1Fr>kkC;s@L?R(R<=+<2=iH+eP~Y z$Dz0-&PlFO@tYEEC7g^u=q%|WsM;mzhN z=|3NM8#UY_KDGd{MI?K4GC$-5oBv+2yMRP35 zveA{xUdvL!G*lWM&8Ft}?M>hGG48{Q4+TH}k`eUS{AT}c?-NgJZ&IMAc0*Wc?qF{b zSISx1RV|@L@}blm*%PyGOD&LeKdz`XXe=y$5;q8Kqkn2Gg8MuR(l>qT_wnM#o1d&{ zfwX_p_hr;^XZ3Xrb-+V3#q`uN*7m)9h$9@AE53bVx#a9weou}h_K*KAE@(Ys`b9co z7#v=vjP>_+=S}y2D)(vWr!Jr0fBr6QeA@A}1?i(RmwNjIhilWLig?S|&2r6Fl!)w! zubcQhu}R|n_&3gTju*C%))Z?U^K?1CP>KX=vcJ3cpl6_`kSFNA>E7;s+HSj$>FKzzMZLO&T+fEmjbDg8|GXI)w7<+(LQ_oEYIU-#$z}rAJM?rNXV-oPE-Z=O<_Bhqqw`t8H`4{JXj z`8+hEfye287Obd!6J0GiEpr@&oV_p4)||`j%#gks5~S# zGM+K()<^cZxO#Di9QW+M+uB&|mWQT|#xn9x;x6WyUev2-U#ovATj3#J4V4K^2!4fj zA&&l!qBPWg(!0T;i=sRFG136kbH8v+?1UrFFgoaMg*Vi89@Lk$^a<+wP@&LOdWn?- zuYGO2ZudXL@@<)yJrjccqaQ8*BxI&0Wbc!8OLG3i_K7Ex|4nseSF`0wU7KihzP7eD zjTcKrIxDxl&(pTO7vHRY)#dfsceb=Up7p^JTAT2&2pwtRvb@34-Z9O!Gr^tsFlkY8 zG`Vh;P09BXhq?Z+E14clf17bL)8Ogn-R1K#8SGO?(l&={ z3ImN7tqIOU3Hg#MW_go5G%0^VZ)X?#dP_}HBYCkX2t)D6-Jo?)LV>NmW}ecSqtn-? z?MZu|KE++kpGT=1P7-R!t4xI~l68o6HdCj{+Y;f(=7Em9yGfT%0?}?a?aCHP<9}od~|!-8fpV zD4i0PP~Bd~Ev^=od=A6x==I2<@H+h>ejf%cCyqOJv=^Y!t8o(P1RDOI`jhrn%LZz- zQSTQ0nvbDDF=RfTd9gJuf4JU^th{b>I1=tJc53apenDqFS7 z^z9l)t{Ga%tt@W)ZfAx|b!~P{b$MLtoiDDTdS9p&bxKP{Jel}Yi%6K=)Na;=JMYCmStjOpT|sID(j z4~1q1^?)_lJ~)v%a*{cN3JYj3l{vHihP-duuv zdQ~nUx0lL`Yv9GU*C(opN^T~L{R`9Pa&AbE-Sb=zSJ=9I3AcCjJ8)^!si(@xt9HG!l9JW*$QWy zo)Snr5dXn7jjYU+*fgQItAM?r>6sx}pAuZ@osxMqt=^~kAGG&nKHmF0$z3`4T0bU? zG(IwCvkJB{_Jt0~St_O5JYB@&O>Jta^ir+=*sd)o6Jnz!meE##}!jG3f zWlevb`NZ2luspO{J+AvA%>}dk%Je^LqN9RSil3h_CGkvR?!;$*{($ZdVMIzO~KkmR@cmU#zz$9P4b%l~H}UfHXSh$IT5q%p=vrs*i1 zH^CCInQNH}8Xrh4#lD8YkuADY>#m#&b_pDGrl(6#NonZbb1xhzzqwRW+m+p{S`3zH+g4yT%J6h?>zlI58VmwX_@`pCw#rt zIB|mQkA&)}qqA?w(UEiIZMGxX4rY6jtyH!?sU4C}#&32EwJeZL21%>t@17C;uIUCena$GHf4cW^+s79lw|~0zxl?-o%oUz%K77YQy1FZT%rHdWW>FoT6Ut^e zk~%1B&(u4~UniL2-djo=KMUQXvzarNs@+z83Ox>ZeQ!KBGkxjP(sHF$Oh1u1%~x3| z7SY6BW~=>VTpyQ>6QreUit~J2fw*IiQjQ7sW42A!=jKYrE5f<(S>>xhes5uSm(1&# z&)i+TDgK**8KE~yD&7HY!%HI>(bDik6QoblZF#-1y6HzexX#ME#i2~BXv-NAP>Z2m zY^{~i5;ajPrL92=SX}SJY|l*G2>w-ftG}uDR2%w2Bw@%Z0NK(mOCE1wj_>CdYv%GdDnK|6cV>Ycj(#F zhrz0W*WPyS&*=}-4`v?nHBtTv_dx{^66Wz)H_K;CGpyI`v*N;W=i_!b&f$7=&t|aY zv3!v48_MaqL$7?9o;~g??n3S%Zom7SXRNoNFVX)gus|887mLmp=1Gah_ojT-9(DsW zrS?1bfn7{?bhgbj50Ezs4WrhuNo%c~3HA*9x}$*OnEi=ut+k@%jcJv!nXJG}t8aJ~85Az2zgG7t$EZ232VVz! zhf)=v@=>+x8^TqiPYf-^@6iQzmTMvr-wCrmg6~@eL*r=8NM+97Hrn^njFSIc2bPx}DpsQ3}_c2|+OR`zvP3-xvfd8*)uOjk5t zTemZ_m)q#qLc{g$zM!ZChKL$2!^i z&SJIPH03t7kwyqFqZi=9J}|(pOl+!6{hp!w;ahhK-wiK{lmxx*0PFn` z(@{=>*EXP&bq#cPJcytJTx|(6=H7EYJV!Ay8Q$4?d=fl3QXGi%00FIOcn_w&6vcWK z{1kpe$C(tq&djfTFtetj+iS-uu#;Jk`{A;+fhRBy$DOM1m2%TRiv!0D(M?+dvTtKL z!2IYl!zgC<4c-AoIk{-)bknZ+|n?%Z;`d)p0cvoZ+9L)uh z(czBzUTqq_FBy6fcxYQV1AnIvG{Mk@iC3+da=DC|Of6tUk3-Ml6}I5l(?jeid}Ejb zhtdPH=PpX9x-i*pM`lMmfwxqnOE`$`WIdi;%)u?0xx^wGh-V^4fp`{H_E{kSPG1G2 zq#pgrX&}>C;PS>MxunB|`<+<=d$n9dp0e5@Z7@CN$!I0><4*CJ&sPnM^BAn+MzDQ` z3$@U9O=MP8L*Xu%+ifuU8FX4Iz#whPHUCf8N|$K{J8Cd`&XQ3#Yd7X%)}DS`Wx7pA z;2N2P9EKv%p?r?DFeYQjZ!h|qOJNun013C?oRAYH{Ak#`Ynf^?nU2gnP~9rg9Fbb# zaUes3wN&^My_gKxL4Sy+)dc*-HsJ17nOU#5^=IMi29wm!*xdBVxKDm8-IvgA)8Rgg zesPttxN#iM_$b}|nu68PB+^q4sB=^=sF)LPERjCub~wj*z^9bxYr|XNpqQ5~_;#rs zo%8%StnN4d4uj--v8m7(Pk}9Ts(RD=y$V0D3cBtNAiUqBG%dniUKwdZFTABt8E&K- z&!yfl-)qrTt-%_(kA9$#pwJsH0B5!$+KI~WhmN!F*1-b3!%V)KdLH~|#xjTbsa}9t zrX4^+3!v&K2Wu3K1|8_TbZh>h|M7-66FPyXr|-qyQ5-JHeJM1M2=O-H}{hxuo^aB@RyceGvVeeJPEEp7VY-} zaLQ_End0%CJO>Y_IiAF)qC*WWgdE}pv5eFJ#`M1^p$4G$*v;=-nYox9PxIAum0WZw z=HcDn5Xa_n(KAG~)~x;ryao~W{$gAx7UAMNgueYes+2wzTf*LN3EqC6e*H~e zwW7FQ@bSK9;2RX6r&5LPVQfalNqS*F(c#=o7rGEp;RN;NQyjEP(cgIq8~-%>vMP>h zr|EW|gIDyJzVLp!$=~28{+8~?VZ4P-vVYdnzdslb!Mtq2Gimt6VXPY31q+Pl?L0#> zuJZzUdYJ5G1P%e;!oxi)tinsC5m>_&K6^cSxO><;P3V+(P$Fz1PW{Ty|40{6qtk1{ z&!!Arx`p&_uA&>dha=B8?n-`oH7WER{qQYB9K8HQl(qCXlhFFCLHE**dnEDk)GCX!Uu!xLZE-(ZLiD>1Pfw$cnTBUx zeS9&;6NUTJ9ofO7?SDG-K0PNBhNL)QBipJW4yu&_GN5dhLxV|JvdJ$)^JG_ z3PZl|9(L3D9LxTl&dz#_9^^m14zdeY)2}=SkMt_`HSaKpv%w zH=56~jc)(Xyw+Z#^3U{*_tA~W2j_e_5xOp2{~UZzCO-0C5yNt#=Q+x!?@a%12W!p5 zRVLC)>_T49f_`!xqF|6+HJkW;kWZGIK4&c$0KK^%llh)vxF9h-1+S(*U>|ts*yksn zHH8nqRk#8(NX z%FN|#4x7)wsu|6mJIMZS&pmF1@~JfZG7oIOU-QFQu#>K3d+tL593bY5z<6pbUKVRG>As1Sh)UZn9)J_|R6NL9>yIaBeQNF` z`n^l((i_lmT;L2i$bK({R_AB9BHt5p!_?_UvYVU4+kd$Bwe)AFP`z)1lV~FU*n@B3 zcsOw>oFlRBY=}SqK<7LI#>Ek?av}~rM_|w8=P4ehpB=~9aFhFgpI-c9K51)K`hM14 zd!B4nV!~Y*BEz^_{dna!FkwEx#Zt&za-ltpdEM`0eeWm!ZR07gMK3ZJ*2yH!HVX*XFsGfXDJHuQZ>WCZ^iC z4WH%?XXG7v@^|U{zXD0ROV2*Qr+7e&Eyvy)4mYM9KbeJjEF0l+tmL`<#rMW|V;49f zE^rO|h<{hO#=rSGOxORy7ye9LetIx-fjYCdr*LMkM_JY#y~9A1pV{F=)a3lmDddLV zREG8ShWk9}%M~2uE`1A@^^A2skTp6I{>N1A!3KKi)!9)s;FgpHYk5NMMT3U5|BU={J&p9!G+3nx(on^&r zaM13etP(iAn-U!iC@xO(`K#mn@gG_3X&n4Mv-%zE*iP*8#q9K}%)2N@?)sT^G80W# zdDfE+t;b$ifX`9S1z;7N%!mde0ny z+(ZZ&UGWvpfY^+Y3q+e($XQ6J0e5H#`n)T+F8zh3@i;lxBzQPF8PYTIqz5o6cCeEAa9$^{Y8P|A zVhn|JPK`x)UJi%ZwuC>8LLpWW-OJPPZd4!R=;n<;r!)lBZ4bJw&BOV_|H4N-z^^&D zjCN&a_&Ye#vC5$=S;I3BzzsZuUNG_&N6`V6|a{7=lU%4q9U2b8g!g_SwH{5-e}8uFrr^M z$|vp!zakNxWjyP70=dUrGQchT%VYQ)8T`JV=y`zk@&TTP!hSeKl)TI;@{%LAVjWKA zte(s%{g#Za8f#(*Uwzo64TP7I>PHLwB;vIaj8GxriLSM$2x5`&yyxPZBM4zbmAp7-uX++V`^Sc7#p z>PwzKmZ{qZ9&u+O$jocAt$zn9GgUM2z%yC$%_r!U2 z5gPDc^^+*do>75k=srBJoZI`-r*O zVGZ383QHB_U*%fz*XXgsVpPgyJcmZ1x2d^lJPL>dR8QPcvN ztmmn?Ur+tQxtT%suo^zlGWJ!>&-50|sh8w||L}@6U}8RuEQJHL3f9mKI5?wt{kyOd z+7hwSIL%h^+=}sJs_%OmCUyb&%Cm+AtF5(~?BK-o&Aph??6*5nx)*hR8(6xjU`S`!IXlsI{=;fJ#cG>`M`v?R z;~nsT3c(7zLJZx&l)0g({Jq4N>u|INlN)_T19F3t>Kt{!Untg^qWqS_hj16`srSVz z?5G~%bpHh(=l`@5s0Mz-VJ}?b8@-cdo z`-PTKp9vD#P_Kj&-+b8Qt*Bk6pp0G2JobFB#j=WP(Lpa1W{NGPhSEpjO7tQbi7VV6 zK9OSh4jcgQr^q?9c;~`@M{MNe^>7Dg7 zt-dx+y`r>I2H_7<4e!8qFueXi?N(p+8(JD4npzkYLu*j624VwiAy=n_I<5z{-KGIJ z&hIgIv-Y%hu=q?RjVq+=Vk7c_FtxNV{Jr*3`9^IBm*cY`w-AXq^;aO3MYSHxPIaQW z+lU$|HkW~uP6&x<(yy?&I|vi0{Z0xyP+^}optaQF^=jez%mw~Nn9bzCG2!ldHQfQP5GV7fv9aW3u$0r0O$)K5L}BWWbo7nJBjx|q)*SJ5rq;Y`~KX0s8u zf=;;YpNGp{H~K5PxC#n|^5{E_oXC&JFZOaTYa41(#Xo{mXcBgS|LqXwhz-a{4>B1E zw?mkjbGWK}!hEJ0Fbf!EXfZ0QzllvRh*g*MuHoEN^ux#vDuNVMfvLLC(3mRsA^CO# z&iK9T*B7uHXY*Y5fF%~kwPQ5Sn{Qc9_236ddWxP6->8dB%2>rbhoj0QWf$77d7$bm zVSlw%-h^Is@m} z(_{INbPw+2d10pESfsXIRXw6yR3GTs$qR%?VXa2!W8mA+e(gGY2D}@7O3#Z9kr*j&O zG1Q2@0;?*=^pWo5*)#FgUO~iquFL2;YLLg3rxI91&Q^=s;52@mhd38gsr|BZMxI8= zPz@iO(>xyo*wF_ZYj0AyjKE)S8v8p7IA1RPJQ$w}>u$Mv2}jVVdP;qx*p-s_cBKU$ z!Dp%vUMLJPW;bUT72!Q@RSyj(EKA}$WD&Dej_+ps$&_UBo6p%g;(Yh5y_+?!*(>Lf z&I%2gWt=U%kUN?c_xcjBTB=4?=@DgGut{KdFiB(Xg=mz^NKXvw(Q(X=oG=WM<{L-g z0{NF*S}Ket;+5D2FOyZmFVS+Wt{t#0Y8ZM&`-J;xPnBiLd3Cei2S=U_;KISMK(DiW zq=uosxB?`txwHml^cYbU=i*8zp+tL%vh^i6&sx+n@1mdaa_NJU#k_DKGM9NE3ihx9 zjy*-Ih1Y0Zc;nN_`(9J$1UX{{MUNYbi?yU@IB)$$#Oq6zJeB=hL>w+U#QZ{-tnNQp z=j*_=ax-V_CD=}7vZERJ=+D!)^Q2aWdxEJ~L2L2}3~_FB8}C{Nr{M4K^5`k_gHL>m z_qhW4u{AP|lkIAx5@%Tc=(n);5{S`X>p$Y9Svx$P&-tA`2>Utx-^QAen! zU~Lv<64M-Ii&{#b8Yv-8Ga8s&azmQPDYD#H$l*?Sn>;Sb=d|0#o41+QTaz87<5oLX z+I}@}k{<~rm|vP2y$zeXc6cK_`c(BtH4hUZMrnQ2tV*{~Z6yx2)Hdcp+!I$BW--gC zWONe!!ndXz;Q5D**(4kN${I{8o`?(Q05OAnVH615b+luj@tX~3%~9SZQ5Q+kv&5ON z!h7`O@W@DC!*ubPJv4K=0bZnM&BMj6#14~H0F-{9=z`fdu)uLhyf|LTuizt+#y#yYnYBl)Gy?_KfsI3 z%JW;#nz_k+K1pmXgu6j8c*bkUkXn<=%%n0Jh~DUHGQnYRd+Wd_p3UvFz zOqTUEywZ1|c3Rj%trH4|!=ED!iCJT#v%rE6M2<%L3cKMYd_YgU1uU!-nEqo?k@Cr7 zqzI>Hf3lZ94IYr%*Ft+?I2`_QC_ZfDr}cPFelVV?)Z7m0#2cVEds#QTc#qLM-%eoP z#p%5W_#Q-~mx$FcaT}c{l^y;8U1eK3p~vYfdqY2mMuxnh((3Qp z6&M!j`cS!PuvE6+8j027NX7_cIIC8*ON#u@F6d? z(RqB+2Sv_3I^{Wd6BLmiabjy!a5ssXVWF`2yHH$ci9>8nFy6IPh1Ef?7Eu=t1AE)V z`&?k1MB%nnL&tg0@B#E@AJzL*>X-+S(R5SpgZzvjx+F(_3s)!h??Kr(n_iAe&VCd( z;-Xq_BwdT?E1y)ls79@(_Es&aR>d#yXFRl*gl;JvwI%wqa8;&bRzVrFi|lo@*is&5 zu4Z55Jc{SwV)j`}Te5ALU5C3UC%EEI#+9)hHkFfuVydJ|yHJWVXEK_hFT^)68E=Ks z#LTK_xKAqE6sI~7f7&4^Vwcg4wSZ}RqVqwW3WG1TXa3JU@u{T9n^A;HOj`Rv3X4y~ z;?ge3DCd)xNR#M6Rxos=n^G1uIHsPcADtZ88U91J>N~U<+G=JPRn!wXx0~R~nXXpV zwrKCQB6@F>fJJeqFHGLC0PhwrxxpCFupo0j|DbP`0{XrbbYm2iatc}8UK9^KjF+V4 z?E9j`f??uwIh(1A>4IE?$@4ep-k@I+_tV37NQuIhNE_zTICv&`h&prWkNk)`;WxN_ zWJC%QyZeC54W-t56P?0yt|qw2`7eWk*9HCE0t&o1dM$D`dIrRrxUAtXv5q`|J2^^RA)b+2TPizByNbl;a9wrmwRN#Ju}-yp?}&4*ajuIiV~@9_ z({CP0$2N<6MV=vNmu?$|NA8lVKi2|O6jStpoLLW;6E}#dcipvD>aXfLROFg=LubAS z=kyk447^1nTlCA6r0G(M94D`q(s}Q@WS+g`{>C(82$zwvazMNzBnY<*ezNV7!Xm@U zXopDFaDIKhmZDw7-~4yx{{6~qr$lv@QdqGmGnEvy)F-tbO!_JGWe(79`Xe;e+0eUu zMx8XCobWLn-gKdrl+XCJsUez?Vn(a6v9YY_5ghS$=HsT>#**?gJY2@3A}NgnK}c9; zFu)PCQSDx#PAJ1?pU)JXK3wHGybF5MtvHX9PA|@XFFC)R=hvKS^E3|o>!|lugT%&A zTqkqIGdT-qed$cNi5X)+f!5%Ix0l-Zy8a$VzXf^*uHa|r^=?#;l1<)MR})1`py+6W z-{>6miTWKg(za_2H8Kjc@&nx-ShDaNd(w`Rq*Anu%V zg7XrYieMdNNwJQ%nH&Y;>c`Dm7U6e$_xB2a`J5Rd|`He)>`Wg!;>Ok z8D=vGt0CJ@)L5E{?hO z`PL@pA;x)fBl*6RN6IVq5>_!AVt;tM_MehnX`%e9B&c_kZptsAXs~B6IangN8J~lW zN_Vvx+=T38$1BL%2S+C3l=BX3Vz*EO$E*R;EvcBiMSg>4#9eus+)18^D(Z|pPFC(^*b6{%)*e+Jys94$~>-p#5chX#B(Yz45%9MgB$_EItA) z9*V1^LuiV-eJikxyl@ci>MF?K2>mPliq;k0*%Dmo`|A(%Abz0v;BZciMaoDm5c>b< z%bX+E{0SCMHSqNhJVBfA7zFb!74f($T~B>Zg8qP)5Qf1@gEqdGJD%KV?Cvw*JZ z+Pd(ut8pR09f}lpXp!Px+})kx#a&z6-Q6GV?(XhdJjs>g|9AOEMqgVV4Y}v+y=2b0 z=F+d?L-7eeiXG}RwIlvheVO!l%!wJGX9ai4#H@H6b?XwUz8d(%Z4nnrr`TLI(b>s$ z#J)H+MtV6UT1l3LWw-G&B>y z7}|miwlTZmadU+!vPar(?H|0qGm(dLXzjFtXz3oJ0nSa-m=8POlGw8X#GyH#FbeL^ z$=$F%*tW>MbgVrvI~cXuehC$mlUZLZ}5Noxz%CNzkuEj2djF__q#$yYmdYJ z2%HCs;uD$xPScI;hz+U2F5p4414pdTmZ81;(|+)Q2;Ny1Sjrak7+I)qKEOK80>kV< z_b?V*y%tDWI{L}=_&A(pv)OZ_h55#Ofx}i=6gYdB@b7AFFt2hxmcbO{i0c)Fe$t;x0UVEQ&iy&w>Ir8#_XtmVZyxV)kLbyaWASKLKAc>3dro*> zxhuL>vNzy?d>T*5HF8b)5Izp0#JRlB&LD)J$$C}Tu`>WISa)*t7dAoKs3tqIHPB?Q zpC1>q={Q#vK~2yR*MY*QM%Kb)H3qvp%zR+9c#Mhp<1o1csJm1!;VSeJJK&Bh|5Pq_ z0AbvWx?&o4;{_OcM!c95zH71}Q?<6>*=|Lz_A9ZiG0LTF%)X96?@;jPgmo?$gjr}V z(tskr5UVj^I}T6LT6F2-*@JOYdItwP0*)*a%w;azjf?nq3^m0Fc>bGwpYpZ_F4+oI zlt7OfgFpT)rZ#`aF)lkb?h<;R5^x{4;W+N$Cv=c%^eFgqvQ?3OX*RslGqAd1@B_zD z!Of)JDFh#Q7{2T#JynQ3yUSbO&D&o?4joV3dyw<)w*MpVTGZm{K{r$V^f<_hUvnMK zNM1P50_2&8U=gL^l*&@4nrv07jq8SsTKfYC)F4jfM!CK+O(}t^>rqr#rd$l4*PUdt zvCM%^!!f*$@(sr4Bh|nQTtnC5@0MM<4-(QBzSCxUd@+oQ1DvQkCt85#FHJZ6J6+K+ z>bA-D(w|)Kzc6UgbX&QpMo;kBV(qDLKcP=sn4i|mGphk|F_B$tIjmP~-Mel+X7@`J zzn+;Izb&jmJJ_PLoYn%I&d_%Gy5yY|WS*IvdKE2QVZKt)5e?uREPq<{sIO&Mzp+AP`!0A*V{@$a!Y&M(Ka@Q7H__&*<+uBa z)7VB+Na`-0151J4~shjq=G&wa`&fSOu!h+12$4sdlP?0>BP zc%RMS8wbI)G-D3*hxwm%8{YK_SV&FWNGe+sQ6GlXT2(nGY52akz#zMTqTB`Ny2aE% zfQWq)CU_N^lM(p+7M7m#i5J4^M2nBXeiW1)OU2ai2baM_rwg0tO0L+ta8-}14>bbLYcQCQ69)AQu^`@F!aM#I=a56(!jR9;5U{F#WKRda1@DS+ypH%&9+Y4^v*ri5tv^4EXy=Y=x?6*AvR21*^!XTFKW^Tu{64REnZ% zu;A8Sf=E1ouk8qyloy<@1Zd+_K4mNF%L~FN+hhmUFy>);N%zF=!XVVWed&2Ck(bB9 zT@Is<_K<-F!)`z2`y7GmqD!X&FzpRs^ocyzf}$G)*#V>1oG1MS%sX0WM}6=c?i>y1 z%{rph+6to5nn;!tEOQx5dINA#f%^UiT-{xMCZCnZ)~!{1hrfvo6<~QP2>rRWnc(@F zg3kod=5`lm!wTNBhQR)n5Hm_X6zoybRa7GiI+X#axF_I=o=++#-m?1=C5l-??Q_CC zu@{O3r_@VK6l!w+{GoF9E6)66`S4W&;@ViR=z`du%ADvFhAvXe)bJK!wJ?wCjK)KC;j$nYQN5W+ECP!L7UFO zMs~H23V%ya#rnbv{F)}RVRVIUiyh@Way_xLJ)P<62b{(i_DS(S6hzmA%=Q!*#M!s3FyWOb;0Iq<8YkqpE1RQ@Gv*z;E-1wi zsjZj_jc`*c=GXkZdBs9R=dU0O?_hS9Q>%P4z4l1qI*8yLVW~9{p5Zs%`cBbD^w|oB z7;Ss3tT=?ewc8RCI!OPD@2rrrD&A_$J1B)8)>CnZ)gHI-Dqs?=a3%c!w{jImdoDHN zZF`qknLBty@LMT3uV1$^i@hZuJXUe*Bn*53E1g(UN)a-0X9uFyj0MMSfFA3ja0gT= z3(>8BI334uQC!3n!WYwLo9JEVpbA_L+I${7sH!m1lIbRP2rprx-xB$sn9uaOMi$!? z)5yO_Q+Vp@%?|W(bM1Z7Vf+BTTmPEbj3fGaGne=nb>R&0g|HZOG_&=9Tl^>YeT{IB zs^c?CvKBBuAI!E^Kk)>6c7_X^*m-x@tVLEChKqeSF${#jMT9KFZcompy@fjJUty>; zQfy+k0h@kmR<~XXQ`qP8yU+^7NJuxt&UYavTi8V@jhI^aOnvf~fT9CWyiwH0xKop5 zpR$vr4SL|h(qi#<`#O{VZOktANa?=vSs5Tj+C7Ztnx=oZWa&>Zyfnf#^O(`y(%2;+ z;Y!dD&0JA5l|@iW4MS5gLwE+VyPp0(J$vjP2rJ}g@(ppZ9bq-(#&s2c$duP${;$g^BI$ zJyd!pgt?r6GQtyZ-ScGg!s077EzGiSnvbnN(Xsz(-83uNcW}4MDYYh}<`F&`@3eA8 zJ$s9oL%Im7ddGfa)CDp7pZUsuEM1iIOBbwwvBOM-k84x*zSR@=!64-~r`xBgD|*2# z=Ct0LSF9t#L@AT}1ZBk%5TBWFrt5GG3a~?9E~uq}>knb$guP|6;5A5#DGT`m}B+wTJSl-O}H}6sn|As92v^H|=k5 z>ACs28_SiXFuOh)pq`-ZJB0}8ytGc-WMwutSYL!4a7KH{)2GnTSZq0Qi@VWp9<>jX z?>^X(;^&{vNYCL!@`D<=sPHbK_)sv$kE|8f8wk-Ux4zMMA-@U z2jMolrFYgNqpxY%PthOzLgs!?Hp@e$^qSnkl#E?fs;MZ-BypPcpWYCURhfy_&M0W6 zGo3V9++};sB>lT_+O8>ml?ysME z1r@IZ8n#dH%6}-Sq(N3@HY~o=o0-1|<>dvAJkFBJ9$}Wb2&}WBF~M#o?^Bk^mBnKA zM`MpZ(JU;qkoMyyGtr(()tmq?{D-s>Hsv#Vvl1`_0rM8qAqE?Z>&i!z4V>Nwx~fN6 z<-yoWeSatLsT63eB@IvaKe3y5Ng{ zmOUq9g~HY>^Qd9!=%K_}O1Sb~blN?r1V38YL_w-Ug|d&lz6|6)FBJw`YRx5du=j|_ zf7z?hG`wMZN?!W_lNT?|+g1@FpEO2Z1D{&VP76Znhg&I31sf0F;S*+ZaMrAkf9NZ#rj*B#%kfQG$L6QHdI4M> zdh(r1vyt&XoK!wrX~ki5m@cuC-H9rw1gG9>XB9Synb@t-QS5BhXLH>@MmICJP!}BG zCXD!II@htr2Ysbg0~gw(|Ky2gDAZh6LCbM_zE_V zw6G(@S2)WxB#X93Az20XY@6^0TA)v69%~7=WUbVO_w#@np&&W8844~p+=+^^djOpE z2WrzW%V+(~q-uoRT>M1EQX2*E|LpO?bDXITQJu$f;<}*s`a;)NfvJnWBGV;SYij*z z_9605KcS<9p^kaRm}`EriVFompsMj~Tag2uW_IhWy%r_geDntgsa!75MV%(E=Mx9< zE79U#u)__-6p(>?F!wu!4DiLj$t`$NkI-Bkuq%s=&^o;m%2|z!q2^w@25$EwamssQ z)il!KOESe?BJBm|FD7MXma@HZ&|Dxil!i%L#rwix@?$FI7wW-^7DHEEkhgr#YCvC< zLo7_6P>znZJ*WGfm`l1skF=0(^QMI>7}e4OKEp|B_oi%18c4P^M3u-nh0ahhTN@p$ zCg6(a!K4a^*X=0wuj4^wHx$duX{CO425S#B?S9aUV${eV$=bJxxCWhD2K1!MnNT~* zZL4DEq{gdl*JZNnn^XW7*D=CL>rWK5AqTw{oc)(lI&lsp#nSITUm|yHwQg> zHL;6WgWU3&oV1RfuqR0F3i}*L?HTKhogU@s3jsYGY+gxNv0sGJY!vJ$y%5%;%c+b) zv4(X|xI`xOi)F!(EoS^0S_|y|36fY$XaVMt8C_h+=e8oJ>IIc-A=su->_vgK4J>2@o?f#Rh&Xqs(27;G~Qj{ z>3f1q9_74`pl+@Tj=g|hqZwFTJU#*M(FG)1ZH0l-3i+s1i}#kyZqrOwOZyUUJ~ege z1K!1Mu$)9X#D2m8y0>42NXuX&={Phv3Fy9OQaQgtiyP$WPGTZpFu8dbmGBSLd6}6v z=#7GO9yf6x-12EE+h}Q=7%7}%he906;VJY({m}4DrjM6F$?`Lgb%>ic1_UDv^;=H5 z=^-E{9nceXLMPAzl)4H3+<^Z^ba+x1K$oZ)+{;pZHIhI=Av@A6ck7pjTSI6@kMY9#0}OZvS*{cB{XKh>C(`-XW&$u4#j6VpBokOr zPu`f>yLXo_#0nS=K}I538xoWDuO=1p4Ia=$%_| z*3O~UXO{_ji<7V=#ldFM2zl(#gl`ShFE6ZLgzWGWabPd&>^xQ`bAdUBPJ97;!x2=P z>v=0F#t|@~->LF!^yjPSs2*55!2&vgy}YL*d_eX1(JJ-RJMsWJ{V6=DgP?I8Im5F- zb5b$Aa7C&jzn4FwQ7_7t;`eMn{h#AE=VwP_$8lJiWO)ZltIA4VWj8AI59rzJF^L#2 zWoDnzGiJ}C#CFUAr{|td;-0!VMW?89bJ|bfyqAIbgn?o_#|NVc_{^_%Xc}fKnw%H# z){8m2Z|FQin_IqvtTrV&&*U8@;#26Q2g$@&QEp~DnK!3>+&WLsvkw)EhSDSr6Sjxp zJvV|RW+PJ+XG$wS8tc$@ufM_DYJ=p&^It{UK`SeFx*C5UN*|Gh{;erhw2Jn{XFdl* z3z(5Sf6@8~^O#^}VzRn2%H;w~K8CYrJ`LRLJ2R1Z^~F5Pmigu2{dGa{&w`b9pyF8o zr?3EauLD2H48HR>vOs$7^Hw~MCo)xd2u=4JQ2HDoS!0=ONC!3^4SsM~IqEp)$nIF8 z{Hx4&^mPfI51!WE7T(#O7Vf*Qo35*_6F38Qz-1>ACUu*mx+4k>xj87q944}RFxlFd zSx_3HZxbC!fggiH5RqocGQXQjA)pu)bt;tz=4}^nzn(8yaxxdLI2ZncM76J zONZC-W%{kEAc2!Waj)|}6UDvEJGKM4O-q#~gW=W%SIP(amlj#B4;CY>hr@ zFX(+~)Lz{B8oEHn^9&GkMl}c6#DHk~^d8EE}>ToHZTI zm6mc7^u;JEKm<1t>(;_3JYe>sf^~>KYK;B{H;apUe|BWgqbJ{rW~(=w%|@A9h`c|* zW;dhjne+2)?BiC>=W96};$b4q4`%CTgL_!0YPRAAyNmyw3PxE3R;mO~@F-EEH#guv zVowrX&jOxyQM#&drjl>->~DiXhw2@b4mmxy=__dJR1{!!sYYgSI~wsOLo&1k2GQjpc-2V(MiUm|8v1_rLh5kD6h`xenZkd_<)X3O15??T4JRZ0Pfbz-nDV zwe^yDlFC1y_fWI7Srtq$hndU_Mk(XIens~(t5=Q_njJO5PW>GE)0=uPw(V3v&9MbP zfY1Ep)2S)RZa3@}VuU=Aoyk)jzdIC11IJY7NcS1G^hGWf;YkZ=a6MfPx)GgOx zutvg&OyF!CA@=uxT{+AR{gb?tnP*v#$%%tZ>#l*Luk^Eye1VF4Df55#P+7akq_vry z83rb_3wCK3DCJ&|=SAeN=iHx=Pwjf%MJcAQzB0po2An;_tfdAsjv@CN{B#Ocz*o!39mxPDFoB7gATzj0cmWim^04s4O-*izW0qKiX$#F1zoX7} z;s{WZdL!iK{gHX2FlLQ*^Aia4OuJxWGH`>2^7SXX?Jo1YYQdw0qH7Hpk4I#NsYI1S z@bKx{rLrkFEyX9AD>)>?}^|YC&JcDf-h^wP0*QFRGHH|MZf(S zW%hHrj?nzlYW~#TWS6V--C1zNf6Wa%OvG#pid2I4_=#G22fRaf7_O$&p50J7Pov)M z!U=jq&2SzJ@F6qoIy~nI^0vb7`im&|H<@ZSIjjTsI5!+?HtOQfypIGsJM~%LpYutj zZ8yCAb`Z2{oUD^D#fw26SMa}E*#mI`W@IF1umbP(J2!8SIfP9l8BEJ~Yb@a2DCpOo zvCV8F`^;vbI6i|%%T3S+%iLf$4M#CgwI1jniyuKTXX z{JpQM20o*+onAKcu5heE!}b~7j3NJp3S%hD(i2#a%HkR3a4!>w#_{PJ!C*C|_WKK- zw-mf}d-MxKVA1PvmcVGKW>UjDwIHA7w9Yf#*or8)9u-C$r+F&6>iOKF!%WdDoc3vC zsr7UA%SX<9@Ali9xO ze72Hw?GLzLJIQPVK*O&aBaLE46w0(XbW81x5yll`AQJ~D(22{YP#=bSnKV*vf;-Mv zHum;%_F(>R9Q8s4*K*f8m*qO=8p&3q<+xeZb>?z9oOd0)@n7+?H872QP#T3>L=oOn zIdLcN=mpQKCFd&>o!}|n%nKs;O3ocb=JWoG)@xsQOOu-`65~$1aW#RvM4 z(3Ie7p1^7*ReMv{rRJnoWt&8KBHn#`!Vlm{vw-R^7pjxbDCn9q!Q=qhe8N`-W3y45 z+WiQfWHNQ<)CJ3=lt%tj+3sZjts#U#*)zSPNyWM&;k;1tU1Vf=aNsqb#1Pgz8L)|Z@Dh0`E#D!!5PKafR^ zQ0MPP5pa#OGmKN7m2BRFbAgL49d=1nG^b!0o`I8{=e->O`+W*Fn}^z8;?I1}>BvEE zSx2l3E^r71aZ9T8NKvhdmYGhA>V~3=&9nUrk5J?`D?AQoXsY` zqVYb7BirxnY8<3FwEOBkwS;y9t#ugv$TFsH!>A(~fqrs6*$O>ezAaZp$tgLSv8ON5 z*~Phr9oR#-IqCQ-HJjE?<5`r~aarl1q*Lz5?eLcA2g2W#PQ4NN+08Qw(k&k3tkncD zd`?te%o9Hksy*v>|v1%B};JOjRB7 zBxuH+c|i4?*y4*-OqY*0g8Wtd5rNW`#YHH$bdum|G!!>^s0BLP@Kff?tF$FAbpiN4b?y; zA2Dl~4qZihrjQOw+vRdf1D-&DjhDIEO^`v}j#{j=oK3Q+4FjN7y08+Ln{BA>>NDAK z80IN7`F5PUI*6LFF+JZ{eyYtx#D}nH(~Qd03qSN7s8g?Mtu&K;Si4Xl?o&_j*CaJs zT^rmV9Hnm8GuYMTEUxG7Ij(BTS(qY89O7v0T@&GsY#V;v)7uqvEORt-j&^;*18uT$ zK)k>tx{Rj80q)yU_(CP~mfoNZ`qhEvT4p99%^_9`CPg-)_3|)1c8bW{8HIwx$=giz zwT*5v65jeKu`-J|3tb!YOw@c#F{0!Oxg33gg9~j5f6KYv$sP28Xw8KGh>&i=72gvx zNF^i_r;&;<;k$?`_fUr{r7tQ4(wGCxtUZdg35JX}+AdVcql{GMLHfTkV35bmcFbS2 zVms3gZjwx;{R1Xs0QW;dLBx(f;VkuITOwm!^Z_9^n@MD|>tx34@auEydlZKzT zJWsj+(XRq^{8>7=Ih>c{Ftq=O&(O;>kybEAl}m2M7P{ALPAjLhSJo@nm4hgo%geo_ zk4!fDVMJh4LHZvO)lzJM}d1>Al|`3Lhcvy`UFHs;74q7GYul0$?cm_q)jL%z95 zwG!k#%%ox(#-_9=lmtN|CwFHdef$KR3UX6VWB|hn@K3#n!zn26rjzT=Q6FYO)wP|R zxrZ8jH}7yfPiF<)-yO?AEi#B|qAJyd#(7FjuVXOt8HfJruy_pRT4>{O4>}JKWcoP# zXknPtXcV(kQ1WF%l{bxeu!GoGjV|yEDwNr<6OsISfAK50cNIQuS1{UBR0`?PxQxOF zXCZ3GP;a*tcat!3?p4r|S9Ij((S839Wza(q#ZknkIn*wX=odbNCoSQgZYOfg$dmVa2u)i3&1Flu_zIBy0x;G?t#}1zsaFrOND8-vN%3NBkc+=sUjq96sSv zTww;%qr9cg&P`V6LvAi@dpIlAL9A66F(;GD_4pn6$QTo;8NK#&>ZnWf`XA^X<`G2~ zaq?DkH)81GpW>fT6&2}cD#rpu&kxo>`pCD!2Qj-`M)816WpUh9Mxl7LVX05aFJzB$ z4Og%j={41PEpq<`y0rAxCu*Hi<_+eVj~cn@aC%X5tRem-fVcYTd7aEOJfUMrgL_I> z)W6fw5-w+IN%brgkC6N8x+=Ig)4sOn08}4H*qE8%HMm?&bV)Q5Fc|)_LRQQtR zVmh*m^o@5|iGAA^Dz{T~+aa8)Gr6QVUEe|UcKhijPtZG$W&%qh-&xkb)T}>XUS2U@ zSd;(aI2m*+49H9nniROK4mj2P0c&6`jb0pB2VQxPoc?u8H*QoB1wHK&F?2 zSytk!6`#C6TEFU4vDL{ZlX(szPbQ8xE0Oa`an@&Z-ctDPA+Mlccy@n-EuXKo>>2CLCo2c5mWQu={JV*k=P-!>8hVWiWZ`RY z@FnMwmd3Paw z#@~FV>U1QhK*wgH0qezG&&>=>Dk{D*s4TiNtC$te#JGcuK2fhu=3=Vc&Bqu;p01pN@^zGLW+ z(^>cJsp0{g>+<7=I1D^H5nq;-@_2cyyc741cI-~64!5ehR z3F?_4AY%8>VeaSsC84S8Mz$D&li46impZXQa5|nYx#US~8Fits=}bKQL`7$Cw;OVQ z(pUzWW*b=jVdj!EfYV&$%%!FaJ;*%u6@2yXpzwAh zU|4(T#X9i@OHoxNfD!H{*VeTI)Je^#^)i4(Y$Mu+_R6QB2R=l#{uPIVW!#G}5c$$@ zW<9A}L*MZbQTP;H`AxhQrolFEq(5j%)nsuJFB6Rh!<~I2rX}F$71}oR58q`MT=X7# z%WM3eMEc3x{0wp6U2ab2Sbnz97Ol|UzpnHd0V>+#{CtbKVIiMXh%L@jAu!Oj+?IuW zziIT20TiOui69}qz{@$$$sOO015G71ZViQ`$+!(P~7Owf3u#LycJw_?5mliB{tV*%hEC@W*|jY1V7<_Z*tT2x6%~Iyxj7r6`=WMOBMzhrFUwa} z(Lq$1O|EgFkGaIBz5!}=8J1)hpKvn0)kbE&#t~f>;r`N!*cISR)gXR_;_`3Yr$0Ea z2f<%`#@`^V)AicyEIvmi-T*w}i_so^-F6tcba*V*MWy~#`Rv%{%!QZja@QVLd)HCt z2S-tQSC`Tm)x}tGB2{I)-GiAYIpZH(UftgY-gUK-W*Dt zEl31hMHYO9>r`3ZPH30kNZ!l=IBXA|EJe9Xb-+s^#C234x8SLsb9X*~sk zn_vfh>9&fX6zC44@`T^F3(Pk^oct5?$0gXGRtv;+6#e2I@VXYvW0%6wZv9UW$^mp- z{{yS|470k0JlqRx@FSS~PMH1mpa34N)p7KHRN7!%q2d zNk76FT#3_22K|+Goe9HmW<}@l6HPSoFz;BENK%S=u_dz}4m!}Cyzko5NEBCsTvz^A zJ}K++J9!D-C?8PB-@>`LHEsyAnEglvFRr5FN%R4a^h`e&&a~rHQ;bMO+Lkat+KPA1CPsHCJ0|>PjFhGr{^-!L{l}spf=lswssMkR)_47-SROykrpu2 zKZtPizz^K^N+#Qj5L2^&6ME_5>yiOuiDiq4e+!Avu|%s|)MhV<>2tZmXXzRnaVq}g z6C1`Wa?U((sktbZ<})F=59f*9Y#Cj}=GYowR>Qg5pQx2vaFQOGdBFZ_Q1@jQF5y*O z+_B3kvCnvxcX-%=uu5Try_-E{Jtx_?zQdW$(O-VUbb6TG(43~fW9NC_;H2QAU|Myi zT3MT?*ETm>bE(eC!h?JkTC=;bmpFteuqjdpCR#QsXO(a4R-fw_&d&QORQ8X!^UcH( zYym81?$?KCW7LB|Utn2aXy9>RWw51MPRobd?jQ4;Re)~pt=Lw&AVuN~(h2{w2J}6- z&^ISAvwILHms$9M{v)+wF6J8A&OB(=V$i7VV`62#na$h*wwj&k^4`WtBOY`tq-8kF zExpXF+-`dK*5U?qqY|!1Cz&dWMZ2|(NYD|KtP}jrGqmF+shRiT>*K*&w!74V8gZPM z7A|i*(fbNV-k z&XMJ8@U0sljScB&#(~u@G)gcVo|cYGW?noN&#HBXZj3UiP2f`9WKk11e`#qTJ`p57akIcOGav?$LD?1(golV^LJt^L9VfC5*`|8!;!peE; zd2+eaxDuTK_(c*pZ|kflMi-yvRRIPbu6bn0vt6fKX*7Bb|m;w9%|mf zLS}BsOsO~qBHXdjqTgioR#@Z zhvAGn;jXZbIP-vt)WheCph60xI(h@sA5Esr#a*lmAN|@gi5v~7BKlG}KcEBkF6tC?7Qk6Y6w=7=NoLio9BdW<=gxrgrJGbvmt?&!eIm*(!E zdz5Fcr;A7ORCAAVm3HpM4Z%boUf$fTGv%%J4E_;Z9jv4l)e7sS@q;g6r6-F;fCgtJ zFQdx?cT5ihrcx7Rf}IRGl0{Izb>#L;VS1!K4A5G+yL^R7n6e_=p^Zxjyha_S#lLAI z^xus#MBq$z9ia*pMJnp&>`F7Gtx{0A#H468DOL=i(+Ialn!lM>d8WJRB*(+D;9 zW)Gr&cZ0U=r$X)kVpRwK>G?S7Y&L2$)mzEvX>4THz97E8BM>BUw%5cfPPHJts|O5qiPui zL)M*a8BZ1(ES_TGa4r+LjpWO?6HdgR@D{U$WAMFOAWL$hH0&o*n#hf63`SKBo@)+y zs3X0oi)o$R@Kt_xi`QqmZWr}jPCm&YzT%0g>v2MxYu2FZ-$o?8Os89s37n7MiF574 zaG4FwA9(E*=F|UzibCZl%?qY#SyfRRwIqKP=Up#Ci}9EnnBF`^erk+^-&v+m+reNq zfR!vt$6gcGZzYt)Ro*X&~dD(OlAcgC7oVA6c ze+_+&7N;#z`>5lXv!1O@&|et+EYMT&p}f+eI*Yi|Fl8I%{py+Gx$3^>+KKPZ54o{) z5&SL6dTTu8?w!`=X@|L)J)jVm<1@UIDmuU!HbD!%5M%zM>mA55Tge2OlUiUTlRYow zE{fCho~Y3aCesDtC*h7`vv&lKBg=~?9Bkq zNeWEc2fo)496t{0Rq^?_h?l~0{e)4;>LA3+UmS6sD`9;i??(SGHedXn_=WLW+^N`# zF*TzKM|}0}a8GkqRVqu3g|XHLqd#6clhripb9S_R36{aNz|H5(q6ut#J&XrkqWXp1 zrl-{_S}J3eIlvw(E|#Y_3cF^zCwuaFn|c**b5C`5D%V0sapf&psDY5%&SEtJ&HluqqV_WI&B(nWK^(Sux0RE@R;h>!_2bwHZexY zOC8w+h1y5&9o|ZNPg%F>+~{bfe3xc13zXR|%(jpFMqQls_H+MI^Lg(WUN)NUgClaY zopgj=kKI0V^&)6|N~2Ej3qzy_ayB^c3NF8EmivdhxTl(@p{Kg%xjT=$kSn9}wK7*O zE!`wD=uFrDPrsqXYwgu5!TQ0EY=2pKx7oY1MXRrt3eFEq3%m@B zz<;bWxnw{0q&atHYH&_4otjfSrf)Jo*=?jVit0S=j`D8yF84-y<2{dDjhy>&=lcXR zQUcAf4Swmhc7dJwQMx4>8}v@>c$4)bS`W7UE>vr&?b#i+T05oZVS?bXa8jD2xSiQh z8x(bm?hfb;8#pQ`N98umvyGPz;xtqcb$>_YEv|h#rT^%8$_T^2#U`4ajnaBIZG`Gk z_p-h8Td=h{Kx;*P{{s&MKkuOq{mvQmD)~A8gAJ$gh-my#&xSkb2)y%3;!|)~->py5 z|J2`WXV^8oReM0}{sVr(XQ<{I>m2;hK2XYY;2{aj%Go`PtV`W zaH^{6#rU^9oZyG}OO;RybD9V0cg(ipKaQo|Ls5g`8>POPwn_Tc>7S>Q(&kAN;txd! zBRt`?y!+kHoa>dTQfr~N)yinCO$wm*Sem2EPuD|B)w3;hi*IT#dH- zC74?jc*ZLDi5y}}COp>Cr|br2t^sFXfDU2=+O?3M{2^v{3xKvo*~wt|Q#rv+$qlE` z53GQv9i)H3uj!t;Ks~O;hP>3YW&C>)D%nN2x|CN3vtc?fPEG@fo4Lqpx!GMY2COIx zO6jKH+qvwT)@<{z5#Wgr*Kj(-Bdx1iS~Y@~gV};T0?Yi*eT{uaikULjcfnsqU2IGj zOSl5z17jB^yh-yZ-OTh`)AvkwFO8b`E>(s2gRzriYDZ6vY!d#fXTRgE=(QedYXiUd z#wQ<0l9D$kZ%OIuJMD{QyXtfQ!NBccS1rLPVvQC~OVN(k&hGBTo*v$eVf}HfSrnEk zERQ$IUC9;em@RF#uNj%NXMsxoj=ny=Vr)xn6l|mB(7NLal%TCtJK>wtL|d_~;xs$LdQ8oG3IyY@v5+|sCmmaF z(6_T7-2Gvj6`_%MTS}4-DNYomgHg8am9B_CKnoM`BD_JL?`H;Rh4Gg@SUbT9eZp3$ z(&|z*Sxu!yP(Suo!_^zXZNXK+UBTT!g`aS>`c5s3V^c{oTTSzx)r~2K`FQ9Il3I)V z?aSs8{e(K6ZP#j`Bb#cEt9@__9gHW?3wA#{{iE5myCJD%Qihb&f$zE`&36YQGsn+L z{E%i!+CS1(PO~EMR;mH1-HX9Ct0|No11urn423a$db~aDxA=Z>H~8|7>frxpKlW+9|kFQO~V+q{y+7-+pUmG^S~_*>YDhxIS1}&58$3arn`WIMeLG zt9WtnpTKtici%+c4PSr%kigsERP7JroB1mke>D8m0y0TXD~b(kb@gl9o1t`o{k10A zZ^TuH_F4U(noeyH{5$ZEf3j~(%Btjo$pezB_|^qKn?AX_H&b-E_zel`6Jrw>q{z4$&pg+EiYRrnmzn43Uq2WXQ4G#CceCed zY0%-nm^>t@WKy-{0=`*+=GuMZEY6EysZ3gUm|%M@4OSXD)4Dr*vUwYM4|p!%6Q!{+ z%E>hKXc(SBOq1OJaVbR2*xF=o5xV_BFx(x{fwe%BypQd29VC}B%F)&N+!=7LaL#os zSGMEO)tPC5zrb@R;4gTdDW73_DQ$q-3&*OVxJ8A5DPPvlX!+Ecfx7jEIMm+{ISgs?&JX}Fa0;w%G8Sg8PoM_T3xk; z+FZ+E46rhZhCJ8V+5O2~!~KTsl+W;mS;i)|ekf#{DIetAa#86cGhu?&l%6Ub`jP~& zistBz%P}S2TPlq!&S$* zVB6Brd9~LkYZFTvpYC7a|IfcAFe=zUy{|@4r!U22Y#itN zwW_M8)OPB$;Hf~0-{tS=n~+jBrF+V%l*;U7t{Z5l)-W0i=afdA75)yEGd;`$_|>Q(97Bguj6Qm%t2l zneow{&a>PueV`&A0gvLhn}8UmNTcK;%0qC+NJlwXicyk;i^(B$Z%e5XcdS@GzhS4`^qM%T_tKy4^(P8A3U+0vzBO`69?8Q>;9@%Ixsi zE!|`5lvj?FXENQA0)L;`J_kydR~Uo)>Hml>nmiRvP8+5(i=#z-g<@c%*qNShGqb9z zkgd~bcv_%u2YMAU>Mt2c9lRP$2`;DB%@dgB7ya#h8GQbf7b&+>E~ku4*`6}SS25VcC@Jo6 zJjD&{h2sT24ROwI-agS+;^Pv#B&3Re9epSwf0*k2%k`_PuzQKOVnl4zg-A#EFjp^W zlvNBJ$PjA#wrDoy*~6^*=4u=}gT@wf8{NzVqC!zV{SvyFVkjT7;UaenC&jk%4N%oS zp!R=@m&6pfjShHr?_jTRb@cti!O9df+}v*HdTG76HePj59c0ul=xOZ+VjPN?D_1R2ty}a2k|%Grj8r`#d|6YdCAUJg!NO=5ktbwEd8J z#AIf5FmA*n4(4`80#o%&RSL)d|a+jt9${keGKyPgO0WjD$3osu`dF79i!*g zAF@AmyEa`bttG3vnp!JK4KxV{^j6$}V_*nJgE`5m7Xz;YX@kGAL-TWBPaqK387!#j z`U-PDw=Wkn^2d!y=6IYYvO90Ni+L}2obG*&Yq&pslyf^jxZPpb!k&5`y30HL%*-#9 z=Q`fH!o9V_eh;haz2$D;`UNM!XUYsE6VWk^JQm%OQ+g~egXJhLe6VJi!;MwAe}C6M z8aZGePvD?Z((Yz0!Dl2NeY(h$R2=Nab2fn7rP9A`W~6VNiw3PWE>BaK0f@k(vXfcZ z%7V{81(?(7b~&pnJi;GnW25+$3b+!z znLPzbI0Gaz?|p;0^Z76xGx1ou3tLwPO+yU0QePBj3EZUFsN$NS2ijz3wKu@REyCTY z5!$32_Hd@Z8yNZYLb%Ou!=Esp){<_zBGLQ3|EsTv?{&(hlyKiz-*;aRzaua^@F7qo zxGva;TI~v3jc0>af3|w!>he^YCMU@Slp;zEWxvwJ@rSd7%W(B^?{=?o&v#FAPj%m9 zmv(ATqUVi!i~GJiwP&#Bx#zs64tXrjo!3wlX|nE(IpXG{!g)AZt~kT!uq)-sCVfclLol`O_YqRU^nQcb3ZKLZw7 z5DaKFYO*XS9%`XrsRuip#ME9i|PoRXPIi+mCqj7B3D{mX%NJerZpi zH4)9hYj)!P!@hzuOhevAF?*Tsei#;T4c%)YuiiDs7PXqzoE}86 zHrs~aWphNNJc-EGPs!_ucjk1Zbsu);aWmv*CNR-48P4V#?CNVczK|1K2^7dL(6quZpe^WzvM<_F z!8GE{v8XCa8w0_F+cE7m8*J(gGYnm+F_Mi2aJnJ(@d`@5lPHSw!0M+()1ol_7l$e! z3D1i~QYm8baAtm{kOwxw_bq_;DUas*vlPyaOhj!qS*l6IdWE*~8mv%XlwReCUag7U z_nENz_ophX4m?~dZkgL`ug7Y4)C$_a+C(igXk6c*D_9};F_<4NYfGTGzm!k%)%WG} zKV@rpX8+cdAIWD@rUWYKGx5P$VLj1xb)q)i`k{RHzKUoUQPi8y=}`Vr`nV2xUx!zW zNE@ERd&WK1eb-$OoMX8Axbvk_g&dcPIqY(%UY{zhWV`_EBW4oVNqx&jb=wH;$6sQinQ3UmIinRj#YE=#hfcSxXO;F7<$U-NDCE%)vB9q_gGJxY0#97XM1$5&Z>WM;${ zYm3!RpQH`7B&R#_x3~;(jiZ)^jqnZ&{}{C(wnALd*q71OqobqqMESzIcq_QO(ZwY> zpDHb+$#73;r3hIQD_V2a3;v-0s5;-YU>%apVfrDe?5Q}y_EV>7^^LP2rX#@oYQgwS zB+3q8e@DP*VVxA#F~zRPEzzvj1>dS>elknj`NbrVt1j|Zk!eoi+zz3cv_Q_Jyut~* zg*2RAc?f*tQqd*XB(p72PRp~UxkT)OsLh6S!tOH#yU_=I`h~LWSIcs-kS`eWIhYTweB}vzO$qb_V@! zpmX53dfB*yUNE;&L>(6J1oj4A1rxRIczKUt4r&T9Yp!7!3&7S^GY{C%d|+i4Q^V|U z5G&aEQK|0cdA7h+Ftj(Z3#VxXs+sRNTa3err!(4*0rU@(V1i3ZXK~1@D&3OjI8Hk+ z($$qwBAHA2A}x_^`FF*wOoBUZCUwN`;469QAq;*7)R%WbL}sA>Okv8eHae*Ts5$ce z%v1TS2{3GHVWf`2rT=AI(v!5HT1H)iyZq+BZ-IzFX3l4GK2s0>YTy2pVkxhaUnh^F z`fBNW=ey*K_T^7GmC`jZM?Y^%@LV&^KDurQa;vaAF{|TxM)wSR>fYuZ5!EPuaDpCx zFXmj-vZ$WXr=yod9gWx=CVESFez@YCtCgo{Z2y&ah^4KW+6YjAdGg7;1+GVkveq zWkCm;883u0LQZrj15qb6AO|?72NF$?`7T{hy;zUWXIl zEHp0bIG3N;{JoDk{r}Lk_^B9nfUFJ;x$`mIupZ>;1+&v{;Hm3^^{Qx!*TB9N)eqoR zd=(7SA3Ou5u{T(m{`#0dx4#};+j#!Iz_&8xd-9;5hd zJKQrKX|MHqLMN9m;+L4A(N?(Wspx$ZkuO$C^&>6Oxc9ME)PeBnUdcP# z+u7U8bHY{5c}D3g?YHk6ad0=H8I2OKpZTxWEO;o;IOtM;hi}-0>-2U|)mUw$no6Cb zPSS?!MPUy%QQ;DAhV}6rho%<(;lfE!HJf`H`o4oMxB9yU}JNA zr&5lmJY{G6-INt6VJUV}Zup_}&B$>*df@j?o9Im$a95Cs&ck$}vl#9(kL< zfi#auh?|&DEFmJUL3CC&By5gv8NW6rQ{>99OktP9c8AUMZgih=%8u0XGhqn6E0Q_b zN=2O)Wqs3n1dj%u2779ibX&`%T~-}hb1jcnSe*h7{zz>IPu7o__&jJ-+oH-ohlW7` zuc;!`5ZyR+xKYlhA`&kbO5xHq0*BwBaz6Pl&fFV%yr480?RiG|s@zc-s$7s?!**_$ zF3STPb)1Idic%U~@m$<$S9303GXrpyS@ueBX(!PrdQhT|LG5ibn=u+q#z{DmEyfZ< z0K;!b1Yl1SsO&Q}3cuq`c!Td%HwNzpss_IJeg574U)UUn+`A($PNU&0yGnl{1f%>e1xgTALUz4>0 z>apOc;6K4O!RLVtfdZgag?y{1>JR(M`lh6KQu-vfNZy@%HF;5Tm*i~8hmwvYl}@P< z=&RG4S++h^&7>vRva5S!VoZ(5Y3|!fSLgBY3UT)n&ZJ5eTRm!ER6=Zr_+s%xVva;M zkI*CJsHc&=BmCa^?!C@g3ZzNW^xCl--r2(8SwYEf|Z4q7Swi8fj-PW@XTSRBM6 zZ6F6Va4)qpdkUUt3GfV`c$=5trlN74DQ9nlS#4&gw$7PXaIwouw9Ls|fQQ_(2#tMX zoKRbH2AAO8TbE5$Nz!;F%vsL4+ObKgz>`hyeD8eh40A?1rYqx_>UgCLB&ODq7f2VF z@j3&So+$o?yZIb6b~8{e9yc?gb*N`<;C6q4_0NkcaxptjHfx=f!pOT`I^Curk`On`^t)f5Gmunl< z%K8vtt!quhpHch5Q(SqRvS)nM{8a5zFH1;@eGokKx*Eco2ljUo3DY*jNj~a-Ip64^~&p>RZtLb=Ajd zFSUinB5N5gCdteeKQgYG>Fu#hUtEyhN@b+#Xwr&{{iILwJ$iVRKV>>T`)QbWJmDDV z$mA%=q}?#bdB+dOJx6WFFl93{WYgqA${aW*m;9J*jLE_v{D4QHPOS!i)tqhMC#!^2(k{}vs%)+qtcz-#DhQ&u73t!-Q>I%4L zhA)Y(822NtW!$9LV9b)3xR@EywWE7PSB_2`%jP0{mR)Bx6P0;w-Q?1 zbwZ5&9i2{db#8D5XSK6d8{hs$+O%NxK+V9!U@9%3y};@Io%)xyQh#KWHIKpOT+*8u zg~3Tqq1TIMetiup^pd=%c+`&n5=S$Wck-eiyp3~f6RP$6N*;W>CxJgyb@X)9#f2_e zIm*BPsr;tYV?$^Ky!KV5O)^Ra=s!2&>RnRmDLz0oUKTa?GI+WlD8WIX{moc(?Sw0kf$_ODb_ zFX`tC5pNUWSOTof{fJZJakKzU>=I0s-xqt!#T<8>e%B@UQO_CghOqMCiQx;vMuk-f zpB_;;@^!hyPlz&zQkl{DD#{1uaPk0ouj1&M_3q_3&bfVD7t= zaEEDx0rZ#ig#6T>e(KLHsF}vFHT-w!x>O7uXdHFtWH=mA9?hoFSUTfYV5vRWsO)Ac zu0Q(nj8X>t{@>!4T@kf>Ximjorgk{${k~vOxVticdz~4uYk1SMp-oSx-vaYn%)X3? z>Z9Ps;2tW#+USk+V6R|m@_4V{gJ4_rw(8QnAb{)DHR@fpI9qLQX(~4{=I0i^vf3{& zai{bg<}et7o@i<62-C$#xiGnPkK?2>hkLrG47(TVd!D)*dd7J#g!KuR!*hpK^d9ld z;a5Mo`@1dI05*^wbu?7+GZP*Q+A><62(vYb8G(%88=|n9zGn&@N&v0FH}>q@2c1=5 zlFty)9-x_6!INIbM8+mmaNR(ip5OyA4qutj2J24DkaP!&Is^(j6OZ2Uc?hZ6C%dK+CrA=L&N8|QEA|4a9h8C1oG6W0U~w+isX!|8~71vOVe zA&b~cGUPlsB^fBUla=w-X}0_JZ;lbp2F{s|753$}Jhqay-nP}Yxi-HwtyNNrQ;8o< zJh={5aY}g_T`9ie1UC|`UO?GB0uKU37>>5%81Ce6%$2Z#=LpNqv#>sX857Z>1Ul?S zg5|`Z1Z~EXo=qJ+90!nHD4<*8iqH?9>_;>#;Y94UVORC%EVAK&m6DGi)YCK^8q-SS zaO<3c_w6h3Jo>z!Vm;>dRMf9!r6f4$ONm?4flCE^wAS%=I_kj`u*n_Bq&|SLn$z!_rhv52m~XG^seA z6oRxy428A-AB=(4=vXSEk^2N5+#N2>S^9{zL(@iN6<`;+0S-P2CiZ&Peid-R(kOi_%+9{CK$aaBB0pP*)IE#0M0K_Rf&TA+b$YI;4{_n)$^>yU{>nol|V zo`9r((@wD4Z3K~=4)1;lk=i|Q*OICa=Ib5*Ebx~nzR{fJFT6k4jrYJy61|&23`%>f zo=@)a?)>gBcN+HucLqtcD8#~`SsyWJmYX#u~vfI|lzSN%4{uk!eSIZB}Ai6B2wC+?sSl;2r zJ6dXpxAH1#ESq5K78R$Wq)Lx}T5q0INnEpQlBsXSrD_jYcN{$0Huy5W!DU8+#Tm?I)&y#ga(nAVkV8C2$@t9)`&adtU3gyQ!ZH3EBv+o^)X8F z74~;!*PQr|g|m$qptiagj?@b^C5-*OR2g#Mxai<@l8v%(dU6u|)gUXgp;6n3|IG(< zu3cGi9(L-LxaLx~B#Is+2cw{TOGp#adpwce3Z6yNkQH zd#>B>wtJ4aE4k+-E6H)dO+&HbmZ{w;MfyOqBXtM6 zxmuq|w3me)W~W{gcOuw3RPi3*5u1;?=VbUytM&ax4Lgk`{iCFBf@W z9DB@t-%s*`fpjL_Le`bRTg@}kox`1!+|pgoJ{dMJXMI}`P z{mGV;(*46XT#v&QPBG7-Tj*e}RaykMh?p3eE&NmP;2>*Av+%`H{uHTFgh#Fl9~eF{ zd|i0P@I9ezgGV~b*&Zv?lx5cb_9Bikwz2XVqchKRgLcDo;Pka#h}R#0KtF{GwN0;z zN_-uh<6rt|Sg42a_PK1-)jET$T-53aF8mJih#&Pguvm285w*A8i5;k;F-xBdd-pyr z1e3L|S|rSbHE^7!2(#&e@zT5o8+x`@+?XTGl-h&7UlhxOhz~J77+2}dJ^)qnXJQgB zQKg7e@;RPhTe#6)a}Cw%_GsW|ppG4Ye@Sg&v~-%f;Z0Nt-Em^BNtNR)Rr!qF~gUCRoc>D<1iS` z%70*9KYS~|ID4>OzIy-CEp#?4=Mml_9(Qu}GVYee|rmB}FE_KsX-=kVa=8o(g;SSFf)-ZUo z{Xa{x*i#Iarz@>(vy>-7YITZxYO>(XtDQ3~;vKW0-ox+n=Ja*;3tCh3P!04L?r4F& z)NDeO{ndP?HG;+LQn#DurMGf!>8deZ-Qqj#%csgWV3pB$dRC* z_Ai#hLLGfB)oR12LaleHantwBbvU6%!mFe!p6+UGxJzZVn*Kv{JFM;V`9*ytZmrc( z1J|ZP^nngwt;NsA34I)W6e?O`tTEO)mb9Wvw`(iZC)x!#x;Z7AbV%53G{7O#r{}>@ z@D$t?yVOvgZE3DlQ?goC!NJzy%|BK4*^b&f5g)v?{FY8p#qWc2RRK#{%K*89R6tBi z&zaoB@zrsDc!J-=U!ytN=~`w7qk`TIjoE)jG`VV^uhC%Ea$%yWX{g|b5(|$o>yrtO z5nBkIaG~9;4bUD?1)s(|E3AE`A7Lb1innmq2HnK!aUsu8L8r&tkQG_+Z$CEOnAj+_x$Hlk(N zjF1&U)g7;_QI@|#8RL%n%QxLO)gOi)C5PJElRnA$_rjm1iId&!{S}DrzN?4*uYD(c zVQP}r*<38%1H@Ub6=UvVvSrFBhoNv=^zLtL%hx4rBA4lONidn3WRoj9y_uvtSJq^OamaGH)OqQljY zCSh7+npEjiuTR+~a%5P^5HV<*{jM@rmhdQFEz~w`aBjUs+rNFoJlkDPFuE4*%>I1z z_o!gx(er7a)d`wld@)B!S?R34$>y|K;Lnv((pooKW688Xfi4bKG%&FImJ4DtbE$qD z6<=|<=}Xb$*E7y)kI_23^qp24<4ts!JZ&k=>e9F^^cB1CbwepV=k`|k8HHiQd=ej# zH8#M3b30m+>L|SH8fCN*{-N}u>OHsou`@Gmi#j5OVR|e_#;VYT!)e;x)*s`_;#q}4VUPk8^=R=on^DFWysaY z=v1>)wulr^#SI8mBGyJ`iQE@HDRfG3Ct|Y-N@e*w&uW$M#Hgyh@rC02wbWn8U)sAi zS#Yg#U2qTgtycGILyMTBlpIPX5bO>e_0#j1o8vW`%uy9zINd+LT1vHW_!vNM!}V}{GT^Xs z0R>P>9HSQDA<>w6$tm{OK4u-9CEVt3xM-m`DLmCz8C~IEoQHdopudMVH-OkL8iiw+ zv5JcMJCr1!*#lGfUVEyOOHTO5Yi-~vlaw#2(v$@j>mLz78LQpWs$Sj9z*nZLa!E zwdu*mGP)m`U>yM`)%$#;p{Cr3??s{c?n!rwfVbdII(MB#gD{@jfgL~lOy(TDJxUT8 zreZ9*tGY&NR`MAfX13_Pa4xDx-v^wV

C}73IHV_hZC$?AUdcefS7Jq^HbtaSeVO zd#IMJp+b9-I{9|`?cRsGQXHf$6S$TamFxv@?+X7+1J)3*+*c24ZQ%!xB~Bfo%>&~~ z1CBM6{>rOZTU&?*_xdmR*24WB*1hk;TI#DL`H@`40#efEF@>x^YD{YwAY5mQ!&ix{AKHCeWzSknkvuaxnf~+ z8>gsAmXOVV#`8A063Uce9=G=bRMdXH=H4WC1@|KNc<(CpfbmV}DQ*c?ko2v}-=Q0j%!LoqcK-9to+J}p6?#(D}e#&AA&)^@-r z%d6f|!}Z=UdD4M^l>xJP!2Z*O9rTKMT{wk``=I!Zj4THIiQAmZD7C`#J_Fqm{V-eF z;HxwYQ!1G>jZR7=}nAeHki_HOeW^eo4npsYuBMyF6^zQjxc<}e$K$g zI16qTa$16&Ys1P!{fc@QVGsKloG*BK$i=YikyBR0;asPIIN8bR?(d66Y1po8I`A_H;@T)+_ra_#w z^~~3}bS$vQRux~pVYYA9Jl1~7Z9LPCS*j^Fl`v~+Iv9Vl^pSUp8R%Nq0wriQ(6-I6 zGFdtLQ?$+v{2%>U;ik^w^~UR8P`)}*ybMz(!@oIfbjN#a2sMZr#$V3Rd}ah)V8j3U zleD7l6G}(=Fv|=k4HGnQ{C;T3GglkQpW z%kD^TA$5XTR(=WkNN-A`v@k_k7Bn(EPoyK_MCjPykwN2vJVBR&bA;{RRD` z;_ITF(~IF^Q%CPYN8?*+as8{AMeak!a9inPsVYB}a>!o!mSqu&#o4m(lmR}f zMqB0bI2C-u2QQ;6%3a{|<1>sdHbk9F-m;ilMVP)_&FO#TEdh6{tZy2%oVWCUKB*Pb zo}rP;g{JN~Zl^%m++fxBKCX@lH|9^DD@Be0R>u%ND5@=(>kX$iH|z+32y zG@NdJP3S^55DWG-n!8L5KQ!?Z@n)E z+|v)9x1Nhs;3iXF@%r;fwq>zUR*C?a%)B8KuQ_?*(sqpTmD$6^$yw0vMtbQOIwAw{Mlo%Zn}V zi66JvHrk5W_E`H`TUaYwTaty1vo5nXv#wDFSPIK&r0VGKn!-W-$;tkjidakTc)5Dr zKNG|(*>A$QorNN3G>V`GsDa9%XdJHArjM2nWd51<9gL$VF0E&ctZ*q+^Mud`PsprP z2%XXyBF{2-$D||^w95^o-r^}x(R$P&pU`cl5dKn6aqIQy6Va(|4fJ?L#~N_ZJ5TR~ zLGZw8knJvot!UFff`lbAOOI1=D-GlAC`j*H*4HwW#`Kk>`j!(Prf~N6hN#vg*seF# zNB-xaWi3IUVsR38s-^tR{5m?j5Bg#PP%7~HdmvH-bvM>!kz*(QF!}b@geQY z+5QOye;qn>WYa#VTX8XYp=L!TUJA#Z@;K{1)_Q6#a;jQXR4(cBaISj;Hh$4~#~IiW zN1rY*Y@3K1sib|S@6Q5QNXO~-beDX*g*2L4WfrQwG1Pmf@Vf!!L}~i0T|hbZmUaFP zti2bEW;@aRN8I~jLEKvq;{^IrNnEp&%Ebq?%R_kbUh=`IoDc8mDexOU#&s%%xo}XN z1LMyNmfsWC@fc$Nv)ogR9*jSEVAOvYKiD_MgW}XSWKM~)xEpf%qJA!ERA%N4q%!y) zJAXbgO!^3Wx;f}-6l&ifOGfJ%y5&Yvm;a5KoSH%CJuk4DhEH6=qN~|#nzr$$b6y7+w zbeo=_UDtNNHN2yp(t6?b;YKOf5I>3yc(j~htwn=3eAfrCt9^p~Top&wHG&9F?<#I1 zkAz_2h|a`4{qZO(FFhs}r1u^yo$5I3wS`qSM=UGuD1Cg#1ycytI&0CHa={%@bg-X?@#HO#b_nTW6Z z5e-Os8TezddJtcS8sU0zt$@D6D9uQhGy55Sysq-mu``Kr@_|L|WY*lLE*nn|#B1DN zJhcyj=Qe>{`l5h?`9dWIh1$OH%bTxH_y6_yc!&7{Y!{11{gcdep^%=$kp%)#s z@G3I`9usZJHe1lYx1(eeZlYRvNY?nmxJk@J=P{6+&)jQXX2BAwEceX>YKyJ$j)eOaZm zLQV8f?WkOyV-!B2qFSd{Kqs{su5V^@2s0$cSj-L?EQXm2$QT+J31)uY<*6A%?)9IM zNl3|a7zW3hUg~%w-V@t$k`9D_9!#&V8Pxl~fS+F#3?UOrn9BeL@y9TyLjs)idD)IFsn2A@#i6#wUFvOofd`DbS}1T=_*R zSqqFH_Rddm2@G78Uz;WI3<{D;;HKpiTJj9$Rhf<^rjQjK`s8W%a zjz^!Qz!5DZ=HPCK>VOMruSTq67_;J2g9)PaK4I?LzuswdUQ6g2&|& zm(h*x3*HHh*@L^#2|LO7kJE5JN{Zt6S6##Fsu4=N5qN=4#SLyX9eUS_t%R9|(>yN} z64!7N_=u}Jn>O)3DO##0Ok?$gFoSmqWpJ?%6)NLl-Ok)AjHF7nUp!}KBtz;-=id6# zNzTJQMDkztmF!>R#Dk1kAF#?%sHYEslZ=GTFq9MJIX$Kmn8Ew$s?ZVVx2@qWY7n?|#OQ6sXxOFEbl~$E!E`B%VAWJdMf}6KP4CcBVt$wcn?Yami*Zz_@4~%s znI(y>w~4tpYmUK59Vc$U5jZVZGLUl;9uPTUcVeaI|JWLDjVdsVuH*Cff}J%j9F3Bo zn4?gs6~!U2I4+D)oUym)PP>PdnU5Vb8dtYSsU9)<7$JONhCJUGPEL8eOO#J{Fpx%WsOGZEK?W!$ue$lTOxJLBR#G zFr1y-PS1<$=6jUWOHtDF7W=V(zJ>|f(#Sv$#5vMldTM?OL~po9b;I?dmXOEjrkh3( zYQ2X<8{GvJ1qB_g%ojopatTG8hw7)guwEDVC9>k{NDh*c;EA?%V{m~o%!(3gcSY)LraUFB8p z5Rsl`F5e|f-;JYz2)6P<2*dj{i535fZnSr)#sxap#);q9)mnocAL2wb;JJ1sSz$LMn7x-Czn<1uZya z%2HanjZ{-`u|iLA@~smp6Kj>B4zSyN%$`_*%KkPwSr*m{n|DB>uH#bslhNC1tml0E zADLT8_NBD+T&yB)2T{4DZ>HDoHK8@U{a)fd#x07j_Qj+Q^g-Og*-;c7KmqZw@R?Y7 z2Rn4AuuYsS-4&wPql>YZ{D+RG9-X_lkoOK@6fDvmiQbTU4{f1tnzx1H;%)rH`rpG&xo=j?kNeFvK@j6Ql9o@`OMm$-qfzAoL-e~X2rD?$r%GJWer z_Lg~cL7iZX)5hV=^hCHCK>18Vi(m{AS=F0RnWWP@XdR3Z(jM8%UU3e;<-dBMnvhb= zM?9Ak{7TnPn!DKp>zsI3@?#Bd5X48bOV> z0ke1$>v6sC5#>Yzor3?Owd`zGM>UX6a)VoZ1}EQTj>4rN8urdlBAH?AF2~SbKOpys z;Y_(l=k~AQ9u>)GqN%OEg*&5ALC|@zDH4F1QR9(N$xz{=uwG1m6HZqa}1& z&5B#BO;q8hRAS_234d^3J55YklvDW!t49zfoBhdD+T(CwlBbQwgE%dY_R*ZevF0Ho z82|3f>^5V;h&K=)wt!oc21L3bF4`tBPam?2QpDmZ#69K?_vhTM9%XVK;fcS@MLP)#aL_tHl&*5c^L^M_h@ z13KtR+W_vn^rWMPZC%`I-8{36+_|(3~!=i=R5kKU|oR|yn>$**zywGSObfNS2QscSM zfad}`6FqsHR#uM{f5=&+5AgfCny<9s#y0U1yUqzPnWIK~eT{jAZq4sGpHES3+@+7A zLi5giAPj-wcZp}4gIsV07^q2~(249jF;YhH3DI|MGNz4Y#()>9n9i(+e|jIY2(zq& z*q2^#b3 z?lyj#`-!0%3DeDM=tJKbZ-nz=8zI5SMt{M!;Eq4dW#R|cn{FP*M{WIejK$KBa{0#fHg*en~10r6^T!i!QKIxZm5k+||vnIHIX7Lu!s4|#hFaFX`oWi+G zho(7ZJWLINo~S**1h3HhI#{Sq2b~Sf=|P+p!`Wkc8EcFeW`A)nPwq3$oHq?8I*GLG z8!=#&gYdE~7AyY(~PvvK?!1 zxj2Xjsj;!noJ|(die2RwW0H}xbua5c$eJJ(1>-2DOH-Uen@ z&Y&}_;aJ##)9LJ6j#v6=zM&^@W+KDJfj$Ies~4D?`*`=PTumIBx+20JDx@huLG!>i zoFN=QI}l8+Kbck8T)e>Z?Q3L0XOaf}!z?oAdSrmH;PoHb8A}uO4CdS~%N>3I3qOZ? ze?03wiG6)IO6o5-LDIJuq~Hv!P#t|_H9Y1d*rIVfo7KV@p8Iz2^)Q}63E?Z)!EVO? zg0T!Xa6>qfo6Y9NZ9R-S%>Vv%!?B_`3xi2npBZ16opLwMt?|YzvhhoJ1}ewY2yAN=bOL8vW7jBRpk7n((Bun}Zm)j{eA*F+OOm0kY`e?NwEW2N~OH^yr0xJ7wB zQLqf7Sr?D!3G^L=yB82 zL%)6w#!)dk9Gk{tqZm0#0rt@WaPm&U2oCtPf5)A@HF;7d{E9~5`E-+fuMCLkaag=B z=q2%i81+9^z&5;bo1>3E4?>j8Zh4x!YD#AQ6|H6;oLO%X@hYGo1&C0O@?5U6YtO?~ ze>bt$SP;1a#6FjJ0y$X^#Tc0{a00vII&hdLlZ*8+74`9EBFI2j(I$8vujH%yLR!4# zJ96^QA-8_R-ya~SXD{a`&v?QgxfYYVJ;I0{;K|k`a;?J&+=^Oi;C>{$`G1%i6}pDF z`rqJ0xk2x@*TjZX(2JB{$4`fbq9adb6MIrX(;U!=9w*=3OK!0Yq(tONI@lk#vKx=( zd7t7ZCpZ&s@vLIm7gNA~m_^O6p?R8fC;^`JND!1v#4-2S{ar>W;vy&UTSi8$2boY| z97!Kj7kAT#VJEvqCdSWgv?cEB0H)I3OlH56ajJOEc=rRLX$tG560hBb9ywn*6EZMz zNobV%6Q6Zwz8qne)M97a$aB8V+zkWy$po`=IBwMar7}2gr2;K$Dd)oF`~rL8Tlf`^ z@u}`iHWm!GEWofo1)8)DY;YQi+#|n&6kCuP#^FkMji*r`E_4er|58K} zow=&-)J7%~^Q1Gcf-uUqm%E44AqBaZkLYGPGhh=dF)$Zaz})RbZG0fROMTXZ!04yu-;AaYIK~i8Xv(^m*AHFTMu-s zd#*px?PSD{^~129=1?((yQMoo?F!@R+Y^-e5)88;eANYx`atbCRgS58q8@N7%}j1m z6i+IxE@eyO$bAus6yN(Abk?`~;|LYI1j?uUYv+b4m zQj8bBp+ELg(=Lgd$U9@aXV+86xMU!cyo4)<(7iic)eM$`mNv{tD2E5l%ph7EfR zH_?TBu16&^MQu+tyQrEGM)qfaL3N*+o@b$IW8u-2Cqr0mHU-P8BCVBg(iOcFcFYcD&*9f#aOxkt4`i(P?*{U{qS#kJw7t?6%Dy{NJe&j=FbCP- z1}eargihvJa;bE<3SFh95Q*REL%NtRXAE4hrx!B{ZK*c3f@4{StFBJ zJ~IZ#;L*hKllgpH0{8Hu~&}bKp)D36sT(%I7a$<8Sfac*A%+q;7MR z)8GRAxkEWICNhdo^gr}Qs=-aH-*4Pf)mfF(W`dQY-|AeBXGBugb{46t-nLk7yPIk}q zbP@YtTx2)BLS*$E?&cQeOgp%8o!I5Z;-@g3ise>fvFyBFpu?|&C@T^sUN*AWvS4;~ zdBqw;ft{${b)j405IzS{_h`Xx*^rsHnrm7@yca_RIF`-}6{-4`;u?~u@l54insTzW zfCpEM7~%%n(M|YXPsG2XHlI1!S6i{;Z^FUw0du-Nb-`*xZJALw{Sex)TaTsoQyWE9 zW6MK&^)^-dDPxrLN)793>kaEY>udD+E2-+wv96;(`6cUiYi;XxrJN!v$6(n^wRD8z zHcBob-$kXTNWbwt=>_t0ojCg)9Kt5%H_rdzXrnq%%jNhZZaBgIFpJYF8@ot-s`A4b z*)fdmJkI3>jOanC*5^2(7wWVBd3hhFj(tME3-Y*(j>{W}F}4yX-2=}I%#+td8z=S8 zdI3~^!-%tb@HvmH^dP!K#OK5sZKxt1Voxni-ZX+e^&WM+8RAg-fz*-nhUW!}s zavF^LU>=;F64@o2QZ=|u6w#8nF#>E`fOF`fDq4~;If#GLYtXN6yy7CVhk!XBj~2Li_T>EdG5B&8G;6jKo^CCamP%cEmak;Q%PLOPa0e{) zEkQ6z+tF1fo%~uF$|?7P5gkT8zm5?tNY?a%wN;L}_lD~qL}X`ha+$CU8gP#6*4uD~ zOolVIhEsh zqH4Q`NPifdpp!7~9vh=U4(?LrGpT^hK+E)(IQ4+J%e*giM<2CLIBllqoKT6qs(?L} z1@-Soc2^cn;$w2pBE$jH$XWZKOUVxo5)6j?hQEsiT@C`({0fps#hAbSK^!eJSGS41 zgiHEWvNRAd5Wj)wVVg2~`>4H7Cab$mK6HwXiV65bJ`ysDWx*+vjCTUhji{y?I7>BR zoE7X)n}`Oxi*rDY5gUU!@54XBjY3AH;va-V=U9-}LS{ZQqwt$GKbjqJKDgdpPTw40 z2Q7(LI}zb8WA-mJ=JIrTDZdwlxX~6-vdDgi(g5VC*0_%A2C*d73YNEoQ+)W1AL3hAU~5h@fMQ1 z@1zRi(r1EmDnbb(S?f-%Co>wkj;MRS>EDHL$qQ!n*=#O;M-_6?oDZgyXa-B8&3CNo zvqFTtP_$546!1GrA+3XX=+nPYE8EPev)f!i1$8Tx?83qXBO|+$12kuWaYx81y`&f9 zH1ic#{#JOV-!xl;emoP7akf1NA#sX5IJ5lhPO*YbwBV6)9Gyx7Do+Q92O~f^(-{)A z>ZL+O-AQH?B0P{b36)u4y~TpO&PAaJ*mHVn*Xzw#Ar|)d8!ZF)Qm%kUhThgt#QAiG z-A6TS70)lD(MSwtA|sR?vpXIn2# zlT511O*pMLnF;vp6ejP=1gh!=BW~ zm^X3wdacJW=lAPRsM7Yr12Eb+0e|_KPIp(aGZoij!c7?Og+Y0?g0=UcO=5y^53j>m za~yo@aA}}0TC7gr*&?S;lCev6h%|GJH`tJkfH@uIZc}X>qe(0)OME*#^A!n9*6EYkm>OnjyFk zSJ%c1s+38zn-8>T<24-Y+|&z#^e)19X(m{CJMEG=24?hiP^3Twr80X$SMfEwQ$C{U z+Higkf$U`GTxc&IW>0%X6jPO|*m&x#4~6pb0|8|@I7daHAJvA|<}a8?nc)$PBPWjq zqc+T(umSv_M>mbK!VBWPX!x;2Y2-h1gil7iHcn^>Q?j3#ReuBbs*hQXzv*KvheuJ+ z94_4l`+lP@r6wJ?j=RErW0;UaYR>uyhJ~At`o|TjGL7_u#w9el!_8vspjqg%VWF0H z+8Cg_Ot*Z%e5+P5-bmHOeP%XeC$n}o6|*bSI^(-mUTP=Rffd$FIw!2w3kfBqeMC-e z$(>fgX()nkSXnb(8;J@mvy{O+sPBO%3Pd+aut}%IndVXTwy{b6PkKsxJwy0q z?jTk_N!F4Xwo9Z?-e{;LfiWyM=ipJA*&KxKr;qNU^V?Ur6I)5to4GPQ;ae2L8&)_(E3Fw+NHvDfEXCSyMgClQ^HB)P}Ru)D=>3 zw#)*jYNR`edQ;-B5GGF3D;e{_jjiHmy`VN!47ZGAJgw}(qokGcG$8^ngOqS!74e4d zGD76hjA4IfP7&5aD319Sst;ShR{si-RLmEM<&55Z=QY*5+iEfKiLxFvKy#zHl?GG+kJ1%n+{_)3kKLEor5>UEfOu56utk_&5#fDYc$=#&7XIj?geg)NRDyw%;0emz!`d!9pXQkJh}Q42v)ngEs)Ok}l9mc>JEJ3g zV7{{NPnEM7oAp#^c;dux{kCyeIxWN*dxc+8395^EITh2f=au42+(4(qPn^6mb={W2 zSN(^ni$AGieC8dURIx>K9(`33L4+cyGKHeS87wU_7aN<{UD`>V#US?0RiHq#gxB=R zZG|s=P9kdyynw7!lco{zZX|Mg@sDfbVr4hwl&T9CE{NUlzPXUyXFuvdLA(WH?;s46 zHTa_j`px_`;)Dos8C9ZvXcb0?Yrt;f=z0GgK3{5cEq=8J%w+wQagOWPj2WELABhIL z@am7r#;?GqjyGb>iee^M8ayeZtaw{kXo+QXesM(?13;-(`$`A%)R3Hx44 zPQQv&j8^iU%+!33P@NH&%VOCt=6igzHKOsyMJ8-X!%mY+Q z@({yiV5e(Cy=D(}+C^q@<_L8k_-I9q1fKN}s%+bgP}am>5VqpfEP~-$RR{GR0JShd7nUFe|5rMSLOjAje%oG@73((`9Ng5!7t*@r|n(K~!|N)eYOeLst8>huuw*)Nw?UxWD{#0u zNUk@I>pVm)U>e^`%XbUH0eEY?fva_k97}?qn3;^LE@wbn^FK!E1@nD7h~hdTsAkNC zl$@Wp;Vs`FI|(E5tR$qShBFB~q8~M!R(u@>>gHmEPBOpJFkctp10KZ;Oym@}!T0a- zB)0QzmAT3~Tth>m*JbqlY{uR726>$)guuz^0~hcPQGPmh%zk8^2G^UKm^q4TAj)A* zJ_J3yMI{LaHCMTV8spo4Tp~CB=Vdhp?&lkLZwK@Ebjd5W{#5)47^;WXXA{ zaIEBdw@^=_3Q7$nD|_J}UiAuGfxOJr=Ztqs#=Z}~RghOWYhE|Psl}9}zfMDbdnaxw zcZ?$DW*FD|j8G8yrd*r-U;Q*AdWadEqJzl{8o`TW|DYS&Jh&o@V0Iie?or!l&pT%$ z-n4@5EGHJlZN+TpQZuoOxEglLXt3KNpbUm!ax%~66xu=6Eswc@_k2sPtim||pf|y>s0eD- z_i!@)k^z??Q;PwE>dt>3)77W~*;#dvt2N+eqc}fPfgd^HUQFk&ZhTkP?qc3 zRQzt=s5QZzU-0iRoF`+zqZ?{fPwz^#Z4p%)a|xzxSiJKi*);-dX-%T5fZ%n9*`fSzvQG+XnRuUKlbl zG%S2%cx3pUuo+?H!p?;D3+)wpBxG3dOlOGwkuulPNlp|Wz-^d>PuCl@n*XHlHHu)( zm(@3$*WKqi>IuUAcAoFMe~20db~{4((qWr;yKF_0LOY8eNqes^?JizV}&Bnlwjm`_T8sm*DH+?}nGmTs0Ti z=qcu$P3TX@yBw$`##nMGPn8N*hqZz=!rImP-kOE^vfnz@YOz|Cd6wVu38}vrEPSV9 zZ7%%`y-l0bl_;-26VE3r{k^)wNeaBSvuAB zR7X>#P4zj&pr{6s=_4kD6%L&hT*7(7=CfRsRQMQO=|r^8JKdAgz1>yDRVe9e;@YI= zNp)Rb*Q(?m?)skA-gN%EYAMhK8&%VW(tSBb8EkE7n`p1-nCQ6VxbLXq(Coz=PweIG zBW#VVrIj1#@*DxdgqhW7Qj) zOaE%5=VZ1?ALInfWaXVw-+JDfh_>Q9;~itIX?>uiRT_as?9hce3YMCr#Vsu?+f*Y z=ZzGiGNj0wazx52DUL*iMa~Rg6t*<0Vm07W#PFTqoZ5%MRl zOSt#<+~1}N&l2_~?oOKF8j^g)-PPO9e^8rid@(zUUa78Sv~t^e*fzj^$PpdXHE31P zgP=J)6S^q~LDtfB0!}hfawsqFV)_8n2I#@Q~=ei4ZYa$GX zk0@>2)J|hriJyFaUpM?41HO0re7Aiq824UORTXMRwW&Syp>EuSn5QrudrHeeun5kf z%*}5pP51Q!xS?N^;-sA@a)XFZ4^gGhB%A=h>q`t@PLIS>ol)YjZ5)4;#&Cp@K0f5Lw^NHj!4^B#gHpYlg+!@ReuvS z#x~Cs*UE(6fBMGT{*?I}k#Hj+I&oZLde;v3Cf`792rT~$QYlMS>pR;J`$+pa`vr&J z86A8ncyRCmG<|^{ZOelGIHR09?N_WnEGMNr!f}0+I^XBUk#$eM*kXh7G9@S%?5A~U5xxUrLZl(J(J_F7blTS^d`LHlj$?i!Bft&z~lAQW_P(x2hwc5`@Z)0X>?cPIRoG5htW+P zBR-a?Kz^a3`c>|1nQa+hnT(r8O?v7+lLzBHc?kZ}N;r@YIiFsD>kb8rd92-4_t4p- zl<$eRzV|uvDzE1+-HWc##WBC{fv*>&W6_VKpN){U0xvMzRmJ1rVJwbklfJR(!zMmW^FkCD!R*L8VcbIzD+teUU% zkX)f9YlWzhc=VA(4b9*Tbr(8=m0qOtd`3&WWrGr8jkNx!=me+L zw$^e(sw9p!$Ld?ut^V)+1e_c4dOIY`Nv{*;BveaS^SAV$7r$Hle*e4GpIHe{lI|tP zde5o_%!QIuS#P~%%kNn0yc4t~_*n3>pbyS4XEO&4a_ymXv-xPBX>VtbwvV#)w>p#s zaUl*6@yhB|EyCym`|S;Uyed+C zdAMaP9dj;NhuJFGQ#)kmGUrL>G-pcZO-CokQpQKLuCTO`3rM*|a!kFc=I8Vu@AJ`z zc(G@``%khvIfuIh{UkcMXShS?jkLjA!S~ho+^?$n7@zT=5trcvM{{bulJ}!ludB4j z881*FU4_rqE%@H^VeD?E+jS9tbKf=Z3hy9qb8ims5zj79oM#e#iNAR5(>P|;b`4E> zlvpXTOk!+er=*>(-tH6LqUvu@l)g%P$Ial_uvHN;QK6}BrumWXWAw@BC+W_o&6oCh zn$M}DQw@vi5&kUXb5M|Tt8JrYsi=ay_12F1dwbKnLtGydyC-H%Y?k2qb0pb={v`zmrNPO-{^~m?QCWqMo!N zxuBq6TV`*X(;=iDF-kKUcZMM7diN`!O| zE*7-fk3HIx#B_-wpK*yzlYB{el25y3U!<1GOeO8JG_cihJ_)%Q(J$qn z)WgzFOIIV>mOg*_)zKBx#iSXTYEgY+iS3^9Fe<8E2vo%*11Xb35xzx z?%?F%NkZa*zp;OM$D6-H;#b5E_*3d{SVG#wO-VhI>v>A}GN_-my&&m^I8@$id85Qz z72JxqIsA^F4xgifbCh$cv$NCXnC@ueh__R(x4pM!w2r~QRIvohpq*&j>KbMB*Es6w zM3xr%sO4}UN*)Ecm}KlOBm*aNl2TWYNc|*L}sj=iIH6)41{{bxKT;ST%8XVuqyc zNt0YBlEXYjy(4{F{jb$1s)V|6!R!f2w2s=qA{=)=!YRxqo02Y_pr4$H0<$z@@;`V* z!G=}OtEE#z{8fDQy~RDV-FK1;;2q&|HKZ@+K~GtqMV+h_GSY&fw^9^)V`skLvmsf- zj)l7;9!Ew;Rf@_KwL3CXq)yBg6uLCHM3CUTVef78!j|if$9`SyCP-Y}0ZIdk$iJdrWV#Cn$eP$$8Oy+vJH-0=n)iU^TO0 zhE^oM9*3GJ8@*MVXrbC;^%UO1<>;ulnf_M~e0?x+-s~Oi%|vY52?xcy_y8>T)%6!3 z4=at%(+j$F1dpF`xWayu6xqtVIOO+qvnnQ?fZ>u39mGp;k#@YF70;&ttK}2u%}0GV z4!lp{(v{Lox*AL9@DFPhsNSzN+JSxC67GoSr0H@EOOj=|QUG7b0Wg5(mwUpXT63|APVe3XG)%jJzFiF!t&1sT?c?e~18`*+JE_3V1_# zDjF7e);WZXTzgsg0Q-fHpo8_{KRps7c%RY#?s*zt1xfGW4Q#^OwE^F9?E)s}Vx1qtQ#_OL0 zG0Q0Cqx(q^Paqye!XBkl{djh2zwb`Dlb6%g*MaEN@w-y5jC)P=b>0#2A!swUN!IxsR7 z=w4WZQOO|%9NFigt?Ug3P*D6v&2S1BWh}L^fU02_9Gy8}(j#%-F3%l~B}<(S^4tgH zb_h9ZH*^K%{&DytQ5|1lq*lYk9}Ppg7B%+@FpCPoU&%^U|11bkJ8+%+uryNR2~Z8^ zraho-m%)2-ftz|?@i3?wQnS6wdp|a8 z@XvFA+9ez5;XbVb|F{b-d6lbK4qqUk@~(*IgJ`~lTd|Z89R}_<7ZmUy?2tRuTXZA9 z(8|I+2BR-fjT9h}jlf)IgM5Af`Od|x>Hx?42=nbeD;!u3WZTaiJOyKY1~^PNuB;&U z8w{H}FY|FJSNt3d`Zk^ipTJ*}n0HgD9t;3&8^(WoQ)64f-z&_`=I|@yz@GQwbudF5 zh!0*Pw0&vC)M)rz{OwO(=_Y!Y3;e}QK4KWJ{@@7#O_;)ny#Eio0;D zdJNAfnrD*@Mpa8@N>4Dd*36EHV9o16_Mf7`ElPEL6;FCTYkW4VCKgq~Kv3=O%)Vye z>5W-64OmCT*}EF^{)7294bLis`JNfX{Rhm63*7NlcsYAu8_i%I#lpVp4$7RHtE#~J zb^~qQ$4YyH^1{hDj$noF6}G}LInEu#GRCnerq{BgY+zUF!3kK89V&`3`om~n2Aw~_ z)o$ZyZ2PBZ*-YhR3s3qYXTT#~)y>$3GLC_@KZJi9vp?3MUQ&+F-n`=Ye|u#P7+*iR ziiaS<5113Hxz>5ikNpzXU?HlWr~1sExpcrt;fvJd?E2=$A{T&DnA(vM$SF*qav zwMJl^LwJrNU*~2w?#T5I=8DHL9$QgUoMA1zBD)H#tOwxa%ecdyj812MuOd1liJHV) zX38a=P#jvQz+P|_=H7ZLPcwO5*SM=QJi`n8RWziI6w6iFglx=fJ9qA5r6e%|5B`mupBTW!6)mHVw25n7NWY61=1YB6 zbP(u#Dmb4(>`{@-rN*rD;q21A`54ByE#zZA_Y+0UuP3K!J@%W!>=~!H#_i1L!OWsM zoNYFqb4Kd-FL|Hc#8iPjVG?}M5imu2QuFA-Ro3CP7gEzk3C%kdCGM&NBd8r~xHCVQ z!(2Pcb>?Cn^k!`=VXV(GDi2wM9u#FK*bf_X71j92&76HtY;&1Ay!$WCSxG%;D(}^s zmDiCwtH$}#kozmmnytZ|tYi<{`ETdgM=j(X=SUFqA|ubZ3gerXJ+=+2^CXdBJo+^Q zp4&%wxu5vIP%$gof)G0DT!TwFl{wyswN-$-l_+pmH>p zySdI49${WyW=^=7-{05^?d;~koQvV?2>~A4AVzu~9P6^|nZ>x4KxYms)x{^=*HPxw zHukI!)M6tU-8%gC92lVciC?#Ky;J$SwyeSqj6xSaIx$AQIg6Y9o4XyU@)hS*8~t19 z&3SswnfXQ7e~WSU^!%Z5@ z&pt4h#<3T6rveqtSodNs{Nj}(Ilt1fv)tspJFq7QQ=yKBI~U6P_hxnn*1>jmxE+ky zY{u>cAFp6;W@TqS%&7aQD?cQbUdGdI#u(+{zKb$X(lJ|J@smjL7+2dCmyK&E)`H1d z{zsQT!ze>sVBnD#!zk8fr`gYYl_#3`!_UgW1G|eB>i-;_19)6(7ljXI&LFIf6e&{v zwcWH$-QF6hZ8L4^TO)O9B;~Db+qSK6=J0=~Po5&4^I>D{z1IGhewC|Owg0d>Q+TGb z-0-Z_Qk(O;M)Hw4>~C9Yw(0P|`+0T4@GAMO-29yJR3zhBfdCZFz0~Kna~{uNG1lQL zLpV{W~xn-?1BSIU!rPdzY}MZgx5pAC-aAkd?1(z$#7W zG*97-I;b7*8ZX{fyVK4~vwINbWcNgOS*zA6VM|(Gwm?WICPP z<03q{t8}3RuoFf3+J&59g+0oEb-XqgKy_mZDei#2an|q`>~c&zL@n* zrsDsUCl26QYhW>d@q731^j(O%Ch|RFcy;SJS4()B!hF6lG43&PjX2JdFMcYG)oV+1 zy@{#jA~odZALiL(@X{SQWw+?q&(9s{!ACA}7BbTpGXeV?N_3H(yZ!}MVm<7Eg(d6D z32KeKe~FK~!Y=+n%c(85GJ>_ZjuqI<*`12FKh0};$jSPN1+GtpJw42YXkJ}g>~uKY zyQQe!SHeI?8!Y;%g)D6|FhJ1=#6qtoBIk#ASTLGWr1giQ*%v zXfG0PqZ#GTq}>z{jI2zQ%E=U-f>gF^qMQ|g>g7PXG|{0Uk7+FSfZF}OSJVl*0G67u{Cl0=n}x8*bD27v zLdk4F&s;QrD$oUxfTrI|7-VPRt9*gCa*Nz`-d~PDD{9}v;15KR{SSi0vH@1&NV?EJ zp!BEecIHIAK_U5xzDi%fpV#^e=F|l1d%!?$YPa<$bg2fIv(XH>hzd|)rV*`|O4)KN z>l}TZl54Q*foll-FvRa{=5h^mKJmfT!eGkmR^Di%tH3UZ`?`FI)lvh zKjR{dQI*X8E*UI<4N_b_Lfs~pozH8Tu%@AI5>-AB~%9OX`L5c zr`@4`|M*?>tLs0&zYe(YcfUBl?0)^+JzPVa!OjTykY^oHN>2M_`5+vUO5$+tMSE^h zV|q13qp_YJ)xrGw4{erKPRpz{(>_p5U&GGTqlcj(ihzg61$z>aZ6Yr#Pe)U#_?#KN zkK~(F4EHbxaSJ?%ip<0)z+A+-@)wv5fvCGoN0BXo*mp3t^%J*rBKuU2Sl>ylcNW=Z zE$YZ7y3^<3WO%8n__3x=W{c&9uh$j!#w5O`7#xPyWI~7O27N`Ia*Y^r3AuJ{Ix2Eg z?K%XjYA3w7Fqlgw?8Z2FaB20=tmzi57qh$~m@?HLEzZ}T;vSdhBQ>{Zsx1do@1}+` z_rO$BS2&~ z!?uqjZXb;8EQX(dg^w9cTwu}dmyU=!Eq=hjM)ra4G6c_Xl}y)xQo&FSk%&ML3t+%AK?7R3#%3CHaRC$~6?+!e@v$DsWw*YFww3uQy4?k=IRY~p*BHpi<0XkvjCR&9QrCtK_9m3tQG0@S|(aTZO@lq+Ow6({fo_UbDX-(xs81MelI54GM zWKb8#bK>zaQ|O|b!s?bn31=X>9V=Mpd)&wwu=5IVMnb5ECm4mzaF{)fV8Bcx(wk48 zcWv(RSu>1Ce%IfeXeW7(jhuc3u~Q}1IU2spc2K2Huy5C5YkfFHJf3SQanCdIe1ray z6#6gE;N8X(HPj|@?M%$S3f7lSk3@W^biXiH)T)_^&padTgkcUCW_;Q4ceRsTy;c!qt)Ld~nHdI&1EG&oK>Rv-gs> zl$xOW>FK#O7r|FwTJY!Tu>Hq}5>)FV2CVuxIr0G_g=k{ATyPkEW4EhtAHwCmzp9@w-F&hAcq1nQ%)@Hv0!qLG8yAIbQqO|Xdf zz#~3K-$;Iazg9sLwX+Ij`WtECbqP?)48U$SwvzW1D$ou3PeO$6O|Prmkx=iN)}8O>84!4^Cv+WpPiHNk3z5qY4k$()DY;EdJbhTR4$ z3Pn%%HeGBv$an9fhT+8m>>_eGLgxMs1SORC>IG}RiyZGI?76~NiX2pk&Jnk1#5l7w`6+iP8k#E z2gw5d{}cxEL;V^Y-{By)#q}UCc0+r|{E6Y3pv_Ucs6|wr`9;^g*SsgGt6x`BwcJ!E zs>9=4V-{r|!5y)`yqx)Q@2Cjwwx?6RDFvPB=y+=8igUhnba3Q$6nFgVILpMj->6t` zW3I>q6sAwh$K@9C68Ztd(eND%TiA}e^m6eBR`EXTk_{^xg)a|avQ|~z`jL+sAPW0A zy-C*&;Y8$lh;Cw2U-+})Z|pdLTy-n^vD2#@6n z3X8}2`vvU931)GkuLmIF@8Lx~f>}BR?0W?!X^T4~Fs+b0vL6{h9L@Wea0sY&K@` zo<})hoI`Q`bVNJ8Dm|DOHk~SP8>~eMM>}PTeV47Jt+{P9ddAu5H~Nao*$Ei&=g=3f z1MY4Ro9<(k+YwRM<9<4+;x!}VE6Qp&A&zek;E{Win7l;ZHurY;Ntl(SxrPQeG*l)*6FJ22TY4BN4WxgSJvCx+q>B7*z4Py+DkGeqPH!b&BN^Z8=UqT^8e&h)Hhzz2a9qw=jcFkh3MKcQ4)nv=Km!M^;D?BKQe@GlPi znm*M$AQGj}8p;A*UYh^qXRWhfYyGeVE@F#Fa-A#S8h+U3!u-j{-^DoZHnQ9l@XOo0 z-Nf?5fNl&U3TcbAo(pQR1Vp6^IPYL`lDS|n4Tw~J5ZPU$lC%RvurqN-aUwg^_+hjj zFfMT77ol~K-*`rCagyGgj>9^{A@k@jo5x#!{$HC(hL!nD46*(-UDmhK2h`3i!p}`H z*Mp{1=7w$;uS;v?lkz3m&NS+oXdo0pm5UFT%+Sa&VsNbBdaC zs{X*cO)zrs(b{+?k#1lgGdYGFYYW*|EW1(`ZHociiR-Mv9aiB9)?7EUkj>7=Ld*gK zIgPE}L`Ldk=Dx+6xyfJCll7MdO&ZRf*h~hTf{pvY-S|di86aE+_o~6?`jgL=2L18y zy)QwaQpneRwtqbv7ulicfbouaDtOLMax-HlbrQ1XXgpI#cHyHXl`OR zUey97sC;2ZGSJD8oz8cW%(n@(%*9k%=Kn=WOzcQEYMcwn+k0@z>k~a60XMn=fgGVy^Ez5lMm&C4%B*j64gYarW%79dpR`NVnLD%p;j8lZpMIUr-55q zkgCdi7(+*}`91mkT(E=V)PZYozj9c!VQ3q~v4x4IRvAt*^AltbrHywew9haj$kFqf z%Z#GtQKCMNe$AL?&LG?D%Xz8*){}rDQXOL2!ub5A)=9GhIMGeK_gWa02ZTd-y%dxv zKY0d_!wPRxKQRWAC zCSQMvHcdmUeir&Q)}kHM2Bt(f)qqSOR|R3DPQl*#;cwFlOIgQv)&Me-QPwx~94xA! z16WguxO*QMelA#_E%B^xL7B#boNNYXi6>TRLbfe|rhS52(+qTJxX=V`#_=F~v#FPs z1)(cTgt8BA&SlWcIUrZ%;AOeUeHLNOcANXL_22OTA;MT|lG&0x>KU2OZfm^I6ML7Q zr&owK=sC6;a^{z#F*AV)1Ra@caFgzUFruELD7Uo2ik{#uT;euY1aB&7wPQDy5pA|L zC*rLOTg$n>DCYAN7mQ>0^JIKRTG(045H<%2rN!*9E~=soJR8-$a;VMCVMagDD@u(x3{~e+ z!UeGp+M2)2jp#62;8)*FCQS+Gr&5C)A~XckF9#Q?F`TmuMD!w0mPg3OD$X?H&>QSa zk5wVC-04_^Zrq{~>_<`J@EJZnGB{mNFpn}!aw|oR;3cS^K$hy|>@$;0xM)UM>BWuo z<@xcwrKK)nEpB*WAyTR*`Y^?gCARO4`5Rw*>P_^UK2(1H(1sFV5f|{{{A2zW77~pOwoZ~$JQgO1$G|+lSi?j&y-oM=9DVuiYe1oI zn||DvmZ0RHh{qa$#a=g;zz7^coc7DCDL%C>>Cr}v^;MW>Of@GG{T?+l@T#_PH?|w) zs2TJ|8*h+N-1O;)^qPJkvl^Qf(4ClTy}#(zGg(a~Q4Awr z@ZsDyc#=ol!CRm?zUdIhxdr*q#@s2~Gs_dLHlTjn!SbLqaog(5S&I`+!1exVEkN^i zs96v6`GPpmIA&&}Q~0?xm9FRr6tnY^p=5(!x|w_0KnOG%!lQ3vH6pitEOrvkpdv6D z{(lX8cOmN3#d+%WSi%7)R@cMNSN7G0=~%iZB#QOn=$Ay1c9+mwmj##H493nXd|6W| zK+I1zdjaO#Ff=cYiec7W(<{2<_u!b5#C&o)I)`OGo@iA!hQmA$6Z@LsMs2A&y#TGT z=5JA+ABc7CY;`w_fp~N>_rTN~0(Uzdm_L;bc$kBI&qS3Z6!!U8R2H7#NA0Ml&JnXQ z6WLE3L^hR(A1y)EEkf)d<+E-Y8<~EQftchFx?49<-kpW^hChlARiy85rb?oHS`t?O zJ}XG1T2ED?x+GzR{>77}8bhs#(t4{KvCJJQyAWzj6&}Dibox{=iH|Rs3ymPDDzU#$ z%V8tCR6^WN_V>t2gqPiip2gePzu{yo*HIYg0Ja~9#dPi9T+A+LxsPlEq6 z1;0)(lj*4_ik~WAwxg>v-a3KzE=#Yk_k2foBAC~Bfv?1u zX~iGrDuJo=tV<*|Y=*g?xodgIz48hPl7O9_3y*R&o~)JiR_rP)F%~gbp`(};@A-{L zs)O0V+9~>2&6*hn;%S+U*v%(mC31~mvqhB%eNe5s6%)ocQ$HP=Z zl_fhBu-+&upTG{zFpG#jg|CMEds2+UXCxc(LM!tb zEaE}JdgF_^K?)`V2@|@TN66hXp&v1i-Jgvu%xU>1-$a26ej^4SB=&?cjjEMZLu`W{ z$26kwH7K6%BPN+Ih6r=a0b(+-wl6zvDeQ-3?>iAAv5ck7jQp;%d^`p#)t9GSV|J4i zYoYl;Xh;9VzdCb7r2h$F<{RwCT&C{+HW!JRt-5eu55hCOhkbBzQr;7R+{SnO!cPtt zqD2?@MJjyRy4E6tGe^XC*1T?=L&ah%>Jrt>MW|dPljmJU0e2V-@R6vMjIl0Jy9)w^ zNY4oiW3I?evpvzPM9euFCi!Kvt92S%eb9iND{i;^(5v}JJ`AE-UK%5|MC<>yH9&q# zhjBw{v2Da8VxB6TY2qj0s`%RKWM&cv5Mz}`YbO~c@7qLh9jvY9O{NH3G2dew@(3z( zCc6+F{!83_l)L|tis}Zq^qtT~SVm2K20AZ6f`R9J$1QJ!ib`ecsTfQoca8a%C&)GX z)5}_ssAvH-#K+i~+1!593`gmwp_D=NO$7_U|9F|>;2^Sy=Jgb#hVzRUGb?H!C&1aJ zSqDr_WS)zemWcfapXo>)DZ#u6H&_b|bd=wRjk8sG-=L=@e^O^Z7(a zXYk$ysQK@J0lA&i*pcY{EFXK$Ii80)iAvOc6F>S=IOwZb@Y8}&PacZ}xhft&@xD0K zhIYbPV%Dyr#7u=c)B@{Ky)~>#tZHqTyJ2M1bCiPQNvG3Ikkv6vZ}F+U_n}W`Jr#o0uf>qgRvh zZz1;UMf5nF&rUPTqCI}eT5D+5C@~r|yDnDYI%w@(@|RE)YnTrUzS0*}rzO%xYpFSm z&(_9jd?cRzMKu10e5xpYmP@!LKaB3;Q?zp485Ni*kcYj0j5eo3oaOT5Vgfq;Ifq*Wj~@V-4-r zH11J*>IuU@IC9d@iy{fRaSs@GuPPoN%?k;wR)KovlUGcuqTu#{|~hq*x9 zDQpH=IcbG)+MKY6I&jV!@wv0~<~XtNTZo>YL^>0# z+hju`x!nTpqYs~t1-pNZ?I_B0m0LVddVEt>5P*V0Rq-va%!laG$4S5c4pjPcVp|;I z|Ii-4OT>5&zW00HilZ$=hZQm5CM(u#f*Qtsx=>n!LzLlb!r0r#W(;Zsh1sWy;1cc0 z-gXnacNga1Te4dv=quK#k1oRpR3=aBizWOYiscqHfQ_JG)yOIakRkl$Mm}fn`;w7< zFk{guct>wUK7Q^&Fq`bCgmoqcT0+Of8~pWFV!S1&3Y4Imst-OqoM&Fat!j_|iN`Cn zr%UM;6Fm~~b3M>%|4!F-4RIQlBNjh3opW%Orw2p#sWpI?e559knaJ%dwfzC)Kr6^t z@1m=em%2?7@$Mx2^+@L3WudB*TkK0_pTInzJN*2uXm)K!9is~wWEJ+UAz!tLpS2ln zjILOLIsAlGpm9!6{q|r1zL=;xc4!OM?KUyuc5Z7&{6|ZnEk7f|S2?0ipMz)11Rn4V z4`|VWH3j4#FEOM~*}@5e{Tw7~7^uxikk)GSk1>%ML^%)mXdaZ$Yja9NnaxxQ^kNwn z?G{$3C$ZoIR;4AqUY|ha+EYL2!2hday;l&8XXTV-Cs%(4NB0=s%bz$Yk+08;ZpH&F zokVBWDzf3U`2EuSc8!%%h+j3jV*Icp5#(8Kz^E>EGI!6_(b0N<0LbSdPv%@9X-+0c&E%3s7aw_`qL5f33Tk6^_vqin-e~e9asexnU_2#5safT7ON5+rD4R;d#HO7KY?6E zv5INYw4O{|qaEvdi6`nnjMExKU@Vo=;^;7i(Sfp(&#j;XdM&Y3C>FUqcFyFzlhmnnp=6y;NG7B);N= zofj9%g8jICCz_rs?K7xx9JZ%d>cFpG4%#)6DWL1@5A8XW>`EE?D0w0JYptcOM8y8m z5^)cwV=uVXXzDH7(P``k0=Gy%rybSWp*S#7E2CAU;(bzUq2JJ_pipoXF33#7pJ;eK zUM7tG!?a*C?Z|yEpjtM98vQk?q+CZ{EU%@4zm)$S0?RLgg4!)Q&`Z)opPhPOf1e5& zJvS!(L?@}Awm<>l34I`2sIW!Rqkjv`HIBb?G5a>g@S_Ibn%ZkkYQ_)HXkJFW=#ZI_ zSosE)|1+p^C*CTf#nF$`y@|8kft3w}`8}H+l0)b-#Rj+$JJPM zy?Q|1K~=82`oJ3ihbA1hPqHr5*q2}`j{eTpDGj75>vS}q$ z%Ugwt>kDsr?-NgP?<((2^`WNF$MRK;R!6I)wAy+bqdjrrT3<#@T;GkX?JoKTUU4*7 z*;lDCZ0d*dE&04WN#2Gg>sh&hZKutEhm;-WX<6qL@OD4Y&G_cZ81J>tYKv2L(V zGl;P*YXM)cjNFQt9c<>FwlA@eWaCO*CFGY2rV0 z<+>VWj7Qcq+Z|_rzczlEU6s(@-sbG=ZtPz(V0OT)09Rm-Kr0|yKqvo({&)TQ_%(GO zaz1n{cN}-Fc2#lpQjSRntWQQJ^nzSu45KWsQC=^o_VYA^EjuDLi|4T?$urP1E44=I zlhneVwx0K?#ZrHz1bQxb>r(AnNvF+fCSeD`BWY}WMjh|AbW@%uUjx;wf%2U$J(ts? z)}9&8ZUQxPMd@UpWDBz8v}Lj7vBjeAzQp!Uz9~%>3o!X%0xSFpU62WM0loA*a_%PsTVEUj|fl>b5{nGhuVg7AZcQ>c0 zlyc;8ws5vlR!FJld2s(eW+HxKxOq)m=lznpEahDCyyRgiBU2lCx_f4)c1)?8+#oq^ zO25?1odWyc!@+N54o3d{BB&=+5}@ymVC6rxT$pHLOtc z6BhU&_2jKoAHtc&Qqw3&Ww{NLLKiYKeLc4)h~Ak{?UFi29ZuKGd+#{!L2rL-+k0)a zF@RWb5{i#)jV1ac^Pqgrxzf*aKXBZ#^|akos<@N=Uj`HmDCu9z|CxW`fGYm$-P2ra zU0>YA{oHQ5GeQxSPRa%)!d^qFWG$uMu@9tRfUw+j=|$9Uo|&F1p5Li0nChH^)yeCX zJzrD0rc{Ti8%KZ19Pc7;c{NIF&3WF5Lb=K`=>Fyiw6mH}S&x9T*Pgd_%$u;=X4!Wr z9#o6dD&tVO9r|}#Ya8W%$_M*6dp_9KmRv#}ilysBcS$F4Gx7dyy5g>**U^+o#Z9!f z+Avsn_n3Vi;{EEW;EnVS@fK0Psv&g4bwfjDmv_H9Sx+>Av9jIizH5us+s{nErcz_S20v&gQgVwe_)eq+Yj*8mnlVBj=VcNsI7m&%}798`Pt(B%Dm`A9FcX zp^RwYHlmX91_k+E@bg#DE7+1g#m`zvl*%jUI^BvFv}7%i|K~;_rXGLJbKAz#8FE_H z;0Vq|?c%X_g*VO{!VYHFI_NXa>%ta(OS0LST6<+NRr$xSn134gY-Omen!U4giC@!z zUI9D&CI9yRJNy&;qTLN))Ms?p_bcx=&6V4++rGozUFoe%vrQ+Xdjd|8#H0r1(HecU zHz+!Y-VdJFp4r}|YH6*X`qC4edM3F}N`6n6T1cx+$Iw!Bg8Hv|TvgTO+8F)5(Skf+ zHPK}($p0O&kMu_BLj8Dzt*-r+J;^@Ho@nc5Yiqk;YiR#&&w{$oMEe2yT0T<$sR^Fk z3#$|%?jpu2W)?KI>XY>lxU9plHRsh%Y5`_kXYwxatnmcG(QS_xI0l2VB^o1TnSlL5 zTdhYLQAArEiTpmmN1$^}$}JZ_Ct;HP6cxX#_HFi5dwb=W@?5#A+)&P;IFm!!Z4bBK zw9T?Lu%*bY;hw7089GvL3nA-Y$L-2a&&e11O{(kvaB~jv+m5Mc)hp^fbsLH?q3Ds^ z_GaR}1=>UA6O_~vRZ(lnJmu@!U3N1LChiSpyie5%YbM>40a}83f%~^k%>`d}BwSxZ z&q^M$j?8{4S$Z2QR;uV&?S{VZs-qmVEwQh6y8M3zqz!BuaKrzB|3&|oejD6r+{@j% zUueK?|6}fO=V4_8)@rko*Zy6qLqF9h^sgI;SFOW_uBmhg6jAqiyL+d2EpKo2C%OY; zQ!l2-sX^!o`JuznOl_i$SC4YKa%c;+g8ES7E~w5=I(TM~VP&Grx+;9QP-&^;Cy$qR z6Sq}hKDQ>7mJiChT+>#YnHGoW!aO29W+uUZ^y)VNRU9iW7rI*YQIKv%?^6mhiqA52 zV2m#4Ew!b@Q*G6|YOMB=PMscF4eW9*eLJ(k+tXQ@Y*eBqy_^Z2lj(@92>x1Lx+^`B zQ*Gnzd+ooOfRanm@SYi!`AQAP6h}wLapkK0vaK3gC3Tp{yhBcbpPP+I#>J?S?nT*d zI&q9*PBq@@BdO(Q(06Dz)k5lgZ)tBXZ$FeS4tmFX1H4C3;h68a?n&{CLknY@`h)r3 zkJPleM-L+VYeGKW6tzThUOh%|Lj{|aPQ=qiC_H=>&1Q#0RS6eGee2$~vA7u*qJ2TEuE znm^6nR0NXXD|pFWbJMxBn<=_;i0J5VhDF4u=!zEKXaZL)Q>=c4xBNU5m=Df{e^ z_9|F|r-VgcxTlf!^R}vgmFTY!sh$?MQft}|a*~yL=4I@;h#hT957<+BG#cx-wHDZ- z?P>?Lq*_~zFf|(OWZ~MhPoFxk17Az9g418awIsS9PgB}N;ExWon=X?fTu6O+j6jg zs^mHospsC&FKcDBbf~r1nKWNie?mub6dvmPqB7Gv z#xZRqk_uXN`qAgmw=#)tgGFG_TZrWa^qx0UHx7V7(3E`i2a0y%sDWOj*I+cLo{Ma^ z0a(FvSXKs|P{Cy5eq_YOsC6e%JZ}YJ6iV0RGce8nQQ2Az)9(+}h#PRsE>hzi4W2Yf zXi2qDkot)Ch%N_$8FZrO;x+7oRoL=hbXCsd0`XEHFT=12REqHa(mdm$OiR*{@PEy%=b zJ~kD^)`vzkp}%9FFa_V0UyhXTNZ;u{A0uv%a@dC3>nTH+i{U2Ld4NCD?bQ^ALv!qO zu5(^-oTHPZfYMeOsAN%|*kkPz=%(_w*R{2mk4e4gol50(?58uvH_2!a`REO^61S!Y zp2gs#P0}OvA^5vGc>LztJ9?R~s-IP_`bte9@2g7GeO9fZenfAkGTtMO-merjfRBct z@YF@StX0z2Gd<+3{)e|>bnP|aT-TuU?-CeB7NW-sMEzr69^~bQSD_blDR+D%k;)Az zRdO)#H&NQlN22Ms=nbR2j+6sGcpdI{NqGK0={*iaYoZxw=1{uXvXNihrMqq?otb0l z@tQ=1r4qAM9?>r6p+S8jzk> zphz`{k}7iwUxTw(r{>fSwCp7o>ot37P&?QT8k398rvUOwZXU?MH`vNCbngFV4ncw; zV3o>){ynC9u>^5&Mt*B3{gq4TrQArj_IMclC%{^!V$HsR=)9$-R1hZdTfq-6(F`?+ zA?!{cu`L}wqs95W*A?#fA|}ee6CLO))T7^UFMXb?!6_=UO3%n1=E<{}PjgLL%lxxF za#`CJ+ji7H^4ng^FXa3Dc`bjGfAjZkd8QmB$I`u0LMqR6)FM(Gob@$es8hhY($Le@ zmmHuS9Pw}T!Grp9n>)}M*bkfF`?Hh|qpdu{5<0j1)ZFM zQgd2BcV1=?s4J|nZ)eBR$v1|M#hM@{cj&vl3jZX@C}j@f^~|C_xg_&C-qEA=59{nT z%hOX-leeNgOCwIoRjUraC!XKdo0?TOn5h;Ttog*Myq<8_Lw53-kMyXRd|V*nI?j%C zrZ?F)?eHtu*;&x$II!bbu(xt(l)j~ZZ4*0ro$e*Ux(C|O90V-Eu+yC}lkUpPoXPF5 zV79VWYsr5#vn{vn3E19#tYJJSEfEB+6U1_1a(~pTZU|U^g=`y{U? zHG(bRI;DuO+LNv3k^}#$o`Ai z6g_0w!7o43ADl`BDH|5}DeE#4J2Zxyb$~Mz!xMCd>!#ugjE#LaXr@cSBH-X=jpI*+R zoUvM*hSvP+2bVP;_)~jSJ}S}Y7J`M)>CGL<8Ci&RIB9;QvvCbfo-5`7&VvX3-dk)? zB7G>&d8U`_#7}f61oom3aiUIV?`pnM;-{8mJ?iosUUTYnvnoDoB^dlDsyglHk~{&r zpNwrf${LElcK`oiB~eg1$e$FW|cd8U!R zehrY~64Wp2ac>eh+dHVw#L}O6p3WQ*OPiL@*Pz;Qm{kepq&DH^PJ;P3fE$02Gy0JF zVF0(O6!!g!m`H!L%#;@uPM@7~@m-XpqEZ{waDSojF;9#F3A#Xs%K;+vx?%;o=R#1n zI87bl2FfoM`VFtCGtA(3bfaUZ6wg$NOh^EYzra%;;I#kX964e3pTg=aWT$--RgSO< zKHVFiT}tKiK4xuADklB;IlcLJI6J~D{!8@997wvSW zJ>@4~0WmK{bg>ls(Ti$Z2o;2Za1`d@*JpEry780ISEqiyfR*UTPx7&H|3?kRH|H@L zOshP#%rJaPYrd~7)rZ3DPZ1E5_jtq|JjVrUPG5P=7qEK&;b9W-lJq`M-T2IVXL&or ztGG)=Ckf*e%ujc5hCSF2FCR(dbf2aB^#>m6GC%PE>)`uykhf)={dMeaSMV9%sjP@C zoW%ZhB+e_%UixwO(@_sg4|mo;3&hSn%f|UD!Jjgmf)3Q(Z1R7>=cH&Hs7}{a6t{ zRv{OdUwZD=3(jK_clOm^rmb(zQ3B8B#+4ig#A22Cxt{8Nj$Mu7tV2h zYM1R;*+xeIH9NhRr&HL))9h6&KH&`axf6R8f%hB8 zZU6As>XpHQr$vo@AG;6=!scRq->@tD>Eh4DPaIC1e45vjgpVwY1#QMFtir2X&)vOC zZ)pSalSg1zzKA6epV*1#h@%SctGjjL?yuzU!JP4RR7T%&f^^QtOwL&kVL$uxncrQQ zbH0dET^NSPb?&%8)Y_Wq7bYFFHR%Sti5l1$x_*YD$g~rbr7--_RO0QY;tx=#cvxi3 zQ1bbPvd?flt(TtMSa$F<9xt4m<3%+;hY*d`O2Env<2Br8b>_1E5;iLWJLqSg~+fmha=$h)ri zl1jZ#YiKPIcvW!B8gL04iXW&fKBB8urdwws(cT5>$M@-JP9z>%%uO!HtNq5BX;{z2 z+>*^^M@}VuH{|gf&06@ZX>{B4GiOoDt;>A)Y}6=Ysh>sBv)7PpzXhLpfRgDRSfACo zZS(kP$GCq*sPPrS0`%n8F2Gm!<4pWyKUFgqyR(HAc?-+vp*exC_QwOH<7baFdvg*z zrpH)H|E`

Fa3v8;gf|k1&4$O1UvC@*hc6EThIPs z_R-hK`z~J+%;S)SSSJY+KgK76?&oskDB_N$!#P|fND4_QHJD#NAw7{u{w(Oum$?ow zIq*B|o8Y-9Uvx!yj(CGbK?4|HhNa>hdtGuml%=3TK!xil94MHMW%GwLZwrkBt8+8F1)|tPECCpsGX5&EAB(_y;rZxg_&Bl!zl`6( zj>0-_1!E$2k`Caf*-4*Z+d?iusk}k)Sf&)#!XSi9id_rHc=Dxd2vth2;-(n1QuGMGWN{YCC5gWjkxrk#^<`90o>8D?&}21Eb;} zY&I<55~&($82=3INO+4ritgf<;E`+>`l#!Jb0c+>^dOHg{kRtR9)4WXRzb<@lqLl# zo=AA%t@tzKR#%2> zf;<_*cBP*>7TDHWzuS7b3hCaU7as{Fpq&MeBs*oP(lSvIg0MebC!J=;e~!;UWS!yY z=L~X|QLnhZV88hudcck1{h?NXsZobc!jFPI@fWli?9=2tNtZi+TQZwAG|q2YYH7AV zAp5X|fE>_P_($?a9;@D{S}pq`>+lHJb7 zAsYolWZiXQw`$!5b&|{`T1ULbMu55f1#nWI=f}Wju-U?%q7H)Iz*oMAd(KC|F!B$) z1a?DrVh;rU#NPnnYniAc4kNF@=)Ol~xD3=zvXY!fy`-12W4QtBKIXpjsdcjDQ%g%r ztfiYBb*-R&Pz&ipjDgt*C{Bf>n{$ZOzp1!!Wplfhqs=8v9?ew^i|W-)H7(D8Tjsib zi(PK-4fe(>snOt_26~diW$kD6bDW|(L#gNqQH?ZKnk!3@_LKEjUR4cOA6J(st%_jf zAZ5Agl;(iyrP8683{<4)@^!Kk(g#w#)FL?{4N!Jfy8s2SquT_1nRbOHN?9Whkwu7y z39ez^(G7s!k}ZA*G*LHcxOkR04ve(5NHUiPx)LK<1}GjeG(j(PzfBy9>GGkVnF1cfJUDCePXPF!Gr7C|z&455JHE(P7v5&ixRC#;3+}#`z|r#~ov#{)Stw)}Rbg;Yzb~wya1tK{{1J ziW@{l_zNtZIDyCEGEuZ}g79BKiEyWIs32G1D>#5|=l3$(9G_c`TgohV8kJ3cO&fqZ z8ebPvcd~YBbzbGO($K=y`Kqj`X|qz&;&;ZKiSLu3O}n0TuV`xhQi~X*^JL0MWk>Z5 zU7AuSJ`cz$=jiUtSx2m;(TS1{dJr@ZdnkS-*&^+wI-|L+eyL5>P1QxJ_iL@1_lhlm z&e%<{M*feYyY#2{v6PnomM>6D(gdh`YNGVN4ZRGP0D}`~*SZ$XTJsWQ!OR%@g$&&I6t0LxSH#4?!AHNVLT}fz|sO90&BWYxD-^DEsB6=M6p$ zeQI7-YpOn1C@Lz;d`icaUMzf-tIHgoyf{8I=H8csQSo2@`l(I6n2nS*H@>H3!W_*- zk8&@u&k?V^ZmZRIWY^FM{AH%x;qQ3q@O232v5*0iiX_r3$vfEyIVanu9HQ*3FepyT zd&rgYKzUDvk7Af&yTV68NbZWqh^~vTh_{QaVvD$s)S&#NJgnKHJ*chMPS<`?|4_lI zL5e28g_wU-){Us|LjORk(*H! zWIG4xRTGWN&2RiE|F!=6z0wVG&0%3G$lh*sez1?U^>v&fw{sV;a3V%HTZ+m$$|QZOd9c#BNTyUL)7=%OR}tFw-#;E7n=#c1VM=coG56QTp- z`hj8aS4C#aBBV;T*Rb4ohX0wM>%nS&vya+1UiDJio;b{RW&64;&cB(h>^`IbKS2x> z50k!-JdvHQ1khJT!}v4h3=C2~ZQ<8Q0M;Ay z3LA+8;u+C|$6&+Jcq9rg1blD>V62Vcyx8@$FSXuz$3Dl>*dVESP&&EbS*9$dDcH=jz!( z%zD=jo1&?8O=MYZ{>+T0Nin}?e191=`Sa1QaX-5yyhyu|^Se~sw4C`P$X1Ti?K95s z(t0_({5_?{NPUg^sqBc@Bs?nkF76@?kmoDbDc^y)N$VD?->a=sToa$cr$NOO;yBj4 zs{VA1Z*_k4=h_X8=@xg_A?7Ni!RLwk%7RqI>R45YvZI1fM1c2KsyHa04w%`Y8dL|8 z7l6o}X*lS{X`iSz%4sn}XpqtDH`h%&ZkucUZFy~3VBKPibsQ!;GUcE?EQUl#7+?#V z&?2N7Sq9QsPk~Nb3~~G#Xe2LVuh4ET#%8q)t?yNJuC!H7dd9Ov-=7C!&;RNe8<)5< zDJ=C*eq!0&rhIA@+aJ%8jF4~CEpe0Rx*ERgV>L&#rAn1zJ|L|y17lmvDzs$qiU|z67_|Rku68Ej ze!&`I5n0~0m@T(0*R2!mSDc4QZ=jm>;12Q#JOS_l=ObH?t_Tlr0BL3kM*+6uzs@$c zbIrE;Wpm15=)*%F=(NZqL}1Q(q8RT~NQGX^G>DOTxLZiNsl9SLsZt zT`p2SP^?w;RjYK}-R`=rGTb)$n0k8M@g}@Gdz~;XaMu`Pb!PQ)86rH4-r%#S@y;}> zcgwM+-i=2Zq8d&&PH0|Wt+8i=jynw6AdZ0OD&d zxs)1Dw_?@-=SCLD&fNlMDFCV{Z6QO+RgV9xyFo%EtZsJow9b&5rlI%%Yn={Ac zBxjz^eOfZJJhL{nVNavKT~7KjH{pHoXLN#aw9qLY3f#>@lvd3#-B`E1hGgSu4}y~NAtdDJM-EmbX)nZyqKGrWZ@0p6o@TZ`p;%Z%ol=1Z1cmXp>xTc~}C zYbSk(UCAGYh9NWYj>6^Q8PW-WwR=l7KtpTfy1BZmy50JN`cpbLtxl7r3RnIs?<>Vc z4+$Oq4xyo6TtDU%RYWStjjkG}+PTB|+u7Ci(?z+$Ni$VT-J!P8pnF5P(Gir2N_2H` z#@Maa+b!zm;JQE6>nb-E&o7F}UzpP^e{kN%>^233`8x_dtCZD_>afPdruPuu)*{(=STZ9#}8Yqb)!vWby++exHH~4gBnedbRoAAeg;T0NL$DgZjE+LI@O6G0w~l4#?Q}Jz zrFM}7`PtRv5|CS5D_sdr%IV>Xciwh-IbPc1Z67QjTbxb*)YsKu)i=t5OP&?J%Uzt? zA#Zr@nB4Pu1$noM63R0xnd%dDwT*o(-yQDcW%3U@8Fm4ufUmGxNJ$%18EOX9twLj^ z>6>>?^BMCE-!$K5zp?&>ez5NY?@ykCO_vO3wOf^G(n!&4Vl{G&<0&uD6Zq*++8=-! zDZ|pmw$eV<(cUR=y&`)sC%AUd9oU3sV;+K&!g^tpNGjnap|Y1UPuVKj4*4IsQt?TC zU6wCxkp3qP1uOM5VFYm=KZMmG>){d5b56>QVN+;~UP=8T-6$pX!Sxupnm#!rU8`IV z9D5ue>~Yp`>zkH~jdSWfYj0GhmgN;U=TFc5l{-CGogI_2AqUUdS`=Qkr!=*4U+wbR zu$FTBYugC2E7yjfi|i%V5c@=rv=`}~W2 z@B7U19_3*)jMu(UMM`&x((qsWUqBBU@4}sj?F%fOEH^C2Ec0wb?8EJs9XDMosXNRH zz`lC{1%eF1M0_rR2^@msqDXNcu|{%O5+;2j!{jq%-DUe^0@*-WoV2&3MpPi&EHDur z@QI+C@djGSFXIlgFw+6hc-BzssJmo;@bq(B-O0x;%yr0_;E*{S_T$#Qmd(w}8-nVu zRL4|IFJlTj{u|s|{;<)pFLJ;%Ff^as)gAeS?1& zcuW2$CaC;03v^xGuDg4C-Syt@Bl7*?`^^7Cz~I2c0p)&5ziDRJTj>#Dn5EsUx*o04pjj&tn$u3{uP@e~8q?LR;`~(R{kAV7$#Rm#5 z2)+vr3crh%il<5XNIWHLB$UJmD%z5^ne_RDjHc8$gd&%|$T}=wCDq z-lI*_9^h0L~IM0>K(|OK%{zVP6OXF2pm;N4gjNi8?~{qYx^P>P4-m6jUx5O|~cd z0B6Q;N2L9S^y* zTZFbXE*y-_18@PD@j42EK<9a>Vu>nRU9KxP{A+6QTH~|OoZ-7LV16JMR2OtOz!-4J zZ-V)_SCo54{T6kZysh{L{v962#nHLsMyJL0%R10H*t*PCX>-~h+Y=l!U8g7&=&AGr zd5zgX&+vy8fYz%tPH!8=Pb2xuv@%{4imA=(?>V7q~wzhR3v0W4n+}j5Qjlhv{5P1bYbA)RFl-V!TNa`uI zliE(jgH#Jb@pak)qaSn6|>(JHf6X53?X z>{Vdy<2%OxRM1A?@V*{nv=^Jhc4T)0cfwbo@Her$SP2&a6gFRQLfJq-t!=?h6LEqxpnd!; znJv95-6d5?TS>%XzykyBk4@M(AmzoCV{E?zwQt4EwgCmec3Rd(NBoV#{5SOm@{IS8Xf%Ts*0;Q_k_6c6ong?Z`oLr(|^~8ddhGyhCkR zouR>K`{4ZHI?1S5F~0=)iUeaXu*rfNaW82q=+JFa2f3{@m3VG9m--&{n-{b{_(t&L zkjfxM0O4QflkE{`*rG|3&lImAM6iy%M~!e5I$ZWHwy)Nowh;RZ$3j=MYX;ei%Ax|9 zROT%>BchZRG5dh0NydaTgPA^TAlIG01bu{cSPtGvkT2XUu9qy7^#(ino`7Y4M0!CI zAx1^>1f#%8U;rxNmH(ZJH3&F-oy-9?i46mqSQj1xUd8_aspuE$2Z+1Hu3L_+whfk< zO)Aco)JolJ~_zm!H8@M2FZ1A>_-L2{Z#`;BiX*@O> z+o=QPpG31j5Bn6zTt9X^x4*UBu|2TPwobK)9Iu_9TmsJGFzk)O9765PLXy9?p;nzVod2oUi9}8$Ilj#{$2arZuZ>?^=(HL8|q9(aAvTSQf zN@4%}@|@_byv+0I+fupYX{i@89%UcO$<7WgRF#L-EwpuU#*#^#KipbyM0#7=T`^si zta_{t(EhDE<)(07>~Y0Q=iS@)i{HS2i^0=Fih`#F>jPPTm;X`khwi6!jLcJn0vhTk z%I?6d@y+cThu6o}e{85}bE>KI($!{nhsuzO&BkiU!CK>j)k@^Z9q1&yAQPQ&Il5$}@5f605k8l>~w7zC( zrY93m_XhLhQ;G$v;!)Q|SGDsrSP|l_+bz>t+Bf?)rPOEE&a3%c>0R-n%(o=0Xn$VE zTt#;K^fPIB$%~TiB!?#TNsBE|fYid93irBD$5Lddw1aeoB3^S_JIO1~e8B6w_a2{r zyn1>Ud$sqfGn@UM`Th0_^L^^8^LyZz>bKHor0I~>E>DuQ7fgj<*E7Ig?_7~z^0qLu zATz%pKcjGI@u0HlHA|ZEteMU&)GlT=6pF_SO2sxwxn!*@Ms`U#TAQQ0>sDtNZXD?$ z^!npv^WnX^drkD5V)WJ*sgEe8N;#q$oS~#Adppw|w`~tB4_e@sb|BFeV@q{JIL|v5 zxZb;J$vAo(TLpXyC}e|=fYWux5Jv=Af*C{wNJ9;RSMnX`wYC9`HB~{Sqw-g$KT6sj z$NqTy?fj?u_kBLz{J8U5M&jkH?Kw|M;;XW)k;3(wq3%!ITN&lvQU2qCI|ZNdCjuS^ zxcS@sd-#nF=ob{M^|VwhBr4Ky4?tS#Imyh8l9+RbG0qlazZn+-p7Jy#Mp{_DlA8>Y3sm zas}O=tlHVr2X^{23WsLQ(9GUrT<5 z$K3wGehd0u_kG3pn(sS)bc@~)yFX!N?xNZ!^#P7T#3s39j%w%I{#1us9Xqto>Jr!` zx{W?K%&(t!7gLtLwd#y)mSVNyj%<-AAN>jTD|vL0^GH+23aCJtotT-K5s)rPwWs{Z zY+v-XnzCjx&Deg$4Yz!+nE?}nSA}d0{X1lOz*nDGkGJkS+`ky&bZs@KRne-`>gQ@s z(O2AvzG8h`=PaiBT^0J`J^3}cM{~{jZYBGwUNq|LJLxyDk5H)?tZi?w8B>kk#uCGF z<42>wy^k@>z`5N9-Gl(6);QPgkp7ptSY{Fuh=OZyJ+<7bdt4q=#O3eL<+AItI%llO z(4=2YlV?25Se1^a5vkvjU#9$(ay}&ibP2ZSj4bJ9bqD@{r`jg>w_fobcJ>oF^R&*t4CX+W#46;`L=shZ3M1qZ5D z*PdIQ&Q*PhESOLj|s$`7gsY3FIaD!e4H zU@`iNOLF#TN(ZUTjYV?{*~0AN6BYIKHp>bp%lmwgmo#pKW;a+7sXWe@MXl=c) z{(kkqiq6H8^MB<0%=(%Yo-rZQCu?~2h}=J*su-WQv*1>#i$fk2m6B=?W?fJ^A z4cW$$yoUc;cG>I5HP8)-N-H*qjEw1n3GrCx9-zOjR;j*gLUbc^9W-Z@V^x)!zWPZ0 zCG~WP2;adE2if@2=94wU%7>KADnDMau#&2FHEy$oIp2_7n3?=TbTl{#utYE$bngX{ zmx}S)gKp1_ekPR%=NatdZ!Yj|HdX1vRLi6(g5PK(6vC#HvmIqt$~Me7&$YxA=s0a% z-Eyrlz5ZoQY{jtBn+5lC-)BF}x(RkxFLPV+6Z32G1w~hKJ7-@=&q{J-rdQtq-A5hx zW-kf_{x8~e?Whd>&*!mG=*GBD)w`+xk|v7=NRKF&D*i~9s=9)-dzk)j$rI?Sy{5Uj zVM1+TNm*7zYQKyj#n)fY1P zNf)h;G`97ynjrmuvK3ej+n*}1uWEW+9aV;xt|-it`cn){J=XR^;|SPm`o=P z)I2K3wbP!`^1AU--TUfORjK8^Wy6Y_3KDZqn})>Cz(tKU|hE?Jv1JN5hTxLw%v`D)vwDfWy>mNR(7Zgt69)! zvLv+VTkbWjYrbin5AxQDET}5bXTl50zYYI*4f5UZCl9z8@E~Ae0O@zcC)yOF`=G>R zev+S}J%U866uQa2qSsMXq^Ik+Ex*~nVM)!~$}Z*aN~f3nDO4Bi%KeaiKHHf4G;dgr zTlRd=c^i=vk-aoMB4u^r^VBw_Q~5#CrwWCt10Lr7sf}0rvEkpnSE<{{QWXKp?Fvr* zK{`)!(9qfxWZ0xVsr}$K#rRcm0_kI2P~~6tt7KQn+ycKG0N5@}tQ%qXV`?BV>LilH zrzOKA6XckDhwOxWqAXE129Q2h`5tYR-Rh+Od~+A`1m7OM3eQw^yx;;j*P+5z(_(7; zKXvhO6a1F{mZ^SSRb08hCcDAkDgj*aamY16h2o1L#XH!4bN~}z349ea zAP5fJ>-WL?fGN@Ku6Bodkm|F1zht8z61m4+pzpb!IxbnCG~KN`QpJ_KfjTUpSXWq* zHz3y|yJu!jM(51btX1is)9TU>rvJ_&vZrJ$PYX%ClD)dFr^Kvh-5~8oe2wSEwrTB6 zZDyJvMNjbwMGxggRclR)a=qJB&sCrfU#ah>8l{2sn`P&rfsONu@8v5B_m-3ve=BNJ z)V=IfLyo;an*k@nwaDLq>E$GziPr%SjK8!&(OEmv(Bz2(Ob(qAcC}Ty-%}r#ca7I* z<09o>f`7Ok8Z|?Whqlq+okSGHl_AYeVMi+b8`lsaXIZ!M*rdn z`kSVQ+kTJjGO@}%_+8t-+Rq6x8T%*~Do!eEm3wu&wQu!i&qY3KeSWx4QZG?FRgRV$rzb6(#dZd@5fwJh2^RNd6$y9=-x=kybzr?jnd2U6s4EywTxV z<7;c>)236Km0(@2@gcmXy06pjkzE4h>aJv*ql?|sCbkHhde#rGol*U%!lS%+LS~`R(^JpgAxtaCzYRfbah6{o>7^y>9@%*S;^_f)Pd#&*JVc4jxQS_6f5Go{_DRFMJo^6_4Us~;|$S?a+Twd5S-H-XYct^@qivV$ite)eDYZ=Tmv3!w@7B|T z?t4b-IrSw~vwVPNu^Zvu?77wa$ye>2pr4`;D_+Q7ie9l@8rm2C&hDLkB6mZE<62v?FwP>ltAWgNp;V`A7QP zGq|Z5#24`~a4Fl5oz3H=F}28Y z;Yj&;ZMkukmzz1(&m-V!fFwZdpW!##Z;IboU%7dN_al!l1~2U_MUEII&O=Y=b&gLh zSi|+|krhWv_Y~#kx6e(@^v#G!?VGGh%1@Y+*g2v7pPmWv34D?Wg-c|D2HM)^~%Z`V4d*t#7MYtoC(_GRjOpy>|Kb3>a))uM3ry2qy|p z;wE}u{h>lr#)s7X8LRV#miDh4UA?^KWSygVq@8rmB5zP%*>ZHaU?-Lf=i+mfg&qTf zyhE3SWrjTonceD5;G4j+flA;1467Bf!fX_VdVw>CCsS&w-tn@fJJ4ItRVpejmSJU! zO3#*jD9$aJUeU9@(*A)f!Wu;9<()Obh6x^Tz5LA`{r>Zh_qX~9{jT`7@y#^PHXrq& zJUQb=twUBP?1qhl`ZDFt0oDPHk84sZJC>g*xl{BuZ%WR~jLxb3lCC5~|A|N#_@^qq zUqY*ddC8|!+*1=$i&DpCi>g@Oq&T9d^%BKp)5M@3p&!Ej3A|vg@~Ag{(phx~$Wdo{ zZ1fo)pbOgNQ>j@i=`3V{mo&^3SbIJnOK(oOo1T_qDmIt@svK5*tnN_rA)x+PY>2a- zk%9B=_Ms=B?|2v0Nv~P0_5c)OMw__Mm935k`h!gVE}t;BH;NGPQT#EKPKT4<$$@}{ zx6fMB@T9s+#q!b)C7RO9rSnU-m)tG!sd!Z1+acxG;a9{@6fs)Vc+nK@sqxM=m-_Du zI1^y;o9lbQe8gvf+2S+F+uzhqJ4W_h5RJI=UFcFr2g{BIRZT#}wUV8MJ@S|2e9f#& z`{J1s(-HB>`N}LGu#bI_Y}hT;V=UMqR0YQ52u~Dm69rQpV2Q%)(=( zODg)+6gF&cZr?Ju<)H09dJYyTs6#(MTd*PW3GTGt>foH#k!_1xJ3|z~KEY{0`+XDL zpJ+1_1H_}zw#-|n#C3%{?8>rdG#A#5t%@q^Q@XqKWSOfpv1D|~{<0S}K`l+LDE=Uk zE9(l*i#}%Z^qk@~%ADeVH}G8G{D9ZK^L^%dPw<}RBlPa>vBGVJs)JM^u)w#NkuJUU zbmO&}t7R_>#kroDYtlP`n4vN8NnA_ZlHV1vui~oWe*amUG&b>YqHj{SRIFffb1E`j zc3i(tJINFoG%ajI_+3C3Y7SEPCwe!!uLZki%;fZX=1T_z`1!jpRK-X_#dhK+_rkKa zqBQq$+W6$XNku6M8P&NjiVl`%*SIw-Zpv<+Y5SWR2)v(_AlEhtQHmC-pP2^u&Tm!R zx_$WEw&Po8gf@j_24$F!o4)Cq<+}(6yT`fDE_Q-nZ)(caOXOd5(U;6-AK%?IVbD5Xf>|{>4PCm zw@YKONr0TOJ6Y`6*9;8oAm8Zy)Pbcn3EKfL>urM(_u{7yza!zvJ ziR@(sqsjnyxwc)y^`>xJ9!0^?STi~m z9ZIZ{tXFQ*4l(3;wFESUG=|;_X$rp6YG_ag|9d`x9y@f1Y#}z9-AG<`3haxli7o9~ z1kE9hr|O_ubM5-tv9)b$gKFz*57*Z=p0k{B74fOW5LsJwmfmQZ@1-=q_C4rV;&<8i zvrmoZeG~4U?RG_9q%~;O|;|D{l2~*KaIt zV%)MnC;vQ5e3GO}E=|fwa!ZNG?pTp+`^-CpBKaeo+V`K}$To_$5p6fM4Q^8%G8mBY zTwe7ae|wGhsq?$$EA-l=m#TiqdWcQv6S93XS2?$MY<6Di{=_y(P-=Kq+k!WxE2`ky zt@ZxR6C5o296g1z_$I+EF(;p=4%Y8AUGZ&cwIr-QEIV{}$k$fcfwTP`KIct@_Ji1j ze5H3d8SAo^2TfC(IyF@`xa!{3w68f)bE)=ZT|wP}dPT#bhAB<|S;NQ<5Jj{|6Vwmf z;!W4QSfB5{LHp#RP66x$A2G=!~WzZJWK4Cl%H6X9GlU*~AK5~EV?XYS75Q?jExwsKKzMsu-?<{MBwaZ(s04O8Z+FY8;Gc=MuGaO<)*)7rFz zeF)jvs#joF|9J0kqf+T1c*VaWD{Kdw!y9Kd^lhN)WHp;BFO^4@->(o|MZCWiZ{f!#D7nDnAfeg zgY7E27i|`O(slC50RS_%Hg4hh;duC$u;Ibm1Ec(W{W$;7z>R?sW>3>D{abaE;*%%? z&U1Bc5!7TC>$B&jj88xlHYZi3cFyjTf21&^WKl)G`e@q-<|HHsXM?;FWl3i%_Nsd7 zTRcMiKLt+>s|sVoBEx=%8bfyl8v{Lj1{+2woP-WKK!)1_THZF9nl>~<*7mGQu8646 zR(`2GUp20JV9mywpql7fTSJcZ9`zPpFT5kq(bT$i@p$d6^}_=D2dTlHNbUd8C(~19 zx^IMxJq?@nVVbjw*WzV30`(zZTYEGtuAEdP%N~)k><=HS`c?7s+t2D>Gh>r}e~8-^ zw=^#0_qjh0QVR^uE`PoB+MGMTw^?|bdM zzcZucSakTaW=l?UyVXBYYs; zUw+@KgRd^2Fm!s9BCaalJ7H`5H;_AB5%J;2!_;Aahd2V~Yu|Xc^Lis&NoNXg+(T{S z_5KyT3SC(XzN$Vc-Vc4(}?iKXKFHY5-j^`TL-MWV5 zPxJrD`YW?b=A&lx`zangU^ugE< z@ne$8Q!c0J(@)niq~A^%AAdOFeP9FMk4huck!ZljctRWv%}aG{Yp%j6oT-*$ws@@i zJLb{_jNzy{i)W{LgfYTw19S@agi6IN5q!J{vFFk?}~Z~pLI5TWay(n zM%&d}t$HNAPd!sEalIF$_j9ni? zNBtc<%Qr{iq=G!JOb;sqOL`QP<<870$l8~6Bh&M(AoEg=x#*{g8#NzwbBwu`{f?RL zty~28Ojgf3!~aC^*sz$Wu(*Q6%c-V1_v(4o^G;utv>;X;SvTaKmen|w7AZ|N;)XjG zSh|>4!#VwHolMuY_C<9`<-Q6-MNw7n+9CSgMqkT&TaI%z%MfcNtyQ~x4*N$1M_|+v z5Lq6{Mct1MjVX+36_FWY2~=qhdiPWvljYLC!Xu!EEy(buYFpWWB6;5a?~)A5$6fD+ zym{~<|EbsG+=p|XwtPosa=CX(R+sOnt?xQc%9MS4Mh7UuK1KZRM zPx=t|B#H_j7<^58T)j@(2J?TbE7AYd7F0sa|dQA29nx?fIYdh)c=_$i7 z<7u;K+vE-u4pUF0!&Jw-^ZkwnYza1oosAq59UA>EQWF8tT<`(^i#`GBAqoqlCGLA3 z*gKm4)Rk3TEbm*qDSu&BgN)^$&b$qNdFx5?qj67~zj^;j`>k8fsodDI#fIC?529VN z#_PV{_u!H+efWXMkQjqdCeR$mi>EX$n<7%VMhYr`KE5DIn zJcsOEjc2MRmi;Jrn|KB2p1zV4jLH4#UWA(9m3 zZ14L1RUt>Cn#VU!nv(335|p|tbwtXxq~8+)5&?M88y*pqrpe)ZNfM);o0oi{GNa!6EHJPlT3+ z?hI`avNrIfc9&10TCGT6!o(Mz-yG*H`wTs5##B5hi7Hr@qsr|6S$KOC`yFRqReb35 zFCmx%S#Hbb9H4*;S;N#&vqfW%Ujc=bUNgb8iKD9iBOgLvPFy79W}x*z(X#(L&)mLpc9J=67x z9Vytz2GWhn@z}ln({Gvop}DBCA5xo}FcU)kZx?z%aq(H5=kqhl7^ggD16P`p&H@qOZN4!RyTEqZbM z^Th7Sk5l%f)~0MoN{yGqY>u25b}i7y_nXQl-9d#36FftlQ|&jc9&;m8r162FxnZo~ zv*Eh&x@n>Lgn5r8+1Acsc0P4)=dQvdqec=T@1_h>f4~?Z2fMprzW;bXSHDnhm1WWq zgoC~7G+Mx+uQruGD?X7wHaj_E&PUDr^iLx)GBb~4oy-z)c9!(3x}~?9n>&uMJ;)k~ zTNba}u8#9P8L&TON5tyrtQbYyh=ipnXX;$6*QM^X^!aJcQ`aXAiX9XFK5&v>NA(9u zC!wvwWB63nulzvCyP~TFbl$=2N7?6dbr`QqD$XmbSCv(pVmNLrHFtElJTt}R^mLg{ zSx#%0w6%oHgABe9=Zk2X1tt^#HTa`+rWG48= z4-S1W(?Dju5W$lUjJK*peN$G>063d#=3 z8(Q3@^1S}KMdOn5FQ_H5zmz7gKfGIN!veYBUZJ|sGhs2|M`nm2`XRjv3|1yRt1d>xf&*qJRF@I+dPhsI~czyVJg-{CdaIf zG=y~skp}Me&Gfn;>qLi$L)mxE3TqwnIm2;%mF|pgw=P4+7&44T^Cl~0pXuo9`olAq zGw~aQQQ&d?O@xy!;vI-(t%)Zf$DHKraw|N4yWEaCw(;f@dQbJp$~t9^q6>LLK@qK& z^F3FUPv@5v+yL49g<+WWs&f;6k0j-G@ct3KBDO}}jA|Ye8X7|! zjyC^X(WbH~m0N0KO?T|`*zrUfG;LdyL7KXL0|JVIPKH#6?g*DfnxZ~N{}tnjSr;pf zyBnJm^CZd?(JFjFs4b|azt(4kYNG5NT`h)i64zSW81n$*Dg6>%Ztd{e7P{VggJFTG zz&zEOZ6{p~JU7{2k+1rKUn#T^)Nn7k!nNU^c$&HoJJ;CFh(@I8u)bIA+p0Ac3(NW! zpDgswFD)Ej$QHGS5B=Bjs!FRq89owSz?z;RNmS%|-SuvyjSD&xdO5r>qD^F4q!?L0 zx^2wZ*wUCGF;}D3M(v8MjMyLEH?&Fc(|~?{bFgOc9n=WXImjyL6RXyiPAoW*J23lr zR+FrQSuJzs=g~zAN-tE5tKDKc>IjDSHY1@FtG$-_6!`ZDN)LS*+AVy3#No*8Q7@vZ zqVl2^MlX&oj5-$?8zF?{gl-J!7!>Ef#pjUMUy6K528a_EJu>G3>w0s0<3)Wl-I?0T z+D*Er`s;=ulgm8TW^pLpd7dY16}O8Y1p-1n;TRvojc41rE1j=xeXRpcXAEC-3#vC) zwfwQYyi4h|;-sR|LSvDw_<8A@vbjH&RZZ0WFga`&-8OzOl_dL&rIIr~8~tt2p^_2x zA_qlHih3XYBD!6yBDPsES~|#|O^{VEtme-z(#!7vS@~#z|O%4bG~@ zK2duy_WT&JEkX>N6`B;164Wg4vv!Q{G)rJa&=iXZJs#aOGQvnP(U` z>7LgNt2$Njyga(Bamm-B1x3L{t+1{+u&nWq$(7S>IwCEqM|6I4TFkW=A!c!mU+na#Sy8(12VsWLlAyzZ-ddB-OZ6yanv^43@N?YD z>}|~s?BZva-!J*S=v_fk{@DD|d}(2~;;6E|7229u!*R<9=PFJ^E|uge-m2gDb_l2! z+#u8$);VHgWS6K@QQ=TF?1{J?o)>m1v?7=a>Jl(p``q`Y_iyTI%!Aj*1OX=Sw7mL3YH!7J_>RbMI#e(WLy3MBjwq>qc96^3$5)^B^ z9(n)m7ZT7us6j}xP*Z5Pu*9&Ku>PUvLq-L+3L*ph``6XVeD8ZdR@+q*mFwg+(k_xg z^zWpWNQOhv?`$Uz=Q22(IS)HX$0Ped`x*OLd#YoH<1zY~k{Jd_wumAH|jNHLdj~V|VLW$34$xIHygJ zeplGk+kC_Pdj)n5-W{?mjE*=N5fXVXqA+}3_>Qnop|eBjkRL%U1AYBx`uY3ZQqNH> zliMXvsFmUekXxkoZ1V_XU!A-9cx8M=$d9Y#H_Ds-7+>LEHMz#9yJh@pNp}>xkMmk` zA+uO^O8Jxet@mT!nc6%4rvq#Oeu3IRN5GtbYyP{mtB|?S&8M698FdG*b}EnJqkOKc zM6wIa7>e>CkBdVEKmHDz<@w-NyBoUJIQ5QWjzNxz4j%{OXyz!je{zg>e05Yf`a708 zp4;Q>9c;tUs-~EC8XFoEI^UWh)nv7;%3M`b`MR=W^|=~BpJG~Nxo#I-hq;!-A^N>^ zxl*sb;xkX160k3j4dR2pg%pLV!Y+oc3)vEUFQ|UdtHAXEt^7y&xqXtoPpMa^dMcdK zUQ8=0L_EO-xSKgPTO-U`<7=IZIEqe( z^HmDJhMU43@N{!ObEUdmPQ7!cbCq+T^QN;g@)&x$4et3MQZ?WPBVXAhT!&}664cZR zvH?kwC1P{2xsc1nvX!p-j&$1)%Q$mq(=|h!zO?qKE=f-t#~K%#W>_xRUO45Rw%mO< zkR4!7OWpE5O2TW6x>z&Yr?>AozjELCe&>CU`K4>u`+f3j?MwOg@!9A-UZYkIQ9V`o z$!!vfX-0k*d^s0rgSGa9*7xQK#y5sf`uY0ldP;vv-^cLT*u>0QPT5Kvl>03Ek#A2l zrsjYz)5ftVMRh^{eM6zD%4!;qa@v#Ti|5% z6nFBy*erLpbEPBIo@Fhwd^8_2k2RNq9njO7VmoIKcWT_pY+X3$jVDjg%O#aEoBY1= zqbl30LY=Ca3oWl!vjX`Y?Y)kwb}Pp!BIJ7MZAk|67oA20k|)Gc;TC_0+s>}{oO2sp z4d4VH=-lb}#nIi-*|FVm+mYomEwU7uUV2(u0M>gIGn6T%)9C9|B(;T%ApZij^$|E{S;7UO5j+o$@SXS+ zzKkp4UU9p)jodhHB8XDIa7Vcn+(qsLca^)z?c*+Si@4p~Oi)tlfnfFoiu-fy26)!= zVHdHp*lX-}wjtLRuX2IL6AyN5l-LrUotr_oIZX28FVr`x6}=NArMk>`=2s?{`HHc0 zfJ81ykO+(xl)7r>II|l6Q^EK4F};8uK$qe@8>mqfL46@NkfX^CBuCtYYQ6y>C91@3 zVhR|%a$!GT!S&_-Wlwr4;M8!-xz$->kF@`6n`Egmr<>XuzZ!nUOmL+k$Z)`L)MzwW zEIVyG9lKo>9&diVc#CYz%$5EwZ=hU_JpD21CYnQUac*{Og6coZI?Fo3ddT{~I>J_LtFT8n zceoUuYwS4Q4~}I^C$1Uo~kQKQaJ!>|9qJOzH@^l zW10Q*N6JigC07!~VpDOSkjvNMr*h?NJ2uZV%Ja}Y*ZtM?-gN}Di1#j?tDbw0JI(Xn z^9MVN>&-U;vpI-JCTaLQzNV)#;gUlVv!t_hr1W>`2I*vJU1_ysg`|t5m|4$sWFFDG z>3C?VcY#hQrEZaP$#C)+F%}HlwWuX0ge2hx9|X!@eeN}~&ivW;aC2DXS>f5>+30zV zT#U}_0oK5_<+gA&TwPFU&htgQ5^zHath zb7#9so!O2|`%9b0y5HK~Dp;~Dr!5}KYwKp)HTy+JuCuv&qbHQx$u|?<5R<7(bOUgU z2Fkj~3*|1wBBc_lD(I3_byYGIt30I)Rc=&d$YbS$Ww)i-l4?f5=qQph6A#1>AnLv3 zcvi(md8WCqxVj_j`+@U>^AG1?XQs1{s{%e$tvxdK8T*{Oz^@lpiGzubWC~SEt*3*S z-x)hIL~>GcPx2Hkd77l3Bt%lq9A?r%^y>;w!(~)32xAS&C*TX0fPKshzoA9{!?)&Z zxG9{Jd&Ev+yRq@CFFZIr9*S+k&Sk%_@!Wc@ldII0C^?U8O$x*Z9APNY80YW^ zB<-1E8_`d62ycW>!ZYEEPyn*D2fXl9cs)*lGw303q_2Q#eF&T6vf>&rcsNyq0 z@D}+G{2ma#8}kOPn7hvX#T`SQ#3nrd%N23~d?S7d2zxSNk#JWC6~~E>K`!|N?2(@2 zX|k5=iF2#P`A?$v(vRsMh&L*b(HO=~m(e%rtssp0(_iqK9w5RVz?o;k@3H}+$=_mY zkrp2an?dkxBKQa%zM8M(bCI!<$qVMUU_qWbaMFufix~$P5vB;d!Jm8#A;U zxI4aJkq;M-ig!WSZbXcSZ_ZnymWUv`kYmZekRShsEF`U@g=7)OIXu^p+2CfKMVuH0 zBDx*q?j=MLdXzaJeQp!Hg;l(T>%$#qZJxoNEAAxs0at?S3u^pYDD!iik**uAPHwyV zHP}!Sxe=g$1%uW7o;VLybReBcPiK6Pf5d{ycwXv}Hk7rKjg$43CCDh*b?IVhg!GD} zx8yO?hPg^d(+4P!K*4nz0)qP_(JUMl+TnhDLe}$sP%`)O*ZA*U79p1y%Y&OY4j+2T<2y9tPTBwT$8g;yZtpA!B+RQMHf zc$6?)=r0VyS3lH(F{lAc@xSe;2UmrcxCRHf^r2{_?Zi=NrJKbQ;#Dx}OGFNA%X%QV z4nfV@17dp?`1l$`ou1@$@;7iouaHm4Jg}n(N=|vBEaOnBO;M`hC|@U9m!0t-&*BLAAkiJE|`J5{g*t#A}|BmvJ}d!V{n_>Pac+`Ww+N zG{o6v!%JW;YKE7X3pV8%p(mnPHUEg;$xp)QF9gvl6H#gpw+5rX;apFy1y-?IU_95A z8;pKrE#lV$E}N5rFFgW0r>8sx7Vd0#0`PEvSPfc!t=J4%jR%NqKq{M&Q^~!ETD7D% z)rcBOEu^+nx2O;3(RfM@PlyI|Gq6tk(*x;|=v5}tQ}O&Wdb7H4_!g;L>JnmHTZ%1ZLJ!`9UE;Zp5wg=6>1>U0mmjjX$J!EXHfxPF*|x#vfIl7M zxZ+rWJfKPLUp=eX{oE}+OYnowb}woSJ(*b`nI~;0Gs%`C|9qt4jzUm`D?^l4#RbJG zMLR{Ie1Y68nD3Bt)LwoWCi|!QvCrEkI`Ti)$ zmHZ(-k0*tO!W3Z#Toa^X6ZC}#;XNS*xgV>u#9`tdQG^*?1X^|vvLEW-Hu5~;_XjeI zwBeyezZ#3ylLoiq=2T0n34V*fC#xw2JytF#mQPW#57FW-g4T8%eb`m<7Jj>i?^kh@ z@2D$fcrL=R45%|4JhrRIQbh1l5WioN*U=L9kt@jss3XnDdKgz(;hKJn*iI}!pAn1N z{T$rcU&IC?i@y7~Fdl1oGI%Lo1QTu`-;DR-tB}EX8Jzw_m?3n4BS#|GS_H;76>I_4 zxW3_$fi_ysnpi34%hd(L_ZRfu7eTR-VwN!!C6vKO3Il{gLa`7FKK4VALKNGM9w3=) zhthaKmXm2zCu%gch1ySDpgvLWDHCO(2=v^3bPC;=2J?*WP7gv3^(cBFy^3B-pQO*x z*XVcjEf8KlgQNV8zCy3V93q|epyqW($;}|Mi5~E+%Rv7f&v)fovmqXfYmc*?;PsYj?quMJ*=S9{fFWro5>(M7&cwoQ5oy_Sb=L=SpR<@qOC2W^{zx}$s+TO)+)6vEG#ks_l?)JEUcATMB6 zAtV2VcgIbPjdlpdd~bdS_n4(vU##p8bIo!taV)jZw#~54u?#i$G7U4%HJsEZ>vMI5 zx;FYh^wF?P$~87OKeQy+Uf35nwQhfw=W>L6upd`4KTDPJjf!y9P;jdjX__EAtAkHp zp9CMJ&wlTC@830#)GxihtMZgN3XeQfc3Jv|WH{5EUP?{HT=24Z8KPLjODFP_xlHPXgykjYzwf&Zgs-PRta{ z{r+SQF{j{%d78O|ImIT_oypA4ObbTM=;%`V480kSo?*0udO_{Om@|y}h|y?Q%nrXJ z(*F$ikuMk#^+exx2k|al_>8*IjnBs#LPxHU-OAPjae4;m1Rvb{-IFj5bi0aNUtHf^ zZkOmv!u<6PNaq)k!TuL`HD|a!@ch|;TAn2ig!j%Lq?=qsmBD}D0i6Ipp=(SwBb9_> z>^DX-Trv(lxsSw4@|n4et4L%><}D(@5cD38se^D1Xh!)_S?CM@`rmAED8?y}Vd1Je560FDs2y-vxP-`)2~WRw)HCV~wUru6 zMWbG=CH+7>k0REi{k9V3@MpM8RtE}+pF7-@;H>M2v4`0>i^E)FsxtBhnW4LWh0aoY zz4mr3i9NMceXZVT@HJgFcd%yJ7CVAmR`++dfd2#n`%?NR2`S5w4^!S%snwyH=H6aD z4}AQ68~OJ1jrP^~?DrA8`*^R!8qzthXCNe;f{S{jjFyH-!k8F3gi0hw5I5jUe+)C2 zTl_N2pa{MiapV=g>|Arq%uiuX69#XeVxbA#e}0HyIc5s(%J8a8sootj%W(%}Uw0UfE?HmXx%baIi zMeYXdc3Q^TnEn2N=U?H!Fr4m#(F;L;piWV9 zslJ$f=b)C2z{p&QsJ@TrPT0i{m;rYZ%itiA!}sMsa?QEJYytc)7hsjY#u?#k<``z* zVcTQX_vVl94fOMcU&7i=RxgPlU?Xu zl4i1c3LR)~9pEng-6zlYnqNEZO>L?6sJ5Rr%Wt^fS>Mw>BfXrW3rfj=taWd+2NE9mo1i^klm5Jl<8$fvPZJRvdOX-jF+}agQS0fCKt~P zrsq;~$u`6f;V=GYuEw*;O}Lgi82brp4~xxo%vi@@(S5DGS5r`3UFE7ATv=9eq~dW! zW<|?NMOBCDeKmu0fd-v%xFyH7#@W^Lnd>d)kv*9r>1IWaiqib>KJVLEYx3_J*eIwx zsA+Jg;CjLT22}q>D=b9y_= z9iUV=zdEP7IM-9SXPjVfa#?(dKoM5xASk+oe#fknbdmbV&dM6fugHBBe<~^!KFU;O znleQh4zIn3icKKYRLXbC)8)@(Gh{}rtC=L@C7DbEW*%l2R?NnKAvVDEWfOlNea9=0 z)BWBBj~+*sU20FV)w52qY&QREl0ui!!>~+$SXWisx^{ET*XkD4AF7sBU93v4epP+C zW@_zz-Ar)9E?Vl?hdYblS?&^UlE;_@vS{T+ueTb?H$hwNe?PEoa9OZ5WNv7k&~~A} zglrGqALJjn(?8R1kIx)U8?SPOL3Tm1hYlpIaI0C%F7q^Z(=K0UJun)cqF>Inr#J>X zERHqKk*<#JE}&h{=FafX1k6gv;nYU@6JwM(q%wJ)yi8H5EL9!%>ZAS_F>0aaf#xH= z)@Y(MkJU=`c(3hnB0Qtmh77W7$tWh58b*E)+XzQFg6->m?wsIo+cK?xTY5pI)518< zVAglhU(Zx^XpEb+u|yRguQnu6M{`ez1{=?`m) zL*f3&&Jz}Zo2-+~QOyVSaEmrKpl#Fg-wVedk6g7yX^X*c+M zRew>I%4(Tz@WpU*dp+%3;~g#R@wQ~^R_sy!W`1UlwtTldvCgqgv2StQaMpFhPl&t2 z_lIZG3@VB_1uDmDc?;!3RggMCQ_s7n&k*0eShN|OR8hJyFFh*ykH*7 zgVcR}ANn5%9ujshVpeq9*qS&`yd(avcp`pG?11Q+h?}8LgD(2ZeEX>8t7U@pu9_ps*orXE44D)1biEX3fhbzZp;s%J*$UJ(qbe#Nx zvewJ3sqfoZ8xT+#*gkkzNT1MpVHIIr!sX#fVNB?k;H^RT1I}q(K6N#-R14%=BsZv& zqR0*M+;pzD|7QJa9tQvLu7J(4)>p z)=sN^Tf0ctUSFw~0+MvfQfjMmym8(26mo)af^5w^mG)3PRB1G)d}wXUfK5SXLNHNVz|>Z+h$Ja4#zy*k#^#oXGQYL=QWnTn16#&h~q-H_V+>h@J$mFvqJ zl?^RhQTC%exZ*;^sjAMk8HRmm4EYq2}I}QDA}T?E@Xf4z<(j z&^yAN(T*T{g|*iFyXlglQfI4KR@J3qfBA{hrp1Q~uH+5LOU{2&_)Br`l1AnAE9Lq< z)^F|?M67g-YO(M3Am4}vaaoCTlh3ELNbR2bN6N0GOL0%5{|%oJeA~ZLb6as)vW{FP z%!T9F4(A^G5ZeRW9{Xs=4aaN85J#MIj4RtU7;}_RSF&rJD-H233jN<*zBjRuDwTYf ze^-t39-}=Q_#K?XPZ77H`b5`_85MImDk-9O=*Pfm+8I8g>Xz(h+9LF1_qd+hUDlVD zW#$>?K+6fsbF19$?ckjM!m}ia8^uk-y6 zwEQulEWhM?@l>cXB>Aa1eY3~sG|S&#+@kci@`$Q6y4AL`e0RxlugQMlL17W^Vs9l6 zt+S@?;W{nSKc${d-jmQaW_H-Vz)^mwnjVTe;Qy~jZvIsl>uQHQs1n;fTZui|wGpiS zjm`pltfP*@jZ$cY9rufl5$YIM60#}BFhIfhD8{>)l7QZBJY0TEh z1IXmC1-$ny_Nss)>JK7FxWT5ok6`cQhGT)#<(v%m+zC%HOJTLujn#ll>`ZniMmyu& z7oErKy=;qYv#rV2<(5OH0+Ytn(%9O#Oy3W!yRag#yidup{EIpKH}+HDN7IMiUu55V z=FBU|D9xyu>zGPyS2qnF7QH&DNu2`?UpLh^o6}6$FtpC;WKWC`mg3()MMwvdGkDRn z&9T*X+4iq(s%tYll)ovQ%CWr-`@IR@LcV=TZ8*)NRSrBw|l1V^hT zl00deoR*J}wr9Fh9mLV>G^flu&6_i=qg_xjFIbfFmD?pR^HW>>m)vPZz%A448yYGk>}gDSmT%go~aTn{bFjeBbb zR1}xWOAE^$RE{tt*;n(QrN?}mgpG|`lhUrvyn46lH?IFj`r^cyk#_=Gf+cgBYAcLz zhd{lw*fG@oGoM0!VXn(}t0J*4{*Pv@*GYv_(wdsh)2;^AV#5%fv39err~bXM+S0(e zg}XpHq?eV)yc}MosuaaANwrw$sxd`Yw<(>L`)`KVhwX0?UiEu5=gq4(C2v=Mc4cYG zo0_+|)zo5Hh|i4B8?p0~C#Snoo2HCQ+!os^rd~vs;PZZNb!(+jHd_)+W`JD#Cl$`j zrhSOR{6yB_p5r;=+H42uM;}$QzGhNwf4$4t$eiMs!CoMX72>WNEA{@t?Tj zm`mVMZ3>YEp7nXHYAlOp*29-&E;EB+uqr~xF3Ec+a^+KHpCmHK;YhKPUFw?RsB3R$ zk8>nD{&gg|vOM)b(>crZmexwkq!Xm8B!4sC$xr-i_f|V?nQT~9)3m~?Y+`Ze!t%V2 z+1oRxWu$!_|1syi&$~7stG=HoK3wBxKIOj5l=$w6vZg(3w4nK~EmE424Iig}N_0f* z4>+$%pw+@C$19V}C>p!iI(S^%6zr0DfsL62-$bj(vObRCrh7U??c&;G-9%*Q4Ra@u zn-y;FSAnM^Ud30WbW9tWaxcyh*)Zg&|6K3-s>_m*R1+duTtzmgoszH8aO_jcy(S-awIWV&~no~ z+g(F!PmJAbic9tyrYPgZ~KxE6JhfBA~wTkPqW0JFUAM%ut zz_nwKv4{A<@Y@pUZ0a0wp5N>NA(?Jg zot^0x3X}h!sLW(}!@nE#uRo^l{ei2Z*s3tz)c>jbP(beIBFhk;4R9XQ^Aq|3f^7_QQ{wMYben z3q$#EzONu9+EL@_`E(OWM_GVE=GEG(Ox}mt$^B)WU0Ys0yI@mhpD!KWZFsZl?a|k} z->iAN=JoZD!*VxQWjTMQ7Ah(GHn|Zf76* zHub}>H$7jBer|qp_)*}qhVMfQhgscJnpca!bD{Z3o$K#y_`cz~6rY%FT9>L5V;7oX zKkGByQoUHSNOK)+C|zQv?PL|#&bqYr%8&Quwbji`vF;q99(_tiD;ueDm1ATc$t~$4 z_-A!dcu70UPIwL0whw1xhQ*$aS{pJ4Su`7l|i%q=}6yN`L-8L9_X*rw2; zu+MqhY&Xs}$!&|E1F*Q0xf|k8jJLK)vm_5BjC8DIH+_a!!gXdFdh%Suoy{##H5bZm z=WDVo8GYYJycqG+^Vs`Q+vi8$@mZ7fkH`$olaTvS-(p`U%}VQ?GCTf5SMZd=Tp^V4zrg*r0ey;`3jela!ff|<#~9^-^V^Dect9IaDn~d9x&y3ehgUm^`_SofE#-8IcgtNpvz6d1K>6R15U2q?3 zRqd&0T1pljE(prLobl?*xwipt13upPPXE|$b5S!S|ELCf>6L4J|Mlss-sE*fJy&|! zR~C>P*f%aSHYcHJY*F}ApK?WSx`^)}Mqu64i@Z))vD$l)GrP5}^R^X6jsB+YH+`~s zlheq?(|^hvDjRrZD0?VgDQ0;UYCdY+LDeCx!=Hw)4nG$DIDAdmrjRy4G1?HHFY2qR z^*XLJfmMKcWs`@_gtMSvV_m{u@D6g=XDOzb@@`T-j+p`2ttHT5b`9?ZH*}->x zNKtr9?BGO2?9}igf2p#q?1f~HEXu3GdzkNX?_bq@B(d~Uz6c}3`?kgw(%8frZr$tp z#Hqx?WDNBP4p*(=o_U4*QzlpC`84&93%C-%1f2FC?H?1E?LRBvyLPK@iqAsLJg)|- zck%(uOyZ>HyM33%-+b7#!t%lL*ji;>ZoOfh49#JH?KkMDPuo8@^`3*mJ~D*qKtG~h z6Mw)_4eOoaBH<(_bFR0zYmSyTDw&===+lKaPhW+89$d`p6740fO#3LuPS;l3V`P2b zHjj3$T51Si*DyegVCT4I*Y2a9J*}b(B{BH#h4+;(s3f>bq!7o$kCuMo3 z=Q9+myFKgJpPod>isb-zmYgIR}g3n1F(O0Pm;-+BZmU1ikUcw=HP zx&L03!XI!CH|0e4=U zO=Lo?@s&uTFVY+7CsYA;-nvmKq>kuL_8ApgKVN)@~t-#}URlzc?GWpWER1%lX;f)XG~^ zvFA?P%qER-h5n|#kvYLJ%`+B+=Eklw_Jy{;>^GfX-F8o{dpTx<$DKFa)!ZgxFLjh! zB}(|`>m`IqIx<9Q*5j7@PK_575168x>ElglgkA zM6d$v=&c}+W2Cc|$&wUHKgh!66|#x4N~wUoz^@?pVy{)2C0i-)C_f?dkqwdFljK3= zsDmcO4~}9@v75CGdpys{&4fnu6By_py6`{q#r%5gwm#%bp~!OMr+Rz=_GSNsbGeP* z4`)rI01^};!ft3~h6%rbv-_HFECdLDL0>dfkO&K~E9&B^xla5rC@V_&Z+sKsJ)b04 z`Cy?l6c;aqvqA#)RmX}c@X6{$L=r!sJG(2!VP(FQXhU2>K2;nPAgAHSyA7(iXE^G4 z>^x57r$KY^fj98?pp1Kl9rYcc=kCKk`+EF*hKs|zdp6gLp91}kKODW){06Q*|3&x( zwKa_#M{I#lbxTn%o+Db2`OxvCh_8h~;uN^|hGO>uj=b2zn=B3?YKTE3Mb0BKv4b(3 z2qwLtz3q=uT0kx%b>ue?0-H0x;9hN`2V*bf3Ei5x!OUhFF}tuE@)YjCrBL1-hZ4n1 z^noHtMa+jv=}#h<2!+#S2ceswfwS_@qFv|;J;4j?_AL@Bum{6prk;S^nJvOPF^pIU zHIxUMfK}MLY)AY8HPln+CJ1O?FN^hwo`gS<4<*fBIN#2Kg6$qu1XH2yJc1G;pizAR zh1~{dm%D>Dz6Gt2LEV4OQT%UsJ`L_N2?fI}C^Y1FPfMXAR0!*!hB4#J+M|S2@F5Eo zqj85iK|$3Ly3x1LPK_7#3DckpTnrUyst_sY`5%01AqrZ>-=J#U4qaWbFj(9G_0KWr zCgSkk1;~y|B>oXU!EOCNrCt)$3lGFP*fG6NTqEmJd&xhc+SvqcLsRM>?4<37QffcB z1Uu5F$s_;M%6z1Xpx8K$9jWj5>qfE{^k}nDrcP){v$H9O4;elbi)_{fqpL zbQhzA1NkGU)xHQ_c^AvUKVv_q#gz@>GuaPt9UJbTotbWdT?BvS{`_5jkZ@DH4!uSg z_SSzP|BGLzq{VV?$ZBzL6IjIiR1n}$E5=({aY&1LzHh5Zd z-T0;aZEhAj-=lLk_LLw$s*KgM3|Gr76J`+QWHahFawv8)?P3GC6$$iprj#jz_w-lh z1JjEcLARy;tEsc0L>o!=hcm)I;znT|p9!^+i@n7jM3z7vRIcr~v0O8(y{%@|&@Jy` z7jOspO+rKHyIzX{L_a7c9Yka7=$<1zZ{24mr1B8;8 zB`U;!gt72s3*lEouhy8$;9B$Npfij{t@{Ov`S;ua@XVJAMMAo`MNET^!iS84zJ`SA zZ7HdsZcq%(BCghjvgs;X)C|%MEl^*m$R3Lu#eQfP6NG1cDE}F8`Ue-v$3sK4L7pZ|iMZ3Jd&`%o+A zLJ@dQT!#Am2rlg5@CP3R-Cu8fe+W&_d)~v3ME%|hcKSvr1&uiJN$5{Kq5=xV>1e-U z&`|uR^NdGiprG~~iaObwl*03RI@A$eh;)J^u3$t{gUEPRcp&^E97HK;1rwr`H&m^w zg$=?^w6Y^Yju0o#5O?DE@W2o&;juoOI0-lK4LH~4(BO83vT80doJb?8p`zQ3>suj? zfELjxTt*xH2YZs7U=Z@qB6lKh;j@s0t6wc1#}RLcg`z)V#Vlx;#-K!p62Bp8l)}UP zF|i!FD>>m1MQEIVh~J>Z90LXC2xwJDLu>1YU0OZ#nw_E1x+Xrukyaw^+{SCW5W_&F z?}?+0fP&&Q6jD2I>_~zWd69%xP(XX%B@TpldNOvHqfowPD5>4h(zb-^b0{J~Z?vfC z*kAr2YH*x%B9-_A=D&rUPNic{Y%$8W0Cg{re2C+^a1}Sj&v0^&Af`Yu)($Smk8)VJsBR&2uJwp{i2U%f zM!6M15!e!1iz(3Dw1L`^ffDN^N>_?DI|n^NQ~0O{p!CiOgN1k~suMu+pNM#SUdTd; zHitfZ56b2lv`<#C0hGp1pyE;CC??_?YWyo`94`<%QHC9$s?~{4P;>eqhU!ShYtbM5gckb0{^D}+FVy7s;&;&@dZDb_p|(uNJI`QE?K95oE^&p} zOn_v8Iv9<9P(#QFH?*X;Q7a~+el$SsX@n9O3bp8OxFcK8Ubp?P{(Zxp@xfT29nP~S z>hlovsQrn~cnlzVqwGe2ci#!0=ui0K4D-cgye<;?8alj>z~@90Hnid(sIpnHB|cR| zU5O%^;r*>}%-%SkB%&@6hFY0`x*UOrFQLZoQhfJ^f`LFiuRs)#nm0`~=l8$gw75G_c%7 zLan|X>hmX1DzikCbm3!okh$P-w_ew6DaIwVB{nbW_tYC5;o5Mlx&16x- ziRb)sp-7}K;+(-aF^>P290V7cmT>j^2p7A7WCl@(%A)7e68boqB6j3&BD;msbVIvURTgq$`-aE)<>KR?;jlMpWxF~?WO6c zS+5Dv#Hg&Y(M%BWlXYy4z34xJ-8n1;8LKtySuv;FWOQ{TWZwZ(n#Z;iM;#!7Hjc~wIC$T z?EO6VeO;iZC}91f_os>ce<+Az`>!%8QMxo*o*r*n#f*c{RxFWx zi~m7SAZAu?28|XH*w1=^EZ|M!i}5Gyk^YCa?dDA5LBl$;+w|5l$#&Je)QIbys$Pw< z+MtS<^0!5&{tPW%^XFnA?a!3T_MXb1|JuiQconvSd!N05u^*n!A0nA76-(ENOp+P0 zwd7{*T;E(vMpIEav!tk+YlvZl;jM^wNE#Jn>STzsMtLqUqF8#U5_nH5usNXDHyW#j z?lE7}Ls&w*hF>C0P@w$w*e_x|iGTSd(e% zxnaV6ven`ZJOQ#Pu*$u{Gr^z8ILz(_eMdz+Gxir+!fC@``r|wooC2?kW@N4bpV=9- zLmr{^wr-DVL;cg5geqYdHj9-_{p_6iAzPI#DYdbYP9z3CylYxIW>p7v1O=J4qu8NnvyM8yxfj4`~aSS6Q2oT*s)* zsTfomQT!ip7B9&Dm6epWBa_HTN4+=)x4`tSiQRbsV>E4^j~D|hgu-}xKDU%`2ixB7lpP#o*?z;6<#qv zL=Z=Qp1qkmLY7=!a(9HPG^0FZ41sY4f798boC06mS5CY;HubH@kHzMQMs=xwZ zsjIMKjy0|XceIa7+rzjCwL(8*gLun{&b%C41gxVeym7oOcs{xiJq2H2^`x%=2J^qI znDMJ_nHp}YYJ5?@v1V4)-J%ag=L-JHeU-i8XRkE+&m9@=jP?b_>M7J3p)*R*>R#mR zuvy{v!c)RNC{|%@p+SLrzGF;;X+c_W&$Gh)RhDoKOJ!^f)^0VwG{?9v``ol{+#%#~ z(FloG@B;0NIKcL`2x);$**Zj|b0yqs$~InKzrS7xtFEmSsyH z3J>!bxRtvGmasd~rhvQ16EM4(AK>~wfs=YUm?J)7B(s@ZKCg-ROJL@+LI1r7d^`6e zmT_l+d{h@oiFvTvgJ!&wx^Ep zd>5I*wITG_`Z!L@4eiJ{rCiPJM|H7JbZztuVfx`0$WZPX#%T8e`yu1J=9fB#cD2q8 zyrK_WQubv+3`k|u#fgGL_k||3Xg9^wr&J57x>Q^)rsqw|9Gw0v4bF(j z{F8M!OP|@Jq?dUkR~*)=B^39y@v}43kxgHWIH9CKOvn?pl6a70kmwzy+!osCB{msAN2(mJ9#zifTc zpWIKslQPz)H+`%6#!UPEV}DlX5}x@6f`mVfzZbtMerU&&oryMKG3NynXnJRbX}qb< zI@oi=zl}+7_A&4J4Q|xsGDn*}Xqp;Ns;4&(b`;Wo;ui5DMWQrOS}utfWZ=HtOm3@|Ed7v|00iLQto8~89^Gko zEJrkkImNxewE6JuXn5R8dR$3hBvOB^mupx(L=OO z{#ChF-cdZ2zlqzLeTKF*uz;}yvY`>=abg1z%1;I<2j}#MA*WD`KYW39v;MzT@Y+Wxtvs1Jm{?jv0SWSoFFuo-$l|wenk3Sz#@j@ z$H93@%zX+kh3<0(a`tenP$qO7de2%(6M0d0Ki4mJ1LgHT^IxI8XWfVY#V3X9xv(NAGi&s2*IE|e2f9J z$3U|X5gLa*#Lw`0^DbgxTsw4y*%zEoPB?t#R^}OcnJ!VwYAS3?HoCmiq~51`&F>T)yCTRa5I{G5-5mQ(V+{3%#ilUl zFuDghODyD#;+LG1ewcFxZ4JDgC=)GE)C4^U-W~K*wnNYb zkK`N!Ey`ZZO5n;$;SRtT@b^kyDwK-uvQp6rvJW;2c<7h-8a+o{Ne-J`=DZB9U`75c z`eep1MmqBfI|i~s3^o8J4b{0xjtHZrD=J6 zZk?+7ROREc;eRB*um5Cyvwkj0Ka)SCa*6t|DabvQ*E*tq`&(Umb@8_S7}pT>Lb;n? z1$ljQ9BVCWjO$H&XM#V3y%Jsl{{`!y1ZW0(QQ)Zap~2NOwef}Ooc6a~V`=SL9q0?M z6*SAn2b)7bge(i{E1M?#%1c96BITS_tToIgwjBLI>P6`iT$(L8Dm^W>@mW|8c7cDs z`<0CZ?Y9Z0e=U0ZMb{fDCvb}8hA(2l_)6>>_c3w@z5RmR9`pV98e;7?ONElB;4J)wwEFV_gI51@Op`*EVS+_vLe zU5tfdx`ZW47xLoSOMFk=iOvd#l-dzcF$ZuCKnvg&NHLd>bYyFMy&PwZ5zRw%?Q~_@ z4!TEr$a2|J%)E?t;{Os4k)2Z%E5|D#`F8PE!71KZl!^30%-j=rI^iXjljj6|#qA_l z#RK_!&_Nu6QR$oFF*(j#H<;O$%hor*c6r{n6{LHO!0SB)dCV=~UPeAck64T8t-veP zg3Zk(uZp?>Cdme>rFV>bg>9;Njozx>ULR7Hma`x|`P1AF2fwWTx#)LL30%EdJ=s4{ zAc!n%alG}iR*i9=T3w6&FL;Eg0Paltm&$W2b7Z?|-b?gGrj>n`o5p(sh(U*VBMSc+AC;$=&4Xk&?I@BD3r{_?bsh|0tR6d zP&4+6Y$^UJHAp@RCz5^f8QiNJh~ACjJJ=T7^wfCNq_o!At6Y=84l{+h3yR@>8~vUi+{dzxq5UI9#I|g0jUU*eEDVzQ@LcfsPNKfMb_4oIA@$bZm2n!mLW~qo zlduFg@aM=tP7{3uwaE0BrmjI&KdV8c3e)X2ZE|}2mF&a#EXjeOcOgqc`v#v?UQqNrSxq7d9Te~>Ud|T zJM!&OAfsIBGP$REj(M|uzXGQ~qjxf3q3i=wUj==6Ad58Rp&gW0q?qI_>BW~LgMbt90b?uZ z7f)wS1KyPZ#8CbNVlufvcv{epEPyZ3M|e%nm)2%up{~E?pr)PH+iv5qxb-HB65W@ zj^ki$W%Ys&p|^<<{Fh`AUm>cM9F<%W#9>CxHl{1!bqj5~jjwc=E=)UKJG2=#W!Wyd zcle($apVKfBEn@qBu)Y9$o|Wg=o-Oz&ty~ zyb0QIyTP>hp3m=j?B4IZ?f3zj>Ile~{%22gD8aO=wRb;g*|hVSJY4TkPoDFq71mEx zrB$aCMHWoV_hnUNmu1b*Tv6bw_)B%lkZl`Ct!7%WaPcGHp}(&{kHpR779I=bbK(M{T^FoNj2Co=HLsg~H>q_j z!%cI!V;yygQ3q>zYTDJ`W6<^B^TE>m|!$OE2qCD`C&Ek9BCBBb`#v1tbCc?wl!I z*R9E38B{X0Xm);|-1MBh%y+*PMUtwgs(+fbmS^s<%*R-hUA zU#l|J2Xr1|4@Zf&HOs*T%r{|!^o!zBa8U?5Bwu+$?vw5ow-u;}Pz*u3!&z*EJ&XMu zzThvlry-BWz1>Z`f~WXTihYq?Rr zM+%8O{Fwwyyhno&6j{zai0>i81hW9Ew6kJ+$i&cAAxD+NWV@sv1?$k;tS{a?#{qM+ zuC3}x{i(Xdh9N4s`f4-Ra={%$=R@nUF63>|TzOc~mS8;CrRXImWYH3rU^W5aivXGM z5mX2Ag@ZWH0Y_yATg9FN&Z-P!{0OKXb{6)r32pW7{`)1NbC^12N5KpoQ}qYg3NVuQ&w{FCB| zl9jSAvOZ#?U^;(0F_@PDEhnmBS!=gi-L5<3tGMXR_cs}-0 zDu>KCR=ub3VC~vEPu;U(CQuf za)Zz$GflUwHHnOO^+PmKjXBkY<+n<5iXRvJ${U%tB6m&kgDPqLd6lF2Kg%u8JzB-bM{1CY^>~`qokd5+u z{%Nk2(cqopcxu?Mscx85pHnxeZeqheRhsU%`9DV+FNgk!{RO>CED*dB{}CUTR7lFj z=f$0YZ6!cN^7yzM6QdhJCg?o35ZpGr+?{9&cNek;dJb59YI=%)H#ODO#Zh9-F?TYR z8ipBJplh|opwh25zy_jOs!dTP*49*xE0vexC2R8^6kN?w=WZ=KSLv&5Y8;`SZVK_x zXgArP039!fH=4i&(PSK60<3{c@p)J&ybfJOydml0w&Eo5v7m9`KcaTUjEa05(I;|V z_zI9`ci^?qX1dmcS#z8Bs&6%RZ(Q5Zv7viYgzl;Fo_)PL*H_J;I14Z);UyCVY{4nP z3}G9=d2%AK-mq~#_y(Jb`nf*zAeh|UKxbeh@f7SEx&T4oB+ebyZ~AWkU~fmj-kW6q z*VbsAYKa5Aq0^R~)*jX?mP%8=a9mr_xTJP^<%NnFWpfInfz4Qy-?=oRYJJt;wZV-$ zv<7R1+wDp7#?y)z`Rt{f&8+rJ4O77`XPOz8n18WPaf65sBrMF3f1(VjMec;;<(8!*|Z zMHuLGbQ&7N%|ucWGg5=(!)uV%+^*>auVy^j3@sEDU{PqE(UChlpZFMJW0&3^}O z_}ch7Qj5XVt+4meZD=mStv|)o$Bj!7YV`a($Jlz>sZfVZrQ#_Y8ds=O%X-`BGXJ`Xovn z+b(Wi+~ZbL;;zMbBi?{nYaiiOYzxd_9ict&r+SA1t9+`r(J!S%(r18#+Y#mk<^@3G z`-c_=?tP)YCT}pHsnvS>`nfa%eE}pYgLnCKKRfXk*EDEa&x+71PNhQ_d5OF&pL-2}Rz%S&VA?6XK*i5pJYB=Qou z#r=xx1;oBpP&tPVZ3G?Ye7GBOg?k$Pjk18ZeF}aL?}rkwgT0v6*LU61V83k%*8i*d zst#*9rhcb6-_%bd)%@3VUH3+Fp$XUgs4K5|P?KCSt!h%0rgTNw)zT#;M=S5t*3@@V zF`C+HE*s|CV%*DpUiw|QmN#C!M0q&$T+F7H*IMt2AJ*z)3#etc*o6^aL-J(XB*!FH zflzRq)ZhV}&-;LhvHd8RS;Mg$iY}o&r%t%nI;UALnF-@cz07dkkYWfkU$#a$v#C-3 zyNouRw(v=mj-SC5gpf=oC?b#iLhK|v5qZ2j#CTpAQOUD_S2qilai>FHm~H6;e9zp^ zoNX;bO^Js78m4ZwMxg$oO;S(M+-rWN8{K@md5Bi8yQv$eDOIbQ{-`n=1r6(Jqv{0p zf7f?y7*}_*hN|gPW2(EYJ#9YXdKLf-F77ee&qznB4z2WY1#xjL|7|rV=2vi<^ts>^ zk&DG4NpKl_6noCA!Ml?G3Y(?7<#EzugcDxH644)cJ6aQ)&o%6?pW0Aece!D4Q@UZe zt+%_{_l0=?&F16cS3W7U`>h8>z$nrB*j^Il`MX}9^LWim+6UT}IHubny0B-au530JzS($&`0 z)_&0buOZAZSAVZQrT$=vCRhKPky)5Cy@Xv=!F{gzZr#&LLs{GM zdu5)=BMtj>byg3xp2gy|6z!Bk(!&zHw1;Gt*hdy&3EZ~GUhXjL4UZyQ33Z~cl6|sK ziaD|<@hWlxQpy@1c;+_Qz8O|%J2&lZ$W;9RJ=@{N6Q*LT!2Ql+@mm5m{|NdT+BlF( zTMP07cIpZ>%u#QankP1MG+S$56;H}}k;XQH?uOrrlDB|ad)=S&S zddsgUHY!}oc8Xr&p`;w%&gkdcV6np^5++A)S7meG!}t_~i)w}bx@Ncg@1 zXY{Gwd|yxRRbRf(=y~ql@9{V-PO)8VHR{R6*fMd>-n0#0SAQCn2K{K67G5AJxTz`% z41=$Uhst0%9{xD?RdmbnUeY0WJi34_V=ZB>qpydqpj~)vg+A%25LR@LXmzMn9u3aw zG0+6>bn771hT0d^!)my76%7NmX-2>Eu>TI5gP#{>h&G9m!KtaYaHwD@e+6$9_-qD~ zO3?smvGP~2I)oNX1TPOtliw1Z!3Y-Q-R*2^O*eX)m0F>e&`AtJ+W^;4FBUlDPYV>% z2)d2t_f7O(MDKzdra< z@w;0dzlQL(#cawmVdl^tplO>L*E@2bq!T(1&gFJg3!pc*fnv> zBW}t1Vjr2w-jkN&T5ok(X=-U>#rwK78k*&g+d}&aV?3rXQPv<076a2WDnZk^@3?X3 z6m%%g;_Jm{gLa0OM2(Kw5b-C(t1Oo!l0ooj{|5U-z+-l6qjdLmDa|_LAgj?e-hZD_ z0XQ6E=qEvT=9=#yb;>yr+^VE5uSXbYps!%gVc%j61DogD-eryhhSfE%@?NG5ct0aK zCMVvFW}7OLWEJ^lC}nCrq(e-Tco!*8~r0FPrR3?gkR8ax!#%v={{;Y>W=AKni#eg z&Pg7d?-6YXb0f>kJVo0N_{$^R32wQ2Dyjg*G)^<3Xy9~9%URIaDQcq9&MSWpI zpW=zXwtmTdb0PWjd+JA1Ca2^<4O3O@Un_vZ&%|a%BnK^5Oi~V)rIYiZ5Ri{B20Adi zp?CS~WsRY|!?a=ZfM@nh&=+w%JewM0Sk%;3wWGd!{jl1~I=^PIC7#;O>c?G$ec&C( zqw%r4g?J2F3AtHn)=tn9dL!Z~GJt)#XL#8Ewa{aZMl}ST7BnIlYh{2k=SN{Gh}E2TjPj%kBb8Q7sn1`y1`eQPJ8t z-?ZyNW}y?5Tm2*Y8Jv-t+5T-rwbYfp@vi4x#_Z2SxrJzC~Unz9Ic3Gl}N& zN}w!eSN2bshrbX^kwpZZ2^z0hChaVa66F(OXok1Ma9Ptv9inz>_p80?xy^$tVh@e6 z1scHBq4&9TbOE{rOTyQqJGfi9>DWNQU|F|dE3icj2|EzR3CF^*uqyd#ay7J=KErRP zo`dgtpSjc|vi7$30G#PARE{SZG|C52_q;A&Yu`>l>}}z#2+U)40YvD_@N9S#G8CDL z6mrt(%cygfn|h!2tZHVhsq|y^;Ixp>{14;4Wo2K_J6S%w)?L@aRffJ(L`JL*E|TVn zlVufhRD2k}2)1Q1_Gv_gjS_B`1t|qej_iwgwfMM*OAg>?^r*hI+_5S~;2pbAItcWVDZg}O)g54SFujMH%Kaa|sl;6JOR_(5uH-;zd81b&~ z_^^*Mf%uRFQZl83$aI9uVzN&`ZP7ctsp8l2-}0`q{-TybM6jFRpIhm>Xvx;>)&y&} z>l~WaO@p-ajANav1KDf}e$QQt#-okcH@rK+BrafU(I%{c6iPya?uSal`h7 zhKA=VLxj29j;x!sB)^lQoUlC>%n0||_JYkxhMVQ_yUx4rxQ9{SKx!x$q#~+(cLEz2 zsepp=H`E3?$+^YO1+14|^he%Bt|KN$+pO-|@VW*mpOGW^S@pSRit|g)tieUS%flPT zH4LB5H#tSUTt5hnD-1O6Bq1}Ska!atKymdS`FNsw0m{%g?{}^>ZEEhXsjZ(?omBYl*M;xZ@9RDa zzk7aPD^9E&)G)DWkpF{lZ`iM>K+srmj;OmrB0~j7kiP)MFp^ylS-H97KuMb7gM5l4 zRK&Su>N5?#`iYvMmdVs|mI{8r#qk2(53(AN>id$@a5mNx z?S#)G$B26>mIsvtmjb@;2dS_^_RF7nmZe!wX+&F zRbBhDB_sb^dP;1{fV9QAZwgOVdg`XBhXgJPSAZ-IW)kwXdzK8mON#|~)x8t0GM`BI9 zIs8__&Y<(fB?jO^OpdQ65`<0CONvqjUFir43jY*!KXN13d?~Tna4|4wm$?U6gN0;R3^sOeTaAs!L)cGm7AD!Pj{JxQIDt}z_LG7V^5k3l`LfR=tikY%$L0_di zh_!6cv!uBgW$ar}1TS6am(7q@OWVoc$Ws-oMK6*2-fFYY@TfUnd#f>_rn=^V>b~(G zS7o4x<%iYa>3&{VEc!<@MlhF{iWMV6xVwPm#vvpn)spA3*Fk?pe2qRGl^k3qoHJeb7oe7L2K3t&fUXSWf`{Ur3(vQJkQ-AHrDK1)C?QA&Wfr$qBu#j04FH)E+u9RdbZ-!7|tCjEh zhd4}sDHUR0U>vNftSqfcs9&pL8;{u6c~;X~BMra~{7C#$rjdAr4x$If#yaACKuYK| zwj7u+O2tEi9);ZtU8dB_I>?qvLqu-O$rSpYySCeIo7g}J68MAXY=1-`Iv%IDsfzbOgPjT|OHCP_+ zHMog1gboRtt*8=oMm2CW>w<5c^>>r84y&(ju&M57pX&d$9;P zCOFIY^4)xl)MG=qow;}MuYxJk(XxFKm$)1F>_o~Ji7R-+kaU(VAn*=#9kpGtv@`cL zT{ca&o^XEhV7|G&hrWCMezXA~rP?(R?fvN9=Nb)WJ38P_c;;#LyrZ(I?Npju<=Ah1 zsejj)Qdw0Hnbnj&|CcChd!{D$UT#MD4r8p>4Z81J(RzNTppoH;A*W^M1%t>5JTZEm z^Mm<}brR};+(hEg4`hn;pI~}Os=R~f7=Jsz6JF1r?jcPxbaqX;`n~GADyb={`Ge)E zGuWTN_M;z3m!PjuDY`DUN#;qq38xbMurugF+)C`{bNQ2rzsZThi;_3eb7BMkHg7%# zBRTAkbRM|v=i1j=zMB_YI@@yWm+i0YHt^|f;VlCR+Y>&8mq(qZ7~TZ1N7V!BS{t|D zZE$^ZUUPi4t~Av*UunduXOtLo=lrh9`IwWK8&m)l6jknNe&uW(Xh-kBd5;xHV}iHI znKFlPj&P^&5AP-S8#D$E;ie*h9s#USmqhW3LGp!Cvv7f^R@_H084IJ&cYm|v*5$@? z+UBNWU`3m$r<<0`N^u-XJ&ePmp!^O>8@klPLnX zkP;yL17rhX2M&-I$aMBkdPm=Gx7nEk8lr_@zt+d{pJj~gy<-h%i(YUob3Jpv_uQhS z{`TI3R12!J+u^QqPIfG`O*2i>C#fUqkjlcMrd(ZKQhw*WoIj%q0!5u078$}^LT^`p z5$h;cEFLIhi~j%%%TeK1K`fy~8C*M;hs{AxaQ|R@zExZ!`y! z0>gcyKlJn zUib)J3~46~Px%yi?V)_+`Q6yeauZ^ZzZV&Q}(h zYdY&DT8xhWs9v;1-1Gd`f(^oh{3F7aqM3qc z?WV|5+?6dC%musNzc`~9T&k0OhpDgOmA-ZJY~6JIbCc6{##P~25tzY_=H~M9$;+gk zA0gZz@`~a`;{=0=oxJtLWnv()ooB$(Fc>6!zVJA_I&2SW0K69=r!%V;LlM|R<+}_H z127Dou@sy4nwFX3&3Wdv)?K#S_5{ZT#}U_Y_i$GmHz*PJ-wM_OD&b|x0#P~t5xJUB5FUIrW=Ave z$^6^GNYQdpKk-v(ymEu`tn9g{C;tvH1}o-d(}sB>9UH9WMs4#S-Kgeuh9nbgO>)F} z#?h{@f5VfpVZefxAS@K6f)hR^@bLfQEBKk@4_*Ym8;iwmVwbRFYzKB3jYEsT7UU27 z5$eMJz-Rz-4YpV1E_8mjMOhOpuT0sdOcQGUVD4iXZB4S>w0Cv(a^lVd&O!D&)*$oo z=H;3(_1h|{N>>&q6vh<|%n#3F7Kc=fY&fA=)LdyPa61@Nxi8RNygh>D!p9PYv`APc zJR}Sk#tB*ie^4sXK~N|13gO?PR-)HJ9sdb25u}L3JQ+R} zQ(`E(f;)%X9cd5fxJpRI{=*Q{9|c6dyPiy^)0SjSGmA`nFQtDZeIym4ed4d;Q34kK6+c!m4m|1q5p5TG#Yys9#XpK_X}tJ~C`yz@jz!rZ zhcw%#arL*YGi5bDY@VgpH~-d`8Q)qvxK4RH(Fd|-LdUU4@{9l%_Y_x%K8rg@=8J8@ zZi3C^7(fraiVp!l{|GvkI|SJQAArxo^PzIKiRqxfpb>$k-py`?bAxTG<&P=GSf_V1 zS2lldwl{|xJ{hcro~9lqpYgqMxIQ1eO0@At&4O}znW7Xb&MZ1o&{*)cd{B+K@qBZ< z{y+Q2z(E#@_2pTKcjETa1F}ZIu$v;;D;X;KD11zo6Omxbc2iU$d@s<5&&ft9{|-Xs z)4~76V{wzP4F3WB1Z1dxJVmy)MydXfc87MDu24Hv-_Jb7K8acusA8;!2BVQ=lQ2d6 zOj023E2)wM#I3~r1doW%JTX2CJA^8^HxU;+5Ke%P!UXa!Y~`$GKVvRtsA!RXDdlzw z>|ZT2O;-)B<~urpE=@aF7vJ1ZA8E)iG#M+6BaD9xbbW?)WzzxRt4=6iR@$+&U6D2a zpFf$!A=UdEe>TzdG1f5@NZKMT@rUGgVWw0e?3+3=u@psC6p@_gAQS+3YhKIHz(YE9cmO}0NaO>0ij=78z-Kbmej8DO03arO88 zMXzRSxUGp$;WTl+c)LU;`6Zbsc_X?f=tAD%Eyt75ap)LsH>3@+5LUv!;Z4YVgaMD{ zh}ln>6B)+9GVdC93}CVpfjRtDJ-7L&wn-bWJEf~{ey=}dNHg9vLFOXkF@sXyL7SwS zRQs(0D)*Ll2mABFf*ysR%4XHhY?`1I7#r-X{O6!PXa+uj|5_L=WypU?QpEA%8^W2Q zO@d3rcCr_L6!Oq%7r+5JAY4AV}F!K26?1(x33dq0CYKR-S-$fT3Pnq0Ugx z)a=s~>0TH|*#5ZZ`nxldI9*U0Stpny8YjLaiV-s={_#Y7sOG+O}&F4T}8ai4yzHUOwv?JLz~NLgypzM?^eJqyxG##GlejL}LBDYhJMSGEVG^U}yMqIQxD zDNA}o^h7vTVCNT-A$TQzgm_Bc5&A?b(J+vnS{6zV?;d6gz8J&{YN0R-Phlu0hNkkg zvabNFG*W%K$O+?iV8(8Kvnr9)C+RSSG-EBi7Vu` z=8i@hVG~Ruhmgz24Uob8%$>`<0^ZL!_FPc5y6#=-zG`<{PMavnybw_EK30i?;o>Ie_s2!sj>D`rK|!ki!V7>bf<7{v8Mb|U4QV|sx=*Qee~x- zr_l+RpJ*@GFI*(*D7a3JAX9k*!INwUdIyUlRQw>}5z%+aO?g4co$!9)o5Qj~dWH-P z=EzG04^b`a8?6J?#dgY=t=pyTtciF9mFvsP%X?M?*X~r; z=^M@0Y&o8N^i@zC)&;FZPvJUj4JPKsK-bt7)(}<^NF#k<{sYb7{^VuxI`Io7Qxv;_ z1^#r%gW$G7_Z2T>Y{4A#3VT}MmFK0s(CjlN8C-_XpfI=EUg@6de*sv18c5B(j2|MR zf!Xd3J`C;2{RtOwR&r*5=2t%R2iX1|<;;V|!%grzcpb6_JWtxeN=_1U4sEr+z^ipn zaK5)~wltbAns=KwTO5{uZ08(HUAw4qV0e2-zrv^nHyAbJ27Okb2X)a|W_jMc6nsN8 zG$l9w*O1vzrp__Uv!y!1T-;17J68WzwKk$S8Ce{v7|t!-y<=7k(C<1fy&& zZ7_Ax-NQB5nQGr=`(R}_Hc=x%$F>gimX&}EaMpjx^U=}8Y}Ylacy(>7!YdY*l^2gL ziY)F^{|%{@tqd$@f{q5~GJ7nG1~Pj)0BhtsO%KQ|{b?H+E7=rc26+*w z@RqDy@QJXup_@atfppnl^6ip$d=!u4zTzxp3K^a0>40ZaMB5lxK>yCV$ASP8UIoly zlWEl;ncRvx;#vlX%QnYFM~3sEJA@kGPo*7T%;h+sP~>lXB=!}XK~(d4f!jc9;x#dW zxXJs;>x*x~YPoW_j9$6;@BH5>{QP7*p9){j@$yjWusE^fOPlEq6wMdAA67aL>D&-9}H5E5(s*&oYVi zC3>5oy`wAG$SkE@XCNp?JWinrGRu>t^Q9b+y?RL|VRK*}b11EzHikZ%*^gb$4ri}| zE}@%vf065P2&remEHUi>)z@{ydf1W-7(T;n4fb80=D=q_OZx;5LRbZ0fv;{v_?KJO`No~Oo@=)U2K zb53_6u5$1@;e6pJbiA}LvM;n9w<>MV!98K8y~O^}`NAdetfSgcYp6?}Draxo0DF^Z zx8Z7&viebJuUu*Fxa`)2C52~8(yC+XM_V}lvuIaBF0AE8h_*@o6zOB-)-c3#3Z6 zfIRamzar2Pa4ksxDnRZK(Z2Z!Djt{wXF7V?+uC|r`dge}_JsU-B+knuF9{dO2877NuY?*y76kVU-Wl3Pp2+V675a4U%Z_^6SJP0# z8T}Ar50K2x1nkck-$LIbzsnosF9hAE@$@{}RAxN@qWuw_m+e(N3f={*^Rk#NktyMy zLhs9Q*-POl)W{o+)@sAVr zbfidc-$0`O2E*_FkyICdZ{Iv%V*`%0KzBwN$X=|5F|?fk zmQD%z5|fssj37bB=pcTj)HM&Vd8h1VN^h+NI7Y&o}Cpx23LZ>S3%C)kKvKr|t!FK*wUQ6Byya|nj z6Pb?#ZqHNKeDFQK$adYb&!V<8+iTpby$}5cUxSYgnkRSZe)=-TUeJej(fCY~$zgs5 z#)!A{wf-fP%z4(-uDMTL%d(k8uk)AX^vj-=X8U~PWAf`ZAACRk#V=}lnaZ@QY@d+N zB6ZZ+P+XqNzar=@=`Fn}y(U4V+vI^@c36jqxsh|i<|=OSZ*U0uAI~C}-*Vq@NjFE^ z(fHf?$+g$hkDBgY=L*=(js({@_c*G*x6FTq`4inQRLgUMK=gS5 zY~u%jA1s$c}1UPFBOpu%o{+U14kPxxJu zF8^}z-Ol6}AIow}D{tw9wq^QjzV?EfAxB&82wy4A;5m7_`5i@*C5yxy**p36;0+;g zcxq^pvQWr@w)>XaN$U{vT=NiPv;L4NU^`7^1iCV!0tW90=X%>A@OnRa|MnlIUt>|| zW6>Su?67X(+rsMrqyDOVl>EDlE^R02Et)5OBMnnp6#J#U1(VT{>@~EpUau?8@!nc$ z?r7<1@{8 zy)hkVzM?zQ6w>%l#ml0P1>Jss`aSWdDE01#lgU3{g?=%XepYn=8RRH8lbbBkN3M+a z1J*5)<5l*NFH-867o~1XK|mt!={ENho5MQ5`WF0rhPXv;2KA2G;RQY^cdYZ4?X7K&y~v&5lQ4d=#=yzQ zB6JD97~RI5%I?5C2X>GmD#~-!J;%Aj(Z)1i-%gcWlU!uX*_##fV@djq?*qRUq(r=5 zma-+Yq2ZLZh?&f=c_t(A0$%Wsu$kg1=m*XNI1NJ9s?l z^|0Wm@|d=fi9t5$2;KzlRzRVe?k)yY!EGi9XdfzAMa&H}K%BPywhF9!Y*|*0<)LN0 za}I4e@`e9g7%#pJ<`yf2iNagLe&R#Yld^sC>&k(l(cy~7P7y6adP@=hLGCIJlhvMH z?{mB70^&N&Gr*JJ9_=t#5Ocsd!c=Vux4(B@q{0Gs83?<9vkJZkPlt=4gPagfIR}Pw z!MXejOtA!vL;fMuV8<|HH`SYp7ll23!@osY30e0u^M7Xk5PT&*clq)xWQ{~YwzH^;kh4(;UwZ`NQR)bU>@H`oFKwTA0L;lR_+Qb3BMQ_ z7VZsQ98wj`R5pmxiAnG#NX_!FOXxNJd}_8U%z4A+vhFiY1-6Sy(+x|WJrTU)L;PoH ztC^LI2;jYY!U}`-A>RPqFB;v+eTcT=_CXN#7TRkn&ECnpSnIAoT5+IwWX{rTIP-Ic z_SdPOtA0d%TlmTTH8C%s>FYTGrkZu0eCP`jDS9YLAROpMPCjonKP_xX^sBgaamT|3 zE1g6Ubix+_xFB8W*$ln^i?2A)iZz!#n`QEMv&)-bsMqU;SZ(eTpj-QeQ4Mi12d_2W zk9P?p@F~1~1T0FFeGlCjaVh#! zYA4e+r}>!LRW+{+$?F8(-A6xzfA#!vb8h)Z(Kd1PXb--c zYyd3US@;@JtB~NB({XcS$3;ZS7w|D?jenj`!3bsdK$!4!V3gj;RI?P!pMg+UxS_pf zujah@kB3XkV6JApVBdt^fbPaFIEvdHY(9GR6@d8-7TGn5b6j*j^HlhvX;*25bT^}% zQ^Dt?D!-`Jq=;> zfM27V@ow-8_9j+KCW9S8OQQ~&+v{}Nb*69bBwr}K1MLPKX5M8FV(((lflk2-@I>N? z@Qiq_VnV1mYIO9qh_=BVX)#EdU_tTnkD>{@hyP>gETEg%x;8u(cd5GqHM}^*-EVPs zm*Vtd#oguN#ogU06pEBmq|`l4npBb|O`44U8~$(2nuLTTojG&n*nao>hzKD`MB!uD zdE{&RK3j$LaNVuC)t2Sf4)xFM?v8IZr>)FULRq<3a5!s$er(m>54?@8g~ng|*%2*2Xj#dn_vuDH#=53Y05MW+-t`B;gQpTPS< zhvbk)HjM!3<_Y%(=lJ6V7kP!mU_L2UDCcX}yJvV^@m(E=hU^M< z3kdeP>h;K@pXUPYTGb?RvYw>}=u2{OQ?4e?H0_n=w42 zeP(oKQs%JqQ(vNgt}MqLd^}Vzl&)ZcvGw8#x1pLr>MxQ(ie&f80hbz^!pP9o{sa6@ z_`mYLrMV*x1x<}kK0;lnoG8iVZNNwKPC_iBnE7cPRg+M2#kQ2(Ne`uKTvFx%c0@2; zl*iu;4BbHzzG}NR+5I-~J6?JJ_P-ji!T&rY5)`T0D>o<_ODuxD7zey;GuMN)kX@Zq z>-W~LvU=HCS>x)`tvl)`JNr|0>_n~)@&O3~{?r3(4*m+shj#NfGn)P3YT@i_qb=&K&WN|}NXNj{vJDq4R2ob*Ei3HKY)iRChs47`CRIyvRP;(pBQ3^aJ zYW6BlE6&Ig1)9hOr3g&29zu)oQAY!hryA&@subbzB1mlFgo2zs?p1 zOz;KJOSP>zVu~=1G&IuR(RD44EAuM9Ts}<~r{7XhR*_#3r_-0OEPq?JwybOU?y@80 zU-WycPT6`;Z!w+Vu4JJiSjE?z(zH=KRj{sYfM884dm2snA;fZ;t3w}O{@ zdo+U^&vs=lQqNq^9G$F>Yo}Mmo9@E;^9qyDs4^_5cwgSC^kQK^ZjY?0@9OV&zZd<~ z{ce`qx?pf|Q=P_`UL9&_TQ|hY2U<{_i>3D>9(YfFjHHFKm0PjrV(&cfbf5RWo&DDb z90~9b92AHLbqekg(ki4|uqr4J&N>QKb4*^(J^H)3)OW#!+f8v(riZMFej-lLR&a$k z8=sF-;8>?)CjmM42wMp5tg%c#;Lm=d)9C-`E|8_Wh`q@!0Uyp+>{;#(c-sDgGI@#y zV9$XgDn$d3eq1rA4CRc5=8=^RZ|%74yxFI^Pvv5Lu6}Ig5aSS|uyTlgjec0g;c{tN zaLL%BgM~!}kMk9UK}Ca$4wgJA3o|5Dla`(JT^jmQUzlI$24X4SU35<@l%**8YfpKs z_BrIo`AdSTf}NpF!!klOAyiOwkS)OCH_Ru?^MreEw;auR^&?epkf|i0Gq#ODY+}x75JK&}Fg*5^vcp;FQTY<}|2sp9_L3f+U zjsvf^XrKcRMJHo((YaU*dKnmgKY-}_0LYKO$TQ>*r@>KdkFF%4|M`c_^Txn%-Ly5FJwE{EjH!RopG=%H->Q3oSl^M%pbnhz;R`B)FhDg1{ z7+}gaPp=nI`D{K?e+W;@rXQOXez^H&tK|g|3VM(FwL&pcb z^tm; zN@5XV#NUDY@K&HQ_dpi|fpiR9IhG6MBDn~drB7qam_6W1cMLqm1pJuk(G0*1SVxMd{ zwwU$>|fDI)WG6nU5I z($(nQ42g!dh6nml`lO10imVE8Wp5)N2x`NsepRii64ZXJyVJ0Mc>(@$_Xvh>6r2Jz z;cCtXf&k!RjWEJKowb zS-+z^r?jD{b^gwR=)%>dO?90OxAhi%A$M-)SEakjmnNnuMGSR)tJ}QPg11 zbsk&XCTT^QP}M+HIOO&GBU>OVlm3!b$}wdN^$&FuO@*qhYQEyM{E*Bc_7YzeyygGP z%fXjmZ-Dz6$|b{nxdnX={=aWf89Exd!44-6{9Nx z^&KnyOnIiWRoSM`Cc8;deaft{$APM3EsnxHJD)#Um@Pgpb1Kucah~DcTAxKel|E+Q zOMd+V>w><8tPH&x6co_G=et*Hj~EZ0+bXTUc9#0I`l_NqF%c+!2Na8x9n?DYCT)-= zPi<6ID+VY=$&#i0#j8Xnn1g!5IKMa23!HI3GhX1)dJMcreZeg;7@SY;vTEQO9{Iy_ z2Oj!Y&~iO@&2?>ec6Huvu-jLFFZ0B@{^mBdLuwXPoi)uh4A*xk&ngWqqVqFz=jYxn z^e8dt?&(DalfDOB^{*9ShH`zeF{Wx?)hE+`Mla)w%CS}A+6tS=b%tAly}(=XQhAR= z&84%I*EA^}Q@xM-?DXyAGtaNTUwOd9px}^&pD+?Tppv_6{c z>VC=@ip7d%iaW}ds;iJWb3r>ps{&ocL&aQh1Un=(i%W$rAZ3>0dm)Z8A3TO5*^Vp^ z{4FW)yObcc>?dG?Q(SlCAvj%b0($BxDuOC-ZF4ntg*z)7X4+@l2U~T(m6Oz#)=aA= zs+w2+t5@mPl`bxRoWCH~KUbK)qsX`Hnl4q>rlNDj2c1dhs<7+V8$VRtsCr~-Y@Amq zt;9@))y?a)4Oi$WZZxpVpX2euy^=4=U~RHzviAT#55LL2t^9lYuL}Ad)GE|H)H^gf zJP(7& zXwX}pXlU=Kv*lY~TW~X5)1l^RRVUMo%164shr3zHFP!XYu zQcYKlR9}JX%U9JZ)8#hVQdxiL9r03;R;UCmWNYpLy_k|yb!1Z*@8(mBfzG}TDDA)K zE9?TUKlH-7H>S(!cCP$@7xL(iJ7nvevpa5Lus5ooNcHtTO}}Vk#F@)z@n4 zkGe+Ev7j>7qDzUFVzAxP%=2373uYYtTl~%b4+8s#XhT0_-cZ?D?_Vx08B%aR_f7V#Y+k;pz_a98$<&fZ#Z!vA6>lwx zEbpwB0>kuv)jOlsu-%YpSY(=2d!jxHNN~e|yL$jM*n@>HWrHSrwpkGU$W8R}Y4Q}t$r2M8VRytaiBs(nMCSR;b zP^8MX%GyG{bbzF}-m=`Pomzd**jk@jezs(H zVRhcX+}n8r3WgMA7ELRuJx^O|>w46~nqP+ciZUr+7it@V`JSxx@#1?)p z&Mh0F?_7DZGR3gOSZ+kCOtr`A;v4qTGGqkS;x9L|qpVCZPaE&m)^BZ~74m7$2F(u% z2pt{vy^*MiGV(@~7ELr^%R~6Vy#qo6@_kSGg0jIgR=ZpILN-I96H}r$qH>W?yh1cm z_%Hu5&yP2U_knkc_n5bXw}p6x*%1@VG3%H}$l$w0Zvij*9O^OYB)3!f^cHXsZ;U!% z4003O06ya%u_CMnW{4$tF}@Bvj5^p$j0e>c=(q8XJM~p{V=SF&ewfk>o)xKOjZ5|v zj?Md&lasB?@yLzOWpii3*%SGb@|zTFFIrPNP1m3=H{@5EjR{rFYAohHwxO`bauz8C z)|{^>N%BfQLuGIq>{a3SBhVRS4Y?cAGE5qFJbXo?1x+3Rqw`^tN8$6swg&Tp6anM> zR{Lgn5BKzTd!qUR&Np*JN}){fjX#=yhWC_sf%#%aaQA+UR3IJTE^P-hRd)_$yE8Pc zqbun~%ui+oQwhYG(U5<1nElK>K*E7-KY(aVB!Um=FyaGH?NvNEuNm(Yu^WGf8n_x} zAiag$>b&pRQO~nJHGiq@X395ot>{|LlsqUpQDDw%l^dITIp<-{m|R}2Irns)CO@p; zM`5qx_ocmb1rhTFq$G#c5sL*ws_W`--m@`6o)?;(40gin&!8jmg7L#k8qLy}-oA3*|d zBlwK?V|S1c?k4kqZbZ|RfocxK799Td542$-ss+`VG6Ush9xVdK$S+3CYW_HFBd=gJ z;2LhiZxhMHOkO1KChq}nIBzBIHIO+P;Z3pW;GeUZc}Mw@Va{F-yX{pSVZK{E&NQuZ zg8nu*GW94`f{UlHaAQGm!N7ux1){t%VaO;mWr#bRYy#FjF&6#7*h3IMTd&Z zx+}VOI)m<5#oLPMdQU@=VPWMyBT;pt`c&-(OS1K3{cgu9=X7!%oyzt`PvByHsPG?g zq_jqMMX^%V2r{cbxqEv~^y=xI>Fwt;3dW+xeNOny^J(Yv(EGF3H_rnequk|guQcP; z<;uy5YcgNyWbt{Shu}W14MAbw&`UsKImh0ISwdTSAJq%^>~F{| z3qWi4rj}A)C>gzi{y_I&_Au>$ZqKm8!1q2Ee8auL*Sj_NymtnVcLrDsUabP9n;{n*jg@yy=De!X5>zs%-l+Y5T=f9vqN-Ih)kp2cKF zEecC-%M&2@ey-bY9cJrT-_t(AF|Xl(^R(+1X{J1Yv6RHUMJUL$m<9Qp`FvktPtkPo zYRPU;)enXl;%GkK`p0wKnCf{TG{eAF4(LV8VeRe$-UcUM(+XvX&|f^*^mODbeu&tf-`$3Sjj~YJ-FN7y_{%tWB%$iUs>4SkTFO~=zwG)9+F zuc_nIZfX`a82Z`9lqbcf8b}LiCXHknSwQN^60!oAS{9O^+$evlITcSWq)t$2lnWg6 z=F)d*h91S-W!zzWozKPs_lV>M0tW&^M*`=@iT1*dKwssDPsXo+FG~n9me@r+C2|N0 zfx$f;zzgAdf$vlUp(j29M{zkZkcfs7`GKFsSHaq65Kcm0cMX_2bFer}iB+R7(XHrU zREy>VjbS>lHq5~H*aRif7*-VZYzF%TYRh4EHLx^B0SU;TWq=CwoH@a)h8)O&OlM#} zg)fgSt2m zm>ApH{lM$E1T)7JSj)?24Uq3l05>9xYXhv830xx39!>xWB8AHa>VXq@9ZGP|3^I(9GV1^PagIfL@7%bm_=aBZl z?E_GK(tr|?0_=#V!0osTvHGjPjyMnd1)$DdJFUoGZ_Crhfh6+&pqbu!#g*) z%kcl_p(K*IQ{3^t`+snSP5^!60JjH-L2IFWW&=Iu5BsDCP+nrWD7cz|93*=}OQV2x zMh;{U5y$6HXn#vVaoQAvjG!Smi!sA&urlUZd*$ee%%U>GFdZ6E|18T^>+^WCdUIxFi z82BcOV4KIyfy~bt@HqGHJ|DE132^+&xRvmoB%qCK0j9|=__YIYe#haQFF+Yx12)RN zzoqjM%J3bOW;&MvnZZB)ax)6KV%Q4*5?otGTc(fY1H^z8IeW|62?Gp8tG!{8I+AxasgX6UurD z)V#^uRCxX8-xIm1H_Qov`&m#AH)9e@Ajv5J%QJc|JJv+aL2uaHSG_ueS|#~ zO6whz);l1cy@7K3^Y{|#<;%bSPJ#dZ!$SE}x<8?OQ{m42`uCl8@QwHI`qkgE{!`w6 zzVjUZ$}8>))Z{;P{wy5Xh5tQXg?DelQT{o;JFuUCvT1;O=>A_)N&^r_YT=5~aMe!$ zza*K{K}$8C^F>FX6F@Jv6`hH02I5Q^(;S~b48@eZ1&AI^#&XeRoF^j&Mo|>3oG=WJ zi^qE5JunQb#NzSmSTiIX*lwr5ZDcuZCWD#1Y$2@q#vs+`0bIb_2K-y!=mtdJRG|O zgu32HH-vedUg&e=EM+5mi1m!N;vl06z+Q6@Y|_#&82e4yKE=oHBSz{c^inH;AkC4%)}O> zN;rd&SQDtZg=i^Kj_pHw0SWIjj6ItmUU)H@iJ!(=V?NkyJd*f@uEY8QYiI`AkGsu| zrO(4U>1de0e1&;qSMb*HKx2XbHxeC&6tQvaKp3^fvML~p{bYth{Tq*T=G@T75Y^J7 zAJALqTyz|?I^UqDn!(i5G^2#<@7ZiLk_%K84{R3jn~H#bH4xOwDt0_D+aj1<^h~B3 zkn{E;vA|YahNWSNSS2dPWWZ&!K^r{}dagrk19O#aj=Vz3P0(Hp* zEm}`#HUDM(xCiW0IG*ne!F~Wfm_IPXro;7k25rzSpbaHKy-b4pB@yn#CG2tNHDjSw z9SU`KEFuC@))P2$5?0p}Py#&*Od30UG7jqOc&-}yz&%i#Sm3oB0=^m#qAR`O-tGXk zvkz-#)&rYr9?}KQz2i+A$Jx`eCa4g>tH>@3g4F8R{ z$9qBDddgYYMa&U;Eie}2=v~YuHjnFpYOt?ZB7O;9jVEJ2VI}1i^i&nVRqM>2fZlKt z(2nQ9{nm{;0+g8RjGj)U&%rp8qw1(U>IIcf4`LFSZjim+nNc%a=xfwIh(gb%mQ!kK zH07lF&;c;A$AC?Co*e^YRvpkNhcfMfW{5H)p!_-juQH8^gB8n5>{K|v2(}0m=1=G% zIuyJr;+Rg%Q6>{IaYw`GM*f#dW(6{2KWGJN;SOAajD;(|0mh`Uco7~Ab7c>np7;r6 z5(iOYKi&e~K;B+n6aG2=GyY@#EWudtyC~*w<`3cT_lSd} zE#x7PmwZKe5ptRiX*#(t@f_;&$p2*Eguq1sM+4A+Dj%8qcO@(K6!Ze(aA((2`$k() zowwyn?K*RbxvynxU7{7UovKf%kF9TMFLwNJwV{$2AJk4f7B!YnQFYdScaQhJ;XBhW z$$x==Grt_4A>O+@ce~%zGU|L~w%jBOk{QLzg%SMKcoaI6yF>RRmpR`xD4kUelbk(V zDzYE-9K2t8a~ly0PynwZ1{k%qMjCPL*_li+T}elSF@rj$`Dg_uHZ;c z^3Rdq=B3ws%y=L0zUaf}FBg8^&iz&5Wjt#+=Q1M4z-6zk=&*RWtN`+1!ohzc&Ha++ zY2SYW=ZDmU)`mR@W5V;o#)5+9iThS%XYm(&E;HOY&=zXmUv(6G<`x;B7|$C8Rg-F_ zS=!h-)EC#^b_{fi$R_k4_6TwhKPiZo>{H}vHn@NDJn4Phx0C;E|A&6(d|r6Pcn)^| zqIs??mR*w^5?v59<0}aZ-1ly*I~_>fcX18x95Tl;$GL`nu5@w;-IuLFUgA1lj-aI| z6Et44#9hRXM0140AiH1@=8m4=vKcq(rE{CZr+#={LCs;~Egf6DDerOCr0=W0-v1Pu z_C59X+jc4Y-!1s`_Pe}rlEGpwacPK;Qkk}?J7mXr)_BhKj`NlJbO27wA+Ofn8+~5- ztqchBKkn=9JH-2l`z#eI{)`@^+#E@D?Q3QjWA$To?&aC#tMt1o+nKu7bgO${KT3wO z54ljJ1Wh2CgENjuI$pL#aZxRB7kUMGKl2*mmFc<4Gt#q-`y+L!JWkS6c!xI+?}i%L zy>RCmn2B)Dou=nO?=TeTza4%!|5(3y-r=6}-8XB0YTjxi-8O5ksogX$ zR5fz7q$zI-`_{==r`4P{h8f=J9+p2WZ(I>kIj-tt?J8KOoz9@RpMZnBiA4E1h=_ev z_Jo*;8)Q>2bi3uA;gRVsbJJ)8G!@DNaz;`htmdWSJJG}JMpz}kPwpnyQFY))Js#S7 z25LHRGSO>j2W&O|iwNXj6SNZ@5$8&LWE}U}PdN#k%pf@`DBa z1@i@y1)w<-oD-trj^HM>LDoq=P?4Z4Qzj}$DLTq_h))R8pcHJ(AaZlV2K!KO4?b-D zuWpy6ulapVhiY$Alz}XNUmTrp%^vfsK2x1h_9g9;E^Wn!As=Ub`;^;Tf7&{jTPqpt z84!}%=x4YyY-8|5-}|0@-OfN(xJ7zR)J8N>{8JJpi;{Xum&&hzf9wVE7s5(MJD8d= zhH2$pN;6CDmrm7vFhyD4HpDU{`ig&6+Fkw0ZLO!b_kFK1p6>3)HS<*EpaFd=StY$A z-=%!0oDD>&w~|8P3Z4-2;F4(t6+s>br_w6&6GT_?DJK(x49A)gzj;dp%Yc8u9%i3X z-{02J>TB6vqclk?CKeaw-Ta;RV{m5D*Uz8nwDYMy-_HE-^XulER{CGoW(cl~^eYK# z9bs%THhgYSYu{!bDe4)@+45q@OMM}kCfz5yB-6<1W!)4D6$4~@MFDtQ>Uo{Wm|dD& z_%(ld!T92R6@zLE9FXTlj2DknJk$7isJvf7ELq``;CWB$Q28q?l48*+(IfG9X@z_` z9RF1I|s={r{XeRI!s~L2~Weij#)54aDYD! zN}^KKN%{@Eo4%^bRqvGxlVvTyZYu6~;H;mX_PZ|{AeGAwx&m647*-gWm)p+3?5nr({d z(PTsLY@dPJ77B+nQk*9&5`B|Alf9PDknNC`OE*fdOPY%GL^a!{p=Is$itu7JP{~f_ zdKJCZZ>;M}U*b)ZxoKN^#ru}|^#dqkjGxu}y2n`U5>=6`hj@md6aNywiSUZZPkdXP zAU-9GL~a|b3~nKM@_O;t2o?&ZqJ1J0L^lSBB8AHY zd4fiwjp9ktNcmI6dZoXz1#kp63f%D;HUlW~LtSlM>s&|3GRl)#2rE-{^mnQ=xe_uW z%+@~U%f|Tf+QP^j`w#bT&d(Rp&ZUN>&#Y1wjElU%LXB>x4up%sG5xS4t4=v(`uqPQqCckk~FznA3^ z<>j^asYHQOmEgTIFfn9f$h)A3fCir>o+8L(k5Y27W0J8V7Ow0h(A|Z@;|0Ngd;>lZ zanQ?ME^yP{U$?;8*gn~n$echI@x}-Zq8iZ}(LSJ>9fIgY883@}R?tScRhTR)5$!|+OS$}b5KkD9Ld2Wxx+BTcr>TL@=>RIq1&%^S6 z1U76JsFOzv_;RT>#N&a-X?Gv(XQfiQls^dR=~`XqVVqz3Ezcjk=%@S~o-?EDi&@Rq zN}78<461H~L{v9g9g^wi<@r+Wp>UIJm9CJ+O9Q2qlJSyGkSlEFZv}4J2s{AY##$(c z^O*gF^@4@8B-kc46i`kikl$BaCmkgdOP7dx^Sj_L(LLxj>=5yizfW{XYLq`#tOF;6 zm$FmRF_Hq24I(7}VpF(3amqJN!a2&hnyg{2qqm5w{ON+3{CoI1u0MIQKHA*b#IG=v zWEZ~86X&>R)n|50&;QUmCHnQzchqOo?}hp#d$r(|4;yi)<^0&bEtWUwACRdTAzFzu z+-&3&u~l+Y6Xo5(Z<_BZuM+Jxc^kn&PV2mH8EV{C_Oakg4wn^_rOBRJ*tN3W?n@-7 z+W3jWyF{v@(i+zU=lk5!{wLopriCvcH^onONPbI^uIMHoA=x1qMXW=gurc&g*Dz!Hy)?Q05W3iT_9}27UNKX^P07zX#(ZbGV~O9yX4DRUD?+tuEE{(2Q0M zm7|iog5yLX`h?43CerEDWbk2O$V6ywgj^C@hR5;7@H_}UdY?{rtg@^!R+T*jJ%ThR z_4nSaoF4(-8h@JlZhFdw)J9*5e?O_XYTqYL^}iCG6w9`m-aIF~hc{nVfC-sp)K&H~ zpHtrOxfUE3b|EC&KhWd0>>%EmTv7MSm{B&la9OT8yCAFQZ&jYF)KK%Cau;{=SR0(z zWKDExWPJE&{}Q)PvOK{E-hAE&VU%>DGFh_-T*H^Db+S!DZ@dY6-*wngSAU>>R=uQt zO8qqZ=Z3LlAk!AviT4+ji{d4exTUBie+G7p8v-ot8srFZK-5vOQ2XAahvya;i>1pJ z2`Ax0xhQ%*`P+4xWT|B472B6<1GBsTAm+T97)=ZT%885V;VQFzuQ_U{Db*Ie&+nQS zoYyaBe^%p6!KXbbJzw8VnfGz9X;8dLYB4zoS1 z@9Fpm{LD-yo=ZnoUEV)7w#NxPjX!(W@~&x z4mOQ!J+al$m^LAG8Xgg5*HzvsF3;_i{WJSQ-nYW?5{J%g%C{OQC$Uks)ve4YFW^<+ zA-{T$g{pdSA#X4KmPim@l%sAzJ`VrEfj9k(9+MO?{6J=lJb9q(>(3&%QcJTo}v@@3HAxe zI9s`TP+M6C`h#f0U(3(u<$~IK1>1od>+I(kRDaDHXUVP!HFebIlx->wEr`$Ek#!_9 z_)FcpqF2LTpH6+3ekecGG?Uq(9oeXJtE9GJts9!8`)-we<)RuE)uT=uc0@frWK45I zTUk3ti{FhlcpTtm)q5Cp1)1V_3i2;;1)c+>V2g`|GGS<h z1#h_2lj-8zdi{21t*m=suV}ZHK&NWF)AyNtGK#yh>{-qfEJ*z>cx&?k?F}9L+U$?2 z^&KKEb>9=;U_gMIPao|JX$-N3KIfQev)H~njHH?q-fh0||ORx$^I@7RKSSCi|zllDu zuF;Z6z)HEB^g}0Z&#XIRUQ=VK+GX@M%+hTyy|AHeF->GR; z8;XhCSJ_wc{mNZc-Zn?WXYvJghw4v`cCKvL;xv;vY#Y3S?{ zz~M!k(O>}%BYlCqul%}Pqv!^%r?=#f1Bo7sp?p{O~n%bCI^T31?DY z4tgHZEUI&(B;Q13S7HVE$WiF*j20+%2bH#168ENuvRgnqYeXl{4a8;ZlnP#9ao&Z( zow|yerjD0X7q&k;gf0LNnA4W@+Uw?D)<9<_vjMLW`h(M)UbRDWRXfjJ=6TO65R|!{ ze9n62YonDNC42b$@OD@sHkJtBZxA#Q-WG)MB9Q5>>(b+F zj_3i@4WG(8Dol_jDs`Gs?)yFbJa)RtH9}>TG(c<>1oO`k$3PqP9eKpQrraP?*xTI9 zG+Z~Z*qTfK+LQTD`k9aIQ!Q`Wyi9#1e3$%XNY)wRUsAg?fM zQ3=0J9IR}vW!&F-boD6LHdC*cZUa&G@JJPTqcAIKB(nOg(Wtut49STT-+p zcj&LO%q{7`pUBjWDGy#xPidZZIioh`qVBlGlRYacQM6H|%GU4^B#=}&t6ha?6Zw39 zakKrM5A?3>{2J2WS_ z(51Up`=cSBo5$-bv_T$OI&U971Y3>0AdU$?$#$v7c%VLC{Yd|T0mJ>L`(E(+<Iy2ExLdWbH|e3kuF z$CVB8OsQBhUnGLOT7&qKR3Oij9|!H^TalJ`lH2LpW0Tf4HXhSWDXlJ?ocC|`+#gk6 ztJD17J$#*+(&@vN3|scavVOG>NIxP8=Coe;UHY42ldaG?#2JefD@%h;xA2JD+cUez zmQKH$hlYM|YY-)%$;>XcE%942LW_EL0yomDii^Z}XLR-U(*1c^*)MYiCEF`k*2!JZ z*bV4*bQ|YQCp)u1LH*bASjTOKGsB%L7I$UBJWsN9Cbw%eAgH4~~jR?e!}RMINn_t(yJ$%o)~Ki-}B z82K$XdvNKADsRUSrk1tP+a0&eFF;9FRJqDLhrB83@B1opNxM#Q-f`30u8J`PEp@0ijG8;3W?3+Nh#YSja)|L-KKGZ2gbQ#-;O$8kN{8%4-V?Jxu3YQ11JnoP zUBz*{wrC>bP3?1ey3V@F$uIOyHiPSn#3NIGUIDm{sgVV<)05gebu+bDQ1Y$u8r zVuB2!E%urVW7>g_WrVY%qosX@^@~|s)m0x+vLiR~2S0uDN9N<$uSq}H+-4=W3`fkL z?2{m8{j%9%d{yzf{JD-XW?JVVmlX~EvZ$<PA65=SozdFu6#ij}UA?3A_8X84AM z?~Oj*v|IGz@L1nu#X(H&h^h&!v>4K=0<0@sBe+70#-4Enu20susyyAW(x#pzcsH5{t)v06!b&#yDNdbL6J;L$Vg7buM*9P-oO%Q#)UG=sXpW_R~Od+ zSF9@){7Mt;QtQ7p7Q=|LjDq^Cp5K3_KS{sy&G{=W?@r0UtPomwhY#RXk1i%H&aRpo4KWtQ4q) z^9eh0lKxJ1CX497XsFMj~1-T<_;M~cnhLs>6X ziXZdG;y)2z?lW7*js#0PFh+5A7C3vlQe9cj$qgUtH`g_- znP%Lsn^n5IP@Q-0caLB1e+siU&3aPJW zqJ^C5xL1<^pP(;+XMHz&4%d!W2Wc|g3cVi%E^XW)rg@9S%@0KH3m1UZ!!+p#ers$k zItV`t5uR9?UFIWo7dW{Hr%&CODs`pCKp2sl%hqeo5A=D6o;R?YXgRsA;gBQS(ct_{ z??;dDy(BN?vp{9`ufkXE4l3}&qJKq8#FRuNXB1&7UsYe_5xHD?LF6Nt3B0VEcrJc{ z$lz_}YXs~0lX<)Gjc6yXIWvhuNVzNBdDi(D6kkUhx;UDHO17;ztlGm!RSYQquVh@& z_54S^X~uyv7Q$JIe2$*HGT$bi7BwRkfC)JO+n45 zFSZ0!!&WQ|;-2|fAM9WBAaarW${HZvNYXj9fp*a~^e!62mDFj7`LuDItf#EGbx57j zGNjJmO4t_JX4xKFAJ&Ci*3_P@rcI4ZqfA@CcW%68r|penw`(or9IT+fQZK;~c!=wU zE0lTygzIABKVgQ%B1@CEk{^*mtVmb{T-|d179lM*fM;)a^#S!g)ddA#)<>K#=+A$` z8_0hmSR-m836*x0b_4BjF<(nOL!A(Z&*z$g;=792&p#+A5%d+P_;ZNY5dXh{>;uJn z6gD5o58z&C$)6xy*j%5JYF+fJUx@)VqqDZ(y~q&OX7evj3{z zU*EF+kNxlbswi1wyA$_x6A47xLOo3Sw@=;dZG9DRl! z39+vLx-WedqR$r?mhollp+pd7FnyAu$Q2|?K5{JtCFN`9Am<}sCG~M6f*+H-eua&& z-LS5<&af^67ph;@P}@3NgKam&WUK6t9J?B3I0v`}kOQgF^gL!8dl(4Qm(YXQTs(p( zBocTo-g=;JZGmvzU12lP2~o93E%p|3qBPN7(HLNK^%0E*x6MS@Mu_C1%fd#&GXfvM zL;e_kI56X_JTotkca0a%D;!ndoqY?rnhU{aKMpyLR3ed}npuyYMz4cj<0I%bWI#Ze zh$UbPvB8)J_5t#I#pnf4tCWJ4s2x`Ux;Y`ck;$hc=>+OIsUj0yr1PM&hZA$YZ`j_@ zrXkO<*uisLvCpu#x3{nlv2U~2+9x}z9P=B>8{(a}of6k1*K=1h@+mo*QqbS%BqoY| z$9CfW15KS0>4j_s-C%3<3@XRAV0G9~P&WL;X*?9>Ba4Z<;3qQ`oB`XzIJlZ0B8U_8 z1cGUlKqM&Rr$YRH3x6`dBVWSL{a;f$&eKs#NwdgO<|6J7PB2Agp#`j ztFLOi+&y?~~Iwj>A5N3+ncXd1kF3s?6x zdIdd+ZiboyqX$%kR)G5EEMzh-Kqi2;s6D8ceL)q1KpnDz?xGeR8#wN-($fC>Ssr@*Vx$Vg-m(i>W(P(+Pzpoq+YvUmx4vh&;#D7j^zXNw0d zZ)4EdFrX1BfV}X}aLwPc&)7%oL(pGd0^LY5?AJiA`I#+covaWtC<3`gpjm7Nd8r*h zAvKU23$=C~=nW4;UHK1`fIFbH*MOSvkCJfNUqxO5DDoDAE^jfsTK4z7b@2E1fST_D z=-pm}3hf)zvO-Y#Kw2B@Mo{|z^#OEh>3>yj@1QjQXe*OJHMbu0d6T)ppowV?x=AlM z9{C>?7Duuc(3hG(x2=Zv{1~GaP3>P>ha+>-9f-JL{T8eTPn`t_%VMZ&wjP-A6(uM7#w zTRUvEpgzq5ZQUO&-hI$3o`G`S&8>!Vm;){TP_7S@b~{i|H|JV{g0l&fd?Y-_z^nFL zC$1NiWjq|?RQU8f(9O+<@>&gd#IC<8!5dIg&q1x13EOx08y}!_Owhh|LJ}dHv;>_` z%;s;GjF!cVuJP`(*T^d82G?e4IYY6Rk@0fY!w5c`M-P*Noz zOVK#&6sV+C;hmGQ{V%g9tp5pxjf4dZofXMtlW({zY){}!U z4)f){;A^=z$Z~84(gzKKHsJyH6C6oqp)y>;PDBiF6{c}_T`1Rzura~7KfalB68|#I z*_SM6-iaj8YRA)u$N@wN@&FG(7Bk7Lks6L2Wuwr$ILDr(=F$SBJ@=S@hv|Ui5GD4- zh?eF-ZsudU4dKStF=2QvGKgt}p5;u;8#IgEiPmxlC_eVcmBc%b4`gqnkI1*gFWO9t z`66x#>y9bu21L%uNckbCqP!WR@~?GYh+5>(c9Is$uvj$~$lgF`#+JKF^h z!5*Mb&_&Ebsyk~!BEk8v0rO?gFg(zDzeFBkx7jNkhpnWJvuVg`sLx^OKODpKph44% z1u!XmDY}##f%gEOU0=L{%fi!DFbc~9@5^ztku_po zu%5jIVLn}UyKwczn$Wj0`UxM@I(kuXX4DbUg7gD27s zb_;O^8PBNjXlM@`p<^&U_m+;qPlM)?Psl)F{up&*525dvv&>a=Fq@6)L9u@m*3YZi z&ES*6(o;~#MPnbM16X&+TUrU(vuB{4vm>d@O~_9%!pwRI-4gWPec0AW61xO+^lsqR zNrD?=43f;4nKV#p=CCQC1C4|d7{+`2WnO1YN>#uK9>JC^?%D z?UA$GHu?c6<4mCAor(+r?Yf4mCzmrG*lKhnr(l;dOX*M0N@rkk=oPYtUPHIxR-ixG zP|!$=pv7(sxmI5xf1)ds$-1M%;7mV)mU060y@^N$;*Mr>pV$~Si|GuldID0*J%xVN zMW-{s`k1$HTz$AeuN)Y-D`j8ySfsl7_1JSdg?0FdRUtpWVk-p^m!SfPoAFS650v`_@ z)N?QHAhg1(SQmxDeBc4InmY{{9|O62>@qmNQ=kLg5AL`xT)SNV$I@BAMX|nbe`33f zWrGG4vAgxy-QDf6yA`|Z*sVVlyRo|q6#)t9hTUbmYvR52{X3ss7J=QFXP%j-uIqbU zAsa#4w+A&s9t;P$MD-ybgFB`d_-n?2vx^`jsp-&v*bgdh8K~uph(p1p!EMmLX-oW% z3Z;&c_34AqN@h~|K(?s?jptzKm4<>FvJJT$C=-pSx_|ft$Y{`vwjd7>%V4ZBlUfMQ zo(5zsxd_@(1D!*CA=U<-1T-WQS^zcO9BRQE>M)f_DCl0G?kWy842DxcD@7Ne+dw_q zm}&^E+ivO>(SmqD1i;yJk6HvHq%pwdaZ{@x5!eIm%zW_CG$z4kK|P~)z@2L=(5KhK zJ*|k!CF6l%^BTT0gO^|dv_ok?9{NgZD1z8RRKT2V4QRy2!BuvM%7A{vT<~HGf%3DJ z$OkS;7O|Xc3<_5P^q($)+II&in*V|NB&BwOXK4hvhD@Q7$kWssXtS!RCj=9Ef(wDR z839M1PheCc_$9xQSkVGZ(s)P1@&?E6K~E1!Ym>`i?k`v8w(XXi{(Bh|p zg18fS4op-dI8URgU*Hz{No9gU?mhG{+CeXmM(Uvh;aYtTy|RW#5U$&a(4K{$n_&I= zAoMRQ&`4B^{sU*R3|uhB5jWBl`q~<77W6w)K>IubJ&HU)21DCEnu>UmyQPe{!HzuqQaU*z^tL8zux& z<4cZ^H;z9=ut2z0M2gyis%y9Sy|})lpF|`%Cw?k=D$Eu<<)7hA1J=<{7}>ueol7MnKKwlx&#Hr1@CI#jW;ta<6l(l=!bE3MW4ftKd){vdbO`++k z3+f~4^QzIx82M1~Y`&9y7zpBx>5-5u{S~h8bwN+y3ovk5-qY@Nu2#-7jv)@N^SEn{ zC&`x)K*@3{AK8lq@gd9@)@Rltc5_a5?kZji|EOSs&@Y@OItXieM}*x3Ufv;YT@J-6 zVcuqZ$1!{yFj$(PBaz|seCi?@B>E9g!KW7lUmh740knt3gq;`#civdIN>BWCzHNdl zHWxJE4dEWBpdUedwuih(EDpLsqjKEy*!9G*+2%FRHD132AEHn+MCx|PdjJ(yC6H*8%0YLr&Sv5Ywc%E9d(D$wIKr( z9EDkNQQ26%Ln{ei9X>NmquHd4m01L**;&!sULrq1uwP&j3>H=kABdKV`I6(3 z>yo*Wd~ua%r0_mJk6Xx&U>#<3h5QsQsz9PaRk9Uy^sZp5;Fdt$KorcP-UK?tvY;ur zh{%TiS`z6X=TT8Gf>{RF>SmZ7%Y$t-8ir0oX26}lA^8Fr(YJsWH_8>XceTct6oyUJ z^U5a`$LC+m3D3^YKAfAK-=O4ng-18v+}+;Ync*DdC<4B~X46^I1Z%WwS>QgF&EKj} zht)=(j6>>ti^ZeAg*Vn3R98dqhi0qpYnq3L19_@t#AvNFv_d*dFp_l^)~CLEmN`e; zz1C3cY0ER`;!-7QdG---# zvwWTW4CG|=mS(~_5Jz@bHcGxz{zLXtIz*xsP2(@*gfkmpIMRu75UDVt*yX?B^LwXz z-JbtELp@tP9f9My!(S5k65I)^2T#e>R4;lovKXz#RxmoS-m(igo4I?rUAZ?nU)axB z|1mo;IN-ZqiF}9t(t*G+Z=`FLZH$SdudQ5Gswl*M8-CqM@A>oDPeJDW>~Z-AOa50? zQ(I;AIhuj)^PBApB)b?)AFUo&0`Y`-SmM=ei)+;2M&px>mp7EeXT``P%GI4i8z^5W zSF4h=!y){+F{Jk^1jAdYEq-&gw+EBf1v)Ci;_B0zboo1TM)88bf|iS=8AfkiWyp=pk+?!QAqrWl5u4h zq#q?rQ1Ia96v(xWBsBc#^##{=C37XnngP z4D1#j#XQBzWXEwQ^0xE!f=@8R{Zj}M5`m2Wk=vAWht-8yiqFKpAZgSLVve73@3o&d z*E6J7rIej3`jBVOPRSgeW>4+%Wy_aN-#Vvv$eC5Npvr0b>gpPJN<^N#`QLwE z%Ndh3He*N{k$UE9&u{$nsN5kXjda^=PXar!6!tgHSynohNIeYP_tzt)V(WEngV5>td2OqU&Y7@#36|xJ6?tDf(JDZmx>Lc3}603TrZlQUmR;rRiJhCT}+mQ880%z!3|Sr?`XSVX>C4i+);Z&KUdeO`c>u8a!y%D$@`)fg;VmM|2~ql zJ8Nm?g^WkPmgkKr8)BH?_!)@AVX;G?5VYjrz(<`6I^{#u2G(=wzu^_}gPYg3>DRV* zi+%}jNLznbW&4c4yOgo4y;9@ zu`1u)OO;m@5?QhMjIe{C1%DrJ2=^L$GP4vrg6PP7!NL9n zFYcb~jB$*%*S9-twC#&+fL(1LZa-pMYjv3~8s{0h)f871mcK4bE_q%&qUdnp=6vPv z`8laMt^!U4vv#?Cq(2V1z*+z+t+n`AnjZ}FRl$1S0nP-)*(f?8yhTBq1+CjQSy`u* zX1OGi+l7fR4?(c}KFLkl8`)sV8E$X7K4ew$O4sD)=gNO?D(F%=x~ivPleyZK=Q!)Q zXlFUzI+uGd1cCd`NZ@aj<|+%cKchayh1OdYe>=Ws{P241;vPnPQT3L860P75;@a7j z>^JP=tPczpRzg1a9=8=3x>fJ1R8?tf@(kZBEnRPY!-!_!lNVAuVOBMf)FW$|k9lR{ z62)bmmUM*psPGPdBKIVVz}WQ4pvW(QRia+*v#t}) zn~rk(LeM(u?OJCKSB87McbspEPwt)V{^3Zo?lVQyT6Cjox>j2&PgeXaTVBE_T=83) zwKDT$cFV$}m1|7RTs49ASR#|c7t-_ZQ`V*xQkvHOE4VWUnMG#a5va zQZ>dgCQ=;VGKa(TRfkvQRv4<*8tPdB_EeYCv(mf5JIDLb`@$C&{6bA)?BK7HF;)Gw zv%}hl8MI?H9+fL}cj&3mC8{OrRCR^wALRq-A%1t(M069`FVNTb#xu$@+H(Tt{ogpF z9bc@qmN6Est+}I*Tk88BkdQ;DwbU^3NifZK%cFB9JI>qG*1aZS?aUfO#if#w1?O@b z{K`yw_ifh?P1c!`9r~g6ncg47Pna!G2JFr=*3Fg$wmshS%(|iH>Ls>l(>|xej`k;8 zbf_1i70FfrZK|a2=u92bniqIQLSSEoSR zx=M14CuR5vsk@G4mI2Y}s#jEx1uo}J(+cYXTcmBdwV(Ba^_Sh?&JCW!Xil1FxWcU3 zrcKvw&}h|9RS(s3w3EUIM&6G47!@8V2-_UWl|A7b83*X~#6Q7af#Lqo-b?PF^QB!6 z333dR+xWuN#=6__+w*VW5b+u8`3Nu+-ov`cbIgZr!Wux1({94*?d5!IKA?}Qx>m+4 zvghUHJjpzsKKJ|P)adWWvo9C#(#^EagT%ryko&&L`Ox;p`rYx>FJQ5iE$WB2VRjC5 zj_bs3bGPBa$U`BGr0b*+A^kKHqT0rdiLt5ufyjQ;nKW;S!L-tsXsqG{+ROP-`t%QW6d0AI`k>d1eu;QPKEuRQ|Z%Vr=-{G z9BQfPr0?SBbiSRVX>HsHb(Z|REJOZQc~P4dF)89r=nKI)n(tg+bF1J(#=0LD)6Qky z$sJJGwCr|eUiFTeVKq-{ZtEHvNTc0$*B`;`A?d9~qi@xl(%|2QXA=Hb??s)laYZqv z=vOhD;*4?pm_A{)kVx@9HcdVEFl}3msJ=zbSm6JJ)GVxiQ#HC8)m_vdtt~L_xA+`Y zzV}oK1627Ey<)O@ci7#C$B~aC$3>2ctc*Ms)j#@H)W?W1VdbhVpk3V0AIajQ_lO_9 z>7MPbn~qbqI@XWo81sG8LsP8zo~5Pjp8bLg_jLf;;c?J=4`-j?wiS#P7K-|cn~3KN zhw>V-kK(a(g8#O2k_D@MSoNZ8MB$p;CRyrq-S@*^SAP2TL7TcSdrw(kb3M;4zYyk` z%7S`-NB=saCDNN48#1ZR?G_ashID3j%xNQTcBAfKtxsN8QYxA(?l1izmxC)PlJz8T z)hejYDR5?X`7!0YAYGY#DF1colj`fWdn`+AgY5?#)y{M`f#!? zVX|q9Rq0smT;w+RzLI0`uG~t|J4Ij3$A}ZrM9lQq-Z7J+{udDyJ|^t9cAaLR>NK$L zB;rDzhII)YL5BH#u6V~Z8)98%8DUXa0+wmETlONL?tgOc@(%LLi6%6Ku~`c^JNWa3 z>%=1^e8_5u6-$Lfc(<7}I)U_gmpUGq>g$=6<4aZ*l;zz2H8CUghxJ?6uirimNHu2N zDw}N<2d+{CiU9ZXDOLpua;5C`!XY6Gqn0)t*Wz634{Z`#J#Bh8{&u)TQ7bsZcH{rx z8H|_AJ9sJ8(s#{PtCN>J%xRGEAnpCnXTJ{S4JntJ2#xj=+KMwJz7f)8fw1~p<& z*=&tH`c`~eqt{KAG~SUAQKv!FS55nnlaT2Z!Jo}>FczU3sbRtT-j0qOQ@*ZY<^Iy^ z#XUa3!vtu)=L|MA_Pzn(LaD_b=QFUXvn-4d_zTr($I73FlmLGkdLg zro3Covk*#QkXOhjD?e+#L@tV(9)CWeYr>j(rkJbYTGb9|XWngm2KCw>>mF=>VSQ&^ zYkg>8nn%<|=|ok&@{01vs#@J3^DpP(;33Syh6HzU0_c-|hmKKi*UZ!o4{I7eK4Ns_ zxyU0C)5C1)Tgp1J9fAq06=*3L9h~fM?;GL0=$Q_he75_CGt05gao%y*8S0+k-5e+( zpQ2wNzb;1jpLmtzKgj}czm5DqKf#;5d(jI4rXwQ0VOoPe||q{+O~WU3me7pf$x){r*Z zKs_yDQ%vW$SMlG#buuC*E%Y+K9V+&&v$U`E8O*iwYm@Xb`uX}sI%8#Sd3D)=(!C|B z(p%-jYEFRK=^VW=;0Vu|5@3nGQ7G}*RED%x@B5wT4#(gt~OOzoc7c1ZGr!g`s}TOE7JVX{oyU5 zpT@3@i;g`IB@QoDw^k)9dxV^mXGnG8(ZYlRd-tax;^U!3?$tX=+WX%6GIZ}%4rJlH)#z@`0s(KY8N=_AD zDNC!`qibehn68+on(*2OdV`^ddA`Hv*D_Vox9UrgkK>NVSJ%HCKeO(}xUviK-78|tQcBO4y2^G|UDdBNuXo6O!^mo69p=NW z%o5IWeoN6&*-Di?{C)J9xXyJ4)*TmzL`%Yj>eKR-!c_KQyc`I<9C8m3xf%x6`FeUf zxUSnjLmuXQNRSmf#<@0oKL?peFNT@z;faMmM6<=S#i`=4k}i@dFjE#G5lcFXPm2xG|NfR!o^|6_^DNmfWtKMQXineX z=L!S`Z;G}VI|lw^y_JWB=BdJ>&c&3*?u^Tb-5zdJ?v*tZ$BDa$%cUAs!?3oRUW!M; z0UQ-BBC=htEi?3Y%E&xlZh8Kol3NvRb?eRl+S!h+u+Lk3?a8r7e|!OZ1%H8Pq^wvq zKgv+2dwl2mPwF>_-yW-tNK#8edQ0>?33$aWQFI{5*AsYlFI*|MrRGP*3x?#H2Gx(M z<8*TjWhMsr4cJseybrgt#1w+5Pib7*w&7dCSB9Mh7PuZ50>f2(Lp92&ifz(HqNV(1 zu!kyYL;$A3olJ+^&aiK{YaHjD6FuL3uLGBfX_S+$L`C=za3CJ#jO6+G<3vT`_Y#S8 zJZuf6;o|v%J-h+z9L5{;2WV>N2kv?Vu5#NwGh+Bum0iXwew9BUw{8|IvwOy-3~ok) z%qdx~f0?p;*>$sd`Nwr%+?_C%I7i-1RT9}cZg)LL{rq?;dYPOtXrStvoc2A zJX9dxEp9J(%;}Bi5)Is?wTnyZ<#x&1nSJy3KZV=N>gvjk8!faw+IyGG0y_9IW;|pn z&E?z@WXLyYtx<7tFY1cxMc*@9Yk7o^>p+ueZ)LKQ<+r zt089^HT5!@4FUa5LmN{j_?x~2%dz#m&eFq5m!@fCuh_~se%yoTK*U{bd(|^Vyi6;( z1$6wY+-sc2?5|8eb`n_#^So2M`<-iTqb#$`v&|Wn?)JOR_8xAaJJlMy!K}-1bCdYj zfZ`kEGX?kfWBA+npZF?)P>{=?%Qx~oTn*;~lZz+NPXiGiwPUIEB@kP0Rjn<*S>i7& z%wLwr{QWuSQO<{7n|^VBC8c-F*q7Nm^GQ}k*(*!XFJtW$HkEe^cgHoUf1<(J2F>bh z3GWgbCeM*1izK2->0(6(`FDv`Tq*ekq>DzJGw3m|)wr;1d;W@CP0pPxW%jDP*~J;< z6}myTc7Zf(3%h{J=0D^|2v!S2CA;KU$exfMq5ZUvBc-vw>X_=e>n*J_FlwE8f=tZ| zqT2(5TyAq4{iaGuMZcyjn;rP)_AM*nP?cKmz(_a5JGd|&_LOxo_B`FXc%{%3Utim-{?Z&JUe zS@isRvlF()KaQyhAEOnh`zfsoopN62iI5aoH|Z4FF}X=PRrHa26#dT=Fyd8LOTOj* z&NgMFWjy?~KRYu2Rax`eS584N3rS;Ea*qoaNn=7rhb{^&3b`M0BXpxCD*RYvqnOLF zg18Z}JE3;<)kqXk!VuOTa;Qsb8eDU(GFaZBYLGfFa5Z*mH5PLE%Gj?Lkr^tz6m8$Owqgc$}#~O|`gtgWWfx-SY zKC5?!m-Oh|vF=pYV^@;P>741D?972wwyxeQ{<{RQzAynx%_aFyAr2v1@<76ubjB%gmqPVQTsF|BYsuF!MfVmRZ;gNUTd4E8iXoBH-?ZhT-H|6Hgu6{Xy_9~ zp|}xeF3s^etW)&t(x*8y)8G9Fey4v}Gss+930tSM4)>m<`>|Vyx+^f1Q{{&=*?ys| zRAnlgYKXe8=7{!ccyv@h;Iw8(Il}+Zd{ZAa^rY$!5W!tZKFA@JBc8tXw&c4m{@UDZJzrSRFyrc4~>Vjr%*uDrd(iRmK-8u5UmZM@Q zo{C5CSF@7Pe&i8<68J9nI_BH!*(|+O`YvG)gm&fVh&Qo4>I{f|7V{%!QuLOH$J*T*xq7OS ztB}d>D%L8c$|Z_gX_4RyYXD;Ot6k5HDHU{nhio!Knf~o(v&@A#gA2!3d@)EJQv-*v zv%DwLHp+OFP~Agg(_RVN8n#i3YMq*a+KXY`BL0o6iP#>Fhe8F1%x0{@PgNZ!)0RV6QZfF(DBn8sqb02 zvgAmAQ0oMeQqJ8*bYt5=Pd0@loXnZMQI=))YQ1(iuUA zP1pX?v{sJ|{S?9qU7(4yLZp}sc9N|*&ed$d(8{YqY=zg_GOKFFO-PIn|2-W>Uece5$f+y2Di0`wZ)o4p$z$SfslEIit!C~ z>@+T@!b>BHG=+}}#}|(+J5qJc(9+rivI$(&Mh3yT0q?t9!~rj2n&gbMpNx<_k-t;? z6S`QlGVD%xQbc&9-!Rs=&CIf$ zcI3N8dVl((h;fvUW}$;1KkXf(C95wxp4*Y%Rd`E$TdI>EQjP}QK)fbNZB=PPDMe4& z6_K6yg7puqjK>CJed+GQPK&*@y~w7u=h{y=HUPcL=Q`;g>ACB<=UD*i@DuL2?lRXo zSC(s^>x7HxHoKa*?XGy&bLV)+1Y2)QUE@7{Sk3<`3}rvdYRls)}vH#@>1nKdOIWXAGzuJ4wGaobo z6`tPSJKj`ZqF)#|0LR;xD-F!#3IaW(cW1I^qy zgo}4)q%cjahMZ5F`P?~N9*^L*;Dvz4GEZD2Ns&&Lt(3bI66Kqa3dLNxMp`YJD==~2 zvnuc~v@7*7*eGxcsMGC%Lj5O4{VKr#?e1pC6zU1e+t)}QB0yV$*8D2)8_O{g4>IP$ z*rOG18h@$40r?P=u%jqlxE~Ul>Tp{EQ#_I}5W9xVr$>V#L`izVo3R2C)53vTmJ53K z`P3>P6^@}Y$$YXAU-izai_Tcb-s1%u-CD_wI$dt zS)*V)dcr)<>^7FyHmg0SkJV8%`WjUK%aByt+LUh=+A1B-+;2QA|IEO6pqqaq29UeR zmY|yo0j6;S>?qD*t>!G|)fdc!G_nD*H*#8jMUkfXC0_-Zh{r@xLPYSJ`+$wJxXffm zHW1+@ur7EKMm(KZMo6SO#=OS(h)=}|VSksxTHsx35Zrh8pm*N}xpDO{Enb7KVSHiS zfyAg4j0}7>J`}^zcE~zVH?{!}R35OQ4+B%N0q_>Dfa_2Tx>E|s zCR+$Px)SgUO$L>JG7!<0f?jJJb`EO^$rI!7Gx#=qCq4?&x7*@<@CEp1{5B|@48R5N z$2bk@rX*l67BH?bIx=XWPoBV=;lSX;ZlN2|P_!??K#ZV=FQXbzlSzzxM5u`K;BQzT zsPqr=U-Kz^Dc(um-=3bHX&%&b)LrSm;GW^`=1z2NcjdYsIE$PH=V|9dXIF4(opQc_ z*DYN?T*>a{p8KA3?|;7K{_TOA!7Id8Q04K!?Q|0vi*~|H*fo45gTs8rJk1)%Zp?|~ zUgZwvJ?F*qkMVEvPx8<6llepW2Ht623*J}mRBjn(9!CV(9DCS@*kjmD*%Ed+>kMl; zE1b25ISD*hgBTU~5d1#Y1Zqz`V04~<5lkhx8nyzFwL5qj?tr6)1Ul$4QcGSUmJ+jx z1R{#C1RH^0BcIShLi=}cw`5WEKv^G&Yy|yj7j!LnTWGWob_n|n8sQH3Cj2p|##%7u zFm^IdFs{Sbb_@yQAwC~c50_vl&_R{xLD2sn`s=C#y3H{gd7nK@eVQ} zk>KmVT;Sx}{8u68XqvyTf26;@pW%1-Ui(h_rUD0^=_~S{_AZA!(5~Lz-cH_b-WlF? z-t%6{yUs`XlKedbR3Ir>7bxnRK$i&=e!4T_LXLm}-Uuvs1$eI-G4C>EtVOIMRyXzu zb{<>IY0c@w8P1smJovU8lAX>z#vaY)u&=SEv7%Vn%q`4l=2ONJMl>TEYEn2}iEYJN zV>a|8ItUbSPmm?xg0KQF_y&~cMBq%!SnEMZgfa2|lPy$W*KWwW<#C7P8P!;CeX$yxm@) zSj<4qAu|vqk^yY&2B7p@Kw*&L(182^Nu(v9W#tEN1cnB%zzzQ-zs&y&Sot%2O@X1$ zgb|j_YxE+%X7E4fAnl=vf2-f*9~O8RXc)X2Y)afC8iK=TD(v%GaCjU-L_of!(b-rI z)&k#;SKwV4dl>JbUbbh>hEh4gOo9#OkeSCJBV`$Qx;g^w17-eVTms!-FNPFI!smc2 zUl%XI&O!o?0Q&&fR7>#dB!TlL6v+bj%M4J$`Qhr_398CCIF3>h2#s*fAAqa=Jg7t; zfpWP3TyG*O9%%XN!DCZR#en1E5_mb91AF}?;zQa%x!ghxz|QT4&BykGj`1CqiTwu7 z>Sqib7FZ%S0c(JH!DF}{Z3D{qE67ODEWQVCO*2{#s=P_yn#vuy}YhhfI*3K;)kRKRHc2g=>N!>VQ9UGwfeAsfCQWZQ%dWfi847_+?VS zi^Bx>%TS>A9t5XW8e|cmpe=0yXKySz8_sYdx)~lDz{9i&T9%dQ9CQYJ?T(H?$3ywl zfqKvk6~RUV>(>T!;rGaMaKV<61K=}Pb%f1Zy+4G=?z5p89Q{d@20bZLwk3W?4W8f@04gY%%l&sgl zhjSCWL-*l0Qb6Z=6UfyM;JbU^0(uPFBhXd9{_EIz58Ub>ps#%ik3ZDyl)nz1KNR-^ zz_s56W{qygS4xE6y3kxC~ z>>!-QJ8;g@;JA$7-{OLMt1h^+I>UZVhT}+tJv{`O2g$&~egd^E4es!nNG|k!3L)pB z1Sy5B1pJJdz|;K*xfhQ?o1YAKp~J`?_}k_14wJ#T*AKi^Ex=Kz`RgR4z;)+aOe91W&H(wi0R{l z-#sXSOK=UGfjWN#YRUoF4nfU7_V+3{19d+MYQq(%`FG%aJcj!40si(U)Phn-a4^Dq zA_$7G5dnP0pM6rpUd2E=+8DAUTEJfQ0O#O9@E`Vs^V}cGVi06O3_*q?-Qjgl_*q|g z_2<#^Z$?aOIFdj6T_3g>*!Ku{$3NHbpDh%QS`M{D0AHEV|6su9{z+gU;mm=p5h0*H zlJJW9`#lc(#DViE1J_0*q|k)JBNkkE^}uu23_hhf5RhBK*AB3Cg-`#ppMUnDGq|Bz z!lTXK5^M&4@n=u8uqola_;3a=_@n^1G9A!P>A)9S1huCK$|D;}^$V2ETe!bG{d?s< zfYQAS*V>=lhns)zG?)M0NB;lS_2;^K3BUdEFH=7kk_-M^RTg+>KU`NlI8rqd4Odqq zD94t6&u%X$y;0Cc%z&CTAKKStkh8G{e0yu)YFiIqSHWW?Y>VON^N?Bady_zeJ`}cL z@E8o`+yjob4ZI(Cap0)bP&R++h5$+mh5A9mCwt(G+Mv$a;pkx40Y|9&`^p4+W`v(u z;5*a*)kzoBN)MDp0Gt5?lspa_0_R5XHzh*|M z4$eswTyu5cYaKW@^`SO5ghyl8$ENW3bC%lt-8v!vKpAv}tq;_rfl!BrA!A^h0NWJs zF-(V}{o_}i0k5Y1{eBu;eG{PsCjNb%0>3pGeliV`Y34xb&4m)0hb)4&Xc277|CZ%) zDAVPz{VB&a@Vp%EXDi?*OOXZeyYrz`r~SRlO^4?{pF0uW@z48@hv)IIFXQ0sjDxy8 z5blOOpu9(cUuX!Nv(fN>Bj6_!;9VBOK8%H9Uq{Q(X+ZycA6y(*icMfHM&iikF&mG{7gK667Sbh$FzE{4aR6+9Pt%rH*6tK{tcTPMd7)PfwQ^~;;7bq94 z8N*2(_+}EY&rCM)KeU?4^M9NDN*=Ug8n-OJW5sq8;clthqmmm5a-K zpCN-{J#`6dPpOdvVBC(OjqEp6Cn^y?LnN_>5m)?2*!`H75Rvyjewy_S^&_7>a#SL2 z6g=!)$ZpIiMpOI>Pe)-E$KYM)9ZhfM{mV^qj`P=HpJC5ow>B;ab`3eoYT@!Yw(xVf zdGtlU$oHPNj*kbsxYRhCFTl4F{{;|8wYx-|2~1{IVG>B@OCzVSi@l@KMBe}07imA| zH~NBz^!n+1-azmJ$$YtJC;mEefM4U;&Ko5>?bSlILk43dvwg5fa6D@sokYE2fA+o$ zo&$mr1`f&NXb(>U_L#c~S>d(P9Ck}aUxMk|fbU}eKqvaT2U0{~I2t(Xg&s-B$zWUi zPsT0I6=tpTmOov1p1an2)X|rNa>rA{+_7{rYbM9#_xf5h<4|uf6d%W$2U(cw=+^8I z##B=5WwMf=?3sBV-Y>k~`Den4HF-i50w3G|lEh9W&$(63Q!gLtC^MVPqdb) zqbA{R0&d0zJl#Er>4)Tr4)i-T85a`_fl6b5BdmMM0?Rkfp7FqMk=+On1~n{ZRv)Ask9DzgKQyg6XzH(uJd1^I4lNU?Z1sA zqD*2GdKU7O>Y?FCA#fh6(HYoIf=A7VYmy10hz00wY6Q3>&B!yvguDfBeSPEu{RZnu z^T}$e9ynUWs4-wCTjBGNo>VV16KM^8)~!Szau(>dOVQ!P1hgE}AY&m_GX`x!@*yAP z3$+>etmlxaXa#umE5XgFM$VGg{kPGLj9K7w=8zS*j z-~h@5KHP!iS=tPvvCv?BvJ|~Z4+=h{5%4#&i9E6`jAqr4uy&fxMQ)Jakpy%=@G*H1 z=HYTkF=>U=lnm?*jERQ9J%>jxB%dO8@o7Npmy9unTes-9WV= z5PAXn0Q%CuNEdkx+NJ_1wdd$}j8AtY_R-VO1K=fGiKb$8klDmaBpcHKIVKs2!K#oU z)N%3_G9DNd71SL12y%xSOxGnpLw1&hRMJDqjZ_1KN6#h;f{n=u<gvJAmrwIPgrCA{Vhmv=((?A<#yB zg4Svyj6c6Y|D*$2gxlWR{p7{)5_k>ylLWB>8@~&G0&gpD zuv-Y^f)%{c{Bm9@{|Vo~9nKYUdT}muMBFKy@2oql7OWAhHQ>m&i#GyBlZ5dSZ-9?R z_aaW(iu6JkgFiM8+@n2^=5#)FhBS}^$S5L%muxOt zI^bt}H}BHG8gMFQ1bzmeLLJS(T}&?P0kHaK@EgJ0om`%VYO>$JY zdimM}!UD};?ye3!6uXJbfsZqseSq_pBjvGy7&rjbQ>!KK#AcCF%#}=+9+XMtDj6>A zBv~R}EFLH3iYJP8i-e*W;IG#3Zt^UU>D!mvp0fpJMDMe@u|5H*TZX5jgMs9J5?z9^ zVV>nUat?R~449Wc2xA!^jI|v=95RtSdMC{1v;iJXETlbh&{Z(=G8KJ>zQ>+Iu5Thf z1y|xnv0mVtZG~^e-{WoY_1G(fPhX;XQuoNSkjnQ6ypA{hn7fhlsPmouxc$9jn6;~U zk$JnRgJF;UreV5HZa4)w-LI>|bqi|t)D#-d>czFBv5`q;Id73#6Yb|+SKVda8v$it zb&vtq?HnwQHIKEH*@^QKRt9u@k7$f&mN;CRAlWT?CYdBXE&VQ!l8=!|q}L@yl8cgf z=|#y=iCMf?JV4A7`uP&UW&RGnj-SuNx%W9f_8V3?a}09{(2lpjV=wj|9fLfFImsa~ zo>@fa!udew z%M4HScWNC*TwkkutaH}b^&1R_Yd08^&7&>PEemZy>s*J%9pRe*^mPTL^p8!c7plAZ^85AbRZo_VQ%Ifj2jbVWId)EE?Z| z&A=Bh!twg}a?B1yv&S&!H3;U3_mWov2Dq~fa4}sGZjrOSjc03P%`z2OZW#C046jYC zJ!{~Y+=egJWA*(FmvoP6lZ?lVy)B8hdbVCRj-$kp;y&viK=6r|>TErw4a1N5E;uZo!IVE@k-e>lmyw!#MU39SFGzAvGtAT-}z@%U~kQ&C3A&}O&0kSB2!Tee;m?zVrUw|xp6^LIc*m>+VUI(8B zYctm{9$pFap6#HfG^P#{bIIGm>prWehkLE_U&nTPJA1nAjJ<=RDq zgk2#!c$#Q}c#Pze^sRI)C>AEjM#`GWK1hotZ>1HoVY11xQVA|OEWRS{Def&AC3+%U zD?BQg%P-)*hW|i0_%dA1=*xJ)h+{rsgs^5YOMnm; z$2`EuVBBC-psz6_%qJY79uiXOp?_~+qJOXRl6xZPoF3U0+W)hSvW~S5H=Ndw2PR|( z(*xtxS{>9Cogv<^#n{X2G_SVJbhNbx>|2~GTx)!rf~|md(Ft+}U!n1g)y#ZmXYM0j zHGinEhoDCAQrJ}7P*PvoL$*rllr5F1Wn$Sd=_%0qP64Uw0nAOUh3vVTuyXbmRI-wQ-G3yoA}~5o z?Q7&iV0FUoUm3U@5CS2B2)-pO#Bef;+63eGdQdLkU=6P^-kf28_6K1#VJ>Exn5~#3 zqdu%UtYf|g!qFm(ggGw*-JQ%K8xjUzLw^Ie%=OcC*S5zt#yQ45#oF7_*q}7*t?g~3 zP3?_|wY_RJdfG6=7*qSiLYj}+ZaWyZlg>0pEB99akRTiSeA{W51;q;R<*=$TmYc%s z#os8bH*$A#)^iSXu0odEGsY|2hOR^3!%QU&ecgN1C~`LtNV)~a24+HU;5RS|Uishn z9sVkRS>Q9QW7&x9WHFfyD{rM#1gsD}Lfhh1_!ywaooCiE;vw55jcH`5I9Y5DYZfbu z*#i>gx-u8w0~yoc&UJ|XMC=Hj46uW2|3XN7s1Nnz z;F>~=r*&b*CD0~+Fs`p1ZN6jOVtZk)v8Ov$yJ$~HKu1JD-?$6<4El>bnKxJ?IIp?& zg^z_1qAj99q7ZSkWRdi;Y`h$iZvbz*Q!17Xl{%$}{EU2#{I+x^=$xC2pF{f`1wEKT zp-ecD@8xde)aM|a3+w>U6cQLRtOYsqM*Ol7TNp9Ml;C}$JU2d4v% zvN1b_)sUUfbTa&51=cDnZb>wT7`|02 zbjyJ+ddhgmaK_-!P1LX@fnqB~{%S=cZyhHqg^s^QLr;qm9eP#Z^ z!5M#7Okes9P=gv0Wkey22$WD;5@7YniI(68fB+(A%h)a1GENNST{q)Q<8I*Aa2N4D zaLnx8tp8YbSQf~~UW?zsf1%%iQPqZ^V9j+`Ai=xNeakt(I@dy(4jbER>(?azmNpw7eyVQ)p}qum6qO>&y`P-%@n7C z`+U0~Uy#oqB-qRs@k==$Sk0L0@TGV>eg~V4#iOD060#HIH?0V?gtg<;Kr+;uF#l{= z6@KU~_ici^%>bCiIUKwO+Lg9Ili7iEgVn(gcp)CiC}e0^oj4}W1D=ZimbZz|g;jNw zAK*6?ycfI>#PDNziJXy;Oj^QP%&G=;Kpu)BZ-H2o9_Z@-#~1BQbjR6G+t-^$8{e8f z>PPDb88(;#x?6?=)pJX_SKY1H1pI>oa92jFH-Xdgx?XB7vpjdbbxC{-PY3r!?+*VM zonxs~@9amVOr9pV~((1XpI5uiSeG z64fX8y9K_&-uLh(dRlwe`KtrtU_}$?9>8N<2W!^-(S|_z7|J@xzQBIQ9tNrMV!l8? z3!_BGL>omBqEzTDafGh~t%Zk#nf#u-rJMp*Kh|(YHQp6FfUKl3a&K^*Z;ofNYnxMP zKW5!!sW9Bo57PaplvM7}Q#J0Yn>9O1PZVX9G$~(QzOZ_s;d+&`W^|3bsSp;~cnX3GX_U$1e&-5Ao%nTxQ39htAn@T; zpiyN*&1vP@Vq0S!Zkby9&@fy#q-tVirwT*K@{;uxj*7DK*A>HyXXQ^X9$ivhy0%(h zv#chld#$@!9j`AicCvnVCj_L#31UU?TW}NA9pA!{3TH`1$hOK-WNjsNrJRs}GEKWT z{Bqan~tz2ha7HDs^ig7gV!(v`rr*)3epKgC(iCK=IH@r_hfjU<0f5K!7UaB^nF2a2K0k*SPMTwU8awrT*!VI z$WG-<<^BS-*=oTF(Q?WEE9xu2qe!~0+hr{72@qU^yR*0x++i2D1(s!TS)9e)-Q8_* zcUYVN3B-tv`%F*we>(Zz=VzW7IxW>Dw{G34y7!#Lp>IR=VO7FvgslUvM5pkU;iV#; zhTjeQ8uB$LC};<)&86Cfnnc0O{6{nJ5t;!6zFaTmTH#n`9cZ3p0KNbHQ=dDu6$+!1dIncMf#`FT(b(E!iGt zt7F>(Kg;@#JZE?K1vYax^c?UyeQv3(d|0V~+4FFA6Yt`$iDR|%_0xm?3V9J`2yYtE zDzbIt&d6#}iP1}=d&g>H7exOPWr=Ww{}*;KtZ(SZklw)>Lo00$VIjMM`btpB)qvYS z(`$DAWh-tr7gWg)&8wGFK07z-^N&GkSH7)JIrnME=l37Rd^nb}?Q7cC-roXWHhie@ z`Bcj6uN8kJW_fa|=cX6z%RidG)b!S}*7emthT6vN=DrB4gmQ@YSsk)6Vn%eyga-+k z34Idg#lMfQUhG@(rHIqYC_X*G6IVHQPV}!)k0YCgj|}w$pVSZ3T;RvD^%))0g^E>j zrD;B!Yl&;MGt1uEVY3(5I%3xQ$=u%b*xbQf#xl^h*JiLyvRfQU&f@OI-iO|blBiTC zMw6rHF6<}ngxFrUSN}GM3!NS637Z|UJJJy~HKsw_w7C9=-M<>UGxnF*3$Yzz%fx!4 z>qHHX+!*#fL=$usT#K^k5~i_NsNa-U(n{|LcX?O5eUbH!DKmdy?t<*MnPYxD`7!mo z|69$}BPm&*l0VLV-|k)Z+ee?wpL4#hO{@E5-k0iM?tLlvRwKPkc9)z8Q!jIp<(6%{ z{k_xZ-K|t%9EejKpqplx9r`4!a#T?4;kYTq))%i);+Nt*i?=CJv&6x~Rf&rbwO1*= zRm`Z!4v{0nZ-<@?xna1aQ-q_ej~FQb69@tm@eA((cU{LGTcYK)$!RKUx@^icHZc8Z z>TO(U9A>;~tYitXb+j8Cy`6(x-`%pWkCZR}K|Y}KSR21pvqm#oUnVFdWJkC+a$R(( znAo@*vG3zf#D9oynGj#BTJZtJrW7L*s>Lmc{Tj11S{GeBa#r~3kP1PE^#Sp-K!F9e zF*R1nmJWJ;IEUGrSnrs(n_lGi$#rC3&#aMN=Z7(^#kXduZ@!NCQZ{AY=gJ>~!w(oQz)3$7g6 zJ3KTh5cfyI?BZoh9xa)fxUWQ1@j3~k69yG~o^UkbmxPqq)=`JUY{4t^ZtW)V8*8E8 zlXm&EzlwLU%jWp$$g_DYYt2)PkMpzh2IR-*cQ2@CtZs}kl{8b9L*{gg&W6PS7w`Sx zOO~ASB_fh;$PEGujZf!<#wi<0hHnUe6}dY4bnG`oldelBmQXgKQ$n+Z=mhlm__1*X zu`6PFLJPErP{Q0HSA$C$7HWS8gSgSmCQ3_Ale)UXPql z+1D~(r?<{1|K0!Hk-G9*uT=7j_p3i;a>|C3g`c87r+rzR>iPaWqh;oS%#Yd8xmkI; z3TB#TSZCPl15bN|v`>kllDXcRSB7c9+OQpABf<|xm5k{hw>iFDu@1#g6dzK;iMkmd zzbAHLY)bTxNL~1`kZ=06+TS%!u*Te?-YIc`uikC$@Ae?uB}iMX5#?#wF1jJ+blv+;7T~0@Hs(=e;+q0u1#!%==+g9!ncQ}g?NIR7`A97pqRH~ z-SlU2yfOr97SY~3*9XT-TPo1iX|u+(r@)^#DmM%sru(zoW;M->&-f=j@B1I%JKYR? zw@GOsX>(GWe0%z(Rce(V$r)RJcFB2=`L^k`@lioBcu8N)XA9OD%b4q1R#+?BX4!8zJkAMjz4sq~ zjLea(>4xA=eJZZeT{4Iv-9j&hJ&A~qsu6Q1_I2!_xGwQm5=;s2686Uj$E}aug_wzT zQS&2bMhp!<9p**-lryAg7ihizQxzQ-}a#dxwH=obQ`6&o>jwsP3_TJa95pfK zTC661eSEF>(D>6}7Ha2R>ACBX-M_kPxH>u)I-(pAj(zsZjet|g*^^g z8|(=h5Y*amUw2FMNLb8^TtE6Nkr}w?FN>9=H198-j&9Pu-@U>!!eex|cVBfo+y^}C zJn8OPo~>99J>{Pw{UIIlp8&c#6}Tb2_Uruy-xP0qcdGM_J=T8KcEZ}7$avA zN@%z1F9nqkc@R=RbZn?K_u8uvK4OHys?DErcK3ST>HCMEYv$NU*>?1IA#!)LzcPzScq@t^cYdDkYUrcyz}i`sFlu z6rT8;UK4hqHn>Lst#XoenmNpDGYv8IGhHb-oZlz^2x@I6m^GxFQ#pfk6Y`4X=NJc= zyI89^&O48Ie)qQWR|-S`D}SDH600}8(3&8(;ez41{*$(Z zrmGOi?`6{%iaAAZpa;+&sV8Iz7)Eyi4f300@z3;?N3WO&1khlAh%`H3!|Kl-G7cP@ zL0k*rmiSOJS$j$Q1~DVm4GdOR!h`z<6$|>yFx@c2z#A6n&+3+HTWZ>gVZseQo&O96 zu^zmgJq&;M<;-g43S*;JQ2E$*&L!RCbn+5>jrYRK_Y5_UeognmUcg%V615S0@gJ}c z5)xR78a?TcL%yu2%lEz`zNg-iU=r!(vA9+_*E>#wO(W8s=APv4>9#nBIQuv^I#xP< z*p1dL)(e)&mMPX=)-#q2tgL7)t1NqgN2Nn7O&@!zGtzt5cNQ}U*{_p|1+?-<%s(@f zjl^$cMX;Io;>Pj~#1@(xnl{j_!Tc9+M?AtTZWXf(9Gk7E0&)!SA-91OAdxCVwV`ek zY49|>uPngYVICC*^iqWh~rp??5Y#O}HST9bCL zcAw^qSXQhpDBvvW3T}#1Oa|gHo&!O>1alRh?S1k0dL|9g1Jmeu`cJAX^%*|AdsMDz zrZIDj-b#HS2a%TvkjsE^ufDX!KhHPASI&Q4>Ka%mV^1Ckmj8mkWt01?vxD;wC*ykU z8tAU(X52&Fio3q&n){{mDX?J=U`7~W-ez8Bu57Mp5=~2t$4qU^t<3*ghTA(iuex@6 zYWq3^JF|p;u%8JSfiXcy* zAPtn#rPKa7(qZ5?xA6D&wexO=eNfH+$#=_J%6rz`!Q%tQ?nrN6Z=$C?SUW1Y*MZ@8 zh;@nilx2^#y0x>#VvH-0@*5j3nzAho>}QhM5y(7S;a#9MDT~aYA&%eb#%Kren zjSrO^;tiQfHKO{G<%yc`OO=31&0|MPM}MLR(Jh!8*d-06w!ydUynGN55jEh~Kb7#% zPx!T(j=D~|zqL9|Bk?05FKY2Eg|}jN&F{KAomSrm64pcW6)SJQ!-J+BGm!4gjN^9l zy9AH$RGcgB6$-e4>{(iaNR@DUCKF(*@;fn$ZXj+ICW7s|1Us9V!hE28^a{jNjA5QL z6_{B}8Rjs3792wd!JuR%8Y_E&fL>1NDXV%e$?x=$Qlvx#dO^D-V?3BB=>x5$T3AyJ z_C~o++ke=%I%m7Hy}$Ss{|KqFzpihMXQx}|?(WpOBHY&@pNaN)mM1_wJ#21fK4#u$ zW$ag-13hj}3BTyu>pSC5^VLTbOeudfct6HToBSGSeBd`QgVca0!ffIc*^+($ulsFu zG(DF*N^XRY+)rXT_Ir5jJ2xa5>J$8CR#ByCj$Xz56N}^y6>QspLPThZsg( zrhmhZ^-1;;_6frHX?&uPD7F`V7rwxQ>^|RyZzh!EhcWZXf$}Wu+g%BmfG9DCwor-W z7x}idCD0K*t}Vb=bd!2V9wl;Qcsxm8{3m=?-%#&jcXd~e>#J+GYpIKNe|8V?ZuEZ& zWMD+urL+TzS#R%8SGKdVvw^*yV}^6O^M&)8tFq^aYb(|YwpdHsE4YSx&v`rfDXAY2 ze`9?cd;>kDvGcdU@y6cDvClQjlPW!h@5&dl3~?o}$oI`l`K!p$l*C*YOJU9JzF3v7 z&BVb6sfzMmGDvRtiP*_g+#+7#nsAkww)93)0Eg5PsuG*RFVNl#iVvwD+(ds@EX!-z zwR9GBiLTEo+;l7eqI5^~>AC@$J=|`3Fg%#{5e+CE z6UqLEe&3x{m|vKsRDHx(j9^G^uAtNG5N8PkxYqPE_=mR!20laC$!73Wd#Kcr*JCB{ zci&}1NnOHP$ZB{AO!N1H7vg);Ng0)%zDw>TPqO!i|DS;3L;Rh0kh_t4hFj;E;NAzG zqC}U;ebc+Zciy9O<~lwj!t#xCjVs7q6Nt?(5V4Zxz2}@~ukL7I>t#J=_c(eu?Vh31 z^uT9%Fh;UY#1f*BGTXldd}8yw`antMo)9iLu+Dvr@JW}X_69t8#t{9;u1q`OoA#*wph0Wcq`f7+5UvSH z;z)tuZh=XC4Oxc{;}2WDS{I$xGVdQXdlnYWv zZyC=s*ncymmC86W3jBH#1LOQ1ePcZxTu<$f%nwZ+t;w!G{9ECJJ1$VxKi%=%yw7yo zGR*PFvrQT)zX&w*jd3-%=h`mXSKAla8enw!)0%2IiQLF)`7LliDBNxC5QR;G}CrUKUdGWgeQBGZSgN~9{Q$Q&@9 zd_^zHqVLm>xdp;J@tC-gU%^hMTT$bwYIF$qMi{26tgoVCWLBv*nzBRXp?aNGN4+@#W zI=(ZzkLnAbtzLoGhz)H;-GIf^ovBKB!A-iue^hD-@1jM(hMyO>3N8c@p1P~$>3+)d zmp#w?yZH~>4_6I;3&bOombdy_xdq1~TcW*(OYnyHA4z_n)s=2vY`X%huabMe^NG!4 zeP?Q6++{9rJ?j_`TunWg3pd*j*&n+czKQZ;(n98g>o(81()QVsY?e(jK-qbt2rfZ6IkH_>LyvphSfP;S6n|)$7s6=-PsNFTrkp2 zqUzBB#>*`iN^2HsmS_%XUWj*v#eyhAiD{w(5r)D1Z@@n_(xvG!^jN9`nMZ6v?-%J^ z>>KU~=Vp47PXlrOp1z9Mfx8meugoGVQpbq2favStUf>S#9QU3B8`fEWEq{4ml((Ea z#Sw4oX?tP2<;ZdyJ+a;vp84*J&T@{X&XHiS+h!|k-C{0gs%f5WI$@e@O}FcuYn`7R zTKliI&#(>mdUR5Gxokj@ddtlvSKu%#`~^x4xv?+V*~B@`wbZjpxxopV#@d~lk!&df zJV5f&fK2?x_7-*sZoVq}kjlos|9|W_{v~AVPf;Vb5_)hZW*O^&FB5~E(#c$B@E*?A z*D-X^e-V2ka&jtnhds)#=Y4#pSWI!qYp4=h$S54wmfs_-2ozFb|QSEZU z&Y77r^k?WtJ3E=@#c<6J@fd%KSxiO21O7Aox+bypx#!qnI|X}ZBlSp`6PWLB>pu@{ z__+Zvy7(7iSKQ*scL#ZDVSlEXyOz5cejT38uu6ttL@nnf9irv2@npeu<9Uk@yD8zO zrv-J5#f)!^n+j-SOKS=1Rm&0}`Mz*=@XYs>@D6p4a4&aGbj}et zvEH%HYwqiT_262l1b=$F^hth78G)}pls-f2Ve8zcw^O~TD6XNf1grr~*dufb^&1-_ zoaOfj4YZFmp~5@<85hRHvcW<(Awx*le9+VtH;KWT1VLfTvF)*jKZ~7#c*fJ z_&jujM_LS9h26+3V_bADV&;tGV6q~)k{BZ=VtGv-7^^oin|+{BDbI?6MEXyreGn zxl%*)QTXty?2%p2DW7%=O{+u~}>*{!gKV zI9;3~&Jf0Ok*tHM&*~6QxrIBBtOx$ znHa>62hpby@v#r{z7X;-P>eKEQ!p8?aGtV1wqLi^1-5d!CDFXx^wnI&TF>UP0pZlb znjYtW%zd1_BlBLSAw%=Md&Z;e?3^LyH22zoo)Y0CgK9=MVEV8@ zY*j>p-(e54PuLIKbuN`TjkpU3<6)}<>;8&3PFy8;xgAV1FlpWei&jJWBQuLF#oNTH zn%SDAy1y~^eXG5$)gy8sPrLwL!m*kIntwDN(I@l}s_{Jd*mL>I{8%wcH(l!iSLt=l z3PIuz^AQ+H61l(FDa?Cj6uX^S3~l_738KGIb-~$VA%91#^DfFvhLWp^6y=(|fJ zmriLx>A&VZcMT=~)K-dq8Cxu}ZiLTpg8on9ygqA1b3I#e#~asLUrqS6WJ@gr6(qk0 zOd9U%-W2~bIz*GJ%LsW8kshW5Pd79d>ceBVzI@a_Q|b;+=0@aLAdXdru1e-=GqY%g zq{vH<$1rZ6I0b#uqj@dtX7AE%$X19_T}Zfzv2bhgMRBEOr1q?~w)VF8mv~kD2&T*Z;%spYqNsNZ z3;E5QhdaY=qyMGS$q>S?Y$3Z*UFZ&MESJQdX4=s=h~MRhQWt+8-vqDLGs^97uEv_d zVQBwO)}NMKQ>TKm1)&84^U4?eVM;Q+FeO;?Of_@YX7x`kc)$7G)c5J{&V32VaXVh~ zpW0kqmiN6BduuHJPztWZFn&Bw!GzT*I z4Tb}e<+1YJC3awV2mMFp7ipwts=J{x26p5ie}CyCIf|dm*A*s%)p$NUK9i|?M1A@h zKS#Wy?QiI;AE}we@1oZ$Kco--i~g(9O4&s00<&`)elFxLMobkwSPfISD505P7B%{t z`XBm*`li})!T>gmrl`_X4f+i`TX-P$(|*yt)wa_%(T>+7hzVkA@fTsc&`(?^t`SMi z8nG_)@n+^A^%UzHjo@q2hML0+;VN+}SR=E6YD#>TPX_*y)?y#_6MXM{z6jrHcarO* zbqpnQJa+=Q(C={3G*rF8pv{L_*z)xU=nJn-AK zzZP3q=|s};ib~1ik^c&d17QvwkZQW;#OJZ*_O@;Af$(g9A#VoT_eoa`uwk}x83KER zM~40pixOrQ`!i-iMDL(F>~J~7o9x(TWo+Z@U|jKDmnYIGT!{EVWQ9F!67wGwM_K3s zZj!KCGhLUVeIz>hw(KWjGj`<%`eu2f{6FBu(vf<`^%nor7<7rcx*CNaj#1eWljH!19}EZhmkQeBZO8IuajBcr#6G?bDNh} z@Y9y#8AODOPW^+RIUyy3R)|;FrPMpb_E^YcbPc|iCJeS@yfBsSq%@RbJ!_qB9sfAu zUBUjkgq|C#$AW3-n$UwmHt}EfJr%8dl)eR`WfpcJ60WF1> zdUtJo{UiNmy-)WO_EZc%hN*@%;j(mp<`mbQ-@uQ===7Oe!S!eD^k1}0&B6S8IAtXd zf?2i$wVS#^HJ}vu`Uew?vQv%&(oCxChlk5^e~`ba_jmUh_c2$dU9z8X_^ev%VQUvt z%e-*xhJDRb@{>&$jq3|q8~yore~!o~lQuJ@?xzPS&3_Edb+~8hjKy4)Z`3(chpw`r zWJ08gnc$ve_GhmDMyFl=F*WO7W31~R=@Qif)=xS11F)X2Voh=v(TBfbI1qL*YEDW*QW{`v^wBUV`m>Mm8D31YXf z%Q=>B$a!ILJjdKJnH#|FK+LC?+)p+KKXo%Ii!2S^xQoOEM5ubfx9BCRV^ymfxb=2G zj<@>8`AptQ-l?t!wy9RD`B_1a{MiK|`CoF5IohRqhregP$@h-QK*WHPgpd?3G)E(h9E`-H6qHe3_EAf{aOpW$Z=2L6oV_jI@Uj6<;F`NH_ncG=sL z=)v8`&UlbMUb{fhaF5~TbDR21$FWvUf*u{tm1LsH6M+xj6|M|0Ze4X0^ZY9%lU^=S z=Q3mleG2NKzaUQKjMO5<7$_Zh>ldVwz`fWR_($1BmY}xKEVm9wDvUT=qk#wUyr9p( zNsTgG(9Rboa3z=osv{U7hauKNLnp&8^&h4ythqng3k=1MVQq|y8Ns$?r?4jW3OkS8 z2FV|Z$c(4(i)WQ4;DBo_4+QehJm8#U2Qt8N-Au}bK7Q!A=Irh`VsR9VGfprj<=-hN zWgcw2VeD+3YOQE(=5X8ZS({mr#NBJxH zy7)?BuKieELA0Y{nO*RstijA-oQ#&^g%+CcTDL9-_*1iBopJDtEY9y_9+D4%G2m4G zAcN`E*h8rRZ^@NR6LuJ)EXOhPnd9J~o`C(IM0g5cgWpaPB3i#=re6!ud`V!VJP%}! z+e&HWZD4<(O`wXuoUgGj%rgvjQzPd`TO<2v+jL8sWt#Pd`9D)1^D^Mm9y5J5H?~T>jO8n$%KmnWlJ9)zD_4141H#7Kk;OY07Hv zI>$lVGiw*y9A~`mfc$_q@%zPYn$2P&-<4@ixs;Xia=9D$`wmhgnRe_>@MmwKqsd+} zDYe8*r-wJ-yB7!{pE5&)TbhT687Ze}$>-AfL?AHK-^Hi*)dLEcRT?elD9gwcDwMg+ zh?t*^V;8ZxTvxH1_8V3Tv-R=1Ak9|3B)bIs?d{0-UJy|k9M9kY3CHuJ8$kw3p> zEy`Y(*UY-wdyGsH>xG;IYWa%Tx6x(77HDXCoHX8D^*@L{F1FeXw)LP~| zJ((N{CeH=_0n%C}nL5Ol(Hzj7&<{gD?x;=YqnRE`z{h(MT;rV=U8TJjB#}J9l;w@W zBjF~OhPnC@Vx!zl&JP3v6A`ukldMFCU>~Y7olf3WZU!v=w>|^nguBDfVmWh&`^vWh zW>s74fL3MJ5Wj+vc8d47d#9&6*2bOw+VWCm2sH+Xr@@+vx_jEHx*mFmHbuj0Z-@q= zqHvTaIV(_yerI^5fDS{{(nUm)tfdU_=zYc9W_f;#uthAd83UOZ$7qNXi4==Jr^nc1zd5uG?N7NS5TP@*9Ni z@H-x<87Jm)EvZ#X`@l#a>-Bp#`ASJv`97IJ|H+1PFIg*7hkis(AbKH2?VCIt`*=;r z($r>X_PbOl^#jt?N~XbmxK64eXDO*M?c_uEJ({E{*Z^@Z56pjtzFbeZK9r{gGq5W3gkjk66` z{Ky-g8=D)Qdmwjs{tnZ8+g4A8vWzQdXd1aR_H-;6;|@EfJ;uD1<2)_x&8!-W)>7Zz z*}c(!TUkh-Wan^$xHU`;kS0pV3}QXDP$${`K%RWBZ(wMSn2oDq2D=W-OaU zu0K3u{RX)o>0ugiOS%8J`s{1!Z!(rBEBgcEz;NwRqEQ#6nXYU*whPmMUP@{ZS@1V- zxGG9{ft^YRas=&W)??@TI6D@xhnvWEU{Z~euK^9vEw?1Hh?`_KY~w%BdTXdWaxl>6 z2eYyKeQ}Mpn=W2Y>VMac)l3!7@?E%jtOIdUSE(b^Q))jwkZB8jqR{oR-#Zn1yGN*T z*dv?=y{^UDMOWq}eVN)$&L_TOe>fy?Rr-lol2sT<&l2s)-eeAO9yRbriuYIZy4{uB zR_AD^&3V$<#(BWG!P(QX(LTw(+7@cbT{;F@lS$uzoMgo5w?=)0340| za_>M3>{$;Wnov6I(QOr`h;=c47y(Gu&SZj8BCy#1*f-t3SgMTm?`G5hx+2pTYm3)` zAvd1L0w-+(xOXqfhm?oJEW`*krW?>^Sj98QB=CsmBfk2kd{p_5$ROis5j$3OFzUUA zjXVeYIG=$wbXS=|=qN-y({^Sj+ligUo?tsN^XLFD!q$)#sC_^sq}esx6n=y_UE>!& ziKoRW!auOY2XMcz8pw1Sy_iuWk^W>SusgB)zl?fL@bD*5xs}~gL|{ZCuiNy)j6#SjOO7^d!RSlUNbu+DRMx>z3{BC zm4>_gS!S`4<+^k8urkp14u>*xj;TCegg8qltA?Higf$^6Wm`5bjN!%RK zq1~=qt=pizCot?;pd7sQKLYyRckgcBf6@Tx^J~;qrUu)HSxyh4su8cDH)vqZjSgIv zUm?DH6ZM+bG9m*=QThOx0;C(4++5y-DEjM48}c#LfT@mAxB~kt(;8#nXmSR2cJ}~F zY9skKb($&zo3IO04Qt9*nX$B$T8iE^5^Ycmap-I41nhGE3$(@w)L?Q5_P>S@m7q7y z1KqI`{4iG0o0;isFdM?w#mL-S4QE-myI^WOq%uM_@4`@Ac?`9RCtgd||3?IqBe~&Xie<2rVD^Bywfshfr2uS?ETm z2NwA1c_~kX*REn}-21#`rBaHHuExh{>gjsvH|kz$ z<_c%n&Qv{RxAf9i*Ov(3p(P4!kGRME6!%_J)6mFqc39ZGY!$bS4lHwOI_ea(wb@vXsAYHb0R@;0qI~l@L&f^ z+oigJ4uQdeu3&$@>reHC`V8K&o^$TWu1C(6&M}TK$1jdnj@$NZ`$zjdo89`<`p)vy z9Bk=nxnv$-9&avX`lDb={@%PJIjes@%HEVUFKaD0wHD?RHk-E}RYun+T#UXJJ1uT{ zbWr%1;Fj8U>^CAK(A~e(x7@!ru!?w0kK@mYtHjs*XQnSPOIqx$<1Xtea9#6s@V8d( z&=2`x+P@68psK-!po6*_!bT>Ah?ZA^pZcTp3w99(k>9DY@RJz~q*6P2$Wvc}_qkhx zUf3E7$IaoVUYAUxVz4ru3Ay@>d;>J1i-Ep@^H>j?siXrbVJz@;48T7;%#e(Rz6A7y zX@pk^fp6MB@U8lrp3I~&mDsuL5H<$m_jP6vblQ2w!7O6O1EJT&{~??ZHX>f_Z+;hc ziw-jRbbtCTH5MK`M=(-02lC`TdOB9{NqPw7Ah(jSq)bGSRmjFbHGEIrM^t|(*29O8 zT5>dT1+hQD%0zIl?}A^*AM!)_DzxHJd70b#ODHMfa~cG(6B+r%G=Oq zcgri}sd6`YqC8k0f#>QW;=3}gwg7g=1bHd0KbB8}!FUF+8acTX5HCcz0=}Pt9a;>& zVpZhx@)O)q8z`7xfYit;f648Ta!r{>bPS6)7IKr%reO2F38GXmRCBrVa}KA(5!A5Q-1i;UZxI69hjPw zw)lr9`>i>~_m!DtXd77}?sUSYVr3GB$1DxM8PrGfJL{loz(RPdTqP5j{ag((Ofyh; z$n+o#e#JS`+SJso;CTKJW2`mP+0S2(yvudbwhM|2{Sg|0NW$&<^4g|i5G1H2e+NFg ztu-~YYqf&5nAnp;)VVU+zu8^YG2I$xsc*S&b=vK&!M^!{=>$g~V!|+*6=QB;Ke`v0 zPc#NX!hG7w?13*>SD_bnm-=vjF!iYBN_QYQZF3I?_VonB_nq`K@g4J*40M)5fWCMe z_;3%gIsyyQ{6 zM_M5=b}80h=L7wege^3K-pYJpD`EAkv3OSu1(w4!@saS8{|+Ax9`optKouQF)uUp8 z#QKn23x4(tpqVMk6opq#$!j62nes%%06d>LN!C~0N?&Ck{&+_+{0@7yqcHWa40KM`SFdHMVgSMNv zKv*%8&;W^L7I_XF@pZ6w+LT&O9jDI0$1a<)Q6=f6^b0x;(n+D$$8k%z?_7DlD_@m2 zaW8?7yarl#B(s-pMZbbh{f-s9B$6l1STT4BT*l zkH@O?2j4fuUsy>eD`ZpVs9 zGy7xgxhLCrn_>;KwXhj&PwcxLubhg@>=wM8;m=iHS{T?OUxcRXiZxpwy%9J9x4^+$ zjnCpY2`Ru)86!>3&Fm09yHf$@(}Q7EaXYDGH@1}68+FBd*m2- z3Vhe*N{sZcKMn0M!GFR3uYZ>Ru78f->bnetfsfu}-Z0-m?>+BO-`_r`FBN`hW@%4g zfc#Vr0}A1GU=gZVr6EA1I|As&WHOy>O}&Jbe3mW?MC1j`ab^Rv1F_|$nNmzmk zuLBJ_mnp}7L)oL*Q_NImAQQtp1BUcItY7_3Z$g`Xplz^t$}kClW-OpT(i5>(d!O11 zER~v+4SNU&$QERItO+aw0^la)1|psVnZ={T^3#RA4%UhmTH?L;iu)Gy{Bl0TedkWTDCOMD$mMST9MZSV&wvIKp4e>$G z4IMEri_o3WrfME)`eV0aw)VX)Ss$!N61fr^;B0X+tEtL%M!aE+a9H5PaM2>1272==z8~-9Rv~&i6+7YGffrB_ zD=)8@Va!|DL5--1<$d+j?i6y!H+qetOBg!&s02i z@00OKqCaC+*qnN;A|qlKHwRCFs~js$cZkEpK4`lk!0*%pd-@o#j5{k;ln9`|w!`r; zAZ_L_KX|f44{cy4;^3Vi3+L^#~oeSJdAg?JK zfY|$MA#&_2;OO4L_nfjD2)m0=3*CXK*a@1mF7Rci!s{_dsZ5LqYODz<4j|W)7s$ux z&7a68n*pi01TZB&NrzxB`~_=avvfoH zCJBKk_zM-_{!3Ws=J%OIwheHucd^&l*K7*=9lsBOJu!=I&Bnq@FlK7X%B_x7@J<|gC($jIrSRg|U1&UfEDFBQ6JMkLh z%?pf^Cy8CeQ6Q0jM`|SbD>;BL5_+t4e1wje2D#Xb-1Gnz{t{p||Due?-yMNTyakx& zWr0deDnEfgw-+|%2=tGOK{3;AN}PJW+Zv!De@V4jl6>2 zbn*jygv!J7UBdja9_IUHF<%);^+cO=q$7IzdxSN`G30j0QX6tSJgYMZ8Wz?Ylsp}Jhyij|GIVkicx443H{FrD zQe*@$V#h#2A7iJiBlH#gf#JtE592QdOsRdqi|!4~>^Hj31Dcqk zT*AD?31qRRU_od^oQAyD#ClB}FngyGDjIJy;vc0Uk%>P09x2{JueyaAZx8I@Jm{;> z&{9DdasR_w`T+E~<*?Z+5H?`;HbG5(N6%V-*q?uaOI(#Wg+4PtsQ??R6>{qVmeCKT z8*v%PSGRys6bUrvm#}4bfuo`o)+;8HwSi4s7WlnKf#>T&2?Ug2C2klrbz4QvhjwAi5eONe^fmXM9LFcN+#O(p>l<|uXKd8&%!%y!w+SL@;k82pAv5r8ovDx zfWsrA6lax4;vH&sHpZsiKtkq#X}lT7eJ~ciRq6o!_!}fB4z=mVI4I*vBI?44{hBfG zHqRm+qOT)xfmjV`Oh!&K(W;*y1*4Q`AR=M-A*EU{N{ zLmNL*UK7dCAjg37eGKjXQ9cXH_?*%lrE^0=WI>*5!povHdUvi;f;a?ASHX9N9&-ivat&=3Yg)WfQMLz z?0}qT2&3Z0o=G`aV+>Fc82Dviiw?A4Lb{`P_IE>ld(RbQm3?BsP`USnph};cCPHakd)MguH94w|+ z;323A56su{N3_p8tU>;RlHS66msJ{py{D%lpl+Ul<)91V)_MSWUd3Gg3_gJEiVk=F z0z~z#kbRbDg?yNhVh}W71LAMx4c4#;Xvz=DQ0!-|hOID7seyVNh@;5MzrR2yhXK0ZbOo^z|A-Y-`Y}W z!*}vDSbjGlbCXf#2k84%l*U9u#VY@!_~nztUD>5fgw`2B%)*XJH6W!QfgL&zxp^q> z0$;-?r5F(Vo1#2DFgj{ccfEk2Yr#svpXdW4m6B-NyRw3}BI-i^3o#C}lvYF^@_}$iq79x*kHWvO`M_2hw*#<*eLSxgs}*ghVPWk&i=I%UhyM z2G;ok^sWowi5ZSm-h&yUJ~WZadNC2J8np@8be}x@(9MZG_eWnSqfINn?$&lUzA{uf(6Y{nXHGCMB za5C(Mtti1%AgabeYbL^PVl{eLKa8U1UU8;P*$A z);Gu)kGb^|;OzC0N5Sf_D8A!qJsOLc#6>mUWG{)>XA67v<;w^C$ZQ21=!fe=$Cdk-n%)Ph5qFF+8A?rep z@8jJbwCEaG{2PIyy$rV9Z-|`oE4wg)&4#YJ0=@ndPt8SpBMuuWk3m}>MK4?fxj6}& z>o=@}kHP4$2c>NT`&EzIzzLKc=N=P}c71B>_z^y6f-%36Gz63bvG8DP=L zupREA&6g{!$P}#CmxHah1@-ps0_&?tAEmR>TP)D!WhvJX$&&;~@#|g9ym+Ryy2oR_DOR=nYw5 zU@?lY!g^vJV8Wi%bhPnzC59{kXiBvzeZ-Wb&ZLqm>=R0ray~ zC}Sf?&|GNivB>#O=+sJ(tcS2e{E*toC|L*OY%jD;by$Ue!&2Cf5x)=SW)YBtlCU){ z!n$<8c3+Iq-2`pX80{YdJ0THIcwrBPL-HCxd!NN>`~oOPqmHmWK}7tsF!vwSau?#_Z&EVNO4R!KE2y5PxGF&B7R37HdthNbMJVRZg21 z(BSv+NrC;)1|!-AtatXt{IEXSs3v6MATXa*9-lN=g(`zp6l6aM>@i`GcP+|#5mrG4 zG*u?v16f0RMj~ArGw?xpPadSA0Cnj>3uWP}j$1OU%U{qQ3UZwSt|bccq%w06NX>>c z45+W-=o_if{g2RwyCM6rXwR}3!D`{&BxsnXNI4ztl>&ww8tf!D@fI(}ArTTtpqJ%9 zu75zX863Za?$5@%RGuC)jwr|pftC_5zTCrDZH6S+(31tot;#l6A5vHiwN5}fa#5-; zuog&^r3u>l1I|XGmee_-4O&Jiw624o3B1s=4%B!$N^HRs*(hx+^jj(9H5|1Pg<}El zjYdgz_`4WnBBoHjB2c3=q$mutKDDM))*m%B2C|Zia^&Eg%3Vg|DFWYoXwNj%c^-O< z6GuNWKIP&m9qNq18I?K9irU4?AREDWYXUx%Q0j2hYZ&q$jZq^GH4u&x)q;GM!WmW5 zM-{e$8&4WgrvmPzRP=XPtS+SQMv44*tE!89=+ipnmcsieyj8~AgA3nffRt-+O^>G| zQK~4sF&KY^;2VzPP@Gj6nY4I^dY^h#Wpl!Y9L@SfVL=x+(s4Nt;Jn> z+-E4fL%l1wFfH|sJaWV1PPI)iAfu1U$fe5B zU$*dn1g0WyYWdatsHGNhr1l04{*v)N3RfB2qw|H8nmzuVEznZ%GMgSWZ{;#%x`k@PRt(Fj*OZZh=kSIJ>pHh$iS93W0zkh1| zsaMo`FM6w5$MXNxYf&wzWR!f4-GJK1U)U!op`(F(em8IxS>itF6)u+@N*8HEmb7*^&PfP802J{yLj#UXz z|5Cqt{9SZLJy!dd`j@`2byc}hKf#d7pu+xE)PvM>BEIUm5PbE8X$9f9=v#D6?aS(Y zsw}B*Qd3obwfIY0cvj7snjiK2|K1ynWA(Wry`WxEbEy6wRCtf7E&i8lp)eQfz3M&c zubQ&v|L#@ar~aj$`=8Vo$(<_Ws`RS0TO|Lggsal5wt#x1N~C&Cy+=J?biewmN*16A z3cnmuF6sm7d(?8O&#GsNdbN5^eWO|j>KPHoYI;RFM7>{?clC_=q}nP) >i$hVrB zntQdSims?FtG-+9i$xM%bVhw+5o6u|uK(|UwP&m4QtwvpQhR>UU#gs_?^Zu*EfmR~ zdPh-@DU!G%sZ^z0mAfK&FOpzyVNX!CpM?IQ%D*4IUzO_sG`-qS#T z_rFh3n(CeE_kXFm|L?s;+;~N)7Uki8ssAsHqBPZ UserAPIKeyAuth: + try: + print(f"api_key: {api_key}") + if api_key == "": + raise Exception( + f"CustomAuth - Malformed API Key passed in. Ensure Key has `Bearer` prefix" + ) + if api_key == f"{os.getenv('PROXY_MASTER_KEY')}-1234": + return UserAPIKeyAuth(api_key=api_key) + raise Exception + except Exception as e: + if len(str(e)) > 0: + raise e + raise Exception("Failed custom auth") diff --git a/tests/proxy_unit_tests/test_configs/custom_callbacks.py b/tests/proxy_unit_tests/test_configs/custom_callbacks.py new file mode 100644 index 000000000..42f88b5d1 --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/custom_callbacks.py @@ -0,0 +1,121 @@ +from litellm.integrations.custom_logger import CustomLogger +import inspect +import litellm + + +class testCustomCallbackProxy(CustomLogger): + def __init__(self): + self.success: bool = False # type: ignore + self.failure: bool = False # type: ignore + self.async_success: bool = False # type: ignore + self.async_success_embedding: bool = False # type: ignore + self.async_failure: bool = False # type: ignore + self.async_failure_embedding: bool = False # type: ignore + + self.async_completion_kwargs = None # type: ignore + self.async_embedding_kwargs = None # type: ignore + self.async_embedding_response = None # type: ignore + + self.async_completion_kwargs_fail = None # type: ignore + self.async_embedding_kwargs_fail = None # type: ignore + + self.streaming_response_obj = None # type: ignore + blue_color_code = "\033[94m" + reset_color_code = "\033[0m" + print(f"{blue_color_code}Initialized LiteLLM custom logger") + try: + print(f"Logger Initialized with following methods:") + methods = [ + method + for method in dir(self) + if inspect.ismethod(getattr(self, method)) + ] + + # Pretty print the methods + for method in methods: + print(f" - {method}") + print(f"{reset_color_code}") + except Exception: + pass + + def log_pre_api_call(self, model, messages, kwargs): + print(f"Pre-API Call") + + def log_post_api_call(self, kwargs, response_obj, start_time, end_time): + print(f"Post-API Call") + + def log_stream_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Stream") + + def log_success_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Success") + self.success = True + + def log_failure_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Failure") + self.failure = True + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Async success") + self.async_success = True + print("Value of async success: ", self.async_success) + print("\n kwargs: ", kwargs) + if ( + kwargs.get("model") == "azure-embedding-model" + or kwargs.get("model") == "ada" + ): + print("Got an embedding model", kwargs.get("model")) + print("Setting embedding success to True") + self.async_success_embedding = True + print("Value of async success embedding: ", self.async_success_embedding) + self.async_embedding_kwargs = kwargs + self.async_embedding_response = response_obj + if kwargs.get("stream") == True: + self.streaming_response_obj = response_obj + + self.async_completion_kwargs = kwargs + + model = kwargs.get("model", None) + messages = kwargs.get("messages", None) + user = kwargs.get("user", None) + + # Access litellm_params passed to litellm.completion(), example access `metadata` + litellm_params = kwargs.get("litellm_params", {}) + metadata = litellm_params.get( + "metadata", {} + ) # headers passed to LiteLLM proxy, can be found here + + # Calculate cost using litellm.completion_cost() + cost = litellm.completion_cost(completion_response=response_obj) + response = response_obj + # tokens used in response + usage = response_obj["usage"] + + print("\n\n in custom callback vars my custom logger, ", vars(my_custom_logger)) + + print( + f""" + Model: {model}, + Messages: {messages}, + User: {user}, + Usage: {usage}, + Cost: {cost}, + Response: {response} + Proxy Metadata: {metadata} + """ + ) + return + + async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): + print(f"On Async Failure") + self.async_failure = True + print("Value of async failure: ", self.async_failure) + print("\n kwargs: ", kwargs) + if kwargs.get("model") == "text-embedding-ada-002": + self.async_failure_embedding = True + self.async_embedding_kwargs_fail = kwargs + + self.async_completion_kwargs_fail = kwargs + + +my_custom_logger = testCustomCallbackProxy() diff --git a/tests/proxy_unit_tests/test_configs/test_bad_config.yaml b/tests/proxy_unit_tests/test_configs/test_bad_config.yaml new file mode 100644 index 000000000..7c802a840 --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_bad_config.yaml @@ -0,0 +1,21 @@ +model_list: + - model_name: gpt-3.5-turbo + litellm_params: + api_key: bad-key + model: gpt-3.5-turbo + - model_name: working-azure-gpt-3.5-turbo + litellm_params: + model: azure/chatgpt-v-2 + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + - model_name: azure-gpt-3.5-turbo + litellm_params: + model: azure/chatgpt-v-2 + api_base: os.environ/AZURE_API_BASE + api_key: bad-key + - model_name: azure-embedding + litellm_params: + model: azure/azure-embedding-model + api_base: os.environ/AZURE_API_BASE + api_key: bad-key + \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_configs/test_cloudflare_azure_with_cache_config.yaml b/tests/proxy_unit_tests/test_configs/test_cloudflare_azure_with_cache_config.yaml new file mode 100644 index 000000000..c3c3cb1c3 --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_cloudflare_azure_with_cache_config.yaml @@ -0,0 +1,17 @@ +model_list: + - model_name: azure-cloudflare + litellm_params: + model: azure/chatgpt-v-2 + api_base: https://gateway.ai.cloudflare.com/v1/0399b10e77ac6668c80404a5ff49eb37/litellm-test/azure-openai/openai-gpt-4-test-v-1 + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + +litellm_settings: + set_verbose: True + cache: True # set cache responses to True + cache_params: # set cache params for s3 + type: s3 + s3_bucket_name: litellm-my-test-bucket-2 # AWS Bucket Name for S3 + s3_region_name: us-east-1 # AWS Region Name for S3 + s3_aws_access_key_id: os.environ/AWS_ACCESS_KEY_ID # AWS Access Key ID for S3 + s3_aws_secret_access_key: os.environ/AWS_SECRET_ACCESS_KEY # AWS Secret Access Key for S3 \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_configs/test_config.yaml b/tests/proxy_unit_tests/test_configs/test_config.yaml new file mode 100644 index 000000000..a711b65ea --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_config.yaml @@ -0,0 +1,28 @@ +general_settings: + database_url: os.environ/DATABASE_URL + master_key: os.environ/PROXY_MASTER_KEY +litellm_settings: + drop_params: true + success_callback: ["langfuse"] + +model_list: +- litellm_params: + api_base: https://my-endpoint-europe-berri-992.openai.azure.com/ + api_key: os.environ/AZURE_EUROPE_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://my-endpoint-canada-berri992.openai.azure.com + api_key: os.environ/AZURE_CANADA_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://openai-france-1234.openai.azure.com + api_key: os.environ/AZURE_FRANCE_API_KEY + model: azure/gpt-turbo + model_name: azure-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + model_name: test_openai_models diff --git a/tests/proxy_unit_tests/test_configs/test_config_custom_auth.yaml b/tests/proxy_unit_tests/test_configs/test_config_custom_auth.yaml new file mode 100644 index 000000000..33088bd1c --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_config_custom_auth.yaml @@ -0,0 +1,11 @@ +model_list: + - model_name: "openai-model" + litellm_params: + model: "gpt-3.5-turbo" + +litellm_settings: + drop_params: True + set_verbose: True + +general_settings: + custom_auth: custom_auth.user_api_key_auth \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_configs/test_config_no_auth.yaml b/tests/proxy_unit_tests/test_configs/test_config_no_auth.yaml new file mode 100644 index 000000000..1c5ddf226 --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_config_no_auth.yaml @@ -0,0 +1,127 @@ +model_list: +- litellm_params: + api_base: https://my-endpoint-europe-berri-992.openai.azure.com/ + api_key: os.environ/AZURE_EUROPE_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://my-endpoint-canada-berri992.openai.azure.com + api_key: os.environ/AZURE_CANADA_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://gateway.ai.cloudflare.com/v1/0399b10e77ac6668c80404a5ff49eb37/litellm-test/azure-openai/openai-gpt-4-test-v-1 + api_key: os.environ/AZURE_API_KEY + model: azure/chatgpt-v-2 + model_name: azure-cloudflare-model +- litellm_params: + api_base: https://openai-france-1234.openai.azure.com + api_key: os.environ/AZURE_FRANCE_API_KEY + model: azure/gpt-turbo + model_name: azure-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 56f1bd94-3b54-4b67-9ea2-7c70e9a3a709 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 4d1ee26c-abca-450c-8744-8e87fd6755e9 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 00e19c0f-b63d-42bb-88e9-016fb0c60764 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 79fc75bf-8e1b-47d5-8d24-9365a854af03 + model_name: test_openai_models +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_info: + mode: embedding + model_name: azure-embedding-model +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 55848c55-4162-40f9-a6e2-9a722b9ef404 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 34339b1e-e030-4bcc-a531-c48559f10ce4 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: f6f74e14-ac64-4403-9365-319e584dcdc5 + model_name: test_openai_models +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 9b1ef341-322c-410a-8992-903987fef439 + model_name: test_openai_models +- litellm_params: + model: dall-e-3 + model_info: + mode: image_generation + model_name: dall-e-3 +- litellm_params: + api_base: os.environ/AZURE_SWEDEN_API_BASE + api_key: os.environ/AZURE_SWEDEN_API_KEY + api_version: 2023-12-01-preview + model: azure/dall-e-3-test + model_info: + mode: image_generation + model_name: dall-e-3 +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-06-01-preview + model: azure/ + model_info: + mode: image_generation + model_name: dall-e-2 +- litellm_params: + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: 2023-07-01-preview + model: azure/azure-embedding-model + model_info: + base_model: text-embedding-ada-002 + mode: embedding + model_name: text-embedding-ada-002 +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 34cb2419-7c63-44ae-a189-53f1d1ce5953 + model_name: test_openai_models +- litellm_params: + model: amazon.titan-embed-text-v1 + model_name: amazon-embeddings +- litellm_params: + model: gpt-3.5-turbo + model_info: + description: this is a test openai model + id: 753dca9a-898d-4ff7-9961-5acf7cdf38cf + model_name: test_openai_models diff --git a/tests/proxy_unit_tests/test_configs/test_custom_logger.yaml b/tests/proxy_unit_tests/test_configs/test_custom_logger.yaml new file mode 100644 index 000000000..145c618ed --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_custom_logger.yaml @@ -0,0 +1,26 @@ +model_list: + - model_name: Azure OpenAI GPT-4 Canada + litellm_params: + model: azure/chatgpt-v-2 + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: "2023-07-01-preview" + model_info: + mode: chat + input_cost_per_token: 0.0002 + id: gm + - model_name: azure-embedding-model + litellm_params: + model: azure/azure-embedding-model + api_base: os.environ/AZURE_API_BASE + api_key: os.environ/AZURE_API_KEY + api_version: "2023-07-01-preview" + model_info: + mode: embedding + input_cost_per_token: 0.002 + id: hello + +litellm_settings: + drop_params: True + set_verbose: True + callbacks: custom_callbacks.my_custom_logger \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_configs/test_guardrails_config.yaml b/tests/proxy_unit_tests/test_configs/test_guardrails_config.yaml new file mode 100644 index 000000000..f09ff9d1b --- /dev/null +++ b/tests/proxy_unit_tests/test_configs/test_guardrails_config.yaml @@ -0,0 +1,32 @@ + + +model_list: +- litellm_params: + api_base: https://my-endpoint-europe-berri-992.openai.azure.com/ + api_key: os.environ/AZURE_EUROPE_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://my-endpoint-canada-berri992.openai.azure.com + api_key: os.environ/AZURE_CANADA_API_KEY + model: azure/gpt-35-turbo + model_name: azure-model +- litellm_params: + api_base: https://openai-france-1234.openai.azure.com + api_key: os.environ/AZURE_FRANCE_API_KEY + model: azure/gpt-turbo + model_name: azure-model + + + +litellm_settings: + guardrails: + - prompt_injection: + callbacks: [lakera_prompt_injection, detect_prompt_injection] + default_on: true + - hide_secrets: + callbacks: [hide_secrets] + default_on: true + - moderations: + callbacks: [openai_moderations] + default_on: false \ No newline at end of file diff --git a/tests/proxy_unit_tests/test_custom_callback_input.py b/tests/proxy_unit_tests/test_custom_callback_input.py new file mode 100644 index 000000000..d98c7619e --- /dev/null +++ b/tests/proxy_unit_tests/test_custom_callback_input.py @@ -0,0 +1,359 @@ +### What this tests #### +## This test asserts the type of data passed into each method of the custom callback handler +import asyncio +import inspect +import os +import sys +import time +import traceback +import uuid +from datetime import datetime + +import pytest +from pydantic import BaseModel + +sys.path.insert(0, os.path.abspath("../..")) +from typing import List, Literal, Optional, Union +from unittest.mock import AsyncMock, MagicMock, patch + +import litellm +from litellm import Cache, completion, embedding +from litellm.integrations.custom_logger import CustomLogger +from litellm.types.utils import LiteLLMCommonStrings + +# Test Scenarios (test across completion, streaming, embedding) +## 1: Pre-API-Call +## 2: Post-API-Call +## 3: On LiteLLM Call success +## 4: On LiteLLM Call failure +## 5. Caching + +# Test models +## 1. OpenAI +## 2. Azure OpenAI +## 3. Non-OpenAI/Azure - e.g. Bedrock + +# Test interfaces +## 1. litellm.completion() + litellm.embeddings() +## refer to test_custom_callback_input_router.py for the router + proxy tests + + +class CompletionCustomHandler( + CustomLogger +): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class + """ + The set of expected inputs to a custom handler for a + """ + + # Class variables or attributes + def __init__(self): + self.errors = [] + self.states: List[ + Literal[ + "sync_pre_api_call", + "async_pre_api_call", + "post_api_call", + "sync_stream", + "async_stream", + "sync_success", + "async_success", + "sync_failure", + "async_failure", + ] + ] = [] + + def log_pre_api_call(self, model, messages, kwargs): + try: + self.states.append("sync_pre_api_call") + ## MODEL + assert isinstance(model, str) + ## MESSAGES + assert isinstance(messages, list) + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + ### METADATA + metadata_value = kwargs["litellm_params"].get("metadata") + assert metadata_value is None or isinstance(metadata_value, dict) + if metadata_value is not None: + if litellm.turn_off_message_logging is True: + assert ( + metadata_value["raw_request"] + is LiteLLMCommonStrings.redacted_by_litellm.value + ) + else: + assert "raw_request" not in metadata_value or isinstance( + metadata_value["raw_request"], str + ) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_post_api_call(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("post_api_call") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert end_time == None + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert isinstance(kwargs["input"], (list, dict, str)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert ( + isinstance( + kwargs["original_response"], + (str, litellm.CustomStreamWrapper, BaseModel), + ) + or inspect.iscoroutine(kwargs["original_response"]) + or inspect.isasyncgen(kwargs["original_response"]) + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_stream") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance(response_obj, litellm.ModelResponse) + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) and isinstance( + kwargs["messages"][0], dict + ) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert ( + isinstance(kwargs["input"], list) + and isinstance(kwargs["input"][0], dict) + ) or isinstance(kwargs["input"], (dict, str)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert ( + isinstance( + kwargs["original_response"], (str, litellm.CustomStreamWrapper) + ) + or inspect.isasyncgen(kwargs["original_response"]) + or inspect.iscoroutine(kwargs["original_response"]) + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + print(f"\n\nkwargs={kwargs}\n\n") + print( + json.dumps(kwargs, default=str) + ) # this is a test to confirm no circular references are in the logging object + + self.states.append("sync_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance( + response_obj, + ( + litellm.ModelResponse, + litellm.EmbeddingResponse, + litellm.ImageResponse, + ), + ) + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) and isinstance( + kwargs["messages"][0], dict + ) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["litellm_params"]["api_base"], str) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert ( + isinstance(kwargs["input"], list) + and ( + isinstance(kwargs["input"][0], dict) + or isinstance(kwargs["input"][0], str) + ) + ) or isinstance(kwargs["input"], (dict, str)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert isinstance( + kwargs["original_response"], + (str, litellm.CustomStreamWrapper, BaseModel), + ), "Original Response={}. Allowed types=[str, litellm.CustomStreamWrapper, BaseModel]".format( + kwargs["original_response"] + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + assert isinstance(kwargs["response_cost"], (float, type(None))) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + def log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + print(f"kwargs: {kwargs}") + self.states.append("sync_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) and isinstance( + kwargs["messages"][0], dict + ) + + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["litellm_params"]["metadata"], Optional[dict]) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert ( + isinstance(kwargs["input"], list) + and isinstance(kwargs["input"][0], dict) + ) or isinstance(kwargs["input"], (dict, str)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert ( + isinstance( + kwargs["original_response"], (str, litellm.CustomStreamWrapper) + ) + or kwargs["original_response"] == None + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_pre_api_call(self, model, messages, kwargs): + try: + self.states.append("async_pre_api_call") + ## MODEL + assert isinstance(model, str) + ## MESSAGES + assert isinstance(messages, list) and isinstance(messages[0], dict) + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) and isinstance( + kwargs["messages"][0], dict + ) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + except Exception as e: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + try: + print( + "in async_log_success_event", kwargs, response_obj, start_time, end_time + ) + self.states.append("async_success") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert isinstance( + response_obj, + ( + litellm.ModelResponse, + litellm.EmbeddingResponse, + litellm.TextCompletionResponse, + ), + ) + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["litellm_params"]["api_base"], str) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["completion_start_time"], datetime) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert isinstance(kwargs["input"], (list, dict, str)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert ( + isinstance( + kwargs["original_response"], (str, litellm.CustomStreamWrapper) + ) + or inspect.isasyncgen(kwargs["original_response"]) + or inspect.iscoroutine(kwargs["original_response"]) + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool) + assert isinstance(kwargs["response_cost"], (float, type(None))) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) + + async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): + try: + self.states.append("async_failure") + ## START TIME + assert isinstance(start_time, datetime) + ## END TIME + assert isinstance(end_time, datetime) + ## RESPONSE OBJECT + assert response_obj == None + ## KWARGS + assert isinstance(kwargs["model"], str) + assert isinstance(kwargs["messages"], list) + assert isinstance(kwargs["optional_params"], dict) + assert isinstance(kwargs["litellm_params"], dict) + assert isinstance(kwargs["start_time"], (datetime, type(None))) + assert isinstance(kwargs["stream"], bool) + assert isinstance(kwargs["user"], (str, type(None))) + assert isinstance(kwargs["input"], (list, str, dict)) + assert isinstance(kwargs["api_key"], (str, type(None))) + assert ( + isinstance( + kwargs["original_response"], (str, litellm.CustomStreamWrapper) + ) + or inspect.isasyncgen(kwargs["original_response"]) + or inspect.iscoroutine(kwargs["original_response"]) + or kwargs["original_response"] == None + ) + assert isinstance(kwargs["additional_args"], (dict, type(None))) + assert isinstance(kwargs["log_event_type"], str) + except Exception: + print(f"Assertion Error: {traceback.format_exc()}") + self.errors.append(traceback.format_exc()) diff --git a/tests/local_testing/test_deployed_proxy_keygen.py b/tests/proxy_unit_tests/test_deployed_proxy_keygen.py similarity index 100% rename from tests/local_testing/test_deployed_proxy_keygen.py rename to tests/proxy_unit_tests/test_deployed_proxy_keygen.py diff --git a/tests/local_testing/test_jwt.py b/tests/proxy_unit_tests/test_jwt.py similarity index 98% rename from tests/local_testing/test_jwt.py rename to tests/proxy_unit_tests/test_jwt.py index ad929ba4f..c07394962 100644 --- a/tests/local_testing/test_jwt.py +++ b/tests/proxy_unit_tests/test_jwt.py @@ -147,7 +147,7 @@ async def test_valid_invalid_token(audience): # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) payload = { "sub": "user123", @@ -175,7 +175,7 @@ async def test_valid_invalid_token(audience): # INVALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) payload = { "sub": "user123", @@ -264,7 +264,7 @@ def team_token_tuple(): # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) team_id = f"team123_{uuid.uuid4()}" payload = { @@ -349,7 +349,7 @@ async def test_team_token_output(prisma_client, audience): # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) team_id = f"team123_{uuid.uuid4()}" payload = { @@ -542,7 +542,7 @@ async def aaaatest_user_token_output( # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) team_id = f"team123_{uuid.uuid4()}" user_id = f"user123_{uuid.uuid4()}" @@ -936,7 +936,7 @@ async def test_allow_access_by_email(public_jwt_key, user_email, should_work): # VALID TOKEN ## GENERATE A TOKEN # Assuming the current time is in UTC - expiration_time = int((datetime.utcnow() + timedelta(minutes=10)).timestamp()) + expiration_time = int((datetime.now() + timedelta(minutes=10)).timestamp()) team_id = f"team123_{uuid.uuid4()}" payload = { diff --git a/tests/local_testing/test_key_generate_dynamodb.py b/tests/proxy_unit_tests/test_key_generate_dynamodb.py similarity index 100% rename from tests/local_testing/test_key_generate_dynamodb.py rename to tests/proxy_unit_tests/test_key_generate_dynamodb.py diff --git a/tests/local_testing/test_key_generate_prisma.py b/tests/proxy_unit_tests/test_key_generate_prisma.py similarity index 100% rename from tests/local_testing/test_key_generate_prisma.py rename to tests/proxy_unit_tests/test_key_generate_prisma.py diff --git a/tests/proxy_unit_tests/test_model_response_typing/server.py b/tests/proxy_unit_tests/test_model_response_typing/server.py new file mode 100644 index 000000000..80dbc33af --- /dev/null +++ b/tests/proxy_unit_tests/test_model_response_typing/server.py @@ -0,0 +1,23 @@ +# #### What this tests #### +# # This tests if the litellm model response type is returnable in a flask app + +# import sys, os +# import traceback +# from flask import Flask, request, jsonify, abort, Response +# sys.path.insert(0, os.path.abspath('../../..')) # Adds the parent directory to the system path + +# import litellm +# from litellm import completion + +# litellm.set_verbose = False + +# app = Flask(__name__) + +# @app.route('/') +# def hello(): +# data = request.json +# return completion(**data) + +# if __name__ == '__main__': +# from waitress import serve +# serve(app, host='localhost', port=8080, threads=10) diff --git a/tests/proxy_unit_tests/test_model_response_typing/test.py b/tests/proxy_unit_tests/test_model_response_typing/test.py new file mode 100644 index 000000000..46bf5fbb4 --- /dev/null +++ b/tests/proxy_unit_tests/test_model_response_typing/test.py @@ -0,0 +1,14 @@ +# import requests, json + +# BASE_URL = 'http://localhost:8080' + +# def test_hello_route(): +# data = {"model": "claude-3-5-haiku-20241022", "messages": [{"role": "user", "content": "hey, how's it going?"}]} +# headers = {'Content-Type': 'application/json'} +# response = requests.get(BASE_URL, headers=headers, data=json.dumps(data)) +# print(response.text) +# assert response.status_code == 200 +# print("Hello route test passed!") + +# if __name__ == '__main__': +# test_hello_route() diff --git a/tests/local_testing/test_proxy_config_unit_test.py b/tests/proxy_unit_tests/test_proxy_config_unit_test.py similarity index 100% rename from tests/local_testing/test_proxy_config_unit_test.py rename to tests/proxy_unit_tests/test_proxy_config_unit_test.py diff --git a/tests/local_testing/test_proxy_custom_auth.py b/tests/proxy_unit_tests/test_proxy_custom_auth.py similarity index 100% rename from tests/local_testing/test_proxy_custom_auth.py rename to tests/proxy_unit_tests/test_proxy_custom_auth.py diff --git a/tests/local_testing/test_proxy_custom_logger.py b/tests/proxy_unit_tests/test_proxy_custom_logger.py similarity index 100% rename from tests/local_testing/test_proxy_custom_logger.py rename to tests/proxy_unit_tests/test_proxy_custom_logger.py diff --git a/tests/local_testing/test_proxy_encrypt_decrypt.py b/tests/proxy_unit_tests/test_proxy_encrypt_decrypt.py similarity index 95% rename from tests/local_testing/test_proxy_encrypt_decrypt.py rename to tests/proxy_unit_tests/test_proxy_encrypt_decrypt.py index 6db37e4c1..f9c3ff42d 100644 --- a/tests/local_testing/test_proxy_encrypt_decrypt.py +++ b/tests/proxy_unit_tests/test_proxy_encrypt_decrypt.py @@ -33,6 +33,7 @@ def test_encrypt_decrypt_with_master_key(): def test_encrypt_decrypt_with_salt_key(): os.environ["LITELLM_SALT_KEY"] = "sk-salt-key2222" + print(f"LITELLM_SALT_KEY: {os.environ['LITELLM_SALT_KEY']}") assert decrypt_value_helper(encrypt_value_helper("test")) == "test" assert decrypt_value_helper(encrypt_value_helper(10)) == 10 assert decrypt_value_helper(encrypt_value_helper(True)) is True diff --git a/tests/local_testing/test_proxy_exception_mapping.py b/tests/proxy_unit_tests/test_proxy_exception_mapping.py similarity index 100% rename from tests/local_testing/test_proxy_exception_mapping.py rename to tests/proxy_unit_tests/test_proxy_exception_mapping.py diff --git a/tests/local_testing/test_proxy_gunicorn.py b/tests/proxy_unit_tests/test_proxy_gunicorn.py similarity index 100% rename from tests/local_testing/test_proxy_gunicorn.py rename to tests/proxy_unit_tests/test_proxy_gunicorn.py diff --git a/tests/local_testing/test_proxy_pass_user_config.py b/tests/proxy_unit_tests/test_proxy_pass_user_config.py similarity index 100% rename from tests/local_testing/test_proxy_pass_user_config.py rename to tests/proxy_unit_tests/test_proxy_pass_user_config.py diff --git a/tests/local_testing/test_proxy_reject_logging.py b/tests/proxy_unit_tests/test_proxy_reject_logging.py similarity index 100% rename from tests/local_testing/test_proxy_reject_logging.py rename to tests/proxy_unit_tests/test_proxy_reject_logging.py diff --git a/tests/local_testing/test_proxy_routes.py b/tests/proxy_unit_tests/test_proxy_routes.py similarity index 100% rename from tests/local_testing/test_proxy_routes.py rename to tests/proxy_unit_tests/test_proxy_routes.py diff --git a/tests/local_testing/test_proxy_server.py b/tests/proxy_unit_tests/test_proxy_server.py similarity index 100% rename from tests/local_testing/test_proxy_server.py rename to tests/proxy_unit_tests/test_proxy_server.py diff --git a/tests/local_testing/test_proxy_server_caching.py b/tests/proxy_unit_tests/test_proxy_server_caching.py similarity index 100% rename from tests/local_testing/test_proxy_server_caching.py rename to tests/proxy_unit_tests/test_proxy_server_caching.py diff --git a/tests/local_testing/test_proxy_server_cost.py b/tests/proxy_unit_tests/test_proxy_server_cost.py similarity index 100% rename from tests/local_testing/test_proxy_server_cost.py rename to tests/proxy_unit_tests/test_proxy_server_cost.py diff --git a/tests/local_testing/test_proxy_server_keys.py b/tests/proxy_unit_tests/test_proxy_server_keys.py similarity index 100% rename from tests/local_testing/test_proxy_server_keys.py rename to tests/proxy_unit_tests/test_proxy_server_keys.py diff --git a/tests/local_testing/test_proxy_server_langfuse.py b/tests/proxy_unit_tests/test_proxy_server_langfuse.py similarity index 100% rename from tests/local_testing/test_proxy_server_langfuse.py rename to tests/proxy_unit_tests/test_proxy_server_langfuse.py diff --git a/tests/local_testing/test_proxy_server_spend.py b/tests/proxy_unit_tests/test_proxy_server_spend.py similarity index 100% rename from tests/local_testing/test_proxy_server_spend.py rename to tests/proxy_unit_tests/test_proxy_server_spend.py diff --git a/tests/local_testing/test_proxy_setting_guardrails.py b/tests/proxy_unit_tests/test_proxy_setting_guardrails.py similarity index 97% rename from tests/local_testing/test_proxy_setting_guardrails.py rename to tests/proxy_unit_tests/test_proxy_setting_guardrails.py index e5baa1fa8..b845f86b6 100644 --- a/tests/local_testing/test_proxy_setting_guardrails.py +++ b/tests/proxy_unit_tests/test_proxy_setting_guardrails.py @@ -45,6 +45,7 @@ def test_active_callbacks(client): print("response.status_code", response.status_code) json_response = response.json() + print(f"json_response={json_response}") _active_callbacks = json_response["litellm.callbacks"] expected_callback_names = [ diff --git a/tests/local_testing/test_proxy_token_counter.py b/tests/proxy_unit_tests/test_proxy_token_counter.py similarity index 100% rename from tests/local_testing/test_proxy_token_counter.py rename to tests/proxy_unit_tests/test_proxy_token_counter.py diff --git a/tests/local_testing/test_proxy_utils.py b/tests/proxy_unit_tests/test_proxy_utils.py similarity index 100% rename from tests/local_testing/test_proxy_utils.py rename to tests/proxy_unit_tests/test_proxy_utils.py diff --git a/tests/proxy_unit_tests/test_user_api_key_auth.py b/tests/proxy_unit_tests/test_user_api_key_auth.py new file mode 100644 index 000000000..f6becf070 --- /dev/null +++ b/tests/proxy_unit_tests/test_user_api_key_auth.py @@ -0,0 +1,389 @@ +# What is this? +## Unit tests for user_api_key_auth helper functions + +import os +import sys + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +from typing import Dict, List, Optional +from unittest.mock import MagicMock, patch, AsyncMock + +import pytest +from starlette.datastructures import URL + +import litellm +from litellm.proxy.auth.user_api_key_auth import user_api_key_auth + + +class Request: + def __init__(self, client_ip: Optional[str] = None, headers: Optional[dict] = None): + self.client = MagicMock() + self.client.host = client_ip + self.headers: Dict[str, str] = {} + + +@pytest.mark.parametrize( + "allowed_ips, client_ip, expected_result", + [ + (None, "127.0.0.1", True), # No IP restrictions, should be allowed + (["127.0.0.1"], "127.0.0.1", True), # IP in allowed list + (["192.168.1.1"], "127.0.0.1", False), # IP not in allowed list + ([], "127.0.0.1", False), # Empty allowed list, no IP should be allowed + (["192.168.1.1", "10.0.0.1"], "10.0.0.1", True), # IP in allowed list + ( + ["192.168.1.1"], + None, + False, + ), # Request with no client IP should not be allowed + ], +) +def test_check_valid_ip( + allowed_ips: Optional[List[str]], client_ip: Optional[str], expected_result: bool +): + from litellm.proxy.auth.auth_utils import _check_valid_ip + + request = Request(client_ip) + + assert _check_valid_ip(allowed_ips, request)[0] == expected_result # type: ignore + + +# test x-forwarder for is used when user has opted in + + +@pytest.mark.parametrize( + "allowed_ips, client_ip, expected_result", + [ + (None, "127.0.0.1", True), # No IP restrictions, should be allowed + (["127.0.0.1"], "127.0.0.1", True), # IP in allowed list + (["192.168.1.1"], "127.0.0.1", False), # IP not in allowed list + ([], "127.0.0.1", False), # Empty allowed list, no IP should be allowed + (["192.168.1.1", "10.0.0.1"], "10.0.0.1", True), # IP in allowed list + ( + ["192.168.1.1"], + None, + False, + ), # Request with no client IP should not be allowed + ], +) +def test_check_valid_ip_sent_with_x_forwarded_for( + allowed_ips: Optional[List[str]], client_ip: Optional[str], expected_result: bool +): + from litellm.proxy.auth.auth_utils import _check_valid_ip + + request = Request(client_ip, headers={"X-Forwarded-For": client_ip}) + + assert _check_valid_ip(allowed_ips, request, use_x_forwarded_for=True)[0] == expected_result # type: ignore + + +@pytest.mark.asyncio +async def test_check_blocked_team(): + """ + cached valid_token obj has team_blocked = true + + cached team obj has team_blocked = false + + assert team is not blocked + """ + import asyncio + import time + + from fastapi import Request + from starlette.datastructures import URL + + from litellm.proxy._types import ( + LiteLLM_TeamTable, + LiteLLM_TeamTableCachedObj, + UserAPIKeyAuth, + ) + from litellm.proxy.auth.user_api_key_auth import user_api_key_auth + from litellm.proxy.proxy_server import hash_token, user_api_key_cache + + _team_id = "1234" + user_key = "sk-12345678" + + valid_token = UserAPIKeyAuth( + team_id=_team_id, + team_blocked=True, + token=hash_token(user_key), + last_refreshed_at=time.time(), + ) + await asyncio.sleep(1) + team_obj = LiteLLM_TeamTableCachedObj( + team_id=_team_id, blocked=False, last_refreshed_at=time.time() + ) + hashed_token = hash_token(user_key) + print(f"STORING TOKEN UNDER KEY={hashed_token}") + user_api_key_cache.set_cache(key=hashed_token, value=valid_token) + user_api_key_cache.set_cache(key="team_id:{}".format(_team_id), value=team_obj) + + setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "prisma_client", "hello-world") + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + await user_api_key_auth(request=request, api_key="Bearer " + user_key) + + +@pytest.mark.parametrize( + "user_role, expected_role", + [ + ("app_user", "internal_user"), + ("internal_user", "internal_user"), + ("proxy_admin_viewer", "proxy_admin_viewer"), + ], +) +def test_returned_user_api_key_auth(user_role, expected_role): + from litellm.proxy._types import LiteLLM_UserTable, LitellmUserRoles + from litellm.proxy.auth.user_api_key_auth import _return_user_api_key_auth_obj + from datetime import datetime + + new_obj = _return_user_api_key_auth_obj( + user_obj=LiteLLM_UserTable( + user_role=user_role, user_id="", max_budget=None, user_email="" + ), + api_key="hello-world", + parent_otel_span=None, + valid_token_dict={}, + route="/chat/completion", + start_time=datetime.now(), + ) + + assert new_obj.user_role == expected_role + + +@pytest.mark.parametrize("key_ownership", ["user_key", "team_key"]) +@pytest.mark.asyncio +async def test_aaauser_personal_budgets(key_ownership): + """ + Set a personal budget on a user + + - have it only apply when key belongs to user -> raises BudgetExceededError + - if key belongs to team, have key respect team budget -> allows call to go through + """ + import asyncio + import time + + from fastapi import Request + from starlette.datastructures import URL + import litellm + + from litellm.proxy._types import LiteLLM_UserTable, UserAPIKeyAuth + from litellm.proxy.auth.user_api_key_auth import user_api_key_auth + from litellm.proxy.proxy_server import hash_token, user_api_key_cache + + _user_id = "1234" + user_key = "sk-12345678" + + if key_ownership == "user_key": + valid_token = UserAPIKeyAuth( + token=hash_token(user_key), + last_refreshed_at=time.time(), + user_id=_user_id, + spend=20, + ) + elif key_ownership == "team_key": + valid_token = UserAPIKeyAuth( + token=hash_token(user_key), + last_refreshed_at=time.time(), + user_id=_user_id, + team_id="my-special-team", + team_max_budget=100, + spend=20, + ) + + user_obj = LiteLLM_UserTable( + user_id=_user_id, spend=11, max_budget=10, user_email="" + ) + user_api_key_cache.set_cache(key=hash_token(user_key), value=valid_token) + user_api_key_cache.set_cache(key="{}".format(_user_id), value=user_obj) + + setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "prisma_client", "hello-world") + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + test_user_cache = getattr(litellm.proxy.proxy_server, "user_api_key_cache") + + assert test_user_cache.get_cache(key=hash_token(user_key)) == valid_token + + try: + await user_api_key_auth(request=request, api_key="Bearer " + user_key) + + if key_ownership == "user_key": + pytest.fail("Expected this call to fail. User is over limit.") + except Exception: + if key_ownership == "team_key": + pytest.fail("Expected this call to work. Key is below team budget.") + + +@pytest.mark.asyncio +@pytest.mark.parametrize("prohibited_param", ["api_base", "base_url"]) +async def test_user_api_key_auth_fails_with_prohibited_params(prohibited_param): + """ + Relevant issue: https://huntr.com/bounties/4001e1a2-7b7a-4776-a3ae-e6692ec3d997 + """ + import json + + from fastapi import Request + + # Setup + user_key = "sk-1234" + + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + + # Create request with prohibited parameter in body + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + body = {prohibited_param: "https://custom-api.com"} + return bytes(json.dumps(body), "utf-8") + + request.body = return_body + try: + response = await user_api_key_auth( + request=request, api_key="Bearer " + user_key + ) + except Exception as e: + print("error str=", str(e)) + error_message = str(e.message) + print("error message=", error_message) + assert "is not allowed in request body" in error_message + + +@pytest.mark.asyncio() +@pytest.mark.parametrize( + "route, should_raise_error", + [ + ("/embeddings", False), + ("/chat/completions", True), + ("/completions", True), + ("/models", True), + ("/v1/embeddings", True), + ], +) +async def test_auth_with_allowed_routes(route, should_raise_error): + # Setup + user_key = "sk-1234" + + general_settings = {"allowed_routes": ["/embeddings"]} + from fastapi import Request + + from litellm.proxy import proxy_server + + initial_general_settings = getattr(proxy_server, "general_settings") + + setattr(proxy_server, "master_key", "sk-1234") + setattr(proxy_server, "general_settings", general_settings) + + request = Request(scope={"type": "http"}) + request._url = URL(url=route) + + if should_raise_error: + try: + await user_api_key_auth(request=request, api_key="Bearer " + user_key) + pytest.fail("Expected this call to fail. User is over limit.") + except Exception as e: + print("error str=", str(e.message)) + error_str = str(e.message) + assert "Route" in error_str and "not allowed" in error_str + pass + else: + await user_api_key_auth(request=request, api_key="Bearer " + user_key) + + setattr(proxy_server, "general_settings", initial_general_settings) + + +@pytest.mark.parametrize( + "route, user_role, expected_result", + [ + # Proxy Admin checks + ("/global/spend/logs", "proxy_admin", True), + ("/key/delete", "proxy_admin", True), + ("/key/generate", "proxy_admin", True), + ("/key/regenerate", "proxy_admin", True), + # Internal User checks - allowed routes + ("/global/spend/logs", "internal_user", True), + ("/key/delete", "internal_user", True), + ("/key/generate", "internal_user", True), + ("/key/82akk800000000jjsk/regenerate", "internal_user", True), + # Internal User Viewer + ("/key/generate", "internal_user_viewer", False), + # Internal User checks - disallowed routes + ("/organization/member_add", "internal_user", False), + ], +) +def test_is_ui_route_allowed(route, user_role, expected_result): + from litellm.proxy.auth.user_api_key_auth import _is_ui_route_allowed + from litellm.proxy._types import LiteLLM_UserTable + + user_obj = LiteLLM_UserTable( + user_id="3b803c0e-666e-4e99-bd5c-6e534c07e297", + max_budget=None, + spend=0.0, + model_max_budget={}, + model_spend={}, + user_email="my-test-email@1234.com", + models=[], + tpm_limit=None, + rpm_limit=None, + user_role=user_role, + organization_memberships=[], + ) + + received_args: dict = { + "route": route, + "user_obj": user_obj, + } + try: + assert _is_ui_route_allowed(**received_args) == expected_result + except Exception as e: + # If expected result is False, we expect an error + if expected_result is False: + pass + else: + raise e + + +@pytest.mark.parametrize( + "route, user_role, expected_result", + [ + ("/key/generate", "internal_user_viewer", False), + ], +) +def test_is_api_route_allowed(route, user_role, expected_result): + from litellm.proxy.auth.user_api_key_auth import _is_api_route_allowed + from litellm.proxy._types import LiteLLM_UserTable + + user_obj = LiteLLM_UserTable( + user_id="3b803c0e-666e-4e99-bd5c-6e534c07e297", + max_budget=None, + spend=0.0, + model_max_budget={}, + model_spend={}, + user_email="my-test-email@1234.com", + models=[], + tpm_limit=None, + rpm_limit=None, + user_role=user_role, + organization_memberships=[], + ) + + received_args: dict = { + "route": route, + "user_obj": user_obj, + } + try: + assert _is_api_route_allowed(**received_args) == expected_result + except Exception as e: + # If expected result is False, we expect an error + if expected_result is False: + pass + else: + raise e diff --git a/tests/proxy_unit_tests/vertex_key.json b/tests/proxy_unit_tests/vertex_key.json new file mode 100644 index 000000000..e2fd8512b --- /dev/null +++ b/tests/proxy_unit_tests/vertex_key.json @@ -0,0 +1,13 @@ +{ + "type": "service_account", + "project_id": "adroit-crow-413218", + "private_key_id": "", + "private_key": "", + "client_email": "test-adroit-crow@adroit-crow-413218.iam.gserviceaccount.com", + "client_id": "104886546564708740969", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test-adroit-crow%40adroit-crow-413218.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" +} diff --git a/tests/router_unit_tests/test_router_endpoints.py b/tests/router_unit_tests/test_router_endpoints.py index accd5ea40..4c9fc8f35 100644 --- a/tests/router_unit_tests/test_router_endpoints.py +++ b/tests/router_unit_tests/test_router_endpoints.py @@ -87,6 +87,7 @@ proxy_handler_instance = MyCustomHandler() # Set litellm.callbacks = [proxy_handler_instance] on the proxy # need to set litellm.callbacks = [proxy_handler_instance] # on the proxy @pytest.mark.asyncio +@pytest.mark.flaky(retries=6, delay=10) async def test_transcription_on_router(): litellm.set_verbose = True litellm.callbacks = [proxy_handler_instance] From dad1d78c06d575e5398a9e51e44edb86dd2aac1e Mon Sep 17 00:00:00 2001 From: nobuo kawasaki Date: Fri, 8 Nov 2024 05:20:12 +0900 Subject: [PATCH 36/67] chore: comment for maritalk (#6607) --- litellm/litellm_core_utils/get_llm_provider_logic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/litellm_core_utils/get_llm_provider_logic.py b/litellm/litellm_core_utils/get_llm_provider_logic.py index 6d3861bea..71eaaead0 100644 --- a/litellm/litellm_core_utils/get_llm_provider_logic.py +++ b/litellm/litellm_core_utils/get_llm_provider_logic.py @@ -226,7 +226,7 @@ def get_llm_provider( # noqa: PLR0915 ## openrouter elif model in litellm.openrouter_models: custom_llm_provider = "openrouter" - ## openrouter + ## maritalk elif model in litellm.maritalk_models: custom_llm_provider = "maritalk" ## vertex - text + chat + language (gemini) models From 6e4a9bb3b7720dfebba75a7c221046556b852798 Mon Sep 17 00:00:00 2001 From: Emerson Gomes Date: Thu, 7 Nov 2024 18:26:22 -0600 Subject: [PATCH 37/67] Update gpt-4o-2024-08-06, and o1-preview, o1-mini models in model cost map (#6654) * Adding supports_response_schema to gpt-4o-2024-08-06 models * o1 models do not support vision --------- Co-authored-by: Emerson Gomes --- model_prices_and_context_window.json | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index a37a431dc..bc3799229 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -108,7 +108,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-mini-2024-09-12": { @@ -122,7 +122,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-preview": { @@ -136,7 +136,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-preview-2024-09-12": { @@ -150,7 +150,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "chatgpt-4o-latest": { @@ -190,6 +190,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -461,6 +462,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "ft:gpt-4o-mini-2024-07-18": { @@ -652,7 +654,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-mini-2024-09-12": { @@ -666,7 +668,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-preview": { @@ -680,7 +682,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-preview-2024-09-12": { @@ -694,7 +696,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/gpt-4o": { @@ -721,6 +723,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/gpt-4o-2024-05-13": { @@ -746,6 +749,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/global-standard/gpt-4o-mini": { @@ -3790,7 +3794,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-mini-2024-09-12": { "max_tokens": 65536, @@ -3802,7 +3806,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-preview": { "max_tokens": 32768, @@ -3814,7 +3818,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-preview-2024-09-12": { "max_tokens": 32768, @@ -3826,7 +3830,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/gpt-4o": { "max_tokens": 4096, From ae385cfcdcc891b23cd99a10387635f705193752 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 7 Nov 2024 16:26:53 -0800 Subject: [PATCH 38/67] (QOL improvement) add unit testing for all static_methods in litellm_logging.py (#6640) * add unit testing for standard logging payload * unit testing for static methods in litellm_logging * add code coverage check for litellm_logging * litellm_logging_code_coverage * test_get_final_response_obj * fix validate_redacted_message_span_attributes * test validate_redacted_message_span_attributes --- .circleci/config.yml | 1 + litellm/litellm_core_utils/litellm_logging.py | 19 +- .../litellm_logging_code_coverage.py | 95 ++++++++ .../test_otel_logging.py | 9 + .../test_standard_logging_payload.py | 219 +++++++++++++++++- 5 files changed, 334 insertions(+), 9 deletions(-) create mode 100644 tests/code_coverage_tests/litellm_logging_code_coverage.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 8e63cfe25..7a742afe0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -722,6 +722,7 @@ jobs: - run: python ./tests/documentation_tests/test_general_setting_keys.py - run: python ./tests/code_coverage_tests/router_code_coverage.py - run: python ./tests/code_coverage_tests/test_router_strategy_async.py + - run: python ./tests/code_coverage_tests/litellm_logging_code_coverage.py - run: python ./tests/documentation_tests/test_env_keys.py - run: helm lint ./deploy/charts/litellm-helm diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index 4753779c0..2ab905e85 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -2474,6 +2474,14 @@ class StandardLoggingPayloadSetup: ) -> Tuple[float, float, float]: """ Convert datetime objects to floats + + Args: + start_time: Union[dt_object, float] + end_time: Union[dt_object, float] + completion_start_time: Union[dt_object, float] + + Returns: + Tuple[float, float, float]: A tuple containing the start time, end time, and completion start time as floats. """ if isinstance(start_time, datetime.datetime): @@ -2534,13 +2542,10 @@ class StandardLoggingPayloadSetup: ) if isinstance(metadata, dict): # Filter the metadata dictionary to include only the specified keys - clean_metadata = StandardLoggingMetadata( - **{ # type: ignore - key: metadata[key] - for key in StandardLoggingMetadata.__annotations__.keys() - if key in metadata - } - ) + supported_keys = StandardLoggingMetadata.__annotations__.keys() + for key in supported_keys: + if key in metadata: + clean_metadata[key] = metadata[key] # type: ignore if metadata.get("user_api_key") is not None: if is_valid_sha256_hash(str(metadata.get("user_api_key"))): diff --git a/tests/code_coverage_tests/litellm_logging_code_coverage.py b/tests/code_coverage_tests/litellm_logging_code_coverage.py new file mode 100644 index 000000000..9825cfba1 --- /dev/null +++ b/tests/code_coverage_tests/litellm_logging_code_coverage.py @@ -0,0 +1,95 @@ +import ast +import os +from typing import List + + +def get_function_names_from_file(file_path: str) -> List[str]: + """ + Extracts all static method names from litellm_logging.py + """ + with open(file_path, "r") as file: + tree = ast.parse(file.read()) + + function_names = [] + + for node in tree.body: + if isinstance(node, ast.ClassDef): + # Functions inside classes + for class_node in node.body: + if isinstance(class_node, (ast.FunctionDef, ast.AsyncFunctionDef)): + # Check if the function has @staticmethod decorator + for decorator in class_node.decorator_list: + if ( + isinstance(decorator, ast.Name) + and decorator.id == "staticmethod" + ): + function_names.append(class_node.name) + + return function_names + + +def get_all_functions_called_in_tests(base_dir: str) -> set: + """ + Returns a set of function names that are called in test functions + inside test files containing the word 'logging'. + """ + called_functions = set() + + for root, _, files in os.walk(base_dir): + for file in files: + if file.endswith(".py") and "logging" in file.lower(): + file_path = os.path.join(root, file) + with open(file_path, "r") as f: + try: + tree = ast.parse(f.read()) + except SyntaxError: + print(f"Warning: Syntax error in file {file_path}") + continue + + for node in ast.walk(tree): + if isinstance(node, ast.Call): + if isinstance(node.func, ast.Name): + called_functions.add(node.func.id) + elif isinstance(node.func, ast.Attribute): + called_functions.add(node.func.attr) + + return called_functions + + +# Functions that can be ignored in test coverage +ignored_function_names = [ + "__init__", + # Add other functions to ignore here +] + + +def main(): + logging_file = "./litellm/litellm_core_utils/litellm_logging.py" + tests_dir = "./tests/" + + # LOCAL TESTING + # logging_file = "../../litellm/litellm_core_utils/litellm_logging.py" + # tests_dir = "../../tests/" + + logging_functions = get_function_names_from_file(logging_file) + print("logging_functions:", logging_functions) + + called_functions_in_tests = get_all_functions_called_in_tests(tests_dir) + untested_functions = [ + fn + for fn in logging_functions + if fn not in called_functions_in_tests and fn not in ignored_function_names + ] + + if untested_functions: + untested_perc = len(untested_functions) / len(logging_functions) + print(f"untested_percentage: {untested_perc * 100:.2f}%") + raise Exception( + f"{untested_perc * 100:.2f}% of functions in litellm_logging.py are not tested: {untested_functions}" + ) + else: + print("All functions in litellm_logging.py are covered by tests.") + + +if __name__ == "__main__": + main() diff --git a/tests/logging_callback_tests/test_otel_logging.py b/tests/logging_callback_tests/test_otel_logging.py index 49212607b..f93cc1ec2 100644 --- a/tests/logging_callback_tests/test_otel_logging.py +++ b/tests/logging_callback_tests/test_otel_logging.py @@ -260,6 +260,15 @@ def validate_redacted_message_span_attributes(span): "llm.usage.total_tokens", "gen_ai.usage.completion_tokens", "gen_ai.usage.prompt_tokens", + "metadata.user_api_key_hash", + "metadata.requester_ip_address", + "metadata.user_api_key_team_alias", + "metadata.requester_metadata", + "metadata.user_api_key_team_id", + "metadata.spend_logs_metadata", + "metadata.user_api_key_alias", + "metadata.user_api_key_user_id", + "metadata.user_api_key_org_id", ] _all_attributes = set([name for name in span.attributes.keys()]) diff --git a/tests/logging_callback_tests/test_standard_logging_payload.py b/tests/logging_callback_tests/test_standard_logging_payload.py index 42d504a1e..654103663 100644 --- a/tests/logging_callback_tests/test_standard_logging_payload.py +++ b/tests/logging_callback_tests/test_standard_logging_payload.py @@ -13,10 +13,16 @@ from pydantic.main import Model sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system-path - +from datetime import datetime as dt_object +import time import pytest import litellm -from litellm.types.utils import Usage +from litellm.types.utils import ( + Usage, + StandardLoggingMetadata, + StandardLoggingModelInformation, + StandardLoggingHiddenParams, +) from litellm.litellm_core_utils.litellm_logging import StandardLoggingPayloadSetup @@ -104,3 +110,212 @@ def test_get_additional_headers(): "x_ratelimit_limit_tokens": 160000, "x_ratelimit_remaining_tokens": 160000, } + + +def all_fields_present(standard_logging_metadata: StandardLoggingMetadata): + for field in StandardLoggingMetadata.__annotations__.keys(): + assert field in standard_logging_metadata + + +@pytest.mark.parametrize( + "metadata_key, metadata_value", + [ + ("user_api_key_alias", "test_alias"), + ("user_api_key_hash", "test_hash"), + ("user_api_key_team_id", "test_team_id"), + ("user_api_key_user_id", "test_user_id"), + ("user_api_key_team_alias", "test_team_alias"), + ("spend_logs_metadata", {"key": "value"}), + ("requester_ip_address", "127.0.0.1"), + ("requester_metadata", {"user_agent": "test_agent"}), + ], +) +def test_get_standard_logging_metadata(metadata_key, metadata_value): + """ + Test that the get_standard_logging_metadata function correctly sets the metadata fields. + All fields in StandardLoggingMetadata should ALWAYS be present. + """ + metadata = {metadata_key: metadata_value} + standard_logging_metadata = ( + StandardLoggingPayloadSetup.get_standard_logging_metadata(metadata) + ) + + print("standard_logging_metadata", standard_logging_metadata) + + # Assert that all fields in StandardLoggingMetadata are present + all_fields_present(standard_logging_metadata) + + # Assert that the specific metadata field is set correctly + assert standard_logging_metadata[metadata_key] == metadata_value + + +def test_get_standard_logging_metadata_user_api_key_hash(): + valid_hash = "a" * 64 # 64 character string + metadata = {"user_api_key": valid_hash} + result = StandardLoggingPayloadSetup.get_standard_logging_metadata(metadata) + assert result["user_api_key_hash"] == valid_hash + + +def test_get_standard_logging_metadata_invalid_user_api_key(): + invalid_hash = "not_a_valid_hash" + metadata = {"user_api_key": invalid_hash} + result = StandardLoggingPayloadSetup.get_standard_logging_metadata(metadata) + all_fields_present(result) + assert result["user_api_key_hash"] is None + + +def test_get_standard_logging_metadata_invalid_keys(): + metadata = { + "user_api_key_alias": "test_alias", + "invalid_key": "should_be_ignored", + "another_invalid_key": 123, + } + result = StandardLoggingPayloadSetup.get_standard_logging_metadata(metadata) + all_fields_present(result) + assert result["user_api_key_alias"] == "test_alias" + assert "invalid_key" not in result + assert "another_invalid_key" not in result + + +def test_cleanup_timestamps(): + """Test cleanup_timestamps with different input types""" + # Test with datetime objects + now = dt_object.now() + start = now + end = now + completion = now + + result = StandardLoggingPayloadSetup.cleanup_timestamps(start, end, completion) + + assert all(isinstance(x, float) for x in result) + assert len(result) == 3 + + # Test with float timestamps + start_float = time.time() + end_float = start_float + 1 + completion_float = end_float + + result = StandardLoggingPayloadSetup.cleanup_timestamps( + start_float, end_float, completion_float + ) + + assert all(isinstance(x, float) for x in result) + assert result[0] == start_float + assert result[1] == end_float + assert result[2] == completion_float + + # Test with mixed types + result = StandardLoggingPayloadSetup.cleanup_timestamps( + start_float, end, completion_float + ) + assert all(isinstance(x, float) for x in result) + + # Test invalid input + with pytest.raises(ValueError): + StandardLoggingPayloadSetup.cleanup_timestamps( + "invalid", end_float, completion_float + ) + + +def test_get_model_cost_information(): + """Test get_model_cost_information with different inputs""" + # Test with None values + result = StandardLoggingPayloadSetup.get_model_cost_information( + base_model=None, + custom_pricing=None, + custom_llm_provider=None, + init_response_obj={}, + ) + assert result["model_map_key"] == "" + assert result["model_map_value"] is None # this was not found in model cost map + # assert all fields in StandardLoggingModelInformation are present + assert all( + field in result for field in StandardLoggingModelInformation.__annotations__ + ) + + # Test with valid model + result = StandardLoggingPayloadSetup.get_model_cost_information( + base_model="gpt-3.5-turbo", + custom_pricing=False, + custom_llm_provider="openai", + init_response_obj={}, + ) + litellm_info_gpt_3_5_turbo_model_map_value = litellm.get_model_info( + model="gpt-3.5-turbo", custom_llm_provider="openai" + ) + print("result", result) + assert result["model_map_key"] == "gpt-3.5-turbo" + assert result["model_map_value"] is not None + assert result["model_map_value"] == litellm_info_gpt_3_5_turbo_model_map_value + # assert all fields in StandardLoggingModelInformation are present + assert all( + field in result for field in StandardLoggingModelInformation.__annotations__ + ) + + +def test_get_hidden_params(): + """Test get_hidden_params with different inputs""" + # Test with None + result = StandardLoggingPayloadSetup.get_hidden_params(None) + assert result["model_id"] is None + assert result["cache_key"] is None + assert result["api_base"] is None + assert result["response_cost"] is None + assert result["additional_headers"] is None + + # assert all fields in StandardLoggingHiddenParams are present + assert all(field in result for field in StandardLoggingHiddenParams.__annotations__) + + # Test with valid params + hidden_params = { + "model_id": "test-model", + "cache_key": "test-cache", + "api_base": "https://api.test.com", + "response_cost": 0.001, + "additional_headers": { + "x-ratelimit-limit-requests": "2000", + "x-ratelimit-remaining-requests": "1999", + }, + } + result = StandardLoggingPayloadSetup.get_hidden_params(hidden_params) + assert result["model_id"] == "test-model" + assert result["cache_key"] == "test-cache" + assert result["api_base"] == "https://api.test.com" + assert result["response_cost"] == 0.001 + assert result["additional_headers"] is not None + assert result["additional_headers"]["x_ratelimit_limit_requests"] == 2000 + # assert all fields in StandardLoggingHiddenParams are present + assert all(field in result for field in StandardLoggingHiddenParams.__annotations__) + + +def test_get_final_response_obj(): + """Test get_final_response_obj with different input types and redaction scenarios""" + # Test with direct response_obj + response_obj = {"choices": [{"message": {"content": "test content"}}]} + result = StandardLoggingPayloadSetup.get_final_response_obj( + response_obj=response_obj, init_response_obj=None, kwargs={} + ) + assert result == response_obj + + # Test redaction when litellm.turn_off_message_logging is True + litellm.turn_off_message_logging = True + try: + model_response = litellm.ModelResponse( + choices=[ + litellm.Choices(message=litellm.Message(content="sensitive content")) + ] + ) + kwargs = {"messages": [{"role": "user", "content": "original message"}]} + result = StandardLoggingPayloadSetup.get_final_response_obj( + response_obj=model_response, init_response_obj=model_response, kwargs=kwargs + ) + + print("result", result) + print("type(result)", type(result)) + # Verify response message content was redacted + assert result["choices"][0]["message"]["content"] == "redacted-by-litellm" + # Verify that redaction occurred in kwargs + assert kwargs["messages"][0]["content"] == "redacted-by-litellm" + finally: + # Reset litellm.turn_off_message_logging to its original value + litellm.turn_off_message_logging = False From eb471178008ef38ca67d0bd7611053ed311117a5 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 7 Nov 2024 17:01:18 -0800 Subject: [PATCH 39/67] (feat) log error class, function_name on prometheus service failure hook + only log DB related failures on DB service hook (#6650) * log error on prometheus service failure hook * use a more accurate function name for wrapper that handles logging db metrics * fix log_db_metrics * test_log_db_metrics_failure_error_types * fix linting * fix auth checks --- litellm/_service_logger.py | 3 +- litellm/integrations/prometheus_services.py | 31 +++- litellm/proxy/auth/auth_checks.py | 12 +- litellm/proxy/auth/user_api_key_auth.py | 2 +- litellm/proxy/db/log_db_metrics.py | 138 ++++++++++++++++++ litellm/proxy/proxy_server.py | 4 +- litellm/proxy/utils.py | 86 +---------- .../test_log_db_redis_services.py | 83 +++++++++-- 8 files changed, 249 insertions(+), 110 deletions(-) create mode 100644 litellm/proxy/db/log_db_metrics.py diff --git a/litellm/_service_logger.py b/litellm/_service_logger.py index d4aad68bb..f777c93d4 100644 --- a/litellm/_service_logger.py +++ b/litellm/_service_logger.py @@ -241,7 +241,8 @@ class ServiceLogging(CustomLogger): if callback == "prometheus_system": await self.init_prometheus_services_logger_if_none() await self.prometheusServicesLogger.async_service_failure_hook( - payload=payload + payload=payload, + error=error, ) elif callback == "datadog": await self.init_datadog_logger_if_none() diff --git a/litellm/integrations/prometheus_services.py b/litellm/integrations/prometheus_services.py index a36ac9b9c..df94ffcd8 100644 --- a/litellm/integrations/prometheus_services.py +++ b/litellm/integrations/prometheus_services.py @@ -9,6 +9,7 @@ import subprocess import sys import traceback import uuid +from typing import List, Optional, Union import dotenv import requests # type: ignore @@ -51,7 +52,9 @@ class PrometheusServicesLogger: for service in self.services: histogram = self.create_histogram(service, type_of_request="latency") counter_failed_request = self.create_counter( - service, type_of_request="failed_requests" + service, + type_of_request="failed_requests", + additional_labels=["error_class", "function_name"], ) counter_total_requests = self.create_counter( service, type_of_request="total_requests" @@ -99,7 +102,12 @@ class PrometheusServicesLogger: buckets=LATENCY_BUCKETS, ) - def create_counter(self, service: str, type_of_request: str): + def create_counter( + self, + service: str, + type_of_request: str, + additional_labels: Optional[List[str]] = None, + ): metric_name = "litellm_{}_{}".format(service, type_of_request) is_registered = self.is_metric_registered(metric_name) if is_registered: @@ -107,7 +115,7 @@ class PrometheusServicesLogger: return self.Counter( metric_name, "Total {} for {} service".format(type_of_request, service), - labelnames=[service], + labelnames=[service] + (additional_labels or []), ) def observe_histogram( @@ -125,10 +133,14 @@ class PrometheusServicesLogger: counter, labels: str, amount: float, + additional_labels: Optional[List[str]] = [], ): assert isinstance(counter, self.Counter) - counter.labels(labels).inc(amount) + if additional_labels: + counter.labels(labels, *additional_labels).inc(amount) + else: + counter.labels(labels).inc(amount) def service_success_hook(self, payload: ServiceLoggerPayload): if self.mock_testing: @@ -187,16 +199,25 @@ class PrometheusServicesLogger: amount=1, # LOG TOTAL REQUESTS TO PROMETHEUS ) - async def async_service_failure_hook(self, payload: ServiceLoggerPayload): + async def async_service_failure_hook( + self, + payload: ServiceLoggerPayload, + error: Union[str, Exception], + ): if self.mock_testing: self.mock_testing_failure_calls += 1 + error_class = error.__class__.__name__ + function_name = payload.call_type if payload.service.value in self.payload_to_prometheus_map: prom_objects = self.payload_to_prometheus_map[payload.service.value] for obj in prom_objects: + # increment both failed and total requests if isinstance(obj, self.Counter): self.increment_counter( counter=obj, labels=payload.service.value, + # log additional_labels=["error_class", "function_name"], used for debugging what's going wrong with the DB + additional_labels=[error_class, function_name], amount=1, # LOG ERROR COUNT TO PROMETHEUS ) diff --git a/litellm/proxy/auth/auth_checks.py b/litellm/proxy/auth/auth_checks.py index 8d504c739..12b6ec372 100644 --- a/litellm/proxy/auth/auth_checks.py +++ b/litellm/proxy/auth/auth_checks.py @@ -32,7 +32,7 @@ from litellm.proxy._types import ( UserAPIKeyAuth, ) from litellm.proxy.auth.route_checks import RouteChecks -from litellm.proxy.utils import PrismaClient, ProxyLogging, log_to_opentelemetry +from litellm.proxy.utils import PrismaClient, ProxyLogging, log_db_metrics from litellm.types.services import ServiceLoggerPayload, ServiceTypes from .auth_checks_organization import organization_role_based_access_check @@ -290,7 +290,7 @@ def get_actual_routes(allowed_routes: list) -> list: return actual_routes -@log_to_opentelemetry +@log_db_metrics async def get_end_user_object( end_user_id: Optional[str], prisma_client: Optional[PrismaClient], @@ -415,7 +415,7 @@ def _update_last_db_access_time( last_db_access_time[key] = (value, time.time()) -@log_to_opentelemetry +@log_db_metrics async def get_user_object( user_id: str, prisma_client: Optional[PrismaClient], @@ -562,7 +562,7 @@ async def _delete_cache_key_object( ) -@log_to_opentelemetry +@log_db_metrics async def _get_team_db_check(team_id: str, prisma_client: PrismaClient): return await prisma_client.db.litellm_teamtable.find_unique( where={"team_id": team_id} @@ -658,7 +658,7 @@ async def get_team_object( ) -@log_to_opentelemetry +@log_db_metrics async def get_key_object( hashed_token: str, prisma_client: Optional[PrismaClient], @@ -766,7 +766,7 @@ async def _handle_failed_db_connection_for_get_key_object( raise e -@log_to_opentelemetry +@log_db_metrics async def get_org_object( org_id: str, prisma_client: Optional[PrismaClient], diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index d25b6f620..f11bfcbc9 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -58,7 +58,7 @@ from litellm.proxy.auth.auth_checks import ( get_org_object, get_team_object, get_user_object, - log_to_opentelemetry, + log_db_metrics, ) from litellm.proxy.auth.auth_utils import ( _get_request_ip_address, diff --git a/litellm/proxy/db/log_db_metrics.py b/litellm/proxy/db/log_db_metrics.py new file mode 100644 index 000000000..e8040ae60 --- /dev/null +++ b/litellm/proxy/db/log_db_metrics.py @@ -0,0 +1,138 @@ +""" +Handles logging DB success/failure to ServiceLogger() + +ServiceLogger() then sends DB logs to Prometheus, OTEL, Datadog etc +""" + +from datetime import datetime +from functools import wraps +from typing import Callable, Dict, Tuple + +from litellm._service_logger import ServiceTypes +from litellm.litellm_core_utils.core_helpers import ( + _get_parent_otel_span_from_kwargs, + get_litellm_metadata_from_kwargs, +) + + +def log_db_metrics(func): + """ + Decorator to log the duration of a DB related function to ServiceLogger() + + Handles logging DB success/failure to ServiceLogger(), which logs to Prometheus, OTEL, Datadog + + When logging Failure it checks if the Exception is a PrismaError, httpx.ConnectError or httpx.TimeoutException and then logs that as a DB Service Failure + + Args: + func: The function to be decorated + + Returns: + Result from the decorated function + + Raises: + Exception: If the decorated function raises an exception + """ + + @wraps(func) + async def wrapper(*args, **kwargs): + from prisma.errors import PrismaError + + start_time: datetime = datetime.now() + + try: + result = await func(*args, **kwargs) + end_time: datetime = datetime.now() + from litellm.proxy.proxy_server import proxy_logging_obj + + if "PROXY" not in func.__name__: + await proxy_logging_obj.service_logging_obj.async_service_success_hook( + service=ServiceTypes.DB, + call_type=func.__name__, + parent_otel_span=kwargs.get("parent_otel_span", None), + duration=(end_time - start_time).total_seconds(), + start_time=start_time, + end_time=end_time, + event_metadata={ + "function_name": func.__name__, + "function_kwargs": kwargs, + "function_args": args, + }, + ) + elif ( + # in litellm custom callbacks kwargs is passed as arg[0] + # https://docs.litellm.ai/docs/observability/custom_callback#callback-functions + args is not None + and len(args) > 0 + and isinstance(args[0], dict) + ): + passed_kwargs = args[0] + parent_otel_span = _get_parent_otel_span_from_kwargs( + kwargs=passed_kwargs + ) + if parent_otel_span is not None: + metadata = get_litellm_metadata_from_kwargs(kwargs=passed_kwargs) + await proxy_logging_obj.service_logging_obj.async_service_success_hook( + service=ServiceTypes.BATCH_WRITE_TO_DB, + call_type=func.__name__, + parent_otel_span=parent_otel_span, + duration=0.0, + start_time=start_time, + end_time=end_time, + event_metadata=metadata, + ) + # end of logging to otel + return result + except Exception as e: + end_time: datetime = datetime.now() + await _handle_logging_db_exception( + e=e, + func=func, + kwargs=kwargs, + args=args, + start_time=start_time, + end_time=end_time, + ) + raise e + + return wrapper + + +def _is_exception_related_to_db(e: Exception) -> bool: + """ + Returns True if the exception is related to the DB + """ + + import httpx + from prisma.errors import PrismaError + + return isinstance(e, (PrismaError, httpx.ConnectError, httpx.TimeoutException)) + + +async def _handle_logging_db_exception( + e: Exception, + func: Callable, + kwargs: Dict, + args: Tuple, + start_time: datetime, + end_time: datetime, +) -> None: + from litellm.proxy.proxy_server import proxy_logging_obj + + # don't log this as a DB Service Failure, if the DB did not raise an exception + if _is_exception_related_to_db(e) is not True: + return + + await proxy_logging_obj.service_logging_obj.async_service_failure_hook( + error=e, + service=ServiceTypes.DB, + call_type=func.__name__, + parent_otel_span=kwargs.get("parent_otel_span"), + duration=(end_time - start_time).total_seconds(), + start_time=start_time, + end_time=end_time, + event_metadata={ + "function_name": func.__name__, + "function_kwargs": kwargs, + "function_args": args, + }, + ) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index ce58c4d75..12e80876c 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -125,7 +125,7 @@ from litellm.proxy._types import * from litellm.proxy.analytics_endpoints.analytics_endpoints import ( router as analytics_router, ) -from litellm.proxy.auth.auth_checks import log_to_opentelemetry +from litellm.proxy.auth.auth_checks import log_db_metrics from litellm.proxy.auth.auth_utils import check_response_size_is_safe from litellm.proxy.auth.handle_jwt import JWTHandler from litellm.proxy.auth.litellm_license import LicenseCheck @@ -747,7 +747,7 @@ async def _PROXY_failure_handler( pass -@log_to_opentelemetry +@log_db_metrics async def _PROXY_track_cost_callback( kwargs, # kwargs to completion completion_response: litellm.ModelResponse, # response from completion diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 44e9d151d..9d33244a0 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -55,10 +55,6 @@ from litellm.integrations.custom_guardrail import CustomGuardrail from litellm.integrations.custom_logger import CustomLogger from litellm.integrations.SlackAlerting.slack_alerting import SlackAlerting from litellm.integrations.SlackAlerting.utils import _add_langfuse_trace_id_to_alert -from litellm.litellm_core_utils.core_helpers import ( - _get_parent_otel_span_from_kwargs, - get_litellm_metadata_from_kwargs, -) from litellm.litellm_core_utils.litellm_logging import Logging from litellm.llms.custom_httpx.httpx_handler import HTTPHandler from litellm.proxy._types import ( @@ -77,6 +73,7 @@ from litellm.proxy.db.create_views import ( create_missing_views, should_create_missing_views, ) +from litellm.proxy.db.log_db_metrics import log_db_metrics from litellm.proxy.db.prisma_client import PrismaWrapper from litellm.proxy.hooks.cache_control_check import _PROXY_CacheControlCheck from litellm.proxy.hooks.max_budget_limiter import _PROXY_MaxBudgetLimiter @@ -137,83 +134,6 @@ def safe_deep_copy(data): return new_data -def log_to_opentelemetry(func): - """ - Decorator to log the duration of a DB related function to ServiceLogger() - - Handles logging DB success/failure to ServiceLogger(), which logs to Prometheus, OTEL, Datadog - """ - - @wraps(func) - async def wrapper(*args, **kwargs): - start_time: datetime = datetime.now() - - try: - result = await func(*args, **kwargs) - end_time: datetime = datetime.now() - from litellm.proxy.proxy_server import proxy_logging_obj - - if "PROXY" not in func.__name__: - await proxy_logging_obj.service_logging_obj.async_service_success_hook( - service=ServiceTypes.DB, - call_type=func.__name__, - parent_otel_span=kwargs.get("parent_otel_span", None), - duration=(end_time - start_time).total_seconds(), - start_time=start_time, - end_time=end_time, - event_metadata={ - "function_name": func.__name__, - "function_kwargs": kwargs, - "function_args": args, - }, - ) - elif ( - # in litellm custom callbacks kwargs is passed as arg[0] - # https://docs.litellm.ai/docs/observability/custom_callback#callback-functions - args is not None - and len(args) > 0 - and isinstance(args[0], dict) - ): - passed_kwargs = args[0] - parent_otel_span = _get_parent_otel_span_from_kwargs( - kwargs=passed_kwargs - ) - if parent_otel_span is not None: - metadata = get_litellm_metadata_from_kwargs(kwargs=passed_kwargs) - await proxy_logging_obj.service_logging_obj.async_service_success_hook( - service=ServiceTypes.BATCH_WRITE_TO_DB, - call_type=func.__name__, - parent_otel_span=parent_otel_span, - duration=0.0, - start_time=start_time, - end_time=end_time, - event_metadata=metadata, - ) - # end of logging to otel - return result - except Exception as e: - from litellm.proxy.proxy_server import proxy_logging_obj - - end_time: datetime = datetime.now() - await proxy_logging_obj.service_logging_obj.async_service_failure_hook( - error=e, - service=ServiceTypes.DB, - call_type=func.__name__, - parent_otel_span=kwargs.get("parent_otel_span"), - duration=(end_time - start_time).total_seconds(), - start_time=start_time, - end_time=end_time, - event_metadata={ - "function_name": func.__name__, - "function_kwargs": kwargs, - "function_args": args, - }, - ) - raise e - - return wrapper - - class InternalUsageCache: def __init__(self, dual_cache: DualCache): self.dual_cache: DualCache = dual_cache @@ -1397,7 +1317,7 @@ class PrismaClient: return - @log_to_opentelemetry + @log_db_metrics @backoff.on_exception( backoff.expo, Exception, # base exception to catch for the backoff @@ -1463,7 +1383,7 @@ class PrismaClient: max_time=10, # maximum total time to retry for on_backoff=on_backoff, # specifying the function to call on backoff ) - @log_to_opentelemetry + @log_db_metrics async def get_data( # noqa: PLR0915 self, token: Optional[Union[str, list]] = None, diff --git a/tests/logging_callback_tests/test_log_db_redis_services.py b/tests/logging_callback_tests/test_log_db_redis_services.py index 9f5db8009..9824e1a5b 100644 --- a/tests/logging_callback_tests/test_log_db_redis_services.py +++ b/tests/logging_callback_tests/test_log_db_redis_services.py @@ -17,23 +17,25 @@ import pytest import litellm from litellm import completion from litellm._logging import verbose_logger -from litellm.proxy.utils import log_to_opentelemetry, ServiceTypes +from litellm.proxy.utils import log_db_metrics, ServiceTypes from datetime import datetime +import httpx +from prisma.errors import ClientNotConnectedError # Test async function to decorate -@log_to_opentelemetry +@log_db_metrics async def sample_db_function(*args, **kwargs): return "success" -@log_to_opentelemetry +@log_db_metrics async def sample_proxy_function(*args, **kwargs): return "success" @pytest.mark.asyncio -async def test_log_to_opentelemetry_success(): +async def test_log_db_metrics_success(): # Mock the proxy_logging_obj with patch("litellm.proxy.proxy_server.proxy_logging_obj") as mock_proxy_logging: # Setup mock @@ -61,14 +63,14 @@ async def test_log_to_opentelemetry_success(): @pytest.mark.asyncio -async def test_log_to_opentelemetry_duration(): +async def test_log_db_metrics_duration(): # Mock the proxy_logging_obj with patch("litellm.proxy.proxy_server.proxy_logging_obj") as mock_proxy_logging: # Setup mock mock_proxy_logging.service_logging_obj.async_service_success_hook = AsyncMock() # Add a delay to the function to test duration - @log_to_opentelemetry + @log_db_metrics async def delayed_function(**kwargs): await asyncio.sleep(1) # 1 second delay return "success" @@ -95,23 +97,28 @@ async def test_log_to_opentelemetry_duration(): @pytest.mark.asyncio -async def test_log_to_opentelemetry_failure(): +async def test_log_db_metrics_failure(): + """ + should log a failure if a prisma error is raised + """ # Mock the proxy_logging_obj + from prisma.errors import ClientNotConnectedError + with patch("litellm.proxy.proxy_server.proxy_logging_obj") as mock_proxy_logging: # Setup mock mock_proxy_logging.service_logging_obj.async_service_failure_hook = AsyncMock() # Create a failing function - @log_to_opentelemetry + @log_db_metrics async def failing_function(**kwargs): - raise ValueError("Test error") + raise ClientNotConnectedError() # Call the decorated function and expect it to raise - with pytest.raises(ValueError) as exc_info: + with pytest.raises(ClientNotConnectedError) as exc_info: await failing_function(parent_otel_span="test_span") # Assertions - assert str(exc_info.value) == "Test error" + assert "Client is not connected to the query engine" in str(exc_info.value) mock_proxy_logging.service_logging_obj.async_service_failure_hook.assert_called_once() call_args = ( mock_proxy_logging.service_logging_obj.async_service_failure_hook.call_args[ @@ -125,4 +132,56 @@ async def test_log_to_opentelemetry_failure(): assert isinstance(call_args["duration"], float) assert isinstance(call_args["start_time"], datetime) assert isinstance(call_args["end_time"], datetime) - assert isinstance(call_args["error"], ValueError) + assert isinstance(call_args["error"], ClientNotConnectedError) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "exception,should_log", + [ + (ValueError("Generic error"), False), + (KeyError("Missing key"), False), + (TypeError("Type error"), False), + (httpx.ConnectError("Failed to connect"), True), + (httpx.TimeoutException("Request timed out"), True), + (ClientNotConnectedError(), True), # Prisma error + ], +) +async def test_log_db_metrics_failure_error_types(exception, should_log): + """ + Why Test? + Users were seeing that non-DB errors were being logged as DB Service Failures + Example a failure to read a value from cache was being logged as a DB Service Failure + + + Parameterized test to verify: + - DB-related errors (Prisma, httpx) are logged as service failures + - Non-DB errors (ValueError, KeyError, etc.) are not logged + """ + with patch("litellm.proxy.proxy_server.proxy_logging_obj") as mock_proxy_logging: + mock_proxy_logging.service_logging_obj.async_service_failure_hook = AsyncMock() + + @log_db_metrics + async def failing_function(**kwargs): + raise exception + + # Call the function and expect it to raise the exception + with pytest.raises(type(exception)): + await failing_function(parent_otel_span="test_span") + + if should_log: + # Assert failure was logged for DB-related errors + mock_proxy_logging.service_logging_obj.async_service_failure_hook.assert_called_once() + call_args = mock_proxy_logging.service_logging_obj.async_service_failure_hook.call_args[ + 1 + ] + assert call_args["service"] == ServiceTypes.DB + assert call_args["call_type"] == "failing_function" + assert call_args["parent_otel_span"] == "test_span" + assert isinstance(call_args["duration"], float) + assert isinstance(call_args["start_time"], datetime) + assert isinstance(call_args["end_time"], datetime) + assert isinstance(call_args["error"], type(exception)) + else: + # Assert failure was NOT logged for non-DB errors + mock_proxy_logging.service_logging_obj.async_service_failure_hook.assert_not_called() From d0d29d70deb05faeffa40351aeb086ef61213a66 Mon Sep 17 00:00:00 2001 From: Emerson Gomes Date: Thu, 7 Nov 2024 23:11:14 -0600 Subject: [PATCH 40/67] Update several Azure AI models in model cost map (#6655) * Adding Azure Phi 3/3.5 models to model cost map * Update gpt-4o-mini models * Adding missing Azure Mistral models to model cost map * Adding Azure Llama3.2 models to model cost map * Fix Gemini-1.5-flash pricing * Fix Gemini-1.5-flash output pricing * Fix Gemini-1.5-pro prices * Fix Gemini-1.5-flash output prices * Correct gemini-1.5-pro prices * Correction on Vertex Llama3.2 entry --------- Co-authored-by: Emerson Gomes --- model_prices_and_context_window.json | 224 ++++++++++++++++++++++----- 1 file changed, 188 insertions(+), 36 deletions(-) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index bc3799229..cfc2cef72 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -80,6 +80,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -94,6 +95,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -475,6 +477,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "ft:davinci-002": { @@ -762,6 +765,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/gpt-4o-mini": { @@ -775,6 +779,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -789,6 +794,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -1113,6 +1119,52 @@ "supports_function_calling": true, "mode": "chat" }, + "azure_ai/mistral-large-2407": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000002, + "output_cost_per_token": 0.000006, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview" + }, + "azure_ai/ministral-3b": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000004, + "output_cost_per_token": 0.00000004, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.ministral-3b-2410-offer?tab=Overview" + }, + "azure_ai/Llama-3.2-11B-Vision-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000037, + "output_cost_per_token": 0.00000037, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "supports_vision": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-11b-vision-instruct-offer?tab=Overview" + }, + "azure_ai/Llama-3.2-90B-Vision-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000204, + "output_cost_per_token": 0.00000204, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "supports_vision": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-90b-vision-instruct-offer?tab=Overview" + }, "azure_ai/Meta-Llama-3-70B-Instruct": { "max_tokens": 8192, "max_input_tokens": 8192, @@ -1152,6 +1204,105 @@ "mode": "chat", "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice" }, + "azure_ai/Phi-3.5-mini-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3.5-vision-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": true, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3.5-MoE-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000016, + "output_cost_per_token": 0.00000064, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-mini-4k-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-mini-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-small-8k-instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.0000006, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-small-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.0000006, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-medium-4k-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000017, + "output_cost_per_token": 0.00000068, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-medium-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000017, + "output_cost_per_token": 0.00000068, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, "azure_ai/cohere-rerank-v3-multilingual": { "max_tokens": 4096, "max_input_tokens": 4096, @@ -2212,16 +2363,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2238,16 +2389,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2264,16 +2415,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2373,17 +2524,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2437,17 +2588,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2469,17 +2620,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2501,7 +2652,7 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, @@ -2714,14 +2865,15 @@ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models" }, "vertex_ai/meta/llama-3.2-90b-vision-instruct-maas": { - "max_tokens": 8192, + "max_tokens": 128000, "max_input_tokens": 128000, - "max_output_tokens": 8192, + "max_output_tokens": 2048, "input_cost_per_token": 0.0, "output_cost_per_token": 0.0, "litellm_provider": "vertex_ai-llama_models", "mode": "chat", "supports_system_messages": true, + "supports_vision": true, "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas" }, "vertex_ai/mistral-large@latest": { From 896e2c2d5979aed538d9abea1d09b25b0aaca1f6 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 8 Nov 2024 17:15:29 +0530 Subject: [PATCH 41/67] build: update backup model prices map --- ...odel_prices_and_context_window_backup.json | 252 ++++++++++++++---- pyproject.toml | 4 +- 2 files changed, 206 insertions(+), 50 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index a37a431dc..cfc2cef72 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -80,6 +80,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -94,6 +95,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -108,7 +110,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-mini-2024-09-12": { @@ -122,7 +124,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-preview": { @@ -136,7 +138,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "o1-preview-2024-09-12": { @@ -150,7 +152,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "chatgpt-4o-latest": { @@ -190,6 +192,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -461,6 +464,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "ft:gpt-4o-mini-2024-07-18": { @@ -473,6 +477,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "ft:davinci-002": { @@ -652,7 +657,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-mini-2024-09-12": { @@ -666,7 +671,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-preview": { @@ -680,7 +685,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/o1-preview-2024-09-12": { @@ -694,7 +699,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true, + "supports_vision": false, "supports_prompt_caching": true }, "azure/gpt-4o": { @@ -721,6 +726,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/gpt-4o-2024-05-13": { @@ -746,6 +752,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/global-standard/gpt-4o-mini": { @@ -758,6 +765,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true }, "azure/gpt-4o-mini": { @@ -771,6 +779,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -785,6 +794,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, + "supports_response_schema": true, "supports_vision": true, "supports_prompt_caching": true }, @@ -1109,6 +1119,52 @@ "supports_function_calling": true, "mode": "chat" }, + "azure_ai/mistral-large-2407": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.000002, + "output_cost_per_token": 0.000006, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview" + }, + "azure_ai/ministral-3b": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000004, + "output_cost_per_token": 0.00000004, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.ministral-3b-2410-offer?tab=Overview" + }, + "azure_ai/Llama-3.2-11B-Vision-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000037, + "output_cost_per_token": 0.00000037, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "supports_vision": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-11b-vision-instruct-offer?tab=Overview" + }, + "azure_ai/Llama-3.2-90B-Vision-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "input_cost_per_token": 0.00000204, + "output_cost_per_token": 0.00000204, + "litellm_provider": "azure_ai", + "supports_function_calling": true, + "supports_vision": true, + "mode": "chat", + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-90b-vision-instruct-offer?tab=Overview" + }, "azure_ai/Meta-Llama-3-70B-Instruct": { "max_tokens": 8192, "max_input_tokens": 8192, @@ -1148,6 +1204,105 @@ "mode": "chat", "source":"https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice" }, + "azure_ai/Phi-3.5-mini-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3.5-vision-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": true, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3.5-MoE-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000016, + "output_cost_per_token": 0.00000064, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-mini-4k-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-mini-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000013, + "output_cost_per_token": 0.00000052, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-small-8k-instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.0000006, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-small-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000015, + "output_cost_per_token": 0.0000006, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-medium-4k-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000017, + "output_cost_per_token": 0.00000068, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, + "azure_ai/Phi-3-medium-128k-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00000017, + "output_cost_per_token": 0.00000068, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_vision": false, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/" + }, "azure_ai/cohere-rerank-v3-multilingual": { "max_tokens": 4096, "max_input_tokens": 4096, @@ -2208,16 +2363,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2234,16 +2389,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2260,16 +2415,16 @@ "input_cost_per_image": 0.00032875, "input_cost_per_audio_per_second": 0.00003125, "input_cost_per_video_per_second": 0.00032875, - "input_cost_per_token": 0.000000078125, - "input_cost_per_character": 0.0000003125, + "input_cost_per_token": 0.00000125, + "input_cost_per_character": 0.0000003125, "input_cost_per_image_above_128k_tokens": 0.0006575, "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, "input_cost_per_audio_per_second_above_128k_tokens": 0.0000625, - "input_cost_per_token_above_128k_tokens": 0.00000015625, - "input_cost_per_character_above_128k_tokens": 0.000000625, - "output_cost_per_token": 0.0000003125, + "input_cost_per_token_above_128k_tokens": 0.0000025, + "input_cost_per_character_above_128k_tokens": 0.000000625, + "output_cost_per_token": 0.000005, "output_cost_per_character": 0.00000125, - "output_cost_per_token_above_128k_tokens": 0.000000625, + "output_cost_per_token_above_128k_tokens": 0.00001, "output_cost_per_character_above_128k_tokens": 0.0000025, "litellm_provider": "vertex_ai-language-models", "mode": "chat", @@ -2369,17 +2524,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2433,17 +2588,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2465,17 +2620,17 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, "input_cost_per_image_above_128k_tokens": 0.00004, "input_cost_per_video_per_second_above_128k_tokens": 0.00004, "input_cost_per_audio_per_second_above_128k_tokens": 0.000004, - "output_cost_per_token": 0.0000000046875, - "output_cost_per_character": 0.00000001875, - "output_cost_per_token_above_128k_tokens": 0.000000009375, - "output_cost_per_character_above_128k_tokens": 0.0000000375, + "output_cost_per_token": 0.0000003, + "output_cost_per_character": 0.000000075, + "output_cost_per_token_above_128k_tokens": 0.0000006, + "output_cost_per_character_above_128k_tokens": 0.00000015, "litellm_provider": "vertex_ai-language-models", "mode": "chat", "supports_system_messages": true, @@ -2497,7 +2652,7 @@ "input_cost_per_image": 0.00002, "input_cost_per_video_per_second": 0.00002, "input_cost_per_audio_per_second": 0.000002, - "input_cost_per_token": 0.000000004688, + "input_cost_per_token": 0.000000075, "input_cost_per_character": 0.00000001875, "input_cost_per_token_above_128k_tokens": 0.000001, "input_cost_per_character_above_128k_tokens": 0.00000025, @@ -2710,14 +2865,15 @@ "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models" }, "vertex_ai/meta/llama-3.2-90b-vision-instruct-maas": { - "max_tokens": 8192, + "max_tokens": 128000, "max_input_tokens": 128000, - "max_output_tokens": 8192, + "max_output_tokens": 2048, "input_cost_per_token": 0.0, "output_cost_per_token": 0.0, "litellm_provider": "vertex_ai-llama_models", "mode": "chat", "supports_system_messages": true, + "supports_vision": true, "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas" }, "vertex_ai/mistral-large@latest": { @@ -3790,7 +3946,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-mini-2024-09-12": { "max_tokens": 65536, @@ -3802,7 +3958,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-preview": { "max_tokens": 32768, @@ -3814,7 +3970,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/o1-preview-2024-09-12": { "max_tokens": 32768, @@ -3826,7 +3982,7 @@ "mode": "chat", "supports_function_calling": true, "supports_parallel_function_calling": true, - "supports_vision": true + "supports_vision": false }, "openrouter/openai/gpt-4o": { "max_tokens": 4096, diff --git a/pyproject.toml b/pyproject.toml index 8681486e8..c083db2a2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.1" +version = "1.52.2" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.1" +version = "1.52.2" version_files = [ "pyproject.toml:^version" ] From 9f2053e4afb4a7c4977b6f79c79008889915f503 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Fri, 8 Nov 2024 19:14:16 +0530 Subject: [PATCH 42/67] ci(conftest.py): reset conftest.py for local_testing/ (#6657) * ci(conftest.py): reset conftest.py for local_testing/ check if that speeds up testing * fix: fix import * fix(conftest.py): fix import to check if hasattr * fix(conftest.py): ignore proxy reload if doesn't exist --- tests/local_testing/conftest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/local_testing/conftest.py b/tests/local_testing/conftest.py index 1421700c9..b3561d8a6 100644 --- a/tests/local_testing/conftest.py +++ b/tests/local_testing/conftest.py @@ -26,8 +26,11 @@ def setup_and_teardown(): from litellm import Router importlib.reload(litellm) + try: if hasattr(litellm, "proxy") and hasattr(litellm.proxy, "proxy_server"): + import litellm.proxy.proxy_server + importlib.reload(litellm.proxy.proxy_server) except Exception as e: print(f"Error reloading litellm.proxy.proxy_server: {e}") From 1bef6457c755c3901be40292cedee313ab0540d3 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Fri, 8 Nov 2024 19:34:22 +0530 Subject: [PATCH 43/67] Litellm dev 11 07 2024 (#6649) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(streaming_handler.py): save finish_reasons which might show up mid-stream (store last received one) Fixes https://github.com/BerriAI/litellm/issues/6104 * refactor: add readme to litellm_core_utils/ make it easier to navigate * fix(team_endpoints.py): return team id + object for invalid team in `/team/list` * fix(streaming_handler.py): remove import * fix(pattern_match_deployments.py): default to user input if unable to map based on wildcards (#6646) * fix(pattern_match_deployments.py): default to user input if unable to… (#6632) * fix(pattern_match_deployments.py): default to user input if unable to map based on wildcards * test: fix test * test: reset test name * test: update conftest to reload proxy server module between tests * ci(config.yml): move langfuse out of local_testing reduce ci/cd time * ci(config.yml): cleanup langfuse ci/cd tests * fix: update test to not use global proxy_server app module * ci: move caching to a separate test pipeline speed up ci pipeline * test: update conftest to check if proxy_server attr exists before reloading * build(conftest.py): don't block on inability to reload proxy_server * ci(config.yml): update caching unit test filter to work on 'cache' keyword as well * fix(encrypt_decrypt_utils.py): use function to get salt key * test: mark flaky test * test: handle anthropic overloaded errors * refactor: create separate ci/cd pipeline for proxy unit tests make ci/cd faster * ci(config.yml): add litellm_proxy_unit_testing to build_and_test jobs * ci(config.yml): generate prisma binaries for proxy unit tests * test: readd vertex_key.json * ci(config.yml): remove `-s` from proxy_unit_test cmd speed up test * ci: remove any 'debug' logging flag speed up ci pipeline * test: fix test * test(test_braintrust.py): rerun * test: add delay for braintrust test * chore: comment for maritalk (#6607) * Update gpt-4o-2024-08-06, and o1-preview, o1-mini models in model cost map (#6654) * Adding supports_response_schema to gpt-4o-2024-08-06 models * o1 models do not support vision --------- Co-authored-by: Emerson Gomes * (QOL improvement) add unit testing for all static_methods in litellm_logging.py (#6640) * add unit testing for standard logging payload * unit testing for static methods in litellm_logging * add code coverage check for litellm_logging * litellm_logging_code_coverage * test_get_final_response_obj * fix validate_redacted_message_span_attributes * test validate_redacted_message_span_attributes * (feat) log error class, function_name on prometheus service failure hook + only log DB related failures on DB service hook (#6650) * log error on prometheus service failure hook * use a more accurate function name for wrapper that handles logging db metrics * fix log_db_metrics * test_log_db_metrics_failure_error_types * fix linting * fix auth checks * Update several Azure AI models in model cost map (#6655) * Adding Azure Phi 3/3.5 models to model cost map * Update gpt-4o-mini models * Adding missing Azure Mistral models to model cost map * Adding Azure Llama3.2 models to model cost map * Fix Gemini-1.5-flash pricing * Fix Gemini-1.5-flash output pricing * Fix Gemini-1.5-pro prices * Fix Gemini-1.5-flash output prices * Correct gemini-1.5-pro prices * Correction on Vertex Llama3.2 entry --------- Co-authored-by: Emerson Gomes * fix(streaming_handler.py): fix linting error * test: remove duplicate test causes gemini ratelimit error --------- Co-authored-by: nobuo kawasaki Co-authored-by: Emerson Gomes Co-authored-by: Emerson Gomes Co-authored-by: Ishaan Jaff --- litellm/litellm_core_utils/README.md | 11 + litellm/litellm_core_utils/core_helpers.py | 27 + .../litellm_core_utils/default_encoding.py | 21 + litellm/litellm_core_utils/rules.py | 50 + .../litellm_core_utils/streaming_handler.py | 2020 ++++++++ litellm/litellm_core_utils/streaming_utils.py | 14 - litellm/llms/databricks/streaming_utils.py | 4 +- .../management_endpoints/team_endpoints.py | 20 +- litellm/utils.py | 4157 ++++++++--------- tests/local_testing/test_streaming.py | 80 + 10 files changed, 4253 insertions(+), 2151 deletions(-) create mode 100644 litellm/litellm_core_utils/README.md create mode 100644 litellm/litellm_core_utils/default_encoding.py create mode 100644 litellm/litellm_core_utils/rules.py create mode 100644 litellm/litellm_core_utils/streaming_handler.py delete mode 100644 litellm/litellm_core_utils/streaming_utils.py diff --git a/litellm/litellm_core_utils/README.md b/litellm/litellm_core_utils/README.md new file mode 100644 index 000000000..9cd351453 --- /dev/null +++ b/litellm/litellm_core_utils/README.md @@ -0,0 +1,11 @@ +## Folder Contents + +This folder contains general-purpose utilities that are used in multiple places in the codebase. + +Core files: +- `streaming_handler.py`: The core streaming logic + streaming related helper utils +- `core_helpers.py`: code used in `types/` - e.g. `map_finish_reason`. +- `exception_mapping_utils.py`: utils for mapping exceptions to openai-compatible error types. +- `default_encoding.py`: code for loading the default encoding (tiktoken) +- `get_llm_provider_logic.py`: code for inferring the LLM provider from a given model name. + diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py index cddca61ee..816dff81e 100644 --- a/litellm/litellm_core_utils/core_helpers.py +++ b/litellm/litellm_core_utils/core_helpers.py @@ -3,6 +3,8 @@ import os from typing import TYPE_CHECKING, Any, List, Literal, Optional, Tuple, Union +import httpx + from litellm._logging import verbose_logger if TYPE_CHECKING: @@ -99,3 +101,28 @@ def _get_parent_otel_span_from_kwargs( "Error in _get_parent_otel_span_from_kwargs: " + str(e) ) return None + + +def process_response_headers(response_headers: Union[httpx.Headers, dict]) -> dict: + from litellm.types.utils import OPENAI_RESPONSE_HEADERS + + openai_headers = {} + processed_headers = {} + additional_headers = {} + + for k, v in response_headers.items(): + if k in OPENAI_RESPONSE_HEADERS: # return openai-compatible headers + openai_headers[k] = v + if k.startswith( + "llm_provider-" + ): # return raw provider headers (incl. openai-compatible ones) + processed_headers[k] = v + else: + additional_headers["{}-{}".format("llm_provider", k)] = v + + additional_headers = { + **openai_headers, + **processed_headers, + **additional_headers, + } + return additional_headers diff --git a/litellm/litellm_core_utils/default_encoding.py b/litellm/litellm_core_utils/default_encoding.py new file mode 100644 index 000000000..e09332582 --- /dev/null +++ b/litellm/litellm_core_utils/default_encoding.py @@ -0,0 +1,21 @@ +import os + +import litellm + +try: + # New and recommended way to access resources + from importlib import resources + + filename = str(resources.files(litellm).joinpath("llms/tokenizers")) +except (ImportError, AttributeError): + # Old way to access resources, which setuptools deprecated some time ago + import pkg_resources # type: ignore + + filename = pkg_resources.resource_filename(__name__, "llms/tokenizers") + +os.environ["TIKTOKEN_CACHE_DIR"] = os.getenv( + "CUSTOM_TIKTOKEN_CACHE_DIR", filename +) # use local copy of tiktoken b/c of - https://github.com/BerriAI/litellm/issues/1071 +import tiktoken + +encoding = tiktoken.get_encoding("cl100k_base") diff --git a/litellm/litellm_core_utils/rules.py b/litellm/litellm_core_utils/rules.py new file mode 100644 index 000000000..beeb012d0 --- /dev/null +++ b/litellm/litellm_core_utils/rules.py @@ -0,0 +1,50 @@ +from typing import Optional + +import litellm + + +class Rules: + """ + Fail calls based on the input or llm api output + + Example usage: + import litellm + def my_custom_rule(input): # receives the model response + if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer + return False + return True + + litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call + + response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user", + "content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"]) + """ + + def __init__(self) -> None: + pass + + def pre_call_rules(self, input: str, model: str): + for rule in litellm.pre_call_rules: + if callable(rule): + decision = rule(input) + if decision is False: + raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore + return True + + def post_call_rules(self, input: Optional[str], model: str) -> bool: + if input is None: + return True + for rule in litellm.post_call_rules: + if callable(rule): + decision = rule(input) + if isinstance(decision, bool): + if decision is False: + raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore + elif isinstance(decision, dict): + decision_val = decision.get("decision", True) + decision_message = decision.get( + "message", "LLM Response failed post-call-rule check" + ) + if decision_val is False: + raise litellm.APIResponseValidationError(message=decision_message, llm_provider="", model=model) # type: ignore + return True diff --git a/litellm/litellm_core_utils/streaming_handler.py b/litellm/litellm_core_utils/streaming_handler.py new file mode 100644 index 000000000..5c18ff512 --- /dev/null +++ b/litellm/litellm_core_utils/streaming_handler.py @@ -0,0 +1,2020 @@ +import asyncio +import json +import threading +import time +import traceback +import uuid +from concurrent.futures import ThreadPoolExecutor +from typing import Any, Callable, List, Optional + +import httpx +from pydantic import BaseModel + +import litellm +from litellm import verbose_logger +from litellm.litellm_core_utils.redact_messages import ( + LiteLLMLoggingObject, + redact_message_input_output_from_logging, +) +from litellm.types.utils import Delta +from litellm.types.utils import GenericStreamingChunk as GChunk +from litellm.types.utils import ( + ModelResponse, + ModelResponseStream, + StreamingChoices, + Usage, +) + +from ..exceptions import OpenAIError +from .core_helpers import map_finish_reason, process_response_headers +from .default_encoding import encoding +from .exception_mapping_utils import exception_type +from .rules import Rules + +MAX_THREADS = 100 + +# Create a ThreadPoolExecutor +executor = ThreadPoolExecutor(max_workers=MAX_THREADS) + + +def print_verbose(print_statement): + try: + if litellm.set_verbose: + print(print_statement) # noqa + except Exception: + pass + + +class CustomStreamWrapper: + def __init__( + self, + completion_stream, + model, + logging_obj: Any, + custom_llm_provider: Optional[str] = None, + stream_options=None, + make_call: Optional[Callable] = None, + _response_headers: Optional[dict] = None, + ): + self.model = model + self.make_call = make_call + self.custom_llm_provider = custom_llm_provider + self.logging_obj: LiteLLMLoggingObject = logging_obj + self.completion_stream = completion_stream + self.sent_first_chunk = False + self.sent_last_chunk = False + self.system_fingerprint: Optional[str] = None + self.received_finish_reason: Optional[str] = None + self.intermittent_finish_reason: Optional[str] = ( + None # finish reasons that show up mid-stream + ) + self.special_tokens = [ + "<|assistant|>", + "<|system|>", + "<|user|>", + "", + "", + "<|im_end|>", + "<|im_start|>", + ] + self.holding_chunk = "" + self.complete_response = "" + self.response_uptil_now = "" + _model_info = ( + self.logging_obj.model_call_details.get("litellm_params", {}).get( + "model_info", {} + ) + or {} + ) + self._hidden_params = { + "model_id": (_model_info.get("id", None)), + } # returned as x-litellm-model-id response header in proxy + + self._hidden_params["additional_headers"] = process_response_headers( + _response_headers or {} + ) # GUARANTEE OPENAI HEADERS IN RESPONSE + + self._response_headers = _response_headers + self.response_id = None + self.logging_loop = None + self.rules = Rules() + self.stream_options = stream_options or getattr( + logging_obj, "stream_options", None + ) + self.messages = getattr(logging_obj, "messages", None) + self.sent_stream_usage = False + self.send_stream_usage = ( + True if self.check_send_stream_usage(self.stream_options) else False + ) + self.tool_call = False + self.chunks: List = ( + [] + ) # keep track of the returned chunks - used for calculating the input/output tokens for stream options + self.is_function_call = self.check_is_function_call(logging_obj=logging_obj) + + def __iter__(self): + return self + + def __aiter__(self): + return self + + def check_send_stream_usage(self, stream_options: Optional[dict]): + return ( + stream_options is not None + and stream_options.get("include_usage", False) is True + ) + + def check_is_function_call(self, logging_obj) -> bool: + if hasattr(logging_obj, "optional_params") and isinstance( + logging_obj.optional_params, dict + ): + if ( + "litellm_param_is_function_call" in logging_obj.optional_params + and logging_obj.optional_params["litellm_param_is_function_call"] + is True + ): + return True + + return False + + def process_chunk(self, chunk: str): + """ + NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta. + """ + try: + chunk = chunk.strip() + self.complete_response = self.complete_response.strip() + + if chunk.startswith(self.complete_response): + # Remove last_sent_chunk only if it appears at the start of the new chunk + chunk = chunk[len(self.complete_response) :] + + self.complete_response += chunk + return chunk + except Exception as e: + raise e + + def safety_checker(self) -> None: + """ + Fixes - https://github.com/BerriAI/litellm/issues/5158 + + if the model enters a loop and starts repeating the same chunk again, break out of loop and raise an internalservererror - allows for retries. + + Raises - InternalServerError, if LLM enters infinite loop while streaming + """ + if len(self.chunks) >= litellm.REPEATED_STREAMING_CHUNK_LIMIT: + # Get the last n chunks + last_chunks = self.chunks[-litellm.REPEATED_STREAMING_CHUNK_LIMIT :] + + # Extract the relevant content from the chunks + last_contents = [chunk.choices[0].delta.content for chunk in last_chunks] + + # Check if all extracted contents are identical + if all(content == last_contents[0] for content in last_contents): + if ( + last_contents[0] is not None + and isinstance(last_contents[0], str) + and len(last_contents[0]) > 2 + ): # ignore empty content - https://github.com/BerriAI/litellm/issues/5158#issuecomment-2287156946 + # All last n chunks are identical + raise litellm.InternalServerError( + message="The model is repeating the same chunk = {}.".format( + last_contents[0] + ), + model="", + llm_provider="", + ) + + def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): + """ + Output parse / special tokens for sagemaker + hf streaming. + """ + hold = False + if ( + self.custom_llm_provider != "huggingface" + and self.custom_llm_provider != "sagemaker" + ): + return hold, chunk + + if finish_reason: + for token in self.special_tokens: + if token in chunk: + chunk = chunk.replace(token, "") + return hold, chunk + + if self.sent_first_chunk is True: + return hold, chunk + + curr_chunk = self.holding_chunk + chunk + curr_chunk = curr_chunk.strip() + + for token in self.special_tokens: + if len(curr_chunk) < len(token) and curr_chunk in token: + hold = True + self.holding_chunk = curr_chunk + elif len(curr_chunk) >= len(token): + if token in curr_chunk: + self.holding_chunk = curr_chunk.replace(token, "") + hold = True + else: + pass + + if hold is False: # reset + self.holding_chunk = "" + return hold, curr_chunk + + def handle_anthropic_text_chunk(self, chunk): + """ + For old anthropic models - claude-1, claude-2. + + Claude-3 is handled from within Anthropic.py VIA ModelResponseIterator() + """ + str_line = chunk + if isinstance(chunk, bytes): # Handle binary data + str_line = chunk.decode("utf-8") # Convert bytes to string + text = "" + is_finished = False + finish_reason = None + if str_line.startswith("data:"): + data_json = json.loads(str_line[5:]) + type_chunk = data_json.get("type", None) + if type_chunk == "completion": + text = data_json.get("completion") + finish_reason = data_json.get("stop_reason") + if finish_reason is not None: + is_finished = True + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif "error" in str_line: + raise ValueError(f"Unable to parse response. Original response: {str_line}") + else: + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + + def handle_predibase_chunk(self, chunk): + try: + if not isinstance(chunk, str): + chunk = chunk.decode( + "utf-8" + ) # DO NOT REMOVE this: This is required for HF inference API + Streaming + text = "" + is_finished = False + finish_reason = "" + print_verbose(f"chunk: {chunk}") + if chunk.startswith("data:"): + data_json = json.loads(chunk[5:]) + print_verbose(f"data json: {data_json}") + if "token" in data_json and "text" in data_json["token"]: + text = data_json["token"]["text"] + if data_json.get("details", False) and data_json["details"].get( + "finish_reason", False + ): + is_finished = True + finish_reason = data_json["details"]["finish_reason"] + elif data_json.get( + "generated_text", False + ): # if full generated text exists, then stream is complete + text = "" # don't return the final bos token + is_finished = True + finish_reason = "stop" + elif data_json.get("error", False): + raise Exception(data_json.get("error")) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif "error" in chunk: + raise ValueError(chunk) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception as e: + raise e + + def handle_huggingface_chunk(self, chunk): + try: + if not isinstance(chunk, str): + chunk = chunk.decode( + "utf-8" + ) # DO NOT REMOVE this: This is required for HF inference API + Streaming + text = "" + is_finished = False + finish_reason = "" + print_verbose(f"chunk: {chunk}") + if chunk.startswith("data:"): + data_json = json.loads(chunk[5:]) + print_verbose(f"data json: {data_json}") + if "token" in data_json and "text" in data_json["token"]: + text = data_json["token"]["text"] + if data_json.get("details", False) and data_json["details"].get( + "finish_reason", False + ): + is_finished = True + finish_reason = data_json["details"]["finish_reason"] + elif data_json.get( + "generated_text", False + ): # if full generated text exists, then stream is complete + text = "" # don't return the final bos token + is_finished = True + finish_reason = "stop" + elif data_json.get("error", False): + raise Exception(data_json.get("error")) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif "error" in chunk: + raise ValueError(chunk) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception as e: + raise e + + def handle_ai21_chunk(self, chunk): # fake streaming + chunk = chunk.decode("utf-8") + data_json = json.loads(chunk) + try: + text = data_json["completions"][0]["data"]["text"] + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_maritalk_chunk(self, chunk): # fake streaming + chunk = chunk.decode("utf-8") + data_json = json.loads(chunk) + try: + text = data_json["answer"] + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_nlp_cloud_chunk(self, chunk): + text = "" + is_finished = False + finish_reason = "" + try: + if "dolphin" in self.model: + chunk = self.process_chunk(chunk=chunk) + else: + data_json = json.loads(chunk) + chunk = data_json["generated_text"] + text = chunk + if "[DONE]" in text: + text = text.replace("[DONE]", "") + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_aleph_alpha_chunk(self, chunk): + chunk = chunk.decode("utf-8") + data_json = json.loads(chunk) + try: + text = data_json["completions"][0]["completion"] + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_cohere_chunk(self, chunk): + chunk = chunk.decode("utf-8") + data_json = json.loads(chunk) + try: + text = "" + is_finished = False + finish_reason = "" + index: Optional[int] = None + if "index" in data_json: + index = data_json.get("index") + if "text" in data_json: + text = data_json["text"] + elif "is_finished" in data_json: + is_finished = data_json["is_finished"] + finish_reason = data_json["finish_reason"] + else: + raise Exception(data_json) + return { + "index": index, + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_cohere_chat_chunk(self, chunk): + chunk = chunk.decode("utf-8") + data_json = json.loads(chunk) + print_verbose(f"chunk: {chunk}") + try: + text = "" + is_finished = False + finish_reason = "" + if "text" in data_json: + text = data_json["text"] + elif "is_finished" in data_json and data_json["is_finished"] is True: + is_finished = data_json["is_finished"] + finish_reason = data_json["finish_reason"] + else: + return + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_azure_chunk(self, chunk): + is_finished = False + finish_reason = "" + text = "" + print_verbose(f"chunk: {chunk}") + if "data: [DONE]" in chunk: + text = "" + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif chunk.startswith("data:"): + data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): + try: + if len(data_json["choices"]) > 0: + delta = data_json["choices"][0]["delta"] + text = "" if delta is None else delta.get("content", "") + if data_json["choices"][0].get("finish_reason", None): + is_finished = True + finish_reason = data_json["choices"][0]["finish_reason"] + print_verbose( + f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" + ) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError( + f"Unable to parse response. Original response: {chunk}" + ) + elif "error" in chunk: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + else: + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + + def handle_replicate_chunk(self, chunk): + try: + text = "" + is_finished = False + finish_reason = "" + if "output" in chunk: + text = chunk["output"] + if "status" in chunk: + if chunk["status"] == "succeeded": + is_finished = True + finish_reason = "stop" + elif chunk.get("error", None): + raise Exception(chunk["error"]) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + except Exception: + raise ValueError(f"Unable to parse response. Original response: {chunk}") + + def handle_openai_chat_completion_chunk(self, chunk): + try: + print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") + str_line = chunk + text = "" + is_finished = False + finish_reason = None + logprobs = None + usage = None + if str_line and str_line.choices and len(str_line.choices) > 0: + if ( + str_line.choices[0].delta is not None + and str_line.choices[0].delta.content is not None + ): + text = str_line.choices[0].delta.content + else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai + pass + if str_line.choices[0].finish_reason: + is_finished = True + finish_reason = str_line.choices[0].finish_reason + + # checking for logprobs + if ( + hasattr(str_line.choices[0], "logprobs") + and str_line.choices[0].logprobs is not None + ): + logprobs = str_line.choices[0].logprobs + else: + logprobs = None + + usage = getattr(str_line, "usage", None) + + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + "logprobs": logprobs, + "original_chunk": str_line, + "usage": usage, + } + except Exception as e: + raise e + + def handle_azure_text_completion_chunk(self, chunk): + try: + print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") + text = "" + is_finished = False + finish_reason = None + choices = getattr(chunk, "choices", []) + if len(choices) > 0: + text = choices[0].text + if choices[0].finish_reason is not None: + is_finished = True + finish_reason = choices[0].finish_reason + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + + except Exception as e: + raise e + + def handle_openai_text_completion_chunk(self, chunk): + try: + print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") + text = "" + is_finished = False + finish_reason = None + usage = None + choices = getattr(chunk, "choices", []) + if len(choices) > 0: + text = choices[0].text + if choices[0].finish_reason is not None: + is_finished = True + finish_reason = choices[0].finish_reason + usage = getattr(chunk, "usage", None) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + "usage": usage, + } + + except Exception as e: + raise e + + def handle_baseten_chunk(self, chunk): + try: + chunk = chunk.decode("utf-8") + if len(chunk) > 0: + if chunk.startswith("data:"): + data_json = json.loads(chunk[5:]) + if "token" in data_json and "text" in data_json["token"]: + return data_json["token"]["text"] + else: + return "" + data_json = json.loads(chunk) + if "model_output" in data_json: + if ( + isinstance(data_json["model_output"], dict) + and "data" in data_json["model_output"] + and isinstance(data_json["model_output"]["data"], list) + ): + return data_json["model_output"]["data"][0] + elif isinstance(data_json["model_output"], str): + return data_json["model_output"] + elif "completion" in data_json and isinstance( + data_json["completion"], str + ): + return data_json["completion"] + else: + raise ValueError( + f"Unable to parse response. Original response: {chunk}" + ) + else: + return "" + else: + return "" + except Exception as e: + verbose_logger.exception( + "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format( + str(e) + ) + ) + return "" + + def handle_cloudlfare_stream(self, chunk): + try: + print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") + chunk = chunk.decode("utf-8") + str_line = chunk + text = "" + is_finished = False + finish_reason = None + + if "[DONE]" in chunk: + return {"text": text, "is_finished": True, "finish_reason": "stop"} + elif str_line.startswith("data:"): + data_json = json.loads(str_line[5:]) + print_verbose(f"delta content: {data_json}") + text = data_json["response"] + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + else: + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + + except Exception as e: + raise e + + def handle_ollama_stream(self, chunk): + try: + if isinstance(chunk, dict): + json_chunk = chunk + else: + json_chunk = json.loads(chunk) + if "error" in json_chunk: + raise Exception(f"Ollama Error - {json_chunk}") + + text = "" + is_finished = False + finish_reason = None + if json_chunk["done"] is True: + text = "" + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif json_chunk["response"]: + print_verbose(f"delta content: {json_chunk}") + text = json_chunk["response"] + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + else: + raise Exception(f"Ollama Error - {json_chunk}") + except Exception as e: + raise e + + def handle_ollama_chat_stream(self, chunk): + # for ollama_chat/ provider + try: + if isinstance(chunk, dict): + json_chunk = chunk + else: + json_chunk = json.loads(chunk) + if "error" in json_chunk: + raise Exception(f"Ollama Error - {json_chunk}") + + text = "" + is_finished = False + finish_reason = None + if json_chunk["done"] is True: + text = "" + is_finished = True + finish_reason = "stop" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + elif "message" in json_chunk: + print_verbose(f"delta content: {json_chunk}") + text = json_chunk["message"]["content"] + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + } + else: + raise Exception(f"Ollama Error - {json_chunk}") + except Exception as e: + raise e + + def handle_watsonx_stream(self, chunk): + try: + if isinstance(chunk, dict): + parsed_response = chunk + elif isinstance(chunk, (str, bytes)): + if isinstance(chunk, bytes): + chunk = chunk.decode("utf-8") + if "generated_text" in chunk: + response = chunk.replace("data: ", "").strip() + parsed_response = json.loads(response) + else: + return { + "text": "", + "is_finished": False, + "prompt_tokens": 0, + "completion_tokens": 0, + } + else: + print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") + raise ValueError( + f"Unable to parse response. Original response: {chunk}" + ) + results = parsed_response.get("results", []) + if len(results) > 0: + text = results[0].get("generated_text", "") + finish_reason = results[0].get("stop_reason") + is_finished = finish_reason != "not_finished" + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + "prompt_tokens": results[0].get("input_token_count", 0), + "completion_tokens": results[0].get("generated_token_count", 0), + } + return {"text": "", "is_finished": False} + except Exception as e: + raise e + + def handle_triton_stream(self, chunk): + try: + if isinstance(chunk, dict): + parsed_response = chunk + elif isinstance(chunk, (str, bytes)): + if isinstance(chunk, bytes): + chunk = chunk.decode("utf-8") + if "text_output" in chunk: + response = chunk.replace("data: ", "").strip() + parsed_response = json.loads(response) + else: + return { + "text": "", + "is_finished": False, + "prompt_tokens": 0, + "completion_tokens": 0, + } + else: + print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") + raise ValueError( + f"Unable to parse response. Original response: {chunk}" + ) + text = parsed_response.get("text_output", "") + finish_reason = parsed_response.get("stop_reason") + is_finished = parsed_response.get("is_finished", False) + return { + "text": text, + "is_finished": is_finished, + "finish_reason": finish_reason, + "prompt_tokens": parsed_response.get("input_token_count", 0), + "completion_tokens": parsed_response.get("generated_token_count", 0), + } + return {"text": "", "is_finished": False} + except Exception as e: + raise e + + def handle_clarifai_completion_chunk(self, chunk): + try: + if isinstance(chunk, dict): + parsed_response = chunk + elif isinstance(chunk, (str, bytes)): + if isinstance(chunk, bytes): + parsed_response = chunk.decode("utf-8") + else: + parsed_response = chunk + else: + raise ValueError("Unable to parse streaming chunk") + if isinstance(parsed_response, dict): + data_json = parsed_response + else: + data_json = json.loads(parsed_response) + text = ( + data_json.get("outputs", "")[0] + .get("data", "") + .get("text", "") + .get("raw", "") + ) + len( + encoding.encode( + data_json.get("outputs", "")[0] + .get("input", "") + .get("data", "") + .get("text", "") + .get("raw", "") + ) + ) + len(encoding.encode(text)) + return { + "text": text, + "is_finished": True, + } + except Exception as e: + verbose_logger.exception( + "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format( + str(e) + ) + ) + return "" + + def model_response_creator( + self, chunk: Optional[dict] = None, hidden_params: Optional[dict] = None + ): + _model = self.model + _received_llm_provider = self.custom_llm_provider + _logging_obj_llm_provider = self.logging_obj.model_call_details.get("custom_llm_provider", None) # type: ignore + if ( + _received_llm_provider == "openai" + and _received_llm_provider != _logging_obj_llm_provider + ): + _model = "{}/{}".format(_logging_obj_llm_provider, _model) + if chunk is None: + chunk = {} + else: + # pop model keyword + chunk.pop("model", None) + + model_response = ModelResponse( + stream=True, model=_model, stream_options=self.stream_options, **chunk + ) + if self.response_id is not None: + model_response.id = self.response_id + else: + self.response_id = model_response.id # type: ignore + if self.system_fingerprint is not None: + model_response.system_fingerprint = self.system_fingerprint + if hidden_params is not None: + model_response._hidden_params = hidden_params + model_response._hidden_params["custom_llm_provider"] = _logging_obj_llm_provider + model_response._hidden_params["created_at"] = time.time() + model_response._hidden_params = { + **model_response._hidden_params, + **self._hidden_params, + } + + if ( + len(model_response.choices) > 0 + and getattr(model_response.choices[0], "delta") is not None + ): + # do nothing, if object instantiated + pass + else: + model_response.choices = [StreamingChoices(finish_reason=None)] + return model_response + + def is_delta_empty(self, delta: Delta) -> bool: + is_empty = True + if delta.content is not None: + is_empty = False + elif delta.tool_calls is not None: + is_empty = False + elif delta.function_call is not None: + is_empty = False + return is_empty + + def return_processed_chunk_logic( # noqa + self, + completion_obj: dict, + model_response: ModelResponseStream, + response_obj: dict, + ): + + print_verbose( + f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}" + ) + if ( + "content" in completion_obj + and ( + isinstance(completion_obj["content"], str) + and len(completion_obj["content"]) > 0 + ) + or ( + "tool_calls" in completion_obj + and completion_obj["tool_calls"] is not None + and len(completion_obj["tool_calls"]) > 0 + ) + or ( + "function_call" in completion_obj + and completion_obj["function_call"] is not None + ) + ): # cannot set content of an OpenAI Object to be an empty string + self.safety_checker() + hold, model_response_str = self.check_special_tokens( + chunk=completion_obj["content"], + finish_reason=model_response.choices[0].finish_reason, + ) # filter out bos/eos tokens from openai-compatible hf endpoints + print_verbose(f"hold - {hold}, model_response_str - {model_response_str}") + if hold is False: + ## check if openai/azure chunk + original_chunk = response_obj.get("original_chunk", None) + if original_chunk: + model_response.id = original_chunk.id + self.response_id = original_chunk.id + if len(original_chunk.choices) > 0: + choices = [] + for choice in original_chunk.choices: + try: + if isinstance(choice, BaseModel): + choice_json = choice.model_dump() + choice_json.pop( + "finish_reason", None + ) # for mistral etc. which return a value in their last chunk (not-openai compatible). + print_verbose(f"choice_json: {choice_json}") + choices.append(StreamingChoices(**choice_json)) + except Exception: + choices.append(StreamingChoices()) + print_verbose(f"choices in streaming: {choices}") + setattr(model_response, "choices", choices) + else: + return + model_response.system_fingerprint = ( + original_chunk.system_fingerprint + ) + setattr( + model_response, + "citations", + getattr(original_chunk, "citations", None), + ) + print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") + if self.sent_first_chunk is False: + model_response.choices[0].delta["role"] = "assistant" + self.sent_first_chunk = True + elif self.sent_first_chunk is True and hasattr( + model_response.choices[0].delta, "role" + ): + _initial_delta = model_response.choices[0].delta.model_dump() + _initial_delta.pop("role", None) + model_response.choices[0].delta = Delta(**_initial_delta) + print_verbose( + f"model_response.choices[0].delta: {model_response.choices[0].delta}" + ) + else: + ## else + completion_obj["content"] = model_response_str + if self.sent_first_chunk is False: + completion_obj["role"] = "assistant" + self.sent_first_chunk = True + + model_response.choices[0].delta = Delta(**completion_obj) + _index: Optional[int] = completion_obj.get("index") + if _index is not None: + model_response.choices[0].index = _index + print_verbose(f"returning model_response: {model_response}") + return model_response + else: + return + elif self.received_finish_reason is not None: + if self.sent_last_chunk is True: + # Bedrock returns the guardrail trace in the last chunk - we want to return this here + if self.custom_llm_provider == "bedrock" and "trace" in model_response: + return model_response + + # Default - return StopIteration + raise StopIteration + # flush any remaining holding chunk + if len(self.holding_chunk) > 0: + if model_response.choices[0].delta.content is None: + model_response.choices[0].delta.content = self.holding_chunk + else: + model_response.choices[0].delta.content = ( + self.holding_chunk + model_response.choices[0].delta.content + ) + self.holding_chunk = "" + # if delta is None + _is_delta_empty = self.is_delta_empty(delta=model_response.choices[0].delta) + + if _is_delta_empty: + # get any function call arguments + model_response.choices[0].finish_reason = map_finish_reason( + finish_reason=self.received_finish_reason + ) # ensure consistent output to openai + + self.sent_last_chunk = True + + return model_response + elif ( + model_response.choices[0].delta.tool_calls is not None + or model_response.choices[0].delta.function_call is not None + ): + if self.sent_first_chunk is False: + model_response.choices[0].delta["role"] = "assistant" + self.sent_first_chunk = True + return model_response + elif ( + len(model_response.choices) > 0 + and hasattr(model_response.choices[0].delta, "audio") + and model_response.choices[0].delta.audio is not None + ): + return model_response + else: + if hasattr(model_response, "usage"): + self.chunks.append(model_response) + return + + def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 + model_response = self.model_response_creator() + response_obj: dict = {} + try: + # return this for all models + completion_obj = {"content": ""} + from litellm.types.utils import GenericStreamingChunk as GChunk + + if ( + isinstance(chunk, dict) + and generic_chunk_has_all_required_fields( + chunk=chunk + ) # check if chunk is a generic streaming chunk + ) or ( + self.custom_llm_provider + and ( + self.custom_llm_provider == "anthropic" + or self.custom_llm_provider in litellm._custom_providers + ) + ): + + if self.received_finish_reason is not None: + if "provider_specific_fields" not in chunk: + raise StopIteration + anthropic_response_obj: GChunk = chunk + completion_obj["content"] = anthropic_response_obj["text"] + if anthropic_response_obj["is_finished"]: + self.received_finish_reason = anthropic_response_obj[ + "finish_reason" + ] + + if anthropic_response_obj["finish_reason"]: + self.intermittent_finish_reason = anthropic_response_obj[ + "finish_reason" + ] + + if anthropic_response_obj["usage"] is not None: + model_response.usage = litellm.Usage( + **anthropic_response_obj["usage"] + ) + + if ( + "tool_use" in anthropic_response_obj + and anthropic_response_obj["tool_use"] is not None + ): + completion_obj["tool_calls"] = [anthropic_response_obj["tool_use"]] + + if ( + "provider_specific_fields" in anthropic_response_obj + and anthropic_response_obj["provider_specific_fields"] is not None + ): + for key, value in anthropic_response_obj[ + "provider_specific_fields" + ].items(): + setattr(model_response, key, value) + + response_obj = anthropic_response_obj + elif ( + self.custom_llm_provider + and self.custom_llm_provider == "anthropic_text" + ): + response_obj = self.handle_anthropic_text_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": + response_obj = self.handle_clarifai_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.model == "replicate" or self.custom_llm_provider == "replicate": + response_obj = self.handle_replicate_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": + response_obj = self.handle_huggingface_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider and self.custom_llm_provider == "predibase": + response_obj = self.handle_predibase_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif ( + self.custom_llm_provider and self.custom_llm_provider == "baseten" + ): # baseten doesn't provide streaming + completion_obj["content"] = self.handle_baseten_chunk(chunk) + elif ( + self.custom_llm_provider and self.custom_llm_provider == "ai21" + ): # ai21 doesn't provide streaming + response_obj = self.handle_ai21_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": + response_obj = self.handle_maritalk_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider and self.custom_llm_provider == "vllm": + completion_obj["content"] = chunk[0].outputs[0].text + elif ( + self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" + ): # aleph alpha doesn't provide streaming + response_obj = self.handle_aleph_alpha_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "nlp_cloud": + try: + response_obj = self.handle_nlp_cloud_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + except Exception as e: + if self.received_finish_reason: + raise e + else: + if self.sent_first_chunk is False: + raise Exception("An unknown error occurred with the stream") + self.received_finish_reason = "stop" + elif self.custom_llm_provider == "vertex_ai": + import proto # type: ignore + + if hasattr(chunk, "candidates") is True: + try: + try: + completion_obj["content"] = chunk.text + except Exception as e: + if "Part has no text." in str(e): + ## check for function calling + function_call = ( + chunk.candidates[0].content.parts[0].function_call + ) + + args_dict = {} + + # Check if it's a RepeatedComposite instance + for key, val in function_call.args.items(): + if isinstance( + val, + proto.marshal.collections.repeated.RepeatedComposite, + ): + # If so, convert to list + args_dict[key] = [v for v in val] + else: + args_dict[key] = val + + try: + args_str = json.dumps(args_dict) + except Exception as e: + raise e + _delta_obj = litellm.utils.Delta( + content=None, + tool_calls=[ + { + "id": f"call_{str(uuid.uuid4())}", + "function": { + "arguments": args_str, + "name": function_call.name, + }, + "type": "function", + } + ], + ) + _streaming_response = StreamingChoices(delta=_delta_obj) + _model_response = ModelResponse(stream=True) + _model_response.choices = [_streaming_response] + response_obj = {"original_chunk": _model_response} + else: + raise e + if ( + hasattr(chunk.candidates[0], "finish_reason") + and chunk.candidates[0].finish_reason.name + != "FINISH_REASON_UNSPECIFIED" + ): # every non-final chunk in vertex ai has this + self.received_finish_reason = chunk.candidates[ + 0 + ].finish_reason.name + except Exception: + if chunk.candidates[0].finish_reason.name == "SAFETY": + raise Exception( + f"The response was blocked by VertexAI. {str(chunk)}" + ) + else: + completion_obj["content"] = str(chunk) + elif self.custom_llm_provider == "cohere": + response_obj = self.handle_cohere_chunk(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "cohere_chat": + response_obj = self.handle_cohere_chat_chunk(chunk) + if response_obj is None: + return + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + + elif self.custom_llm_provider == "petals": + if len(self.completion_stream) == 0: + if self.received_finish_reason is not None: + raise StopIteration + else: + self.received_finish_reason = "stop" + chunk_size = 30 + new_chunk = self.completion_stream[:chunk_size] + completion_obj["content"] = new_chunk + self.completion_stream = self.completion_stream[chunk_size:] + elif self.custom_llm_provider == "palm": + # fake streaming + response_obj = {} + if len(self.completion_stream) == 0: + if self.received_finish_reason is not None: + raise StopIteration + else: + self.received_finish_reason = "stop" + chunk_size = 30 + new_chunk = self.completion_stream[:chunk_size] + completion_obj["content"] = new_chunk + self.completion_stream = self.completion_stream[chunk_size:] + elif self.custom_llm_provider == "ollama": + response_obj = self.handle_ollama_stream(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "ollama_chat": + response_obj = self.handle_ollama_chat_stream(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "cloudflare": + response_obj = self.handle_cloudlfare_stream(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "watsonx": + response_obj = self.handle_watsonx_stream(chunk) + completion_obj["content"] = response_obj["text"] + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "triton": + response_obj = self.handle_triton_stream(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "text-completion-openai": + response_obj = self.handle_openai_text_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + if response_obj["usage"] is not None: + model_response.usage = litellm.Usage( + prompt_tokens=response_obj["usage"].prompt_tokens, + completion_tokens=response_obj["usage"].completion_tokens, + total_tokens=response_obj["usage"].total_tokens, + ) + elif self.custom_llm_provider == "text-completion-codestral": + response_obj = litellm.MistralTextCompletionConfig()._chunk_parser( + chunk + ) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + if "usage" in response_obj is not None: + model_response.usage = litellm.Usage( + prompt_tokens=response_obj["usage"].prompt_tokens, + completion_tokens=response_obj["usage"].completion_tokens, + total_tokens=response_obj["usage"].total_tokens, + ) + elif self.custom_llm_provider == "azure_text": + response_obj = self.handle_azure_text_completion_chunk(chunk) + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + elif self.custom_llm_provider == "cached_response": + response_obj = { + "text": chunk.choices[0].delta.content, + "is_finished": True, + "finish_reason": chunk.choices[0].finish_reason, + "original_chunk": chunk, + "tool_calls": ( + chunk.choices[0].delta.tool_calls + if hasattr(chunk.choices[0].delta, "tool_calls") + else None + ), + } + + completion_obj["content"] = response_obj["text"] + if response_obj["tool_calls"] is not None: + completion_obj["tool_calls"] = response_obj["tool_calls"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if hasattr(chunk, "id"): + model_response.id = chunk.id + self.response_id = chunk.id + if hasattr(chunk, "system_fingerprint"): + self.system_fingerprint = chunk.system_fingerprint + if response_obj["is_finished"]: + self.received_finish_reason = response_obj["finish_reason"] + else: # openai / azure chat model + if self.custom_llm_provider == "azure": + if hasattr(chunk, "model"): + # for azure, we need to pass the model from the orignal chunk + self.model = chunk.model + response_obj = self.handle_openai_chat_completion_chunk(chunk) + if response_obj is None: + return + completion_obj["content"] = response_obj["text"] + print_verbose(f"completion obj content: {completion_obj['content']}") + if response_obj["is_finished"]: + if response_obj["finish_reason"] == "error": + raise Exception( + "{} raised a streaming error - finish_reason: error, no content string given. Received Chunk={}".format( + self.custom_llm_provider, response_obj + ) + ) + self.received_finish_reason = response_obj["finish_reason"] + if response_obj.get("original_chunk", None) is not None: + if hasattr(response_obj["original_chunk"], "id"): + model_response.id = response_obj["original_chunk"].id + self.response_id = model_response.id + if hasattr(response_obj["original_chunk"], "system_fingerprint"): + model_response.system_fingerprint = response_obj[ + "original_chunk" + ].system_fingerprint + self.system_fingerprint = response_obj[ + "original_chunk" + ].system_fingerprint + if response_obj["logprobs"] is not None: + model_response.choices[0].logprobs = response_obj["logprobs"] + + if response_obj["usage"] is not None: + if isinstance(response_obj["usage"], dict): + model_response.usage = litellm.Usage( + prompt_tokens=response_obj["usage"].get( + "prompt_tokens", None + ) + or None, + completion_tokens=response_obj["usage"].get( + "completion_tokens", None + ) + or None, + total_tokens=response_obj["usage"].get("total_tokens", None) + or None, + ) + elif isinstance(response_obj["usage"], BaseModel): + model_response.usage = litellm.Usage( + **response_obj["usage"].model_dump() + ) + + model_response.model = self.model + print_verbose( + f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" + ) + ## FUNCTION CALL PARSING + if ( + response_obj is not None + and response_obj.get("original_chunk", None) is not None + ): # function / tool calling branch - only set for openai/azure compatible endpoints + # enter this branch when no content has been passed in response + original_chunk = response_obj.get("original_chunk", None) + model_response.id = original_chunk.id + self.response_id = original_chunk.id + if original_chunk.choices and len(original_chunk.choices) > 0: + delta = original_chunk.choices[0].delta + if delta is not None and ( + delta.function_call is not None or delta.tool_calls is not None + ): + try: + model_response.system_fingerprint = ( + original_chunk.system_fingerprint + ) + ## AZURE - check if arguments is not None + if ( + original_chunk.choices[0].delta.function_call + is not None + ): + if ( + getattr( + original_chunk.choices[0].delta.function_call, + "arguments", + ) + is None + ): + original_chunk.choices[ + 0 + ].delta.function_call.arguments = "" + elif original_chunk.choices[0].delta.tool_calls is not None: + if isinstance( + original_chunk.choices[0].delta.tool_calls, list + ): + for t in original_chunk.choices[0].delta.tool_calls: + if hasattr(t, "functions") and hasattr( + t.functions, "arguments" + ): + if ( + getattr( + t.function, + "arguments", + ) + is None + ): + t.function.arguments = "" + _json_delta = delta.model_dump() + print_verbose(f"_json_delta: {_json_delta}") + if "role" not in _json_delta or _json_delta["role"] is None: + _json_delta["role"] = ( + "assistant" # mistral's api returns role as None + ) + if "tool_calls" in _json_delta and isinstance( + _json_delta["tool_calls"], list + ): + for tool in _json_delta["tool_calls"]: + if ( + isinstance(tool, dict) + and "function" in tool + and isinstance(tool["function"], dict) + and ("type" not in tool or tool["type"] is None) + ): + # if function returned but type set to None - mistral's api returns type: None + tool["type"] = "function" + model_response.choices[0].delta = Delta(**_json_delta) + except Exception as e: + verbose_logger.exception( + "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format( + str(e) + ) + ) + model_response.choices[0].delta = Delta() + elif ( + delta is not None and getattr(delta, "audio", None) is not None + ): + model_response.choices[0].delta.audio = delta.audio + else: + try: + delta = ( + dict() + if original_chunk.choices[0].delta is None + else dict(original_chunk.choices[0].delta) + ) + print_verbose(f"original delta: {delta}") + model_response.choices[0].delta = Delta(**delta) + print_verbose( + f"new delta: {model_response.choices[0].delta}" + ) + except Exception: + model_response.choices[0].delta = Delta() + else: + if ( + self.stream_options is not None + and self.stream_options["include_usage"] is True + ): + return model_response + return + print_verbose( + f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" + ) + print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") + + ## CHECK FOR TOOL USE + if "tool_calls" in completion_obj and len(completion_obj["tool_calls"]) > 0: + if self.is_function_call is True: # user passed in 'functions' param + completion_obj["function_call"] = completion_obj["tool_calls"][0][ + "function" + ] + completion_obj["tool_calls"] = None + + self.tool_call = True + + ## RETURN ARG + return self.return_processed_chunk_logic( + completion_obj=completion_obj, + model_response=model_response, # type: ignore + response_obj=response_obj, + ) + + except StopIteration: + raise StopIteration + except Exception as e: + traceback.format_exc() + e.message = str(e) + raise exception_type( + model=self.model, + custom_llm_provider=self.custom_llm_provider, + original_exception=e, + ) + + def set_logging_event_loop(self, loop): + """ + import litellm, asyncio + + loop = asyncio.get_event_loop() # 👈 gets the current event loop + + response = litellm.completion(.., stream=True) + + response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging + + for chunk in response: + ... + """ + self.logging_loop = loop + + def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool): + """ + Runs success logging in a thread and adds the response to the cache + """ + if litellm.disable_streaming_logging is True: + """ + [NOT RECOMMENDED] + Set this via `litellm.disable_streaming_logging = True`. + + Disables streaming logging. + """ + return + ## ASYNC LOGGING + # Create an event loop for the new thread + if self.logging_loop is not None: + future = asyncio.run_coroutine_threadsafe( + self.logging_obj.async_success_handler( + processed_chunk, None, None, cache_hit + ), + loop=self.logging_loop, + ) + future.result() + else: + asyncio.run( + self.logging_obj.async_success_handler( + processed_chunk, None, None, cache_hit + ) + ) + ## SYNC LOGGING + self.logging_obj.success_handler(processed_chunk, None, None, cache_hit) + + ## Sync store in cache + if self.logging_obj._llm_caching_handler is not None: + self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( + processed_chunk + ) + + def finish_reason_handler(self): + model_response = self.model_response_creator() + _finish_reason = self.received_finish_reason or self.intermittent_finish_reason + if _finish_reason is not None: + model_response.choices[0].finish_reason = _finish_reason + else: + model_response.choices[0].finish_reason = "stop" + + ## if tool use + if ( + model_response.choices[0].finish_reason == "stop" and self.tool_call + ): # don't overwrite for other - potential error finish reasons + model_response.choices[0].finish_reason = "tool_calls" + return model_response + + def __next__(self): # noqa: PLR0915 + cache_hit = False + if ( + self.custom_llm_provider is not None + and self.custom_llm_provider == "cached_response" + ): + cache_hit = True + try: + if self.completion_stream is None: + self.fetch_sync_stream() + while True: + if ( + isinstance(self.completion_stream, str) + or isinstance(self.completion_stream, bytes) + or isinstance(self.completion_stream, ModelResponse) + ): + chunk = self.completion_stream + else: + chunk = next(self.completion_stream) + if chunk is not None and chunk != b"": + print_verbose( + f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}" + ) + response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk) + print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") + + if response is None: + continue + ## LOGGING + threading.Thread( + target=self.run_success_logging_and_cache_storage, + args=(response, cache_hit), + ).start() # log response + choice = response.choices[0] + if isinstance(choice, StreamingChoices): + self.response_uptil_now += choice.delta.get("content", "") or "" + else: + self.response_uptil_now += "" + self.rules.post_call_rules( + input=self.response_uptil_now, model=self.model + ) + # HANDLE STREAM OPTIONS + self.chunks.append(response) + if hasattr( + response, "usage" + ): # remove usage from chunk, only send on final chunk + # Convert the object to a dictionary + obj_dict = response.dict() + + # Remove an attribute (e.g., 'attr2') + if "usage" in obj_dict: + del obj_dict["usage"] + + # Create a new object without the removed attribute + response = self.model_response_creator( + chunk=obj_dict, hidden_params=response._hidden_params + ) + # add usage as hidden param + if self.sent_last_chunk is True and self.stream_options is None: + usage = calculate_total_usage(chunks=self.chunks) + response._hidden_params["usage"] = usage + # RETURN RESULT + return response + + except StopIteration: + if self.sent_last_chunk is True: + complete_streaming_response = litellm.stream_chunk_builder( + chunks=self.chunks, messages=self.messages + ) + response = self.model_response_creator() + if complete_streaming_response is not None: + setattr( + response, + "usage", + getattr(complete_streaming_response, "usage"), + ) + + ## LOGGING + threading.Thread( + target=self.logging_obj.success_handler, + args=(response, None, None, cache_hit), + ).start() # log response + + if self.sent_stream_usage is False and self.send_stream_usage is True: + self.sent_stream_usage = True + return response + raise # Re-raise StopIteration + else: + self.sent_last_chunk = True + processed_chunk = self.finish_reason_handler() + if self.stream_options is None: # add usage as hidden param + usage = calculate_total_usage(chunks=self.chunks) + processed_chunk._hidden_params["usage"] = usage + ## LOGGING + threading.Thread( + target=self.run_success_logging_and_cache_storage, + args=(processed_chunk, cache_hit), + ).start() # log response + return processed_chunk + except Exception as e: + traceback_exception = traceback.format_exc() + # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated + threading.Thread( + target=self.logging_obj.failure_handler, args=(e, traceback_exception) + ).start() + if isinstance(e, OpenAIError): + raise e + else: + raise exception_type( + model=self.model, + original_exception=e, + custom_llm_provider=self.custom_llm_provider, + ) + + def fetch_sync_stream(self): + if self.completion_stream is None and self.make_call is not None: + # Call make_call to get the completion stream + self.completion_stream = self.make_call(client=litellm.module_level_client) + self._stream_iter = self.completion_stream.__iter__() + + return self.completion_stream + + async def fetch_stream(self): + if self.completion_stream is None and self.make_call is not None: + # Call make_call to get the completion stream + self.completion_stream = await self.make_call( + client=litellm.module_level_aclient + ) + self._stream_iter = self.completion_stream.__aiter__() + + return self.completion_stream + + async def __anext__(self): # noqa: PLR0915 + cache_hit = False + if ( + self.custom_llm_provider is not None + and self.custom_llm_provider == "cached_response" + ): + cache_hit = True + try: + if self.completion_stream is None: + await self.fetch_stream() + + if ( + self.custom_llm_provider == "openai" + or self.custom_llm_provider == "azure" + or self.custom_llm_provider == "custom_openai" + or self.custom_llm_provider == "text-completion-openai" + or self.custom_llm_provider == "text-completion-codestral" + or self.custom_llm_provider == "azure_text" + or self.custom_llm_provider == "anthropic" + or self.custom_llm_provider == "anthropic_text" + or self.custom_llm_provider == "huggingface" + or self.custom_llm_provider == "ollama" + or self.custom_llm_provider == "ollama_chat" + or self.custom_llm_provider == "vertex_ai" + or self.custom_llm_provider == "vertex_ai_beta" + or self.custom_llm_provider == "sagemaker" + or self.custom_llm_provider == "sagemaker_chat" + or self.custom_llm_provider == "gemini" + or self.custom_llm_provider == "replicate" + or self.custom_llm_provider == "cached_response" + or self.custom_llm_provider == "predibase" + or self.custom_llm_provider == "databricks" + or self.custom_llm_provider == "bedrock" + or self.custom_llm_provider == "triton" + or self.custom_llm_provider == "watsonx" + or self.custom_llm_provider in litellm.openai_compatible_endpoints + or self.custom_llm_provider in litellm._custom_providers + ): + async for chunk in self.completion_stream: + if chunk == "None" or chunk is None: + raise Exception + elif ( + self.custom_llm_provider == "gemini" + and hasattr(chunk, "parts") + and len(chunk.parts) == 0 + ): + continue + # chunk_creator() does logging/stream chunk building. We need to let it know its being called in_async_func, so we don't double add chunks. + # __anext__ also calls async_success_handler, which does logging + print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") + + processed_chunk: Optional[ModelResponse] = self.chunk_creator( + chunk=chunk + ) + print_verbose( + f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}" + ) + if processed_chunk is None: + continue + ## LOGGING + ## LOGGING + executor.submit( + self.logging_obj.success_handler, + result=processed_chunk, + start_time=None, + end_time=None, + cache_hit=cache_hit, + ) + + asyncio.create_task( + self.logging_obj.async_success_handler( + processed_chunk, cache_hit=cache_hit + ) + ) + + if self.logging_obj._llm_caching_handler is not None: + asyncio.create_task( + self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( + processed_chunk=processed_chunk, + ) + ) + + choice = processed_chunk.choices[0] + if isinstance(choice, StreamingChoices): + self.response_uptil_now += choice.delta.get("content", "") or "" + else: + self.response_uptil_now += "" + self.rules.post_call_rules( + input=self.response_uptil_now, model=self.model + ) + self.chunks.append(processed_chunk) + if hasattr( + processed_chunk, "usage" + ): # remove usage from chunk, only send on final chunk + # Convert the object to a dictionary + obj_dict = processed_chunk.dict() + + # Remove an attribute (e.g., 'attr2') + if "usage" in obj_dict: + del obj_dict["usage"] + + # Create a new object without the removed attribute + processed_chunk = self.model_response_creator(chunk=obj_dict) + print_verbose(f"final returned processed chunk: {processed_chunk}") + return processed_chunk + raise StopAsyncIteration + else: # temporary patch for non-aiohttp async calls + # example - boto3 bedrock llms + while True: + if isinstance(self.completion_stream, str) or isinstance( + self.completion_stream, bytes + ): + chunk = self.completion_stream + else: + chunk = next(self.completion_stream) + if chunk is not None and chunk != b"": + print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") + processed_chunk: Optional[ModelResponse] = self.chunk_creator( + chunk=chunk + ) + print_verbose( + f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}" + ) + if processed_chunk is None: + continue + ## LOGGING + threading.Thread( + target=self.logging_obj.success_handler, + args=(processed_chunk, None, None, cache_hit), + ).start() # log processed_chunk + asyncio.create_task( + self.logging_obj.async_success_handler( + processed_chunk, cache_hit=cache_hit + ) + ) + + choice = processed_chunk.choices[0] + if isinstance(choice, StreamingChoices): + self.response_uptil_now += ( + choice.delta.get("content", "") or "" + ) + else: + self.response_uptil_now += "" + self.rules.post_call_rules( + input=self.response_uptil_now, model=self.model + ) + # RETURN RESULT + self.chunks.append(processed_chunk) + return processed_chunk + except (StopAsyncIteration, StopIteration): + if self.sent_last_chunk is True: + # log the final chunk with accurate streaming values + complete_streaming_response = litellm.stream_chunk_builder( + chunks=self.chunks, messages=self.messages + ) + response = self.model_response_creator() + if complete_streaming_response is not None: + setattr( + response, + "usage", + getattr(complete_streaming_response, "usage"), + ) + ## LOGGING + threading.Thread( + target=self.logging_obj.success_handler, + args=(response, None, None, cache_hit), + ).start() # log response + asyncio.create_task( + self.logging_obj.async_success_handler( + response, cache_hit=cache_hit + ) + ) + if self.sent_stream_usage is False and self.send_stream_usage is True: + self.sent_stream_usage = True + return response + raise StopAsyncIteration # Re-raise StopIteration + else: + self.sent_last_chunk = True + processed_chunk = self.finish_reason_handler() + ## LOGGING + threading.Thread( + target=self.logging_obj.success_handler, + args=(processed_chunk, None, None, cache_hit), + ).start() # log response + asyncio.create_task( + self.logging_obj.async_success_handler( + processed_chunk, cache_hit=cache_hit + ) + ) + return processed_chunk + except httpx.TimeoutException as e: # if httpx read timeout error occues + traceback_exception = traceback.format_exc() + ## ADD DEBUG INFORMATION - E.G. LITELLM REQUEST TIMEOUT + traceback_exception += "\nLiteLLM Default Request Timeout - {}".format( + litellm.request_timeout + ) + if self.logging_obj is not None: + ## LOGGING + threading.Thread( + target=self.logging_obj.failure_handler, + args=(e, traceback_exception), + ).start() # log response + # Handle any exceptions that might occur during streaming + asyncio.create_task( + self.logging_obj.async_failure_handler(e, traceback_exception) + ) + raise e + except Exception as e: + traceback_exception = traceback.format_exc() + if self.logging_obj is not None: + ## LOGGING + threading.Thread( + target=self.logging_obj.failure_handler, + args=(e, traceback_exception), + ).start() # log response + # Handle any exceptions that might occur during streaming + asyncio.create_task( + self.logging_obj.async_failure_handler(e, traceback_exception) # type: ignore + ) + ## Map to OpenAI Exception + raise exception_type( + model=self.model, + custom_llm_provider=self.custom_llm_provider, + original_exception=e, + completion_kwargs={}, + extra_kwargs={}, + ) + + +def calculate_total_usage(chunks: List[ModelResponse]) -> Usage: + """Assume most recent usage chunk has total usage uptil then.""" + prompt_tokens: int = 0 + completion_tokens: int = 0 + for chunk in chunks: + if "usage" in chunk: + if "prompt_tokens" in chunk["usage"]: + prompt_tokens = chunk["usage"].get("prompt_tokens", 0) or 0 + if "completion_tokens" in chunk["usage"]: + completion_tokens = chunk["usage"].get("completion_tokens", 0) or 0 + + returned_usage_chunk = Usage( + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=prompt_tokens + completion_tokens, + ) + + return returned_usage_chunk + + +def generic_chunk_has_all_required_fields(chunk: dict) -> bool: + """ + Checks if the provided chunk dictionary contains all required fields for GenericStreamingChunk. + + :param chunk: The dictionary to check. + :return: True if all required fields are present, False otherwise. + """ + _all_fields = GChunk.__annotations__ + + decision = all(key in _all_fields for key in chunk) + return decision diff --git a/litellm/litellm_core_utils/streaming_utils.py b/litellm/litellm_core_utils/streaming_utils.py deleted file mode 100644 index c41b4f64c..000000000 --- a/litellm/litellm_core_utils/streaming_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -from litellm.types.utils import GenericStreamingChunk as GChunk - - -def generic_chunk_has_all_required_fields(chunk: dict) -> bool: - """ - Checks if the provided chunk dictionary contains all required fields for GenericStreamingChunk. - - :param chunk: The dictionary to check. - :return: True if all required fields are present, False otherwise. - """ - _all_fields = GChunk.__annotations__ - - decision = all(key in _all_fields for key in chunk) - return decision diff --git a/litellm/llms/databricks/streaming_utils.py b/litellm/llms/databricks/streaming_utils.py index a87ab39bb..502f4a091 100644 --- a/litellm/llms/databricks/streaming_utils.py +++ b/litellm/llms/databricks/streaming_utils.py @@ -1,5 +1,5 @@ import json -from typing import Optional +from typing import List, Optional import litellm from litellm import verbose_logger @@ -10,7 +10,7 @@ from litellm.types.llms.openai import ( ChatCompletionToolCallFunctionChunk, ChatCompletionUsageBlock, ) -from litellm.types.utils import GenericStreamingChunk +from litellm.types.utils import GenericStreamingChunk, ModelResponse, Usage class ModelResponseIterator: diff --git a/litellm/proxy/management_endpoints/team_endpoints.py b/litellm/proxy/management_endpoints/team_endpoints.py index 74289c90a..8dcd0c7eb 100644 --- a/litellm/proxy/management_endpoints/team_endpoints.py +++ b/litellm/proxy/management_endpoints/team_endpoints.py @@ -1281,12 +1281,20 @@ async def list_team( where={"team_id": team.team_id} ) - returned_responses.append( - TeamListResponseObject( - **team.model_dump(), - team_memberships=_team_memberships, - keys=keys, + try: + returned_responses.append( + TeamListResponseObject( + **team.model_dump(), + team_memberships=_team_memberships, + keys=keys, + ) ) - ) + except Exception as e: + team_exception = """Invalid team object for team_id: {}. team_object={}. + Error: {} + """.format( + team.team_id, team.model_dump(), str(e) + ) + raise HTTPException(status_code=400, detail={"error": team_exception}) return returned_responses diff --git a/litellm/utils.py b/litellm/utils.py index efda579d6..f2360884c 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -35,6 +35,7 @@ import traceback import uuid from dataclasses import dataclass, field from functools import lru_cache, wraps +from importlib import resources from inspect import iscoroutine from os.path import abspath, dirname, join @@ -49,6 +50,7 @@ from httpx._utils import get_environment_proxies from openai.lib import _parsing, _pydantic from openai.types.chat.completion_create_params import ResponseFormat from pydantic import BaseModel +from tiktoken import Encoding from tokenizers import Tokenizer import litellm @@ -59,7 +61,11 @@ import litellm.litellm_core_utils.json_validation_rule from litellm.caching.caching import DualCache from litellm.caching.caching_handler import CachingHandlerResponse, LLMCachingHandler from litellm.integrations.custom_logger import CustomLogger -from litellm.litellm_core_utils.core_helpers import map_finish_reason +from litellm.litellm_core_utils.core_helpers import ( + map_finish_reason, + process_response_headers, +) +from litellm.litellm_core_utils.default_encoding import encoding from litellm.litellm_core_utils.exception_mapping_utils import ( _get_response_headers, exception_type, @@ -87,6 +93,8 @@ from litellm.litellm_core_utils.redact_messages import ( LiteLLMLoggingObject, redact_message_input_output_from_logging, ) +from litellm.litellm_core_utils.rules import Rules +from litellm.litellm_core_utils.streaming_handler import CustomStreamWrapper from litellm.litellm_core_utils.token_counter import get_modified_max_tokens from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.secret_managers.main import get_secret @@ -123,25 +131,6 @@ from litellm.types.utils import ( Usage, ) -try: - # New and recommended way to access resources - from importlib import resources - - filename = str(resources.files(litellm).joinpath("llms/tokenizers")) -except (ImportError, AttributeError): - # Old way to access resources, which setuptools deprecated some time ago - import pkg_resources # type: ignore - - filename = pkg_resources.resource_filename(__name__, "llms/tokenizers") - -os.environ["TIKTOKEN_CACHE_DIR"] = os.getenv( - "CUSTOM_TIKTOKEN_CACHE_DIR", filename -) # use local copy of tiktoken b/c of - https://github.com/BerriAI/litellm/issues/1071 -from tiktoken import Encoding - -encoding = tiktoken.get_encoding("cl100k_base") -from importlib import resources - with resources.open_text("litellm.llms.tokenizers", "anthropic_tokenizer.json") as f: json_data = json.load(f) # Convert to str (if necessary) @@ -276,56 +265,6 @@ def print_verbose( pass -####### RULES ################### - - -class Rules: - """ - Fail calls based on the input or llm api output - - Example usage: - import litellm - def my_custom_rule(input): # receives the model response - if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer - return False - return True - - litellm.post_call_rules = [my_custom_rule] # have these be functions that can be called to fail a call - - response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user", - "content": "Hey, how's it going?"}], fallbacks=["openrouter/mythomax"]) - """ - - def __init__(self) -> None: - pass - - def pre_call_rules(self, input: str, model: str): - for rule in litellm.pre_call_rules: - if callable(rule): - decision = rule(input) - if decision is False: - raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore - return True - - def post_call_rules(self, input: Optional[str], model: str) -> bool: - if input is None: - return True - for rule in litellm.post_call_rules: - if callable(rule): - decision = rule(input) - if isinstance(decision, bool): - if decision is False: - raise litellm.APIResponseValidationError(message="LLM Response failed post-call-rule check", llm_provider="", model=model) # type: ignore - elif isinstance(decision, dict): - decision_val = decision.get("decision", True) - decision_message = decision.get( - "message", "LLM Response failed post-call-rule check" - ) - if decision_val is False: - raise litellm.APIResponseValidationError(message=decision_message, llm_provider="", model=model) # type: ignore - return True - - ####### CLIENT ################### # make it easy to log if completion/embedding runs succeeded or failed + see what happened | Non-Blocking def custom_llm_setup(): @@ -5568,2042 +5507,2025 @@ def get_model_list(): # wraps the completion stream to return the correct format for the model # replicate/anthropic/cohere - -def calculate_total_usage(chunks: List[ModelResponse]) -> Usage: - """Assume most recent usage chunk has total usage uptil then.""" - prompt_tokens: int = 0 - completion_tokens: int = 0 - for chunk in chunks: - if "usage" in chunk: - if "prompt_tokens" in chunk["usage"]: - prompt_tokens = chunk["usage"].get("prompt_tokens", 0) or 0 - if "completion_tokens" in chunk["usage"]: - completion_tokens = chunk["usage"].get("completion_tokens", 0) or 0 - - returned_usage_chunk = Usage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=prompt_tokens + completion_tokens, - ) - - return returned_usage_chunk - - -class CustomStreamWrapper: - def __init__( - self, - completion_stream, - model, - logging_obj: Any, - custom_llm_provider: Optional[str] = None, - stream_options=None, - make_call: Optional[Callable] = None, - _response_headers: Optional[dict] = None, - ): - self.model = model - self.make_call = make_call - self.custom_llm_provider = custom_llm_provider - self.logging_obj: LiteLLMLoggingObject = logging_obj - self.completion_stream = completion_stream - self.sent_first_chunk = False - self.sent_last_chunk = False - self.system_fingerprint: Optional[str] = None - self.received_finish_reason: Optional[str] = None - self.special_tokens = [ - "<|assistant|>", - "<|system|>", - "<|user|>", - "", - "", - "<|im_end|>", - "<|im_start|>", - ] - self.holding_chunk = "" - self.complete_response = "" - self.response_uptil_now = "" - _model_info = ( - self.logging_obj.model_call_details.get("litellm_params", {}).get( - "model_info", {} - ) - or {} - ) - self._hidden_params = { - "model_id": (_model_info.get("id", None)), - } # returned as x-litellm-model-id response header in proxy - - self._hidden_params["additional_headers"] = process_response_headers( - _response_headers or {} - ) # GUARANTEE OPENAI HEADERS IN RESPONSE - - self._response_headers = _response_headers - self.response_id = None - self.logging_loop = None - self.rules = Rules() - self.stream_options = stream_options or getattr( - logging_obj, "stream_options", None - ) - self.messages = getattr(logging_obj, "messages", None) - self.sent_stream_usage = False - self.send_stream_usage = ( - True if self.check_send_stream_usage(self.stream_options) else False - ) - self.tool_call = False - self.chunks: List = ( - [] - ) # keep track of the returned chunks - used for calculating the input/output tokens for stream options - self.is_function_call = self.check_is_function_call(logging_obj=logging_obj) - - def __iter__(self): - return self - - def __aiter__(self): - return self - - def check_send_stream_usage(self, stream_options: Optional[dict]): - return ( - stream_options is not None - and stream_options.get("include_usage", False) is True - ) - - def check_is_function_call(self, logging_obj) -> bool: - if hasattr(logging_obj, "optional_params") and isinstance( - logging_obj.optional_params, dict - ): - if ( - "litellm_param_is_function_call" in logging_obj.optional_params - and logging_obj.optional_params["litellm_param_is_function_call"] - is True - ): - return True - - return False - - def process_chunk(self, chunk: str): - """ - NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta. - """ - try: - chunk = chunk.strip() - self.complete_response = self.complete_response.strip() - - if chunk.startswith(self.complete_response): - # Remove last_sent_chunk only if it appears at the start of the new chunk - chunk = chunk[len(self.complete_response) :] - - self.complete_response += chunk - return chunk - except Exception as e: - raise e - - def safety_checker(self) -> None: - """ - Fixes - https://github.com/BerriAI/litellm/issues/5158 - - if the model enters a loop and starts repeating the same chunk again, break out of loop and raise an internalservererror - allows for retries. - - Raises - InternalServerError, if LLM enters infinite loop while streaming - """ - if len(self.chunks) >= litellm.REPEATED_STREAMING_CHUNK_LIMIT: - # Get the last n chunks - last_chunks = self.chunks[-litellm.REPEATED_STREAMING_CHUNK_LIMIT :] - - # Extract the relevant content from the chunks - last_contents = [chunk.choices[0].delta.content for chunk in last_chunks] - - # Check if all extracted contents are identical - if all(content == last_contents[0] for content in last_contents): - if ( - last_contents[0] is not None - and isinstance(last_contents[0], str) - and len(last_contents[0]) > 2 - ): # ignore empty content - https://github.com/BerriAI/litellm/issues/5158#issuecomment-2287156946 - # All last n chunks are identical - raise litellm.InternalServerError( - message="The model is repeating the same chunk = {}.".format( - last_contents[0] - ), - model="", - llm_provider="", - ) - - def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): - """ - Output parse / special tokens for sagemaker + hf streaming. - """ - hold = False - if ( - self.custom_llm_provider != "huggingface" - and self.custom_llm_provider != "sagemaker" - ): - return hold, chunk - - if finish_reason: - for token in self.special_tokens: - if token in chunk: - chunk = chunk.replace(token, "") - return hold, chunk - - if self.sent_first_chunk is True: - return hold, chunk - - curr_chunk = self.holding_chunk + chunk - curr_chunk = curr_chunk.strip() - - for token in self.special_tokens: - if len(curr_chunk) < len(token) and curr_chunk in token: - hold = True - self.holding_chunk = curr_chunk - elif len(curr_chunk) >= len(token): - if token in curr_chunk: - self.holding_chunk = curr_chunk.replace(token, "") - hold = True - else: - pass - - if hold is False: # reset - self.holding_chunk = "" - return hold, curr_chunk - - def handle_anthropic_text_chunk(self, chunk): - """ - For old anthropic models - claude-1, claude-2. - - Claude-3 is handled from within Anthropic.py VIA ModelResponseIterator() - """ - str_line = chunk - if isinstance(chunk, bytes): # Handle binary data - str_line = chunk.decode("utf-8") # Convert bytes to string - text = "" - is_finished = False - finish_reason = None - if str_line.startswith("data:"): - data_json = json.loads(str_line[5:]) - type_chunk = data_json.get("type", None) - if type_chunk == "completion": - text = data_json.get("completion") - finish_reason = data_json.get("stop_reason") - if finish_reason is not None: - is_finished = True - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif "error" in str_line: - raise ValueError(f"Unable to parse response. Original response: {str_line}") - else: - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - - def handle_vertexai_anthropic_chunk(self, chunk): - """ - - MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai - - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai - - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai - """ - text = "" - prompt_tokens = None - completion_tokens = None - is_finished = False - finish_reason = None - type_chunk = getattr(chunk, "type", None) - if type_chunk == "message_start": - message = getattr(chunk, "message", None) - text = "" # lets us return a chunk with usage to user - _usage = getattr(message, "usage", None) - if _usage is not None: - prompt_tokens = getattr(_usage, "input_tokens", None) - completion_tokens = getattr(_usage, "output_tokens", None) - elif type_chunk == "content_block_delta": - """ - Anthropic content chunk - chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} - """ - delta = getattr(chunk, "delta", None) - if delta is not None: - text = getattr(delta, "text", "") - else: - text = "" - elif type_chunk == "message_delta": - """ - Anthropic - chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} - """ - # TODO - get usage from this chunk, set in response - delta = getattr(chunk, "delta", None) - if delta is not None: - finish_reason = getattr(delta, "stop_reason", "stop") - is_finished = True - _usage = getattr(chunk, "usage", None) - if _usage is not None: - prompt_tokens = getattr(_usage, "input_tokens", None) - completion_tokens = getattr(_usage, "output_tokens", None) - - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - "prompt_tokens": prompt_tokens, - "completion_tokens": completion_tokens, - } - - def handle_predibase_chunk(self, chunk): - try: - if not isinstance(chunk, str): - chunk = chunk.decode( - "utf-8" - ) # DO NOT REMOVE this: This is required for HF inference API + Streaming - text = "" - is_finished = False - finish_reason = "" - print_verbose(f"chunk: {chunk}") - if chunk.startswith("data:"): - data_json = json.loads(chunk[5:]) - print_verbose(f"data json: {data_json}") - if "token" in data_json and "text" in data_json["token"]: - text = data_json["token"]["text"] - if data_json.get("details", False) and data_json["details"].get( - "finish_reason", False - ): - is_finished = True - finish_reason = data_json["details"]["finish_reason"] - elif data_json.get( - "generated_text", False - ): # if full generated text exists, then stream is complete - text = "" # don't return the final bos token - is_finished = True - finish_reason = "stop" - elif data_json.get("error", False): - raise Exception(data_json.get("error")) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif "error" in chunk: - raise ValueError(chunk) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception as e: - raise e - - def handle_huggingface_chunk(self, chunk): - try: - if not isinstance(chunk, str): - chunk = chunk.decode( - "utf-8" - ) # DO NOT REMOVE this: This is required for HF inference API + Streaming - text = "" - is_finished = False - finish_reason = "" - print_verbose(f"chunk: {chunk}") - if chunk.startswith("data:"): - data_json = json.loads(chunk[5:]) - print_verbose(f"data json: {data_json}") - if "token" in data_json and "text" in data_json["token"]: - text = data_json["token"]["text"] - if data_json.get("details", False) and data_json["details"].get( - "finish_reason", False - ): - is_finished = True - finish_reason = data_json["details"]["finish_reason"] - elif data_json.get( - "generated_text", False - ): # if full generated text exists, then stream is complete - text = "" # don't return the final bos token - is_finished = True - finish_reason = "stop" - elif data_json.get("error", False): - raise Exception(data_json.get("error")) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif "error" in chunk: - raise ValueError(chunk) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception as e: - raise e - - def handle_ai21_chunk(self, chunk): # fake streaming - chunk = chunk.decode("utf-8") - data_json = json.loads(chunk) - try: - text = data_json["completions"][0]["data"]["text"] - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_maritalk_chunk(self, chunk): # fake streaming - chunk = chunk.decode("utf-8") - data_json = json.loads(chunk) - try: - text = data_json["answer"] - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_nlp_cloud_chunk(self, chunk): - text = "" - is_finished = False - finish_reason = "" - try: - if "dolphin" in self.model: - chunk = self.process_chunk(chunk=chunk) - else: - data_json = json.loads(chunk) - chunk = data_json["generated_text"] - text = chunk - if "[DONE]" in text: - text = text.replace("[DONE]", "") - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_aleph_alpha_chunk(self, chunk): - chunk = chunk.decode("utf-8") - data_json = json.loads(chunk) - try: - text = data_json["completions"][0]["completion"] - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_cohere_chunk(self, chunk): - chunk = chunk.decode("utf-8") - data_json = json.loads(chunk) - try: - text = "" - is_finished = False - finish_reason = "" - index: Optional[int] = None - if "index" in data_json: - index = data_json.get("index") - if "text" in data_json: - text = data_json["text"] - elif "is_finished" in data_json: - is_finished = data_json["is_finished"] - finish_reason = data_json["finish_reason"] - else: - raise Exception(data_json) - return { - "index": index, - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_cohere_chat_chunk(self, chunk): - chunk = chunk.decode("utf-8") - data_json = json.loads(chunk) - print_verbose(f"chunk: {chunk}") - try: - text = "" - is_finished = False - finish_reason = "" - if "text" in data_json: - text = data_json["text"] - elif "is_finished" in data_json and data_json["is_finished"] is True: - is_finished = data_json["is_finished"] - finish_reason = data_json["finish_reason"] - else: - return - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_azure_chunk(self, chunk): - is_finished = False - finish_reason = "" - text = "" - print_verbose(f"chunk: {chunk}") - if "data: [DONE]" in chunk: - text = "" - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif chunk.startswith("data:"): - data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): - try: - if len(data_json["choices"]) > 0: - delta = data_json["choices"][0]["delta"] - text = "" if delta is None else delta.get("content", "") - if data_json["choices"][0].get("finish_reason", None): - is_finished = True - finish_reason = data_json["choices"][0]["finish_reason"] - print_verbose( - f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" - ) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError( - f"Unable to parse response. Original response: {chunk}" - ) - elif "error" in chunk: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - else: - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - - def handle_replicate_chunk(self, chunk): - try: - text = "" - is_finished = False - finish_reason = "" - if "output" in chunk: - text = chunk["output"] - if "status" in chunk: - if chunk["status"] == "succeeded": - is_finished = True - finish_reason = "stop" - elif chunk.get("error", None): - raise Exception(chunk["error"]) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - except Exception: - raise ValueError(f"Unable to parse response. Original response: {chunk}") - - def handle_openai_chat_completion_chunk(self, chunk): - try: - print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") - str_line = chunk - text = "" - is_finished = False - finish_reason = None - logprobs = None - usage = None - if str_line and str_line.choices and len(str_line.choices) > 0: - if ( - str_line.choices[0].delta is not None - and str_line.choices[0].delta.content is not None - ): - text = str_line.choices[0].delta.content - else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai - pass - if str_line.choices[0].finish_reason: - is_finished = True - finish_reason = str_line.choices[0].finish_reason - - # checking for logprobs - if ( - hasattr(str_line.choices[0], "logprobs") - and str_line.choices[0].logprobs is not None - ): - logprobs = str_line.choices[0].logprobs - else: - logprobs = None - - usage = getattr(str_line, "usage", None) - - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - "logprobs": logprobs, - "original_chunk": str_line, - "usage": usage, - } - except Exception as e: - raise e - - def handle_azure_text_completion_chunk(self, chunk): - try: - print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") - text = "" - is_finished = False - finish_reason = None - choices = getattr(chunk, "choices", []) - if len(choices) > 0: - text = choices[0].text - if choices[0].finish_reason is not None: - is_finished = True - finish_reason = choices[0].finish_reason - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - - except Exception as e: - raise e - - def handle_openai_text_completion_chunk(self, chunk): - try: - print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") - text = "" - is_finished = False - finish_reason = None - usage = None - choices = getattr(chunk, "choices", []) - if len(choices) > 0: - text = choices[0].text - if choices[0].finish_reason is not None: - is_finished = True - finish_reason = choices[0].finish_reason - usage = getattr(chunk, "usage", None) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - "usage": usage, - } - - except Exception as e: - raise e - - def handle_baseten_chunk(self, chunk): - try: - chunk = chunk.decode("utf-8") - if len(chunk) > 0: - if chunk.startswith("data:"): - data_json = json.loads(chunk[5:]) - if "token" in data_json and "text" in data_json["token"]: - return data_json["token"]["text"] - else: - return "" - data_json = json.loads(chunk) - if "model_output" in data_json: - if ( - isinstance(data_json["model_output"], dict) - and "data" in data_json["model_output"] - and isinstance(data_json["model_output"]["data"], list) - ): - return data_json["model_output"]["data"][0] - elif isinstance(data_json["model_output"], str): - return data_json["model_output"] - elif "completion" in data_json and isinstance( - data_json["completion"], str - ): - return data_json["completion"] - else: - raise ValueError( - f"Unable to parse response. Original response: {chunk}" - ) - else: - return "" - else: - return "" - except Exception as e: - verbose_logger.exception( - "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format( - str(e) - ) - ) - return "" - - def handle_cloudlfare_stream(self, chunk): - try: - print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") - chunk = chunk.decode("utf-8") - str_line = chunk - text = "" - is_finished = False - finish_reason = None - - if "[DONE]" in chunk: - return {"text": text, "is_finished": True, "finish_reason": "stop"} - elif str_line.startswith("data:"): - data_json = json.loads(str_line[5:]) - print_verbose(f"delta content: {data_json}") - text = data_json["response"] - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - else: - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - - except Exception as e: - raise e - - def handle_ollama_stream(self, chunk): - try: - if isinstance(chunk, dict): - json_chunk = chunk - else: - json_chunk = json.loads(chunk) - if "error" in json_chunk: - raise Exception(f"Ollama Error - {json_chunk}") - - text = "" - is_finished = False - finish_reason = None - if json_chunk["done"] is True: - text = "" - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif json_chunk["response"]: - print_verbose(f"delta content: {json_chunk}") - text = json_chunk["response"] - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - else: - raise Exception(f"Ollama Error - {json_chunk}") - except Exception as e: - raise e - - def handle_ollama_chat_stream(self, chunk): - # for ollama_chat/ provider - try: - if isinstance(chunk, dict): - json_chunk = chunk - else: - json_chunk = json.loads(chunk) - if "error" in json_chunk: - raise Exception(f"Ollama Error - {json_chunk}") - - text = "" - is_finished = False - finish_reason = None - if json_chunk["done"] is True: - text = "" - is_finished = True - finish_reason = "stop" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - elif "message" in json_chunk: - print_verbose(f"delta content: {json_chunk}") - text = json_chunk["message"]["content"] - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - } - else: - raise Exception(f"Ollama Error - {json_chunk}") - except Exception as e: - raise e - - def handle_watsonx_stream(self, chunk): - try: - if isinstance(chunk, dict): - parsed_response = chunk - elif isinstance(chunk, (str, bytes)): - if isinstance(chunk, bytes): - chunk = chunk.decode("utf-8") - if "generated_text" in chunk: - response = chunk.replace("data: ", "").strip() - parsed_response = json.loads(response) - else: - return { - "text": "", - "is_finished": False, - "prompt_tokens": 0, - "completion_tokens": 0, - } - else: - print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") - raise ValueError( - f"Unable to parse response. Original response: {chunk}" - ) - results = parsed_response.get("results", []) - if len(results) > 0: - text = results[0].get("generated_text", "") - finish_reason = results[0].get("stop_reason") - is_finished = finish_reason != "not_finished" - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - "prompt_tokens": results[0].get("input_token_count", 0), - "completion_tokens": results[0].get("generated_token_count", 0), - } - return {"text": "", "is_finished": False} - except Exception as e: - raise e - - def handle_triton_stream(self, chunk): - try: - if isinstance(chunk, dict): - parsed_response = chunk - elif isinstance(chunk, (str, bytes)): - if isinstance(chunk, bytes): - chunk = chunk.decode("utf-8") - if "text_output" in chunk: - response = chunk.replace("data: ", "").strip() - parsed_response = json.loads(response) - else: - return { - "text": "", - "is_finished": False, - "prompt_tokens": 0, - "completion_tokens": 0, - } - else: - print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") - raise ValueError( - f"Unable to parse response. Original response: {chunk}" - ) - text = parsed_response.get("text_output", "") - finish_reason = parsed_response.get("stop_reason") - is_finished = parsed_response.get("is_finished", False) - return { - "text": text, - "is_finished": is_finished, - "finish_reason": finish_reason, - "prompt_tokens": parsed_response.get("input_token_count", 0), - "completion_tokens": parsed_response.get("generated_token_count", 0), - } - return {"text": "", "is_finished": False} - except Exception as e: - raise e - - def handle_clarifai_completion_chunk(self, chunk): - try: - if isinstance(chunk, dict): - parsed_response = chunk - elif isinstance(chunk, (str, bytes)): - if isinstance(chunk, bytes): - parsed_response = chunk.decode("utf-8") - else: - parsed_response = chunk - else: - raise ValueError("Unable to parse streaming chunk") - if isinstance(parsed_response, dict): - data_json = parsed_response - else: - data_json = json.loads(parsed_response) - text = ( - data_json.get("outputs", "")[0] - .get("data", "") - .get("text", "") - .get("raw", "") - ) - len( - encoding.encode( - data_json.get("outputs", "")[0] - .get("input", "") - .get("data", "") - .get("text", "") - .get("raw", "") - ) - ) - len(encoding.encode(text)) - return { - "text": text, - "is_finished": True, - } - except Exception as e: - verbose_logger.exception( - "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format( - str(e) - ) - ) - return "" - - def model_response_creator( - self, chunk: Optional[dict] = None, hidden_params: Optional[dict] = None - ): - _model = self.model - _received_llm_provider = self.custom_llm_provider - _logging_obj_llm_provider = self.logging_obj.model_call_details.get("custom_llm_provider", None) # type: ignore - if ( - _received_llm_provider == "openai" - and _received_llm_provider != _logging_obj_llm_provider - ): - _model = "{}/{}".format(_logging_obj_llm_provider, _model) - if chunk is None: - chunk = {} - else: - # pop model keyword - chunk.pop("model", None) - - model_response = ModelResponse( - stream=True, model=_model, stream_options=self.stream_options, **chunk - ) - if self.response_id is not None: - model_response.id = self.response_id - else: - self.response_id = model_response.id # type: ignore - if self.system_fingerprint is not None: - model_response.system_fingerprint = self.system_fingerprint - if hidden_params is not None: - model_response._hidden_params = hidden_params - model_response._hidden_params["custom_llm_provider"] = _logging_obj_llm_provider - model_response._hidden_params["created_at"] = time.time() - model_response._hidden_params = { - **model_response._hidden_params, - **self._hidden_params, - } - - if ( - len(model_response.choices) > 0 - and getattr(model_response.choices[0], "delta") is not None - ): - # do nothing, if object instantiated - pass - else: - model_response.choices = [StreamingChoices(finish_reason=None)] - return model_response - - def is_delta_empty(self, delta: Delta) -> bool: - is_empty = True - if delta.content is not None: - is_empty = False - elif delta.tool_calls is not None: - is_empty = False - elif delta.function_call is not None: - is_empty = False - return is_empty - - def return_processed_chunk_logic( # noqa - self, - completion_obj: dict, - model_response: ModelResponseStream, - response_obj: dict, - ): - - print_verbose( - f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}" - ) - if ( - "content" in completion_obj - and ( - isinstance(completion_obj["content"], str) - and len(completion_obj["content"]) > 0 - ) - or ( - "tool_calls" in completion_obj - and completion_obj["tool_calls"] is not None - and len(completion_obj["tool_calls"]) > 0 - ) - or ( - "function_call" in completion_obj - and completion_obj["function_call"] is not None - ) - ): # cannot set content of an OpenAI Object to be an empty string - self.safety_checker() - hold, model_response_str = self.check_special_tokens( - chunk=completion_obj["content"], - finish_reason=model_response.choices[0].finish_reason, - ) # filter out bos/eos tokens from openai-compatible hf endpoints - print_verbose(f"hold - {hold}, model_response_str - {model_response_str}") - if hold is False: - ## check if openai/azure chunk - original_chunk = response_obj.get("original_chunk", None) - if original_chunk: - model_response.id = original_chunk.id - self.response_id = original_chunk.id - if len(original_chunk.choices) > 0: - choices = [] - for choice in original_chunk.choices: - try: - if isinstance(choice, BaseModel): - choice_json = choice.model_dump() - choice_json.pop( - "finish_reason", None - ) # for mistral etc. which return a value in their last chunk (not-openai compatible). - print_verbose(f"choice_json: {choice_json}") - choices.append(StreamingChoices(**choice_json)) - except Exception: - choices.append(StreamingChoices()) - print_verbose(f"choices in streaming: {choices}") - setattr(model_response, "choices", choices) - else: - return - model_response.system_fingerprint = ( - original_chunk.system_fingerprint - ) - setattr( - model_response, - "citations", - getattr(original_chunk, "citations", None), - ) - print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") - if self.sent_first_chunk is False: - model_response.choices[0].delta["role"] = "assistant" - self.sent_first_chunk = True - elif self.sent_first_chunk is True and hasattr( - model_response.choices[0].delta, "role" - ): - _initial_delta = model_response.choices[0].delta.model_dump() - _initial_delta.pop("role", None) - model_response.choices[0].delta = Delta(**_initial_delta) - print_verbose( - f"model_response.choices[0].delta: {model_response.choices[0].delta}" - ) - else: - ## else - completion_obj["content"] = model_response_str - if self.sent_first_chunk is False: - completion_obj["role"] = "assistant" - self.sent_first_chunk = True - - model_response.choices[0].delta = Delta(**completion_obj) - _index: Optional[int] = completion_obj.get("index") - if _index is not None: - model_response.choices[0].index = _index - print_verbose(f"returning model_response: {model_response}") - return model_response - else: - return - elif self.received_finish_reason is not None: - if self.sent_last_chunk is True: - # Bedrock returns the guardrail trace in the last chunk - we want to return this here - if self.custom_llm_provider == "bedrock" and "trace" in model_response: - return model_response - - # Default - return StopIteration - raise StopIteration - # flush any remaining holding chunk - if len(self.holding_chunk) > 0: - if model_response.choices[0].delta.content is None: - model_response.choices[0].delta.content = self.holding_chunk - else: - model_response.choices[0].delta.content = ( - self.holding_chunk + model_response.choices[0].delta.content - ) - self.holding_chunk = "" - # if delta is None - _is_delta_empty = self.is_delta_empty(delta=model_response.choices[0].delta) - - if _is_delta_empty: - # get any function call arguments - model_response.choices[0].finish_reason = map_finish_reason( - finish_reason=self.received_finish_reason - ) # ensure consistent output to openai - - self.sent_last_chunk = True - - return model_response - elif ( - model_response.choices[0].delta.tool_calls is not None - or model_response.choices[0].delta.function_call is not None - ): - if self.sent_first_chunk is False: - model_response.choices[0].delta["role"] = "assistant" - self.sent_first_chunk = True - return model_response - elif ( - len(model_response.choices) > 0 - and hasattr(model_response.choices[0].delta, "audio") - and model_response.choices[0].delta.audio is not None - ): - return model_response - else: - if hasattr(model_response, "usage"): - self.chunks.append(model_response) - return - - def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 - model_response = self.model_response_creator() - response_obj: dict = {} - try: - # return this for all models - completion_obj = {"content": ""} - from litellm.litellm_core_utils.streaming_utils import ( - generic_chunk_has_all_required_fields, - ) - from litellm.types.utils import GenericStreamingChunk as GChunk - - if ( - isinstance(chunk, dict) - and generic_chunk_has_all_required_fields( - chunk=chunk - ) # check if chunk is a generic streaming chunk - ) or ( - self.custom_llm_provider - and ( - self.custom_llm_provider == "anthropic" - or self.custom_llm_provider in litellm._custom_providers - ) - ): - - if self.received_finish_reason is not None: - if "provider_specific_fields" not in chunk: - raise StopIteration - anthropic_response_obj: GChunk = chunk - completion_obj["content"] = anthropic_response_obj["text"] - if anthropic_response_obj["is_finished"]: - self.received_finish_reason = anthropic_response_obj[ - "finish_reason" - ] - - if anthropic_response_obj["usage"] is not None: - model_response.usage = litellm.Usage( - **anthropic_response_obj["usage"] - ) - - if ( - "tool_use" in anthropic_response_obj - and anthropic_response_obj["tool_use"] is not None - ): - completion_obj["tool_calls"] = [anthropic_response_obj["tool_use"]] - - if ( - "provider_specific_fields" in anthropic_response_obj - and anthropic_response_obj["provider_specific_fields"] is not None - ): - for key, value in anthropic_response_obj[ - "provider_specific_fields" - ].items(): - setattr(model_response, key, value) - - response_obj = anthropic_response_obj - elif ( - self.custom_llm_provider - and self.custom_llm_provider == "anthropic_text" - ): - response_obj = self.handle_anthropic_text_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": - response_obj = self.handle_clarifai_completion_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.model == "replicate" or self.custom_llm_provider == "replicate": - response_obj = self.handle_replicate_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": - response_obj = self.handle_huggingface_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider and self.custom_llm_provider == "predibase": - response_obj = self.handle_predibase_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif ( - self.custom_llm_provider and self.custom_llm_provider == "baseten" - ): # baseten doesn't provide streaming - completion_obj["content"] = self.handle_baseten_chunk(chunk) - elif ( - self.custom_llm_provider and self.custom_llm_provider == "ai21" - ): # ai21 doesn't provide streaming - response_obj = self.handle_ai21_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": - response_obj = self.handle_maritalk_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider and self.custom_llm_provider == "vllm": - completion_obj["content"] = chunk[0].outputs[0].text - elif ( - self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" - ): # aleph alpha doesn't provide streaming - response_obj = self.handle_aleph_alpha_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "nlp_cloud": - try: - response_obj = self.handle_nlp_cloud_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - except Exception as e: - if self.received_finish_reason: - raise e - else: - if self.sent_first_chunk is False: - raise Exception("An unknown error occurred with the stream") - self.received_finish_reason = "stop" - elif self.custom_llm_provider == "vertex_ai": - import proto # type: ignore - - if self.model.startswith("claude-3"): - response_obj = self.handle_vertexai_anthropic_chunk(chunk=chunk) - if response_obj is None: - return - completion_obj["content"] = response_obj["text"] - setattr(model_response, "usage", Usage()) - if response_obj.get("prompt_tokens", None) is not None: - model_response.usage.prompt_tokens = response_obj[ - "prompt_tokens" - ] - if response_obj.get("completion_tokens", None) is not None: - model_response.usage.completion_tokens = response_obj[ - "completion_tokens" - ] - if hasattr(model_response.usage, "prompt_tokens"): - model_response.usage.total_tokens = ( - getattr(model_response.usage, "total_tokens", 0) - + model_response.usage.prompt_tokens - ) - if hasattr(model_response.usage, "completion_tokens"): - model_response.usage.total_tokens = ( - getattr(model_response.usage, "total_tokens", 0) - + model_response.usage.completion_tokens - ) - - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif hasattr(chunk, "candidates") is True: - try: - try: - completion_obj["content"] = chunk.text - except Exception as e: - if "Part has no text." in str(e): - ## check for function calling - function_call = ( - chunk.candidates[0].content.parts[0].function_call - ) - - args_dict = {} - - # Check if it's a RepeatedComposite instance - for key, val in function_call.args.items(): - if isinstance( - val, - proto.marshal.collections.repeated.RepeatedComposite, - ): - # If so, convert to list - args_dict[key] = [v for v in val] - else: - args_dict[key] = val - - try: - args_str = json.dumps(args_dict) - except Exception as e: - raise e - _delta_obj = litellm.utils.Delta( - content=None, - tool_calls=[ - { - "id": f"call_{str(uuid.uuid4())}", - "function": { - "arguments": args_str, - "name": function_call.name, - }, - "type": "function", - } - ], - ) - _streaming_response = StreamingChoices(delta=_delta_obj) - _model_response = ModelResponse(stream=True) - _model_response.choices = [_streaming_response] - response_obj = {"original_chunk": _model_response} - else: - raise e - if ( - hasattr(chunk.candidates[0], "finish_reason") - and chunk.candidates[0].finish_reason.name - != "FINISH_REASON_UNSPECIFIED" - ): # every non-final chunk in vertex ai has this - self.received_finish_reason = chunk.candidates[ - 0 - ].finish_reason.name - except Exception: - if chunk.candidates[0].finish_reason.name == "SAFETY": - raise Exception( - f"The response was blocked by VertexAI. {str(chunk)}" - ) - else: - completion_obj["content"] = str(chunk) - elif self.custom_llm_provider == "cohere": - response_obj = self.handle_cohere_chunk(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "cohere_chat": - response_obj = self.handle_cohere_chat_chunk(chunk) - if response_obj is None: - return - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - - elif self.custom_llm_provider == "petals": - if len(self.completion_stream) == 0: - if self.received_finish_reason is not None: - raise StopIteration - else: - self.received_finish_reason = "stop" - chunk_size = 30 - new_chunk = self.completion_stream[:chunk_size] - completion_obj["content"] = new_chunk - self.completion_stream = self.completion_stream[chunk_size:] - elif self.custom_llm_provider == "palm": - # fake streaming - response_obj = {} - if len(self.completion_stream) == 0: - if self.received_finish_reason is not None: - raise StopIteration - else: - self.received_finish_reason = "stop" - chunk_size = 30 - new_chunk = self.completion_stream[:chunk_size] - completion_obj["content"] = new_chunk - self.completion_stream = self.completion_stream[chunk_size:] - elif self.custom_llm_provider == "ollama": - response_obj = self.handle_ollama_stream(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "ollama_chat": - response_obj = self.handle_ollama_chat_stream(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "cloudflare": - response_obj = self.handle_cloudlfare_stream(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "watsonx": - response_obj = self.handle_watsonx_stream(chunk) - completion_obj["content"] = response_obj["text"] - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "triton": - response_obj = self.handle_triton_stream(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "text-completion-openai": - response_obj = self.handle_openai_text_completion_chunk(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - if response_obj["usage"] is not None: - model_response.usage = litellm.Usage( - prompt_tokens=response_obj["usage"].prompt_tokens, - completion_tokens=response_obj["usage"].completion_tokens, - total_tokens=response_obj["usage"].total_tokens, - ) - elif self.custom_llm_provider == "text-completion-codestral": - response_obj = litellm.MistralTextCompletionConfig()._chunk_parser( - chunk - ) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - if "usage" in response_obj is not None: - model_response.usage = litellm.Usage( - prompt_tokens=response_obj["usage"].prompt_tokens, - completion_tokens=response_obj["usage"].completion_tokens, - total_tokens=response_obj["usage"].total_tokens, - ) - elif self.custom_llm_provider == "azure_text": - response_obj = self.handle_azure_text_completion_chunk(chunk) - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - elif self.custom_llm_provider == "cached_response": - response_obj = { - "text": chunk.choices[0].delta.content, - "is_finished": True, - "finish_reason": chunk.choices[0].finish_reason, - "original_chunk": chunk, - "tool_calls": ( - chunk.choices[0].delta.tool_calls - if hasattr(chunk.choices[0].delta, "tool_calls") - else None - ), - } - - completion_obj["content"] = response_obj["text"] - if response_obj["tool_calls"] is not None: - completion_obj["tool_calls"] = response_obj["tool_calls"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if hasattr(chunk, "id"): - model_response.id = chunk.id - self.response_id = chunk.id - if hasattr(chunk, "system_fingerprint"): - self.system_fingerprint = chunk.system_fingerprint - if response_obj["is_finished"]: - self.received_finish_reason = response_obj["finish_reason"] - else: # openai / azure chat model - if self.custom_llm_provider == "azure": - if hasattr(chunk, "model"): - # for azure, we need to pass the model from the orignal chunk - self.model = chunk.model - response_obj = self.handle_openai_chat_completion_chunk(chunk) - if response_obj is None: - return - completion_obj["content"] = response_obj["text"] - print_verbose(f"completion obj content: {completion_obj['content']}") - if response_obj["is_finished"]: - if response_obj["finish_reason"] == "error": - raise Exception( - "{} raised a streaming error - finish_reason: error, no content string given. Received Chunk={}".format( - self.custom_llm_provider, response_obj - ) - ) - self.received_finish_reason = response_obj["finish_reason"] - if response_obj.get("original_chunk", None) is not None: - if hasattr(response_obj["original_chunk"], "id"): - model_response.id = response_obj["original_chunk"].id - self.response_id = model_response.id - if hasattr(response_obj["original_chunk"], "system_fingerprint"): - model_response.system_fingerprint = response_obj[ - "original_chunk" - ].system_fingerprint - self.system_fingerprint = response_obj[ - "original_chunk" - ].system_fingerprint - if response_obj["logprobs"] is not None: - model_response.choices[0].logprobs = response_obj["logprobs"] - - if response_obj["usage"] is not None: - if isinstance(response_obj["usage"], dict): - model_response.usage = litellm.Usage( - prompt_tokens=response_obj["usage"].get( - "prompt_tokens", None - ) - or None, - completion_tokens=response_obj["usage"].get( - "completion_tokens", None - ) - or None, - total_tokens=response_obj["usage"].get("total_tokens", None) - or None, - ) - elif isinstance(response_obj["usage"], BaseModel): - model_response.usage = litellm.Usage( - **response_obj["usage"].model_dump() - ) - - model_response.model = self.model - print_verbose( - f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" - ) - ## FUNCTION CALL PARSING - if ( - response_obj is not None - and response_obj.get("original_chunk", None) is not None - ): # function / tool calling branch - only set for openai/azure compatible endpoints - # enter this branch when no content has been passed in response - original_chunk = response_obj.get("original_chunk", None) - model_response.id = original_chunk.id - self.response_id = original_chunk.id - if original_chunk.choices and len(original_chunk.choices) > 0: - delta = original_chunk.choices[0].delta - if delta is not None and ( - delta.function_call is not None or delta.tool_calls is not None - ): - try: - model_response.system_fingerprint = ( - original_chunk.system_fingerprint - ) - ## AZURE - check if arguments is not None - if ( - original_chunk.choices[0].delta.function_call - is not None - ): - if ( - getattr( - original_chunk.choices[0].delta.function_call, - "arguments", - ) - is None - ): - original_chunk.choices[ - 0 - ].delta.function_call.arguments = "" - elif original_chunk.choices[0].delta.tool_calls is not None: - if isinstance( - original_chunk.choices[0].delta.tool_calls, list - ): - for t in original_chunk.choices[0].delta.tool_calls: - if hasattr(t, "functions") and hasattr( - t.functions, "arguments" - ): - if ( - getattr( - t.function, - "arguments", - ) - is None - ): - t.function.arguments = "" - _json_delta = delta.model_dump() - print_verbose(f"_json_delta: {_json_delta}") - if "role" not in _json_delta or _json_delta["role"] is None: - _json_delta["role"] = ( - "assistant" # mistral's api returns role as None - ) - if "tool_calls" in _json_delta and isinstance( - _json_delta["tool_calls"], list - ): - for tool in _json_delta["tool_calls"]: - if ( - isinstance(tool, dict) - and "function" in tool - and isinstance(tool["function"], dict) - and ("type" not in tool or tool["type"] is None) - ): - # if function returned but type set to None - mistral's api returns type: None - tool["type"] = "function" - model_response.choices[0].delta = Delta(**_json_delta) - except Exception as e: - verbose_logger.exception( - "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format( - str(e) - ) - ) - model_response.choices[0].delta = Delta() - elif ( - delta is not None and getattr(delta, "audio", None) is not None - ): - model_response.choices[0].delta.audio = delta.audio - else: - try: - delta = ( - dict() - if original_chunk.choices[0].delta is None - else dict(original_chunk.choices[0].delta) - ) - print_verbose(f"original delta: {delta}") - model_response.choices[0].delta = Delta(**delta) - print_verbose( - f"new delta: {model_response.choices[0].delta}" - ) - except Exception: - model_response.choices[0].delta = Delta() - else: - if ( - self.stream_options is not None - and self.stream_options["include_usage"] is True - ): - return model_response - return - print_verbose( - f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" - ) - print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") - - ## CHECK FOR TOOL USE - if "tool_calls" in completion_obj and len(completion_obj["tool_calls"]) > 0: - if self.is_function_call is True: # user passed in 'functions' param - completion_obj["function_call"] = completion_obj["tool_calls"][0][ - "function" - ] - completion_obj["tool_calls"] = None - - self.tool_call = True - - ## RETURN ARG - return self.return_processed_chunk_logic( - completion_obj=completion_obj, - model_response=model_response, # type: ignore - response_obj=response_obj, - ) - - except StopIteration: - raise StopIteration - except Exception as e: - traceback.format_exc() - e.message = str(e) - raise exception_type( - model=self.model, - custom_llm_provider=self.custom_llm_provider, - original_exception=e, - ) - - def set_logging_event_loop(self, loop): - """ - import litellm, asyncio - - loop = asyncio.get_event_loop() # 👈 gets the current event loop - - response = litellm.completion(.., stream=True) - - response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging - - for chunk in response: - ... - """ - self.logging_loop = loop - - def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool): - """ - Runs success logging in a thread and adds the response to the cache - """ - if litellm.disable_streaming_logging is True: - """ - [NOT RECOMMENDED] - Set this via `litellm.disable_streaming_logging = True`. - - Disables streaming logging. - """ - return - ## ASYNC LOGGING - # Create an event loop for the new thread - if self.logging_loop is not None: - future = asyncio.run_coroutine_threadsafe( - self.logging_obj.async_success_handler( - processed_chunk, None, None, cache_hit - ), - loop=self.logging_loop, - ) - future.result() - else: - asyncio.run( - self.logging_obj.async_success_handler( - processed_chunk, None, None, cache_hit - ) - ) - ## SYNC LOGGING - self.logging_obj.success_handler(processed_chunk, None, None, cache_hit) - - ## Sync store in cache - if self.logging_obj._llm_caching_handler is not None: - self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( - processed_chunk - ) - - def finish_reason_handler(self): - model_response = self.model_response_creator() - if self.received_finish_reason is not None: - model_response.choices[0].finish_reason = map_finish_reason( - finish_reason=self.received_finish_reason - ) - else: - model_response.choices[0].finish_reason = "stop" - - ## if tool use - if ( - model_response.choices[0].finish_reason == "stop" and self.tool_call - ): # don't overwrite for other - potential error finish reasons - model_response.choices[0].finish_reason = "tool_calls" - return model_response - - def __next__(self): # noqa: PLR0915 - cache_hit = False - if ( - self.custom_llm_provider is not None - and self.custom_llm_provider == "cached_response" - ): - cache_hit = True - try: - if self.completion_stream is None: - self.fetch_sync_stream() - while True: - if ( - isinstance(self.completion_stream, str) - or isinstance(self.completion_stream, bytes) - or isinstance(self.completion_stream, ModelResponse) - ): - chunk = self.completion_stream - else: - chunk = next(self.completion_stream) - if chunk is not None and chunk != b"": - print_verbose( - f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}" - ) - response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk) - print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") - - if response is None: - continue - ## LOGGING - threading.Thread( - target=self.run_success_logging_and_cache_storage, - args=(response, cache_hit), - ).start() # log response - choice = response.choices[0] - if isinstance(choice, StreamingChoices): - self.response_uptil_now += choice.delta.get("content", "") or "" - else: - self.response_uptil_now += "" - self.rules.post_call_rules( - input=self.response_uptil_now, model=self.model - ) - # HANDLE STREAM OPTIONS - self.chunks.append(response) - if hasattr( - response, "usage" - ): # remove usage from chunk, only send on final chunk - # Convert the object to a dictionary - obj_dict = response.dict() - - # Remove an attribute (e.g., 'attr2') - if "usage" in obj_dict: - del obj_dict["usage"] - - # Create a new object without the removed attribute - response = self.model_response_creator( - chunk=obj_dict, hidden_params=response._hidden_params - ) - # add usage as hidden param - if self.sent_last_chunk is True and self.stream_options is None: - usage = calculate_total_usage(chunks=self.chunks) - response._hidden_params["usage"] = usage - # RETURN RESULT - return response - - except StopIteration: - if self.sent_last_chunk is True: - complete_streaming_response = litellm.stream_chunk_builder( - chunks=self.chunks, messages=self.messages - ) - response = self.model_response_creator() - if complete_streaming_response is not None: - setattr( - response, - "usage", - getattr(complete_streaming_response, "usage"), - ) - - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response - - if self.sent_stream_usage is False and self.send_stream_usage is True: - self.sent_stream_usage = True - return response - raise # Re-raise StopIteration - else: - self.sent_last_chunk = True - processed_chunk = self.finish_reason_handler() - if self.stream_options is None: # add usage as hidden param - usage = calculate_total_usage(chunks=self.chunks) - processed_chunk._hidden_params["usage"] = usage - ## LOGGING - threading.Thread( - target=self.run_success_logging_and_cache_storage, - args=(processed_chunk, cache_hit), - ).start() # log response - return processed_chunk - except Exception as e: - traceback_exception = traceback.format_exc() - # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated - threading.Thread( - target=self.logging_obj.failure_handler, args=(e, traceback_exception) - ).start() - if isinstance(e, OpenAIError): - raise e - else: - raise exception_type( - model=self.model, - original_exception=e, - custom_llm_provider=self.custom_llm_provider, - ) - - def fetch_sync_stream(self): - if self.completion_stream is None and self.make_call is not None: - # Call make_call to get the completion stream - self.completion_stream = self.make_call(client=litellm.module_level_client) - self._stream_iter = self.completion_stream.__iter__() - - return self.completion_stream - - async def fetch_stream(self): - if self.completion_stream is None and self.make_call is not None: - # Call make_call to get the completion stream - self.completion_stream = await self.make_call( - client=litellm.module_level_aclient - ) - self._stream_iter = self.completion_stream.__aiter__() - - return self.completion_stream - - async def __anext__(self): # noqa: PLR0915 - cache_hit = False - if ( - self.custom_llm_provider is not None - and self.custom_llm_provider == "cached_response" - ): - cache_hit = True - try: - if self.completion_stream is None: - await self.fetch_stream() - - if ( - self.custom_llm_provider == "openai" - or self.custom_llm_provider == "azure" - or self.custom_llm_provider == "custom_openai" - or self.custom_llm_provider == "text-completion-openai" - or self.custom_llm_provider == "text-completion-codestral" - or self.custom_llm_provider == "azure_text" - or self.custom_llm_provider == "anthropic" - or self.custom_llm_provider == "anthropic_text" - or self.custom_llm_provider == "huggingface" - or self.custom_llm_provider == "ollama" - or self.custom_llm_provider == "ollama_chat" - or self.custom_llm_provider == "vertex_ai" - or self.custom_llm_provider == "vertex_ai_beta" - or self.custom_llm_provider == "sagemaker" - or self.custom_llm_provider == "sagemaker_chat" - or self.custom_llm_provider == "gemini" - or self.custom_llm_provider == "replicate" - or self.custom_llm_provider == "cached_response" - or self.custom_llm_provider == "predibase" - or self.custom_llm_provider == "databricks" - or self.custom_llm_provider == "bedrock" - or self.custom_llm_provider == "triton" - or self.custom_llm_provider == "watsonx" - or self.custom_llm_provider in litellm.openai_compatible_endpoints - or self.custom_llm_provider in litellm._custom_providers - ): - async for chunk in self.completion_stream: - if chunk == "None" or chunk is None: - raise Exception - elif ( - self.custom_llm_provider == "gemini" - and hasattr(chunk, "parts") - and len(chunk.parts) == 0 - ): - continue - # chunk_creator() does logging/stream chunk building. We need to let it know its being called in_async_func, so we don't double add chunks. - # __anext__ also calls async_success_handler, which does logging - print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") - - processed_chunk: Optional[ModelResponse] = self.chunk_creator( - chunk=chunk - ) - print_verbose( - f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}" - ) - if processed_chunk is None: - continue - ## LOGGING - ## LOGGING - executor.submit( - self.logging_obj.success_handler, - result=processed_chunk, - start_time=None, - end_time=None, - cache_hit=cache_hit, - ) - - asyncio.create_task( - self.logging_obj.async_success_handler( - processed_chunk, cache_hit=cache_hit - ) - ) - - if self.logging_obj._llm_caching_handler is not None: - asyncio.create_task( - self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( - processed_chunk=processed_chunk, - ) - ) - - choice = processed_chunk.choices[0] - if isinstance(choice, StreamingChoices): - self.response_uptil_now += choice.delta.get("content", "") or "" - else: - self.response_uptil_now += "" - self.rules.post_call_rules( - input=self.response_uptil_now, model=self.model - ) - self.chunks.append(processed_chunk) - if hasattr( - processed_chunk, "usage" - ): # remove usage from chunk, only send on final chunk - # Convert the object to a dictionary - obj_dict = processed_chunk.dict() - - # Remove an attribute (e.g., 'attr2') - if "usage" in obj_dict: - del obj_dict["usage"] - - # Create a new object without the removed attribute - processed_chunk = self.model_response_creator(chunk=obj_dict) - print_verbose(f"final returned processed chunk: {processed_chunk}") - return processed_chunk - raise StopAsyncIteration - else: # temporary patch for non-aiohttp async calls - # example - boto3 bedrock llms - while True: - if isinstance(self.completion_stream, str) or isinstance( - self.completion_stream, bytes - ): - chunk = self.completion_stream - else: - chunk = next(self.completion_stream) - if chunk is not None and chunk != b"": - print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") - processed_chunk: Optional[ModelResponse] = self.chunk_creator( - chunk=chunk - ) - print_verbose( - f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}" - ) - if processed_chunk is None: - continue - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(processed_chunk, None, None, cache_hit), - ).start() # log processed_chunk - asyncio.create_task( - self.logging_obj.async_success_handler( - processed_chunk, cache_hit=cache_hit - ) - ) - - choice = processed_chunk.choices[0] - if isinstance(choice, StreamingChoices): - self.response_uptil_now += ( - choice.delta.get("content", "") or "" - ) - else: - self.response_uptil_now += "" - self.rules.post_call_rules( - input=self.response_uptil_now, model=self.model - ) - # RETURN RESULT - self.chunks.append(processed_chunk) - return processed_chunk - except (StopAsyncIteration, StopIteration): - if self.sent_last_chunk is True: - # log the final chunk with accurate streaming values - complete_streaming_response = litellm.stream_chunk_builder( - chunks=self.chunks, messages=self.messages - ) - response = self.model_response_creator() - if complete_streaming_response is not None: - setattr( - response, - "usage", - getattr(complete_streaming_response, "usage"), - ) - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(response, None, None, cache_hit), - ).start() # log response - asyncio.create_task( - self.logging_obj.async_success_handler( - response, cache_hit=cache_hit - ) - ) - if self.sent_stream_usage is False and self.send_stream_usage is True: - self.sent_stream_usage = True - return response - raise StopAsyncIteration # Re-raise StopIteration - else: - self.sent_last_chunk = True - processed_chunk = self.finish_reason_handler() - ## LOGGING - threading.Thread( - target=self.logging_obj.success_handler, - args=(processed_chunk, None, None, cache_hit), - ).start() # log response - asyncio.create_task( - self.logging_obj.async_success_handler( - processed_chunk, cache_hit=cache_hit - ) - ) - return processed_chunk - except httpx.TimeoutException as e: # if httpx read timeout error occues - traceback_exception = traceback.format_exc() - ## ADD DEBUG INFORMATION - E.G. LITELLM REQUEST TIMEOUT - traceback_exception += "\nLiteLLM Default Request Timeout - {}".format( - litellm.request_timeout - ) - if self.logging_obj is not None: - ## LOGGING - threading.Thread( - target=self.logging_obj.failure_handler, - args=(e, traceback_exception), - ).start() # log response - # Handle any exceptions that might occur during streaming - asyncio.create_task( - self.logging_obj.async_failure_handler(e, traceback_exception) - ) - raise e - except Exception as e: - traceback_exception = traceback.format_exc() - if self.logging_obj is not None: - ## LOGGING - threading.Thread( - target=self.logging_obj.failure_handler, - args=(e, traceback_exception), - ).start() # log response - # Handle any exceptions that might occur during streaming - asyncio.create_task( - self.logging_obj.async_failure_handler(e, traceback_exception) # type: ignore - ) - ## Map to OpenAI Exception - raise exception_type( - model=self.model, - custom_llm_provider=self.custom_llm_provider, - original_exception=e, - completion_kwargs={}, - extra_kwargs={}, - ) +# class CustomStreamWrapper: +# def __init__( +# self, +# completion_stream, +# model, +# logging_obj: Any, +# custom_llm_provider: Optional[str] = None, +# stream_options=None, +# make_call: Optional[Callable] = None, +# _response_headers: Optional[dict] = None, +# ): +# self.model = model +# self.make_call = make_call +# self.custom_llm_provider = custom_llm_provider +# self.logging_obj: LiteLLMLoggingObject = logging_obj +# self.completion_stream = completion_stream +# self.sent_first_chunk = False +# self.sent_last_chunk = False +# self.system_fingerprint: Optional[str] = None +# self.received_finish_reason: Optional[str] = None +# self.special_tokens = [ +# "<|assistant|>", +# "<|system|>", +# "<|user|>", +# "", +# "", +# "<|im_end|>", +# "<|im_start|>", +# ] +# self.holding_chunk = "" +# self.complete_response = "" +# self.response_uptil_now = "" +# _model_info = ( +# self.logging_obj.model_call_details.get("litellm_params", {}).get( +# "model_info", {} +# ) +# or {} +# ) +# self._hidden_params = { +# "model_id": (_model_info.get("id", None)), +# } # returned as x-litellm-model-id response header in proxy + +# self._hidden_params["additional_headers"] = process_response_headers( +# _response_headers or {} +# ) # GUARANTEE OPENAI HEADERS IN RESPONSE + +# self._response_headers = _response_headers +# self.response_id = None +# self.logging_loop = None +# self.rules = Rules() +# self.stream_options = stream_options or getattr( +# logging_obj, "stream_options", None +# ) +# self.messages = getattr(logging_obj, "messages", None) +# self.sent_stream_usage = False +# self.send_stream_usage = ( +# True if self.check_send_stream_usage(self.stream_options) else False +# ) +# self.tool_call = False +# self.chunks: List = ( +# [] +# ) # keep track of the returned chunks - used for calculating the input/output tokens for stream options +# self.is_function_call = self.check_is_function_call(logging_obj=logging_obj) + +# def __iter__(self): +# return self + +# def __aiter__(self): +# return self + +# def check_send_stream_usage(self, stream_options: Optional[dict]): +# return ( +# stream_options is not None +# and stream_options.get("include_usage", False) is True +# ) + +# def check_is_function_call(self, logging_obj) -> bool: +# if hasattr(logging_obj, "optional_params") and isinstance( +# logging_obj.optional_params, dict +# ): +# if ( +# "litellm_param_is_function_call" in logging_obj.optional_params +# and logging_obj.optional_params["litellm_param_is_function_call"] +# is True +# ): +# return True + +# return False + +# def process_chunk(self, chunk: str): +# """ +# NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta. +# """ +# try: +# chunk = chunk.strip() +# self.complete_response = self.complete_response.strip() + +# if chunk.startswith(self.complete_response): +# # Remove last_sent_chunk only if it appears at the start of the new chunk +# chunk = chunk[len(self.complete_response) :] + +# self.complete_response += chunk +# return chunk +# except Exception as e: +# raise e + +# def safety_checker(self) -> None: +# """ +# Fixes - https://github.com/BerriAI/litellm/issues/5158 + +# if the model enters a loop and starts repeating the same chunk again, break out of loop and raise an internalservererror - allows for retries. + +# Raises - InternalServerError, if LLM enters infinite loop while streaming +# """ +# if len(self.chunks) >= litellm.REPEATED_STREAMING_CHUNK_LIMIT: +# # Get the last n chunks +# last_chunks = self.chunks[-litellm.REPEATED_STREAMING_CHUNK_LIMIT :] + +# # Extract the relevant content from the chunks +# last_contents = [chunk.choices[0].delta.content for chunk in last_chunks] + +# # Check if all extracted contents are identical +# if all(content == last_contents[0] for content in last_contents): +# if ( +# last_contents[0] is not None +# and isinstance(last_contents[0], str) +# and len(last_contents[0]) > 2 +# ): # ignore empty content - https://github.com/BerriAI/litellm/issues/5158#issuecomment-2287156946 +# # All last n chunks are identical +# raise litellm.InternalServerError( +# message="The model is repeating the same chunk = {}.".format( +# last_contents[0] +# ), +# model="", +# llm_provider="", +# ) + +# def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): +# """ +# Output parse / special tokens for sagemaker + hf streaming. +# """ +# hold = False +# if ( +# self.custom_llm_provider != "huggingface" +# and self.custom_llm_provider != "sagemaker" +# ): +# return hold, chunk + +# if finish_reason: +# for token in self.special_tokens: +# if token in chunk: +# chunk = chunk.replace(token, "") +# return hold, chunk + +# if self.sent_first_chunk is True: +# return hold, chunk + +# curr_chunk = self.holding_chunk + chunk +# curr_chunk = curr_chunk.strip() + +# for token in self.special_tokens: +# if len(curr_chunk) < len(token) and curr_chunk in token: +# hold = True +# self.holding_chunk = curr_chunk +# elif len(curr_chunk) >= len(token): +# if token in curr_chunk: +# self.holding_chunk = curr_chunk.replace(token, "") +# hold = True +# else: +# pass + +# if hold is False: # reset +# self.holding_chunk = "" +# return hold, curr_chunk + +# def handle_anthropic_text_chunk(self, chunk): +# """ +# For old anthropic models - claude-1, claude-2. + +# Claude-3 is handled from within Anthropic.py VIA ModelResponseIterator() +# """ +# str_line = chunk +# if isinstance(chunk, bytes): # Handle binary data +# str_line = chunk.decode("utf-8") # Convert bytes to string +# text = "" +# is_finished = False +# finish_reason = None +# if str_line.startswith("data:"): +# data_json = json.loads(str_line[5:]) +# type_chunk = data_json.get("type", None) +# if type_chunk == "completion": +# text = data_json.get("completion") +# finish_reason = data_json.get("stop_reason") +# if finish_reason is not None: +# is_finished = True +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif "error" in str_line: +# raise ValueError(f"Unable to parse response. Original response: {str_line}") +# else: +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } + +# def handle_vertexai_anthropic_chunk(self, chunk): +# """ +# - MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai +# - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai +# - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai +# """ +# text = "" +# prompt_tokens = None +# completion_tokens = None +# is_finished = False +# finish_reason = None +# type_chunk = getattr(chunk, "type", None) +# if type_chunk == "message_start": +# message = getattr(chunk, "message", None) +# text = "" # lets us return a chunk with usage to user +# _usage = getattr(message, "usage", None) +# if _usage is not None: +# prompt_tokens = getattr(_usage, "input_tokens", None) +# completion_tokens = getattr(_usage, "output_tokens", None) +# elif type_chunk == "content_block_delta": +# """ +# Anthropic content chunk +# chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} +# """ +# delta = getattr(chunk, "delta", None) +# if delta is not None: +# text = getattr(delta, "text", "") +# else: +# text = "" +# elif type_chunk == "message_delta": +# """ +# Anthropic +# chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} +# """ +# # TODO - get usage from this chunk, set in response +# delta = getattr(chunk, "delta", None) +# if delta is not None: +# finish_reason = getattr(delta, "stop_reason", "stop") +# is_finished = True +# _usage = getattr(chunk, "usage", None) +# if _usage is not None: +# prompt_tokens = getattr(_usage, "input_tokens", None) +# completion_tokens = getattr(_usage, "output_tokens", None) + +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# "prompt_tokens": prompt_tokens, +# "completion_tokens": completion_tokens, +# } + +# def handle_predibase_chunk(self, chunk): +# try: +# if not isinstance(chunk, str): +# chunk = chunk.decode( +# "utf-8" +# ) # DO NOT REMOVE this: This is required for HF inference API + Streaming +# text = "" +# is_finished = False +# finish_reason = "" +# print_verbose(f"chunk: {chunk}") +# if chunk.startswith("data:"): +# data_json = json.loads(chunk[5:]) +# print_verbose(f"data json: {data_json}") +# if "token" in data_json and "text" in data_json["token"]: +# text = data_json["token"]["text"] +# if data_json.get("details", False) and data_json["details"].get( +# "finish_reason", False +# ): +# is_finished = True +# finish_reason = data_json["details"]["finish_reason"] +# elif data_json.get( +# "generated_text", False +# ): # if full generated text exists, then stream is complete +# text = "" # don't return the final bos token +# is_finished = True +# finish_reason = "stop" +# elif data_json.get("error", False): +# raise Exception(data_json.get("error")) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif "error" in chunk: +# raise ValueError(chunk) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception as e: +# raise e + +# def handle_huggingface_chunk(self, chunk): +# try: +# if not isinstance(chunk, str): +# chunk = chunk.decode( +# "utf-8" +# ) # DO NOT REMOVE this: This is required for HF inference API + Streaming +# text = "" +# is_finished = False +# finish_reason = "" +# print_verbose(f"chunk: {chunk}") +# if chunk.startswith("data:"): +# data_json = json.loads(chunk[5:]) +# print_verbose(f"data json: {data_json}") +# if "token" in data_json and "text" in data_json["token"]: +# text = data_json["token"]["text"] +# if data_json.get("details", False) and data_json["details"].get( +# "finish_reason", False +# ): +# is_finished = True +# finish_reason = data_json["details"]["finish_reason"] +# elif data_json.get( +# "generated_text", False +# ): # if full generated text exists, then stream is complete +# text = "" # don't return the final bos token +# is_finished = True +# finish_reason = "stop" +# elif data_json.get("error", False): +# raise Exception(data_json.get("error")) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif "error" in chunk: +# raise ValueError(chunk) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception as e: +# raise e + +# def handle_ai21_chunk(self, chunk): # fake streaming +# chunk = chunk.decode("utf-8") +# data_json = json.loads(chunk) +# try: +# text = data_json["completions"][0]["data"]["text"] +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_maritalk_chunk(self, chunk): # fake streaming +# chunk = chunk.decode("utf-8") +# data_json = json.loads(chunk) +# try: +# text = data_json["answer"] +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_nlp_cloud_chunk(self, chunk): +# text = "" +# is_finished = False +# finish_reason = "" +# try: +# if "dolphin" in self.model: +# chunk = self.process_chunk(chunk=chunk) +# else: +# data_json = json.loads(chunk) +# chunk = data_json["generated_text"] +# text = chunk +# if "[DONE]" in text: +# text = text.replace("[DONE]", "") +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_aleph_alpha_chunk(self, chunk): +# chunk = chunk.decode("utf-8") +# data_json = json.loads(chunk) +# try: +# text = data_json["completions"][0]["completion"] +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_cohere_chunk(self, chunk): +# chunk = chunk.decode("utf-8") +# data_json = json.loads(chunk) +# try: +# text = "" +# is_finished = False +# finish_reason = "" +# index: Optional[int] = None +# if "index" in data_json: +# index = data_json.get("index") +# if "text" in data_json: +# text = data_json["text"] +# elif "is_finished" in data_json: +# is_finished = data_json["is_finished"] +# finish_reason = data_json["finish_reason"] +# else: +# raise Exception(data_json) +# return { +# "index": index, +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_cohere_chat_chunk(self, chunk): +# chunk = chunk.decode("utf-8") +# data_json = json.loads(chunk) +# print_verbose(f"chunk: {chunk}") +# try: +# text = "" +# is_finished = False +# finish_reason = "" +# if "text" in data_json: +# text = data_json["text"] +# elif "is_finished" in data_json and data_json["is_finished"] is True: +# is_finished = data_json["is_finished"] +# finish_reason = data_json["finish_reason"] +# else: +# return +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_azure_chunk(self, chunk): +# is_finished = False +# finish_reason = "" +# text = "" +# print_verbose(f"chunk: {chunk}") +# if "data: [DONE]" in chunk: +# text = "" +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif chunk.startswith("data:"): +# data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): +# try: +# if len(data_json["choices"]) > 0: +# delta = data_json["choices"][0]["delta"] +# text = "" if delta is None else delta.get("content", "") +# if data_json["choices"][0].get("finish_reason", None): +# is_finished = True +# finish_reason = data_json["choices"][0]["finish_reason"] +# print_verbose( +# f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" +# ) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError( +# f"Unable to parse response. Original response: {chunk}" +# ) +# elif "error" in chunk: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") +# else: +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } + +# def handle_replicate_chunk(self, chunk): +# try: +# text = "" +# is_finished = False +# finish_reason = "" +# if "output" in chunk: +# text = chunk["output"] +# if "status" in chunk: +# if chunk["status"] == "succeeded": +# is_finished = True +# finish_reason = "stop" +# elif chunk.get("error", None): +# raise Exception(chunk["error"]) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# except Exception: +# raise ValueError(f"Unable to parse response. Original response: {chunk}") + +# def handle_openai_chat_completion_chunk(self, chunk): +# try: +# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") +# str_line = chunk +# text = "" +# is_finished = False +# finish_reason = None +# logprobs = None +# usage = None +# if str_line and str_line.choices and len(str_line.choices) > 0: +# if ( +# str_line.choices[0].delta is not None +# and str_line.choices[0].delta.content is not None +# ): +# text = str_line.choices[0].delta.content +# else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai +# pass +# if str_line.choices[0].finish_reason: +# is_finished = True +# finish_reason = str_line.choices[0].finish_reason + +# # checking for logprobs +# if ( +# hasattr(str_line.choices[0], "logprobs") +# and str_line.choices[0].logprobs is not None +# ): +# logprobs = str_line.choices[0].logprobs +# else: +# logprobs = None + +# usage = getattr(str_line, "usage", None) + +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# "logprobs": logprobs, +# "original_chunk": str_line, +# "usage": usage, +# } +# except Exception as e: +# raise e + +# def handle_azure_text_completion_chunk(self, chunk): +# try: +# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") +# text = "" +# is_finished = False +# finish_reason = None +# choices = getattr(chunk, "choices", []) +# if len(choices) > 0: +# text = choices[0].text +# if choices[0].finish_reason is not None: +# is_finished = True +# finish_reason = choices[0].finish_reason +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } + +# except Exception as e: +# raise e + +# def handle_openai_text_completion_chunk(self, chunk): +# try: +# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") +# text = "" +# is_finished = False +# finish_reason = None +# usage = None +# choices = getattr(chunk, "choices", []) +# if len(choices) > 0: +# text = choices[0].text +# if choices[0].finish_reason is not None: +# is_finished = True +# finish_reason = choices[0].finish_reason +# usage = getattr(chunk, "usage", None) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# "usage": usage, +# } + +# except Exception as e: +# raise e + +# def handle_baseten_chunk(self, chunk): +# try: +# chunk = chunk.decode("utf-8") +# if len(chunk) > 0: +# if chunk.startswith("data:"): +# data_json = json.loads(chunk[5:]) +# if "token" in data_json and "text" in data_json["token"]: +# return data_json["token"]["text"] +# else: +# return "" +# data_json = json.loads(chunk) +# if "model_output" in data_json: +# if ( +# isinstance(data_json["model_output"], dict) +# and "data" in data_json["model_output"] +# and isinstance(data_json["model_output"]["data"], list) +# ): +# return data_json["model_output"]["data"][0] +# elif isinstance(data_json["model_output"], str): +# return data_json["model_output"] +# elif "completion" in data_json and isinstance( +# data_json["completion"], str +# ): +# return data_json["completion"] +# else: +# raise ValueError( +# f"Unable to parse response. Original response: {chunk}" +# ) +# else: +# return "" +# else: +# return "" +# except Exception as e: +# verbose_logger.exception( +# "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format( +# str(e) +# ) +# ) +# return "" + +# def handle_cloudlfare_stream(self, chunk): +# try: +# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") +# chunk = chunk.decode("utf-8") +# str_line = chunk +# text = "" +# is_finished = False +# finish_reason = None + +# if "[DONE]" in chunk: +# return {"text": text, "is_finished": True, "finish_reason": "stop"} +# elif str_line.startswith("data:"): +# data_json = json.loads(str_line[5:]) +# print_verbose(f"delta content: {data_json}") +# text = data_json["response"] +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# else: +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } + +# except Exception as e: +# raise e + +# def handle_ollama_stream(self, chunk): +# try: +# if isinstance(chunk, dict): +# json_chunk = chunk +# else: +# json_chunk = json.loads(chunk) +# if "error" in json_chunk: +# raise Exception(f"Ollama Error - {json_chunk}") + +# text = "" +# is_finished = False +# finish_reason = None +# if json_chunk["done"] is True: +# text = "" +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif json_chunk["response"]: +# print_verbose(f"delta content: {json_chunk}") +# text = json_chunk["response"] +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# else: +# raise Exception(f"Ollama Error - {json_chunk}") +# except Exception as e: +# raise e + +# def handle_ollama_chat_stream(self, chunk): +# # for ollama_chat/ provider +# try: +# if isinstance(chunk, dict): +# json_chunk = chunk +# else: +# json_chunk = json.loads(chunk) +# if "error" in json_chunk: +# raise Exception(f"Ollama Error - {json_chunk}") + +# text = "" +# is_finished = False +# finish_reason = None +# if json_chunk["done"] is True: +# text = "" +# is_finished = True +# finish_reason = "stop" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# elif "message" in json_chunk: +# print_verbose(f"delta content: {json_chunk}") +# text = json_chunk["message"]["content"] +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# } +# else: +# raise Exception(f"Ollama Error - {json_chunk}") +# except Exception as e: +# raise e + +# def handle_watsonx_stream(self, chunk): +# try: +# if isinstance(chunk, dict): +# parsed_response = chunk +# elif isinstance(chunk, (str, bytes)): +# if isinstance(chunk, bytes): +# chunk = chunk.decode("utf-8") +# if "generated_text" in chunk: +# response = chunk.replace("data: ", "").strip() +# parsed_response = json.loads(response) +# else: +# return { +# "text": "", +# "is_finished": False, +# "prompt_tokens": 0, +# "completion_tokens": 0, +# } +# else: +# print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") +# raise ValueError( +# f"Unable to parse response. Original response: {chunk}" +# ) +# results = parsed_response.get("results", []) +# if len(results) > 0: +# text = results[0].get("generated_text", "") +# finish_reason = results[0].get("stop_reason") +# is_finished = finish_reason != "not_finished" +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# "prompt_tokens": results[0].get("input_token_count", 0), +# "completion_tokens": results[0].get("generated_token_count", 0), +# } +# return {"text": "", "is_finished": False} +# except Exception as e: +# raise e + +# def handle_triton_stream(self, chunk): +# try: +# if isinstance(chunk, dict): +# parsed_response = chunk +# elif isinstance(chunk, (str, bytes)): +# if isinstance(chunk, bytes): +# chunk = chunk.decode("utf-8") +# if "text_output" in chunk: +# response = chunk.replace("data: ", "").strip() +# parsed_response = json.loads(response) +# else: +# return { +# "text": "", +# "is_finished": False, +# "prompt_tokens": 0, +# "completion_tokens": 0, +# } +# else: +# print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") +# raise ValueError( +# f"Unable to parse response. Original response: {chunk}" +# ) +# text = parsed_response.get("text_output", "") +# finish_reason = parsed_response.get("stop_reason") +# is_finished = parsed_response.get("is_finished", False) +# return { +# "text": text, +# "is_finished": is_finished, +# "finish_reason": finish_reason, +# "prompt_tokens": parsed_response.get("input_token_count", 0), +# "completion_tokens": parsed_response.get("generated_token_count", 0), +# } +# return {"text": "", "is_finished": False} +# except Exception as e: +# raise e + +# def handle_clarifai_completion_chunk(self, chunk): +# try: +# if isinstance(chunk, dict): +# parsed_response = chunk +# elif isinstance(chunk, (str, bytes)): +# if isinstance(chunk, bytes): +# parsed_response = chunk.decode("utf-8") +# else: +# parsed_response = chunk +# else: +# raise ValueError("Unable to parse streaming chunk") +# if isinstance(parsed_response, dict): +# data_json = parsed_response +# else: +# data_json = json.loads(parsed_response) +# text = ( +# data_json.get("outputs", "")[0] +# .get("data", "") +# .get("text", "") +# .get("raw", "") +# ) +# len( +# encoding.encode( +# data_json.get("outputs", "")[0] +# .get("input", "") +# .get("data", "") +# .get("text", "") +# .get("raw", "") +# ) +# ) +# len(encoding.encode(text)) +# return { +# "text": text, +# "is_finished": True, +# } +# except Exception as e: +# verbose_logger.exception( +# "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format( +# str(e) +# ) +# ) +# return "" + +# def model_response_creator( +# self, chunk: Optional[dict] = None, hidden_params: Optional[dict] = None +# ): +# _model = self.model +# _received_llm_provider = self.custom_llm_provider +# _logging_obj_llm_provider = self.logging_obj.model_call_details.get("custom_llm_provider", None) # type: ignore +# if ( +# _received_llm_provider == "openai" +# and _received_llm_provider != _logging_obj_llm_provider +# ): +# _model = "{}/{}".format(_logging_obj_llm_provider, _model) +# if chunk is None: +# chunk = {} +# else: +# # pop model keyword +# chunk.pop("model", None) + +# model_response = ModelResponse( +# stream=True, model=_model, stream_options=self.stream_options, **chunk +# ) +# if self.response_id is not None: +# model_response.id = self.response_id +# else: +# self.response_id = model_response.id # type: ignore +# if self.system_fingerprint is not None: +# model_response.system_fingerprint = self.system_fingerprint +# if hidden_params is not None: +# model_response._hidden_params = hidden_params +# model_response._hidden_params["custom_llm_provider"] = _logging_obj_llm_provider +# model_response._hidden_params["created_at"] = time.time() +# model_response._hidden_params = { +# **model_response._hidden_params, +# **self._hidden_params, +# } + +# if ( +# len(model_response.choices) > 0 +# and getattr(model_response.choices[0], "delta") is not None +# ): +# # do nothing, if object instantiated +# pass +# else: +# model_response.choices = [StreamingChoices(finish_reason=None)] +# return model_response + +# def is_delta_empty(self, delta: Delta) -> bool: +# is_empty = True +# if delta.content is not None: +# is_empty = False +# elif delta.tool_calls is not None: +# is_empty = False +# elif delta.function_call is not None: +# is_empty = False +# return is_empty + +# def return_processed_chunk_logic( # noqa +# self, +# completion_obj: dict, +# model_response: ModelResponseStream, +# response_obj: dict, +# ): + +# print_verbose( +# f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}" +# ) +# if ( +# "content" in completion_obj +# and ( +# isinstance(completion_obj["content"], str) +# and len(completion_obj["content"]) > 0 +# ) +# or ( +# "tool_calls" in completion_obj +# and completion_obj["tool_calls"] is not None +# and len(completion_obj["tool_calls"]) > 0 +# ) +# or ( +# "function_call" in completion_obj +# and completion_obj["function_call"] is not None +# ) +# ): # cannot set content of an OpenAI Object to be an empty string +# self.safety_checker() +# hold, model_response_str = self.check_special_tokens( +# chunk=completion_obj["content"], +# finish_reason=model_response.choices[0].finish_reason, +# ) # filter out bos/eos tokens from openai-compatible hf endpoints +# print_verbose(f"hold - {hold}, model_response_str - {model_response_str}") +# if hold is False: +# ## check if openai/azure chunk +# original_chunk = response_obj.get("original_chunk", None) +# if original_chunk: +# model_response.id = original_chunk.id +# self.response_id = original_chunk.id +# if len(original_chunk.choices) > 0: +# choices = [] +# for choice in original_chunk.choices: +# try: +# if isinstance(choice, BaseModel): +# choice_json = choice.model_dump() +# choice_json.pop( +# "finish_reason", None +# ) # for mistral etc. which return a value in their last chunk (not-openai compatible). +# print_verbose(f"choice_json: {choice_json}") +# choices.append(StreamingChoices(**choice_json)) +# except Exception: +# choices.append(StreamingChoices()) +# print_verbose(f"choices in streaming: {choices}") +# setattr(model_response, "choices", choices) +# else: +# return +# model_response.system_fingerprint = ( +# original_chunk.system_fingerprint +# ) +# setattr( +# model_response, +# "citations", +# getattr(original_chunk, "citations", None), +# ) +# print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") +# if self.sent_first_chunk is False: +# model_response.choices[0].delta["role"] = "assistant" +# self.sent_first_chunk = True +# elif self.sent_first_chunk is True and hasattr( +# model_response.choices[0].delta, "role" +# ): +# _initial_delta = model_response.choices[0].delta.model_dump() +# _initial_delta.pop("role", None) +# model_response.choices[0].delta = Delta(**_initial_delta) +# print_verbose( +# f"model_response.choices[0].delta: {model_response.choices[0].delta}" +# ) +# else: +# ## else +# completion_obj["content"] = model_response_str +# if self.sent_first_chunk is False: +# completion_obj["role"] = "assistant" +# self.sent_first_chunk = True + +# model_response.choices[0].delta = Delta(**completion_obj) +# _index: Optional[int] = completion_obj.get("index") +# if _index is not None: +# model_response.choices[0].index = _index +# print_verbose(f"returning model_response: {model_response}") +# return model_response +# else: +# return +# elif self.received_finish_reason is not None: +# if self.sent_last_chunk is True: +# # Bedrock returns the guardrail trace in the last chunk - we want to return this here +# if self.custom_llm_provider == "bedrock" and "trace" in model_response: +# return model_response + +# # Default - return StopIteration +# raise StopIteration +# # flush any remaining holding chunk +# if len(self.holding_chunk) > 0: +# if model_response.choices[0].delta.content is None: +# model_response.choices[0].delta.content = self.holding_chunk +# else: +# model_response.choices[0].delta.content = ( +# self.holding_chunk + model_response.choices[0].delta.content +# ) +# self.holding_chunk = "" +# # if delta is None +# _is_delta_empty = self.is_delta_empty(delta=model_response.choices[0].delta) + +# if _is_delta_empty: +# # get any function call arguments +# model_response.choices[0].finish_reason = map_finish_reason( +# finish_reason=self.received_finish_reason +# ) # ensure consistent output to openai + +# self.sent_last_chunk = True + +# return model_response +# elif ( +# model_response.choices[0].delta.tool_calls is not None +# or model_response.choices[0].delta.function_call is not None +# ): +# if self.sent_first_chunk is False: +# model_response.choices[0].delta["role"] = "assistant" +# self.sent_first_chunk = True +# return model_response +# elif ( +# len(model_response.choices) > 0 +# and hasattr(model_response.choices[0].delta, "audio") +# and model_response.choices[0].delta.audio is not None +# ): +# return model_response +# else: +# if hasattr(model_response, "usage"): +# self.chunks.append(model_response) +# return + +# def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 +# model_response = self.model_response_creator() +# response_obj: dict = {} +# try: +# # return this for all models +# completion_obj = {"content": ""} +# from litellm.litellm_core_utils.streaming_utils import ( +# generic_chunk_has_all_required_fields, +# ) +# from litellm.types.utils import GenericStreamingChunk as GChunk + +# if ( +# isinstance(chunk, dict) +# and generic_chunk_has_all_required_fields( +# chunk=chunk +# ) # check if chunk is a generic streaming chunk +# ) or ( +# self.custom_llm_provider +# and ( +# self.custom_llm_provider == "anthropic" +# or self.custom_llm_provider in litellm._custom_providers +# ) +# ): + +# if self.received_finish_reason is not None: +# if "provider_specific_fields" not in chunk: +# raise StopIteration +# anthropic_response_obj: GChunk = chunk +# completion_obj["content"] = anthropic_response_obj["text"] +# if anthropic_response_obj["is_finished"]: +# self.received_finish_reason = anthropic_response_obj[ +# "finish_reason" +# ] + +# if anthropic_response_obj["usage"] is not None: +# model_response.usage = litellm.Usage( +# **anthropic_response_obj["usage"] +# ) + +# if ( +# "tool_use" in anthropic_response_obj +# and anthropic_response_obj["tool_use"] is not None +# ): +# completion_obj["tool_calls"] = [anthropic_response_obj["tool_use"]] + +# if ( +# "provider_specific_fields" in anthropic_response_obj +# and anthropic_response_obj["provider_specific_fields"] is not None +# ): +# for key, value in anthropic_response_obj[ +# "provider_specific_fields" +# ].items(): +# setattr(model_response, key, value) + +# response_obj = anthropic_response_obj +# elif ( +# self.custom_llm_provider +# and self.custom_llm_provider == "anthropic_text" +# ): +# response_obj = self.handle_anthropic_text_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": +# response_obj = self.handle_clarifai_completion_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.model == "replicate" or self.custom_llm_provider == "replicate": +# response_obj = self.handle_replicate_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": +# response_obj = self.handle_huggingface_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider and self.custom_llm_provider == "predibase": +# response_obj = self.handle_predibase_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif ( +# self.custom_llm_provider and self.custom_llm_provider == "baseten" +# ): # baseten doesn't provide streaming +# completion_obj["content"] = self.handle_baseten_chunk(chunk) +# elif ( +# self.custom_llm_provider and self.custom_llm_provider == "ai21" +# ): # ai21 doesn't provide streaming +# response_obj = self.handle_ai21_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": +# response_obj = self.handle_maritalk_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider and self.custom_llm_provider == "vllm": +# completion_obj["content"] = chunk[0].outputs[0].text +# elif ( +# self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" +# ): # aleph alpha doesn't provide streaming +# response_obj = self.handle_aleph_alpha_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "nlp_cloud": +# try: +# response_obj = self.handle_nlp_cloud_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# except Exception as e: +# if self.received_finish_reason: +# raise e +# else: +# if self.sent_first_chunk is False: +# raise Exception("An unknown error occurred with the stream") +# self.received_finish_reason = "stop" +# elif self.custom_llm_provider == "vertex_ai": +# import proto # type: ignore + +# if self.model.startswith("claude-3"): +# response_obj = self.handle_vertexai_anthropic_chunk(chunk=chunk) +# if response_obj is None: +# return +# completion_obj["content"] = response_obj["text"] +# setattr(model_response, "usage", Usage()) +# if response_obj.get("prompt_tokens", None) is not None: +# model_response.usage.prompt_tokens = response_obj[ +# "prompt_tokens" +# ] +# if response_obj.get("completion_tokens", None) is not None: +# model_response.usage.completion_tokens = response_obj[ +# "completion_tokens" +# ] +# if hasattr(model_response.usage, "prompt_tokens"): +# model_response.usage.total_tokens = ( +# getattr(model_response.usage, "total_tokens", 0) +# + model_response.usage.prompt_tokens +# ) +# if hasattr(model_response.usage, "completion_tokens"): +# model_response.usage.total_tokens = ( +# getattr(model_response.usage, "total_tokens", 0) +# + model_response.usage.completion_tokens +# ) + +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif hasattr(chunk, "candidates") is True: +# try: +# try: +# completion_obj["content"] = chunk.text +# except Exception as e: +# if "Part has no text." in str(e): +# ## check for function calling +# function_call = ( +# chunk.candidates[0].content.parts[0].function_call +# ) + +# args_dict = {} + +# # Check if it's a RepeatedComposite instance +# for key, val in function_call.args.items(): +# if isinstance( +# val, +# proto.marshal.collections.repeated.RepeatedComposite, +# ): +# # If so, convert to list +# args_dict[key] = [v for v in val] +# else: +# args_dict[key] = val + +# try: +# args_str = json.dumps(args_dict) +# except Exception as e: +# raise e +# _delta_obj = litellm.utils.Delta( +# content=None, +# tool_calls=[ +# { +# "id": f"call_{str(uuid.uuid4())}", +# "function": { +# "arguments": args_str, +# "name": function_call.name, +# }, +# "type": "function", +# } +# ], +# ) +# _streaming_response = StreamingChoices(delta=_delta_obj) +# _model_response = ModelResponse(stream=True) +# _model_response.choices = [_streaming_response] +# response_obj = {"original_chunk": _model_response} +# else: +# raise e +# if ( +# hasattr(chunk.candidates[0], "finish_reason") +# and chunk.candidates[0].finish_reason.name +# != "FINISH_REASON_UNSPECIFIED" +# ): # every non-final chunk in vertex ai has this +# self.received_finish_reason = chunk.candidates[ +# 0 +# ].finish_reason.name +# except Exception: +# if chunk.candidates[0].finish_reason.name == "SAFETY": +# raise Exception( +# f"The response was blocked by VertexAI. {str(chunk)}" +# ) +# else: +# completion_obj["content"] = str(chunk) +# elif self.custom_llm_provider == "cohere": +# response_obj = self.handle_cohere_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "cohere_chat": +# response_obj = self.handle_cohere_chat_chunk(chunk) +# if response_obj is None: +# return +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] + +# elif self.custom_llm_provider == "petals": +# if len(self.completion_stream) == 0: +# if self.received_finish_reason is not None: +# raise StopIteration +# else: +# self.received_finish_reason = "stop" +# chunk_size = 30 +# new_chunk = self.completion_stream[:chunk_size] +# completion_obj["content"] = new_chunk +# self.completion_stream = self.completion_stream[chunk_size:] +# elif self.custom_llm_provider == "palm": +# # fake streaming +# response_obj = {} +# if len(self.completion_stream) == 0: +# if self.received_finish_reason is not None: +# raise StopIteration +# else: +# self.received_finish_reason = "stop" +# chunk_size = 30 +# new_chunk = self.completion_stream[:chunk_size] +# completion_obj["content"] = new_chunk +# self.completion_stream = self.completion_stream[chunk_size:] +# elif self.custom_llm_provider == "ollama": +# response_obj = self.handle_ollama_stream(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "ollama_chat": +# response_obj = self.handle_ollama_chat_stream(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "cloudflare": +# response_obj = self.handle_cloudlfare_stream(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "watsonx": +# response_obj = self.handle_watsonx_stream(chunk) +# completion_obj["content"] = response_obj["text"] +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "triton": +# response_obj = self.handle_triton_stream(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "text-completion-openai": +# response_obj = self.handle_openai_text_completion_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# if response_obj["usage"] is not None: +# model_response.usage = litellm.Usage( +# prompt_tokens=response_obj["usage"].prompt_tokens, +# completion_tokens=response_obj["usage"].completion_tokens, +# total_tokens=response_obj["usage"].total_tokens, +# ) +# elif self.custom_llm_provider == "text-completion-codestral": +# response_obj = litellm.MistralTextCompletionConfig()._chunk_parser( +# chunk +# ) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# if "usage" in response_obj is not None: +# model_response.usage = litellm.Usage( +# prompt_tokens=response_obj["usage"].prompt_tokens, +# completion_tokens=response_obj["usage"].completion_tokens, +# total_tokens=response_obj["usage"].total_tokens, +# ) +# elif self.custom_llm_provider == "azure_text": +# response_obj = self.handle_azure_text_completion_chunk(chunk) +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# elif self.custom_llm_provider == "cached_response": +# response_obj = { +# "text": chunk.choices[0].delta.content, +# "is_finished": True, +# "finish_reason": chunk.choices[0].finish_reason, +# "original_chunk": chunk, +# "tool_calls": ( +# chunk.choices[0].delta.tool_calls +# if hasattr(chunk.choices[0].delta, "tool_calls") +# else None +# ), +# } + +# completion_obj["content"] = response_obj["text"] +# if response_obj["tool_calls"] is not None: +# completion_obj["tool_calls"] = response_obj["tool_calls"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if hasattr(chunk, "id"): +# model_response.id = chunk.id +# self.response_id = chunk.id +# if hasattr(chunk, "system_fingerprint"): +# self.system_fingerprint = chunk.system_fingerprint +# if response_obj["is_finished"]: +# self.received_finish_reason = response_obj["finish_reason"] +# else: # openai / azure chat model +# if self.custom_llm_provider == "azure": +# if hasattr(chunk, "model"): +# # for azure, we need to pass the model from the orignal chunk +# self.model = chunk.model +# response_obj = self.handle_openai_chat_completion_chunk(chunk) +# if response_obj is None: +# return +# completion_obj["content"] = response_obj["text"] +# print_verbose(f"completion obj content: {completion_obj['content']}") +# if response_obj["is_finished"]: +# if response_obj["finish_reason"] == "error": +# raise Exception( +# "{} raised a streaming error - finish_reason: error, no content string given. Received Chunk={}".format( +# self.custom_llm_provider, response_obj +# ) +# ) +# self.received_finish_reason = response_obj["finish_reason"] +# if response_obj.get("original_chunk", None) is not None: +# if hasattr(response_obj["original_chunk"], "id"): +# model_response.id = response_obj["original_chunk"].id +# self.response_id = model_response.id +# if hasattr(response_obj["original_chunk"], "system_fingerprint"): +# model_response.system_fingerprint = response_obj[ +# "original_chunk" +# ].system_fingerprint +# self.system_fingerprint = response_obj[ +# "original_chunk" +# ].system_fingerprint +# if response_obj["logprobs"] is not None: +# model_response.choices[0].logprobs = response_obj["logprobs"] + +# if response_obj["usage"] is not None: +# if isinstance(response_obj["usage"], dict): +# model_response.usage = litellm.Usage( +# prompt_tokens=response_obj["usage"].get( +# "prompt_tokens", None +# ) +# or None, +# completion_tokens=response_obj["usage"].get( +# "completion_tokens", None +# ) +# or None, +# total_tokens=response_obj["usage"].get("total_tokens", None) +# or None, +# ) +# elif isinstance(response_obj["usage"], BaseModel): +# model_response.usage = litellm.Usage( +# **response_obj["usage"].model_dump() +# ) + +# model_response.model = self.model +# print_verbose( +# f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" +# ) +# ## FUNCTION CALL PARSING +# if ( +# response_obj is not None +# and response_obj.get("original_chunk", None) is not None +# ): # function / tool calling branch - only set for openai/azure compatible endpoints +# # enter this branch when no content has been passed in response +# original_chunk = response_obj.get("original_chunk", None) +# model_response.id = original_chunk.id +# self.response_id = original_chunk.id +# if original_chunk.choices and len(original_chunk.choices) > 0: +# delta = original_chunk.choices[0].delta +# if delta is not None and ( +# delta.function_call is not None or delta.tool_calls is not None +# ): +# try: +# model_response.system_fingerprint = ( +# original_chunk.system_fingerprint +# ) +# ## AZURE - check if arguments is not None +# if ( +# original_chunk.choices[0].delta.function_call +# is not None +# ): +# if ( +# getattr( +# original_chunk.choices[0].delta.function_call, +# "arguments", +# ) +# is None +# ): +# original_chunk.choices[ +# 0 +# ].delta.function_call.arguments = "" +# elif original_chunk.choices[0].delta.tool_calls is not None: +# if isinstance( +# original_chunk.choices[0].delta.tool_calls, list +# ): +# for t in original_chunk.choices[0].delta.tool_calls: +# if hasattr(t, "functions") and hasattr( +# t.functions, "arguments" +# ): +# if ( +# getattr( +# t.function, +# "arguments", +# ) +# is None +# ): +# t.function.arguments = "" +# _json_delta = delta.model_dump() +# print_verbose(f"_json_delta: {_json_delta}") +# if "role" not in _json_delta or _json_delta["role"] is None: +# _json_delta["role"] = ( +# "assistant" # mistral's api returns role as None +# ) +# if "tool_calls" in _json_delta and isinstance( +# _json_delta["tool_calls"], list +# ): +# for tool in _json_delta["tool_calls"]: +# if ( +# isinstance(tool, dict) +# and "function" in tool +# and isinstance(tool["function"], dict) +# and ("type" not in tool or tool["type"] is None) +# ): +# # if function returned but type set to None - mistral's api returns type: None +# tool["type"] = "function" +# model_response.choices[0].delta = Delta(**_json_delta) +# except Exception as e: +# verbose_logger.exception( +# "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format( +# str(e) +# ) +# ) +# model_response.choices[0].delta = Delta() +# elif ( +# delta is not None and getattr(delta, "audio", None) is not None +# ): +# model_response.choices[0].delta.audio = delta.audio +# else: +# try: +# delta = ( +# dict() +# if original_chunk.choices[0].delta is None +# else dict(original_chunk.choices[0].delta) +# ) +# print_verbose(f"original delta: {delta}") +# model_response.choices[0].delta = Delta(**delta) +# print_verbose( +# f"new delta: {model_response.choices[0].delta}" +# ) +# except Exception: +# model_response.choices[0].delta = Delta() +# else: +# if ( +# self.stream_options is not None +# and self.stream_options["include_usage"] is True +# ): +# return model_response +# return +# print_verbose( +# f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" +# ) +# print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") + +# ## CHECK FOR TOOL USE +# if "tool_calls" in completion_obj and len(completion_obj["tool_calls"]) > 0: +# if self.is_function_call is True: # user passed in 'functions' param +# completion_obj["function_call"] = completion_obj["tool_calls"][0][ +# "function" +# ] +# completion_obj["tool_calls"] = None + +# self.tool_call = True + +# ## RETURN ARG +# return self.return_processed_chunk_logic( +# completion_obj=completion_obj, +# model_response=model_response, # type: ignore +# response_obj=response_obj, +# ) + +# except StopIteration: +# raise StopIteration +# except Exception as e: +# traceback.format_exc() +# e.message = str(e) +# raise exception_type( +# model=self.model, +# custom_llm_provider=self.custom_llm_provider, +# original_exception=e, +# ) + +# def set_logging_event_loop(self, loop): +# """ +# import litellm, asyncio + +# loop = asyncio.get_event_loop() # 👈 gets the current event loop + +# response = litellm.completion(.., stream=True) + +# response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging + +# for chunk in response: +# ... +# """ +# self.logging_loop = loop + +# def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool): +# """ +# Runs success logging in a thread and adds the response to the cache +# """ +# if litellm.disable_streaming_logging is True: +# """ +# [NOT RECOMMENDED] +# Set this via `litellm.disable_streaming_logging = True`. + +# Disables streaming logging. +# """ +# return +# ## ASYNC LOGGING +# # Create an event loop for the new thread +# if self.logging_loop is not None: +# future = asyncio.run_coroutine_threadsafe( +# self.logging_obj.async_success_handler( +# processed_chunk, None, None, cache_hit +# ), +# loop=self.logging_loop, +# ) +# future.result() +# else: +# asyncio.run( +# self.logging_obj.async_success_handler( +# processed_chunk, None, None, cache_hit +# ) +# ) +# ## SYNC LOGGING +# self.logging_obj.success_handler(processed_chunk, None, None, cache_hit) + +# ## Sync store in cache +# if self.logging_obj._llm_caching_handler is not None: +# self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( +# processed_chunk +# ) + +# def finish_reason_handler(self): +# model_response = self.model_response_creator() +# complete_streaming_response = litellm.stream_chunk_builder( +# chunks=self.chunks +# ) +# _finish_reason = complete_streaming_response.choices[0].finish_reason + +# print(f"_finish_reason: {_finish_reason}") +# if _finish_reason is not None: +# model_response.choices[0].finish_reason = _finish_reason +# else: +# model_response.choices[0].finish_reason = "stop" + +# ## if tool use +# if ( +# model_response.choices[0].finish_reason == "stop" and self.tool_call +# ): # don't overwrite for other - potential error finish reasons +# model_response.choices[0].finish_reason = "tool_calls" +# return model_response + +# def __next__(self): # noqa: PLR0915 +# cache_hit = False +# if ( +# self.custom_llm_provider is not None +# and self.custom_llm_provider == "cached_response" +# ): +# cache_hit = True +# try: +# if self.completion_stream is None: +# self.fetch_sync_stream() +# while True: +# if ( +# isinstance(self.completion_stream, str) +# or isinstance(self.completion_stream, bytes) +# or isinstance(self.completion_stream, ModelResponse) +# ): +# chunk = self.completion_stream +# else: +# chunk = next(self.completion_stream) +# if chunk is not None and chunk != b"": +# print_verbose( +# f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}" +# ) +# response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk) +# print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") + +# if response is None: +# continue +# ## LOGGING +# threading.Thread( +# target=self.run_success_logging_and_cache_storage, +# args=(response, cache_hit), +# ).start() # log response +# choice = response.choices[0] +# if isinstance(choice, StreamingChoices): +# self.response_uptil_now += choice.delta.get("content", "") or "" +# else: +# self.response_uptil_now += "" +# self.rules.post_call_rules( +# input=self.response_uptil_now, model=self.model +# ) +# # HANDLE STREAM OPTIONS +# self.chunks.append(response) +# if hasattr( +# response, "usage" +# ): # remove usage from chunk, only send on final chunk +# # Convert the object to a dictionary +# obj_dict = response.dict() + +# # Remove an attribute (e.g., 'attr2') +# if "usage" in obj_dict: +# del obj_dict["usage"] + +# # Create a new object without the removed attribute +# response = self.model_response_creator( +# chunk=obj_dict, hidden_params=response._hidden_params +# ) +# # add usage as hidden param +# if self.sent_last_chunk is True and self.stream_options is None: +# usage = calculate_total_usage(chunks=self.chunks) +# response._hidden_params["usage"] = usage +# # RETURN RESULT +# return response + +# except StopIteration: +# if self.sent_last_chunk is True: +# complete_streaming_response = litellm.stream_chunk_builder( +# chunks=self.chunks, messages=self.messages +# ) +# response = self.model_response_creator() +# if complete_streaming_response is not None: +# setattr( +# response, +# "usage", +# getattr(complete_streaming_response, "usage"), +# ) + +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.success_handler, +# args=(response, None, None, cache_hit), +# ).start() # log response + +# if self.sent_stream_usage is False and self.send_stream_usage is True: +# self.sent_stream_usage = True +# return response +# raise # Re-raise StopIteration +# else: +# self.sent_last_chunk = True +# processed_chunk = self.finish_reason_handler() +# if self.stream_options is None: # add usage as hidden param +# usage = calculate_total_usage(chunks=self.chunks) +# processed_chunk._hidden_params["usage"] = usage +# ## LOGGING +# threading.Thread( +# target=self.run_success_logging_and_cache_storage, +# args=(processed_chunk, cache_hit), +# ).start() # log response +# return processed_chunk +# except Exception as e: +# traceback_exception = traceback.format_exc() +# # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated +# threading.Thread( +# target=self.logging_obj.failure_handler, args=(e, traceback_exception) +# ).start() +# if isinstance(e, OpenAIError): +# raise e +# else: +# raise exception_type( +# model=self.model, +# original_exception=e, +# custom_llm_provider=self.custom_llm_provider, +# ) + +# def fetch_sync_stream(self): +# if self.completion_stream is None and self.make_call is not None: +# # Call make_call to get the completion stream +# self.completion_stream = self.make_call(client=litellm.module_level_client) +# self._stream_iter = self.completion_stream.__iter__() + +# return self.completion_stream + +# async def fetch_stream(self): +# if self.completion_stream is None and self.make_call is not None: +# # Call make_call to get the completion stream +# self.completion_stream = await self.make_call( +# client=litellm.module_level_aclient +# ) +# self._stream_iter = self.completion_stream.__aiter__() + +# return self.completion_stream + +# async def __anext__(self): # noqa: PLR0915 +# cache_hit = False +# if ( +# self.custom_llm_provider is not None +# and self.custom_llm_provider == "cached_response" +# ): +# cache_hit = True +# try: +# if self.completion_stream is None: +# await self.fetch_stream() + +# if ( +# self.custom_llm_provider == "openai" +# or self.custom_llm_provider == "azure" +# or self.custom_llm_provider == "custom_openai" +# or self.custom_llm_provider == "text-completion-openai" +# or self.custom_llm_provider == "text-completion-codestral" +# or self.custom_llm_provider == "azure_text" +# or self.custom_llm_provider == "anthropic" +# or self.custom_llm_provider == "anthropic_text" +# or self.custom_llm_provider == "huggingface" +# or self.custom_llm_provider == "ollama" +# or self.custom_llm_provider == "ollama_chat" +# or self.custom_llm_provider == "vertex_ai" +# or self.custom_llm_provider == "vertex_ai_beta" +# or self.custom_llm_provider == "sagemaker" +# or self.custom_llm_provider == "sagemaker_chat" +# or self.custom_llm_provider == "gemini" +# or self.custom_llm_provider == "replicate" +# or self.custom_llm_provider == "cached_response" +# or self.custom_llm_provider == "predibase" +# or self.custom_llm_provider == "databricks" +# or self.custom_llm_provider == "bedrock" +# or self.custom_llm_provider == "triton" +# or self.custom_llm_provider == "watsonx" +# or self.custom_llm_provider in litellm.openai_compatible_endpoints +# or self.custom_llm_provider in litellm._custom_providers +# ): +# async for chunk in self.completion_stream: +# if chunk == "None" or chunk is None: +# raise Exception +# elif ( +# self.custom_llm_provider == "gemini" +# and hasattr(chunk, "parts") +# and len(chunk.parts) == 0 +# ): +# continue +# # chunk_creator() does logging/stream chunk building. We need to let it know its being called in_async_func, so we don't double add chunks. +# # __anext__ also calls async_success_handler, which does logging +# print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") + +# processed_chunk: Optional[ModelResponse] = self.chunk_creator( +# chunk=chunk +# ) +# print_verbose( +# f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}" +# ) +# if processed_chunk is None: +# continue +# ## LOGGING +# ## LOGGING +# executor.submit( +# self.logging_obj.success_handler, +# result=processed_chunk, +# start_time=None, +# end_time=None, +# cache_hit=cache_hit, +# ) + +# asyncio.create_task( +# self.logging_obj.async_success_handler( +# processed_chunk, cache_hit=cache_hit +# ) +# ) + +# if self.logging_obj._llm_caching_handler is not None: +# asyncio.create_task( +# self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( +# processed_chunk=processed_chunk, +# ) +# ) + +# choice = processed_chunk.choices[0] +# if isinstance(choice, StreamingChoices): +# self.response_uptil_now += choice.delta.get("content", "") or "" +# else: +# self.response_uptil_now += "" +# self.rules.post_call_rules( +# input=self.response_uptil_now, model=self.model +# ) +# self.chunks.append(processed_chunk) +# if hasattr( +# processed_chunk, "usage" +# ): # remove usage from chunk, only send on final chunk +# # Convert the object to a dictionary +# obj_dict = processed_chunk.dict() + +# # Remove an attribute (e.g., 'attr2') +# if "usage" in obj_dict: +# del obj_dict["usage"] + +# # Create a new object without the removed attribute +# processed_chunk = self.model_response_creator(chunk=obj_dict) +# print_verbose(f"final returned processed chunk: {processed_chunk}") +# return processed_chunk +# raise StopAsyncIteration +# else: # temporary patch for non-aiohttp async calls +# # example - boto3 bedrock llms +# while True: +# if isinstance(self.completion_stream, str) or isinstance( +# self.completion_stream, bytes +# ): +# chunk = self.completion_stream +# else: +# chunk = next(self.completion_stream) +# if chunk is not None and chunk != b"": +# print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") +# processed_chunk: Optional[ModelResponse] = self.chunk_creator( +# chunk=chunk +# ) +# print_verbose( +# f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}" +# ) +# if processed_chunk is None: +# continue +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.success_handler, +# args=(processed_chunk, None, None, cache_hit), +# ).start() # log processed_chunk +# asyncio.create_task( +# self.logging_obj.async_success_handler( +# processed_chunk, cache_hit=cache_hit +# ) +# ) + +# choice = processed_chunk.choices[0] +# if isinstance(choice, StreamingChoices): +# self.response_uptil_now += ( +# choice.delta.get("content", "") or "" +# ) +# else: +# self.response_uptil_now += "" +# self.rules.post_call_rules( +# input=self.response_uptil_now, model=self.model +# ) +# # RETURN RESULT +# self.chunks.append(processed_chunk) +# return processed_chunk +# except (StopAsyncIteration, StopIteration): +# if self.sent_last_chunk is True: +# # log the final chunk with accurate streaming values +# complete_streaming_response = litellm.stream_chunk_builder( +# chunks=self.chunks, messages=self.messages +# ) +# response = self.model_response_creator() +# if complete_streaming_response is not None: +# setattr( +# response, +# "usage", +# getattr(complete_streaming_response, "usage"), +# ) +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.success_handler, +# args=(response, None, None, cache_hit), +# ).start() # log response +# asyncio.create_task( +# self.logging_obj.async_success_handler( +# response, cache_hit=cache_hit +# ) +# ) +# if self.sent_stream_usage is False and self.send_stream_usage is True: +# self.sent_stream_usage = True +# return response +# raise StopAsyncIteration # Re-raise StopIteration +# else: +# self.sent_last_chunk = True +# processed_chunk = self.finish_reason_handler() +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.success_handler, +# args=(processed_chunk, None, None, cache_hit), +# ).start() # log response +# asyncio.create_task( +# self.logging_obj.async_success_handler( +# processed_chunk, cache_hit=cache_hit +# ) +# ) +# return processed_chunk +# except httpx.TimeoutException as e: # if httpx read timeout error occues +# traceback_exception = traceback.format_exc() +# ## ADD DEBUG INFORMATION - E.G. LITELLM REQUEST TIMEOUT +# traceback_exception += "\nLiteLLM Default Request Timeout - {}".format( +# litellm.request_timeout +# ) +# if self.logging_obj is not None: +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.failure_handler, +# args=(e, traceback_exception), +# ).start() # log response +# # Handle any exceptions that might occur during streaming +# asyncio.create_task( +# self.logging_obj.async_failure_handler(e, traceback_exception) +# ) +# raise e +# except Exception as e: +# traceback_exception = traceback.format_exc() +# if self.logging_obj is not None: +# ## LOGGING +# threading.Thread( +# target=self.logging_obj.failure_handler, +# args=(e, traceback_exception), +# ).start() # log response +# # Handle any exceptions that might occur during streaming +# asyncio.create_task( +# self.logging_obj.async_failure_handler(e, traceback_exception) # type: ignore +# ) +# ## Map to OpenAI Exception +# raise exception_type( +# model=self.model, +# custom_llm_provider=self.custom_llm_provider, +# original_exception=e, +# completion_kwargs={}, +# extra_kwargs={}, +# ) class TextCompletionStreamWrapper: @@ -8267,29 +8189,6 @@ def has_tool_call_blocks(messages: List[AllMessageValues]) -> bool: return False -def process_response_headers(response_headers: Union[httpx.Headers, dict]) -> dict: - openai_headers = {} - processed_headers = {} - additional_headers = {} - - for k, v in response_headers.items(): - if k in OPENAI_RESPONSE_HEADERS: # return openai-compatible headers - openai_headers[k] = v - if k.startswith( - "llm_provider-" - ): # return raw provider headers (incl. openai-compatible ones) - processed_headers[k] = v - else: - additional_headers["{}-{}".format("llm_provider", k)] = v - - additional_headers = { - **openai_headers, - **processed_headers, - **additional_headers, - } - return additional_headers - - def add_dummy_tool(custom_llm_provider: str) -> List[ChatCompletionToolParam]: """ Prevent Anthropic from raising error when tool_use block exists but no tools are provided. diff --git a/tests/local_testing/test_streaming.py b/tests/local_testing/test_streaming.py index 827a2495b..fcdc6b60d 100644 --- a/tests/local_testing/test_streaming.py +++ b/tests/local_testing/test_streaming.py @@ -3470,6 +3470,86 @@ def test_unit_test_custom_stream_wrapper_repeating_chunk( continue +def test_unit_test_gemini_streaming_content_filter(): + chunks = [ + { + "text": "##", + "tool_use": None, + "is_finished": False, + "finish_reason": "stop", + "usage": {"prompt_tokens": 37, "completion_tokens": 1, "total_tokens": 38}, + "index": 0, + }, + { + "text": "", + "is_finished": False, + "finish_reason": "", + "usage": None, + "index": 0, + "tool_use": None, + }, + { + "text": " Downsides of Prompt Hacking in a Customer Portal\n\nWhile prompt engineering can be incredibly", + "tool_use": None, + "is_finished": False, + "finish_reason": "stop", + "usage": {"prompt_tokens": 37, "completion_tokens": 17, "total_tokens": 54}, + "index": 0, + }, + { + "text": "", + "is_finished": False, + "finish_reason": "", + "usage": None, + "index": 0, + "tool_use": None, + }, + { + "text": "", + "tool_use": None, + "is_finished": False, + "finish_reason": "content_filter", + "usage": {"prompt_tokens": 37, "completion_tokens": 17, "total_tokens": 54}, + "index": 0, + }, + { + "text": "", + "is_finished": False, + "finish_reason": "", + "usage": None, + "index": 0, + "tool_use": None, + }, + ] + + completion_stream = ModelResponseListIterator(model_responses=chunks) + + response = litellm.CustomStreamWrapper( + completion_stream=completion_stream, + model="gemini/gemini-1.5-pro", + custom_llm_provider="gemini", + logging_obj=litellm.Logging( + model="gemini/gemini-1.5-pro", + messages=[{"role": "user", "content": "Hey"}], + stream=True, + call_type="completion", + start_time=time.time(), + litellm_call_id="12345", + function_id="1245", + ), + ) + + stream_finish_reason: Optional[str] = None + idx = 0 + for chunk in response: + print(f"chunk: {chunk}") + if chunk.choices[0].finish_reason is not None: + stream_finish_reason = chunk.choices[0].finish_reason + idx += 1 + print(f"num chunks: {idx}") + assert stream_finish_reason == "content_filter" + + def test_unit_test_custom_stream_wrapper_openai(): """ Test if last streaming chunk ends with '?', if the message repeats itself. From 73531f4815861e0aaa427de891e78c4896a356f2 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Fri, 8 Nov 2024 22:07:17 +0530 Subject: [PATCH 44/67] Litellm dev 11 08 2024 (#6658) * fix(deepseek/chat): convert content list to str Fixes https://github.com/BerriAI/litellm/issues/6642 * test(test_deepseek_completion.py): implement base llm unit tests increase robustness across providers * fix(router.py): support content policy violation fallbacks with default fallbacks * fix(opentelemetry.py): refactor to move otel imports behing flag Fixes https://github.com/BerriAI/litellm/issues/6636 * fix(opentelemtry.py): close span on success completion * fix(user_api_key_auth.py): allow user_role to default to none * fix: mark flaky test * fix(opentelemetry.py): move otelconfig.from_env to inside the init prevent otel errors raised just by importing the litellm class * fix(user_api_key_auth.py): fix auth error --- litellm/__init__.py | 1 + litellm/integrations/opentelemetry.py | 19 +++++--- .../llms/OpenAI/chat/gpt_transformation.py | 7 ++- litellm/llms/OpenAI/chat/o1_transformation.py | 4 +- litellm/llms/OpenAI/openai.py | 12 ++--- litellm/llms/deepseek/chat/transformation.py | 41 +++++++++++++++++ litellm/llms/prompt_templates/common_utils.py | 13 ++++++ litellm/proxy/_new_secret_config.yaml | 8 ++-- litellm/proxy/auth/user_api_key_auth.py | 17 +++++-- litellm/router.py | 12 +++++ litellm/utils.py | 19 ++++++++ tests/llm_translation/base_llm_unit_tests.py | 46 +++++++++++++++++++ .../test_deepseek_completion.py | 9 ++++ tests/local_testing/test_completion.py | 1 + .../test_opentelemetry_unit_tests.py | 41 +++++++++++++++++ tests/local_testing/test_python_38.py | 13 ++++++ tests/local_testing/test_router_fallbacks.py | 22 +++++++-- .../test_router_helper_utils.py | 34 ++++++++++++-- tests/test_team.py | 2 +- 19 files changed, 287 insertions(+), 34 deletions(-) create mode 100644 litellm/llms/deepseek/chat/transformation.py create mode 100644 tests/llm_translation/base_llm_unit_tests.py create mode 100644 tests/llm_translation/test_deepseek_completion.py create mode 100644 tests/local_testing/test_opentelemetry_unit_tests.py diff --git a/litellm/__init__.py b/litellm/__init__.py index f388bf17a..1951dd12f 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -1045,6 +1045,7 @@ from .llms.AzureOpenAI.azure import ( from .llms.AzureOpenAI.chat.gpt_transformation import AzureOpenAIConfig from .llms.hosted_vllm.chat.transformation import HostedVLLMChatConfig +from .llms.deepseek.chat.transformation import DeepSeekChatConfig from .llms.lm_studio.chat.transformation import LMStudioChatConfig from .llms.perplexity.chat.transformation import PerplexityChatConfig from .llms.AzureOpenAI.chat.o1_transformation import AzureOpenAIO1Config diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index a1d4b781a..8102f2c60 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -16,6 +16,7 @@ from litellm.types.utils import ( ) if TYPE_CHECKING: + from opentelemetry.sdk.trace.export import SpanExporter as _SpanExporter from opentelemetry.trace import Span as _Span from litellm.proxy._types import ( @@ -24,10 +25,12 @@ if TYPE_CHECKING: from litellm.proxy.proxy_server import UserAPIKeyAuth as _UserAPIKeyAuth Span = _Span + SpanExporter = _SpanExporter UserAPIKeyAuth = _UserAPIKeyAuth ManagementEndpointLoggingPayload = _ManagementEndpointLoggingPayload else: Span = Any + SpanExporter = Any UserAPIKeyAuth = Any ManagementEndpointLoggingPayload = Any @@ -44,7 +47,6 @@ LITELLM_REQUEST_SPAN_NAME = "litellm_request" @dataclass class OpenTelemetryConfig: - from opentelemetry.sdk.trace.export import SpanExporter exporter: Union[str, SpanExporter] = "console" endpoint: Optional[str] = None @@ -77,7 +79,7 @@ class OpenTelemetryConfig: class OpenTelemetry(CustomLogger): def __init__( self, - config: OpenTelemetryConfig = OpenTelemetryConfig.from_env(), + config: Optional[OpenTelemetryConfig] = None, callback_name: Optional[str] = None, **kwargs, ): @@ -85,6 +87,9 @@ class OpenTelemetry(CustomLogger): from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import TracerProvider + if config is None: + config = OpenTelemetryConfig.from_env() + self.config = config self.OTEL_EXPORTER = self.config.exporter self.OTEL_ENDPOINT = self.config.endpoint @@ -319,8 +324,8 @@ class OpenTelemetry(CustomLogger): span.end(end_time=self._to_ns(end_time)) - # if parent_otel_span is not None: - # parent_otel_span.end(end_time=self._to_ns(datetime.now())) + if parent_otel_span is not None: + parent_otel_span.end(end_time=self._to_ns(datetime.now())) def _handle_failure(self, kwargs, response_obj, start_time, end_time): from opentelemetry.trace import Status, StatusCode @@ -700,10 +705,10 @@ class OpenTelemetry(CustomLogger): TraceContextTextMapPropagator, ) - verbose_logger.debug("OpenTelemetry: GOT A TRACEPARENT {}".format(_traceparent)) propagator = TraceContextTextMapPropagator() - _parent_context = propagator.extract(carrier={"traceparent": _traceparent}) - verbose_logger.debug("OpenTelemetry: PARENT CONTEXT {}".format(_parent_context)) + carrier = {"traceparent": _traceparent} + _parent_context = propagator.extract(carrier=carrier) + return _parent_context def _get_span_context(self, kwargs): diff --git a/litellm/llms/OpenAI/chat/gpt_transformation.py b/litellm/llms/OpenAI/chat/gpt_transformation.py index 14ebb4a53..c0c7e14dd 100644 --- a/litellm/llms/OpenAI/chat/gpt_transformation.py +++ b/litellm/llms/OpenAI/chat/gpt_transformation.py @@ -3,7 +3,7 @@ Support for gpt model family """ import types -from typing import Optional, Union +from typing import List, Optional, Union import litellm from litellm.types.llms.openai import AllMessageValues, ChatCompletionUserMessage @@ -163,3 +163,8 @@ class OpenAIGPTConfig: model=model, drop_params=drop_params, ) + + def _transform_messages( + self, messages: List[AllMessageValues] + ) -> List[AllMessageValues]: + return messages diff --git a/litellm/llms/OpenAI/chat/o1_transformation.py b/litellm/llms/OpenAI/chat/o1_transformation.py index d9def117f..2dd70afbb 100644 --- a/litellm/llms/OpenAI/chat/o1_transformation.py +++ b/litellm/llms/OpenAI/chat/o1_transformation.py @@ -108,7 +108,9 @@ class OpenAIO1Config(OpenAIGPTConfig): return True return False - def o1_prompt_factory(self, messages: List[AllMessageValues]): + def _transform_messages( + self, messages: List[AllMessageValues] + ) -> List[AllMessageValues]: """ Handles limitations of O-1 model family. - modalities: image => drop param (if user opts in to dropping param) diff --git a/litellm/llms/OpenAI/openai.py b/litellm/llms/OpenAI/openai.py index 008296fe7..7d701d26c 100644 --- a/litellm/llms/OpenAI/openai.py +++ b/litellm/llms/OpenAI/openai.py @@ -15,6 +15,7 @@ from pydantic import BaseModel from typing_extensions import overload, override import litellm +from litellm import LlmProviders from litellm._logging import verbose_logger from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from litellm.secret_managers.main import get_secret_str @@ -24,6 +25,7 @@ from litellm.utils import ( CustomStreamWrapper, Message, ModelResponse, + ProviderConfigManager, TextCompletionResponse, Usage, convert_to_model_response_object, @@ -701,13 +703,11 @@ class OpenAIChatCompletion(BaseLLM): messages=messages, custom_llm_provider=custom_llm_provider, ) - if ( - litellm.openAIO1Config.is_model_o1_reasoning_model(model=model) - and messages is not None - ): - messages = litellm.openAIO1Config.o1_prompt_factory( - messages=messages, + if messages is not None and custom_llm_provider is not None: + provider_config = ProviderConfigManager.get_provider_config( + model=model, provider=LlmProviders(custom_llm_provider) ) + messages = provider_config._transform_messages(messages) for _ in range( 2 diff --git a/litellm/llms/deepseek/chat/transformation.py b/litellm/llms/deepseek/chat/transformation.py new file mode 100644 index 000000000..5785bdd50 --- /dev/null +++ b/litellm/llms/deepseek/chat/transformation.py @@ -0,0 +1,41 @@ +""" +Translates from OpenAI's `/v1/chat/completions` to DeepSeek's `/v1/chat/completions` +""" + +import types +from typing import List, Optional, Tuple, Union + +from pydantic import BaseModel + +import litellm +from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import AllMessageValues, ChatCompletionAssistantMessage + +from ....utils import _remove_additional_properties, _remove_strict_from_schema +from ...OpenAI.chat.gpt_transformation import OpenAIGPTConfig +from ...prompt_templates.common_utils import ( + handle_messages_with_content_list_to_str_conversion, +) + + +class DeepSeekChatConfig(OpenAIGPTConfig): + + def _transform_messages( + self, messages: List[AllMessageValues] + ) -> List[AllMessageValues]: + """ + DeepSeek does not support content in list format. + """ + messages = handle_messages_with_content_list_to_str_conversion(messages) + return super()._transform_messages(messages) + + def _get_openai_compatible_provider_info( + self, api_base: Optional[str], api_key: Optional[str] + ) -> Tuple[Optional[str], Optional[str]]: + api_base = ( + api_base + or get_secret_str("DEEPSEEK_API_BASE") + or "https://api.deepseek.com/beta" + ) # type: ignore + dynamic_api_key = api_key or get_secret_str("DEEPSEEK_API_KEY") + return api_base, dynamic_api_key diff --git a/litellm/llms/prompt_templates/common_utils.py b/litellm/llms/prompt_templates/common_utils.py index 6b4971269..a91ec2170 100644 --- a/litellm/llms/prompt_templates/common_utils.py +++ b/litellm/llms/prompt_templates/common_utils.py @@ -24,6 +24,19 @@ DEFAULT_ASSISTANT_CONTINUE_MESSAGE = ChatCompletionAssistantMessage( ) +def handle_messages_with_content_list_to_str_conversion( + messages: List[AllMessageValues], +) -> List[AllMessageValues]: + """ + Handles messages with content list conversion + """ + for message in messages: + texts = convert_content_list_to_str(message=message) + if texts: + message["content"] = texts + return messages + + def convert_content_list_to_str(message: AllMessageValues) -> str: """ - handles scenario where content is list and not string diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index d0bd5f674..c44a46a67 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -59,10 +59,10 @@ model_list: timeout: 300 stream_timeout: 60 -# litellm_settings: -# fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] -# callbacks: ["otel", "prometheus"] -# default_redis_batch_cache_expiry: 10 +litellm_settings: + fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] + callbacks: ["otel", "prometheus"] + default_redis_batch_cache_expiry: 10 # litellm_settings: # cache: True diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index f11bfcbc9..ff1acc3c9 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -703,12 +703,17 @@ async def user_api_key_auth( # noqa: PLR0915 ) if is_master_key_valid: - _user_api_key_obj = UserAPIKeyAuth( - api_key=master_key, + _user_api_key_obj = _return_user_api_key_auth_obj( + user_obj=None, user_role=LitellmUserRoles.PROXY_ADMIN, - user_id=litellm_proxy_admin_name, + api_key=master_key, parent_otel_span=parent_otel_span, - **end_user_params, + valid_token_dict={ + **end_user_params, + "user_id": litellm_proxy_admin_name, + }, + route=route, + start_time=start_time, ) await _cache_key_object( hashed_token=hash_token(master_key), @@ -1229,7 +1234,9 @@ def _return_user_api_key_auth_obj( valid_token_dict: dict, route: str, start_time: datetime, + user_role: Optional[LitellmUserRoles] = None, ) -> UserAPIKeyAuth: + traceback.print_stack() end_time = datetime.now() user_api_key_service_logger_obj.service_success_hook( service=ServiceTypes.AUTH, @@ -1240,7 +1247,7 @@ def _return_user_api_key_auth_obj( parent_otel_span=parent_otel_span, ) retrieved_user_role = ( - _get_user_role(user_obj=user_obj) or LitellmUserRoles.INTERNAL_USER + user_role or _get_user_role(user_obj=user_obj) or LitellmUserRoles.INTERNAL_USER ) user_api_key_kwargs = { diff --git a/litellm/router.py b/litellm/router.py index 759f94f74..0bdd1d1e0 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -3558,6 +3558,15 @@ class Router: # Catch all - if any exceptions default to cooling down return True + def _has_default_fallbacks(self) -> bool: + if self.fallbacks is None: + return False + for fallback in self.fallbacks: + if isinstance(fallback, dict): + if "*" in fallback: + return True + return False + def _should_raise_content_policy_error( self, model: str, response: ModelResponse, kwargs: dict ) -> bool: @@ -3574,6 +3583,7 @@ class Router: content_policy_fallbacks = kwargs.get( "content_policy_fallbacks", self.content_policy_fallbacks ) + ### ONLY RAISE ERROR IF CP FALLBACK AVAILABLE ### if content_policy_fallbacks is not None: fallback_model_group = None @@ -3584,6 +3594,8 @@ class Router: if fallback_model_group is not None: return True + elif self._has_default_fallbacks(): # default fallbacks set + return True verbose_router_logger.info( "Content Policy Error occurred. No available fallbacks. Returning original response. model={}, content_policy_fallbacks={}".format( diff --git a/litellm/utils.py b/litellm/utils.py index f2360884c..e4e84398f 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8252,3 +8252,22 @@ def validate_chat_completion_user_messages(messages: List[AllMessageValues]): ) return messages + + +from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig + + +class ProviderConfigManager: + @staticmethod + def get_provider_config( + model: str, provider: litellm.LlmProviders + ) -> OpenAIGPTConfig: + """ + Returns the provider config for a given provider. + """ + if litellm.openAIO1Config.is_model_o1_reasoning_model(model=model): + return litellm.OpenAIO1Config() + elif litellm.LlmProviders.DEEPSEEK == provider: + return litellm.DeepSeekChatConfig() + + return OpenAIGPTConfig() diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py new file mode 100644 index 000000000..4f9cd9c25 --- /dev/null +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -0,0 +1,46 @@ +import asyncio +import httpx +import json +import pytest +import sys +from typing import Any, Dict, List +from unittest.mock import MagicMock, Mock, patch +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm +from litellm.exceptions import BadRequestError +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from litellm.utils import CustomStreamWrapper + + +# test_example.py +from abc import ABC, abstractmethod + + +class BaseLLMChatTest(ABC): + """ + Abstract base test class that enforces a common test across all test classes. + """ + + @abstractmethod + def get_base_completion_call_args(self) -> dict: + """Must return the base completion call args""" + pass + + def test_content_list_handling(self): + """Check if content list is supported by LLM API""" + base_completion_call_args = self.get_base_completion_call_args() + messages = [ + { + "role": "user", + "content": [{"type": "text", "text": "Hello, how are you?"}], + } + ] + response = litellm.completion( + **base_completion_call_args, + messages=messages, + ) + assert response is not None diff --git a/tests/llm_translation/test_deepseek_completion.py b/tests/llm_translation/test_deepseek_completion.py new file mode 100644 index 000000000..b0f7ee663 --- /dev/null +++ b/tests/llm_translation/test_deepseek_completion.py @@ -0,0 +1,9 @@ +from base_llm_unit_tests import BaseLLMChatTest + + +# Test implementation +class TestDeepSeekChatCompletion(BaseLLMChatTest): + def get_base_completion_call_args(self) -> dict: + return { + "model": "deepseek/deepseek-chat", + } diff --git a/tests/local_testing/test_completion.py b/tests/local_testing/test_completion.py index 77cea6f0c..7814d13c6 100644 --- a/tests/local_testing/test_completion.py +++ b/tests/local_testing/test_completion.py @@ -4526,6 +4526,7 @@ async def test_completion_ai21_chat(): "stream", [False, True], ) +@pytest.mark.flaky(retries=3, delay=1) def test_completion_response_ratelimit_headers(model, stream): response = completion( model=model, diff --git a/tests/local_testing/test_opentelemetry_unit_tests.py b/tests/local_testing/test_opentelemetry_unit_tests.py new file mode 100644 index 000000000..530adc6ab --- /dev/null +++ b/tests/local_testing/test_opentelemetry_unit_tests.py @@ -0,0 +1,41 @@ +# What is this? +## Unit tests for opentelemetry integration + +# What is this? +## Unit test for presidio pii masking +import sys, os, asyncio, time, random +from datetime import datetime +import traceback +from dotenv import load_dotenv + +load_dotenv() +import os +import asyncio + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from unittest.mock import patch, MagicMock, AsyncMock + + +@pytest.mark.asyncio +async def test_opentelemetry_integration(): + """ + Unit test to confirm the parent otel span is ended + """ + + parent_otel_span = MagicMock() + litellm.callbacks = ["otel"] + + await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Hello, world!"}], + mock_response="Hey!", + metadata={"litellm_parent_otel_span": parent_otel_span}, + ) + + await asyncio.sleep(1) + + parent_otel_span.end.assert_called_once() diff --git a/tests/local_testing/test_python_38.py b/tests/local_testing/test_python_38.py index 0cd703557..5fa48f096 100644 --- a/tests/local_testing/test_python_38.py +++ b/tests/local_testing/test_python_38.py @@ -72,6 +72,19 @@ def test_litellm_proxy_server_config_no_general_settings(): # Check if the response is successful assert response.status_code == 200 assert response.json() == "I'm alive!" + + # Test /chat/completions + response = requests.post( + "http://localhost:4000/chat/completions", + headers={"Authorization": "Bearer 1234567890"}, + json={ + "model": "test_openai_models", + "messages": [{"role": "user", "content": "Hello, how are you?"}], + }, + ) + + assert response.status_code == 200 + except ImportError: pytest.fail("Failed to import litellm.proxy_server") except requests.ConnectionError: diff --git a/tests/local_testing/test_router_fallbacks.py b/tests/local_testing/test_router_fallbacks.py index a5149b9fa..cad640a54 100644 --- a/tests/local_testing/test_router_fallbacks.py +++ b/tests/local_testing/test_router_fallbacks.py @@ -1120,9 +1120,10 @@ async def test_client_side_fallbacks_list(sync_mode): @pytest.mark.parametrize("sync_mode", [True, False]) @pytest.mark.parametrize("content_filter_response_exception", [True, False]) +@pytest.mark.parametrize("fallback_type", ["model-specific", "default"]) @pytest.mark.asyncio async def test_router_content_policy_fallbacks( - sync_mode, content_filter_response_exception + sync_mode, content_filter_response_exception, fallback_type ): os.environ["LITELLM_LOG"] = "DEBUG" @@ -1152,6 +1153,14 @@ async def test_router_content_policy_fallbacks( "mock_response": "This works!", }, }, + { + "model_name": "my-default-fallback-model", + "litellm_params": { + "model": "openai/my-fake-model", + "api_key": "", + "mock_response": "This works 2!", + }, + }, { "model_name": "my-general-model", "litellm_params": { @@ -1169,9 +1178,14 @@ async def test_router_content_policy_fallbacks( }, }, ], - content_policy_fallbacks=[{"claude-2": ["my-fallback-model"]}], - fallbacks=[{"claude-2": ["my-general-model"]}], - context_window_fallbacks=[{"claude-2": ["my-context-window-model"]}], + content_policy_fallbacks=( + [{"claude-2": ["my-fallback-model"]}] + if fallback_type == "model-specific" + else None + ), + default_fallbacks=( + ["my-default-fallback-model"] if fallback_type == "default" else None + ), ) if sync_mode is True: diff --git a/tests/router_unit_tests/test_router_helper_utils.py b/tests/router_unit_tests/test_router_helper_utils.py index 7e2daa9b5..8a35f5652 100644 --- a/tests/router_unit_tests/test_router_helper_utils.py +++ b/tests/router_unit_tests/test_router_helper_utils.py @@ -452,11 +452,17 @@ def test_update_usage(model_list): @pytest.mark.parametrize( - "finish_reason, expected_error", [("content_filter", True), ("stop", False)] + "finish_reason, expected_fallback", [("content_filter", True), ("stop", False)] ) -def test_should_raise_content_policy_error(model_list, finish_reason, expected_error): +@pytest.mark.parametrize("fallback_type", ["model-specific", "default"]) +def test_should_raise_content_policy_error( + model_list, finish_reason, expected_fallback, fallback_type +): """Test if the '_should_raise_content_policy_error' function is working correctly""" - router = Router(model_list=model_list) + router = Router( + model_list=model_list, + default_fallbacks=["gpt-4o"] if fallback_type == "default" else None, + ) assert ( router._should_raise_content_policy_error( @@ -472,10 +478,14 @@ def test_should_raise_content_policy_error(model_list, finish_reason, expected_e usage={"total_tokens": 100}, ), kwargs={ - "content_policy_fallbacks": [{"gpt-3.5-turbo": "gpt-4o"}], + "content_policy_fallbacks": ( + [{"gpt-3.5-turbo": "gpt-4o"}] + if fallback_type == "model-specific" + else None + ) }, ) - is expected_error + is expected_fallback ) @@ -1019,3 +1029,17 @@ async def test_pass_through_moderation_endpoint_factory(model_list): response = await router._pass_through_moderation_endpoint_factory( original_function=litellm.amoderation, input="this is valid good text" ) + + +@pytest.mark.parametrize( + "has_default_fallbacks, expected_result", + [(True, True), (False, False)], +) +def test_has_default_fallbacks(model_list, has_default_fallbacks, expected_result): + router = Router( + model_list=model_list, + default_fallbacks=( + ["my-default-fallback-model"] if has_default_fallbacks else None + ), + ) + assert router._has_default_fallbacks() is expected_result diff --git a/tests/test_team.py b/tests/test_team.py index 28fba8da3..d59720007 100644 --- a/tests/test_team.py +++ b/tests/test_team.py @@ -362,7 +362,7 @@ async def test_team_info(): try: await get_team_info(session=session, get_team=team_id, call_key=key) - pytest.fail(f"Expected call to fail") + pytest.fail("Expected call to fail") except Exception as e: pass From a3baec081bd08c5aaa725050b8e872f1a9a78991 Mon Sep 17 00:00:00 2001 From: David Manouchehri Date: Fri, 8 Nov 2024 11:40:15 -0500 Subject: [PATCH 45/67] (pricing): Fix multiple mistakes in Claude pricing, and also increase context length allowed for Claude 3.5 Sonnet v2 on Bedrock. (#6666) --- ...odel_prices_and_context_window_backup.json | 28 +++++++++++-------- model_prices_and_context_window.json | 28 +++++++++++-------- 2 files changed, 32 insertions(+), 24 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index cfc2cef72..a9c65b2c9 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1886,11 +1886,13 @@ "supports_prompt_caching": true }, "claude-3-5-haiku-20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, + "cache_creation_input_token_cost": 0.00000125, + "cache_read_input_token_cost": 0.0000001, "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, @@ -2812,9 +2814,9 @@ "supports_assistant_prefill": true }, "vertex_ai/claude-3-5-haiku@20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, "litellm_provider": "vertex_ai-anthropic_models", @@ -3816,9 +3818,9 @@ "tool_use_system_prompt_tokens": 264 }, "openrouter/anthropic/claude-3-5-haiku-20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, "litellm_provider": "openrouter", @@ -4529,9 +4531,9 @@ "supports_vision": true }, "anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", @@ -4559,6 +4561,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", + "supports_assistant_prefill": true, "supports_function_calling": true }, "anthropic.claude-3-opus-20240229-v1:0": { @@ -4595,9 +4598,9 @@ "supports_vision": true }, "us.anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", @@ -4625,6 +4628,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", + "supports_assistant_prefill": true, "supports_function_calling": true }, "us.anthropic.claude-3-opus-20240229-v1:0": { @@ -4661,9 +4665,9 @@ "supports_vision": true }, "eu.anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index cfc2cef72..a9c65b2c9 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1886,11 +1886,13 @@ "supports_prompt_caching": true }, "claude-3-5-haiku-20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, + "cache_creation_input_token_cost": 0.00000125, + "cache_read_input_token_cost": 0.0000001, "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, @@ -2812,9 +2814,9 @@ "supports_assistant_prefill": true }, "vertex_ai/claude-3-5-haiku@20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, "litellm_provider": "vertex_ai-anthropic_models", @@ -3816,9 +3818,9 @@ "tool_use_system_prompt_tokens": 264 }, "openrouter/anthropic/claude-3-5-haiku-20241022": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000001, "output_cost_per_token": 0.000005, "litellm_provider": "openrouter", @@ -4529,9 +4531,9 @@ "supports_vision": true }, "anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", @@ -4559,6 +4561,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", + "supports_assistant_prefill": true, "supports_function_calling": true }, "anthropic.claude-3-opus-20240229-v1:0": { @@ -4595,9 +4598,9 @@ "supports_vision": true }, "us.anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", @@ -4625,6 +4628,7 @@ "output_cost_per_token": 0.000005, "litellm_provider": "bedrock", "mode": "chat", + "supports_assistant_prefill": true, "supports_function_calling": true }, "us.anthropic.claude-3-opus-20240229-v1:0": { @@ -4661,9 +4665,9 @@ "supports_vision": true }, "eu.anthropic.claude-3-5-sonnet-20241022-v2:0": { - "max_tokens": 4096, + "max_tokens": 8192, "max_input_tokens": 200000, - "max_output_tokens": 4096, + "max_output_tokens": 8192, "input_cost_per_token": 0.000003, "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", From 3d1c3054019c7e524f658af76f5f1c453c2c3dca Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 8 Nov 2024 23:03:19 +0530 Subject: [PATCH 46/67] =?UTF-8?q?bump:=20version=201.52.2=20=E2=86=92=201.?= =?UTF-8?q?52.3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c083db2a2..33d308d9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.2" +version = "1.52.3" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.2" +version = "1.52.3" version_files = [ "pyproject.toml:^version" ] From 64c3c4906c4e094b49302a6529a40e2c447e20b9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 13:17:43 -0800 Subject: [PATCH 47/67] add bedrock image gen async support --- litellm/llms/base_aws_llm.py | 87 ++++++++++- litellm/llms/bedrock/image/image_handler.py | 158 ++++++++++++++++++++ litellm/llms/bedrock/image_generation.py | 127 ---------------- litellm/main.py | 3 +- 4 files changed, 245 insertions(+), 130 deletions(-) create mode 100644 litellm/llms/bedrock/image/image_handler.py delete mode 100644 litellm/llms/bedrock/image_generation.py diff --git a/litellm/llms/base_aws_llm.py b/litellm/llms/base_aws_llm.py index 70e3defc7..9f3a58a8b 100644 --- a/litellm/llms/base_aws_llm.py +++ b/litellm/llms/base_aws_llm.py @@ -1,16 +1,28 @@ import hashlib import json import os -from typing import Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import httpx +from pydantic import BaseModel from litellm._logging import verbose_logger from litellm.caching.caching import DualCache, InMemoryCache -from litellm.secret_managers.main import get_secret +from litellm.secret_managers.main import get_secret, get_secret_str from .base import BaseLLM +if TYPE_CHECKING: + from botocore.credentials import Credentials +else: + Credentials = Any + + +class Boto3CredentialsInfo(BaseModel): + credentials: Credentials + aws_region_name: str + aws_bedrock_runtime_endpoint: Optional[str] + class AwsAuthError(Exception): def __init__(self, status_code, message): @@ -311,3 +323,74 @@ class BaseAWSLLM(BaseLLM): proxy_endpoint_url = endpoint_url return endpoint_url, proxy_endpoint_url + + def _get_boto_credentials_from_optional_params( + self, optional_params: dict + ) -> Boto3CredentialsInfo: + """ + Get boto3 credentials from optional params + + Args: + optional_params (dict): Optional parameters for the model call + + Returns: + Credentials: Boto3 credentials object + """ + try: + import boto3 + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from botocore.credentials import Credentials + except ImportError: + raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.") + ## CREDENTIALS ## + # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them + aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) + aws_access_key_id = optional_params.pop("aws_access_key_id", None) + aws_session_token = optional_params.pop("aws_session_token", None) + aws_region_name = optional_params.pop("aws_region_name", None) + aws_role_name = optional_params.pop("aws_role_name", None) + aws_session_name = optional_params.pop("aws_session_name", None) + aws_profile_name = optional_params.pop("aws_profile_name", None) + aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) + aws_sts_endpoint = optional_params.pop("aws_sts_endpoint", None) + aws_bedrock_runtime_endpoint = optional_params.pop( + "aws_bedrock_runtime_endpoint", None + ) # https://bedrock-runtime.{region_name}.amazonaws.com + + ### SET REGION NAME ### + if aws_region_name is None: + # check env # + litellm_aws_region_name = get_secret_str("AWS_REGION_NAME", None) + + if litellm_aws_region_name is not None and isinstance( + litellm_aws_region_name, str + ): + aws_region_name = litellm_aws_region_name + + standard_aws_region_name = get_secret_str("AWS_REGION", None) + if standard_aws_region_name is not None and isinstance( + standard_aws_region_name, str + ): + aws_region_name = standard_aws_region_name + + if aws_region_name is None: + aws_region_name = "us-west-2" + + credentials: Credentials = self.get_credentials( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + aws_session_token=aws_session_token, + aws_region_name=aws_region_name, + aws_session_name=aws_session_name, + aws_profile_name=aws_profile_name, + aws_role_name=aws_role_name, + aws_web_identity_token=aws_web_identity_token, + aws_sts_endpoint=aws_sts_endpoint, + ) + + return Boto3CredentialsInfo( + credentials=credentials, + aws_region_name=aws_region_name, + aws_bedrock_runtime_endpoint=aws_bedrock_runtime_endpoint, + ) diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py new file mode 100644 index 000000000..a282ae3dd --- /dev/null +++ b/litellm/llms/bedrock/image/image_handler.py @@ -0,0 +1,158 @@ +import copy +import json +import os +from typing import Any, List, Optional + +import httpx +from openai.types.image import Image + +import litellm +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, _get_httpx_client +from litellm.types.utils import ImageResponse +from litellm.utils import print_verbose + +from ...base_aws_llm import BaseAWSLLM +from ..common_utils import BedrockError + + +class BedrockImageGeneration(BaseAWSLLM): + """ + Bedrock Image Generation handler + """ + + def image_generation( # noqa: PLR0915 + self, + model: str, + prompt: str, + model_response: ImageResponse, + optional_params: dict, + logging_obj: Any, + timeout=None, + aimg_generation: bool = False, + api_base: Optional[str] = None, + extra_headers: Optional[dict] = None, + client: Optional[Any] = None, + ): + try: + import boto3 + from botocore.auth import SigV4Auth + from botocore.awsrequest import AWSRequest + from botocore.credentials import Credentials + except ImportError: + raise ImportError("Missing boto3 to call bedrock. Run 'pip install boto3'.") + boto3_credentials_info = self._get_boto_credentials_from_optional_params( + optional_params + ) + + ### SET RUNTIME ENDPOINT ### + modelId = model + endpoint_url, proxy_endpoint_url = self.get_runtime_endpoint( + api_base=api_base, + aws_bedrock_runtime_endpoint=boto3_credentials_info.aws_bedrock_runtime_endpoint, + aws_region_name=boto3_credentials_info.aws_region_name, + ) + proxy_endpoint_url = f"{proxy_endpoint_url}/model/{modelId}/invoke" + sigv4 = SigV4Auth( + boto3_credentials_info.credentials, + "bedrock", + boto3_credentials_info.aws_region_name, + ) + + # transform request + ### FORMAT IMAGE GENERATION INPUT ### + provider = model.split(".")[0] + inference_params = copy.deepcopy(optional_params) + inference_params.pop( + "user", None + ) # make sure user is not passed in for bedrock call + data = {} + if provider == "stability": + prompt = prompt.replace(os.linesep, " ") + ## LOAD CONFIG + config = litellm.AmazonStabilityConfig.get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + data = {"text_prompts": [{"text": prompt, "weight": 1}], **inference_params} + else: + raise BedrockError( + status_code=422, message=f"Unsupported model={model}, passed in" + ) + + # Make POST Request + body = json.dumps(data).encode("utf-8") + + headers = {"Content-Type": "application/json"} + if extra_headers is not None: + headers = {"Content-Type": "application/json", **extra_headers} + request = AWSRequest( + method="POST", url=proxy_endpoint_url, data=body, headers=headers + ) + sigv4.add_auth(request) + if ( + extra_headers is not None and "Authorization" in extra_headers + ): # prevent sigv4 from overwriting the auth header + request.headers["Authorization"] = extra_headers["Authorization"] + prepped = request.prepare() + + ## LOGGING + logging_obj.pre_call( + input=prompt, + api_key="", + additional_args={ + "complete_input_dict": data, + "api_base": proxy_endpoint_url, + "headers": prepped.headers, + }, + ) + + if client is None or isinstance(client, AsyncHTTPHandler): + _params = {} + if timeout is not None: + if isinstance(timeout, float) or isinstance(timeout, int): + timeout = httpx.Timeout(timeout) + _params["timeout"] = timeout + client = _get_httpx_client(_params) # type: ignore + else: + client = client + + try: + response = client.post(url=proxy_endpoint_url, headers=prepped.headers, data=body) # type: ignore + response.raise_for_status() + except httpx.HTTPStatusError as err: + error_code = err.response.status_code + raise BedrockError(status_code=error_code, message=err.response.text) + except httpx.TimeoutException: + raise BedrockError(status_code=408, message="Timeout error occurred.") + + response_body = response.json() + + ## LOGGING + if logging_obj is not None: + logging_obj.post_call( + input=prompt, + api_key="", + original_response=response.text, + additional_args={"complete_input_dict": data}, + ) + print_verbose("raw model_response: %s", response.text) + + ### FORMAT RESPONSE TO OPENAI FORMAT ### + if response_body is None: + raise Exception("Error in response object format") + + if model_response is None: + model_response = ImageResponse() + + image_list: List[Image] = [] + for artifact in response_body["artifacts"]: + _image = Image(b64_json=artifact["base64"]) + image_list.append(_image) + + model_response.data = image_list + return model_response + + async def async_image_generation(self): + pass diff --git a/litellm/llms/bedrock/image_generation.py b/litellm/llms/bedrock/image_generation.py deleted file mode 100644 index 65038d12e..000000000 --- a/litellm/llms/bedrock/image_generation.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -Handles image gen calls to Bedrock's `/invoke` endpoint -""" - -import copy -import json -import os -from typing import Any, List - -from openai.types.image import Image - -import litellm -from litellm.types.utils import ImageResponse - -from .common_utils import BedrockError, init_bedrock_client - - -def image_generation( - model: str, - prompt: str, - model_response: ImageResponse, - optional_params: dict, - logging_obj: Any, - timeout=None, - aimg_generation=False, -): - """ - Bedrock Image Gen endpoint support - """ - ### BOTO3 INIT ### - # pop aws_secret_access_key, aws_access_key_id, aws_region_name from kwargs, since completion calls fail with them - aws_secret_access_key = optional_params.pop("aws_secret_access_key", None) - aws_access_key_id = optional_params.pop("aws_access_key_id", None) - aws_region_name = optional_params.pop("aws_region_name", None) - aws_role_name = optional_params.pop("aws_role_name", None) - aws_session_name = optional_params.pop("aws_session_name", None) - aws_bedrock_runtime_endpoint = optional_params.pop( - "aws_bedrock_runtime_endpoint", None - ) - aws_web_identity_token = optional_params.pop("aws_web_identity_token", None) - - # use passed in BedrockRuntime.Client if provided, otherwise create a new one - client = init_bedrock_client( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - aws_region_name=aws_region_name, - aws_bedrock_runtime_endpoint=aws_bedrock_runtime_endpoint, - aws_web_identity_token=aws_web_identity_token, - aws_role_name=aws_role_name, - aws_session_name=aws_session_name, - timeout=timeout, - ) - - ### FORMAT IMAGE GENERATION INPUT ### - modelId = model - provider = model.split(".")[0] - inference_params = copy.deepcopy(optional_params) - inference_params.pop( - "user", None - ) # make sure user is not passed in for bedrock call - data = {} - if provider == "stability": - prompt = prompt.replace(os.linesep, " ") - ## LOAD CONFIG - config = litellm.AmazonStabilityConfig.get_config() - for k, v in config.items(): - if ( - k not in inference_params - ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in - inference_params[k] = v - data = {"text_prompts": [{"text": prompt, "weight": 1}], **inference_params} - else: - raise BedrockError( - status_code=422, message=f"Unsupported model={model}, passed in" - ) - - body = json.dumps(data).encode("utf-8") - ## LOGGING - request_str = f""" - response = client.invoke_model( - body={body}, # type: ignore - modelId={modelId}, - accept="application/json", - contentType="application/json", - )""" # type: ignore - logging_obj.pre_call( - input=prompt, - api_key="", # boto3 is used for init. - additional_args={ - "complete_input_dict": {"model": modelId, "texts": prompt}, - "request_str": request_str, - }, - ) - try: - response = client.invoke_model( - body=body, - modelId=modelId, - accept="application/json", - contentType="application/json", - ) - response_body = json.loads(response.get("body").read()) - ## LOGGING - logging_obj.post_call( - input=prompt, - api_key="", - additional_args={"complete_input_dict": data}, - original_response=json.dumps(response_body), - ) - except Exception as e: - raise BedrockError( - message=f"Embedding Error with model {model}: {e}", status_code=500 - ) - - ### FORMAT RESPONSE TO OPENAI FORMAT ### - if response_body is None: - raise Exception("Error in response object format") - - if model_response is None: - model_response = ImageResponse() - - image_list: List[Image] = [] - for artifact in response_body["artifacts"]: - _image = Image(b64_json=artifact["base64"]) - image_list.append(_image) - - model_response.data = image_list - return model_response diff --git a/litellm/main.py b/litellm/main.py index 8334f35d7..5be596e94 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -108,9 +108,9 @@ from .llms.azure_text import AzureTextCompletion from .llms.AzureOpenAI.audio_transcriptions import AzureAudioTranscription from .llms.AzureOpenAI.azure import AzureChatCompletion, _check_dynamic_azure_params from .llms.AzureOpenAI.chat.o1_handler import AzureOpenAIO1ChatCompletion -from .llms.bedrock import image_generation as bedrock_image_generation # type: ignore from .llms.bedrock.chat import BedrockConverseLLM, BedrockLLM from .llms.bedrock.embed.embedding import BedrockEmbedding +from .llms.bedrock.image.image_handler import BedrockImageGeneration from .llms.cohere import chat as cohere_chat from .llms.cohere import completion as cohere_completion # type: ignore from .llms.cohere.embed import handler as cohere_embed @@ -214,6 +214,7 @@ triton_chat_completions = TritonChatCompletion() bedrock_chat_completion = BedrockLLM() bedrock_converse_chat_completion = BedrockConverseLLM() bedrock_embedding = BedrockEmbedding() +bedrock_image_generation = BedrockImageGeneration() vertex_chat_completion = VertexLLM() vertex_embedding = VertexEmbedding() vertex_multimodal_embedding = VertexMultimodalEmbedding() From 092888d593493b09815808c303158c7457e58082 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 14:04:04 -0800 Subject: [PATCH 48/67] added async support for bedrock image gen --- litellm/__init__.py | 2 +- litellm/llms/bedrock/common_utils.py | 67 ------ .../image/amazon_stability1_transformation.py | 69 ++++++ litellm/llms/bedrock/image/image_handler.py | 197 ++++++++++++++---- ...bility_stable_diffusion1_transformation.py | 73 +++++++ 5 files changed, 300 insertions(+), 108 deletions(-) create mode 100644 litellm/llms/bedrock/image/amazon_stability1_transformation.py create mode 100644 litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py diff --git a/litellm/__init__.py b/litellm/__init__.py index 1951dd12f..5872c4a2f 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -984,10 +984,10 @@ from .llms.bedrock.common_utils import ( AmazonAnthropicClaude3Config, AmazonCohereConfig, AmazonLlamaConfig, - AmazonStabilityConfig, AmazonMistralConfig, AmazonBedrockGlobalConfig, ) +from .llms.bedrock.image.amazon_stability1_transformation import AmazonStabilityConfig from .llms.bedrock.embed.amazon_titan_g1_transformation import AmazonTitanG1Config from .llms.bedrock.embed.amazon_titan_multimodal_transformation import ( AmazonTitanMultimodalEmbeddingG1Config, diff --git a/litellm/llms/bedrock/common_utils.py b/litellm/llms/bedrock/common_utils.py index 1ae74e535..332b1e2b3 100644 --- a/litellm/llms/bedrock/common_utils.py +++ b/litellm/llms/bedrock/common_utils.py @@ -484,73 +484,6 @@ class AmazonMistralConfig: } -class AmazonStabilityConfig: - """ - Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 - - Supported Params for the Amazon / Stable Diffusion models: - - - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt) - - - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed (omit this option or use 0 for a random seed) - - - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run. - - - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels, in an increment divible by 64. - Engine-specific dimension validation: - - - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - - SDXL v1.0: same as SDXL v0.9 - - SD v1.6: must be between 320x320 and 1536x1536 - - - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels, in an increment divible by 64. - Engine-specific dimension validation: - - - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - - SDXL v1.0: same as SDXL v0.9 - - SD v1.6: must be between 320x320 and 1536x1536 - """ - - cfg_scale: Optional[int] = None - seed: Optional[float] = None - steps: Optional[List[str]] = None - width: Optional[int] = None - height: Optional[int] = None - - def __init__( - self, - cfg_scale: Optional[int] = None, - seed: Optional[float] = None, - steps: Optional[List[str]] = None, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> None: - locals_ = locals() - for key, value in locals_.items(): - if key != "self" and value is not None: - setattr(self.__class__, key, value) - - @classmethod - def get_config(cls): - return { - k: v - for k, v in cls.__dict__.items() - if not k.startswith("__") - and not isinstance( - v, - ( - types.FunctionType, - types.BuiltinFunctionType, - classmethod, - staticmethod, - ), - ) - and v is not None - } - - def add_custom_header(headers): """Closure to capture the headers and add them.""" diff --git a/litellm/llms/bedrock/image/amazon_stability1_transformation.py b/litellm/llms/bedrock/image/amazon_stability1_transformation.py new file mode 100644 index 000000000..83cccb947 --- /dev/null +++ b/litellm/llms/bedrock/image/amazon_stability1_transformation.py @@ -0,0 +1,69 @@ +import types +from typing import List, Optional + + +class AmazonStabilityConfig: + """ + Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 + + Supported Params for the Amazon / Stable Diffusion models: + + - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt) + + - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed (omit this option or use 0 for a random seed) + + - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run. + + - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels, in an increment divible by 64. + Engine-specific dimension validation: + + - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. + - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 + - SDXL v1.0: same as SDXL v0.9 + - SD v1.6: must be between 320x320 and 1536x1536 + + - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels, in an increment divible by 64. + Engine-specific dimension validation: + + - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. + - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 + - SDXL v1.0: same as SDXL v0.9 + - SD v1.6: must be between 320x320 and 1536x1536 + """ + + cfg_scale: Optional[int] = None + seed: Optional[float] = None + steps: Optional[List[str]] = None + width: Optional[int] = None + height: Optional[int] = None + + def __init__( + self, + cfg_scale: Optional[int] = None, + seed: Optional[float] = None, + steps: Optional[List[str]] = None, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py index a282ae3dd..edf852fd3 100644 --- a/litellm/llms/bedrock/image/image_handler.py +++ b/litellm/llms/bedrock/image/image_handler.py @@ -1,38 +1,163 @@ import copy import json import os -from typing import Any, List, Optional +from typing import TYPE_CHECKING, Any, List, Optional, Union import httpx from openai.types.image import Image +from pydantic import BaseModel import litellm -from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, _get_httpx_client +from litellm._logging import verbose_logger +from litellm.litellm_core_utils.litellm_logging import Logging as LitellmLogging +from litellm.llms.custom_httpx.http_handler import ( + _get_httpx_client, + get_async_httpx_client, +) from litellm.types.utils import ImageResponse -from litellm.utils import print_verbose from ...base_aws_llm import BaseAWSLLM from ..common_utils import BedrockError +if TYPE_CHECKING: + from botocore.awsrequest import AWSPreparedRequest +else: + AWSPreparedRequest = Any + + +class BedrockImagePreparedRequest(BaseModel): + """ + Internal/Helper class for preparing the request for bedrock image generation + """ + + endpoint_url: str + prepped: AWSPreparedRequest + body: bytes + data: dict + class BedrockImageGeneration(BaseAWSLLM): """ Bedrock Image Generation handler """ - def image_generation( # noqa: PLR0915 + def image_generation( self, model: str, prompt: str, model_response: ImageResponse, optional_params: dict, - logging_obj: Any, - timeout=None, + logging_obj: LitellmLogging, + timeout: Optional[Union[float, httpx.Timeout]], aimg_generation: bool = False, api_base: Optional[str] = None, extra_headers: Optional[dict] = None, - client: Optional[Any] = None, ): + prepared_request = self._prepare_request( + model=model, + optional_params=optional_params, + api_base=api_base, + extra_headers=extra_headers, + logging_obj=logging_obj, + prompt=prompt, + ) + + if aimg_generation is True: + return self.async_image_generation( + prepared_request=prepared_request, + timeout=timeout, + model=model, + logging_obj=logging_obj, + prompt=prompt, + model_response=model_response, + ) + + client = _get_httpx_client() + try: + response = client.post(url=prepared_request.endpoint_url, headers=prepared_request.prepped.headers, data=prepared_request.body) # type: ignore + response.raise_for_status() + except httpx.HTTPStatusError as err: + error_code = err.response.status_code + raise BedrockError(status_code=error_code, message=err.response.text) + except httpx.TimeoutException: + raise BedrockError(status_code=408, message="Timeout error occurred.") + ### FORMAT RESPONSE TO OPENAI FORMAT ### + model_response = self._transform_response_dict_to_openai_response( + model_response=model_response, + model=model, + logging_obj=logging_obj, + prompt=prompt, + response=response, + data=prepared_request.data, + ) + return model_response + + async def async_image_generation( + self, + prepared_request: BedrockImagePreparedRequest, + timeout: Optional[Union[float, httpx.Timeout]], + model: str, + logging_obj: LitellmLogging, + prompt: str, + model_response: ImageResponse, + ) -> ImageResponse: + """ + Asynchronous handler for bedrock image generation + + Awaits the response from the bedrock image generation endpoint + """ + async_client = get_async_httpx_client( + llm_provider=litellm.LlmProviders.BEDROCK, + params={"timeout": timeout}, + ) + + try: + response = await async_client.post(url=prepared_request.endpoint_url, headers=prepared_request.prepped.headers, data=prepared_request.body) # type: ignore + response.raise_for_status() + except httpx.HTTPStatusError as err: + error_code = err.response.status_code + raise BedrockError(status_code=error_code, message=err.response.text) + except httpx.TimeoutException: + raise BedrockError(status_code=408, message="Timeout error occurred.") + + ### FORMAT RESPONSE TO OPENAI FORMAT ### + model_response = self._transform_response_dict_to_openai_response( + model=model, + logging_obj=logging_obj, + prompt=prompt, + response=response, + data=prepared_request.data, + model_response=model_response, + ) + return model_response + + def _prepare_request( + self, + model: str, + optional_params: dict, + api_base: Optional[str], + extra_headers: Optional[dict], + logging_obj: LitellmLogging, + prompt: str, + ) -> BedrockImagePreparedRequest: + """ + Prepare the request body, headers, and endpoint URL for the Bedrock Image Generation API + + Args: + model (str): The model to use for the image generation + optional_params (dict): The optional parameters for the image generation + api_base (Optional[str]): The base URL for the Bedrock API + extra_headers (Optional[dict]): The extra headers to include in the request + logging_obj (LitellmLogging): The logging object to use for logging + prompt (str): The prompt to use for the image generation + Returns: + BedrockImagePreparedRequest: The prepared request object + + The BedrockImagePreparedRequest contains: + endpoint_url (str): The endpoint URL for the Bedrock Image Generation API + prepped (httpx.Request): The prepared request object + body (bytes): The request body + """ try: import boto3 from botocore.auth import SigV4Auth @@ -46,7 +171,7 @@ class BedrockImageGeneration(BaseAWSLLM): ### SET RUNTIME ENDPOINT ### modelId = model - endpoint_url, proxy_endpoint_url = self.get_runtime_endpoint( + _, proxy_endpoint_url = self.get_runtime_endpoint( api_base=api_base, aws_bedrock_runtime_endpoint=boto3_credentials_info.aws_bedrock_runtime_endpoint, aws_region_name=boto3_credentials_info.aws_region_name, @@ -107,27 +232,25 @@ class BedrockImageGeneration(BaseAWSLLM): "headers": prepped.headers, }, ) + return BedrockImagePreparedRequest( + endpoint_url=proxy_endpoint_url, + prepped=prepped, + body=body, + data=data, + ) - if client is None or isinstance(client, AsyncHTTPHandler): - _params = {} - if timeout is not None: - if isinstance(timeout, float) or isinstance(timeout, int): - timeout = httpx.Timeout(timeout) - _params["timeout"] = timeout - client = _get_httpx_client(_params) # type: ignore - else: - client = client - - try: - response = client.post(url=proxy_endpoint_url, headers=prepped.headers, data=body) # type: ignore - response.raise_for_status() - except httpx.HTTPStatusError as err: - error_code = err.response.status_code - raise BedrockError(status_code=error_code, message=err.response.text) - except httpx.TimeoutException: - raise BedrockError(status_code=408, message="Timeout error occurred.") - - response_body = response.json() + def _transform_response_dict_to_openai_response( + self, + model_response: ImageResponse, + model: str, + logging_obj: LitellmLogging, + prompt: str, + response: httpx.Response, + data: dict, + ) -> ImageResponse: + """ + Transforms the Image Generation response from Bedrock to OpenAI format + """ ## LOGGING if logging_obj is not None: @@ -137,22 +260,16 @@ class BedrockImageGeneration(BaseAWSLLM): original_response=response.text, additional_args={"complete_input_dict": data}, ) - print_verbose("raw model_response: %s", response.text) - - ### FORMAT RESPONSE TO OPENAI FORMAT ### - if response_body is None: - raise Exception("Error in response object format") - - if model_response is None: - model_response = ImageResponse() + verbose_logger.debug("raw model_response: %s", response.text) + response_dict = response.json() + if response_dict is None: + raise ValueError("Error in response object format, got None") image_list: List[Image] = [] - for artifact in response_body["artifacts"]: + for artifact in response_dict["artifacts"]: _image = Image(b64_json=artifact["base64"]) image_list.append(_image) model_response.data = image_list - return model_response - async def async_image_generation(self): - pass + return model_response diff --git a/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py b/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py new file mode 100644 index 000000000..a83b26226 --- /dev/null +++ b/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py @@ -0,0 +1,73 @@ +import copy +import os +import types +from typing import Any, Dict, List, Optional, TypedDict, Union + +import litellm + + +class AmazonStability1Config: + """ + Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 + + Supported Params for the Amazon / Stable Diffusion models: + + - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt) + + - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed (omit this option or use 0 for a random seed) + + - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run. + + - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels, in an increment divible by 64. + Engine-specific dimension validation: + + - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. + - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 + - SDXL v1.0: same as SDXL v0.9 + - SD v1.6: must be between 320x320 and 1536x1536 + + - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels, in an increment divible by 64. + Engine-specific dimension validation: + + - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. + - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 + - SDXL v1.0: same as SDXL v0.9 + - SD v1.6: must be between 320x320 and 1536x1536 + """ + + cfg_scale: Optional[int] = None + seed: Optional[float] = None + steps: Optional[List[str]] = None + width: Optional[int] = None + height: Optional[int] = None + + def __init__( + self, + cfg_scale: Optional[int] = None, + seed: Optional[float] = None, + steps: Optional[List[str]] = None, + width: Optional[int] = None, + height: Optional[int] = None, + ) -> None: + locals_ = locals() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } From 086e1ac5bcc0c991bd3f2b555608bcae401131de Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 14:10:08 -0800 Subject: [PATCH 49/67] move image gen testing --- .circleci/config.yml | 54 ++++++++++++++++++- .../test_image_generation.py | 0 2 files changed, 52 insertions(+), 2 deletions(-) rename tests/{local_testing => image_gen_tests}/test_image_generation.py (100%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7a742afe0..8b999d2f9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -625,6 +625,48 @@ jobs: paths: - llm_translation_coverage.xml - llm_translation_coverage + image_gen_testing: + docker: + - image: cimg/python:3.11 + auth: + username: ${DOCKERHUB_USERNAME} + password: ${DOCKERHUB_PASSWORD} + working_directory: ~/project + + steps: + - checkout + - run: + name: Install Dependencies + command: | + python -m pip install --upgrade pip + python -m pip install -r requirements.txt + pip install "pytest==7.3.1" + pip install "pytest-retry==1.6.3" + pip install "pytest-cov==5.0.0" + pip install "pytest-asyncio==0.21.1" + pip install "respx==0.21.1" + # Run pytest and generate JUnit XML report + - run: + name: Run tests + command: | + pwd + ls + python -m pytest -vv tests/image_gen_tests --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5 + no_output_timeout: 120m + - run: + name: Rename the coverage files + command: | + mv coverage.xml image_gen_coverage.xml + mv .coverage image_gen_coverage + + # Store test results + - store_test_results: + path: test-results + - persist_to_workspace: + root: . + paths: + - image_gen_coverage.xml + - image_gen_coverage logging_testing: docker: - image: cimg/python:3.11 @@ -875,7 +917,7 @@ jobs: command: | pwd ls - python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation + python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/image_gen_tests no_output_timeout: 120m # Store test results @@ -1112,7 +1154,7 @@ jobs: python -m venv venv . venv/bin/activate pip install coverage - coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage + coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage coverage xml - codecov/upload: file: ./coverage.xml @@ -1401,6 +1443,12 @@ workflows: only: - main - /litellm_.*/ + - image_gen_testing: + filters: + branches: + only: + - main + - /litellm_.*/ - logging_testing: filters: branches: @@ -1410,6 +1458,7 @@ workflows: - upload-coverage: requires: - llm_translation_testing + - image_gen_testing - logging_testing - litellm_router_testing - caching_unit_tests @@ -1449,6 +1498,7 @@ workflows: - load_testing - test_bad_database_url - llm_translation_testing + - image_gen_testing - logging_testing - litellm_router_testing - caching_unit_tests diff --git a/tests/local_testing/test_image_generation.py b/tests/image_gen_tests/test_image_generation.py similarity index 100% rename from tests/local_testing/test_image_generation.py rename to tests/image_gen_tests/test_image_generation.py From 61026e189d61c0b782c8adc0218c3fb7d12a56a4 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 19:24:18 -0800 Subject: [PATCH 50/67] (feat) Add support for logging to GCS Buckets with folder paths (#6675) * use helper to log * gcs _handle_folders_in_bucket_name * add test_basic_gcs_logger_with_folder_in_bucket_name * run gcs testing in logging callback tests * include correct deps * fix gcs bucket logging test * fix test_basic_gcs_logger_with_folder_in_bucket_name * fix test_get_gcs_logging_config_without_service_account * fix test gcs bucket * remove unused file --- .circleci/config.yml | 2 + litellm/integrations/gcs_bucket/gcs_bucket.py | 98 +++++++++++----- tests/local_testing/test_gcs_bucket.py | 110 ++++++++++++++++++ 3 files changed, 183 insertions(+), 27 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7a742afe0..4bb232421 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -645,6 +645,8 @@ jobs: pip install "pytest-cov==5.0.0" pip install "pytest-asyncio==0.21.1" pip install "respx==0.21.1" + pip install "google-generativeai==0.3.2" + pip install "google-cloud-aiplatform==1.43.0" # Run pytest and generate JUnit XML report - run: name: Run tests diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py index 111730d1f..f7f36c124 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket.py @@ -76,28 +76,18 @@ class GCSBucketLogger(GCSBucketBase): if logging_payload is None: raise ValueError("standard_logging_object not found in kwargs") - json_logged_payload = json.dumps(logging_payload, default=str) - # Get the current date current_date = datetime.now().strftime("%Y-%m-%d") # Modify the object_name to include the date-based folder object_name = f"{current_date}/{response_obj['id']}" - try: - response = await self.async_httpx_client.post( - headers=headers, - url=f"https://storage.googleapis.com/upload/storage/v1/b/{bucket_name}/o?uploadType=media&name={object_name}", - data=json_logged_payload, - ) - except httpx.HTTPStatusError as e: - raise Exception(f"GCS Bucket logging error: {e.response.text}") - if response.status_code != 200: - verbose_logger.error("GCS Bucket logging error: %s", str(response.text)) - - verbose_logger.debug("GCS Bucket response %s", response) - verbose_logger.debug("GCS Bucket status code %s", response.status_code) - verbose_logger.debug("GCS Bucket response.text %s", response.text) + await self._log_json_data_on_gcs( + headers=headers, + bucket_name=bucket_name, + object_name=object_name, + logging_payload=logging_payload, + ) except Exception as e: verbose_logger.exception(f"GCS Bucket logging error: {str(e)}") @@ -134,8 +124,6 @@ class GCSBucketLogger(GCSBucketBase): _litellm_params = kwargs.get("litellm_params") or {} metadata = _litellm_params.get("metadata") or {} - json_logged_payload = json.dumps(logging_payload, default=str) - # Get the current date current_date = datetime.now().strftime("%Y-%m-%d") @@ -145,21 +133,67 @@ class GCSBucketLogger(GCSBucketBase): if "gcs_log_id" in metadata: object_name = metadata["gcs_log_id"] - response = await self.async_httpx_client.post( + await self._log_json_data_on_gcs( headers=headers, - url=f"https://storage.googleapis.com/upload/storage/v1/b/{bucket_name}/o?uploadType=media&name={object_name}", - data=json_logged_payload, + bucket_name=bucket_name, + object_name=object_name, + logging_payload=logging_payload, ) - if response.status_code != 200: - verbose_logger.error("GCS Bucket logging error: %s", str(response.text)) - - verbose_logger.debug("GCS Bucket response %s", response) - verbose_logger.debug("GCS Bucket status code %s", response.status_code) - verbose_logger.debug("GCS Bucket response.text %s", response.text) except Exception as e: verbose_logger.exception(f"GCS Bucket logging error: {str(e)}") + def _handle_folders_in_bucket_name( + self, + bucket_name: str, + object_name: str, + ) -> Tuple[str, str]: + """ + Handles when the user passes a bucket name with a folder postfix + + + Example: + - Bucket name: "my-bucket/my-folder/dev" + - Object name: "my-object" + - Returns: bucket_name="my-bucket", object_name="my-folder/dev/my-object" + + """ + if "/" in bucket_name: + bucket_name, prefix = bucket_name.split("/", 1) + object_name = f"{prefix}/{object_name}" + return bucket_name, object_name + return bucket_name, object_name + + async def _log_json_data_on_gcs( + self, + headers: Dict[str, str], + bucket_name: str, + object_name: str, + logging_payload: StandardLoggingPayload, + ): + """ + Helper function to make POST request to GCS Bucket in the specified bucket. + """ + json_logged_payload = json.dumps(logging_payload, default=str) + + bucket_name, object_name = self._handle_folders_in_bucket_name( + bucket_name=bucket_name, + object_name=object_name, + ) + + response = await self.async_httpx_client.post( + headers=headers, + url=f"https://storage.googleapis.com/upload/storage/v1/b/{bucket_name}/o?uploadType=media&name={object_name}", + data=json_logged_payload, + ) + + if response.status_code != 200: + verbose_logger.error("GCS Bucket logging error: %s", str(response.text)) + + verbose_logger.debug("GCS Bucket response %s", response) + verbose_logger.debug("GCS Bucket status code %s", response.status_code) + verbose_logger.debug("GCS Bucket response.text %s", response.text) + async def get_gcs_logging_config( self, kwargs: Optional[Dict[str, Any]] = {} ) -> GCSLoggingConfig: @@ -267,6 +301,11 @@ class GCSBucketLogger(GCSBucketBase): service_account_json=gcs_logging_config["path_service_account"], ) bucket_name = gcs_logging_config["bucket_name"] + bucket_name, object_name = self._handle_folders_in_bucket_name( + bucket_name=bucket_name, + object_name=object_name, + ) + url = f"https://storage.googleapis.com/storage/v1/b/{bucket_name}/o/{object_name}?alt=media" # Send the GET request to download the object @@ -302,6 +341,11 @@ class GCSBucketLogger(GCSBucketBase): service_account_json=gcs_logging_config["path_service_account"], ) bucket_name = gcs_logging_config["bucket_name"] + bucket_name, object_name = self._handle_folders_in_bucket_name( + bucket_name=bucket_name, + object_name=object_name, + ) + url = f"https://storage.googleapis.com/storage/v1/b/{bucket_name}/o/{object_name}" # Send the DELETE request to delete the object diff --git a/tests/local_testing/test_gcs_bucket.py b/tests/local_testing/test_gcs_bucket.py index fed287bd0..a01e839fa 100644 --- a/tests/local_testing/test_gcs_bucket.py +++ b/tests/local_testing/test_gcs_bucket.py @@ -528,6 +528,7 @@ async def test_get_gcs_logging_config_without_service_account(): 1. Key based logging without a service account 2. Default Callback without a service account """ + load_vertex_ai_credentials() _old_gcs_bucket_name = os.environ.get("GCS_BUCKET_NAME") os.environ.pop("GCS_BUCKET_NAME") @@ -572,3 +573,112 @@ async def test_get_gcs_logging_config_without_service_account(): if _old_gcs_service_acct is not None: os.environ["GCS_PATH_SERVICE_ACCOUNT"] = _old_gcs_service_acct + + +@pytest.mark.asyncio +async def test_basic_gcs_logger_with_folder_in_bucket_name(): + load_vertex_ai_credentials() + gcs_logger = GCSBucketLogger() + + bucket_name = "litellm-testing-bucket/test-folder-logs" + + old_bucket_name = os.environ.get("GCS_BUCKET_NAME") + os.environ["GCS_BUCKET_NAME"] = bucket_name + print("GCSBucketLogger", gcs_logger) + + litellm.callbacks = [gcs_logger] + response = await litellm.acompletion( + model="gpt-3.5-turbo", + temperature=0.7, + messages=[{"role": "user", "content": "This is a test"}], + max_tokens=10, + user="ishaan-2", + mock_response="Hi!", + metadata={ + "tags": ["model-anthropic-claude-v2.1", "app-ishaan-prod"], + "user_api_key": "88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b", + "user_api_key_alias": None, + "user_api_end_user_max_budget": None, + "litellm_api_version": "0.0.0", + "global_max_parallel_requests": None, + "user_api_key_user_id": "116544810872468347480", + "user_api_key_org_id": None, + "user_api_key_team_id": None, + "user_api_key_team_alias": None, + "user_api_key_metadata": {}, + "requester_ip_address": "127.0.0.1", + "requester_metadata": {"foo": "bar"}, + "spend_logs_metadata": {"hello": "world"}, + "headers": { + "content-type": "application/json", + "user-agent": "PostmanRuntime/7.32.3", + "accept": "*/*", + "postman-token": "92300061-eeaa-423b-a420-0b44896ecdc4", + "host": "localhost:4000", + "accept-encoding": "gzip, deflate, br", + "connection": "keep-alive", + "content-length": "163", + }, + "endpoint": "http://localhost:4000/chat/completions", + "model_group": "gpt-3.5-turbo", + "deployment": "azure/chatgpt-v-2", + "model_info": { + "id": "4bad40a1eb6bebd1682800f16f44b9f06c52a6703444c99c7f9f32e9de3693b4", + "db_model": False, + }, + "api_base": "https://openai-gpt-4-test-v-1.openai.azure.com/", + "caching_groups": None, + "raw_request": "\n\nPOST Request Sent from LiteLLM:\ncurl -X POST \\\nhttps://openai-gpt-4-test-v-1.openai.azure.com//openai/ \\\n-H 'Authorization: *****' \\\n-d '{'model': 'chatgpt-v-2', 'messages': [{'role': 'system', 'content': 'you are a helpful assistant.\\n'}, {'role': 'user', 'content': 'bom dia'}], 'stream': False, 'max_tokens': 10, 'user': '116544810872468347480', 'extra_body': {}}'\n", + }, + ) + + print("response", response) + + await asyncio.sleep(5) + + # Get the current date + # Get the current date + current_date = datetime.now().strftime("%Y-%m-%d") + + # Modify the object_name to include the date-based folder + object_name = f"{current_date}%2F{response.id}" + + print("object_name", object_name) + + # Check if object landed on GCS + object_from_gcs = await gcs_logger.download_gcs_object(object_name=object_name) + print("object from gcs=", object_from_gcs) + # convert object_from_gcs from bytes to DICT + parsed_data = json.loads(object_from_gcs) + print("object_from_gcs as dict", parsed_data) + + print("type of object_from_gcs", type(parsed_data)) + + gcs_payload = StandardLoggingPayload(**parsed_data) + + print("gcs_payload", gcs_payload) + + assert gcs_payload["model"] == "gpt-3.5-turbo" + assert gcs_payload["messages"] == [{"role": "user", "content": "This is a test"}] + + assert gcs_payload["response"]["choices"][0]["message"]["content"] == "Hi!" + + assert gcs_payload["response_cost"] > 0.0 + + assert gcs_payload["status"] == "success" + + assert ( + gcs_payload["metadata"]["user_api_key_hash"] + == "88dc28d0f030c55ed4ab77ed8faf098196cb1c05df778539800c9f1243fe6b4b" + ) + assert gcs_payload["metadata"]["user_api_key_user_id"] == "116544810872468347480" + + assert gcs_payload["metadata"]["requester_metadata"] == {"foo": "bar"} + + # Delete Object from GCS + print("deleting object from GCS") + await gcs_logger.delete_gcs_object(object_name=object_name) + + # clean up + if old_bucket_name is not None: + os.environ["GCS_BUCKET_NAME"] = old_bucket_name From 979dfe8ab23976acc67dd4782324844609161f4e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 19:26:03 -0800 Subject: [PATCH 51/67] (feat) Add Bedrock Stability.ai Stable Diffusion 3 Image Generation models (#6673) * add bedrock image gen async support * added async support for bedrock image gen * move image gen testing * add AmazonStability3Config * add AmazonStability3Config config * update AmazonStabilityConfig * update get_optional_params_image_gen * use 1 helper for _get_request_body * add transform_response_dict_to_openai_response for stability3 * test sd3-large-v1:0 * unit testing for bedrock image gen * fix load_vertex_ai_credentials * fix test_aimage_generation_vertex_ai * add stability.sd3-large-v1:0 to model cost map * add stability.stability.sd3-large-v1:0 to docs --- docs/my-website/docs/providers/bedrock.md | 1 + litellm/__init__.py | 1 + .../image/amazon_stability1_transformation.py | 35 ++++ .../image/amazon_stability3_transformation.py | 94 +++++++++ litellm/llms/bedrock/image/image_handler.py | 85 +++++--- ...bility_stable_diffusion1_transformation.py | 73 ------- litellm/main.py | 3 +- ...odel_prices_and_context_window_backup.json | 7 + litellm/types/llms/bedrock.py | 29 +++ litellm/utils.py | 19 +- model_prices_and_context_window.json | 7 + .../test_bedrock_image_gen_unit_tests.py | 187 ++++++++++++++++++ .../image_gen_tests/test_image_generation.py | 85 +++++++- tests/image_gen_tests/vertex_key.json | 13 ++ 14 files changed, 528 insertions(+), 111 deletions(-) create mode 100644 litellm/llms/bedrock/image/amazon_stability3_transformation.py delete mode 100644 litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py create mode 100644 tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py create mode 100644 tests/image_gen_tests/vertex_key.json diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md index afd1fee39..579353d65 100644 --- a/docs/my-website/docs/providers/bedrock.md +++ b/docs/my-website/docs/providers/bedrock.md @@ -1082,5 +1082,6 @@ print(f"response: {response}") | Model Name | Function Call | |----------------------|---------------------------------------------| +| Stable Diffusion 3 - v0 | `embedding(model="bedrock/stability.stability.sd3-large-v1:0", prompt=prompt)` | | Stable Diffusion - v0 | `embedding(model="bedrock/stability.stable-diffusion-xl-v0", prompt=prompt)` | | Stable Diffusion - v0 | `embedding(model="bedrock/stability.stable-diffusion-xl-v1", prompt=prompt)` | \ No newline at end of file diff --git a/litellm/__init__.py b/litellm/__init__.py index 5872c4a2f..b739afb93 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -988,6 +988,7 @@ from .llms.bedrock.common_utils import ( AmazonBedrockGlobalConfig, ) from .llms.bedrock.image.amazon_stability1_transformation import AmazonStabilityConfig +from .llms.bedrock.image.amazon_stability3_transformation import AmazonStability3Config from .llms.bedrock.embed.amazon_titan_g1_transformation import AmazonTitanG1Config from .llms.bedrock.embed.amazon_titan_multimodal_transformation import ( AmazonTitanMultimodalEmbeddingG1Config, diff --git a/litellm/llms/bedrock/image/amazon_stability1_transformation.py b/litellm/llms/bedrock/image/amazon_stability1_transformation.py index 83cccb947..880881e97 100644 --- a/litellm/llms/bedrock/image/amazon_stability1_transformation.py +++ b/litellm/llms/bedrock/image/amazon_stability1_transformation.py @@ -1,6 +1,10 @@ import types from typing import List, Optional +from openai.types.image import Image + +from litellm.types.utils import ImageResponse + class AmazonStabilityConfig: """ @@ -67,3 +71,34 @@ class AmazonStabilityConfig: ) and v is not None } + + @classmethod + def get_supported_openai_params(cls, model: Optional[str] = None) -> List: + return ["size"] + + @classmethod + def map_openai_params( + cls, + non_default_params: dict, + optional_params: dict, + ): + _size = non_default_params.get("size") + if _size is not None: + width, height = _size.split("x") + optional_params["width"] = int(width) + optional_params["height"] = int(height) + + return optional_params + + @classmethod + def transform_response_dict_to_openai_response( + cls, model_response: ImageResponse, response_dict: dict + ) -> ImageResponse: + image_list: List[Image] = [] + for artifact in response_dict["artifacts"]: + _image = Image(b64_json=artifact["base64"]) + image_list.append(_image) + + model_response.data = image_list + + return model_response diff --git a/litellm/llms/bedrock/image/amazon_stability3_transformation.py b/litellm/llms/bedrock/image/amazon_stability3_transformation.py new file mode 100644 index 000000000..784e86b04 --- /dev/null +++ b/litellm/llms/bedrock/image/amazon_stability3_transformation.py @@ -0,0 +1,94 @@ +import types +from typing import List, Optional + +from openai.types.image import Image + +from litellm.types.llms.bedrock import ( + AmazonStability3TextToImageRequest, + AmazonStability3TextToImageResponse, +) +from litellm.types.utils import ImageResponse + + +class AmazonStability3Config: + """ + Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 + + Stability API Ref: https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1sd3/post + """ + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + @classmethod + def get_supported_openai_params(cls, model: Optional[str] = None) -> List: + """ + No additional OpenAI params are mapped for stability 3 + """ + return [] + + @classmethod + def _is_stability_3_model(cls, model: Optional[str] = None) -> bool: + """ + Returns True if the model is a Stability 3 model + + Stability 3 models follow this pattern: + sd3-large + sd3-large-turbo + sd3-medium + sd3.5-large + sd3.5-large-turbo + """ + if model and ("sd3" in model or "sd3.5" in model): + return True + return False + + @classmethod + def transform_request_body( + cls, prompt: str, optional_params: dict + ) -> AmazonStability3TextToImageRequest: + """ + Transform the request body for the Stability 3 models + """ + data = AmazonStability3TextToImageRequest(prompt=prompt, **optional_params) + return data + + @classmethod + def map_openai_params(cls, non_default_params: dict, optional_params: dict) -> dict: + """ + Map the OpenAI params to the Bedrock params + + No OpenAI params are mapped for Stability 3, so directly return the optional_params + """ + return optional_params + + @classmethod + def transform_response_dict_to_openai_response( + cls, model_response: ImageResponse, response_dict: dict + ) -> ImageResponse: + """ + Transform the response dict to the OpenAI response + """ + + stability_3_response = AmazonStability3TextToImageResponse(**response_dict) + openai_images: List[Image] = [] + for _img in stability_3_response.get("images", []): + openai_images.append(Image(b64_json=_img)) + + model_response.data = openai_images + return model_response diff --git a/litellm/llms/bedrock/image/image_handler.py b/litellm/llms/bedrock/image/image_handler.py index edf852fd3..31af2910f 100644 --- a/litellm/llms/bedrock/image/image_handler.py +++ b/litellm/llms/bedrock/image/image_handler.py @@ -183,28 +183,9 @@ class BedrockImageGeneration(BaseAWSLLM): boto3_credentials_info.aws_region_name, ) - # transform request - ### FORMAT IMAGE GENERATION INPUT ### - provider = model.split(".")[0] - inference_params = copy.deepcopy(optional_params) - inference_params.pop( - "user", None - ) # make sure user is not passed in for bedrock call - data = {} - if provider == "stability": - prompt = prompt.replace(os.linesep, " ") - ## LOAD CONFIG - config = litellm.AmazonStabilityConfig.get_config() - for k, v in config.items(): - if ( - k not in inference_params - ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in - inference_params[k] = v - data = {"text_prompts": [{"text": prompt, "weight": 1}], **inference_params} - else: - raise BedrockError( - status_code=422, message=f"Unsupported model={model}, passed in" - ) + data = self._get_request_body( + model=model, prompt=prompt, optional_params=optional_params + ) # Make POST Request body = json.dumps(data).encode("utf-8") @@ -239,6 +220,51 @@ class BedrockImageGeneration(BaseAWSLLM): data=data, ) + def _get_request_body( + self, + model: str, + prompt: str, + optional_params: dict, + ) -> dict: + """ + Get the request body for the Bedrock Image Generation API + + Checks the model/provider and transforms the request body accordingly + + Returns: + dict: The request body to use for the Bedrock Image Generation API + """ + provider = model.split(".")[0] + inference_params = copy.deepcopy(optional_params) + inference_params.pop( + "user", None + ) # make sure user is not passed in for bedrock call + data = {} + if provider == "stability": + if litellm.AmazonStability3Config._is_stability_3_model(model): + request_body = litellm.AmazonStability3Config.transform_request_body( + prompt=prompt, optional_params=optional_params + ) + return dict(request_body) + else: + prompt = prompt.replace(os.linesep, " ") + ## LOAD CONFIG + config = litellm.AmazonStabilityConfig.get_config() + for k, v in config.items(): + if ( + k not in inference_params + ): # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in + inference_params[k] = v + data = { + "text_prompts": [{"text": prompt, "weight": 1}], + **inference_params, + } + else: + raise BedrockError( + status_code=422, message=f"Unsupported model={model}, passed in" + ) + return data + def _transform_response_dict_to_openai_response( self, model_response: ImageResponse, @@ -265,11 +291,14 @@ class BedrockImageGeneration(BaseAWSLLM): if response_dict is None: raise ValueError("Error in response object format, got None") - image_list: List[Image] = [] - for artifact in response_dict["artifacts"]: - _image = Image(b64_json=artifact["base64"]) - image_list.append(_image) - - model_response.data = image_list + config_class = ( + litellm.AmazonStability3Config + if litellm.AmazonStability3Config._is_stability_3_model(model=model) + else litellm.AmazonStabilityConfig + ) + config_class.transform_response_dict_to_openai_response( + model_response=model_response, + response_dict=response_dict, + ) return model_response diff --git a/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py b/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py deleted file mode 100644 index a83b26226..000000000 --- a/litellm/llms/bedrock/image/stability_stable_diffusion1_transformation.py +++ /dev/null @@ -1,73 +0,0 @@ -import copy -import os -import types -from typing import Any, Dict, List, Optional, TypedDict, Union - -import litellm - - -class AmazonStability1Config: - """ - Reference: https://us-west-2.console.aws.amazon.com/bedrock/home?region=us-west-2#/providers?model=stability.stable-diffusion-xl-v0 - - Supported Params for the Amazon / Stable Diffusion models: - - - `cfg_scale` (integer): Default `7`. Between [ 0 .. 35 ]. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt) - - - `seed` (float): Default: `0`. Between [ 0 .. 4294967295 ]. Random noise seed (omit this option or use 0 for a random seed) - - - `steps` (array of strings): Default `30`. Between [ 10 .. 50 ]. Number of diffusion steps to run. - - - `width` (integer): Default: `512`. multiple of 64 >= 128. Width of the image to generate, in pixels, in an increment divible by 64. - Engine-specific dimension validation: - - - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - - SDXL v1.0: same as SDXL v0.9 - - SD v1.6: must be between 320x320 and 1536x1536 - - - `height` (integer): Default: `512`. multiple of 64 >= 128. Height of the image to generate, in pixels, in an increment divible by 64. - Engine-specific dimension validation: - - - SDXL Beta: must be between 128x128 and 512x896 (or 896x512); only one dimension can be greater than 512. - - SDXL v0.9: must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, or 896x1152 - - SDXL v1.0: same as SDXL v0.9 - - SD v1.6: must be between 320x320 and 1536x1536 - """ - - cfg_scale: Optional[int] = None - seed: Optional[float] = None - steps: Optional[List[str]] = None - width: Optional[int] = None - height: Optional[int] = None - - def __init__( - self, - cfg_scale: Optional[int] = None, - seed: Optional[float] = None, - steps: Optional[List[str]] = None, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> None: - locals_ = locals() - for key, value in locals_.items(): - if key != "self" and value is not None: - setattr(self.__class__, key, value) - - @classmethod - def get_config(cls): - return { - k: v - for k, v in cls.__dict__.items() - if not k.startswith("__") - and not isinstance( - v, - ( - types.FunctionType, - types.BuiltinFunctionType, - classmethod, - staticmethod, - ), - ) - and v is not None - } diff --git a/litellm/main.py b/litellm/main.py index 5be596e94..afb46c698 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -4448,6 +4448,7 @@ def image_generation( # noqa: PLR0915 k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider optional_params = get_optional_params_image_gen( + model=model, n=n, quality=quality, response_format=response_format, @@ -4540,7 +4541,7 @@ def image_generation( # noqa: PLR0915 elif custom_llm_provider == "bedrock": if model is None: raise Exception("Model needs to be set for bedrock") - model_response = bedrock_image_generation.image_generation( + model_response = bedrock_image_generation.image_generation( # type: ignore model=model, prompt=prompt, timeout=timeout, diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index a9c65b2c9..6e57fd4ed 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -5611,6 +5611,13 @@ "litellm_provider": "bedrock", "mode": "image_generation" }, + "stability.stability.sd3-large-v1:0": { + "max_tokens": 77, + "max_input_tokens": 77, + "output_cost_per_image": 0.08, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, "sagemaker/meta-textgeneration-llama-2-7b": { "max_tokens": 4096, "max_input_tokens": 4096, diff --git a/litellm/types/llms/bedrock.py b/litellm/types/llms/bedrock.py index 737aac3c3..c80b16f6e 100644 --- a/litellm/types/llms/bedrock.py +++ b/litellm/types/llms/bedrock.py @@ -275,3 +275,32 @@ AmazonEmbeddingRequest = Union[ AmazonTitanV2EmbeddingRequest, AmazonTitanG1EmbeddingRequest, ] + + +class AmazonStability3TextToImageRequest(TypedDict, total=False): + """ + Request for Amazon Stability 3 Text to Image API + + Ref here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-diffusion-3-text-image.html + """ + + prompt: str + aspect_ratio: Literal[ + "16:9", "1:1", "21:9", "2:3", "3:2", "4:5", "5:4", "9:16", "9:21" + ] + mode: Literal["image-to-image", "text-to-image"] + output_format: Literal["JPEG", "PNG"] + seed: int + negative_prompt: str + + +class AmazonStability3TextToImageResponse(TypedDict, total=False): + """ + Response for Amazon Stability 3 Text to Image API + + Ref: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-diffusion-3-text-image.html + """ + + images: List[str] + seeds: List[str] + finish_reasons: List[str] diff --git a/litellm/utils.py b/litellm/utils.py index e4e84398f..d07d86f7d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -2174,6 +2174,7 @@ def get_optional_params_transcription( def get_optional_params_image_gen( + model: Optional[str] = None, n: Optional[int] = None, quality: Optional[str] = None, response_format: Optional[str] = None, @@ -2186,6 +2187,7 @@ def get_optional_params_image_gen( ): # retrieve all parameters passed to the function passed_params = locals() + model = passed_params.pop("model", None) custom_llm_provider = passed_params.pop("custom_llm_provider") additional_drop_params = passed_params.pop("additional_drop_params", None) special_params = passed_params.pop("kwargs") @@ -2232,7 +2234,7 @@ def get_optional_params_image_gen( elif k not in supported_params: raise UnsupportedParamsError( status_code=500, - message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", + message=f"Setting `{k}` is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", ) return non_default_params @@ -2243,12 +2245,17 @@ def get_optional_params_image_gen( ): optional_params = non_default_params elif custom_llm_provider == "bedrock": - supported_params = ["size"] + # use stability3 config class if model is a stability3 model + config_class = ( + litellm.AmazonStability3Config + if litellm.AmazonStability3Config._is_stability_3_model(model=model) + else litellm.AmazonStabilityConfig + ) + supported_params = config_class.get_supported_openai_params(model=model) _check_valid_arg(supported_params=supported_params) - if size is not None: - width, height = size.split("x") - optional_params["width"] = int(width) - optional_params["height"] = int(height) + optional_params = config_class.map_openai_params( + non_default_params=non_default_params, optional_params={} + ) elif custom_llm_provider == "vertex_ai": supported_params = ["n"] """ diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index a9c65b2c9..6e57fd4ed 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -5611,6 +5611,13 @@ "litellm_provider": "bedrock", "mode": "image_generation" }, + "stability.stability.sd3-large-v1:0": { + "max_tokens": 77, + "max_input_tokens": 77, + "output_cost_per_image": 0.08, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, "sagemaker/meta-textgeneration-llama-2-7b": { "max_tokens": 4096, "max_input_tokens": 4096, diff --git a/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py b/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py new file mode 100644 index 000000000..e04eb2a1a --- /dev/null +++ b/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py @@ -0,0 +1,187 @@ +import logging +import os +import sys +import traceback + +from dotenv import load_dotenv +from openai.types.image import Image + +logging.basicConfig(level=logging.DEBUG) +load_dotenv() +import asyncio +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest + +import litellm +from litellm.llms.bedrock.image.amazon_stability3_transformation import ( + AmazonStability3Config, +) +from litellm.llms.bedrock.image.amazon_stability1_transformation import ( + AmazonStabilityConfig, +) +from litellm.types.llms.bedrock import ( + AmazonStability3TextToImageRequest, + AmazonStability3TextToImageResponse, +) +from litellm.types.utils import ImageResponse +from unittest.mock import MagicMock, patch +from litellm.llms.bedrock.image.image_handler import ( + BedrockImageGeneration, + BedrockImagePreparedRequest, +) + + +@pytest.mark.parametrize( + "model,expected", + [ + ("sd3-large", True), + ("sd3-large-turbo", True), + ("sd3-medium", True), + ("sd3.5-large", True), + ("sd3.5-large-turbo", True), + ("gpt-4", False), + (None, False), + ("other-model", False), + ], +) +def test_is_stability_3_model(model, expected): + result = AmazonStability3Config._is_stability_3_model(model) + assert result == expected + + +def test_transform_request_body(): + prompt = "A beautiful sunset" + optional_params = {"size": "1024x1024"} + + result = AmazonStability3Config.transform_request_body(prompt, optional_params) + + assert result["prompt"] == prompt + assert result["size"] == "1024x1024" + + +def test_map_openai_params(): + non_default_params = {"n": 2, "size": "1024x1024"} + optional_params = {"cfg_scale": 7} + + result = AmazonStability3Config.map_openai_params( + non_default_params, optional_params + ) + + assert result == optional_params + assert "n" not in result # OpenAI params should not be included + + +def test_transform_response_dict_to_openai_response(): + # Create a mock response + response_dict = {"images": ["base64_encoded_image_1", "base64_encoded_image_2"]} + model_response = ImageResponse() + + result = AmazonStability3Config.transform_response_dict_to_openai_response( + model_response, response_dict + ) + + assert isinstance(result, ImageResponse) + assert len(result.data) == 2 + assert all(hasattr(img, "b64_json") for img in result.data) + assert [img.b64_json for img in result.data] == response_dict["images"] + + +def test_amazon_stability_get_supported_openai_params(): + result = AmazonStabilityConfig.get_supported_openai_params() + assert result == ["size"] + + +def test_amazon_stability_map_openai_params(): + # Test with size parameter + non_default_params = {"size": "512x512"} + optional_params = {"cfg_scale": 7} + + result = AmazonStabilityConfig.map_openai_params( + non_default_params, optional_params + ) + + assert result["width"] == 512 + assert result["height"] == 512 + assert result["cfg_scale"] == 7 + + +def test_amazon_stability_transform_response(): + # Create a mock response + response_dict = { + "artifacts": [ + {"base64": "base64_encoded_image_1"}, + {"base64": "base64_encoded_image_2"}, + ] + } + model_response = ImageResponse() + + result = AmazonStabilityConfig.transform_response_dict_to_openai_response( + model_response, response_dict + ) + + assert isinstance(result, ImageResponse) + assert len(result.data) == 2 + assert all(hasattr(img, "b64_json") for img in result.data) + assert [img.b64_json for img in result.data] == [ + "base64_encoded_image_1", + "base64_encoded_image_2", + ] + + +def test_get_request_body_stability3(): + handler = BedrockImageGeneration() + prompt = "A beautiful sunset" + optional_params = {} + model = "stability.sd3-large" + + result = handler._get_request_body( + model=model, prompt=prompt, optional_params=optional_params + ) + + assert result["prompt"] == prompt + + +def test_get_request_body_stability(): + handler = BedrockImageGeneration() + prompt = "A beautiful sunset" + optional_params = {"cfg_scale": 7} + model = "stability.stable-diffusion-xl" + + result = handler._get_request_body( + model=model, prompt=prompt, optional_params=optional_params + ) + + assert result["text_prompts"][0]["text"] == prompt + assert result["text_prompts"][0]["weight"] == 1 + assert result["cfg_scale"] == 7 + + +def test_transform_response_dict_to_openai_response_stability3(): + handler = BedrockImageGeneration() + model_response = ImageResponse() + model = "stability.sd3-large" + logging_obj = MagicMock() + prompt = "A beautiful sunset" + + # Mock response for Stability AI SD3 + mock_response = MagicMock() + mock_response.text = '{"images": ["base64_image_1", "base64_image_2"]}' + mock_response.json.return_value = {"images": ["base64_image_1", "base64_image_2"]} + + result = handler._transform_response_dict_to_openai_response( + model_response=model_response, + model=model, + logging_obj=logging_obj, + prompt=prompt, + response=mock_response, + data={}, + ) + + assert isinstance(result, ImageResponse) + assert len(result.data) == 2 + assert all(hasattr(img, "b64_json") for img in result.data) + assert [img.b64_json for img in result.data] == ["base64_image_1", "base64_image_2"] diff --git a/tests/image_gen_tests/test_image_generation.py b/tests/image_gen_tests/test_image_generation.py index 85f619f2f..cf46f90bb 100644 --- a/tests/image_gen_tests/test_image_generation.py +++ b/tests/image_gen_tests/test_image_generation.py @@ -20,6 +20,81 @@ sys.path.insert( import pytest import litellm +import json +import tempfile + + +def get_vertex_ai_creds_json() -> dict: + # Define the path to the vertex_key.json file + print("loading vertex ai credentials") + filepath = os.path.dirname(os.path.abspath(__file__)) + vertex_key_path = filepath + "/vertex_key.json" + # Read the existing content of the file or create an empty dictionary + try: + with open(vertex_key_path, "r") as file: + # Read the file content + print("Read vertexai file path") + content = file.read() + + # If the file is empty or not valid JSON, create an empty dictionary + if not content or not content.strip(): + service_account_key_data = {} + else: + # Attempt to load the existing JSON content + file.seek(0) + service_account_key_data = json.load(file) + except FileNotFoundError: + # If the file doesn't exist, create an empty dictionary + service_account_key_data = {} + + # Update the service_account_key_data with environment variables + private_key_id = os.environ.get("VERTEX_AI_PRIVATE_KEY_ID", "") + private_key = os.environ.get("VERTEX_AI_PRIVATE_KEY", "") + private_key = private_key.replace("\\n", "\n") + service_account_key_data["private_key_id"] = private_key_id + service_account_key_data["private_key"] = private_key + + return service_account_key_data + + +def load_vertex_ai_credentials(): + # Define the path to the vertex_key.json file + print("loading vertex ai credentials") + filepath = os.path.dirname(os.path.abspath(__file__)) + vertex_key_path = filepath + "/vertex_key.json" + + # Read the existing content of the file or create an empty dictionary + try: + with open(vertex_key_path, "r") as file: + # Read the file content + print("Read vertexai file path") + content = file.read() + + # If the file is empty or not valid JSON, create an empty dictionary + if not content or not content.strip(): + service_account_key_data = {} + else: + # Attempt to load the existing JSON content + file.seek(0) + service_account_key_data = json.load(file) + except FileNotFoundError: + # If the file doesn't exist, create an empty dictionary + service_account_key_data = {} + + # Update the service_account_key_data with environment variables + private_key_id = os.environ.get("VERTEX_AI_PRIVATE_KEY_ID", "") + private_key = os.environ.get("VERTEX_AI_PRIVATE_KEY", "") + private_key = private_key.replace("\\n", "\n") + service_account_key_data["private_key_id"] = private_key_id + service_account_key_data["private_key"] = private_key + + # Create a temporary file + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + # Write the updated content to the temporary files + json.dump(service_account_key_data, temp_file, indent=2) + + # Export the temporary file as GOOGLE_APPLICATION_CREDENTIALS + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.abspath(temp_file.name) def test_image_generation_openai(): @@ -163,12 +238,17 @@ async def test_async_image_generation_azure(): pytest.fail(f"An exception occurred - {str(e)}") -def test_image_generation_bedrock(): +@pytest.mark.asyncio +@pytest.mark.parametrize( + "model", + ["bedrock/stability.sd3-large-v1:0", "bedrock/stability.stable-diffusion-xl-v1"], +) +def test_image_generation_bedrock(model): try: litellm.set_verbose = True response = litellm.image_generation( prompt="A cute baby sea otter", - model="bedrock/stability.stable-diffusion-xl-v1", + model=model, aws_region_name="us-west-2", ) @@ -213,7 +293,6 @@ from openai.types.image import Image @pytest.mark.parametrize("sync_mode", [True, False]) @pytest.mark.asyncio async def test_aimage_generation_vertex_ai(sync_mode): - from test_amazing_vertex_completion import load_vertex_ai_credentials litellm.set_verbose = True diff --git a/tests/image_gen_tests/vertex_key.json b/tests/image_gen_tests/vertex_key.json new file mode 100644 index 000000000..e2fd8512b --- /dev/null +++ b/tests/image_gen_tests/vertex_key.json @@ -0,0 +1,13 @@ +{ + "type": "service_account", + "project_id": "adroit-crow-413218", + "private_key_id": "", + "private_key": "", + "client_email": "test-adroit-crow@adroit-crow-413218.iam.gserviceaccount.com", + "client_id": "104886546564708740969", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test-adroit-crow%40adroit-crow-413218.iam.gserviceaccount.com", + "universe_domain": "googleapis.com" +} From 70aa85af1fc01d30934ee71a703b8d3982420d05 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 8 Nov 2024 19:51:35 -0800 Subject: [PATCH 52/67] fix model cost map stability.sd3-large-v1:0 --- litellm/model_prices_and_context_window_backup.json | 2 +- model_prices_and_context_window.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 6e57fd4ed..e8aeac2cb 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -5611,7 +5611,7 @@ "litellm_provider": "bedrock", "mode": "image_generation" }, - "stability.stability.sd3-large-v1:0": { + "stability.sd3-large-v1:0": { "max_tokens": 77, "max_input_tokens": 77, "output_cost_per_image": 0.08, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 6e57fd4ed..e8aeac2cb 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -5611,7 +5611,7 @@ "litellm_provider": "bedrock", "mode": "image_generation" }, - "stability.stability.sd3-large-v1:0": { + "stability.sd3-large-v1:0": { "max_tokens": 77, "max_input_tokens": 77, "output_cost_per_image": 0.08, From eb92ed4156a259fe4109df9da4e7e7c151f3a773 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Sun, 10 Nov 2024 22:05:34 -0800 Subject: [PATCH 53/67] (Feat) 273% improvement GCS Bucket Logger - use Batched Logging (#6679) * use CustomBatchLogger for GCS * add GCS bucket logging type * use batch logging for GCs bucket * add gcs_bucket * allow setting flush_interval on CustomBatchLogger * set GCS_FLUSH_INTERVAL to 1s * fix test_key_logging * fix test_key_logging * add docs on new env vars --- .circleci/config.yml | 1 + docs/my-website/docs/proxy/configs.md | 2 + litellm/integrations/custom_batch_logger.py | 3 +- litellm/integrations/gcs_bucket/gcs_bucket.py | 146 +++++++++++------- .../gcs_bucket/gcs_bucket_base.py | 7 +- .../key_management_endpoints.py | 4 +- litellm/proxy/proxy_config.yaml | 8 +- litellm/types/integrations/gcs_bucket.py | 28 ++++ tests/local_testing/test_gcs_bucket.py | 1 + 9 files changed, 128 insertions(+), 72 deletions(-) create mode 100644 litellm/types/integrations/gcs_bucket.py diff --git a/.circleci/config.yml b/.circleci/config.yml index d2d83cd0e..88e83fa7f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1006,6 +1006,7 @@ jobs: -e AWS_REGION_NAME=$AWS_REGION_NAME \ -e APORIA_API_KEY_1=$APORIA_API_KEY_1 \ -e COHERE_API_KEY=$COHERE_API_KEY \ + -e GCS_FLUSH_INTERVAL="1" \ --name my-app \ -v $(pwd)/litellm/proxy/example_config_yaml/otel_test_config.yaml:/app/config.yaml \ -v $(pwd)/litellm/proxy/example_config_yaml/custom_guardrail.py:/app/custom_guardrail.py \ diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md index 1adc4943d..b4d70a4e7 100644 --- a/docs/my-website/docs/proxy/configs.md +++ b/docs/my-website/docs/proxy/configs.md @@ -934,6 +934,8 @@ router_settings: | EMAIL_SUPPORT_CONTACT | Support contact email address | GCS_BUCKET_NAME | Name of the Google Cloud Storage bucket | GCS_PATH_SERVICE_ACCOUNT | Path to the Google Cloud service account JSON file +| GCS_FLUSH_INTERVAL | Flush interval for GCS logging (in seconds). Specify how often you want a log to be sent to GCS. +| GCS_BATCH_SIZE | Batch size for GCS logging. Specify after how many logs you want to flush to GCS. If `BATCH_SIZE` is set to 10, logs are flushed every 10 logs. | GENERIC_AUTHORIZATION_ENDPOINT | Authorization endpoint for generic OAuth providers | GENERIC_CLIENT_ID | Client ID for generic OAuth providers | GENERIC_CLIENT_SECRET | Client secret for generic OAuth providers diff --git a/litellm/integrations/custom_batch_logger.py b/litellm/integrations/custom_batch_logger.py index aa7f0bba2..7ef63d25c 100644 --- a/litellm/integrations/custom_batch_logger.py +++ b/litellm/integrations/custom_batch_logger.py @@ -21,6 +21,7 @@ class CustomBatchLogger(CustomLogger): self, flush_lock: Optional[asyncio.Lock] = None, batch_size: Optional[int] = DEFAULT_BATCH_SIZE, + flush_interval: Optional[int] = DEFAULT_FLUSH_INTERVAL_SECONDS, **kwargs, ) -> None: """ @@ -28,7 +29,7 @@ class CustomBatchLogger(CustomLogger): flush_lock (Optional[asyncio.Lock], optional): Lock to use when flushing the queue. Defaults to None. Only used for custom loggers that do batching """ self.log_queue: List = [] - self.flush_interval = DEFAULT_FLUSH_INTERVAL_SECONDS # 10 seconds + self.flush_interval = flush_interval or DEFAULT_FLUSH_INTERVAL_SECONDS self.batch_size: int = batch_size or DEFAULT_BATCH_SIZE self.last_flush_time = time.time() self.flush_lock = flush_lock diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py index f7f36c124..0b637f9b6 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket.py @@ -1,3 +1,4 @@ +import asyncio import json import os import uuid @@ -10,10 +11,12 @@ from pydantic import BaseModel, Field import litellm from litellm._logging import verbose_logger +from litellm.integrations.custom_batch_logger import CustomBatchLogger from litellm.integrations.custom_logger import CustomLogger from litellm.integrations.gcs_bucket.gcs_bucket_base import GCSBucketBase from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler from litellm.proxy._types import CommonProxyErrors, SpendLogsMetadata, SpendLogsPayload +from litellm.types.integrations.gcs_bucket import * from litellm.types.utils import ( StandardCallbackDynamicParams, StandardLoggingMetadata, @@ -27,12 +30,8 @@ else: IAM_AUTH_KEY = "IAM_AUTH" - - -class GCSLoggingConfig(TypedDict): - bucket_name: str - vertex_instance: VertexBase - path_service_account: Optional[str] +GCS_DEFAULT_BATCH_SIZE = 2048 +GCS_DEFAULT_FLUSH_INTERVAL_SECONDS = 20 class GCSBucketLogger(GCSBucketBase): @@ -41,6 +40,21 @@ class GCSBucketLogger(GCSBucketBase): super().__init__(bucket_name=bucket_name) self.vertex_instances: Dict[str, VertexBase] = {} + + # Init Batch logging settings + self.log_queue: List[GCSLogQueueItem] = [] + self.batch_size = int(os.getenv("GCS_BATCH_SIZE", GCS_DEFAULT_BATCH_SIZE)) + self.flush_interval = int( + os.getenv("GCS_FLUSH_INTERVAL", GCS_DEFAULT_FLUSH_INTERVAL_SECONDS) + ) + asyncio.create_task(self.periodic_flush()) + self.flush_lock = asyncio.Lock() + super().__init__( + flush_lock=self.flush_lock, + batch_size=self.batch_size, + flush_interval=self.flush_interval, + ) + if premium_user is not True: raise ValueError( f"GCS Bucket logging is a premium feature. Please upgrade to use it. {CommonProxyErrors.not_premium_user.value}" @@ -60,44 +74,23 @@ class GCSBucketLogger(GCSBucketBase): kwargs, response_obj, ) - gcs_logging_config: GCSLoggingConfig = await self.get_gcs_logging_config( - kwargs - ) - headers = await self.construct_request_headers( - vertex_instance=gcs_logging_config["vertex_instance"], - service_account_json=gcs_logging_config["path_service_account"], - ) - bucket_name = gcs_logging_config["bucket_name"] - logging_payload: Optional[StandardLoggingPayload] = kwargs.get( "standard_logging_object", None ) - if logging_payload is None: raise ValueError("standard_logging_object not found in kwargs") - # Get the current date - current_date = datetime.now().strftime("%Y-%m-%d") - - # Modify the object_name to include the date-based folder - object_name = f"{current_date}/{response_obj['id']}" - - await self._log_json_data_on_gcs( - headers=headers, - bucket_name=bucket_name, - object_name=object_name, - logging_payload=logging_payload, + # Add to logging queue - this will be flushed periodically + self.log_queue.append( + GCSLogQueueItem( + payload=logging_payload, kwargs=kwargs, response_obj=response_obj + ) ) + except Exception as e: verbose_logger.exception(f"GCS Bucket logging error: {str(e)}") async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time): - from litellm.proxy.proxy_server import premium_user - - if premium_user is not True: - raise ValueError( - f"GCS Bucket logging is a premium feature. Please upgrade to use it. {CommonProxyErrors.not_premium_user.value}" - ) try: verbose_logger.debug( "GCS Logger: async_log_failure_event logging kwargs: %s, response_obj: %s", @@ -105,44 +98,77 @@ class GCSBucketLogger(GCSBucketBase): response_obj, ) - gcs_logging_config: GCSLoggingConfig = await self.get_gcs_logging_config( - kwargs - ) - headers = await self.construct_request_headers( - vertex_instance=gcs_logging_config["vertex_instance"], - service_account_json=gcs_logging_config["path_service_account"], - ) - bucket_name = gcs_logging_config["bucket_name"] - logging_payload: Optional[StandardLoggingPayload] = kwargs.get( "standard_logging_object", None ) - if logging_payload is None: raise ValueError("standard_logging_object not found in kwargs") - _litellm_params = kwargs.get("litellm_params") or {} - metadata = _litellm_params.get("metadata") or {} - - # Get the current date - current_date = datetime.now().strftime("%Y-%m-%d") - - # Modify the object_name to include the date-based folder - object_name = f"{current_date}/failure-{uuid.uuid4().hex}" - - if "gcs_log_id" in metadata: - object_name = metadata["gcs_log_id"] - - await self._log_json_data_on_gcs( - headers=headers, - bucket_name=bucket_name, - object_name=object_name, - logging_payload=logging_payload, + # Add to logging queue - this will be flushed periodically + self.log_queue.append( + GCSLogQueueItem( + payload=logging_payload, kwargs=kwargs, response_obj=response_obj + ) ) except Exception as e: verbose_logger.exception(f"GCS Bucket logging error: {str(e)}") + async def async_send_batch(self): + """Process queued logs in batch - sends logs to GCS Bucket""" + if not self.log_queue: + return + + try: + for log_item in self.log_queue: + logging_payload = log_item["payload"] + kwargs = log_item["kwargs"] + response_obj = log_item.get("response_obj", None) or {} + + gcs_logging_config: GCSLoggingConfig = ( + await self.get_gcs_logging_config(kwargs) + ) + headers = await self.construct_request_headers( + vertex_instance=gcs_logging_config["vertex_instance"], + service_account_json=gcs_logging_config["path_service_account"], + ) + bucket_name = gcs_logging_config["bucket_name"] + object_name = self._get_object_name( + kwargs, logging_payload, response_obj + ) + await self._log_json_data_on_gcs( + headers=headers, + bucket_name=bucket_name, + object_name=object_name, + logging_payload=logging_payload, + ) + + # Clear the queue after processing + self.log_queue.clear() + + except Exception as e: + verbose_logger.exception(f"GCS Bucket batch logging error: {str(e)}") + + def _get_object_name( + self, kwargs: Dict, logging_payload: StandardLoggingPayload, response_obj: Any + ) -> str: + """ + Get the object name to use for the current payload + """ + current_date = datetime.now().strftime("%Y-%m-%d") + if logging_payload.get("error_str", None) is not None: + object_name = f"{current_date}/failure-{uuid.uuid4().hex}" + else: + object_name = f"{current_date}/{response_obj.get('id', '')}" + + # used for testing + _litellm_params = kwargs.get("litellm_params", None) or {} + _metadata = _litellm_params.get("metadata", None) or {} + if "gcs_log_id" in _metadata: + object_name = _metadata["gcs_log_id"] + + return object_name + def _handle_folders_in_bucket_name( self, bucket_name: str, diff --git a/litellm/integrations/gcs_bucket/gcs_bucket_base.py b/litellm/integrations/gcs_bucket/gcs_bucket_base.py index 56df3aa80..9615b9b21 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket_base.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket_base.py @@ -9,7 +9,7 @@ from pydantic import BaseModel, Field import litellm from litellm._logging import verbose_logger -from litellm.integrations.custom_logger import CustomLogger +from litellm.integrations.custom_batch_logger import CustomBatchLogger from litellm.llms.custom_httpx.http_handler import ( get_async_httpx_client, httpxSpecialProvider, @@ -21,8 +21,8 @@ else: VertexBase = Any -class GCSBucketBase(CustomLogger): - def __init__(self, bucket_name: Optional[str] = None) -> None: +class GCSBucketBase(CustomBatchLogger): + def __init__(self, bucket_name: Optional[str] = None, **kwargs) -> None: self.async_httpx_client = get_async_httpx_client( llm_provider=httpxSpecialProvider.LoggingCallback ) @@ -30,6 +30,7 @@ class GCSBucketBase(CustomLogger): _bucket_name = bucket_name or os.getenv("GCS_BUCKET_NAME") self.path_service_account_json: Optional[str] = _path_service_account self.BUCKET_NAME: Optional[str] = _bucket_name + super().__init__(**kwargs) async def construct_request_headers( self, diff --git a/litellm/proxy/management_endpoints/key_management_endpoints.py b/litellm/proxy/management_endpoints/key_management_endpoints.py index 01baa232f..2c240a17f 100644 --- a/litellm/proxy/management_endpoints/key_management_endpoints.py +++ b/litellm/proxy/management_endpoints/key_management_endpoints.py @@ -1599,7 +1599,9 @@ async def test_key_logging( details=f"Logging test failed: {str(e)}", ) - await asyncio.sleep(1) # wait for callbacks to run + await asyncio.sleep( + 2 + ) # wait for callbacks to run, callbacks use batching so wait for the flush event # Check if any logger exceptions were triggered log_contents = log_capture_string.getvalue() diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index 694c1613d..b4a18baa4 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -7,10 +7,4 @@ model_list: litellm_settings: - callbacks: ["prometheus"] - service_callback: ["prometheus_system"] - - -general_settings: - allow_requests_on_db_unavailable: true - + callbacks: ["gcs_bucket"] diff --git a/litellm/types/integrations/gcs_bucket.py b/litellm/types/integrations/gcs_bucket.py new file mode 100644 index 000000000..18636ae1f --- /dev/null +++ b/litellm/types/integrations/gcs_bucket.py @@ -0,0 +1,28 @@ +from typing import TYPE_CHECKING, Any, Dict, Optional, TypedDict + +from litellm.types.utils import StandardLoggingPayload + +if TYPE_CHECKING: + from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase +else: + VertexBase = Any + + +class GCSLoggingConfig(TypedDict): + """ + Internal LiteLLM Config for GCS Bucket logging + """ + + bucket_name: str + vertex_instance: VertexBase + path_service_account: Optional[str] + + +class GCSLogQueueItem(TypedDict): + """ + Internal Type, used for queueing logs to be sent to GCS Bucket + """ + + payload: StandardLoggingPayload + kwargs: Dict[str, Any] + response_obj: Optional[Any] diff --git a/tests/local_testing/test_gcs_bucket.py b/tests/local_testing/test_gcs_bucket.py index a01e839fa..4d431b662 100644 --- a/tests/local_testing/test_gcs_bucket.py +++ b/tests/local_testing/test_gcs_bucket.py @@ -28,6 +28,7 @@ verbose_logger.setLevel(logging.DEBUG) def load_vertex_ai_credentials(): # Define the path to the vertex_key.json file print("loading vertex ai credentials") + os.environ["GCS_FLUSH_INTERVAL"] = "1" filepath = os.path.dirname(os.path.abspath(__file__)) vertex_key_path = filepath + "/adroit-crow-413218-bc47f303efc9.json" From b8ae08b8ebaf17d7b029c4ee0874fa951cee2550 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 11 Nov 2024 22:57:33 +0530 Subject: [PATCH 54/67] =?UTF-8?q?bump:=20version=201.52.3=20=E2=86=92=201.?= =?UTF-8?q?52.4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 33d308d9f..099f33bd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.3" +version = "1.52.4" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.3" +version = "1.52.4" version_files = [ "pyproject.toml:^version" ] From f59cb46e71401698ae5e40e8277223a76c4905e3 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 12 Nov 2024 00:16:35 +0530 Subject: [PATCH 55/67] Litellm dev 11 11 2024 (#6693) * fix(__init__.py): add 'watsonx_text' as mapped llm api route Fixes https://github.com/BerriAI/litellm/issues/6663 * fix(opentelemetry.py): fix passing parallel tool calls to otel Fixes https://github.com/BerriAI/litellm/issues/6677 * refactor(test_opentelemetry_unit_tests.py): create a base set of unit tests for all logging integrations - test for parallel tool call handling reduces bugs in repo * fix(__init__.py): update provider-model mapping to include all known provider-model mappings Fixes https://github.com/BerriAI/litellm/issues/6669 * feat(anthropic): support passing document in llm api call * docs(anthropic.md): add pdf anthropic call to docs + expose new 'supports_pdf_input' function * fix(factory.py): fix linting error --- docs/my-website/docs/providers/anthropic.md | 93 + litellm/__init__.py | 76 +- litellm/integrations/opentelemetry.py | 45 +- litellm/llms/anthropic/chat/handler.py | 3 +- litellm/llms/anthropic/chat/transformation.py | 18 + litellm/llms/prompt_templates/factory.py | 41 +- ...odel_prices_and_context_window_backup.json | 3 +- litellm/proxy/_new_secret_config.yaml | 60 +- litellm/proxy/auth/user_api_key_auth.py | 1 - litellm/types/llms/anthropic.py | 11 +- litellm/types/utils.py | 5 - litellm/utils.py | 2134 +---------------- model_prices_and_context_window.json | 3 +- tests/llm_translation/base_llm_unit_tests.py | 27 + .../test_anthropic_completion.py | 38 + tests/local_testing/test_get_llm_provider.py | 8 + tests/local_testing/test_get_model_list.py | 11 - .../test_opentelemetry_unit_tests.py | 41 - tests/local_testing/test_utils.py | 21 + tests/logging_callback_tests/base_test.py | 100 + .../test_opentelemetry_unit_tests.py | 58 + 21 files changed, 533 insertions(+), 2264 deletions(-) delete mode 100644 tests/local_testing/test_get_model_list.py delete mode 100644 tests/local_testing/test_opentelemetry_unit_tests.py create mode 100644 tests/logging_callback_tests/base_test.py create mode 100644 tests/logging_callback_tests/test_opentelemetry_unit_tests.py diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md index 0c7b2a442..290e094d0 100644 --- a/docs/my-website/docs/providers/anthropic.md +++ b/docs/my-website/docs/providers/anthropic.md @@ -864,3 +864,96 @@ Human: How do I boil water? Assistant: ``` + +## Usage - PDF + +Pass base64 encoded PDF files to Anthropic models using the `image_url` field. + + + + +### **using base64** +```python +from litellm import completion, supports_pdf_input +import base64 +import requests + +# URL of the file +url = "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/2403.05530.pdf" + +# Download the file +response = requests.get(url) +file_data = response.content + +encoded_file = base64.b64encode(file_data).decode("utf-8") + +## check if model supports pdf input - (2024/11/11) only claude-3-5-haiku-20241022 supports it +supports_pdf_input("anthropic/claude-3-5-haiku-20241022") # True + +response = completion( + model="anthropic/claude-3-5-haiku-20241022", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "You are a very professional document summarization specialist. Please summarize the given document."}, + { + "type": "image_url", + "image_url": f"data:application/pdf;base64,{encoded_file}", # 👈 PDF + }, + ], + } + ], + max_tokens=300, +) + +print(response.choices[0]) +``` + + + +1. Add model to config + +```yaml +- model_name: claude-3-5-haiku-20241022 + litellm_params: + model: anthropic/claude-3-5-haiku-20241022 + api_key: os.environ/ANTHROPIC_API_KEY +``` + +2. Start Proxy + +``` +litellm --config /path/to/config.yaml +``` + +3. Test it! + +```bash +curl http://0.0.0.0:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer " \ + -d '{ + "model": "claude-3-5-haiku-20241022", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "You are a very professional document summarization specialist. Please summarize the given document" + }, + { + "type": "image_url", + "image_url": "data:application/pdf;base64,{encoded_file}" # 👈 PDF + } + } + ] + } + ], + "max_tokens": 300 + }' + +``` + + diff --git a/litellm/__init__.py b/litellm/__init__.py index b739afb93..9812de1d8 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -375,6 +375,7 @@ open_ai_text_completion_models: List = [] cohere_models: List = [] cohere_chat_models: List = [] mistral_chat_models: List = [] +text_completion_codestral_models: List = [] anthropic_models: List = [] empower_models: List = [] openrouter_models: List = [] @@ -401,6 +402,19 @@ deepinfra_models: List = [] perplexity_models: List = [] watsonx_models: List = [] gemini_models: List = [] +xai_models: List = [] +deepseek_models: List = [] +azure_ai_models: List = [] +voyage_models: List = [] +databricks_models: List = [] +cloudflare_models: List = [] +codestral_models: List = [] +friendliai_models: List = [] +palm_models: List = [] +groq_models: List = [] +azure_models: List = [] +anyscale_models: List = [] +cerebras_models: List = [] def add_known_models(): @@ -477,6 +491,34 @@ def add_known_models(): # ignore the 'up-to', '-to-' model names -> not real models. just for cost tracking based on model params. if "-to-" not in key: fireworks_ai_embedding_models.append(key) + elif value.get("litellm_provider") == "text-completion-codestral": + text_completion_codestral_models.append(key) + elif value.get("litellm_provider") == "xai": + xai_models.append(key) + elif value.get("litellm_provider") == "deepseek": + deepseek_models.append(key) + elif value.get("litellm_provider") == "azure_ai": + azure_ai_models.append(key) + elif value.get("litellm_provider") == "voyage": + voyage_models.append(key) + elif value.get("litellm_provider") == "databricks": + databricks_models.append(key) + elif value.get("litellm_provider") == "cloudflare": + cloudflare_models.append(key) + elif value.get("litellm_provider") == "codestral": + codestral_models.append(key) + elif value.get("litellm_provider") == "friendliai": + friendliai_models.append(key) + elif value.get("litellm_provider") == "palm": + palm_models.append(key) + elif value.get("litellm_provider") == "groq": + groq_models.append(key) + elif value.get("litellm_provider") == "azure": + azure_models.append(key) + elif value.get("litellm_provider") == "anyscale": + anyscale_models.append(key) + elif value.get("litellm_provider") == "cerebras": + cerebras_models.append(key) add_known_models() @@ -722,6 +764,20 @@ model_list = ( + vertex_language_models + watsonx_models + gemini_models + + text_completion_codestral_models + + xai_models + + deepseek_models + + azure_ai_models + + voyage_models + + databricks_models + + cloudflare_models + + codestral_models + + friendliai_models + + palm_models + + groq_models + + azure_models + + anyscale_models + + cerebras_models ) @@ -778,6 +834,7 @@ class LlmProviders(str, Enum): FIREWORKS_AI = "fireworks_ai" FRIENDLIAI = "friendliai" WATSONX = "watsonx" + WATSONX_TEXT = "watsonx_text" TRITON = "triton" PREDIBASE = "predibase" DATABRICKS = "databricks" @@ -794,6 +851,7 @@ provider_list: List[Union[LlmProviders, str]] = list(LlmProviders) models_by_provider: dict = { "openai": open_ai_chat_completion_models + open_ai_text_completion_models, + "text-completion-openai": open_ai_text_completion_models, "cohere": cohere_models + cohere_chat_models, "cohere_chat": cohere_chat_models, "anthropic": anthropic_models, @@ -817,6 +875,23 @@ models_by_provider: dict = { "watsonx": watsonx_models, "gemini": gemini_models, "fireworks_ai": fireworks_ai_models + fireworks_ai_embedding_models, + "aleph_alpha": aleph_alpha_models, + "text-completion-codestral": text_completion_codestral_models, + "xai": xai_models, + "deepseek": deepseek_models, + "mistral": mistral_chat_models, + "azure_ai": azure_ai_models, + "voyage": voyage_models, + "databricks": databricks_models, + "cloudflare": cloudflare_models, + "codestral": codestral_models, + "nlp_cloud": nlp_cloud_models, + "friendliai": friendliai_models, + "palm": palm_models, + "groq": groq_models, + "azure": azure_models, + "anyscale": anyscale_models, + "cerebras": cerebras_models, } # mapping for those models which have larger equivalents @@ -889,7 +964,6 @@ from .utils import ( supports_system_messages, get_litellm_params, acreate, - get_model_list, get_max_tokens, get_model_info, register_prompt_template, diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index 8102f2c60..30a280e57 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -2,14 +2,16 @@ import os from dataclasses import dataclass from datetime import datetime from functools import wraps -from typing import TYPE_CHECKING, Any, Dict, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import litellm from litellm._logging import verbose_logger from litellm.integrations.custom_logger import CustomLogger from litellm.types.services import ServiceLoggerPayload from litellm.types.utils import ( + ChatCompletionMessageToolCall, EmbeddingResponse, + Function, ImageResponse, ModelResponse, StandardLoggingPayload, @@ -403,6 +405,28 @@ class OpenTelemetry(CustomLogger): except Exception: return "" + @staticmethod + def _tool_calls_kv_pair( + tool_calls: List[ChatCompletionMessageToolCall], + ) -> Dict[str, Any]: + from litellm.proxy._types import SpanAttributes + + kv_pairs: Dict[str, Any] = {} + for idx, tool_call in enumerate(tool_calls): + _function = tool_call.get("function") + if not _function: + continue + + keys = Function.__annotations__.keys() + for key in keys: + _value = _function.get(key) + if _value: + kv_pairs[ + f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.function_call.{key}" + ] = _value + + return kv_pairs + def set_attributes( # noqa: PLR0915 self, span: Span, kwargs, response_obj: Optional[Any] ): @@ -597,18 +621,13 @@ class OpenTelemetry(CustomLogger): message = choice.get("message") tool_calls = message.get("tool_calls") if tool_calls: - self.safe_set_attribute( - span=span, - key=f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.function_call.name", - value=tool_calls[0].get("function").get("name"), - ) - self.safe_set_attribute( - span=span, - key=f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.function_call.arguments", - value=tool_calls[0] - .get("function") - .get("arguments"), - ) + kv_pairs = OpenTelemetry._tool_calls_kv_pair(tool_calls) # type: ignore + for key, value in kv_pairs.items(): + self.safe_set_attribute( + span=span, + key=key, + value=value, + ) except Exception as e: verbose_logger.exception( diff --git a/litellm/llms/anthropic/chat/handler.py b/litellm/llms/anthropic/chat/handler.py index da95ac075..2d119a28f 100644 --- a/litellm/llms/anthropic/chat/handler.py +++ b/litellm/llms/anthropic/chat/handler.py @@ -71,11 +71,12 @@ def validate_environment( prompt_caching_set = AnthropicConfig().is_cache_control_set(messages=messages) computer_tool_used = AnthropicConfig().is_computer_tool_used(tools=tools) - + pdf_used = AnthropicConfig().is_pdf_used(messages=messages) headers = AnthropicConfig().get_anthropic_headers( anthropic_version=anthropic_version, computer_tool_used=computer_tool_used, prompt_caching_set=prompt_caching_set, + pdf_used=pdf_used, api_key=api_key, ) diff --git a/litellm/llms/anthropic/chat/transformation.py b/litellm/llms/anthropic/chat/transformation.py index ec3285473..18c53b696 100644 --- a/litellm/llms/anthropic/chat/transformation.py +++ b/litellm/llms/anthropic/chat/transformation.py @@ -104,6 +104,7 @@ class AnthropicConfig: anthropic_version: Optional[str] = None, computer_tool_used: bool = False, prompt_caching_set: bool = False, + pdf_used: bool = False, ) -> dict: import json @@ -112,6 +113,8 @@ class AnthropicConfig: betas.append("prompt-caching-2024-07-31") if computer_tool_used: betas.append("computer-use-2024-10-22") + if pdf_used: + betas.append("pdfs-2024-09-25") headers = { "anthropic-version": anthropic_version or "2023-06-01", "x-api-key": api_key, @@ -365,6 +368,21 @@ class AnthropicConfig: return True return False + def is_pdf_used(self, messages: List[AllMessageValues]) -> bool: + """ + Set to true if media passed into messages. + """ + for message in messages: + if ( + "content" in message + and message["content"] is not None + and isinstance(message["content"], list) + ): + for content in message["content"]: + if "type" in content: + return True + return False + def translate_system_message( self, messages: List[AllMessageValues] ) -> List[AnthropicSystemMessageContent]: diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index aee304760..80ad2ca35 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -1330,7 +1330,10 @@ def convert_to_anthropic_tool_invoke( def add_cache_control_to_content( anthropic_content_element: Union[ - dict, AnthropicMessagesImageParam, AnthropicMessagesTextParam + dict, + AnthropicMessagesImageParam, + AnthropicMessagesTextParam, + AnthropicMessagesDocumentParam, ], orignal_content_element: Union[dict, AllMessageValues], ): @@ -1343,6 +1346,32 @@ def add_cache_control_to_content( return anthropic_content_element +def _anthropic_content_element_factory( + image_chunk: GenericImageParsingChunk, +) -> Union[AnthropicMessagesImageParam, AnthropicMessagesDocumentParam]: + if image_chunk["media_type"] == "application/pdf": + _anthropic_content_element: Union[ + AnthropicMessagesDocumentParam, AnthropicMessagesImageParam + ] = AnthropicMessagesDocumentParam( + type="document", + source=AnthropicContentParamSource( + type="base64", + media_type=image_chunk["media_type"], + data=image_chunk["data"], + ), + ) + else: + _anthropic_content_element = AnthropicMessagesImageParam( + type="image", + source=AnthropicContentParamSource( + type="base64", + media_type=image_chunk["media_type"], + data=image_chunk["data"], + ), + ) + return _anthropic_content_element + + def anthropic_messages_pt( # noqa: PLR0915 messages: List[AllMessageValues], model: str, @@ -1400,15 +1429,9 @@ def anthropic_messages_pt( # noqa: PLR0915 openai_image_url=m["image_url"]["url"] ) - _anthropic_content_element = AnthropicMessagesImageParam( - type="image", - source=AnthropicImageParamSource( - type="base64", - media_type=image_chunk["media_type"], - data=image_chunk["data"], - ), + _anthropic_content_element = ( + _anthropic_content_element_factory(image_chunk) ) - _content_element = add_cache_control_to_content( anthropic_content_element=_anthropic_content_element, orignal_content_element=dict(m), diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index e8aeac2cb..48b25523e 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1898,7 +1898,8 @@ "supports_function_calling": true, "tool_use_system_prompt_tokens": 264, "supports_assistant_prefill": true, - "supports_prompt_caching": true + "supports_prompt_caching": true, + "supports_pdf_input": true }, "claude-3-opus-20240229": { "max_tokens": 4096, diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index c44a46a67..cd723275b 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -1,63 +1,7 @@ model_list: - - model_name: claude-3-5-sonnet-20240620 + - model_name: "*" litellm_params: - model: claude-3-5-sonnet-20240620 - api_key: os.environ/ANTHROPIC_API_KEY - - model_name: claude-3-5-sonnet-aihubmix - litellm_params: - model: openai/claude-3-5-sonnet-20240620 - input_cost_per_token: 0.000003 # 3$/M - output_cost_per_token: 0.000015 # 15$/M - api_base: "https://exampleopenaiendpoint-production.up.railway.app" - api_key: my-fake-key - - model_name: fake-openai-endpoint-2 - litellm_params: - model: openai/my-fake-model - api_key: my-fake-key - api_base: https://exampleopenaiendpoint-production.up.railway.app/ - stream_timeout: 0.001 - timeout: 1 - rpm: 1 - - model_name: fake-openai-endpoint - litellm_params: - model: openai/my-fake-model - api_key: my-fake-key - api_base: https://exampleopenaiendpoint-production.up.railway.app/ - ## bedrock chat completions - - model_name: "*anthropic.claude*" - litellm_params: - model: bedrock/*anthropic.claude* - aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID - aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY - aws_region_name: os.environ/AWS_REGION_NAME - guardrailConfig: - "guardrailIdentifier": "h4dsqwhp6j66" - "guardrailVersion": "2" - "trace": "enabled" - -## bedrock embeddings - - model_name: "*amazon.titan-embed-*" - litellm_params: - model: bedrock/amazon.titan-embed-* - aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID - aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY - aws_region_name: os.environ/AWS_REGION_NAME - - model_name: "*cohere.embed-*" - litellm_params: - model: bedrock/cohere.embed-* - aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID - aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY - aws_region_name: os.environ/AWS_REGION_NAME - - - model_name: gpt-4 - litellm_params: - model: azure/chatgpt-v-2 - api_base: https://openai-gpt-4-test-v-1.openai.azure.com/ - api_version: "2023-05-15" - api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault - rpm: 480 - timeout: 300 - stream_timeout: 60 + model: "*" litellm_settings: fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index ff1acc3c9..6032a72af 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -1236,7 +1236,6 @@ def _return_user_api_key_auth_obj( start_time: datetime, user_role: Optional[LitellmUserRoles] = None, ) -> UserAPIKeyAuth: - traceback.print_stack() end_time = datetime.now() user_api_key_service_logger_obj.service_success_hook( service=ServiceTypes.AUTH, diff --git a/litellm/types/llms/anthropic.py b/litellm/types/llms/anthropic.py index bb65a372d..b0a3780b8 100644 --- a/litellm/types/llms/anthropic.py +++ b/litellm/types/llms/anthropic.py @@ -74,7 +74,7 @@ class AnthopicMessagesAssistantMessageParam(TypedDict, total=False): """ -class AnthropicImageParamSource(TypedDict): +class AnthropicContentParamSource(TypedDict): type: Literal["base64"] media_type: str data: str @@ -82,7 +82,13 @@ class AnthropicImageParamSource(TypedDict): class AnthropicMessagesImageParam(TypedDict, total=False): type: Required[Literal["image"]] - source: Required[AnthropicImageParamSource] + source: Required[AnthropicContentParamSource] + cache_control: Optional[Union[dict, ChatCompletionCachedContent]] + + +class AnthropicMessagesDocumentParam(TypedDict, total=False): + type: Required[Literal["document"]] + source: Required[AnthropicContentParamSource] cache_control: Optional[Union[dict, ChatCompletionCachedContent]] @@ -108,6 +114,7 @@ AnthropicMessagesUserMessageValues = Union[ AnthropicMessagesTextParam, AnthropicMessagesImageParam, AnthropicMessagesToolResultParam, + AnthropicMessagesDocumentParam, ] diff --git a/litellm/types/utils.py b/litellm/types/utils.py index c0a9764e8..a2b62f9cc 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -1322,11 +1322,6 @@ class TranscriptionResponse(OpenAIObject): class GenericImageParsingChunk(TypedDict): - # { - # "type": "base64", - # "media_type": f"image/{image_format}", - # "data": base64_data, - # } type: str media_type: str data: str diff --git a/litellm/utils.py b/litellm/utils.py index d07d86f7d..b10c94859 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1835,6 +1835,13 @@ def supports_audio_input(model: str, custom_llm_provider: Optional[str] = None) ) +def supports_pdf_input(model: str, custom_llm_provider: Optional[str] = None) -> bool: + """Check if a given model supports pdf input in a chat completion call""" + return _supports_factory( + model=model, custom_llm_provider=custom_llm_provider, key="supports_pdf_input" + ) + + def supports_audio_output( model: str, custom_llm_provider: Optional[str] = None ) -> bool: @@ -5420,2121 +5427,6 @@ def register_prompt_template( return litellm.custom_prompt_dict -####### DEPRECATED ################ - - -def get_all_keys(llm_provider=None): - try: - global last_fetched_at_keys - # if user is using hosted product -> instantiate their env with their hosted api keys - refresh every 5 minutes - print_verbose(f"Reaches get all keys, llm_provider: {llm_provider}") - user_email = ( - os.getenv("LITELLM_EMAIL") - or litellm.email - or litellm.token - or os.getenv("LITELLM_TOKEN") - ) - if user_email: - time_delta = 0 - if last_fetched_at_keys is not None: - current_time = time.time() - time_delta = current_time - last_fetched_at_keys - if ( - time_delta > 300 or last_fetched_at_keys is None or llm_provider - ): # if the llm provider is passed in , assume this happening due to an AuthError for that provider - # make the api call - last_fetched_at = time.time() - print_verbose(f"last_fetched_at: {last_fetched_at}") - response = requests.post( - url="http://api.litellm.ai/get_all_keys", - headers={"content-type": "application/json"}, - data=json.dumps({"user_email": user_email}), - ) - print_verbose(f"get model key response: {response.text}") - data = response.json() - # update model list - for key, value in data[ - "model_keys" - ].items(): # follows the LITELLM API KEY format - _API_KEY - e.g. HUGGINGFACE_API_KEY - os.environ[key] = value - # set model alias map - for model_alias, value in data["model_alias_map"].items(): - litellm.model_alias_map[model_alias] = value - return "it worked!" - return None - return None - except Exception: - print_verbose( - f"[Non-Blocking Error] get_all_keys error - {traceback.format_exc()}" - ) - pass - - -def get_model_list(): - global last_fetched_at, print_verbose - try: - # if user is using hosted product -> get their updated model list - user_email = ( - os.getenv("LITELLM_EMAIL") - or litellm.email - or litellm.token - or os.getenv("LITELLM_TOKEN") - ) - if user_email: - # make the api call - last_fetched_at = time.time() - print_verbose(f"last_fetched_at: {last_fetched_at}") - response = requests.post( - url="http://api.litellm.ai/get_model_list", - headers={"content-type": "application/json"}, - data=json.dumps({"user_email": user_email}), - ) - print_verbose(f"get_model_list response: {response.text}") - data = response.json() - # update model list - model_list = data["model_list"] - # # check if all model providers are in environment - # model_providers = data["model_providers"] - # missing_llm_provider = None - # for item in model_providers: - # if f"{item.upper()}_API_KEY" not in os.environ: - # missing_llm_provider = item - # break - # # update environment - if required - # threading.Thread(target=get_all_keys, args=(missing_llm_provider)).start() - return model_list - return [] # return empty list by default - except Exception: - print_verbose( - f"[Non-Blocking Error] get_model_list error - {traceback.format_exc()}" - ) - - -######## Streaming Class ############################ -# wraps the completion stream to return the correct format for the model -# replicate/anthropic/cohere - -# class CustomStreamWrapper: -# def __init__( -# self, -# completion_stream, -# model, -# logging_obj: Any, -# custom_llm_provider: Optional[str] = None, -# stream_options=None, -# make_call: Optional[Callable] = None, -# _response_headers: Optional[dict] = None, -# ): -# self.model = model -# self.make_call = make_call -# self.custom_llm_provider = custom_llm_provider -# self.logging_obj: LiteLLMLoggingObject = logging_obj -# self.completion_stream = completion_stream -# self.sent_first_chunk = False -# self.sent_last_chunk = False -# self.system_fingerprint: Optional[str] = None -# self.received_finish_reason: Optional[str] = None -# self.special_tokens = [ -# "<|assistant|>", -# "<|system|>", -# "<|user|>", -# "", -# "", -# "<|im_end|>", -# "<|im_start|>", -# ] -# self.holding_chunk = "" -# self.complete_response = "" -# self.response_uptil_now = "" -# _model_info = ( -# self.logging_obj.model_call_details.get("litellm_params", {}).get( -# "model_info", {} -# ) -# or {} -# ) -# self._hidden_params = { -# "model_id": (_model_info.get("id", None)), -# } # returned as x-litellm-model-id response header in proxy - -# self._hidden_params["additional_headers"] = process_response_headers( -# _response_headers or {} -# ) # GUARANTEE OPENAI HEADERS IN RESPONSE - -# self._response_headers = _response_headers -# self.response_id = None -# self.logging_loop = None -# self.rules = Rules() -# self.stream_options = stream_options or getattr( -# logging_obj, "stream_options", None -# ) -# self.messages = getattr(logging_obj, "messages", None) -# self.sent_stream_usage = False -# self.send_stream_usage = ( -# True if self.check_send_stream_usage(self.stream_options) else False -# ) -# self.tool_call = False -# self.chunks: List = ( -# [] -# ) # keep track of the returned chunks - used for calculating the input/output tokens for stream options -# self.is_function_call = self.check_is_function_call(logging_obj=logging_obj) - -# def __iter__(self): -# return self - -# def __aiter__(self): -# return self - -# def check_send_stream_usage(self, stream_options: Optional[dict]): -# return ( -# stream_options is not None -# and stream_options.get("include_usage", False) is True -# ) - -# def check_is_function_call(self, logging_obj) -> bool: -# if hasattr(logging_obj, "optional_params") and isinstance( -# logging_obj.optional_params, dict -# ): -# if ( -# "litellm_param_is_function_call" in logging_obj.optional_params -# and logging_obj.optional_params["litellm_param_is_function_call"] -# is True -# ): -# return True - -# return False - -# def process_chunk(self, chunk: str): -# """ -# NLP Cloud streaming returns the entire response, for each chunk. Process this, to only return the delta. -# """ -# try: -# chunk = chunk.strip() -# self.complete_response = self.complete_response.strip() - -# if chunk.startswith(self.complete_response): -# # Remove last_sent_chunk only if it appears at the start of the new chunk -# chunk = chunk[len(self.complete_response) :] - -# self.complete_response += chunk -# return chunk -# except Exception as e: -# raise e - -# def safety_checker(self) -> None: -# """ -# Fixes - https://github.com/BerriAI/litellm/issues/5158 - -# if the model enters a loop and starts repeating the same chunk again, break out of loop and raise an internalservererror - allows for retries. - -# Raises - InternalServerError, if LLM enters infinite loop while streaming -# """ -# if len(self.chunks) >= litellm.REPEATED_STREAMING_CHUNK_LIMIT: -# # Get the last n chunks -# last_chunks = self.chunks[-litellm.REPEATED_STREAMING_CHUNK_LIMIT :] - -# # Extract the relevant content from the chunks -# last_contents = [chunk.choices[0].delta.content for chunk in last_chunks] - -# # Check if all extracted contents are identical -# if all(content == last_contents[0] for content in last_contents): -# if ( -# last_contents[0] is not None -# and isinstance(last_contents[0], str) -# and len(last_contents[0]) > 2 -# ): # ignore empty content - https://github.com/BerriAI/litellm/issues/5158#issuecomment-2287156946 -# # All last n chunks are identical -# raise litellm.InternalServerError( -# message="The model is repeating the same chunk = {}.".format( -# last_contents[0] -# ), -# model="", -# llm_provider="", -# ) - -# def check_special_tokens(self, chunk: str, finish_reason: Optional[str]): -# """ -# Output parse / special tokens for sagemaker + hf streaming. -# """ -# hold = False -# if ( -# self.custom_llm_provider != "huggingface" -# and self.custom_llm_provider != "sagemaker" -# ): -# return hold, chunk - -# if finish_reason: -# for token in self.special_tokens: -# if token in chunk: -# chunk = chunk.replace(token, "") -# return hold, chunk - -# if self.sent_first_chunk is True: -# return hold, chunk - -# curr_chunk = self.holding_chunk + chunk -# curr_chunk = curr_chunk.strip() - -# for token in self.special_tokens: -# if len(curr_chunk) < len(token) and curr_chunk in token: -# hold = True -# self.holding_chunk = curr_chunk -# elif len(curr_chunk) >= len(token): -# if token in curr_chunk: -# self.holding_chunk = curr_chunk.replace(token, "") -# hold = True -# else: -# pass - -# if hold is False: # reset -# self.holding_chunk = "" -# return hold, curr_chunk - -# def handle_anthropic_text_chunk(self, chunk): -# """ -# For old anthropic models - claude-1, claude-2. - -# Claude-3 is handled from within Anthropic.py VIA ModelResponseIterator() -# """ -# str_line = chunk -# if isinstance(chunk, bytes): # Handle binary data -# str_line = chunk.decode("utf-8") # Convert bytes to string -# text = "" -# is_finished = False -# finish_reason = None -# if str_line.startswith("data:"): -# data_json = json.loads(str_line[5:]) -# type_chunk = data_json.get("type", None) -# if type_chunk == "completion": -# text = data_json.get("completion") -# finish_reason = data_json.get("stop_reason") -# if finish_reason is not None: -# is_finished = True -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif "error" in str_line: -# raise ValueError(f"Unable to parse response. Original response: {str_line}") -# else: -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } - -# def handle_vertexai_anthropic_chunk(self, chunk): -# """ -# - MessageStartEvent(message=Message(id='msg_01LeRRgvX4gwkX3ryBVgtuYZ', content=[], model='claude-3-sonnet-20240229', role='assistant', stop_reason=None, stop_sequence=None, type='message', usage=Usage(input_tokens=8, output_tokens=1)), type='message_start'); custom_llm_provider: vertex_ai -# - ContentBlockStartEvent(content_block=ContentBlock(text='', type='text'), index=0, type='content_block_start'); custom_llm_provider: vertex_ai -# - ContentBlockDeltaEvent(delta=TextDelta(text='Hello', type='text_delta'), index=0, type='content_block_delta'); custom_llm_provider: vertex_ai -# """ -# text = "" -# prompt_tokens = None -# completion_tokens = None -# is_finished = False -# finish_reason = None -# type_chunk = getattr(chunk, "type", None) -# if type_chunk == "message_start": -# message = getattr(chunk, "message", None) -# text = "" # lets us return a chunk with usage to user -# _usage = getattr(message, "usage", None) -# if _usage is not None: -# prompt_tokens = getattr(_usage, "input_tokens", None) -# completion_tokens = getattr(_usage, "output_tokens", None) -# elif type_chunk == "content_block_delta": -# """ -# Anthropic content chunk -# chunk = {'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': 'Hello'}} -# """ -# delta = getattr(chunk, "delta", None) -# if delta is not None: -# text = getattr(delta, "text", "") -# else: -# text = "" -# elif type_chunk == "message_delta": -# """ -# Anthropic -# chunk = {'type': 'message_delta', 'delta': {'stop_reason': 'max_tokens', 'stop_sequence': None}, 'usage': {'output_tokens': 10}} -# """ -# # TODO - get usage from this chunk, set in response -# delta = getattr(chunk, "delta", None) -# if delta is not None: -# finish_reason = getattr(delta, "stop_reason", "stop") -# is_finished = True -# _usage = getattr(chunk, "usage", None) -# if _usage is not None: -# prompt_tokens = getattr(_usage, "input_tokens", None) -# completion_tokens = getattr(_usage, "output_tokens", None) - -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# "prompt_tokens": prompt_tokens, -# "completion_tokens": completion_tokens, -# } - -# def handle_predibase_chunk(self, chunk): -# try: -# if not isinstance(chunk, str): -# chunk = chunk.decode( -# "utf-8" -# ) # DO NOT REMOVE this: This is required for HF inference API + Streaming -# text = "" -# is_finished = False -# finish_reason = "" -# print_verbose(f"chunk: {chunk}") -# if chunk.startswith("data:"): -# data_json = json.loads(chunk[5:]) -# print_verbose(f"data json: {data_json}") -# if "token" in data_json and "text" in data_json["token"]: -# text = data_json["token"]["text"] -# if data_json.get("details", False) and data_json["details"].get( -# "finish_reason", False -# ): -# is_finished = True -# finish_reason = data_json["details"]["finish_reason"] -# elif data_json.get( -# "generated_text", False -# ): # if full generated text exists, then stream is complete -# text = "" # don't return the final bos token -# is_finished = True -# finish_reason = "stop" -# elif data_json.get("error", False): -# raise Exception(data_json.get("error")) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif "error" in chunk: -# raise ValueError(chunk) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception as e: -# raise e - -# def handle_huggingface_chunk(self, chunk): -# try: -# if not isinstance(chunk, str): -# chunk = chunk.decode( -# "utf-8" -# ) # DO NOT REMOVE this: This is required for HF inference API + Streaming -# text = "" -# is_finished = False -# finish_reason = "" -# print_verbose(f"chunk: {chunk}") -# if chunk.startswith("data:"): -# data_json = json.loads(chunk[5:]) -# print_verbose(f"data json: {data_json}") -# if "token" in data_json and "text" in data_json["token"]: -# text = data_json["token"]["text"] -# if data_json.get("details", False) and data_json["details"].get( -# "finish_reason", False -# ): -# is_finished = True -# finish_reason = data_json["details"]["finish_reason"] -# elif data_json.get( -# "generated_text", False -# ): # if full generated text exists, then stream is complete -# text = "" # don't return the final bos token -# is_finished = True -# finish_reason = "stop" -# elif data_json.get("error", False): -# raise Exception(data_json.get("error")) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif "error" in chunk: -# raise ValueError(chunk) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception as e: -# raise e - -# def handle_ai21_chunk(self, chunk): # fake streaming -# chunk = chunk.decode("utf-8") -# data_json = json.loads(chunk) -# try: -# text = data_json["completions"][0]["data"]["text"] -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_maritalk_chunk(self, chunk): # fake streaming -# chunk = chunk.decode("utf-8") -# data_json = json.loads(chunk) -# try: -# text = data_json["answer"] -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_nlp_cloud_chunk(self, chunk): -# text = "" -# is_finished = False -# finish_reason = "" -# try: -# if "dolphin" in self.model: -# chunk = self.process_chunk(chunk=chunk) -# else: -# data_json = json.loads(chunk) -# chunk = data_json["generated_text"] -# text = chunk -# if "[DONE]" in text: -# text = text.replace("[DONE]", "") -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_aleph_alpha_chunk(self, chunk): -# chunk = chunk.decode("utf-8") -# data_json = json.loads(chunk) -# try: -# text = data_json["completions"][0]["completion"] -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_cohere_chunk(self, chunk): -# chunk = chunk.decode("utf-8") -# data_json = json.loads(chunk) -# try: -# text = "" -# is_finished = False -# finish_reason = "" -# index: Optional[int] = None -# if "index" in data_json: -# index = data_json.get("index") -# if "text" in data_json: -# text = data_json["text"] -# elif "is_finished" in data_json: -# is_finished = data_json["is_finished"] -# finish_reason = data_json["finish_reason"] -# else: -# raise Exception(data_json) -# return { -# "index": index, -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_cohere_chat_chunk(self, chunk): -# chunk = chunk.decode("utf-8") -# data_json = json.loads(chunk) -# print_verbose(f"chunk: {chunk}") -# try: -# text = "" -# is_finished = False -# finish_reason = "" -# if "text" in data_json: -# text = data_json["text"] -# elif "is_finished" in data_json and data_json["is_finished"] is True: -# is_finished = data_json["is_finished"] -# finish_reason = data_json["finish_reason"] -# else: -# return -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_azure_chunk(self, chunk): -# is_finished = False -# finish_reason = "" -# text = "" -# print_verbose(f"chunk: {chunk}") -# if "data: [DONE]" in chunk: -# text = "" -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif chunk.startswith("data:"): -# data_json = json.loads(chunk[5:]) # chunk.startswith("data:"): -# try: -# if len(data_json["choices"]) > 0: -# delta = data_json["choices"][0]["delta"] -# text = "" if delta is None else delta.get("content", "") -# if data_json["choices"][0].get("finish_reason", None): -# is_finished = True -# finish_reason = data_json["choices"][0]["finish_reason"] -# print_verbose( -# f"text: {text}; is_finished: {is_finished}; finish_reason: {finish_reason}" -# ) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError( -# f"Unable to parse response. Original response: {chunk}" -# ) -# elif "error" in chunk: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") -# else: -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } - -# def handle_replicate_chunk(self, chunk): -# try: -# text = "" -# is_finished = False -# finish_reason = "" -# if "output" in chunk: -# text = chunk["output"] -# if "status" in chunk: -# if chunk["status"] == "succeeded": -# is_finished = True -# finish_reason = "stop" -# elif chunk.get("error", None): -# raise Exception(chunk["error"]) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# except Exception: -# raise ValueError(f"Unable to parse response. Original response: {chunk}") - -# def handle_openai_chat_completion_chunk(self, chunk): -# try: -# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") -# str_line = chunk -# text = "" -# is_finished = False -# finish_reason = None -# logprobs = None -# usage = None -# if str_line and str_line.choices and len(str_line.choices) > 0: -# if ( -# str_line.choices[0].delta is not None -# and str_line.choices[0].delta.content is not None -# ): -# text = str_line.choices[0].delta.content -# else: # function/tool calling chunk - when content is None. in this case we just return the original chunk from openai -# pass -# if str_line.choices[0].finish_reason: -# is_finished = True -# finish_reason = str_line.choices[0].finish_reason - -# # checking for logprobs -# if ( -# hasattr(str_line.choices[0], "logprobs") -# and str_line.choices[0].logprobs is not None -# ): -# logprobs = str_line.choices[0].logprobs -# else: -# logprobs = None - -# usage = getattr(str_line, "usage", None) - -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# "logprobs": logprobs, -# "original_chunk": str_line, -# "usage": usage, -# } -# except Exception as e: -# raise e - -# def handle_azure_text_completion_chunk(self, chunk): -# try: -# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") -# text = "" -# is_finished = False -# finish_reason = None -# choices = getattr(chunk, "choices", []) -# if len(choices) > 0: -# text = choices[0].text -# if choices[0].finish_reason is not None: -# is_finished = True -# finish_reason = choices[0].finish_reason -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } - -# except Exception as e: -# raise e - -# def handle_openai_text_completion_chunk(self, chunk): -# try: -# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") -# text = "" -# is_finished = False -# finish_reason = None -# usage = None -# choices = getattr(chunk, "choices", []) -# if len(choices) > 0: -# text = choices[0].text -# if choices[0].finish_reason is not None: -# is_finished = True -# finish_reason = choices[0].finish_reason -# usage = getattr(chunk, "usage", None) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# "usage": usage, -# } - -# except Exception as e: -# raise e - -# def handle_baseten_chunk(self, chunk): -# try: -# chunk = chunk.decode("utf-8") -# if len(chunk) > 0: -# if chunk.startswith("data:"): -# data_json = json.loads(chunk[5:]) -# if "token" in data_json and "text" in data_json["token"]: -# return data_json["token"]["text"] -# else: -# return "" -# data_json = json.loads(chunk) -# if "model_output" in data_json: -# if ( -# isinstance(data_json["model_output"], dict) -# and "data" in data_json["model_output"] -# and isinstance(data_json["model_output"]["data"], list) -# ): -# return data_json["model_output"]["data"][0] -# elif isinstance(data_json["model_output"], str): -# return data_json["model_output"] -# elif "completion" in data_json and isinstance( -# data_json["completion"], str -# ): -# return data_json["completion"] -# else: -# raise ValueError( -# f"Unable to parse response. Original response: {chunk}" -# ) -# else: -# return "" -# else: -# return "" -# except Exception as e: -# verbose_logger.exception( -# "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format( -# str(e) -# ) -# ) -# return "" - -# def handle_cloudlfare_stream(self, chunk): -# try: -# print_verbose(f"\nRaw OpenAI Chunk\n{chunk}\n") -# chunk = chunk.decode("utf-8") -# str_line = chunk -# text = "" -# is_finished = False -# finish_reason = None - -# if "[DONE]" in chunk: -# return {"text": text, "is_finished": True, "finish_reason": "stop"} -# elif str_line.startswith("data:"): -# data_json = json.loads(str_line[5:]) -# print_verbose(f"delta content: {data_json}") -# text = data_json["response"] -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# else: -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } - -# except Exception as e: -# raise e - -# def handle_ollama_stream(self, chunk): -# try: -# if isinstance(chunk, dict): -# json_chunk = chunk -# else: -# json_chunk = json.loads(chunk) -# if "error" in json_chunk: -# raise Exception(f"Ollama Error - {json_chunk}") - -# text = "" -# is_finished = False -# finish_reason = None -# if json_chunk["done"] is True: -# text = "" -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif json_chunk["response"]: -# print_verbose(f"delta content: {json_chunk}") -# text = json_chunk["response"] -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# else: -# raise Exception(f"Ollama Error - {json_chunk}") -# except Exception as e: -# raise e - -# def handle_ollama_chat_stream(self, chunk): -# # for ollama_chat/ provider -# try: -# if isinstance(chunk, dict): -# json_chunk = chunk -# else: -# json_chunk = json.loads(chunk) -# if "error" in json_chunk: -# raise Exception(f"Ollama Error - {json_chunk}") - -# text = "" -# is_finished = False -# finish_reason = None -# if json_chunk["done"] is True: -# text = "" -# is_finished = True -# finish_reason = "stop" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# elif "message" in json_chunk: -# print_verbose(f"delta content: {json_chunk}") -# text = json_chunk["message"]["content"] -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# } -# else: -# raise Exception(f"Ollama Error - {json_chunk}") -# except Exception as e: -# raise e - -# def handle_watsonx_stream(self, chunk): -# try: -# if isinstance(chunk, dict): -# parsed_response = chunk -# elif isinstance(chunk, (str, bytes)): -# if isinstance(chunk, bytes): -# chunk = chunk.decode("utf-8") -# if "generated_text" in chunk: -# response = chunk.replace("data: ", "").strip() -# parsed_response = json.loads(response) -# else: -# return { -# "text": "", -# "is_finished": False, -# "prompt_tokens": 0, -# "completion_tokens": 0, -# } -# else: -# print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") -# raise ValueError( -# f"Unable to parse response. Original response: {chunk}" -# ) -# results = parsed_response.get("results", []) -# if len(results) > 0: -# text = results[0].get("generated_text", "") -# finish_reason = results[0].get("stop_reason") -# is_finished = finish_reason != "not_finished" -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# "prompt_tokens": results[0].get("input_token_count", 0), -# "completion_tokens": results[0].get("generated_token_count", 0), -# } -# return {"text": "", "is_finished": False} -# except Exception as e: -# raise e - -# def handle_triton_stream(self, chunk): -# try: -# if isinstance(chunk, dict): -# parsed_response = chunk -# elif isinstance(chunk, (str, bytes)): -# if isinstance(chunk, bytes): -# chunk = chunk.decode("utf-8") -# if "text_output" in chunk: -# response = chunk.replace("data: ", "").strip() -# parsed_response = json.loads(response) -# else: -# return { -# "text": "", -# "is_finished": False, -# "prompt_tokens": 0, -# "completion_tokens": 0, -# } -# else: -# print_verbose(f"chunk: {chunk} (Type: {type(chunk)})") -# raise ValueError( -# f"Unable to parse response. Original response: {chunk}" -# ) -# text = parsed_response.get("text_output", "") -# finish_reason = parsed_response.get("stop_reason") -# is_finished = parsed_response.get("is_finished", False) -# return { -# "text": text, -# "is_finished": is_finished, -# "finish_reason": finish_reason, -# "prompt_tokens": parsed_response.get("input_token_count", 0), -# "completion_tokens": parsed_response.get("generated_token_count", 0), -# } -# return {"text": "", "is_finished": False} -# except Exception as e: -# raise e - -# def handle_clarifai_completion_chunk(self, chunk): -# try: -# if isinstance(chunk, dict): -# parsed_response = chunk -# elif isinstance(chunk, (str, bytes)): -# if isinstance(chunk, bytes): -# parsed_response = chunk.decode("utf-8") -# else: -# parsed_response = chunk -# else: -# raise ValueError("Unable to parse streaming chunk") -# if isinstance(parsed_response, dict): -# data_json = parsed_response -# else: -# data_json = json.loads(parsed_response) -# text = ( -# data_json.get("outputs", "")[0] -# .get("data", "") -# .get("text", "") -# .get("raw", "") -# ) -# len( -# encoding.encode( -# data_json.get("outputs", "")[0] -# .get("input", "") -# .get("data", "") -# .get("text", "") -# .get("raw", "") -# ) -# ) -# len(encoding.encode(text)) -# return { -# "text": text, -# "is_finished": True, -# } -# except Exception as e: -# verbose_logger.exception( -# "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format( -# str(e) -# ) -# ) -# return "" - -# def model_response_creator( -# self, chunk: Optional[dict] = None, hidden_params: Optional[dict] = None -# ): -# _model = self.model -# _received_llm_provider = self.custom_llm_provider -# _logging_obj_llm_provider = self.logging_obj.model_call_details.get("custom_llm_provider", None) # type: ignore -# if ( -# _received_llm_provider == "openai" -# and _received_llm_provider != _logging_obj_llm_provider -# ): -# _model = "{}/{}".format(_logging_obj_llm_provider, _model) -# if chunk is None: -# chunk = {} -# else: -# # pop model keyword -# chunk.pop("model", None) - -# model_response = ModelResponse( -# stream=True, model=_model, stream_options=self.stream_options, **chunk -# ) -# if self.response_id is not None: -# model_response.id = self.response_id -# else: -# self.response_id = model_response.id # type: ignore -# if self.system_fingerprint is not None: -# model_response.system_fingerprint = self.system_fingerprint -# if hidden_params is not None: -# model_response._hidden_params = hidden_params -# model_response._hidden_params["custom_llm_provider"] = _logging_obj_llm_provider -# model_response._hidden_params["created_at"] = time.time() -# model_response._hidden_params = { -# **model_response._hidden_params, -# **self._hidden_params, -# } - -# if ( -# len(model_response.choices) > 0 -# and getattr(model_response.choices[0], "delta") is not None -# ): -# # do nothing, if object instantiated -# pass -# else: -# model_response.choices = [StreamingChoices(finish_reason=None)] -# return model_response - -# def is_delta_empty(self, delta: Delta) -> bool: -# is_empty = True -# if delta.content is not None: -# is_empty = False -# elif delta.tool_calls is not None: -# is_empty = False -# elif delta.function_call is not None: -# is_empty = False -# return is_empty - -# def return_processed_chunk_logic( # noqa -# self, -# completion_obj: dict, -# model_response: ModelResponseStream, -# response_obj: dict, -# ): - -# print_verbose( -# f"completion_obj: {completion_obj}, model_response.choices[0]: {model_response.choices[0]}, response_obj: {response_obj}" -# ) -# if ( -# "content" in completion_obj -# and ( -# isinstance(completion_obj["content"], str) -# and len(completion_obj["content"]) > 0 -# ) -# or ( -# "tool_calls" in completion_obj -# and completion_obj["tool_calls"] is not None -# and len(completion_obj["tool_calls"]) > 0 -# ) -# or ( -# "function_call" in completion_obj -# and completion_obj["function_call"] is not None -# ) -# ): # cannot set content of an OpenAI Object to be an empty string -# self.safety_checker() -# hold, model_response_str = self.check_special_tokens( -# chunk=completion_obj["content"], -# finish_reason=model_response.choices[0].finish_reason, -# ) # filter out bos/eos tokens from openai-compatible hf endpoints -# print_verbose(f"hold - {hold}, model_response_str - {model_response_str}") -# if hold is False: -# ## check if openai/azure chunk -# original_chunk = response_obj.get("original_chunk", None) -# if original_chunk: -# model_response.id = original_chunk.id -# self.response_id = original_chunk.id -# if len(original_chunk.choices) > 0: -# choices = [] -# for choice in original_chunk.choices: -# try: -# if isinstance(choice, BaseModel): -# choice_json = choice.model_dump() -# choice_json.pop( -# "finish_reason", None -# ) # for mistral etc. which return a value in their last chunk (not-openai compatible). -# print_verbose(f"choice_json: {choice_json}") -# choices.append(StreamingChoices(**choice_json)) -# except Exception: -# choices.append(StreamingChoices()) -# print_verbose(f"choices in streaming: {choices}") -# setattr(model_response, "choices", choices) -# else: -# return -# model_response.system_fingerprint = ( -# original_chunk.system_fingerprint -# ) -# setattr( -# model_response, -# "citations", -# getattr(original_chunk, "citations", None), -# ) -# print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") -# if self.sent_first_chunk is False: -# model_response.choices[0].delta["role"] = "assistant" -# self.sent_first_chunk = True -# elif self.sent_first_chunk is True and hasattr( -# model_response.choices[0].delta, "role" -# ): -# _initial_delta = model_response.choices[0].delta.model_dump() -# _initial_delta.pop("role", None) -# model_response.choices[0].delta = Delta(**_initial_delta) -# print_verbose( -# f"model_response.choices[0].delta: {model_response.choices[0].delta}" -# ) -# else: -# ## else -# completion_obj["content"] = model_response_str -# if self.sent_first_chunk is False: -# completion_obj["role"] = "assistant" -# self.sent_first_chunk = True - -# model_response.choices[0].delta = Delta(**completion_obj) -# _index: Optional[int] = completion_obj.get("index") -# if _index is not None: -# model_response.choices[0].index = _index -# print_verbose(f"returning model_response: {model_response}") -# return model_response -# else: -# return -# elif self.received_finish_reason is not None: -# if self.sent_last_chunk is True: -# # Bedrock returns the guardrail trace in the last chunk - we want to return this here -# if self.custom_llm_provider == "bedrock" and "trace" in model_response: -# return model_response - -# # Default - return StopIteration -# raise StopIteration -# # flush any remaining holding chunk -# if len(self.holding_chunk) > 0: -# if model_response.choices[0].delta.content is None: -# model_response.choices[0].delta.content = self.holding_chunk -# else: -# model_response.choices[0].delta.content = ( -# self.holding_chunk + model_response.choices[0].delta.content -# ) -# self.holding_chunk = "" -# # if delta is None -# _is_delta_empty = self.is_delta_empty(delta=model_response.choices[0].delta) - -# if _is_delta_empty: -# # get any function call arguments -# model_response.choices[0].finish_reason = map_finish_reason( -# finish_reason=self.received_finish_reason -# ) # ensure consistent output to openai - -# self.sent_last_chunk = True - -# return model_response -# elif ( -# model_response.choices[0].delta.tool_calls is not None -# or model_response.choices[0].delta.function_call is not None -# ): -# if self.sent_first_chunk is False: -# model_response.choices[0].delta["role"] = "assistant" -# self.sent_first_chunk = True -# return model_response -# elif ( -# len(model_response.choices) > 0 -# and hasattr(model_response.choices[0].delta, "audio") -# and model_response.choices[0].delta.audio is not None -# ): -# return model_response -# else: -# if hasattr(model_response, "usage"): -# self.chunks.append(model_response) -# return - -# def chunk_creator(self, chunk): # type: ignore # noqa: PLR0915 -# model_response = self.model_response_creator() -# response_obj: dict = {} -# try: -# # return this for all models -# completion_obj = {"content": ""} -# from litellm.litellm_core_utils.streaming_utils import ( -# generic_chunk_has_all_required_fields, -# ) -# from litellm.types.utils import GenericStreamingChunk as GChunk - -# if ( -# isinstance(chunk, dict) -# and generic_chunk_has_all_required_fields( -# chunk=chunk -# ) # check if chunk is a generic streaming chunk -# ) or ( -# self.custom_llm_provider -# and ( -# self.custom_llm_provider == "anthropic" -# or self.custom_llm_provider in litellm._custom_providers -# ) -# ): - -# if self.received_finish_reason is not None: -# if "provider_specific_fields" not in chunk: -# raise StopIteration -# anthropic_response_obj: GChunk = chunk -# completion_obj["content"] = anthropic_response_obj["text"] -# if anthropic_response_obj["is_finished"]: -# self.received_finish_reason = anthropic_response_obj[ -# "finish_reason" -# ] - -# if anthropic_response_obj["usage"] is not None: -# model_response.usage = litellm.Usage( -# **anthropic_response_obj["usage"] -# ) - -# if ( -# "tool_use" in anthropic_response_obj -# and anthropic_response_obj["tool_use"] is not None -# ): -# completion_obj["tool_calls"] = [anthropic_response_obj["tool_use"]] - -# if ( -# "provider_specific_fields" in anthropic_response_obj -# and anthropic_response_obj["provider_specific_fields"] is not None -# ): -# for key, value in anthropic_response_obj[ -# "provider_specific_fields" -# ].items(): -# setattr(model_response, key, value) - -# response_obj = anthropic_response_obj -# elif ( -# self.custom_llm_provider -# and self.custom_llm_provider == "anthropic_text" -# ): -# response_obj = self.handle_anthropic_text_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider and self.custom_llm_provider == "clarifai": -# response_obj = self.handle_clarifai_completion_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.model == "replicate" or self.custom_llm_provider == "replicate": -# response_obj = self.handle_replicate_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider and self.custom_llm_provider == "huggingface": -# response_obj = self.handle_huggingface_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider and self.custom_llm_provider == "predibase": -# response_obj = self.handle_predibase_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif ( -# self.custom_llm_provider and self.custom_llm_provider == "baseten" -# ): # baseten doesn't provide streaming -# completion_obj["content"] = self.handle_baseten_chunk(chunk) -# elif ( -# self.custom_llm_provider and self.custom_llm_provider == "ai21" -# ): # ai21 doesn't provide streaming -# response_obj = self.handle_ai21_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider and self.custom_llm_provider == "maritalk": -# response_obj = self.handle_maritalk_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider and self.custom_llm_provider == "vllm": -# completion_obj["content"] = chunk[0].outputs[0].text -# elif ( -# self.custom_llm_provider and self.custom_llm_provider == "aleph_alpha" -# ): # aleph alpha doesn't provide streaming -# response_obj = self.handle_aleph_alpha_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "nlp_cloud": -# try: -# response_obj = self.handle_nlp_cloud_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# except Exception as e: -# if self.received_finish_reason: -# raise e -# else: -# if self.sent_first_chunk is False: -# raise Exception("An unknown error occurred with the stream") -# self.received_finish_reason = "stop" -# elif self.custom_llm_provider == "vertex_ai": -# import proto # type: ignore - -# if self.model.startswith("claude-3"): -# response_obj = self.handle_vertexai_anthropic_chunk(chunk=chunk) -# if response_obj is None: -# return -# completion_obj["content"] = response_obj["text"] -# setattr(model_response, "usage", Usage()) -# if response_obj.get("prompt_tokens", None) is not None: -# model_response.usage.prompt_tokens = response_obj[ -# "prompt_tokens" -# ] -# if response_obj.get("completion_tokens", None) is not None: -# model_response.usage.completion_tokens = response_obj[ -# "completion_tokens" -# ] -# if hasattr(model_response.usage, "prompt_tokens"): -# model_response.usage.total_tokens = ( -# getattr(model_response.usage, "total_tokens", 0) -# + model_response.usage.prompt_tokens -# ) -# if hasattr(model_response.usage, "completion_tokens"): -# model_response.usage.total_tokens = ( -# getattr(model_response.usage, "total_tokens", 0) -# + model_response.usage.completion_tokens -# ) - -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif hasattr(chunk, "candidates") is True: -# try: -# try: -# completion_obj["content"] = chunk.text -# except Exception as e: -# if "Part has no text." in str(e): -# ## check for function calling -# function_call = ( -# chunk.candidates[0].content.parts[0].function_call -# ) - -# args_dict = {} - -# # Check if it's a RepeatedComposite instance -# for key, val in function_call.args.items(): -# if isinstance( -# val, -# proto.marshal.collections.repeated.RepeatedComposite, -# ): -# # If so, convert to list -# args_dict[key] = [v for v in val] -# else: -# args_dict[key] = val - -# try: -# args_str = json.dumps(args_dict) -# except Exception as e: -# raise e -# _delta_obj = litellm.utils.Delta( -# content=None, -# tool_calls=[ -# { -# "id": f"call_{str(uuid.uuid4())}", -# "function": { -# "arguments": args_str, -# "name": function_call.name, -# }, -# "type": "function", -# } -# ], -# ) -# _streaming_response = StreamingChoices(delta=_delta_obj) -# _model_response = ModelResponse(stream=True) -# _model_response.choices = [_streaming_response] -# response_obj = {"original_chunk": _model_response} -# else: -# raise e -# if ( -# hasattr(chunk.candidates[0], "finish_reason") -# and chunk.candidates[0].finish_reason.name -# != "FINISH_REASON_UNSPECIFIED" -# ): # every non-final chunk in vertex ai has this -# self.received_finish_reason = chunk.candidates[ -# 0 -# ].finish_reason.name -# except Exception: -# if chunk.candidates[0].finish_reason.name == "SAFETY": -# raise Exception( -# f"The response was blocked by VertexAI. {str(chunk)}" -# ) -# else: -# completion_obj["content"] = str(chunk) -# elif self.custom_llm_provider == "cohere": -# response_obj = self.handle_cohere_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "cohere_chat": -# response_obj = self.handle_cohere_chat_chunk(chunk) -# if response_obj is None: -# return -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] - -# elif self.custom_llm_provider == "petals": -# if len(self.completion_stream) == 0: -# if self.received_finish_reason is not None: -# raise StopIteration -# else: -# self.received_finish_reason = "stop" -# chunk_size = 30 -# new_chunk = self.completion_stream[:chunk_size] -# completion_obj["content"] = new_chunk -# self.completion_stream = self.completion_stream[chunk_size:] -# elif self.custom_llm_provider == "palm": -# # fake streaming -# response_obj = {} -# if len(self.completion_stream) == 0: -# if self.received_finish_reason is not None: -# raise StopIteration -# else: -# self.received_finish_reason = "stop" -# chunk_size = 30 -# new_chunk = self.completion_stream[:chunk_size] -# completion_obj["content"] = new_chunk -# self.completion_stream = self.completion_stream[chunk_size:] -# elif self.custom_llm_provider == "ollama": -# response_obj = self.handle_ollama_stream(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "ollama_chat": -# response_obj = self.handle_ollama_chat_stream(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "cloudflare": -# response_obj = self.handle_cloudlfare_stream(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "watsonx": -# response_obj = self.handle_watsonx_stream(chunk) -# completion_obj["content"] = response_obj["text"] -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "triton": -# response_obj = self.handle_triton_stream(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "text-completion-openai": -# response_obj = self.handle_openai_text_completion_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# if response_obj["usage"] is not None: -# model_response.usage = litellm.Usage( -# prompt_tokens=response_obj["usage"].prompt_tokens, -# completion_tokens=response_obj["usage"].completion_tokens, -# total_tokens=response_obj["usage"].total_tokens, -# ) -# elif self.custom_llm_provider == "text-completion-codestral": -# response_obj = litellm.MistralTextCompletionConfig()._chunk_parser( -# chunk -# ) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# if "usage" in response_obj is not None: -# model_response.usage = litellm.Usage( -# prompt_tokens=response_obj["usage"].prompt_tokens, -# completion_tokens=response_obj["usage"].completion_tokens, -# total_tokens=response_obj["usage"].total_tokens, -# ) -# elif self.custom_llm_provider == "azure_text": -# response_obj = self.handle_azure_text_completion_chunk(chunk) -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# elif self.custom_llm_provider == "cached_response": -# response_obj = { -# "text": chunk.choices[0].delta.content, -# "is_finished": True, -# "finish_reason": chunk.choices[0].finish_reason, -# "original_chunk": chunk, -# "tool_calls": ( -# chunk.choices[0].delta.tool_calls -# if hasattr(chunk.choices[0].delta, "tool_calls") -# else None -# ), -# } - -# completion_obj["content"] = response_obj["text"] -# if response_obj["tool_calls"] is not None: -# completion_obj["tool_calls"] = response_obj["tool_calls"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if hasattr(chunk, "id"): -# model_response.id = chunk.id -# self.response_id = chunk.id -# if hasattr(chunk, "system_fingerprint"): -# self.system_fingerprint = chunk.system_fingerprint -# if response_obj["is_finished"]: -# self.received_finish_reason = response_obj["finish_reason"] -# else: # openai / azure chat model -# if self.custom_llm_provider == "azure": -# if hasattr(chunk, "model"): -# # for azure, we need to pass the model from the orignal chunk -# self.model = chunk.model -# response_obj = self.handle_openai_chat_completion_chunk(chunk) -# if response_obj is None: -# return -# completion_obj["content"] = response_obj["text"] -# print_verbose(f"completion obj content: {completion_obj['content']}") -# if response_obj["is_finished"]: -# if response_obj["finish_reason"] == "error": -# raise Exception( -# "{} raised a streaming error - finish_reason: error, no content string given. Received Chunk={}".format( -# self.custom_llm_provider, response_obj -# ) -# ) -# self.received_finish_reason = response_obj["finish_reason"] -# if response_obj.get("original_chunk", None) is not None: -# if hasattr(response_obj["original_chunk"], "id"): -# model_response.id = response_obj["original_chunk"].id -# self.response_id = model_response.id -# if hasattr(response_obj["original_chunk"], "system_fingerprint"): -# model_response.system_fingerprint = response_obj[ -# "original_chunk" -# ].system_fingerprint -# self.system_fingerprint = response_obj[ -# "original_chunk" -# ].system_fingerprint -# if response_obj["logprobs"] is not None: -# model_response.choices[0].logprobs = response_obj["logprobs"] - -# if response_obj["usage"] is not None: -# if isinstance(response_obj["usage"], dict): -# model_response.usage = litellm.Usage( -# prompt_tokens=response_obj["usage"].get( -# "prompt_tokens", None -# ) -# or None, -# completion_tokens=response_obj["usage"].get( -# "completion_tokens", None -# ) -# or None, -# total_tokens=response_obj["usage"].get("total_tokens", None) -# or None, -# ) -# elif isinstance(response_obj["usage"], BaseModel): -# model_response.usage = litellm.Usage( -# **response_obj["usage"].model_dump() -# ) - -# model_response.model = self.model -# print_verbose( -# f"model_response finish reason 3: {self.received_finish_reason}; response_obj={response_obj}" -# ) -# ## FUNCTION CALL PARSING -# if ( -# response_obj is not None -# and response_obj.get("original_chunk", None) is not None -# ): # function / tool calling branch - only set for openai/azure compatible endpoints -# # enter this branch when no content has been passed in response -# original_chunk = response_obj.get("original_chunk", None) -# model_response.id = original_chunk.id -# self.response_id = original_chunk.id -# if original_chunk.choices and len(original_chunk.choices) > 0: -# delta = original_chunk.choices[0].delta -# if delta is not None and ( -# delta.function_call is not None or delta.tool_calls is not None -# ): -# try: -# model_response.system_fingerprint = ( -# original_chunk.system_fingerprint -# ) -# ## AZURE - check if arguments is not None -# if ( -# original_chunk.choices[0].delta.function_call -# is not None -# ): -# if ( -# getattr( -# original_chunk.choices[0].delta.function_call, -# "arguments", -# ) -# is None -# ): -# original_chunk.choices[ -# 0 -# ].delta.function_call.arguments = "" -# elif original_chunk.choices[0].delta.tool_calls is not None: -# if isinstance( -# original_chunk.choices[0].delta.tool_calls, list -# ): -# for t in original_chunk.choices[0].delta.tool_calls: -# if hasattr(t, "functions") and hasattr( -# t.functions, "arguments" -# ): -# if ( -# getattr( -# t.function, -# "arguments", -# ) -# is None -# ): -# t.function.arguments = "" -# _json_delta = delta.model_dump() -# print_verbose(f"_json_delta: {_json_delta}") -# if "role" not in _json_delta or _json_delta["role"] is None: -# _json_delta["role"] = ( -# "assistant" # mistral's api returns role as None -# ) -# if "tool_calls" in _json_delta and isinstance( -# _json_delta["tool_calls"], list -# ): -# for tool in _json_delta["tool_calls"]: -# if ( -# isinstance(tool, dict) -# and "function" in tool -# and isinstance(tool["function"], dict) -# and ("type" not in tool or tool["type"] is None) -# ): -# # if function returned but type set to None - mistral's api returns type: None -# tool["type"] = "function" -# model_response.choices[0].delta = Delta(**_json_delta) -# except Exception as e: -# verbose_logger.exception( -# "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format( -# str(e) -# ) -# ) -# model_response.choices[0].delta = Delta() -# elif ( -# delta is not None and getattr(delta, "audio", None) is not None -# ): -# model_response.choices[0].delta.audio = delta.audio -# else: -# try: -# delta = ( -# dict() -# if original_chunk.choices[0].delta is None -# else dict(original_chunk.choices[0].delta) -# ) -# print_verbose(f"original delta: {delta}") -# model_response.choices[0].delta = Delta(**delta) -# print_verbose( -# f"new delta: {model_response.choices[0].delta}" -# ) -# except Exception: -# model_response.choices[0].delta = Delta() -# else: -# if ( -# self.stream_options is not None -# and self.stream_options["include_usage"] is True -# ): -# return model_response -# return -# print_verbose( -# f"model_response.choices[0].delta: {model_response.choices[0].delta}; completion_obj: {completion_obj}" -# ) -# print_verbose(f"self.sent_first_chunk: {self.sent_first_chunk}") - -# ## CHECK FOR TOOL USE -# if "tool_calls" in completion_obj and len(completion_obj["tool_calls"]) > 0: -# if self.is_function_call is True: # user passed in 'functions' param -# completion_obj["function_call"] = completion_obj["tool_calls"][0][ -# "function" -# ] -# completion_obj["tool_calls"] = None - -# self.tool_call = True - -# ## RETURN ARG -# return self.return_processed_chunk_logic( -# completion_obj=completion_obj, -# model_response=model_response, # type: ignore -# response_obj=response_obj, -# ) - -# except StopIteration: -# raise StopIteration -# except Exception as e: -# traceback.format_exc() -# e.message = str(e) -# raise exception_type( -# model=self.model, -# custom_llm_provider=self.custom_llm_provider, -# original_exception=e, -# ) - -# def set_logging_event_loop(self, loop): -# """ -# import litellm, asyncio - -# loop = asyncio.get_event_loop() # 👈 gets the current event loop - -# response = litellm.completion(.., stream=True) - -# response.set_logging_event_loop(loop=loop) # 👈 enables async_success callbacks for sync logging - -# for chunk in response: -# ... -# """ -# self.logging_loop = loop - -# def run_success_logging_and_cache_storage(self, processed_chunk, cache_hit: bool): -# """ -# Runs success logging in a thread and adds the response to the cache -# """ -# if litellm.disable_streaming_logging is True: -# """ -# [NOT RECOMMENDED] -# Set this via `litellm.disable_streaming_logging = True`. - -# Disables streaming logging. -# """ -# return -# ## ASYNC LOGGING -# # Create an event loop for the new thread -# if self.logging_loop is not None: -# future = asyncio.run_coroutine_threadsafe( -# self.logging_obj.async_success_handler( -# processed_chunk, None, None, cache_hit -# ), -# loop=self.logging_loop, -# ) -# future.result() -# else: -# asyncio.run( -# self.logging_obj.async_success_handler( -# processed_chunk, None, None, cache_hit -# ) -# ) -# ## SYNC LOGGING -# self.logging_obj.success_handler(processed_chunk, None, None, cache_hit) - -# ## Sync store in cache -# if self.logging_obj._llm_caching_handler is not None: -# self.logging_obj._llm_caching_handler._sync_add_streaming_response_to_cache( -# processed_chunk -# ) - -# def finish_reason_handler(self): -# model_response = self.model_response_creator() -# complete_streaming_response = litellm.stream_chunk_builder( -# chunks=self.chunks -# ) -# _finish_reason = complete_streaming_response.choices[0].finish_reason - -# print(f"_finish_reason: {_finish_reason}") -# if _finish_reason is not None: -# model_response.choices[0].finish_reason = _finish_reason -# else: -# model_response.choices[0].finish_reason = "stop" - -# ## if tool use -# if ( -# model_response.choices[0].finish_reason == "stop" and self.tool_call -# ): # don't overwrite for other - potential error finish reasons -# model_response.choices[0].finish_reason = "tool_calls" -# return model_response - -# def __next__(self): # noqa: PLR0915 -# cache_hit = False -# if ( -# self.custom_llm_provider is not None -# and self.custom_llm_provider == "cached_response" -# ): -# cache_hit = True -# try: -# if self.completion_stream is None: -# self.fetch_sync_stream() -# while True: -# if ( -# isinstance(self.completion_stream, str) -# or isinstance(self.completion_stream, bytes) -# or isinstance(self.completion_stream, ModelResponse) -# ): -# chunk = self.completion_stream -# else: -# chunk = next(self.completion_stream) -# if chunk is not None and chunk != b"": -# print_verbose( -# f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}" -# ) -# response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk) -# print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}") - -# if response is None: -# continue -# ## LOGGING -# threading.Thread( -# target=self.run_success_logging_and_cache_storage, -# args=(response, cache_hit), -# ).start() # log response -# choice = response.choices[0] -# if isinstance(choice, StreamingChoices): -# self.response_uptil_now += choice.delta.get("content", "") or "" -# else: -# self.response_uptil_now += "" -# self.rules.post_call_rules( -# input=self.response_uptil_now, model=self.model -# ) -# # HANDLE STREAM OPTIONS -# self.chunks.append(response) -# if hasattr( -# response, "usage" -# ): # remove usage from chunk, only send on final chunk -# # Convert the object to a dictionary -# obj_dict = response.dict() - -# # Remove an attribute (e.g., 'attr2') -# if "usage" in obj_dict: -# del obj_dict["usage"] - -# # Create a new object without the removed attribute -# response = self.model_response_creator( -# chunk=obj_dict, hidden_params=response._hidden_params -# ) -# # add usage as hidden param -# if self.sent_last_chunk is True and self.stream_options is None: -# usage = calculate_total_usage(chunks=self.chunks) -# response._hidden_params["usage"] = usage -# # RETURN RESULT -# return response - -# except StopIteration: -# if self.sent_last_chunk is True: -# complete_streaming_response = litellm.stream_chunk_builder( -# chunks=self.chunks, messages=self.messages -# ) -# response = self.model_response_creator() -# if complete_streaming_response is not None: -# setattr( -# response, -# "usage", -# getattr(complete_streaming_response, "usage"), -# ) - -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.success_handler, -# args=(response, None, None, cache_hit), -# ).start() # log response - -# if self.sent_stream_usage is False and self.send_stream_usage is True: -# self.sent_stream_usage = True -# return response -# raise # Re-raise StopIteration -# else: -# self.sent_last_chunk = True -# processed_chunk = self.finish_reason_handler() -# if self.stream_options is None: # add usage as hidden param -# usage = calculate_total_usage(chunks=self.chunks) -# processed_chunk._hidden_params["usage"] = usage -# ## LOGGING -# threading.Thread( -# target=self.run_success_logging_and_cache_storage, -# args=(processed_chunk, cache_hit), -# ).start() # log response -# return processed_chunk -# except Exception as e: -# traceback_exception = traceback.format_exc() -# # LOG FAILURE - handle streaming failure logging in the _next_ object, remove `handle_failure` once it's deprecated -# threading.Thread( -# target=self.logging_obj.failure_handler, args=(e, traceback_exception) -# ).start() -# if isinstance(e, OpenAIError): -# raise e -# else: -# raise exception_type( -# model=self.model, -# original_exception=e, -# custom_llm_provider=self.custom_llm_provider, -# ) - -# def fetch_sync_stream(self): -# if self.completion_stream is None and self.make_call is not None: -# # Call make_call to get the completion stream -# self.completion_stream = self.make_call(client=litellm.module_level_client) -# self._stream_iter = self.completion_stream.__iter__() - -# return self.completion_stream - -# async def fetch_stream(self): -# if self.completion_stream is None and self.make_call is not None: -# # Call make_call to get the completion stream -# self.completion_stream = await self.make_call( -# client=litellm.module_level_aclient -# ) -# self._stream_iter = self.completion_stream.__aiter__() - -# return self.completion_stream - -# async def __anext__(self): # noqa: PLR0915 -# cache_hit = False -# if ( -# self.custom_llm_provider is not None -# and self.custom_llm_provider == "cached_response" -# ): -# cache_hit = True -# try: -# if self.completion_stream is None: -# await self.fetch_stream() - -# if ( -# self.custom_llm_provider == "openai" -# or self.custom_llm_provider == "azure" -# or self.custom_llm_provider == "custom_openai" -# or self.custom_llm_provider == "text-completion-openai" -# or self.custom_llm_provider == "text-completion-codestral" -# or self.custom_llm_provider == "azure_text" -# or self.custom_llm_provider == "anthropic" -# or self.custom_llm_provider == "anthropic_text" -# or self.custom_llm_provider == "huggingface" -# or self.custom_llm_provider == "ollama" -# or self.custom_llm_provider == "ollama_chat" -# or self.custom_llm_provider == "vertex_ai" -# or self.custom_llm_provider == "vertex_ai_beta" -# or self.custom_llm_provider == "sagemaker" -# or self.custom_llm_provider == "sagemaker_chat" -# or self.custom_llm_provider == "gemini" -# or self.custom_llm_provider == "replicate" -# or self.custom_llm_provider == "cached_response" -# or self.custom_llm_provider == "predibase" -# or self.custom_llm_provider == "databricks" -# or self.custom_llm_provider == "bedrock" -# or self.custom_llm_provider == "triton" -# or self.custom_llm_provider == "watsonx" -# or self.custom_llm_provider in litellm.openai_compatible_endpoints -# or self.custom_llm_provider in litellm._custom_providers -# ): -# async for chunk in self.completion_stream: -# if chunk == "None" or chunk is None: -# raise Exception -# elif ( -# self.custom_llm_provider == "gemini" -# and hasattr(chunk, "parts") -# and len(chunk.parts) == 0 -# ): -# continue -# # chunk_creator() does logging/stream chunk building. We need to let it know its being called in_async_func, so we don't double add chunks. -# # __anext__ also calls async_success_handler, which does logging -# print_verbose(f"PROCESSED ASYNC CHUNK PRE CHUNK CREATOR: {chunk}") - -# processed_chunk: Optional[ModelResponse] = self.chunk_creator( -# chunk=chunk -# ) -# print_verbose( -# f"PROCESSED ASYNC CHUNK POST CHUNK CREATOR: {processed_chunk}" -# ) -# if processed_chunk is None: -# continue -# ## LOGGING -# ## LOGGING -# executor.submit( -# self.logging_obj.success_handler, -# result=processed_chunk, -# start_time=None, -# end_time=None, -# cache_hit=cache_hit, -# ) - -# asyncio.create_task( -# self.logging_obj.async_success_handler( -# processed_chunk, cache_hit=cache_hit -# ) -# ) - -# if self.logging_obj._llm_caching_handler is not None: -# asyncio.create_task( -# self.logging_obj._llm_caching_handler._add_streaming_response_to_cache( -# processed_chunk=processed_chunk, -# ) -# ) - -# choice = processed_chunk.choices[0] -# if isinstance(choice, StreamingChoices): -# self.response_uptil_now += choice.delta.get("content", "") or "" -# else: -# self.response_uptil_now += "" -# self.rules.post_call_rules( -# input=self.response_uptil_now, model=self.model -# ) -# self.chunks.append(processed_chunk) -# if hasattr( -# processed_chunk, "usage" -# ): # remove usage from chunk, only send on final chunk -# # Convert the object to a dictionary -# obj_dict = processed_chunk.dict() - -# # Remove an attribute (e.g., 'attr2') -# if "usage" in obj_dict: -# del obj_dict["usage"] - -# # Create a new object without the removed attribute -# processed_chunk = self.model_response_creator(chunk=obj_dict) -# print_verbose(f"final returned processed chunk: {processed_chunk}") -# return processed_chunk -# raise StopAsyncIteration -# else: # temporary patch for non-aiohttp async calls -# # example - boto3 bedrock llms -# while True: -# if isinstance(self.completion_stream, str) or isinstance( -# self.completion_stream, bytes -# ): -# chunk = self.completion_stream -# else: -# chunk = next(self.completion_stream) -# if chunk is not None and chunk != b"": -# print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}") -# processed_chunk: Optional[ModelResponse] = self.chunk_creator( -# chunk=chunk -# ) -# print_verbose( -# f"PROCESSED CHUNK POST CHUNK CREATOR: {processed_chunk}" -# ) -# if processed_chunk is None: -# continue -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.success_handler, -# args=(processed_chunk, None, None, cache_hit), -# ).start() # log processed_chunk -# asyncio.create_task( -# self.logging_obj.async_success_handler( -# processed_chunk, cache_hit=cache_hit -# ) -# ) - -# choice = processed_chunk.choices[0] -# if isinstance(choice, StreamingChoices): -# self.response_uptil_now += ( -# choice.delta.get("content", "") or "" -# ) -# else: -# self.response_uptil_now += "" -# self.rules.post_call_rules( -# input=self.response_uptil_now, model=self.model -# ) -# # RETURN RESULT -# self.chunks.append(processed_chunk) -# return processed_chunk -# except (StopAsyncIteration, StopIteration): -# if self.sent_last_chunk is True: -# # log the final chunk with accurate streaming values -# complete_streaming_response = litellm.stream_chunk_builder( -# chunks=self.chunks, messages=self.messages -# ) -# response = self.model_response_creator() -# if complete_streaming_response is not None: -# setattr( -# response, -# "usage", -# getattr(complete_streaming_response, "usage"), -# ) -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.success_handler, -# args=(response, None, None, cache_hit), -# ).start() # log response -# asyncio.create_task( -# self.logging_obj.async_success_handler( -# response, cache_hit=cache_hit -# ) -# ) -# if self.sent_stream_usage is False and self.send_stream_usage is True: -# self.sent_stream_usage = True -# return response -# raise StopAsyncIteration # Re-raise StopIteration -# else: -# self.sent_last_chunk = True -# processed_chunk = self.finish_reason_handler() -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.success_handler, -# args=(processed_chunk, None, None, cache_hit), -# ).start() # log response -# asyncio.create_task( -# self.logging_obj.async_success_handler( -# processed_chunk, cache_hit=cache_hit -# ) -# ) -# return processed_chunk -# except httpx.TimeoutException as e: # if httpx read timeout error occues -# traceback_exception = traceback.format_exc() -# ## ADD DEBUG INFORMATION - E.G. LITELLM REQUEST TIMEOUT -# traceback_exception += "\nLiteLLM Default Request Timeout - {}".format( -# litellm.request_timeout -# ) -# if self.logging_obj is not None: -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.failure_handler, -# args=(e, traceback_exception), -# ).start() # log response -# # Handle any exceptions that might occur during streaming -# asyncio.create_task( -# self.logging_obj.async_failure_handler(e, traceback_exception) -# ) -# raise e -# except Exception as e: -# traceback_exception = traceback.format_exc() -# if self.logging_obj is not None: -# ## LOGGING -# threading.Thread( -# target=self.logging_obj.failure_handler, -# args=(e, traceback_exception), -# ).start() # log response -# # Handle any exceptions that might occur during streaming -# asyncio.create_task( -# self.logging_obj.async_failure_handler(e, traceback_exception) # type: ignore -# ) -# ## Map to OpenAI Exception -# raise exception_type( -# model=self.model, -# custom_llm_provider=self.custom_llm_provider, -# original_exception=e, -# completion_kwargs={}, -# extra_kwargs={}, -# ) - - class TextCompletionStreamWrapper: def __init__( self, @@ -7977,7 +5869,6 @@ def get_valid_models() -> List[str]: if expected_provider_key in environ_keys: # key is set valid_providers.append(provider) - for provider in valid_providers: if provider == "azure": valid_models.append("Azure-LLM") @@ -8253,10 +6144,13 @@ def validate_chat_completion_user_messages(messages: List[AllMessageValues]): if isinstance(item, dict): if item.get("type") not in ValidUserMessageContentTypes: raise Exception("invalid content type") - except Exception: - raise Exception( - f"Invalid user message={m} at index {idx}. Please ensure all user messages are valid OpenAI chat completion messages." - ) + except Exception as e: + if "invalid content type" in str(e): + raise Exception( + f"Invalid user message={m} at index {idx}. Please ensure all user messages are valid OpenAI chat completion messages." + ) + else: + raise e return messages diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index e8aeac2cb..48b25523e 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1898,7 +1898,8 @@ "supports_function_calling": true, "tool_use_system_prompt_tokens": 264, "supports_assistant_prefill": true, - "supports_prompt_caching": true + "supports_prompt_caching": true, + "supports_pdf_input": true }, "claude-3-opus-20240229": { "max_tokens": 4096, diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py index 4f9cd9c25..96004eb4e 100644 --- a/tests/llm_translation/base_llm_unit_tests.py +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -44,3 +44,30 @@ class BaseLLMChatTest(ABC): messages=messages, ) assert response is not None + + @pytest.fixture + def pdf_messages(self): + import base64 + + import requests + + # URL of the file + url = "https://storage.googleapis.com/cloud-samples-data/generative-ai/pdf/2403.05530.pdf" + + response = requests.get(url) + file_data = response.content + + encoded_file = base64.b64encode(file_data).decode("utf-8") + url = f"data:application/pdf;base64,{encoded_file}" + + image_content = [ + {"type": "text", "text": "What's this file about?"}, + { + "type": "image_url", + "image_url": {"url": url}, + }, + ] + + image_messages = [{"role": "user", "content": image_content}] + + return image_messages diff --git a/tests/llm_translation/test_anthropic_completion.py b/tests/llm_translation/test_anthropic_completion.py index 46f01e0ec..9d7c9af73 100644 --- a/tests/llm_translation/test_anthropic_completion.py +++ b/tests/llm_translation/test_anthropic_completion.py @@ -36,6 +36,7 @@ from litellm.types.llms.anthropic import AnthropicResponse from litellm.llms.anthropic.common_utils import process_anthropic_headers from httpx import Headers +from base_llm_unit_tests import BaseLLMChatTest def test_anthropic_completion_messages_translation(): @@ -624,3 +625,40 @@ def test_anthropic_tool_helper(cache_control_location): tool = AnthropicConfig()._map_tool_helper(tool=tool) assert tool["cache_control"] == {"type": "ephemeral"} + + +from litellm import completion + + +class TestAnthropicCompletion(BaseLLMChatTest): + def get_base_completion_call_args(self) -> dict: + return {"model": "claude-3-haiku-20240307"} + + def test_pdf_handling(self, pdf_messages): + from litellm.llms.custom_httpx.http_handler import HTTPHandler + from litellm.types.llms.anthropic import AnthropicMessagesDocumentParam + import json + + client = HTTPHandler() + + with patch.object(client, "post", new=MagicMock()) as mock_client: + response = completion( + model="claude-3-5-sonnet-20241022", + messages=pdf_messages, + client=client, + ) + + mock_client.assert_called_once() + + json_data = json.loads(mock_client.call_args.kwargs["data"]) + headers = mock_client.call_args.kwargs["headers"] + + assert headers["anthropic-beta"] == "pdfs-2024-09-25" + + json_data["messages"][0]["role"] == "user" + _document_validation = AnthropicMessagesDocumentParam( + **json_data["messages"][0]["content"][1] + ) + assert _document_validation["type"] == "document" + assert _document_validation["source"]["media_type"] == "application/pdf" + assert _document_validation["source"]["type"] == "base64" diff --git a/tests/local_testing/test_get_llm_provider.py b/tests/local_testing/test_get_llm_provider.py index f7126cec0..6654c10c2 100644 --- a/tests/local_testing/test_get_llm_provider.py +++ b/tests/local_testing/test_get_llm_provider.py @@ -169,3 +169,11 @@ def test_get_llm_provider_hosted_vllm(): assert custom_llm_provider == "hosted_vllm" assert model == "llama-3.1-70b-instruct" assert dynamic_api_key == "" + + +def test_get_llm_provider_watson_text(): + model, custom_llm_provider, dynamic_api_key, api_base = litellm.get_llm_provider( + model="watsonx_text/watson-text-to-speech", + ) + assert custom_llm_provider == "watsonx_text" + assert model == "watson-text-to-speech" diff --git a/tests/local_testing/test_get_model_list.py b/tests/local_testing/test_get_model_list.py deleted file mode 100644 index 7663eebf5..000000000 --- a/tests/local_testing/test_get_model_list.py +++ /dev/null @@ -1,11 +0,0 @@ -import os, sys, traceback - -sys.path.insert( - 0, os.path.abspath("../..") -) # Adds the parent directory to the system path -import litellm -from litellm import get_model_list - -print(get_model_list()) -print(get_model_list()) -# print(litellm.model_list) diff --git a/tests/local_testing/test_opentelemetry_unit_tests.py b/tests/local_testing/test_opentelemetry_unit_tests.py deleted file mode 100644 index 530adc6ab..000000000 --- a/tests/local_testing/test_opentelemetry_unit_tests.py +++ /dev/null @@ -1,41 +0,0 @@ -# What is this? -## Unit tests for opentelemetry integration - -# What is this? -## Unit test for presidio pii masking -import sys, os, asyncio, time, random -from datetime import datetime -import traceback -from dotenv import load_dotenv - -load_dotenv() -import os -import asyncio - -sys.path.insert( - 0, os.path.abspath("../..") -) # Adds the parent directory to the system path -import pytest -import litellm -from unittest.mock import patch, MagicMock, AsyncMock - - -@pytest.mark.asyncio -async def test_opentelemetry_integration(): - """ - Unit test to confirm the parent otel span is ended - """ - - parent_otel_span = MagicMock() - litellm.callbacks = ["otel"] - - await litellm.acompletion( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Hello, world!"}], - mock_response="Hey!", - metadata={"litellm_parent_otel_span": parent_otel_span}, - ) - - await asyncio.sleep(1) - - parent_otel_span.end.assert_called_once() diff --git a/tests/local_testing/test_utils.py b/tests/local_testing/test_utils.py index 5aa3b610c..b3f8208bf 100644 --- a/tests/local_testing/test_utils.py +++ b/tests/local_testing/test_utils.py @@ -943,3 +943,24 @@ def test_validate_chat_completion_user_messages(messages, expected_bool): ## Invalid message with pytest.raises(Exception): validate_chat_completion_user_messages(messages=messages) + + +def test_models_by_provider(): + """ + Make sure all providers from model map are in the valid providers list + """ + from litellm import models_by_provider + + providers = set() + for k, v in litellm.model_cost.items(): + if "_" in v["litellm_provider"] and "-" in v["litellm_provider"]: + continue + elif k == "sample_spec": + continue + elif v["litellm_provider"] == "sagemaker": + continue + else: + providers.add(v["litellm_provider"]) + + for provider in providers: + assert provider in models_by_provider.keys() diff --git a/tests/logging_callback_tests/base_test.py b/tests/logging_callback_tests/base_test.py new file mode 100644 index 000000000..0d1e7dfcf --- /dev/null +++ b/tests/logging_callback_tests/base_test.py @@ -0,0 +1,100 @@ +import asyncio +import httpx +import json +import pytest +import sys +from typing import Any, Dict, List +from unittest.mock import MagicMock, Mock, patch +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import litellm +from litellm.exceptions import BadRequestError +from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler +from litellm.utils import CustomStreamWrapper +from litellm.types.utils import ModelResponse + +# test_example.py +from abc import ABC, abstractmethod + + +class BaseLoggingCallbackTest(ABC): + """ + Abstract base test class that enforces a common test across all test classes. + """ + + @pytest.fixture + def mock_response_obj(self): + from litellm.types.utils import ( + ModelResponse, + Choices, + Message, + ChatCompletionMessageToolCall, + Function, + Usage, + CompletionTokensDetailsWrapper, + PromptTokensDetailsWrapper, + ) + + # Create a mock response object with the structure you need + return ModelResponse( + id="chatcmpl-ASId3YJWagBpBskWfoNEMPFSkmrEw", + created=1731308157, + model="gpt-4o-mini-2024-07-18", + object="chat.completion", + system_fingerprint="fp_0ba0d124f1", + choices=[ + Choices( + finish_reason="tool_calls", + index=0, + message=Message( + content=None, + role="assistant", + tool_calls=[ + ChatCompletionMessageToolCall( + function=Function( + arguments='{"city": "New York"}', name="get_weather" + ), + id="call_PngsQS5YGmIZKnswhnUOnOVb", + type="function", + ), + ChatCompletionMessageToolCall( + function=Function( + arguments='{"city": "New York"}', name="get_news" + ), + id="call_1zsDThBu0VSK7KuY7eCcJBnq", + type="function", + ), + ], + function_call=None, + ), + ) + ], + usage=Usage( + completion_tokens=46, + prompt_tokens=86, + total_tokens=132, + completion_tokens_details=CompletionTokensDetailsWrapper( + accepted_prediction_tokens=0, + audio_tokens=0, + reasoning_tokens=0, + rejected_prediction_tokens=0, + text_tokens=None, + ), + prompt_tokens_details=PromptTokensDetailsWrapper( + audio_tokens=0, cached_tokens=0, text_tokens=None, image_tokens=None + ), + ), + service_tier=None, + ) + + @abstractmethod + def test_parallel_tool_calls(self, mock_response_obj: ModelResponse): + """ + Check if parallel tool calls are correctly logged by Logging callback + + Relevant issue - https://github.com/BerriAI/litellm/issues/6677 + """ + pass diff --git a/tests/logging_callback_tests/test_opentelemetry_unit_tests.py b/tests/logging_callback_tests/test_opentelemetry_unit_tests.py new file mode 100644 index 000000000..b0d09562c --- /dev/null +++ b/tests/logging_callback_tests/test_opentelemetry_unit_tests.py @@ -0,0 +1,58 @@ +# What is this? +## Unit tests for opentelemetry integration + +# What is this? +## Unit test for presidio pii masking +import sys, os, asyncio, time, random +from datetime import datetime +import traceback +from dotenv import load_dotenv + +load_dotenv() +import os +import asyncio + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import pytest +import litellm +from unittest.mock import patch, MagicMock, AsyncMock +from base_test import BaseLoggingCallbackTest +from litellm.types.utils import ModelResponse + + +class TestOpentelemetryUnitTests(BaseLoggingCallbackTest): + def test_parallel_tool_calls(self, mock_response_obj: ModelResponse): + tool_calls = mock_response_obj.choices[0].message.tool_calls + from litellm.integrations.opentelemetry import OpenTelemetry + from litellm.proxy._types import SpanAttributes + + kv_pair_dict = OpenTelemetry._tool_calls_kv_pair(tool_calls) + + assert kv_pair_dict == { + f"{SpanAttributes.LLM_COMPLETIONS}.0.function_call.arguments": '{"city": "New York"}', + f"{SpanAttributes.LLM_COMPLETIONS}.0.function_call.name": "get_weather", + f"{SpanAttributes.LLM_COMPLETIONS}.1.function_call.arguments": '{"city": "New York"}', + f"{SpanAttributes.LLM_COMPLETIONS}.1.function_call.name": "get_news", + } + + @pytest.mark.asyncio + async def test_opentelemetry_integration(self): + """ + Unit test to confirm the parent otel span is ended + """ + + parent_otel_span = MagicMock() + litellm.callbacks = ["otel"] + + await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Hello, world!"}], + mock_response="Hey!", + metadata={"litellm_parent_otel_span": parent_otel_span}, + ) + + await asyncio.sleep(1) + + parent_otel_span.end.assert_called_once() From d889bce0c459cb5d5c6d60c8578673372413a566 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 11:49:44 -0800 Subject: [PATCH 56/67] add clear doc string for GCS bucket logging --- litellm/integrations/gcs_bucket/gcs_bucket.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py index 0b637f9b6..83b831904 100644 --- a/litellm/integrations/gcs_bucket/gcs_bucket.py +++ b/litellm/integrations/gcs_bucket/gcs_bucket.py @@ -115,7 +115,17 @@ class GCSBucketLogger(GCSBucketBase): verbose_logger.exception(f"GCS Bucket logging error: {str(e)}") async def async_send_batch(self): - """Process queued logs in batch - sends logs to GCS Bucket""" + """ + Process queued logs in batch - sends logs to GCS Bucket + + + GCS Bucket does not have a Batch endpoint to batch upload logs + + Instead, we + - collect the logs to flush every `GCS_FLUSH_INTERVAL` seconds + - during async_send_batch, we make 1 POST request per log to GCS Bucket + + """ if not self.log_queue: return From 1e2ba3e04563743615ea16ce95604331b9d34909 Mon Sep 17 00:00:00 2001 From: Dinmukhamed Mailibay <47117969+dinmukhamedm@users.noreply.github.com> Date: Mon, 11 Nov 2024 12:15:47 -0800 Subject: [PATCH 57/67] Add docs to export logs to Laminar (#6674) * Add docs to export logs to Laminar * minor fix: newline at end of file * place laminar after http and grpc --- docs/my-website/.gitignore | 1 + .../docs/observability/opentelemetry_integration.md | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/docs/my-website/.gitignore b/docs/my-website/.gitignore index b2d6de306..4d8604572 100644 --- a/docs/my-website/.gitignore +++ b/docs/my-website/.gitignore @@ -18,3 +18,4 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +yarn.lock diff --git a/docs/my-website/docs/observability/opentelemetry_integration.md b/docs/my-website/docs/observability/opentelemetry_integration.md index ba5ef2ff8..218064b3d 100644 --- a/docs/my-website/docs/observability/opentelemetry_integration.md +++ b/docs/my-website/docs/observability/opentelemetry_integration.md @@ -49,9 +49,19 @@ OTEL_ENDPOINT="http://0.0.0.0:4317" + + +```shell +OTEL_EXPORTER="otlp_grpc" +OTEL_ENDPOINT="https://api.lmnr.ai:8443" +OTEL_HEADERS="authorization=Bearer " +``` + + + -Use just 2 lines of code, to instantly log your LLM responses **across all providers** with OpenTelemetry: +Use just 1 line of code, to instantly log your LLM responses **across all providers** with OpenTelemetry: ```python litellm.callbacks = ["otel"] From c3bc9e6b12b29414e0bb23e10f5e41952c7df914 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 13:58:06 -0800 Subject: [PATCH 58/67] (Feat) Add langsmith key based logging (#6682) * add langsmith_api_key to StandardCallbackDynamicParams * create a file for langsmith types * langsmith add key / team based logging * add key based logging for langsmith * fix langsmith key based logging * fix linting langsmith * remove NOQA violation * add unit test coverage for all helpers in test langsmith * test_langsmith_key_based_logging * docs langsmith key based logging * run langsmith tests in logging callback tests * fix logging testing * test_langsmith_key_based_logging * test_add_callback_via_key_litellm_pre_call_utils_langsmith * add debug statement langsmith key based logging * test_langsmith_key_based_logging --- .circleci/config.yml | 1 + docs/my-website/docs/proxy/team_logging.md | 45 ++ litellm/integrations/langsmith.py | 293 +++++++------ litellm/proxy/proxy_config.yaml | 2 + litellm/types/integrations/langsmith.py | 61 +++ litellm/types/utils.py | 5 + tests/local_testing/test_langsmith.py | 55 --- .../test_langsmith_unit_test.py | 394 ++++++++++++++++++ tests/proxy_unit_tests/test_proxy_server.py | 133 ++++++ 9 files changed, 810 insertions(+), 179 deletions(-) create mode 100644 litellm/types/integrations/langsmith.py create mode 100644 tests/logging_callback_tests/test_langsmith_unit_test.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 88e83fa7f..7961cfddb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -686,6 +686,7 @@ jobs: pip install "pytest-retry==1.6.3" pip install "pytest-cov==5.0.0" pip install "pytest-asyncio==0.21.1" + pip install pytest-mock pip install "respx==0.21.1" pip install "google-generativeai==0.3.2" pip install "google-cloud-aiplatform==1.43.0" diff --git a/docs/my-website/docs/proxy/team_logging.md b/docs/my-website/docs/proxy/team_logging.md index e2fcfa4b5..8286ac449 100644 --- a/docs/my-website/docs/proxy/team_logging.md +++ b/docs/my-website/docs/proxy/team_logging.md @@ -281,6 +281,51 @@ curl -X POST 'http://0.0.0.0:4000/key/generate' \ }' ``` + + + + +1. Create Virtual Key to log to a specific Langsmith Project + + ```bash + curl -X POST 'http://0.0.0.0:4000/key/generate' \ + -H 'Authorization: Bearer sk-1234' \ + -H 'Content-Type: application/json' \ + -d '{ + "metadata": { + "logging": [{ + "callback_name": "langsmith", # "otel", "gcs_bucket" + "callback_type": "success", # "success", "failure", "success_and_failure" + "callback_vars": { + "langsmith_api_key": "os.environ/LANGSMITH_API_KEY", # API Key for Langsmith logging + "langsmith_project": "pr-brief-resemblance-72", # project name on langsmith + "langsmith_base_url": "https://api.smith.langchain.com" + } + }] + } + }' + + ``` + +2. Test it - `/chat/completions` request + + Use the virtual key from step 3 to make a `/chat/completions` request + + You should see your logs on your Langsmith project on a successful request + + ```shell + curl -i http://localhost:4000/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer sk-Fxq5XSyWKeXDKfPdqXZhPg" \ + -d '{ + "model": "fake-openai-endpoint", + "messages": [ + {"role": "user", "content": "Hello, Claude"} + ], + "user": "hello", + }' + ``` + diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py index 951393445..4abd2a2c3 100644 --- a/litellm/integrations/langsmith.py +++ b/litellm/integrations/langsmith.py @@ -23,34 +23,8 @@ from litellm.llms.custom_httpx.http_handler import ( get_async_httpx_client, httpxSpecialProvider, ) -from litellm.types.utils import StandardLoggingPayload - - -class LangsmithInputs(BaseModel): - model: Optional[str] = None - messages: Optional[List[Any]] = None - stream: Optional[bool] = None - call_type: Optional[str] = None - litellm_call_id: Optional[str] = None - completion_start_time: Optional[datetime] = None - temperature: Optional[float] = None - max_tokens: Optional[int] = None - custom_llm_provider: Optional[str] = None - input: Optional[List[Any]] = None - log_event_type: Optional[str] = None - original_response: Optional[Any] = None - response_cost: Optional[float] = None - - # LiteLLM Virtual Key specific fields - user_api_key: Optional[str] = None - user_api_key_user_id: Optional[str] = None - user_api_key_team_alias: Optional[str] = None - - -class LangsmithCredentialsObject(TypedDict): - LANGSMITH_API_KEY: str - LANGSMITH_PROJECT: str - LANGSMITH_BASE_URL: str +from litellm.types.integrations.langsmith import * +from litellm.types.utils import StandardCallbackDynamicParams, StandardLoggingPayload def is_serializable(value): @@ -93,15 +67,16 @@ class LangsmithLogger(CustomBatchLogger): ) if _batch_size: self.batch_size = int(_batch_size) + self.log_queue: List[LangsmithQueueObject] = [] asyncio.create_task(self.periodic_flush()) self.flush_lock = asyncio.Lock() super().__init__(**kwargs, flush_lock=self.flush_lock) def get_credentials_from_env( self, - langsmith_api_key: Optional[str], - langsmith_project: Optional[str], - langsmith_base_url: Optional[str], + langsmith_api_key: Optional[str] = None, + langsmith_project: Optional[str] = None, + langsmith_base_url: Optional[str] = None, ) -> LangsmithCredentialsObject: _credentials_api_key = langsmith_api_key or os.getenv("LANGSMITH_API_KEY") @@ -132,42 +107,19 @@ class LangsmithLogger(CustomBatchLogger): LANGSMITH_PROJECT=_credentials_project, ) - def _prepare_log_data( # noqa: PLR0915 - self, kwargs, response_obj, start_time, end_time + def _prepare_log_data( + self, + kwargs, + response_obj, + start_time, + end_time, + credentials: LangsmithCredentialsObject, ): - import json - from datetime import datetime as dt - try: _litellm_params = kwargs.get("litellm_params", {}) or {} metadata = _litellm_params.get("metadata", {}) or {} - new_metadata = {} - for key, value in metadata.items(): - if ( - isinstance(value, list) - or isinstance(value, str) - or isinstance(value, int) - or isinstance(value, float) - ): - new_metadata[key] = value - elif isinstance(value, BaseModel): - new_metadata[key] = value.model_dump_json() - elif isinstance(value, dict): - for k, v in value.items(): - if isinstance(v, dt): - value[k] = v.isoformat() - new_metadata[key] = value - - metadata = new_metadata - - kwargs["user_api_key"] = metadata.get("user_api_key", None) - kwargs["user_api_key_user_id"] = metadata.get("user_api_key_user_id", None) - kwargs["user_api_key_team_alias"] = metadata.get( - "user_api_key_team_alias", None - ) - project_name = metadata.get( - "project_name", self.default_credentials["LANGSMITH_PROJECT"] + "project_name", credentials["LANGSMITH_PROJECT"] ) run_name = metadata.get("run_name", self.langsmith_default_run_name) run_id = metadata.get("id", None) @@ -175,16 +127,10 @@ class LangsmithLogger(CustomBatchLogger): trace_id = metadata.get("trace_id", None) session_id = metadata.get("session_id", None) dotted_order = metadata.get("dotted_order", None) - tags = metadata.get("tags", []) or [] verbose_logger.debug( f"Langsmith Logging - project_name: {project_name}, run_name {run_name}" ) - # filter out kwargs to not include any dicts, langsmith throws an erros when trying to log kwargs - # logged_kwargs = LangsmithInputs(**kwargs) - # kwargs = logged_kwargs.model_dump() - - # new_kwargs = {} # Ensure everything in the payload is converted to str payload: Optional[StandardLoggingPayload] = kwargs.get( "standard_logging_object", None @@ -193,7 +139,6 @@ class LangsmithLogger(CustomBatchLogger): if payload is None: raise Exception("Error logging request payload. Payload=none.") - new_kwargs = payload metadata = payload[ "metadata" ] # ensure logged metadata is json serializable @@ -201,12 +146,12 @@ class LangsmithLogger(CustomBatchLogger): data = { "name": run_name, "run_type": "llm", # this should always be llm, since litellm always logs llm calls. Langsmith allow us to log "chain" - "inputs": new_kwargs, - "outputs": new_kwargs["response"], + "inputs": payload, + "outputs": payload["response"], "session_name": project_name, - "start_time": new_kwargs["startTime"], - "end_time": new_kwargs["endTime"], - "tags": tags, + "start_time": payload["startTime"], + "end_time": payload["endTime"], + "tags": payload["request_tags"], "extra": metadata, } @@ -243,37 +188,6 @@ class LangsmithLogger(CustomBatchLogger): except Exception: raise - def _send_batch(self): - if not self.log_queue: - return - - langsmith_api_key = self.default_credentials["LANGSMITH_API_KEY"] - langsmith_api_base = self.default_credentials["LANGSMITH_BASE_URL"] - - url = f"{langsmith_api_base}/runs/batch" - - headers = {"x-api-key": langsmith_api_key} - - try: - response = requests.post( - url=url, - json=self.log_queue, - headers=headers, - ) - - if response.status_code >= 300: - verbose_logger.error( - f"Langsmith Error: {response.status_code} - {response.text}" - ) - else: - verbose_logger.debug( - f"Batch of {len(self.log_queue)} runs successfully created" - ) - - self.log_queue.clear() - except Exception: - verbose_logger.exception("Langsmith Layer Error - Error sending batch.") - def log_success_event(self, kwargs, response_obj, start_time, end_time): try: sampling_rate = ( @@ -295,8 +209,20 @@ class LangsmithLogger(CustomBatchLogger): kwargs, response_obj, ) - data = self._prepare_log_data(kwargs, response_obj, start_time, end_time) - self.log_queue.append(data) + credentials = self._get_credentials_to_use_for_request(kwargs=kwargs) + data = self._prepare_log_data( + kwargs=kwargs, + response_obj=response_obj, + start_time=start_time, + end_time=end_time, + credentials=credentials, + ) + self.log_queue.append( + LangsmithQueueObject( + data=data, + credentials=credentials, + ) + ) verbose_logger.debug( f"Langsmith, event added to queue. Will flush in {self.flush_interval} seconds..." ) @@ -323,8 +249,20 @@ class LangsmithLogger(CustomBatchLogger): kwargs, response_obj, ) - data = self._prepare_log_data(kwargs, response_obj, start_time, end_time) - self.log_queue.append(data) + credentials = self._get_credentials_to_use_for_request(kwargs=kwargs) + data = self._prepare_log_data( + kwargs=kwargs, + response_obj=response_obj, + start_time=start_time, + end_time=end_time, + credentials=credentials, + ) + self.log_queue.append( + LangsmithQueueObject( + data=data, + credentials=credentials, + ) + ) verbose_logger.debug( "Langsmith logging: queue length %s, batch size %s", len(self.log_queue), @@ -349,8 +287,20 @@ class LangsmithLogger(CustomBatchLogger): return # Skip logging verbose_logger.info("Langsmith Failure Event Logging!") try: - data = self._prepare_log_data(kwargs, response_obj, start_time, end_time) - self.log_queue.append(data) + credentials = self._get_credentials_to_use_for_request(kwargs=kwargs) + data = self._prepare_log_data( + kwargs=kwargs, + response_obj=response_obj, + start_time=start_time, + end_time=end_time, + credentials=credentials, + ) + self.log_queue.append( + LangsmithQueueObject( + data=data, + credentials=credentials, + ) + ) verbose_logger.debug( "Langsmith logging: queue length %s, batch size %s", len(self.log_queue), @@ -365,31 +315,58 @@ class LangsmithLogger(CustomBatchLogger): async def async_send_batch(self): """ - sends runs to /batch endpoint + Handles sending batches of runs to Langsmith - Sends runs from self.log_queue + self.log_queue contains LangsmithQueueObjects + Each LangsmithQueueObject has the following: + - "credentials" - credentials to use for the request (langsmith_api_key, langsmith_project, langsmith_base_url) + - "data" - data to log on to langsmith for the request + + + This function + - groups the queue objects by credentials + - loops through each unique credentials and sends batches to Langsmith + + + This was added to support key/team based logging on langsmith + """ + if not self.log_queue: + return + + batch_groups = self._group_batches_by_credentials() + for batch_group in batch_groups.values(): + await self._log_batch_on_langsmith( + credentials=batch_group.credentials, + queue_objects=batch_group.queue_objects, + ) + + async def _log_batch_on_langsmith( + self, + credentials: LangsmithCredentialsObject, + queue_objects: List[LangsmithQueueObject], + ): + """ + Logs a batch of runs to Langsmith + sends runs to /batch endpoint for the given credentials + + Args: + credentials: LangsmithCredentialsObject + queue_objects: List[LangsmithQueueObject] Returns: None Raises: Does not raise an exception, will only verbose_logger.exception() """ - if not self.log_queue: - return - - langsmith_api_base = self.default_credentials["LANGSMITH_BASE_URL"] - + langsmith_api_base = credentials["LANGSMITH_BASE_URL"] + langsmith_api_key = credentials["LANGSMITH_API_KEY"] url = f"{langsmith_api_base}/runs/batch" - - langsmith_api_key = self.default_credentials["LANGSMITH_API_KEY"] - headers = {"x-api-key": langsmith_api_key} + elements_to_log = [queue_object["data"] for queue_object in queue_objects] try: response = await self.async_httpx_client.post( url=url, - json={ - "post": self.log_queue, - }, + json={"post": elements_to_log}, headers=headers, ) response.raise_for_status() @@ -411,6 +388,74 @@ class LangsmithLogger(CustomBatchLogger): f"Langsmith Layer Error - {traceback.format_exc()}" ) + def _group_batches_by_credentials(self) -> Dict[CredentialsKey, BatchGroup]: + """Groups queue objects by credentials using a proper key structure""" + log_queue_by_credentials: Dict[CredentialsKey, BatchGroup] = {} + + for queue_object in self.log_queue: + credentials = queue_object["credentials"] + key = CredentialsKey( + api_key=credentials["LANGSMITH_API_KEY"], + project=credentials["LANGSMITH_PROJECT"], + base_url=credentials["LANGSMITH_BASE_URL"], + ) + + if key not in log_queue_by_credentials: + log_queue_by_credentials[key] = BatchGroup( + credentials=credentials, queue_objects=[] + ) + + log_queue_by_credentials[key].queue_objects.append(queue_object) + + return log_queue_by_credentials + + def _get_credentials_to_use_for_request( + self, kwargs: Dict[str, Any] + ) -> LangsmithCredentialsObject: + """ + Handles key/team based logging + + If standard_callback_dynamic_params are provided, use those credentials. + + Otherwise, use the default credentials. + """ + standard_callback_dynamic_params: Optional[StandardCallbackDynamicParams] = ( + kwargs.get("standard_callback_dynamic_params", None) + ) + if standard_callback_dynamic_params is not None: + credentials = self.get_credentials_from_env( + langsmith_api_key=standard_callback_dynamic_params.get( + "langsmith_api_key", None + ), + langsmith_project=standard_callback_dynamic_params.get( + "langsmith_project", None + ), + langsmith_base_url=standard_callback_dynamic_params.get( + "langsmith_base_url", None + ), + ) + else: + credentials = self.default_credentials + return credentials + + def _send_batch(self): + """Calls async_send_batch in an event loop""" + if not self.log_queue: + return + + try: + # Try to get the existing event loop + loop = asyncio.get_event_loop() + if loop.is_running(): + # If we're already in an event loop, create a task + asyncio.create_task(self.async_send_batch()) + else: + # If no event loop is running, run the coroutine directly + loop.run_until_complete(self.async_send_batch()) + except RuntimeError: + # If we can't get an event loop, create a new one + asyncio.run(self.async_send_batch()) + def get_run_by_id(self, run_id): langsmith_api_key = self.default_credentials["LANGSMITH_API_KEY"] diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index b4a18baa4..29d14c910 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -6,5 +6,7 @@ model_list: api_base: https://exampleopenaiendpoint-production.up.railway.app/ + litellm_settings: callbacks: ["gcs_bucket"] + diff --git a/litellm/types/integrations/langsmith.py b/litellm/types/integrations/langsmith.py new file mode 100644 index 000000000..48c8e2e0a --- /dev/null +++ b/litellm/types/integrations/langsmith.py @@ -0,0 +1,61 @@ +from dataclasses import dataclass +from datetime import datetime +from typing import Any, Dict, List, NamedTuple, Optional, TypedDict + +from pydantic import BaseModel + + +class LangsmithInputs(BaseModel): + model: Optional[str] = None + messages: Optional[List[Any]] = None + stream: Optional[bool] = None + call_type: Optional[str] = None + litellm_call_id: Optional[str] = None + completion_start_time: Optional[datetime] = None + temperature: Optional[float] = None + max_tokens: Optional[int] = None + custom_llm_provider: Optional[str] = None + input: Optional[List[Any]] = None + log_event_type: Optional[str] = None + original_response: Optional[Any] = None + response_cost: Optional[float] = None + + # LiteLLM Virtual Key specific fields + user_api_key: Optional[str] = None + user_api_key_user_id: Optional[str] = None + user_api_key_team_alias: Optional[str] = None + + +class LangsmithCredentialsObject(TypedDict): + LANGSMITH_API_KEY: str + LANGSMITH_PROJECT: str + LANGSMITH_BASE_URL: str + + +class LangsmithQueueObject(TypedDict): + """ + Langsmith Queue Object - this is what gets stored in the internal system queue before flushing to Langsmith + + We need to store: + - data[Dict] - data that should get logged on langsmith + - credentials[LangsmithCredentialsObject] - credentials to use for logging to langsmith + """ + + data: Dict + credentials: LangsmithCredentialsObject + + +class CredentialsKey(NamedTuple): + """Immutable key for grouping credentials""" + + api_key: str + project: str + base_url: str + + +@dataclass +class BatchGroup: + """Groups credentials with their associated queue objects""" + + credentials: LangsmithCredentialsObject + queue_objects: List[LangsmithQueueObject] diff --git a/litellm/types/utils.py b/litellm/types/utils.py index a2b62f9cc..e3df357be 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -1595,3 +1595,8 @@ class StandardCallbackDynamicParams(TypedDict, total=False): # GCS dynamic params gcs_bucket_name: Optional[str] gcs_path_service_account: Optional[str] + + # Langsmith dynamic params + langsmith_api_key: Optional[str] + langsmith_project: Optional[str] + langsmith_base_url: Optional[str] diff --git a/tests/local_testing/test_langsmith.py b/tests/local_testing/test_langsmith.py index 6a98f244d..ab387e444 100644 --- a/tests/local_testing/test_langsmith.py +++ b/tests/local_testing/test_langsmith.py @@ -22,61 +22,6 @@ litellm.set_verbose = True import time -@pytest.mark.asyncio -async def test_langsmith_queue_logging(): - try: - # Initialize LangsmithLogger - test_langsmith_logger = LangsmithLogger() - - litellm.callbacks = [test_langsmith_logger] - test_langsmith_logger.batch_size = 6 - litellm.set_verbose = True - - # Make multiple calls to ensure we don't hit the batch size - for _ in range(5): - response = await litellm.acompletion( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Test message"}], - max_tokens=10, - temperature=0.2, - mock_response="This is a mock response", - ) - - await asyncio.sleep(3) - - # Check that logs are in the queue - assert len(test_langsmith_logger.log_queue) == 5 - - # Now make calls to exceed the batch size - for _ in range(3): - response = await litellm.acompletion( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Test message"}], - max_tokens=10, - temperature=0.2, - mock_response="This is a mock response", - ) - - # Wait a short time for any asynchronous operations to complete - await asyncio.sleep(1) - - print( - "Length of langsmith log queue: {}".format( - len(test_langsmith_logger.log_queue) - ) - ) - # Check that the queue was flushed after exceeding batch size - assert len(test_langsmith_logger.log_queue) < 5 - - # Clean up - for cb in litellm.callbacks: - if isinstance(cb, LangsmithLogger): - await cb.async_httpx_client.client.aclose() - - except Exception as e: - pytest.fail(f"Error occurred: {e}") - - # test_langsmith_logging() diff --git a/tests/logging_callback_tests/test_langsmith_unit_test.py b/tests/logging_callback_tests/test_langsmith_unit_test.py new file mode 100644 index 000000000..3e106666f --- /dev/null +++ b/tests/logging_callback_tests/test_langsmith_unit_test.py @@ -0,0 +1,394 @@ +import io +import os +import sys + + +sys.path.insert(0, os.path.abspath("../..")) + +import asyncio +import gzip +import json +import logging +import time +from unittest.mock import AsyncMock, patch, MagicMock +import pytest +from datetime import datetime, timezone +from litellm.integrations.langsmith import ( + LangsmithLogger, + LangsmithQueueObject, + CredentialsKey, + BatchGroup, +) + +import litellm + + +# Test get_credentials_from_env +@pytest.mark.asyncio +async def test_get_credentials_from_env(): + # Test with direct parameters + logger = LangsmithLogger( + langsmith_api_key="test-key", + langsmith_project="test-project", + langsmith_base_url="http://test-url", + ) + + credentials = logger.get_credentials_from_env( + langsmith_api_key="custom-key", + langsmith_project="custom-project", + langsmith_base_url="http://custom-url", + ) + + assert credentials["LANGSMITH_API_KEY"] == "custom-key" + assert credentials["LANGSMITH_PROJECT"] == "custom-project" + assert credentials["LANGSMITH_BASE_URL"] == "http://custom-url" + + # assert that the default api base is used if not provided + credentials = logger.get_credentials_from_env() + assert credentials["LANGSMITH_BASE_URL"] == "https://api.smith.langchain.com" + + +@pytest.mark.asyncio +async def test_group_batches_by_credentials(): + + logger = LangsmithLogger(langsmith_api_key="test-key") + + # Create test queue objects + queue_obj1 = LangsmithQueueObject( + data={"test": "data1"}, + credentials={ + "LANGSMITH_API_KEY": "key1", + "LANGSMITH_PROJECT": "proj1", + "LANGSMITH_BASE_URL": "url1", + }, + ) + + queue_obj2 = LangsmithQueueObject( + data={"test": "data2"}, + credentials={ + "LANGSMITH_API_KEY": "key1", + "LANGSMITH_PROJECT": "proj1", + "LANGSMITH_BASE_URL": "url1", + }, + ) + + logger.log_queue = [queue_obj1, queue_obj2] + + grouped = logger._group_batches_by_credentials() + + # Check grouping + assert len(grouped) == 1 # Should have one group since credentials are same + key = list(grouped.keys())[0] + assert isinstance(key, CredentialsKey) + assert len(grouped[key].queue_objects) == 2 + + +@pytest.mark.asyncio +async def test_group_batches_by_credentials_multiple_credentials(): + + # Test with multiple different credentials + logger = LangsmithLogger(langsmith_api_key="test-key") + + queue_obj1 = LangsmithQueueObject( + data={"test": "data1"}, + credentials={ + "LANGSMITH_API_KEY": "key1", + "LANGSMITH_PROJECT": "proj1", + "LANGSMITH_BASE_URL": "url1", + }, + ) + + queue_obj2 = LangsmithQueueObject( + data={"test": "data2"}, + credentials={ + "LANGSMITH_API_KEY": "key2", # Different API key + "LANGSMITH_PROJECT": "proj1", + "LANGSMITH_BASE_URL": "url1", + }, + ) + + queue_obj3 = LangsmithQueueObject( + data={"test": "data3"}, + credentials={ + "LANGSMITH_API_KEY": "key1", + "LANGSMITH_PROJECT": "proj2", # Different project + "LANGSMITH_BASE_URL": "url1", + }, + ) + + logger.log_queue = [queue_obj1, queue_obj2, queue_obj3] + + grouped = logger._group_batches_by_credentials() + + # Check grouping + assert len(grouped) == 3 # Should have three groups since credentials differ + for key, batch_group in grouped.items(): + assert isinstance(key, CredentialsKey) + assert len(batch_group.queue_objects) == 1 # Each group should have one object + + +# Test make_dot_order +@pytest.mark.asyncio +async def test_make_dot_order(): + logger = LangsmithLogger(langsmith_api_key="test-key") + run_id = "729cff0e-f30c-4336-8b79-45d6b61c64b4" + dot_order = logger.make_dot_order(run_id) + + print("dot_order=", dot_order) + + # Check format: YYYYMMDDTHHMMSSfffZ + run_id + # Check the timestamp portion (first 23 characters) + timestamp_part = dot_order[:-36] # 36 is length of run_id + assert len(timestamp_part) == 22 + assert timestamp_part[8] == "T" # Check T separator + assert timestamp_part[-1] == "Z" # Check Z suffix + + # Verify timestamp format + try: + # Parse the timestamp portion (removing the Z) + datetime.strptime(timestamp_part[:-1], "%Y%m%dT%H%M%S%f") + except ValueError: + pytest.fail("Timestamp portion is not in correct format") + + # Verify run_id portion + assert dot_order[-36:] == run_id + + +# Test is_serializable +@pytest.mark.asyncio +async def test_is_serializable(): + from litellm.integrations.langsmith import is_serializable + from pydantic import BaseModel + + # Test basic types + assert is_serializable("string") is True + assert is_serializable(123) is True + assert is_serializable({"key": "value"}) is True + + # Test non-serializable types + async def async_func(): + pass + + assert is_serializable(async_func) is False + + class TestModel(BaseModel): + field: str + + assert is_serializable(TestModel(field="test")) is False + + +@pytest.mark.asyncio +async def test_async_send_batch(): + logger = LangsmithLogger(langsmith_api_key="test-key") + + # Mock the httpx client + mock_response = AsyncMock() + mock_response.status_code = 200 + logger.async_httpx_client = AsyncMock() + logger.async_httpx_client.post.return_value = mock_response + + # Add test data to queue + logger.log_queue = [ + LangsmithQueueObject( + data={"test": "data"}, credentials=logger.default_credentials + ) + ] + + await logger.async_send_batch() + + # Verify the API call + logger.async_httpx_client.post.assert_called_once() + call_args = logger.async_httpx_client.post.call_args + assert "runs/batch" in call_args[1]["url"] + assert "x-api-key" in call_args[1]["headers"] + + +@pytest.mark.asyncio +async def test_langsmith_key_based_logging(mocker): + """ + In key based logging langsmith_api_key and langsmith_project are passed directly to litellm.acompletion + """ + try: + # Mock the httpx post request + mock_post = mocker.patch( + "litellm.llms.custom_httpx.http_handler.AsyncHTTPHandler.post" + ) + mock_post.return_value.status_code = 200 + mock_post.return_value.raise_for_status = lambda: None + litellm.set_verbose = True + + litellm.callbacks = [LangsmithLogger()] + response = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Test message"}], + max_tokens=10, + temperature=0.2, + mock_response="This is a mock response", + langsmith_api_key="fake_key_project2", + langsmith_project="fake_project2", + ) + print("Waiting for logs to be flushed to Langsmith.....") + await asyncio.sleep(15) + + print("done sleeping 15 seconds...") + + # Verify the post request was made with correct parameters + mock_post.assert_called_once() + call_args = mock_post.call_args + + print("call_args", call_args) + + # Check URL contains /runs/batch + assert "/runs/batch" in call_args[1]["url"] + + # Check headers contain the correct API key + assert call_args[1]["headers"]["x-api-key"] == "fake_key_project2" + + # Verify the request body contains the expected data + request_body = call_args[1]["json"] + assert "post" in request_body + assert len(request_body["post"]) == 1 # Should contain one run + + # EXPECTED BODY + expected_body = { + "post": [ + { + "name": "LLMRun", + "run_type": "llm", + "inputs": { + "id": "chatcmpl-82699ee4-7932-4fc0-9585-76abc8caeafa", + "call_type": "acompletion", + "model": "gpt-3.5-turbo", + "messages": [{"role": "user", "content": "Test message"}], + "model_parameters": { + "temperature": 0.2, + "max_tokens": 10, + "extra_body": {}, + }, + }, + "outputs": { + "id": "chatcmpl-82699ee4-7932-4fc0-9585-76abc8caeafa", + "model": "gpt-3.5-turbo", + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { + "content": "This is a mock response", + "role": "assistant", + "tool_calls": None, + "function_call": None, + }, + } + ], + "usage": { + "completion_tokens": 20, + "prompt_tokens": 10, + "total_tokens": 30, + }, + }, + "session_name": "fake_project2", + } + ] + } + + # Print both bodies for debugging + actual_body = call_args[1]["json"] + print("\nExpected body:") + print(json.dumps(expected_body, indent=2)) + print("\nActual body:") + print(json.dumps(actual_body, indent=2)) + + assert len(actual_body["post"]) == 1 + + # Assert only the critical parts we care about + assert actual_body["post"][0]["name"] == expected_body["post"][0]["name"] + assert ( + actual_body["post"][0]["run_type"] == expected_body["post"][0]["run_type"] + ) + assert ( + actual_body["post"][0]["inputs"]["messages"] + == expected_body["post"][0]["inputs"]["messages"] + ) + assert ( + actual_body["post"][0]["inputs"]["model_parameters"] + == expected_body["post"][0]["inputs"]["model_parameters"] + ) + assert ( + actual_body["post"][0]["outputs"]["choices"] + == expected_body["post"][0]["outputs"]["choices"] + ) + assert ( + actual_body["post"][0]["outputs"]["usage"]["completion_tokens"] + == expected_body["post"][0]["outputs"]["usage"]["completion_tokens"] + ) + assert ( + actual_body["post"][0]["outputs"]["usage"]["prompt_tokens"] + == expected_body["post"][0]["outputs"]["usage"]["prompt_tokens"] + ) + assert ( + actual_body["post"][0]["outputs"]["usage"]["total_tokens"] + == expected_body["post"][0]["outputs"]["usage"]["total_tokens"] + ) + assert ( + actual_body["post"][0]["session_name"] + == expected_body["post"][0]["session_name"] + ) + + except Exception as e: + pytest.fail(f"Error occurred: {e}") + + +@pytest.mark.asyncio +async def test_langsmith_queue_logging(): + try: + # Initialize LangsmithLogger + test_langsmith_logger = LangsmithLogger() + + litellm.callbacks = [test_langsmith_logger] + test_langsmith_logger.batch_size = 6 + litellm.set_verbose = True + + # Make multiple calls to ensure we don't hit the batch size + for _ in range(5): + response = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Test message"}], + max_tokens=10, + temperature=0.2, + mock_response="This is a mock response", + ) + + await asyncio.sleep(3) + + # Check that logs are in the queue + assert len(test_langsmith_logger.log_queue) == 5 + + # Now make calls to exceed the batch size + for _ in range(3): + response = await litellm.acompletion( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Test message"}], + max_tokens=10, + temperature=0.2, + mock_response="This is a mock response", + ) + + # Wait a short time for any asynchronous operations to complete + await asyncio.sleep(1) + + print( + "Length of langsmith log queue: {}".format( + len(test_langsmith_logger.log_queue) + ) + ) + # Check that the queue was flushed after exceeding batch size + assert len(test_langsmith_logger.log_queue) < 5 + + # Clean up + for cb in litellm.callbacks: + if isinstance(cb, LangsmithLogger): + await cb.async_httpx_client.client.aclose() + + except Exception as e: + pytest.fail(f"Error occurred: {e}") diff --git a/tests/proxy_unit_tests/test_proxy_server.py b/tests/proxy_unit_tests/test_proxy_server.py index 76cdf1a54..5588d0414 100644 --- a/tests/proxy_unit_tests/test_proxy_server.py +++ b/tests/proxy_unit_tests/test_proxy_server.py @@ -1632,6 +1632,139 @@ async def test_add_callback_via_key_litellm_pre_call_utils_gcs_bucket( assert new_data["failure_callback"] == expected_failure_callbacks +@pytest.mark.asyncio +@pytest.mark.parametrize( + "callback_type, expected_success_callbacks, expected_failure_callbacks", + [ + ("success", ["langsmith"], []), + ("failure", [], ["langsmith"]), + ("success_and_failure", ["langsmith"], ["langsmith"]), + ], +) +async def test_add_callback_via_key_litellm_pre_call_utils_langsmith( + prisma_client, callback_type, expected_success_callbacks, expected_failure_callbacks +): + import json + + from fastapi import HTTPException, Request, Response + from starlette.datastructures import URL + + from litellm.proxy.litellm_pre_call_utils import add_litellm_data_to_request + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + + proxy_config = getattr(litellm.proxy.proxy_server, "proxy_config") + + request = Request(scope={"type": "http", "method": "POST", "headers": {}}) + request._url = URL(url="/chat/completions") + + test_data = { + "model": "azure/chatgpt-v-2", + "messages": [ + {"role": "user", "content": "write 1 sentence poem"}, + ], + "max_tokens": 10, + "mock_response": "Hello world", + "api_key": "my-fake-key", + } + + json_bytes = json.dumps(test_data).encode("utf-8") + + request._body = json_bytes + + data = { + "data": { + "model": "azure/chatgpt-v-2", + "messages": [{"role": "user", "content": "write 1 sentence poem"}], + "max_tokens": 10, + "mock_response": "Hello world", + "api_key": "my-fake-key", + }, + "request": request, + "user_api_key_dict": UserAPIKeyAuth( + token=None, + key_name=None, + key_alias=None, + spend=0.0, + max_budget=None, + expires=None, + models=[], + aliases={}, + config={}, + user_id=None, + team_id=None, + max_parallel_requests=None, + metadata={ + "logging": [ + { + "callback_name": "langsmith", + "callback_type": callback_type, + "callback_vars": { + "langsmith_api_key": "ls-1234", + "langsmith_project": "pr-brief-resemblance-72", + "langsmith_base_url": "https://api.smith.langchain.com", + }, + } + ] + }, + tpm_limit=None, + rpm_limit=None, + budget_duration=None, + budget_reset_at=None, + allowed_cache_controls=[], + permissions={}, + model_spend={}, + model_max_budget={}, + soft_budget_cooldown=False, + litellm_budget_table=None, + org_id=None, + team_spend=None, + team_alias=None, + team_tpm_limit=None, + team_rpm_limit=None, + team_max_budget=None, + team_models=[], + team_blocked=False, + soft_budget=None, + team_model_aliases=None, + team_member_spend=None, + team_metadata=None, + end_user_id=None, + end_user_tpm_limit=None, + end_user_rpm_limit=None, + end_user_max_budget=None, + last_refreshed_at=None, + api_key=None, + user_role=None, + allowed_model_region=None, + parent_otel_span=None, + ), + "proxy_config": proxy_config, + "general_settings": {}, + "version": "0.0.0", + } + + new_data = await add_litellm_data_to_request(**data) + print("NEW DATA: {}".format(new_data)) + + assert "langsmith_api_key" in new_data + assert new_data["langsmith_api_key"] == "ls-1234" + assert "langsmith_project" in new_data + assert new_data["langsmith_project"] == "pr-brief-resemblance-72" + assert "langsmith_base_url" in new_data + assert new_data["langsmith_base_url"] == "https://api.smith.langchain.com" + + if expected_success_callbacks: + assert "success_callback" in new_data + assert new_data["success_callback"] == expected_success_callbacks + + if expected_failure_callbacks: + assert "failure_callback" in new_data + assert new_data["failure_callback"] == expected_failure_callbacks + + @pytest.mark.asyncio async def test_gemini_pass_through_endpoint(): from starlette.datastructures import URL From 9d20c19e0c6cb3c346cb875d086e035b7a867903 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 18:03:41 -0800 Subject: [PATCH 59/67] (fix) OpenAI's optional messages[].name does not work with Mistral API (#6701) * use helper for _transform_messages mistral * add test_message_with_name to base LLMChat test * fix linting --- .../mistral/mistral_chat_transformation.py | 57 +++++++++++++++++++ litellm/llms/prompt_templates/factory.py | 39 +------------ tests/llm_translation/base_llm_unit_tests.py | 8 +++ tests/llm_translation/test_mistral_api.py | 34 +++++++++++ 4 files changed, 100 insertions(+), 38 deletions(-) create mode 100644 tests/llm_translation/test_mistral_api.py diff --git a/litellm/llms/mistral/mistral_chat_transformation.py b/litellm/llms/mistral/mistral_chat_transformation.py index 5d1a54c3a..aeb1a90fd 100644 --- a/litellm/llms/mistral/mistral_chat_transformation.py +++ b/litellm/llms/mistral/mistral_chat_transformation.py @@ -10,6 +10,7 @@ import types from typing import List, Literal, Optional, Tuple, Union from litellm.secret_managers.main import get_secret_str +from litellm.types.llms.openai import AllMessageValues class MistralConfig: @@ -148,3 +149,59 @@ class MistralConfig: or get_secret_str("MISTRAL_API_KEY") ) return api_base, dynamic_api_key + + @classmethod + def _transform_messages(cls, messages: List[AllMessageValues]): + """ + - handles scenario where content is list and not string + - content list is just text, and no images + - if image passed in, then just return as is (user-intended) + - if `name` is passed, then drop it for mistral API: https://github.com/BerriAI/litellm/issues/6696 + + Motivation: mistral api doesn't support content as a list + """ + new_messages = [] + for m in messages: + special_keys = ["role", "content", "tool_calls", "function_call"] + extra_args = {} + if isinstance(m, dict): + for k, v in m.items(): + if k not in special_keys: + extra_args[k] = v + texts = "" + _content = m.get("content") + if _content is not None and isinstance(_content, list): + for c in _content: + _text: Optional[str] = c.get("text") + if c["type"] == "image_url": + return messages + elif c["type"] == "text" and isinstance(_text, str): + texts += _text + elif _content is not None and isinstance(_content, str): + texts = _content + + new_m = {"role": m["role"], "content": texts, **extra_args} + + if m.get("tool_calls"): + new_m["tool_calls"] = m.get("tool_calls") + + new_m = cls._handle_name_in_message(new_m) + + new_messages.append(new_m) + return new_messages + + @classmethod + def _handle_name_in_message(cls, message: dict) -> dict: + """ + Mistral API only supports `name` in tool messages + + If role == tool, then we keep `name` + Otherwise, we drop `name` + """ + if message.get("name") is not None: + if message["role"] == "tool": + message["name"] = message.get("name") + else: + message.pop("name", None) + + return message diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 80ad2ca35..29028e053 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -259,43 +259,6 @@ def mistral_instruct_pt(messages): return prompt -def mistral_api_pt(messages): - """ - - handles scenario where content is list and not string - - content list is just text, and no images - - if image passed in, then just return as is (user-intended) - - Motivation: mistral api doesn't support content as a list - """ - new_messages = [] - for m in messages: - special_keys = ["role", "content", "tool_calls", "function_call"] - extra_args = {} - if isinstance(m, dict): - for k, v in m.items(): - if k not in special_keys: - extra_args[k] = v - texts = "" - if m.get("content", None) is not None and isinstance(m["content"], list): - for c in m["content"]: - if c["type"] == "image_url": - return messages - elif c["type"] == "text" and isinstance(c["text"], str): - texts += c["text"] - elif m.get("content", None) is not None and isinstance(m["content"], str): - texts = m["content"] - - new_m = {"role": m["role"], "content": texts, **extra_args} - - if new_m["role"] == "tool" and m.get("name"): - new_m["name"] = m["name"] - if m.get("tool_calls"): - new_m["tool_calls"] = m["tool_calls"] - - new_messages.append(new_m) - return new_messages - - # Falcon prompt template - from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py#L110 def falcon_instruct_pt(messages): prompt = "" @@ -2853,7 +2816,7 @@ def prompt_factory( else: return gemini_text_image_pt(messages=messages) elif custom_llm_provider == "mistral": - return mistral_api_pt(messages=messages) + return litellm.MistralConfig._transform_messages(messages=messages) elif custom_llm_provider == "bedrock": if "amazon.titan-text" in model: return amazon_titan_pt(messages=messages) diff --git a/tests/llm_translation/base_llm_unit_tests.py b/tests/llm_translation/base_llm_unit_tests.py index 96004eb4e..18ac7216f 100644 --- a/tests/llm_translation/base_llm_unit_tests.py +++ b/tests/llm_translation/base_llm_unit_tests.py @@ -45,6 +45,14 @@ class BaseLLMChatTest(ABC): ) assert response is not None + def test_message_with_name(self): + base_completion_call_args = self.get_base_completion_call_args() + messages = [ + {"role": "user", "content": "Hello", "name": "test_name"}, + ] + response = litellm.completion(**base_completion_call_args, messages=messages) + assert response is not None + @pytest.fixture def pdf_messages(self): import base64 diff --git a/tests/llm_translation/test_mistral_api.py b/tests/llm_translation/test_mistral_api.py new file mode 100644 index 000000000..b2cb36541 --- /dev/null +++ b/tests/llm_translation/test_mistral_api.py @@ -0,0 +1,34 @@ +import asyncio +import os +import sys +import traceback + +from dotenv import load_dotenv + +import litellm.types +import litellm.types.utils +from litellm.llms.anthropic.chat import ModelResponseIterator + +load_dotenv() +import io +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +from typing import Optional +from unittest.mock import MagicMock, patch + +import pytest + +import litellm + +from litellm.llms.anthropic.common_utils import process_anthropic_headers +from httpx import Headers +from base_llm_unit_tests import BaseLLMChatTest + + +class TestMistralCompletion(BaseLLMChatTest): + def get_base_completion_call_args(self) -> dict: + litellm.set_verbose = True + return {"model": "mistral/mistral-small-latest"} From 4fd0c6c8f257c902b9f995976e7b5ba167f90997 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 18:05:36 -0800 Subject: [PATCH 60/67] add xAI on Admin UI (#6680) --- ui/litellm-dashboard/src/components/model_dashboard.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ui/litellm-dashboard/src/components/model_dashboard.tsx b/ui/litellm-dashboard/src/components/model_dashboard.tsx index b09df5d7c..cd915a9be 100644 --- a/ui/litellm-dashboard/src/components/model_dashboard.tsx +++ b/ui/litellm-dashboard/src/components/model_dashboard.tsx @@ -151,6 +151,7 @@ enum Providers { Cohere = "Cohere", Databricks = "Databricks", Ollama = "Ollama", + xAI = "xAI", } const provider_map: Record = { @@ -166,6 +167,7 @@ const provider_map: Record = { OpenAI_Compatible: "openai", Vertex_AI: "vertex_ai", Databricks: "databricks", + xAI: "xai", Deepseek: "deepseek", Ollama: "ollama", From e5051a93a86dcfc3ff10bbe734611c294ec66fa3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 19:25:53 -0800 Subject: [PATCH 61/67] (docs) add benchmarks on 1K RPS (#6704) * docs litellm proxy benchmarks * docs GCS bucket * doc fix - reduce clutter on logging doc title --- docs/my-website/docs/benchmarks.md | 41 +++++++++++++++++++++++++++ docs/my-website/docs/proxy/bucket.md | 12 ++++++-- docs/my-website/docs/proxy/logging.md | 20 ++++++------- docs/my-website/sidebars.js | 1 + 4 files changed, 62 insertions(+), 12 deletions(-) create mode 100644 docs/my-website/docs/benchmarks.md diff --git a/docs/my-website/docs/benchmarks.md b/docs/my-website/docs/benchmarks.md new file mode 100644 index 000000000..86699008b --- /dev/null +++ b/docs/my-website/docs/benchmarks.md @@ -0,0 +1,41 @@ +# Benchmarks + +Benchmarks for LiteLLM Gateway (Proxy Server) + +Locust Settings: +- 2500 Users +- 100 user Ramp Up + + +## Basic Benchmarks + +Overhead when using a Deployed Proxy vs Direct to LLM +- Latency overhead added by LiteLLM Proxy: 107ms + +| Metric | Direct to Fake Endpoint | Basic Litellm Proxy | +|--------|------------------------|---------------------| +| RPS | 1196 | 1133.2 | +| Median Latency (ms) | 33 | 140 | + + +## Logging Callbacks + +### [GCS Bucket Logging](https://docs.litellm.ai/docs/proxy/bucket) + +Using GCS Bucket has **no impact on latency, RPS compared to Basic Litellm Proxy** + +| Metric | Basic Litellm Proxy | LiteLLM Proxy with GCS Bucket Logging | +|--------|------------------------|---------------------| +| RPS | 1133.2 | 1137.3 | +| Median Latency (ms) | 140 | 138 | + + +### [LangSmith logging](https://docs.litellm.ai/docs/proxy/logging) + +Using LangSmith has **no impact on latency, RPS compared to Basic Litellm Proxy** + +| Metric | Basic Litellm Proxy | LiteLLM Proxy with LangSmith | +|--------|------------------------|---------------------| +| RPS | 1133.2 | 1135 | +| Median Latency (ms) | 140 | 132 | + diff --git a/docs/my-website/docs/proxy/bucket.md b/docs/my-website/docs/proxy/bucket.md index 3422d0371..d1b9e6076 100644 --- a/docs/my-website/docs/proxy/bucket.md +++ b/docs/my-website/docs/proxy/bucket.md @@ -9,7 +9,7 @@ LiteLLM Supports Logging to the following Cloud Buckets - (Enterprise) ✨ [Google Cloud Storage Buckets](#logging-proxy-inputoutput-to-google-cloud-storage-buckets) - (Free OSS) [Amazon s3 Buckets](#logging-proxy-inputoutput---s3-buckets) -## Logging Proxy Input/Output to Google Cloud Storage Buckets +## Google Cloud Storage Buckets Log LLM Logs to [Google Cloud Storage Buckets](https://cloud.google.com/storage?hl=en) @@ -20,6 +20,14 @@ Log LLM Logs to [Google Cloud Storage Buckets](https://cloud.google.com/storage? ::: +| Property | Details | +|----------|---------| +| Description | Log LLM Input/Output to cloud storage buckets | +| Load Test Benchmarks | [Benchmarks](https://docs.litellm.ai/docs/benchmarks) | +| Google Docs on Cloud Storage | [Google Cloud Storage](https://cloud.google.com/storage?hl=en) | + + + ### Usage 1. Add `gcs_bucket` to LiteLLM Config.yaml @@ -85,7 +93,7 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \ 6. Save the JSON file and add the path to `GCS_PATH_SERVICE_ACCOUNT` -## Logging Proxy Input/Output - s3 Buckets +## s3 Buckets We will use the `--config` to set diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md index 94faa7734..5867a8f23 100644 --- a/docs/my-website/docs/proxy/logging.md +++ b/docs/my-website/docs/proxy/logging.md @@ -107,7 +107,7 @@ class StandardLoggingModelInformation(TypedDict): model_map_value: Optional[ModelInfo] ``` -## Logging Proxy Input/Output - Langfuse +## Langfuse We will use the `--config` to set `litellm.success_callback = ["langfuse"]` this will log all successfull LLM calls to langfuse. Make sure to set `LANGFUSE_PUBLIC_KEY` and `LANGFUSE_SECRET_KEY` in your environment @@ -463,7 +463,7 @@ You will see `raw_request` in your Langfuse Metadata. This is the RAW CURL comma -## Logging Proxy Input/Output in OpenTelemetry format +## OpenTelemetry format :::info @@ -1216,7 +1216,7 @@ litellm_settings: Start the LiteLLM Proxy and make a test request to verify the logs reached your callback API -## Logging LLM IO to Langsmith +## Langsmith 1. Set `success_callback: ["langsmith"]` on litellm config.yaml @@ -1261,7 +1261,7 @@ Expect to see your log on Langfuse -## Logging LLM IO to Arize AI +## Arize AI 1. Set `success_callback: ["arize"]` on litellm config.yaml @@ -1309,7 +1309,7 @@ Expect to see your log on Langfuse -## Logging LLM IO to Langtrace +## Langtrace 1. Set `success_callback: ["langtrace"]` on litellm config.yaml @@ -1351,7 +1351,7 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \ ' ``` -## Logging LLM IO to Galileo +## Galileo [BETA] @@ -1466,7 +1466,7 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \ -## Logging Proxy Input/Output - DataDog +## DataDog LiteLLM Supports logging to the following Datdog Integrations: - `datadog` [Datadog Logs](https://docs.datadoghq.com/logs/) @@ -1543,7 +1543,7 @@ Expected output on Datadog -## Logging Proxy Input/Output - DynamoDB +## DynamoDB We will use the `--config` to set @@ -1669,7 +1669,7 @@ Your logs should be available on DynamoDB } ``` -## Logging Proxy Input/Output - Sentry +## Sentry If api calls fail (llm/database) you can log those to Sentry: @@ -1711,7 +1711,7 @@ Test Request litellm --test ``` -## Logging Proxy Input/Output Athina +## Athina [Athina](https://athina.ai/) allows you to log LLM Input/Output for monitoring, analytics, and observability. diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 18ad940f8..1dc33f554 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -266,6 +266,7 @@ const sidebars = { type: "category", label: "Load Testing", items: [ + "benchmarks", "load_test", "load_test_advanced", "load_test_sdk", From 25bae4cc237a02c8d46d189898ef631c0935a5a6 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 20:21:44 -0800 Subject: [PATCH 62/67] (feat) add cost tracking stable diffusion 3 on Bedrock (#6676) * add cost tracking for sd3 * test_image_generation_bedrock * fix get model info for image cost * add cost_calculator for stability 1 models * add unit testing for bedrock image cost calc * test_cost_calculator_with_no_optional_params * add test_cost_calculator_basic * correctly allow size Optional * fix cost_calculator * sd3 unit tests cost calc --- litellm/cost_calculator.py | 25 ++++-- litellm/llms/bedrock/image/cost_calculator.py | 41 +++++++++ litellm/utils.py | 1 + .../test_bedrock_image_gen_unit_tests.py | 84 ++++++++++++++++++- .../image_gen_tests/test_image_generation.py | 3 + 5 files changed, 146 insertions(+), 8 deletions(-) create mode 100644 litellm/llms/bedrock/image/cost_calculator.py diff --git a/litellm/cost_calculator.py b/litellm/cost_calculator.py index 0be7f1d38..2aff3b04c 100644 --- a/litellm/cost_calculator.py +++ b/litellm/cost_calculator.py @@ -28,6 +28,9 @@ from litellm.llms.azure_ai.cost_calculator import ( from litellm.llms.AzureOpenAI.cost_calculation import ( cost_per_token as azure_openai_cost_per_token, ) +from litellm.llms.bedrock.image.cost_calculator import ( + cost_calculator as bedrock_image_cost_calculator, +) from litellm.llms.cohere.cost_calculator import ( cost_per_query as cohere_rerank_cost_per_query, ) @@ -521,12 +524,13 @@ def completion_cost( # noqa: PLR0915 custom_llm_provider=None, region_name=None, # used for bedrock pricing ### IMAGE GEN ### - size=None, + size: Optional[str] = None, quality=None, n=None, # number of images ### CUSTOM PRICING ### custom_cost_per_token: Optional[CostPerToken] = None, custom_cost_per_second: Optional[float] = None, + optional_params: Optional[dict] = None, ) -> float: """ Calculate the cost of a given completion call fot GPT-3.5-turbo, llama2, any litellm supported llm. @@ -667,7 +671,17 @@ def completion_cost( # noqa: PLR0915 # https://cloud.google.com/vertex-ai/generative-ai/pricing # Vertex Charges Flat $0.20 per image return 0.020 - + elif custom_llm_provider == "bedrock": + if isinstance(completion_response, ImageResponse): + return bedrock_image_cost_calculator( + model=model, + size=size, + image_response=completion_response, + optional_params=optional_params, + ) + raise TypeError( + "completion_response must be of type ImageResponse for bedrock image cost calculation" + ) if size is None: size = "1024-x-1024" # openai default # fix size to match naming convention @@ -677,9 +691,9 @@ def completion_cost( # noqa: PLR0915 image_gen_model_name_with_quality = image_gen_model_name if quality is not None: image_gen_model_name_with_quality = f"{quality}/{image_gen_model_name}" - size = size.split("-x-") - height = int(size[0]) # if it's 1024-x-1024 vs. 1024x1024 - width = int(size[1]) + size_parts = size.split("-x-") + height = int(size_parts[0]) # if it's 1024-x-1024 vs. 1024x1024 + width = int(size_parts[1]) verbose_logger.debug(f"image_gen_model_name: {image_gen_model_name}") verbose_logger.debug( f"image_gen_model_name_with_quality: {image_gen_model_name_with_quality}" @@ -844,6 +858,7 @@ def response_cost_calculator( model=model, call_type=call_type, custom_llm_provider=custom_llm_provider, + optional_params=optional_params, ) else: if custom_pricing is True: # override defaults if custom pricing is set diff --git a/litellm/llms/bedrock/image/cost_calculator.py b/litellm/llms/bedrock/image/cost_calculator.py new file mode 100644 index 000000000..0a20b44cb --- /dev/null +++ b/litellm/llms/bedrock/image/cost_calculator.py @@ -0,0 +1,41 @@ +from typing import Optional + +import litellm +from litellm.types.utils import ImageResponse + + +def cost_calculator( + model: str, + image_response: ImageResponse, + size: Optional[str] = None, + optional_params: Optional[dict] = None, +) -> float: + """ + Bedrock image generation cost calculator + + Handles both Stability 1 and Stability 3 models + """ + if litellm.AmazonStability3Config()._is_stability_3_model(model=model): + pass + else: + # Stability 1 models + optional_params = optional_params or {} + + # see model_prices_and_context_window.json for details on how steps is used + # Reference pricing by steps for stability 1: https://aws.amazon.com/bedrock/pricing/ + _steps = optional_params.get("steps", 50) + steps = "max-steps" if _steps > 50 else "50-steps" + + # size is stored in model_prices_and_context_window.json as 1024-x-1024 + # current size has 1024x1024 + size = size or "1024-x-1024" + model = f"{size}/{steps}/{model}" + + _model_info = litellm.get_model_info( + model=model, + custom_llm_provider="bedrock", + ) + + output_cost_per_image: float = _model_info.get("output_cost_per_image") or 0.0 + num_images: int = len(image_response.data) + return output_cost_per_image * num_images diff --git a/litellm/utils.py b/litellm/utils.py index b10c94859..1e8025be4 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4636,6 +4636,7 @@ def get_model_info( # noqa: PLR0915 "output_cost_per_character_above_128k_tokens", None ), output_cost_per_second=_model_info.get("output_cost_per_second", None), + output_cost_per_image=_model_info.get("output_cost_per_image", None), output_vector_size=_model_info.get("output_vector_size", None), litellm_provider=_model_info.get( "litellm_provider", custom_llm_provider diff --git a/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py b/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py index e04eb2a1a..10845a895 100644 --- a/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py +++ b/tests/image_gen_tests/test_bedrock_image_gen_unit_tests.py @@ -9,12 +9,14 @@ from openai.types.image import Image logging.basicConfig(level=logging.DEBUG) load_dotenv() import asyncio -import os sys.path.insert( 0, os.path.abspath("../..") ) # Adds the parent directory to the system path import pytest +from litellm.llms.bedrock.image.cost_calculator import cost_calculator +from litellm.types.utils import ImageResponse, ImageObject +import os import litellm from litellm.llms.bedrock.image.amazon_stability3_transformation import ( @@ -27,7 +29,6 @@ from litellm.types.llms.bedrock import ( AmazonStability3TextToImageRequest, AmazonStability3TextToImageResponse, ) -from litellm.types.utils import ImageResponse from unittest.mock import MagicMock, patch from litellm.llms.bedrock.image.image_handler import ( BedrockImageGeneration, @@ -149,7 +150,7 @@ def test_get_request_body_stability(): handler = BedrockImageGeneration() prompt = "A beautiful sunset" optional_params = {"cfg_scale": 7} - model = "stability.stable-diffusion-xl" + model = "stability.stable-diffusion-xl-v1" result = handler._get_request_body( model=model, prompt=prompt, optional_params=optional_params @@ -185,3 +186,80 @@ def test_transform_response_dict_to_openai_response_stability3(): assert len(result.data) == 2 assert all(hasattr(img, "b64_json") for img in result.data) assert [img.b64_json for img in result.data] == ["base64_image_1", "base64_image_2"] + + +def test_cost_calculator_stability3(): + # Mock image response + image_response = ImageResponse( + data=[ + ImageObject(b64_json="base64_image_1"), + ImageObject(b64_json="base64_image_2"), + ] + ) + + cost = cost_calculator( + model="stability.sd3-large-v1:0", + size="1024-x-1024", + image_response=image_response, + ) + + print("cost", cost) + + # Assert cost is calculated correctly for 2 images + assert isinstance(cost, float) + assert cost > 0 + + +def test_cost_calculator_stability1(): + # Mock image response + image_response = ImageResponse(data=[ImageObject(b64_json="base64_image_1")]) + + # Test with different step configurations + cost_default_steps = cost_calculator( + model="stability.stable-diffusion-xl-v1", + size="1024-x-1024", + image_response=image_response, + optional_params={"steps": 50}, + ) + + cost_max_steps = cost_calculator( + model="stability.stable-diffusion-xl-v1", + size="1024-x-1024", + image_response=image_response, + optional_params={"steps": 51}, + ) + + # Assert costs are calculated correctly + assert isinstance(cost_default_steps, float) + assert isinstance(cost_max_steps, float) + assert cost_default_steps > 0 + assert cost_max_steps > 0 + # Max steps should be more expensive + assert cost_max_steps > cost_default_steps + + +def test_cost_calculator_with_no_optional_params(): + image_response = ImageResponse(data=[ImageObject(b64_json="base64_image_1")]) + + cost = cost_calculator( + model="stability.stable-diffusion-xl-v0", + size="512-x-512", + image_response=image_response, + optional_params=None, + ) + + assert isinstance(cost, float) + assert cost > 0 + + +def test_cost_calculator_basic(): + image_response = ImageResponse(data=[ImageObject(b64_json="base64_image_1")]) + + cost = cost_calculator( + model="stability.stable-diffusion-xl-v1", + image_response=image_response, + optional_params=None, + ) + + assert isinstance(cost, float) + assert cost > 0 diff --git a/tests/image_gen_tests/test_image_generation.py b/tests/image_gen_tests/test_image_generation.py index cf46f90bb..e94d62c1f 100644 --- a/tests/image_gen_tests/test_image_generation.py +++ b/tests/image_gen_tests/test_image_generation.py @@ -253,6 +253,9 @@ def test_image_generation_bedrock(model): ) print(f"response: {response}") + print("response hidden params", response._hidden_params) + + assert response._hidden_params["response_cost"] is not None from openai.types.images_response import ImagesResponse ImagesResponse.model_validate(response.model_dump()) From de2f9aed3a1d15309a5ef9d711a7c5b93fdc0ce6 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 21:00:39 -0800 Subject: [PATCH 63/67] fix raise correct error 404 when /key/info is called on non-existent key (#6653) * fix raise correct error on /key/info * add not_found_error error * fix key not found in DB error * use 1 helper for checking token hash * fix error code on key info * fix test key gen prisma * test_generate_and_call_key_info * test fix test_call_with_valid_model_using_all_models * fix key info tests --- litellm/proxy/_types.py | 1 + litellm/proxy/auth/route_checks.py | 10 +- .../key_management_endpoints.py | 57 +- litellm/proxy/utils.py | 26 +- .../local_testing/test_key_generate_prisma.py | 3469 +++++++++++++++++ .../test_route_check_unit_tests.py | 17 - .../test_key_generate_prisma.py | 61 +- tests/test_keys.py | 9 +- 8 files changed, 3593 insertions(+), 57 deletions(-) create mode 100644 tests/local_testing/test_key_generate_prisma.py diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index fd9ef8556..2d869af85 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -1894,6 +1894,7 @@ class ProxyErrorTypes(str, enum.Enum): auth_error = "auth_error" internal_server_error = "internal_server_error" bad_request_error = "bad_request_error" + not_found_error = "not_found_error" class SSOUserDefinedValues(TypedDict): diff --git a/litellm/proxy/auth/route_checks.py b/litellm/proxy/auth/route_checks.py index a237b0bdd..1b593162c 100644 --- a/litellm/proxy/auth/route_checks.py +++ b/litellm/proxy/auth/route_checks.py @@ -44,14 +44,8 @@ class RouteChecks: route in LiteLLMRoutes.info_routes.value ): # check if user allowed to call an info route if route == "/key/info": - # check if user can access this route - query_params = request.query_params - key = query_params.get("key") - if key is not None and hash_token(token=key) != api_key: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="user not allowed to access this key's info", - ) + # handled by function itself + pass elif route == "/user/info": # check if user can access this route query_params = request.query_params diff --git a/litellm/proxy/management_endpoints/key_management_endpoints.py b/litellm/proxy/management_endpoints/key_management_endpoints.py index 2c240a17f..c50aa0f9f 100644 --- a/litellm/proxy/management_endpoints/key_management_endpoints.py +++ b/litellm/proxy/management_endpoints/key_management_endpoints.py @@ -32,7 +32,7 @@ from litellm.proxy.auth.auth_checks import ( ) from litellm.proxy.auth.user_api_key_auth import user_api_key_auth from litellm.proxy.management_helpers.utils import management_endpoint_wrapper -from litellm.proxy.utils import _duration_in_seconds +from litellm.proxy.utils import _duration_in_seconds, _hash_token_if_needed from litellm.secret_managers.main import get_secret router = APIRouter() @@ -734,13 +734,37 @@ async def info_key_fn( raise Exception( "Database not connected. Connect a database to your proxy - https://docs.litellm.ai/docs/simple_proxy#managing-auth---virtual-keys" ) - if key is None: - key = user_api_key_dict.api_key - key_info = await prisma_client.get_data(token=key) + + # default to using Auth token if no key is passed in + key = key or user_api_key_dict.api_key + hashed_key: Optional[str] = key + if key is not None: + hashed_key = _hash_token_if_needed(token=key) + key_info = await prisma_client.db.litellm_verificationtoken.find_unique( + where={"token": hashed_key}, # type: ignore + include={"litellm_budget_table": True}, + ) if key_info is None: + raise ProxyException( + message="Key not found in database", + type=ProxyErrorTypes.not_found_error, + param="key", + code=status.HTTP_404_NOT_FOUND, + ) + + if ( + _can_user_query_key_info( + user_api_key_dict=user_api_key_dict, + key=key, + key_info=key_info, + ) + is not True + ): raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail={"message": "No keys found"}, + status_code=status.HTTP_403_FORBIDDEN, + detail="You are not allowed to access this key's info. Your role={}".format( + user_api_key_dict.user_role + ), ) ## REMOVE HASHED TOKEN INFO BEFORE RETURNING ## try: @@ -1540,6 +1564,27 @@ async def key_health( ) +def _can_user_query_key_info( + user_api_key_dict: UserAPIKeyAuth, + key: Optional[str], + key_info: LiteLLM_VerificationToken, +) -> bool: + """ + Helper to check if the user has access to the key's info + """ + if ( + user_api_key_dict.user_role == LitellmUserRoles.PROXY_ADMIN.value + or user_api_key_dict.user_role == LitellmUserRoles.PROXY_ADMIN_VIEW_ONLY.value + ): + return True + elif user_api_key_dict.api_key == key: + return True + # user can query their own key info + elif key_info.user_id == user_api_key_dict.user_id: + return True + return False + + async def test_key_logging( user_api_key_dict: UserAPIKeyAuth, request: Request, diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 9d33244a0..009d65873 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -1424,9 +1424,7 @@ class PrismaClient: # check if plain text or hash if token is not None: if isinstance(token, str): - hashed_token = token - if token.startswith("sk-"): - hashed_token = self.hash_token(token=token) + hashed_token = _hash_token_if_needed(token=token) verbose_proxy_logger.debug( f"PrismaClient: find_unique for token: {hashed_token}" ) @@ -1493,8 +1491,7 @@ class PrismaClient: if token is not None: where_filter["token"] = {} if isinstance(token, str): - if token.startswith("sk-"): - token = self.hash_token(token=token) + token = _hash_token_if_needed(token=token) where_filter["token"]["in"] = [token] elif isinstance(token, list): hashed_tokens = [] @@ -1630,9 +1627,7 @@ class PrismaClient: # check if plain text or hash if token is not None: if isinstance(token, str): - hashed_token = token - if token.startswith("sk-"): - hashed_token = self.hash_token(token=token) + hashed_token = _hash_token_if_needed(token=token) verbose_proxy_logger.debug( f"PrismaClient: find_unique for token: {hashed_token}" ) @@ -1912,8 +1907,7 @@ class PrismaClient: if token is not None: print_verbose(f"token: {token}") # check if plain text or hash - if token.startswith("sk-"): - token = self.hash_token(token=token) + token = _hash_token_if_needed(token=token) db_data["token"] = token response = await self.db.litellm_verificationtoken.update( where={"token": token}, # type: ignore @@ -2424,6 +2418,18 @@ def hash_token(token: str): return hashed_token +def _hash_token_if_needed(token: str) -> str: + """ + Hash the token if it's a string and starts with "sk-" + + Else return the token as is + """ + if token.startswith("sk-"): + return hash_token(token=token) + else: + return token + + def _extract_from_regex(duration: str) -> Tuple[int, str]: match = re.match(r"(\d+)(mo|[smhd]?)", duration) diff --git a/tests/local_testing/test_key_generate_prisma.py b/tests/local_testing/test_key_generate_prisma.py new file mode 100644 index 000000000..a1e136313 --- /dev/null +++ b/tests/local_testing/test_key_generate_prisma.py @@ -0,0 +1,3469 @@ +# Test the following scenarios: +# 1. Generate a Key, and use it to make a call +# 2. Make a call with invalid key, expect it to fail +# 3. Make a call to a key with invalid model - expect to fail +# 4. Make a call to a key with valid model - expect to pass +# 5. Make a call with user over budget, expect to fail +# 6. Make a streaming chat/completions call with user over budget, expect to fail +# 7. Make a call with an key that never expires, expect to pass +# 8. Make a call with an expired key, expect to fail +# 9. Delete a Key +# 10. Generate a key, call key/info. Assert info returned is the same as generated key info +# 11. Generate a Key, cal key/info, call key/update, call key/info +# 12. Make a call with key over budget, expect to fail +# 14. Make a streaming chat/completions call with key over budget, expect to fail +# 15. Generate key, when `allow_user_auth`=False - check if `/key/info` returns key_name=null +# 16. Generate key, when `allow_user_auth`=True - check if `/key/info` returns key_name=sk... + + +# function to call to generate key - async def new_user(data: NewUserRequest): +# function to validate a request - async def user_auth(request: Request): + +import os +import sys +import traceback +import uuid +from datetime import datetime + +from dotenv import load_dotenv +from fastapi import Request +from fastapi.routing import APIRoute +import httpx + +load_dotenv() +import io +import os +import time + +# this file is to test litellm/proxy + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import asyncio +import logging + +import pytest + +import litellm +from litellm._logging import verbose_proxy_logger +from litellm.proxy.management_endpoints.internal_user_endpoints import ( + new_user, + user_info, + user_update, +) +from litellm.proxy.auth.auth_checks import get_key_object +from litellm.proxy.management_endpoints.key_management_endpoints import ( + delete_key_fn, + generate_key_fn, + generate_key_helper_fn, + info_key_fn, + list_keys, + regenerate_key_fn, + update_key_fn, +) +from litellm.proxy.management_endpoints.team_endpoints import ( + new_team, + team_info, + update_team, +) +from litellm.proxy.proxy_server import ( + LitellmUserRoles, + audio_transcriptions, + chat_completion, + completion, + embeddings, + image_generation, + model_list, + moderations, + new_end_user, + user_api_key_auth, +) +from litellm.proxy.spend_tracking.spend_management_endpoints import ( + global_spend, + spend_key_fn, + spend_user_fn, + view_spend_logs, +) +from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend + +verbose_proxy_logger.setLevel(level=logging.DEBUG) + +from starlette.datastructures import URL + +from litellm.caching.caching import DualCache +from litellm.proxy._types import ( + DynamoDBArgs, + GenerateKeyRequest, + KeyRequest, + LiteLLM_UpperboundKeyGenerateParams, + NewCustomerRequest, + NewTeamRequest, + NewUserRequest, + ProxyErrorTypes, + ProxyException, + UpdateKeyRequest, + UpdateTeamRequest, + UpdateUserRequest, + UserAPIKeyAuth, +) + +proxy_logging_obj = ProxyLogging(user_api_key_cache=DualCache()) + + +request_data = { + "model": "azure-gpt-3.5", + "messages": [ + {"role": "user", "content": "this is my new test. respond in 50 lines"} + ], +} + + +@pytest.fixture +def prisma_client(): + from litellm.proxy.proxy_cli import append_query_params + + ### add connection pool + pool timeout args + params = {"connection_limit": 100, "pool_timeout": 60} + database_url = os.getenv("DATABASE_URL") + modified_url = append_query_params(database_url, params) + os.environ["DATABASE_URL"] = modified_url + + # Assuming PrismaClient is a class that needs to be instantiated + prisma_client = PrismaClient( + database_url=os.environ["DATABASE_URL"], proxy_logging_obj=proxy_logging_obj + ) + + # Reset litellm.proxy.proxy_server.prisma_client to None + litellm.proxy.proxy_server.litellm_proxy_budget_name = ( + f"litellm-proxy-budget-{time.time()}" + ) + litellm.proxy.proxy_server.user_custom_key_generate = None + + return prisma_client + + +@pytest.mark.asyncio() +@pytest.mark.flaky(retries=6, delay=1) +async def test_new_user_response(prisma_client): + try: + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + _team_id = "ishaan-special-team_{}".format(uuid.uuid4()) + await new_team( + NewTeamRequest( + team_id=_team_id, + ), + http_request=Request(scope={"type": "http"}), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + + _response = await new_user( + data=NewUserRequest( + models=["azure-gpt-3.5"], + team_id=_team_id, + tpm_limit=20, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + ) + print(_response) + assert _response.models == ["azure-gpt-3.5"] + assert _response.team_id == _team_id + assert _response.tpm_limit == 20 + + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.parametrize( + "api_route", + [ + # chat_completion + APIRoute(path="/engines/{model}/chat/completions", endpoint=chat_completion), + APIRoute( + path="/openai/deployments/{model}/chat/completions", + endpoint=chat_completion, + ), + APIRoute(path="/chat/completions", endpoint=chat_completion), + APIRoute(path="/v1/chat/completions", endpoint=chat_completion), + # completion + APIRoute(path="/completions", endpoint=completion), + APIRoute(path="/v1/completions", endpoint=completion), + APIRoute(path="/engines/{model}/completions", endpoint=completion), + APIRoute(path="/openai/deployments/{model}/completions", endpoint=completion), + # embeddings + APIRoute(path="/v1/embeddings", endpoint=embeddings), + APIRoute(path="/embeddings", endpoint=embeddings), + APIRoute(path="/openai/deployments/{model}/embeddings", endpoint=embeddings), + # image generation + APIRoute(path="/v1/images/generations", endpoint=image_generation), + APIRoute(path="/images/generations", endpoint=image_generation), + # audio transcriptions + APIRoute(path="/v1/audio/transcriptions", endpoint=audio_transcriptions), + APIRoute(path="/audio/transcriptions", endpoint=audio_transcriptions), + # moderations + APIRoute(path="/v1/moderations", endpoint=moderations), + APIRoute(path="/moderations", endpoint=moderations), + # model_list + APIRoute(path="/v1/models", endpoint=model_list), + APIRoute(path="/models", endpoint=model_list), + # threads + APIRoute( + path="/v1/threads/thread_49EIN5QF32s4mH20M7GFKdlZ", endpoint=model_list + ), + ], + ids=lambda route: str(dict(route=route.endpoint.__name__, path=route.path)), +) +def test_generate_and_call_with_valid_key(prisma_client, api_route): + # 1. Generate a Key, and use it to make a call + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + user_api_key_dict = UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ) + request = NewUserRequest(user_role=LitellmUserRoles.INTERNAL_USER) + key = await new_user(request, user_api_key_dict=user_api_key_dict) + print(key) + user_id = key.user_id + + # check /user/info to verify user_role was set correctly + new_user_info = await user_info( + user_id=user_id, user_api_key_dict=user_api_key_dict + ) + new_user_info = new_user_info.user_info + print("new_user_info=", new_user_info) + assert new_user_info["user_role"] == LitellmUserRoles.INTERNAL_USER + assert new_user_info["user_id"] == user_id + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + assert generated_key not in user_api_key_cache.in_memory_cache.cache_dict + + value_from_prisma = await prisma_client.get_data( + token=generated_key, + ) + print("token from prisma", value_from_prisma) + + request = Request( + { + "type": "http", + "route": api_route, + "path": api_route.path, + "headers": [("Authorization", bearer_token)], + } + ) + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + asyncio.run(test()) + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_call_with_invalid_key(prisma_client): + # 2. Make a call with invalid key, expect it to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + generated_key = "sk-126666" + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}, receive=None) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("got result", result) + pytest.fail(f"This should have failed!. IT's an invalid key") + + asyncio.run(test()) + except Exception as e: + print("Got Exception", e) + print(e.message) + assert "Authentication Error, Invalid proxy server token passed" in e.message + pass + + +def test_call_with_invalid_model(prisma_client): + litellm.set_verbose = True + # 3. Make a call to a key with an invalid model - expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(models=["mistral"]) + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + return b'{"model": "gemini-pro-vision"}' + + request.body = return_body + + # use generated key to auth in + print( + "Bearer token being sent to user_api_key_auth() - {}".format( + bearer_token + ) + ) + result = await user_api_key_auth(request=request, api_key=bearer_token) + pytest.fail(f"This should have failed!. IT's an invalid model") + + asyncio.run(test()) + except Exception as e: + assert ( + e.message + == "Authentication Error, API Key not allowed to access model. This token can only access models=['mistral']. Tried to access gemini-pro-vision" + ) + pass + + +def test_call_with_valid_model(prisma_client): + # 4. Make a call to a key with a valid model - expect to pass + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(models=["mistral"]) + key = await new_user( + request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + return b'{"model": "mistral"}' + + request.body = return_body + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + asyncio.run(test()) + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +@pytest.mark.asyncio +async def test_call_with_valid_model_using_all_models(prisma_client): + """ + Do not delete + this is the Admin UI flow + 1. Create a team with model = `all-proxy-models` + 2. Create a key with model = `all-team-models` + 3. Call /chat/completions with the key -> expect to pass + """ + # Make a call to a key with model = `all-proxy-models` this is an Alias from LiteLLM Admin UI + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + await litellm.proxy.proxy_server.prisma_client.connect() + + team_request = NewTeamRequest( + team_alias="testing-team", + models=["all-proxy-models"], + ) + + new_team_response = await new_team( + data=team_request, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + print("new_team_response", new_team_response) + created_team_id = new_team_response["team_id"] + + request = GenerateKeyRequest( + models=["all-team-models"], team_id=created_team_id + ) + key = await generate_key_fn(data=request) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + return b'{"model": "mistral"}' + + request.body = return_body + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # call /key/info for key - models == "all-proxy-models" + key_info = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token=bearer_token + ), + ) + print("key_info", key_info) + models = key_info["info"]["models"] + assert models == ["all-team-models"] + + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_call_with_user_over_budget(prisma_client): + # 5. Make a call with a key over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(max_budget=0.00001) + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + resp = ModelResponse( + id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": generated_key, + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await asyncio.sleep(5) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail("This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + print("got an errror=", e) + error_detail = e.message + assert "ExceededBudget:" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_end_user_cache_write_unit_test(): + """ + assert end user object is being written to cache as expected + """ + pass + + +def test_call_with_end_user_over_budget(prisma_client): + # Test if a user passed to /chat/completions is tracked & fails when they cross their budget + # we only check this when litellm.max_end_user_budget is set + import random + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm, "max_end_user_budget", 0.00001) + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + user = f"ishaan {uuid.uuid4().hex}" + request = NewCustomerRequest( + user_id=user, max_budget=0.000001 + ) # create a key with no budget + await new_end_user( + request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + bearer_token = "Bearer sk-1234" + + result = await user_api_key_auth(request=request, api_key=bearer_token) + + async def return_body(): + return_string = f'{{"model": "gemini-pro-vision", "user": "{user}"}}' + # return string as bytes + return return_string.encode() + + request.body = return_body + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + resp = ModelResponse( + id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": "sk-1234", + "user_api_key_user_id": user, + }, + "proxy_server_request": { + "body": { + "user": user, + } + }, + }, + "response_cost": 10, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + + await asyncio.sleep(10) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail("This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + error_detail = e.message + assert "Budget has been exceeded! Current" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_call_with_proxy_over_budget(prisma_client): + # 5.1 Make a call with a proxy over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}" + setattr( + litellm.proxy.proxy_server, + "litellm_proxy_admin_name", + litellm_proxy_budget_name, + ) + setattr(litellm, "max_budget", 0.00001) + from litellm.proxy.proxy_server import user_api_key_cache + + user_api_key_cache.set_cache( + key="{}:spend".format(litellm_proxy_budget_name), value=0 + ) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache) + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest() + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + resp = ModelResponse( + id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": generated_key, + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + + await asyncio.sleep(5) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + if hasattr(e, "message"): + error_detail = e.message + else: + error_detail = traceback.format_exc() + assert "Budget has been exceeded" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_call_with_user_over_budget_stream(prisma_client): + # 6. Make a call with a key over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + import logging + + from litellm._logging import verbose_proxy_logger + + litellm.set_verbose = True + verbose_proxy_logger.setLevel(logging.DEBUG) + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(max_budget=0.00001) + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + resp = ModelResponse( + id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "stream": True, + "complete_streaming_response": resp, + "litellm_params": { + "metadata": { + "user_api_key": generated_key, + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=ModelResponse(), + start_time=datetime.now(), + end_time=datetime.now(), + ) + await asyncio.sleep(5) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail("This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + error_detail = e.message + assert "ExceededBudget:" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_call_with_proxy_over_budget_stream(prisma_client): + # 6.1 Make a call with a global proxy over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm_proxy_budget_name = f"litellm-proxy-budget-{time.time()}" + setattr( + litellm.proxy.proxy_server, + "litellm_proxy_admin_name", + litellm_proxy_budget_name, + ) + setattr(litellm, "max_budget", 0.00001) + from litellm.proxy.proxy_server import user_api_key_cache + + user_api_key_cache.set_cache( + key="{}:spend".format(litellm_proxy_budget_name), value=0 + ) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", user_api_key_cache) + + import logging + + from litellm._logging import verbose_proxy_logger + + litellm.set_verbose = True + verbose_proxy_logger.setLevel(logging.DEBUG) + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + ## CREATE PROXY + USER BUDGET ## + # request = NewUserRequest( + # max_budget=0.00001, user_id=litellm_proxy_budget_name + # ) + request = NewUserRequest() + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + resp = ModelResponse( + id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "stream": True, + "complete_streaming_response": resp, + "litellm_params": { + "metadata": { + "user_api_key": generated_key, + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=ModelResponse(), + start_time=datetime.now(), + end_time=datetime.now(), + ) + await asyncio.sleep(5) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + error_detail = e.message + assert "Budget has been exceeded" in error_detail + print(vars(e)) + + +def test_generate_and_call_with_valid_key_never_expires(prisma_client): + # 7. Make a call with an key that never expires, expect to pass + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(duration=None) + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + asyncio.run(test()) + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_generate_and_call_with_expired_key(prisma_client): + # 8. Make a call with an expired key, expect to fail + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest(duration="0s") + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. IT's an expired key") + + asyncio.run(test()) + except Exception as e: + print("Got Exception", e) + print(e.message) + assert "Authentication Error" in e.message + assert e.type == ProxyErrorTypes.expired_key + + pass + + +def test_delete_key(prisma_client): + # 9. Generate a Key, delete it. Check if deletion works fine + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "user_custom_auth", None) + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + request = NewUserRequest() + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + delete_key_request = KeyRequest(keys=[generated_key]) + + bearer_token = "Bearer sk-1234" + + request = Request(scope={"type": "http"}) + request._url = URL(url="/key/delete") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print(f"result: {result}") + result.user_role = LitellmUserRoles.PROXY_ADMIN + # delete the key + result_delete_key = await delete_key_fn( + data=delete_key_request, user_api_key_dict=result + ) + print("result from delete key", result_delete_key) + assert result_delete_key == {"deleted_keys": [generated_key]} + + assert generated_key not in user_api_key_cache.in_memory_cache.cache_dict + assert ( + hash_token(generated_key) + not in user_api_key_cache.in_memory_cache.cache_dict + ) + + asyncio.run(test()) + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_delete_key_auth(prisma_client): + # 10. Generate a Key, delete it, use it to make a call -> expect fail + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + request = NewUserRequest() + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + delete_key_request = KeyRequest(keys=[generated_key]) + + # delete the key + bearer_token = "Bearer sk-1234" + + request = Request(scope={"type": "http"}) + request._url = URL(url="/key/delete") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print(f"result: {result}") + result.user_role = LitellmUserRoles.PROXY_ADMIN + + result_delete_key = await delete_key_fn( + data=delete_key_request, user_api_key_dict=result + ) + + print("result from delete key", result_delete_key) + assert result_delete_key == {"deleted_keys": [generated_key]} + + request = Request(scope={"type": "http"}, receive=None) + request._url = URL(url="/chat/completions") + + assert generated_key not in user_api_key_cache.in_memory_cache.cache_dict + assert ( + hash_token(generated_key) + not in user_api_key_cache.in_memory_cache.cache_dict + ) + + # use generated key to auth in + bearer_token = "Bearer " + generated_key + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("got result", result) + pytest.fail(f"This should have failed!. IT's an invalid key") + + asyncio.run(test()) + except Exception as e: + print("Got Exception", e) + print(e.message) + assert "Authentication Error" in e.message + pass + + +def test_generate_and_call_key_info(prisma_client): + # 10. Generate a Key, cal key/info + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest( + metadata={"team": "litellm-team3", "project": "litellm-project3"} + ) + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + + # use generated key to auth in + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["max_parallel_requests"] == None + assert result["info"]["metadata"] == { + "team": "litellm-team3", + "project": "litellm-project3", + } + + # cleanup - delete key + delete_key_request = KeyRequest(keys=[generated_key]) + bearer_token = "Bearer sk-1234" + + request = Request(scope={"type": "http"}) + request._url = URL(url="/key/delete") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print(f"result: {result}") + result.user_role = LitellmUserRoles.PROXY_ADMIN + + result_delete_key = await delete_key_fn( + data=delete_key_request, user_api_key_dict=result + ) + + asyncio.run(test()) + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_generate_and_update_key(prisma_client): + # 11. Generate a Key, cal key/info, call key/update, call key/info + # Check if data gets updated + # Check if untouched data does not get updated + import uuid + + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + + # create team "litellm-core-infra@gmail.com"" + print("creating team litellm-core-infra@gmail.com") + _team_1 = "litellm-core-infra@gmail.com_{}".format(uuid.uuid4()) + await new_team( + NewTeamRequest( + team_id=_team_1, + ), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + http_request=Request(scope={"type": "http"}), + ) + + _team_2 = "ishaan-special-team_{}".format(uuid.uuid4()) + await new_team( + NewTeamRequest( + team_id=_team_2, + ), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + http_request=Request(scope={"type": "http"}), + ) + + request = NewUserRequest( + metadata={"project": "litellm-project3"}, + team_id=_team_1, + ) + + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + + # use generated key to auth in + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["max_parallel_requests"] == None + assert result["info"]["metadata"] == { + "project": "litellm-project3", + } + assert result["info"]["team_id"] == _team_1 + + request = Request(scope={"type": "http"}) + request._url = URL(url="/update/key") + + # update the key + response1 = await update_key_fn( + request=Request, + data=UpdateKeyRequest( + key=generated_key, + models=["ada", "babbage", "curie", "davinci"], + ), + ) + + print("response1=", response1) + + # update the team id + response2 = await update_key_fn( + request=Request, + data=UpdateKeyRequest(key=generated_key, team_id=_team_2), + ) + print("response2=", response2) + + # get info on key after update + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["max_parallel_requests"] == None + assert result["info"]["metadata"] == { + "project": "litellm-project3", + } + assert result["info"]["models"] == ["ada", "babbage", "curie", "davinci"] + assert result["info"]["team_id"] == _team_2 + + # cleanup - delete key + delete_key_request = KeyRequest(keys=[generated_key]) + + # delete the key + bearer_token = "Bearer sk-1234" + + request = Request(scope={"type": "http"}) + request._url = URL(url="/key/delete") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print(f"result: {result}") + result.user_role = LitellmUserRoles.PROXY_ADMIN + + result_delete_key = await delete_key_fn( + data=delete_key_request, user_api_key_dict=result + ) + + asyncio.run(test()) + except Exception as e: + print("Got Exception", e) + pytest.fail(f"An exception occurred - {str(e)}\n{traceback.format_exc()}") + + +def test_key_generate_with_custom_auth(prisma_client): + # custom - generate key function + async def custom_generate_key_fn(data: GenerateKeyRequest) -> dict: + """ + Asynchronous function for generating a key based on the input data. + + Args: + data (GenerateKeyRequest): The input data for key generation. + + Returns: + dict: A dictionary containing the decision and an optional message. + { + "decision": False, + "message": "This violates LiteLLM Proxy Rules. No team id provided.", + } + """ + + # decide if a key should be generated or not + print("using custom auth function!") + data_json = data.json() # type: ignore + + # Unpacking variables + team_id = data_json.get("team_id") + duration = data_json.get("duration") + models = data_json.get("models") + aliases = data_json.get("aliases") + config = data_json.get("config") + spend = data_json.get("spend") + user_id = data_json.get("user_id") + max_parallel_requests = data_json.get("max_parallel_requests") + metadata = data_json.get("metadata") + tpm_limit = data_json.get("tpm_limit") + rpm_limit = data_json.get("rpm_limit") + + if team_id is not None and team_id == "litellm-core-infra@gmail.com": + # only team_id="litellm-core-infra@gmail.com" can make keys + return { + "decision": True, + } + else: + print("Failed custom auth") + return { + "decision": False, + "message": "This violates LiteLLM Proxy Rules. No team id provided.", + } + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr( + litellm.proxy.proxy_server, "user_custom_key_generate", custom_generate_key_fn + ) + try: + + async def test(): + try: + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest() + key = await generate_key_fn(request) + pytest.fail(f"Expected an exception. Got {key}") + except Exception as e: + # this should fail + print("Got Exception", e) + print(e.message) + print("First request failed!. This is expected") + assert ( + "This violates LiteLLM Proxy Rules. No team id provided." + in e.message + ) + + request_2 = GenerateKeyRequest( + team_id="litellm-core-infra@gmail.com", + ) + + key = await generate_key_fn(request_2) + print(key) + generated_key = key.key + + asyncio.run(test()) + except Exception as e: + print("Got Exception", e) + print(e.message) + pytest.fail(f"An exception occurred - {str(e)}") + + +def test_call_with_key_over_budget(prisma_client): + # 12. Make a call with a key over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest(max_budget=0.00001) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.caching.caching import Cache + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + litellm.cache = Cache() + import time + import uuid + + request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" + + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "model": "chatgpt-v-2", + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + # test spend_log was written and we can read it + spend_logs = await view_spend_logs( + request_id=request_id, + user_api_key_dict=UserAPIKeyAuth(api_key=generated_key), + ) + + print("read spend logs", spend_logs) + assert len(spend_logs) == 1 + + spend_log = spend_logs[0] + + assert spend_log.request_id == request_id + assert spend_log.spend == float("2e-05") + assert spend_log.model == "chatgpt-v-2" + assert ( + spend_log.cache_key + == "c891d64397a472e6deb31b87a5ac4d3ed5b2dcc069bc87e2afe91e6d64e95a1e" + ) + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail("This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + # print(f"Error - {str(e)}") + traceback.print_exc() + if hasattr(e, "message"): + error_detail = e.message + else: + error_detail = str(e) + assert "Budget has been exceeded" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_call_with_key_over_budget_no_cache(prisma_client): + # 12. Make a call with a key over budget, expect to fail + # ✅ Tests if spend trackign works when the key does not exist in memory + # Related to this: https://github.com/BerriAI/litellm/issues/3920 + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest(max_budget=0.00001) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + from litellm.proxy.proxy_server import user_api_key_cache + + user_api_key_cache.in_memory_cache.cache_dict = {} + setattr(litellm.proxy.proxy_server, "proxy_batch_write_at", 1) + + from litellm import Choices, Message, ModelResponse, Usage + from litellm.caching.caching import Cache + + litellm.cache = Cache() + import time + import uuid + + request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" + + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "model": "chatgpt-v-2", + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await asyncio.sleep(10) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + # test spend_log was written and we can read it + spend_logs = await view_spend_logs( + request_id=request_id, + user_api_key_dict=UserAPIKeyAuth(api_key=generated_key), + ) + + print("read spend logs", spend_logs) + assert len(spend_logs) == 1 + + spend_log = spend_logs[0] + + assert spend_log.request_id == request_id + assert spend_log.spend == float("2e-05") + assert spend_log.model == "chatgpt-v-2" + assert ( + spend_log.cache_key + == "c891d64397a472e6deb31b87a5ac4d3ed5b2dcc069bc87e2afe91e6d64e95a1e" + ) + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + # print(f"Error - {str(e)}") + traceback.print_exc() + if hasattr(e, "message"): + error_detail = e.message + else: + error_detail = str(e) + assert "Budget has been exceeded" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +def test_call_with_key_over_model_budget(prisma_client): + # 12. Make a call with a key over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + + async def test(): + await litellm.proxy.proxy_server.prisma_client.connect() + + # set budget for chatgpt-v-2 to 0.000001, expect the next request to fail + request = GenerateKeyRequest( + max_budget=1000, + model_max_budget={ + "chatgpt-v-2": 0.000001, + }, + metadata={"user_api_key": 0.0001}, + ) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + return b'{"model": "chatgpt-v-2"}' + + request.body = return_body + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + from litellm import Choices, Message, ModelResponse, Usage + from litellm.caching.caching import Cache + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + litellm.cache = Cache() + import time + import uuid + + request_id = f"chatcmpl-{uuid.uuid4()}" + + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "model": "chatgpt-v-2", + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00002, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + # test spend_log was written and we can read it + spend_logs = await view_spend_logs( + request_id=request_id, + user_api_key_dict=UserAPIKeyAuth(api_key=generated_key), + ) + + print("read spend logs", spend_logs) + assert len(spend_logs) == 1 + + spend_log = spend_logs[0] + + assert spend_log.request_id == request_id + assert spend_log.spend == float("2e-05") + assert spend_log.model == "chatgpt-v-2" + assert ( + spend_log.cache_key + == "c891d64397a472e6deb31b87a5ac4d3ed5b2dcc069bc87e2afe91e6d64e95a1e" + ) + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail("This should have failed!. They key crossed it's budget") + + asyncio.run(test()) + except Exception as e: + # print(f"Error - {str(e)}") + traceback.print_exc() + error_detail = e.message + assert "Budget has been exceeded!" in error_detail + assert isinstance(e, ProxyException) + assert e.type == ProxyErrorTypes.budget_exceeded + print(vars(e)) + + +@pytest.mark.asyncio() +async def test_call_with_key_never_over_budget(prisma_client): + # Make a call with a key with budget=None, it should never fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + try: + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest(max_budget=None) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key: {result}") + + # update spend using track_cost callback, make 2nd request, it should fail + import time + import uuid + + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + request_id = f"chatcmpl-{uuid.uuid4()}" + + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage( + prompt_tokens=210000, completion_tokens=200000, total_tokens=41000 + ), + ) + await track_cost_callback( + kwargs={ + "model": "chatgpt-v-2", + "stream": False, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 200000, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + except Exception as e: + pytest.fail(f"This should have not failed!. They key uses max_budget=None. {e}") + + +@pytest.mark.asyncio() +async def test_call_with_key_over_budget_stream(prisma_client): + # 14. Make a call with a key over budget, expect to fail + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + import logging + + from litellm._logging import verbose_proxy_logger + + litellm.set_verbose = True + verbose_proxy_logger.setLevel(logging.DEBUG) + try: + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest(max_budget=0.00001) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + print(f"generated_key: {generated_key}") + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + + # update spend using track_cost callback, make 2nd request, it should fail + import time + import uuid + + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "call_type": "acompletion", + "model": "sagemaker-chatgpt-v-2", + "stream": True, + "complete_streaming_response": resp, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00005, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + await update_spend( + prisma_client=prisma_client, + db_writer_client=None, + proxy_logging_obj=proxy_logging_obj, + ) + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. They key crossed it's budget") + + except Exception as e: + print("Got Exception", e) + error_detail = e.message + assert "Budget has been exceeded" in error_detail + + print(vars(e)) + + +@pytest.mark.asyncio() +async def test_view_spend_per_user(prisma_client): + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + try: + user_by_spend = await spend_user_fn(user_id=None) + assert type(user_by_spend) == list + assert len(user_by_spend) > 0 + first_user = user_by_spend[0] + + print("\nfirst_user=", first_user) + assert first_user["spend"] > 0 + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio() +async def test_view_spend_per_key(prisma_client): + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + try: + key_by_spend = await spend_key_fn() + assert type(key_by_spend) == list + assert len(key_by_spend) > 0 + first_key = key_by_spend[0] + + print("\nfirst_key=", first_key) + assert first_key.spend > 0 + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio() +async def test_key_name_null(prisma_client): + """ + - create key + - get key info + - assert key_name is null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + os.environ["DISABLE_KEY_NAME"] = "True" + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest() + key = await generate_key_fn(request) + print("generated key=", key) + generated_key = key.key + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["info"]["key_name"] is None + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + finally: + os.environ["DISABLE_KEY_NAME"] = "False" + + +@pytest.mark.asyncio() +async def test_key_name_set(prisma_client): + """ + - create key + - get key info + - assert key_name is not null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "general_settings", {"allow_user_auth": True}) + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest() + key = await generate_key_fn(request) + generated_key = key.key + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert isinstance(result["info"]["key_name"], str) + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio() +async def test_default_key_params(prisma_client): + """ + - create key + - get key info + - assert key_name is not null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "general_settings", {"allow_user_auth": True}) + litellm.default_key_generate_params = {"max_budget": 0.000122} + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest() + key = await generate_key_fn(request) + generated_key = key.key + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["info"]["max_budget"] == 0.000122 + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio() +async def test_upperbound_key_param_larger_budget(prisma_client): + """ + - create key + - get key info + - assert key_name is not null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm.upperbound_key_generate_params = LiteLLM_UpperboundKeyGenerateParams( + max_budget=0.001, budget_duration="1m" + ) + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest( + max_budget=200000, + budget_duration="30d", + ) + key = await generate_key_fn(request) + # print(result) + except Exception as e: + assert e.code == str(400) + + +@pytest.mark.asyncio() +async def test_upperbound_key_param_larger_duration(prisma_client): + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm.upperbound_key_generate_params = LiteLLM_UpperboundKeyGenerateParams( + max_budget=100, duration="14d" + ) + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest( + max_budget=10, + duration="30d", + ) + key = await generate_key_fn(request) + pytest.fail("Expected this to fail but it passed") + # print(result) + except Exception as e: + assert e.code == str(400) + + +@pytest.mark.asyncio() +async def test_upperbound_key_param_none_duration(prisma_client): + from datetime import datetime, timedelta + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + litellm.upperbound_key_generate_params = LiteLLM_UpperboundKeyGenerateParams( + max_budget=100, duration="14d" + ) + await litellm.proxy.proxy_server.prisma_client.connect() + try: + request = GenerateKeyRequest() + key = await generate_key_fn(request) + + print(key) + # print(result) + + assert key.max_budget == 100 + assert key.expires is not None + + _date_key_expires = key.expires.date() + _fourteen_days_from_now = (datetime.now() + timedelta(days=14)).date() + + assert _date_key_expires == _fourteen_days_from_now + except Exception as e: + pytest.fail(f"Got exception {e}") + + +def test_get_bearer_token(): + from litellm.proxy.auth.user_api_key_auth import _get_bearer_token + + # Test valid Bearer token + api_key = "Bearer valid_token" + result = _get_bearer_token(api_key) + assert result == "valid_token", f"Expected 'valid_token', got '{result}'" + + # Test empty API key + api_key = "" + result = _get_bearer_token(api_key) + assert result == "", f"Expected '', got '{result}'" + + # Test API key without Bearer prefix + api_key = "invalid_token" + result = _get_bearer_token(api_key) + assert result == "", f"Expected '', got '{result}'" + + # Test API key with Bearer prefix and extra spaces + api_key = " Bearer valid_token " + result = _get_bearer_token(api_key) + assert result == "", f"Expected '', got '{result}'" + + # Test API key with Bearer prefix and no token + api_key = "Bearer sk-1234" + result = _get_bearer_token(api_key) + assert result == "sk-1234", f"Expected 'valid_token', got '{result}'" + + +def test_update_logs_with_spend_logs_url(prisma_client): + """ + Unit test for making sure spend logs list is still updated when url passed in + """ + from litellm.proxy.proxy_server import _set_spend_logs_payload + + payload = {"startTime": datetime.now(), "endTime": datetime.now()} + _set_spend_logs_payload(payload=payload, prisma_client=prisma_client) + + assert len(prisma_client.spend_log_transactions) > 0 + + prisma_client.spend_log_transactions = [] + + spend_logs_url = "" + payload = {"startTime": datetime.now(), "endTime": datetime.now()} + _set_spend_logs_payload( + payload=payload, spend_logs_url=spend_logs_url, prisma_client=prisma_client + ) + + assert len(prisma_client.spend_log_transactions) > 0 + + +@pytest.mark.asyncio +async def test_user_api_key_auth(prisma_client): + from litellm.proxy.proxy_server import ProxyException + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "general_settings", {"allow_user_auth": True}) + await litellm.proxy.proxy_server.prisma_client.connect() + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + # Test case: No API Key passed in + try: + await user_api_key_auth(request, api_key=None) + pytest.fail(f"This should have failed!. IT's an invalid key") + except ProxyException as exc: + print(exc.message) + assert exc.message == "Authentication Error, No api key passed in." + + # Test case: Malformed API Key (missing 'Bearer ' prefix) + try: + await user_api_key_auth(request, api_key="my_token") + pytest.fail(f"This should have failed!. IT's an invalid key") + except ProxyException as exc: + print(exc.message) + assert ( + exc.message + == "Authentication Error, Malformed API Key passed in. Ensure Key has `Bearer ` prefix. Passed in: my_token" + ) + + # Test case: User passes empty string API Key + try: + await user_api_key_auth(request, api_key="") + pytest.fail(f"This should have failed!. IT's an invalid key") + except ProxyException as exc: + print(exc.message) + assert ( + exc.message + == "Authentication Error, Malformed API Key passed in. Ensure Key has `Bearer ` prefix. Passed in: " + ) + + +@pytest.mark.asyncio +async def test_user_api_key_auth_without_master_key(prisma_client): + # if master key is not set, expect all calls to go through + try: + from litellm.proxy.proxy_server import ProxyException + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", None) + setattr( + litellm.proxy.proxy_server, "general_settings", {"allow_user_auth": True} + ) + await litellm.proxy.proxy_server.prisma_client.connect() + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + # Test case: No API Key passed in + + await user_api_key_auth(request, api_key=None) + await user_api_key_auth(request, api_key="my_token") + await user_api_key_auth(request, api_key="") + await user_api_key_auth(request, api_key="Bearer " + "1234") + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio +async def test_key_with_no_permissions(prisma_client): + """ + - create key + - get key info + - assert key_name is null + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr(litellm.proxy.proxy_server, "general_settings", {"allow_user_auth": False}) + await litellm.proxy.proxy_server.prisma_client.connect() + try: + response = await generate_key_helper_fn( + request_type="key", + **{"duration": "1hr", "key_max_budget": 0, "models": [], "aliases": {}, "config": {}, "spend": 0, "user_id": "ishaan", "team_id": "litellm-dashboard"}, # type: ignore + ) + + print(response) + key = response["token"] + + # make a /chat/completions call -> it should fail + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key="Bearer " + key) + print("result from user auth with new key", result) + pytest.fail(f"This should have failed!. IT's an invalid key") + except Exception as e: + print("Got Exception", e) + print(e.message) + + +async def track_cost_callback_helper_fn(generated_key: str, user_id: str): + import uuid + + from litellm import Choices, Message, ModelResponse, Usage + from litellm.proxy.proxy_server import ( + _PROXY_track_cost_callback as track_cost_callback, + ) + + request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" + resp = ModelResponse( + id=request_id, + choices=[ + Choices( + finish_reason=None, + index=0, + message=Message( + content=" Sure! Here is a short poem about the sky:\n\nA canvas of blue, a", + role="assistant", + ), + ) + ], + model="gpt-35-turbo", # azure always has model written like this + usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), + ) + await track_cost_callback( + kwargs={ + "call_type": "acompletion", + "model": "sagemaker-chatgpt-v-2", + "stream": True, + "complete_streaming_response": resp, + "litellm_params": { + "metadata": { + "user_api_key": hash_token(generated_key), + "user_api_key_user_id": user_id, + } + }, + "response_cost": 0.00005, + }, + completion_response=resp, + start_time=datetime.now(), + end_time=datetime.now(), + ) + + +@pytest.mark.skip(reason="High traffic load test for spend tracking") +@pytest.mark.asyncio +async def test_proxy_load_test_db(prisma_client): + """ + Run 1500 req./s against track_cost_callback function + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + import logging + import time + + from litellm._logging import verbose_proxy_logger + + litellm.set_verbose = True + verbose_proxy_logger.setLevel(logging.DEBUG) + try: + start_time = time.time() + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest(max_budget=0.00001) + key = await generate_key_fn(request) + print(key) + + generated_key = key.key + user_id = key.user_id + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("result from user auth with new key", result) + # update spend using track_cost callback, make 2nd request, it should fail + n = 5000 + tasks = [ + track_cost_callback_helper_fn(generated_key=generated_key, user_id=user_id) + for _ in range(n) + ] + completions = await asyncio.gather(*tasks) + await asyncio.sleep(120) + try: + # call spend logs + spend_logs = await view_spend_logs( + api_key=generated_key, + user_api_key_dict=UserAPIKeyAuth(api_key=generated_key), + ) + + print(f"len responses: {len(spend_logs)}") + assert len(spend_logs) == n + print(n, time.time() - start_time, len(spend_logs)) + except Exception: + print(n, time.time() - start_time, 0) + raise Exception(f"it worked! key={key.key}") + except Exception as e: + pytest.fail(f"An exception occurred - {str(e)}") + + +@pytest.mark.asyncio() +async def test_master_key_hashing(prisma_client): + try: + import uuid + + print("prisma client=", prisma_client) + + master_key = "sk-1234" + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", master_key) + + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + _team_id = "ishaans-special-team_{}".format(uuid.uuid4()) + user_api_key_dict = UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ) + await new_team( + NewTeamRequest(team_id=_team_id), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + http_request=Request(scope={"type": "http"}), + ) + + _response = await new_user( + data=NewUserRequest( + models=["azure-gpt-3.5"], + team_id=_team_id, + tpm_limit=20, + ), + user_api_key_dict=user_api_key_dict, + ) + print(_response) + assert _response.models == ["azure-gpt-3.5"] + assert _response.team_id == _team_id + assert _response.tpm_limit == 20 + + bearer_token = "Bearer " + master_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # use generated key to auth in + result: UserAPIKeyAuth = await user_api_key_auth( + request=request, api_key=bearer_token + ) + + assert result.api_key == hash_token(master_key) + + except Exception as e: + print("Got Exception", e) + pytest.fail(f"Got exception {e}") + + +@pytest.mark.asyncio +async def test_reset_spend_authentication(prisma_client): + """ + 1. Test master key can access this route -> ONLY MASTER KEY SHOULD BE ABLE TO RESET SPEND + 2. Test that non-master key gets rejected + 3. Test that non-master key with role == LitellmUserRoles.PROXY_ADMIN or admin gets rejected + """ + + print("prisma client=", prisma_client) + + master_key = "sk-1234" + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", master_key) + + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + bearer_token = "Bearer " + master_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/global/spend/reset") + + # Test 1 - Master Key + result: UserAPIKeyAuth = await user_api_key_auth( + request=request, api_key=bearer_token + ) + + print("result from user auth with Master key", result) + assert result.token is not None + + # Test 2 - Non-Master Key + _response = await new_user( + data=NewUserRequest( + tpm_limit=20, + ) + ) + + generate_key = "Bearer " + _response.key + + try: + await user_api_key_auth(request=request, api_key=generate_key) + pytest.fail(f"This should have failed!. IT's an expired key") + except Exception as e: + print("Got Exception", e) + assert ( + "Tried to access route=/global/spend/reset, which is only for MASTER KEY" + in e.message + ) + + # Test 3 - Non-Master Key with role == LitellmUserRoles.PROXY_ADMIN or admin + _response = await new_user( + data=NewUserRequest( + user_role=LitellmUserRoles.PROXY_ADMIN, + tpm_limit=20, + ) + ) + + generate_key = "Bearer " + _response.key + + try: + await user_api_key_auth(request=request, api_key=generate_key) + pytest.fail(f"This should have failed!. IT's an expired key") + except Exception as e: + print("Got Exception", e) + assert ( + "Tried to access route=/global/spend/reset, which is only for MASTER KEY" + in e.message + ) + + +@pytest.mark.asyncio() +async def test_create_update_team(prisma_client): + """ + - Set max_budget, budget_duration, max_budget, tpm_limit, rpm_limit + - Assert response has correct values + + - Update max_budget, budget_duration, max_budget, tpm_limit, rpm_limit + - Assert response has correct values + + - Call team_info and assert response has correct values + """ + print("prisma client=", prisma_client) + + master_key = "sk-1234" + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", master_key) + import datetime + + await litellm.proxy.proxy_server.prisma_client.connect() + from litellm.proxy.proxy_server import user_api_key_cache + + _team_id = "test-team_{}".format(uuid.uuid4()) + response = await new_team( + NewTeamRequest( + team_id=_team_id, + max_budget=20, + budget_duration="30d", + tpm_limit=20, + rpm_limit=20, + ), + http_request=Request(scope={"type": "http"}), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + + print("RESPONSE from new_team", response) + + assert response["team_id"] == _team_id + assert response["max_budget"] == 20 + assert response["tpm_limit"] == 20 + assert response["rpm_limit"] == 20 + assert response["budget_duration"] == "30d" + assert response["budget_reset_at"] is not None and isinstance( + response["budget_reset_at"], datetime.datetime + ) + + # updating team budget duration and reset at + + response = await update_team( + UpdateTeamRequest( + team_id=_team_id, + max_budget=30, + budget_duration="2d", + tpm_limit=30, + rpm_limit=30, + ), + http_request=Request(scope={"type": "http"}), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + + print("RESPONSE from update_team", response) + _updated_info = response["data"] + _updated_info = dict(_updated_info) + + assert _updated_info["team_id"] == _team_id + assert _updated_info["max_budget"] == 30 + assert _updated_info["tpm_limit"] == 30 + assert _updated_info["rpm_limit"] == 30 + assert _updated_info["budget_duration"] == "2d" + assert _updated_info["budget_reset_at"] is not None and isinstance( + _updated_info["budget_reset_at"], datetime.datetime + ) + + # now hit team_info + try: + response = await team_info( + team_id=_team_id, + http_request=Request(scope={"type": "http"}), + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + except Exception as e: + print(e) + pytest.fail("Receives error - {}".format(e)) + + _team_info = response["team_info"] + _team_info = dict(_team_info) + + assert _team_info["team_id"] == _team_id + assert _team_info["max_budget"] == 30 + assert _team_info["tpm_limit"] == 30 + assert _team_info["rpm_limit"] == 30 + assert _team_info["budget_duration"] == "2d" + assert _team_info["budget_reset_at"] is not None and isinstance( + _team_info["budget_reset_at"], datetime.datetime + ) + + +@pytest.mark.asyncio() +async def test_enforced_params(prisma_client): + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + from litellm.proxy.proxy_server import general_settings + + general_settings["enforced_params"] = [ + "user", + "metadata", + "metadata.generation_name", + ] + + await litellm.proxy.proxy_server.prisma_client.connect() + request = NewUserRequest() + key = await new_user( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # Case 1: Missing user + async def return_body(): + return b'{"model": "gemini-pro-vision"}' + + request.body = return_body + try: + await user_api_key_auth(request=request, api_key=bearer_token) + pytest.fail(f"This should have failed!. IT's an invalid request") + except Exception as e: + assert ( + "BadRequest please pass param=user in request body. This is a required param" + in e.message + ) + + # Case 2: Missing metadata["generation_name"] + async def return_body_2(): + return b'{"model": "gemini-pro-vision", "user": "1234", "metadata": {}}' + + request.body = return_body_2 + try: + await user_api_key_auth(request=request, api_key=bearer_token) + pytest.fail(f"This should have failed!. IT's an invalid request") + except Exception as e: + assert ( + "Authentication Error, BadRequest please pass param=[metadata][generation_name] in request body" + in e.message + ) + general_settings.pop("enforced_params") + + +@pytest.mark.asyncio() +async def test_update_user_role(prisma_client): + """ + Tests if we update user role, incorrect values are not stored in cache + -> create a user with role == INTERNAL_USER + -> access an Admin only route -> expect to fail + -> update user role to == PROXY_ADMIN + -> access an Admin only route -> expect to succeed + """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + key = await new_user( + data=NewUserRequest( + user_role=LitellmUserRoles.INTERNAL_USER, + ) + ) + + print(key) + api_key = "Bearer " + key.key + + api_route = APIRoute(path="/global/spend", endpoint=global_spend) + request = Request( + { + "type": "http", + "route": api_route, + "path": "/global/spend", + "headers": [("Authorization", api_key)], + } + ) + + request._url = URL(url="/global/spend") + + # use generated key to auth in + try: + result = await user_api_key_auth(request=request, api_key=api_key) + print("result from user auth with new key", result) + except Exception as e: + print(e) + pass + + await user_update( + data=UpdateUserRequest( + user_id=key.user_id, user_role=LitellmUserRoles.PROXY_ADMIN + ) + ) + + # await asyncio.sleep(3) + + # use generated key to auth in + print("\n\nMAKING NEW REQUEST WITH UPDATED USER ROLE\n\n") + result = await user_api_key_auth(request=request, api_key=api_key) + print("result from user auth with new key", result) + + +@pytest.mark.asyncio() +async def test_custom_api_key_header_name(prisma_client): + """ """ + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr( + litellm.proxy.proxy_server, + "general_settings", + {"litellm_key_header_name": "x-litellm-key"}, + ) + await litellm.proxy.proxy_server.prisma_client.connect() + + api_route = APIRoute(path="/chat/completions", endpoint=chat_completion) + request = Request( + { + "type": "http", + "route": api_route, + "path": api_route.path, + "headers": [ + (b"x-litellm-key", b"Bearer sk-1234"), + ], + } + ) + + # this should pass because we pass the master key as X-Litellm-Key and litellm_key_header_name="X-Litellm-Key" in general settings + result = await user_api_key_auth(request=request, api_key="Bearer invalid-key") + + # this should fail because X-Litellm-Key is invalid + request = Request( + { + "type": "http", + "route": api_route, + "path": api_route.path, + "headers": [], + } + ) + try: + result = await user_api_key_auth(request=request, api_key="Bearer sk-1234") + pytest.fail(f"This should have failed!. invalid Auth on this request") + except Exception as e: + print("failed with error", e) + assert ( + "No LiteLLM Virtual Key pass. Please set header=x-litellm-key: Bearer " + in e.message + ) + pass + + # this should pass because X-Litellm-Key is valid + + +@pytest.mark.asyncio() +async def test_generate_key_with_model_tpm_limit(prisma_client): + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest( + metadata={ + "team": "litellm-team3", + "model_tpm_limit": {"gpt-4": 100}, + "model_rpm_limit": {"gpt-4": 2}, + } + ) + key = await generate_key_fn( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + + # use generated key to auth in + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["metadata"] == { + "team": "litellm-team3", + "model_tpm_limit": {"gpt-4": 100}, + "model_rpm_limit": {"gpt-4": 2}, + "tags": None, + } + + # Update model tpm_limit and rpm_limit + request = UpdateKeyRequest( + key=generated_key, + model_tpm_limit={"gpt-4": 200}, + model_rpm_limit={"gpt-4": 3}, + ) + _request = Request(scope={"type": "http"}) + _request._url = URL(url="/update/key") + + await update_key_fn(data=request, request=_request) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["metadata"] == { + "team": "litellm-team3", + "model_tpm_limit": {"gpt-4": 200}, + "model_rpm_limit": {"gpt-4": 3}, + "tags": None, + } + + +@pytest.mark.asyncio() +async def test_generate_key_with_guardrails(prisma_client): + print("prisma client=", prisma_client) + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + request = GenerateKeyRequest( + guardrails=["aporia-pre-call"], + metadata={ + "team": "litellm-team3", + }, + ) + key = await generate_key_fn( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print("generated key=", key) + + generated_key = key.key + + # use generated key to auth in + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["metadata"] == { + "team": "litellm-team3", + "guardrails": ["aporia-pre-call"], + "tags": None, + } + + # Update model tpm_limit and rpm_limit + request = UpdateKeyRequest( + key=generated_key, + guardrails=["aporia-pre-call", "aporia-post-call"], + ) + _request = Request(scope={"type": "http"}) + _request._url = URL(url="/update/key") + + await update_key_fn(data=request, request=_request) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, token="sk-1234" + ), + ) + print("result from info_key_fn", result) + assert result["key"] == generated_key + print("\n info for key=", result["info"]) + assert result["info"]["metadata"] == { + "team": "litellm-team3", + "guardrails": ["aporia-pre-call", "aporia-post-call"], + "tags": None, + } + + +@pytest.mark.asyncio() +@pytest.mark.flaky(retries=6, delay=1) +async def test_team_access_groups(prisma_client): + """ + Test team based model access groups + + - Test calling a model in the access group -> pass + - Test calling a model not in the access group -> fail + """ + litellm.set_verbose = True + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + # create router with access groups + litellm_router = litellm.Router( + model_list=[ + { + "model_name": "gemini-pro-vision", + "litellm_params": { + "model": "vertex_ai/gemini-1.0-pro-vision-001", + }, + "model_info": {"access_groups": ["beta-models"]}, + }, + { + "model_name": "gpt-4o", + "litellm_params": { + "model": "gpt-4o", + }, + "model_info": {"access_groups": ["beta-models"]}, + }, + ] + ) + setattr(litellm.proxy.proxy_server, "llm_router", litellm_router) + + # Create team with models=["beta-models"] + team_request = NewTeamRequest( + team_alias="testing-team", + models=["beta-models"], + ) + + new_team_response = await new_team( + data=team_request, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + print("new_team_response", new_team_response) + created_team_id = new_team_response["team_id"] + + # create key with team_id=created_team_id + request = GenerateKeyRequest( + team_id=created_team_id, + ) + + key = await generate_key_fn( + data=request, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + api_key="sk-1234", + user_id="1234", + ), + ) + print(key) + + generated_key = key.key + bearer_token = "Bearer " + generated_key + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + for model in ["gpt-4o", "gemini-pro-vision"]: + # Expect these to pass + async def return_body(): + return_string = f'{{"model": "{model}"}}' + # return string as bytes + return return_string.encode() + + request.body = return_body + + # use generated key to auth in + print( + "Bearer token being sent to user_api_key_auth() - {}".format(bearer_token) + ) + result = await user_api_key_auth(request=request, api_key=bearer_token) + + for model in ["gpt-4", "gpt-4o-mini", "gemini-experimental"]: + # Expect these to fail + async def return_body_2(): + return_string = f'{{"model": "{model}"}}' + # return string as bytes + return return_string.encode() + + request.body = return_body_2 + + # use generated key to auth in + print( + "Bearer token being sent to user_api_key_auth() - {}".format(bearer_token) + ) + try: + result = await user_api_key_auth(request=request, api_key=bearer_token) + pytest.fail(f"This should have failed!. IT's an invalid model") + except Exception as e: + print("got exception", e) + assert ( + "not allowed to call model" in e.message + and "Allowed team models" in e.message + ) + + +@pytest.mark.asyncio() +async def test_team_tags(prisma_client): + """ + - Test setting tags on a team + - Assert this is returned when calling /team/info + - Team/update with tags should update the tags + - Assert new tags are returned when calling /team/info + """ + litellm.set_verbose = True + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + + _new_team = NewTeamRequest( + team_alias="test-teamA", + tags=["teamA"], + ) + + new_team_response = await new_team( + data=_new_team, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + + print("new_team_response", new_team_response) + + # call /team/info + team_info_response = await team_info( + team_id=new_team_response["team_id"], + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + print("team_info_response", team_info_response) + + assert team_info_response["team_info"].metadata["tags"] == ["teamA"] + + # team update with tags + team_update_response = await update_team( + data=UpdateTeamRequest( + team_id=new_team_response["team_id"], + tags=["teamA", "teamB"], + ), + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + + print("team_update_response", team_update_response) + + # call /team/info again + team_info_response = await team_info( + team_id=new_team_response["team_id"], + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + http_request=Request(scope={"type": "http"}), + ) + + print("team_info_response", team_info_response) + assert team_info_response["team_info"].metadata["tags"] == ["teamA", "teamB"] + + +@pytest.mark.asyncio +async def test_admin_only_routes(prisma_client): + """ + Tests if setting admin_only_routes works + + only an admin should be able to access admin only routes + """ + litellm.set_verbose = True + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + general_settings = { + "allowed_routes": ["/embeddings", "/key/generate"], + "admin_only_routes": ["/key/generate"], + } + from litellm.proxy import proxy_server + + initial_general_settings = getattr(proxy_server, "general_settings") + + setattr(proxy_server, "general_settings", general_settings) + + admin_user = await new_user( + data=NewUserRequest( + user_name="admin", + user_role=LitellmUserRoles.PROXY_ADMIN, + ), + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) + + non_admin_user = await new_user( + data=NewUserRequest( + user_name="non-admin", + user_role=LitellmUserRoles.INTERNAL_USER, + ), + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) + + admin_user_key = admin_user.key + non_admin_user_key = non_admin_user.key + + assert admin_user_key is not None + assert non_admin_user_key is not None + + # assert non-admin can not access admin routes + request = Request(scope={"type": "http"}) + request._url = URL(url="/key/generate") + await user_api_key_auth( + request=request, + api_key="Bearer " + admin_user_key, + ) + + # this should pass + + try: + await user_api_key_auth( + request=request, + api_key="Bearer " + non_admin_user_key, + ) + pytest.fail("Expected this call to fail. User is over limit.") + except Exception as e: + print("error str=", str(e.message)) + error_str = str(e.message) + assert "Route" in error_str and "admin only route" in error_str + pass + + setattr(proxy_server, "general_settings", initial_general_settings) + + +@pytest.mark.asyncio +async def test_list_keys(prisma_client): + """ + Test the list_keys function: + - Test basic key + - Test pagination + - Test filtering by user_id, and key_alias + """ + from fastapi import Query + + from litellm.proxy.proxy_server import hash_token + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + + # Test basic listing + request = Request(scope={"type": "http", "query_string": b""}) + response = await list_keys( + request, + UserAPIKeyAuth(), + page=1, + size=10, + ) + print("response=", response) + assert "keys" in response + assert len(response["keys"]) > 0 + assert "total_count" in response + assert "current_page" in response + assert "total_pages" in response + + # Test pagination + response = await list_keys(request, UserAPIKeyAuth(), page=1, size=2) + print("pagination response=", response) + assert len(response["keys"]) == 2 + assert response["current_page"] == 1 + + # Test filtering by user_id + + unique_id = str(uuid.uuid4()) + team_id = f"key-list-team-{unique_id}" + key_alias = f"key-list-alias-{unique_id}" + user_id = f"key-list-user-{unique_id}" + response = await new_user( + data=NewUserRequest( + user_id=f"key-list-user-{unique_id}", + user_role=LitellmUserRoles.INTERNAL_USER, + key_alias=f"key-list-alias-{unique_id}", + ), + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) + + _key = hash_token(response.key) + + await asyncio.sleep(2) + + # Test filtering by user_id + response = await list_keys( + request, UserAPIKeyAuth(), user_id=user_id, page=1, size=10 + ) + print("filtered user_id response=", response) + assert len(response["keys"]) == 1 + assert _key in response["keys"] + + # Test filtering by key_alias + response = await list_keys( + request, UserAPIKeyAuth(), key_alias=key_alias, page=1, size=10 + ) + assert len(response["keys"]) == 1 + assert _key in response["keys"] + + +@pytest.mark.asyncio +async def test_key_list_unsupported_params(prisma_client): + """ + Test the list_keys function: + - Test unsupported params + """ + + from litellm.proxy.proxy_server import hash_token + + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + + request = Request(scope={"type": "http", "query_string": b"alias=foo"}) + + try: + await list_keys(request, UserAPIKeyAuth(), page=1, size=10) + pytest.fail("Expected this call to fail") + except Exception as e: + print("error str=", str(e.message)) + error_str = str(e.message) + assert "Unsupported parameter" in error_str + pass + + +@pytest.mark.asyncio +async def test_auth_vertex_ai_route(prisma_client): + """ + If user is premium user and vertex-ai route is used. Assert Virtual Key checks are run + """ + litellm.set_verbose = True + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "premium_user", True) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + await litellm.proxy.proxy_server.prisma_client.connect() + + route = "/vertex-ai/publishers/google/models/gemini-1.5-flash-001:generateContent" + request = Request(scope={"type": "http"}) + request._url = URL(url=route) + request._headers = {"Authorization": "Bearer sk-12345"} + try: + await user_api_key_auth(request=request, api_key="Bearer " + "sk-12345") + pytest.fail("Expected this call to fail. User is over limit.") + except Exception as e: + print(vars(e)) + print("error str=", str(e.message)) + error_str = str(e.message) + assert e.code == "401" + assert "Invalid proxy server token passed" in error_str + + pass + + +@pytest.mark.asyncio +async def test_service_accounts(prisma_client): + """ + Do not delete + this is the Admin UI flow + """ + # Make a call to a key with model = `all-proxy-models` this is an Alias from LiteLLM Admin UI + setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr( + litellm.proxy.proxy_server, + "general_settings", + {"service_account_settings": {"enforced_params": ["user"]}}, + ) + + await litellm.proxy.proxy_server.prisma_client.connect() + + request = GenerateKeyRequest( + metadata={"service_account_id": f"prod-service-{uuid.uuid4()}"}, + ) + response = await generate_key_fn( + data=request, + ) + + print("key generated=", response) + generated_key = response.key + bearer_token = "Bearer " + generated_key + # make a bad /chat/completions call expect it to fail + + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + async def return_body(): + return b'{"model": "gemini-pro-vision"}' + + request.body = return_body + + # use generated key to auth in + print("Bearer token being sent to user_api_key_auth() - {}".format(bearer_token)) + try: + result = await user_api_key_auth(request=request, api_key=bearer_token) + pytest.fail("Expected this call to fail. Bad request using service account") + except Exception as e: + print("error str=", str(e.message)) + assert "This is a required param for service account" in str(e.message) + + # make a good /chat/completions call it should pass + async def good_return_body(): + return b'{"model": "gemini-pro-vision", "user": "foo"}' + + request.body = good_return_body + + result = await user_api_key_auth(request=request, api_key=bearer_token) + print("response from user_api_key_auth", result) + + setattr(litellm.proxy.proxy_server, "general_settings", {}) + + +@pytest.mark.asyncio +async def test_user_api_key_auth_db_unavailable(): + """ + Test that user_api_key_auth handles DB connection failures appropriately when: + 1. DB connection fails during token validation + 2. allow_requests_on_db_unavailable=True + """ + litellm.set_verbose = True + + # Mock dependencies + class MockPrismaClient: + async def get_data(self, *args, **kwargs): + print("MockPrismaClient.get_data() called") + raise httpx.ConnectError("Failed to connect to DB") + + async def connect(self): + print("MockPrismaClient.connect() called") + pass + + class MockDualCache: + async def async_get_cache(self, *args, **kwargs): + return None + + async def async_set_cache(self, *args, **kwargs): + pass + + async def set_cache(self, *args, **kwargs): + pass + + # Set up test environment + setattr(litellm.proxy.proxy_server, "prisma_client", MockPrismaClient()) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", MockDualCache()) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + setattr( + litellm.proxy.proxy_server, + "general_settings", + {"allow_requests_on_db_unavailable": True}, + ) + + # Create test request + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # Run test with a sample API key + result = await user_api_key_auth( + request=request, + api_key="Bearer sk-123456789", + ) + + # Verify results + assert isinstance(result, UserAPIKeyAuth) + assert result.key_name == "failed-to-connect-to-db" + assert result.user_id == litellm.proxy.proxy_server.litellm_proxy_admin_name + + +@pytest.mark.asyncio +async def test_user_api_key_auth_db_unavailable_not_allowed(): + """ + Test that user_api_key_auth raises an exception when: + This is default behavior + + 1. DB connection fails during token validation + 2. allow_requests_on_db_unavailable=False (default behavior) + """ + + # Mock dependencies + class MockPrismaClient: + async def get_data(self, *args, **kwargs): + print("MockPrismaClient.get_data() called") + raise httpx.ConnectError("Failed to connect to DB") + + async def connect(self): + print("MockPrismaClient.connect() called") + pass + + class MockDualCache: + async def async_get_cache(self, *args, **kwargs): + return None + + async def async_set_cache(self, *args, **kwargs): + pass + + async def set_cache(self, *args, **kwargs): + pass + + # Set up test environment + setattr(litellm.proxy.proxy_server, "prisma_client", MockPrismaClient()) + setattr(litellm.proxy.proxy_server, "user_api_key_cache", MockDualCache()) + setattr(litellm.proxy.proxy_server, "general_settings", {}) + setattr(litellm.proxy.proxy_server, "master_key", "sk-1234") + + # Create test request + request = Request(scope={"type": "http"}) + request._url = URL(url="/chat/completions") + + # Run test with a sample API key + with pytest.raises(litellm.proxy._types.ProxyException): + await user_api_key_auth( + request=request, + api_key="Bearer sk-123456789", + ) diff --git a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py index 9a4ec8467..001cc0640 100644 --- a/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py +++ b/tests/proxy_admin_ui_tests/test_route_check_unit_tests.py @@ -147,23 +147,6 @@ def test_key_info_route_allowed(route_checks): ) -def test_key_info_route_forbidden(route_checks): - """ - Internal User is not allowed to access /key/info route for a key they're not using in Authenticated API Key - """ - with pytest.raises(HTTPException) as exc_info: - route_checks.non_proxy_admin_allowed_routes_check( - user_obj=None, - _user_role=LitellmUserRoles.INTERNAL_USER.value, - route="/key/info", - request=MockRequest(query_params={"key": "wrong_key"}), - valid_token=UserAPIKeyAuth(api_key="test_key"), - api_key="test_key", - request_data={}, - ) - assert exc_info.value.status_code == 403 - - def test_user_info_route_allowed(route_checks): """ Internal User is allowed to access /user/info route for their own user_id diff --git a/tests/proxy_unit_tests/test_key_generate_prisma.py b/tests/proxy_unit_tests/test_key_generate_prisma.py index 66b9c7b8f..78b558cd2 100644 --- a/tests/proxy_unit_tests/test_key_generate_prisma.py +++ b/tests/proxy_unit_tests/test_key_generate_prisma.py @@ -456,7 +456,10 @@ async def test_call_with_valid_model_using_all_models(prisma_client): print("result from user auth with new key", result) # call /key/info for key - models == "all-proxy-models" - key_info = await info_key_fn(key=generated_key) + key_info = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("key_info", key_info) models = key_info["info"]["models"] assert models == ["all-team-models"] @@ -1179,7 +1182,12 @@ def test_generate_and_call_key_info(prisma_client): generated_key = key.key # use generated key to auth in - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + ), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -1271,7 +1279,12 @@ def test_generate_and_update_key(prisma_client): generated_key = key.key # use generated key to auth in - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + ), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -1303,7 +1316,12 @@ def test_generate_and_update_key(prisma_client): print("response2=", response2) # get info on key after update - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth( + user_role=LitellmUserRoles.PROXY_ADMIN, + ), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -1989,7 +2007,10 @@ async def test_key_name_null(prisma_client): key = await generate_key_fn(request) print("generated key=", key) generated_key = key.key - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["info"]["key_name"] is None except Exception as e: @@ -2014,7 +2035,10 @@ async def test_key_name_set(prisma_client): request = GenerateKeyRequest() key = await generate_key_fn(request) generated_key = key.key - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert isinstance(result["info"]["key_name"], str) except Exception as e: @@ -2038,7 +2062,10 @@ async def test_default_key_params(prisma_client): request = GenerateKeyRequest() key = await generate_key_fn(request) generated_key = key.key - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["info"]["max_budget"] == 0.000122 except Exception as e: @@ -2804,7 +2831,10 @@ async def test_generate_key_with_model_tpm_limit(prisma_client): generated_key = key.key # use generated key to auth in - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -2825,7 +2855,10 @@ async def test_generate_key_with_model_tpm_limit(prisma_client): _request._url = URL(url="/update/key") await update_key_fn(data=request, request=_request) - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -2863,7 +2896,10 @@ async def test_generate_key_with_guardrails(prisma_client): generated_key = key.key # use generated key to auth in - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) @@ -2882,7 +2918,10 @@ async def test_generate_key_with_guardrails(prisma_client): _request._url = URL(url="/update/key") await update_key_fn(data=request, request=_request) - result = await info_key_fn(key=generated_key) + result = await info_key_fn( + key=generated_key, + user_api_key_dict=UserAPIKeyAuth(user_role=LitellmUserRoles.PROXY_ADMIN), + ) print("result from info_key_fn", result) assert result["key"] == generated_key print("\n info for key=", result["info"]) diff --git a/tests/test_keys.py b/tests/test_keys.py index 554a084c9..437afc336 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -412,7 +412,7 @@ async def test_key_info(): Get key info - as admin -> 200 - as key itself -> 200 - - as random key -> 403 + - as non existent key -> 404 """ async with aiohttp.ClientSession() as session: key_gen = await generate_key(session=session, i=0) @@ -425,10 +425,9 @@ async def test_key_info(): # as key itself, use the auth param, and no query key needed await get_key_info(session=session, call_key=key) # as random key # - key_gen = await generate_key(session=session, i=0) - random_key = key_gen["key"] - status = await get_key_info(session=session, get_key=key, call_key=random_key) - assert status == 403 + random_key = f"sk-{uuid.uuid4()}" + status = await get_key_info(session=session, get_key=random_key, call_key=key) + assert status == 404 @pytest.mark.asyncio From 33ceb7ca1ffab1401decca773253b58d5fc461a2 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 11 Nov 2024 21:01:05 -0800 Subject: [PATCH 64/67] =?UTF-8?q?bump:=20version=201.52.4=20=E2=86=92=201.?= =?UTF-8?q?52.5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 099f33bd8..5a5363a1d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.4" +version = "1.52.5" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.4" +version = "1.52.5" version_files = [ "pyproject.toml:^version" ] From d39fd608011478dcbf98e7c954944a8b8fbcc899 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 12 Nov 2024 07:12:51 -0800 Subject: [PATCH 65/67] add defaults used for GCS logging --- docs/my-website/docs/proxy/configs.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md index b4d70a4e7..c6b9f2d45 100644 --- a/docs/my-website/docs/proxy/configs.md +++ b/docs/my-website/docs/proxy/configs.md @@ -934,8 +934,8 @@ router_settings: | EMAIL_SUPPORT_CONTACT | Support contact email address | GCS_BUCKET_NAME | Name of the Google Cloud Storage bucket | GCS_PATH_SERVICE_ACCOUNT | Path to the Google Cloud service account JSON file -| GCS_FLUSH_INTERVAL | Flush interval for GCS logging (in seconds). Specify how often you want a log to be sent to GCS. -| GCS_BATCH_SIZE | Batch size for GCS logging. Specify after how many logs you want to flush to GCS. If `BATCH_SIZE` is set to 10, logs are flushed every 10 logs. +| GCS_FLUSH_INTERVAL | Flush interval for GCS logging (in seconds). Specify how often you want a log to be sent to GCS. **Default is 20 seconds** +| GCS_BATCH_SIZE | Batch size for GCS logging. Specify after how many logs you want to flush to GCS. If `BATCH_SIZE` is set to 10, logs are flushed every 10 logs. **Default is 2048** | GENERIC_AUTHORIZATION_ENDPOINT | Authorization endpoint for generic OAuth providers | GENERIC_CLIENT_ID | Client ID for generic OAuth providers | GENERIC_CLIENT_SECRET | Client secret for generic OAuth providers From 9160d80fa54f964eeaf0abe40475666f3f88fb3a Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 12 Nov 2024 22:50:51 +0530 Subject: [PATCH 66/67] LiteLLM Minor Fixes & Improvements (11/12/2024) (#6705) * fix(caching): convert arg to equivalent kwargs in llm caching handler prevent unexpected errors * fix(caching_handler.py): don't pass args to caching * fix(caching): remove all *args from caching.py * fix(caching): consistent function signatures + abc method * test(caching_unit_tests.py): add unit tests for llm caching ensures coverage for common caching scenarios across different implementations * refactor(litellm_logging.py): move to using cache key from hidden params instead of regenerating one * fix(router.py): drop redis password requirement * fix(proxy_server.py): fix faulty slack alerting check * fix(langfuse.py): avoid copying functions/thread lock objects in metadata fixes metadata copy error when parent otel span in metadata * test: update test --- litellm/caching/base_cache.py | 7 +- litellm/caching/caching.py | 75 +++--- litellm/caching/caching_handler.py | 74 ++++-- litellm/caching/disk_cache.py | 7 +- litellm/caching/dual_cache.py | 3 +- litellm/caching/qdrant_semantic_cache.py | 7 + litellm/caching/redis_cache.py | 2 +- litellm/caching/redis_semantic_cache.py | 7 + litellm/caching/s3_cache.py | 7 + .../SlackAlerting/slack_alerting.py | 2 +- litellm/integrations/langfuse/langfuse.py | 65 +++-- litellm/litellm_core_utils/litellm_logging.py | 7 +- litellm/proxy/_new_secret_config.yaml | 70 +++++- litellm/proxy/proxy_server.py | 4 +- litellm/proxy/utils.py | 2 +- litellm/router.py | 6 +- litellm/utils.py | 3 +- tests/local_testing/cache_unit_tests.py | 223 ++++++++++++++++++ tests/local_testing/test_alerting.py | 2 +- tests/local_testing/test_caching.py | 75 ------ .../test_disk_cache_unit_tests.py | 11 + tests/local_testing/test_dual_cache.py | 2 +- .../test_langfuse_unit_tests.py | 68 ++++-- 23 files changed, 525 insertions(+), 204 deletions(-) create mode 100644 tests/local_testing/cache_unit_tests.py create mode 100644 tests/local_testing/test_disk_cache_unit_tests.py diff --git a/litellm/caching/base_cache.py b/litellm/caching/base_cache.py index a50e09bf9..7109951d1 100644 --- a/litellm/caching/base_cache.py +++ b/litellm/caching/base_cache.py @@ -8,6 +8,7 @@ Has 4 methods: - async_get_cache """ +from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: @@ -18,7 +19,7 @@ else: Span = Any -class BaseCache: +class BaseCache(ABC): def __init__(self, default_ttl: int = 60): self.default_ttl = default_ttl @@ -37,6 +38,10 @@ class BaseCache: async def async_set_cache(self, key, value, **kwargs): raise NotImplementedError + @abstractmethod + async def async_set_cache_pipeline(self, cache_list, **kwargs): + pass + def get_cache(self, key, **kwargs): raise NotImplementedError diff --git a/litellm/caching/caching.py b/litellm/caching/caching.py index 5fd972a76..17c09b997 100644 --- a/litellm/caching/caching.py +++ b/litellm/caching/caching.py @@ -233,19 +233,18 @@ class Cache: if self.namespace is not None and isinstance(self.cache, RedisCache): self.cache.namespace = self.namespace - def get_cache_key(self, *args, **kwargs) -> str: + def get_cache_key(self, **kwargs) -> str: """ Get the cache key for the given arguments. Args: - *args: args to litellm.completion() or embedding() **kwargs: kwargs to litellm.completion() or embedding() Returns: str: The cache key generated from the arguments, or None if no cache key could be generated. """ cache_key = "" - verbose_logger.debug("\nGetting Cache key. Kwargs: %s", kwargs) + # verbose_logger.debug("\nGetting Cache key. Kwargs: %s", kwargs) preset_cache_key = self._get_preset_cache_key_from_kwargs(**kwargs) if preset_cache_key is not None: @@ -521,7 +520,7 @@ class Cache: return cached_response return cached_result - def get_cache(self, *args, **kwargs): + def get_cache(self, **kwargs): """ Retrieves the cached result for the given arguments. @@ -533,13 +532,13 @@ class Cache: The cached result if it exists, otherwise None. """ try: # never block execution - if self.should_use_cache(*args, **kwargs) is not True: + if self.should_use_cache(**kwargs) is not True: return messages = kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: - cache_key = self.get_cache_key(*args, **kwargs) + cache_key = self.get_cache_key(**kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( @@ -553,29 +552,28 @@ class Cache: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None - async def async_get_cache(self, *args, **kwargs): + async def async_get_cache(self, **kwargs): """ Async get cache implementation. Used for embedding calls in async wrapper """ + try: # never block execution - if self.should_use_cache(*args, **kwargs) is not True: + if self.should_use_cache(**kwargs) is not True: return kwargs.get("messages", []) if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: - cache_key = self.get_cache_key(*args, **kwargs) + cache_key = self.get_cache_key(**kwargs) if cache_key is not None: cache_control_args = kwargs.get("cache", {}) max_age = cache_control_args.get( "s-max-age", cache_control_args.get("s-maxage", float("inf")) ) - cached_result = await self.cache.async_get_cache( - cache_key, *args, **kwargs - ) + cached_result = await self.cache.async_get_cache(cache_key, **kwargs) return self._get_cache_logic( cached_result=cached_result, max_age=max_age ) @@ -583,7 +581,7 @@ class Cache: print_verbose(f"An exception occurred: {traceback.format_exc()}") return None - def _add_cache_logic(self, result, *args, **kwargs): + def _add_cache_logic(self, result, **kwargs): """ Common implementation across sync + async add_cache functions """ @@ -591,7 +589,7 @@ class Cache: if "cache_key" in kwargs: cache_key = kwargs["cache_key"] else: - cache_key = self.get_cache_key(*args, **kwargs) + cache_key = self.get_cache_key(**kwargs) if cache_key is not None: if isinstance(result, BaseModel): result = result.model_dump_json() @@ -613,7 +611,7 @@ class Cache: except Exception as e: raise e - def add_cache(self, result, *args, **kwargs): + def add_cache(self, result, **kwargs): """ Adds a result to the cache. @@ -625,41 +623,42 @@ class Cache: None """ try: - if self.should_use_cache(*args, **kwargs) is not True: + if self.should_use_cache(**kwargs) is not True: return cache_key, cached_data, kwargs = self._add_cache_logic( - result=result, *args, **kwargs + result=result, **kwargs ) self.cache.set_cache(cache_key, cached_data, **kwargs) except Exception as e: verbose_logger.exception(f"LiteLLM Cache: Excepton add_cache: {str(e)}") - async def async_add_cache(self, result, *args, **kwargs): + async def async_add_cache(self, result, **kwargs): """ Async implementation of add_cache """ try: - if self.should_use_cache(*args, **kwargs) is not True: + if self.should_use_cache(**kwargs) is not True: return if self.type == "redis" and self.redis_flush_size is not None: # high traffic - fill in results in memory and then flush - await self.batch_cache_write(result, *args, **kwargs) + await self.batch_cache_write(result, **kwargs) else: cache_key, cached_data, kwargs = self._add_cache_logic( - result=result, *args, **kwargs + result=result, **kwargs ) + await self.cache.async_set_cache(cache_key, cached_data, **kwargs) except Exception as e: verbose_logger.exception(f"LiteLLM Cache: Excepton add_cache: {str(e)}") - async def async_add_cache_pipeline(self, result, *args, **kwargs): + async def async_add_cache_pipeline(self, result, **kwargs): """ Async implementation of add_cache for Embedding calls Does a bulk write, to prevent using too many clients """ try: - if self.should_use_cache(*args, **kwargs) is not True: + if self.should_use_cache(**kwargs) is not True: return # set default ttl if not set @@ -668,29 +667,27 @@ class Cache: cache_list = [] for idx, i in enumerate(kwargs["input"]): - preset_cache_key = self.get_cache_key(*args, **{**kwargs, "input": i}) + preset_cache_key = self.get_cache_key(**{**kwargs, "input": i}) kwargs["cache_key"] = preset_cache_key embedding_response = result.data[idx] cache_key, cached_data, kwargs = self._add_cache_logic( result=embedding_response, - *args, **kwargs, ) cache_list.append((cache_key, cached_data)) - async_set_cache_pipeline = getattr( - self.cache, "async_set_cache_pipeline", None - ) - if async_set_cache_pipeline: - await async_set_cache_pipeline(cache_list=cache_list, **kwargs) - else: - tasks = [] - for val in cache_list: - tasks.append(self.cache.async_set_cache(val[0], val[1], **kwargs)) - await asyncio.gather(*tasks) + + await self.cache.async_set_cache_pipeline(cache_list=cache_list, **kwargs) + # if async_set_cache_pipeline: + # await async_set_cache_pipeline(cache_list=cache_list, **kwargs) + # else: + # tasks = [] + # for val in cache_list: + # tasks.append(self.cache.async_set_cache(val[0], val[1], **kwargs)) + # await asyncio.gather(*tasks) except Exception as e: verbose_logger.exception(f"LiteLLM Cache: Excepton add_cache: {str(e)}") - def should_use_cache(self, *args, **kwargs): + def should_use_cache(self, **kwargs): """ Returns true if we should use the cache for LLM API calls @@ -708,10 +705,8 @@ class Cache: return True return False - async def batch_cache_write(self, result, *args, **kwargs): - cache_key, cached_data, kwargs = self._add_cache_logic( - result=result, *args, **kwargs - ) + async def batch_cache_write(self, result, **kwargs): + cache_key, cached_data, kwargs = self._add_cache_logic(result=result, **kwargs) await self.cache.batch_cache_write(cache_key, cached_data, **kwargs) async def ping(self): diff --git a/litellm/caching/caching_handler.py b/litellm/caching/caching_handler.py index f4e7d8476..11ae600b7 100644 --- a/litellm/caching/caching_handler.py +++ b/litellm/caching/caching_handler.py @@ -137,7 +137,7 @@ class LLMCachingHandler: if litellm.cache is not None and self._is_call_type_supported_by_cache( original_function=original_function ): - print_verbose("Checking Cache") + verbose_logger.debug("Checking Cache") cached_result = await self._retrieve_from_cache( call_type=call_type, kwargs=kwargs, @@ -145,7 +145,7 @@ class LLMCachingHandler: ) if cached_result is not None and not isinstance(cached_result, list): - print_verbose("Cache Hit!") + verbose_logger.debug("Cache Hit!") cache_hit = True end_time = datetime.datetime.now() model, _, _, _ = litellm.get_llm_provider( @@ -215,6 +215,7 @@ class LLMCachingHandler: final_embedding_cached_response=final_embedding_cached_response, embedding_all_elements_cache_hit=embedding_all_elements_cache_hit, ) + verbose_logger.debug(f"CACHE RESULT: {cached_result}") return CachingHandlerResponse( cached_result=cached_result, final_embedding_cached_response=final_embedding_cached_response, @@ -233,12 +234,19 @@ class LLMCachingHandler: from litellm.utils import CustomStreamWrapper args = args or () + new_kwargs = kwargs.copy() + new_kwargs.update( + convert_args_to_kwargs( + self.original_function, + args, + ) + ) cached_result: Optional[Any] = None if litellm.cache is not None and self._is_call_type_supported_by_cache( original_function=original_function ): print_verbose("Checking Cache") - cached_result = litellm.cache.get_cache(*args, **kwargs) + cached_result = litellm.cache.get_cache(**new_kwargs) if cached_result is not None: if "detail" in cached_result: # implies an error occurred @@ -475,14 +483,21 @@ class LLMCachingHandler: if litellm.cache is None: return None + new_kwargs = kwargs.copy() + new_kwargs.update( + convert_args_to_kwargs( + self.original_function, + args, + ) + ) cached_result: Optional[Any] = None if call_type == CallTypes.aembedding.value and isinstance( - kwargs["input"], list + new_kwargs["input"], list ): tasks = [] - for idx, i in enumerate(kwargs["input"]): + for idx, i in enumerate(new_kwargs["input"]): preset_cache_key = litellm.cache.get_cache_key( - *args, **{**kwargs, "input": i} + **{**new_kwargs, "input": i} ) tasks.append(litellm.cache.async_get_cache(cache_key=preset_cache_key)) cached_result = await asyncio.gather(*tasks) @@ -493,9 +508,9 @@ class LLMCachingHandler: cached_result = None else: if litellm.cache._supports_async() is True: - cached_result = await litellm.cache.async_get_cache(*args, **kwargs) + cached_result = await litellm.cache.async_get_cache(**new_kwargs) else: # for s3 caching. [NOT RECOMMENDED IN PROD - this will slow down responses since boto3 is sync] - cached_result = litellm.cache.get_cache(*args, **kwargs) + cached_result = litellm.cache.get_cache(**new_kwargs) return cached_result def _convert_cached_result_to_model_response( @@ -580,6 +595,7 @@ class LLMCachingHandler: model_response_object=EmbeddingResponse(), response_type="embedding", ) + elif ( call_type == CallTypes.arerank.value or call_type == CallTypes.rerank.value ) and isinstance(cached_result, dict): @@ -603,6 +619,13 @@ class LLMCachingHandler: response_type="audio_transcription", hidden_params=hidden_params, ) + + if ( + hasattr(cached_result, "_hidden_params") + and cached_result._hidden_params is not None + and isinstance(cached_result._hidden_params, dict) + ): + cached_result._hidden_params["cache_hit"] = True return cached_result def _convert_cached_stream_response( @@ -658,12 +681,19 @@ class LLMCachingHandler: Raises: None """ - kwargs.update(convert_args_to_kwargs(result, original_function, kwargs, args)) + + new_kwargs = kwargs.copy() + new_kwargs.update( + convert_args_to_kwargs( + original_function, + args, + ) + ) if litellm.cache is None: return # [OPTIONAL] ADD TO CACHE if self._should_store_result_in_cache( - original_function=original_function, kwargs=kwargs + original_function=original_function, kwargs=new_kwargs ): if ( isinstance(result, litellm.ModelResponse) @@ -673,29 +703,29 @@ class LLMCachingHandler: ): if ( isinstance(result, EmbeddingResponse) - and isinstance(kwargs["input"], list) + and isinstance(new_kwargs["input"], list) and litellm.cache is not None and not isinstance( litellm.cache.cache, S3Cache ) # s3 doesn't support bulk writing. Exclude. ): asyncio.create_task( - litellm.cache.async_add_cache_pipeline(result, **kwargs) + litellm.cache.async_add_cache_pipeline(result, **new_kwargs) ) elif isinstance(litellm.cache.cache, S3Cache): threading.Thread( target=litellm.cache.add_cache, args=(result,), - kwargs=kwargs, + kwargs=new_kwargs, ).start() else: asyncio.create_task( litellm.cache.async_add_cache( - result.model_dump_json(), **kwargs + result.model_dump_json(), **new_kwargs ) ) else: - asyncio.create_task(litellm.cache.async_add_cache(result, **kwargs)) + asyncio.create_task(litellm.cache.async_add_cache(result, **new_kwargs)) def sync_set_cache( self, @@ -706,16 +736,20 @@ class LLMCachingHandler: """ Sync internal method to add the result to the cache """ - kwargs.update( - convert_args_to_kwargs(result, self.original_function, kwargs, args) + new_kwargs = kwargs.copy() + new_kwargs.update( + convert_args_to_kwargs( + self.original_function, + args, + ) ) if litellm.cache is None: return if self._should_store_result_in_cache( - original_function=self.original_function, kwargs=kwargs + original_function=self.original_function, kwargs=new_kwargs ): - litellm.cache.add_cache(result, **kwargs) + litellm.cache.add_cache(result, **new_kwargs) return @@ -865,9 +899,7 @@ class LLMCachingHandler: def convert_args_to_kwargs( - result: Any, original_function: Callable, - kwargs: Dict[str, Any], args: Optional[Tuple[Any, ...]] = None, ) -> Dict[str, Any]: # Get the signature of the original function diff --git a/litellm/caching/disk_cache.py b/litellm/caching/disk_cache.py index 2c086ed50..94f82926d 100644 --- a/litellm/caching/disk_cache.py +++ b/litellm/caching/disk_cache.py @@ -24,7 +24,6 @@ class DiskCache(BaseCache): self.disk_cache = dc.Cache(disk_cache_dir) def set_cache(self, key, value, **kwargs): - print_verbose("DiskCache: set_cache") if "ttl" in kwargs: self.disk_cache.set(key, value, expire=kwargs["ttl"]) else: @@ -33,10 +32,10 @@ class DiskCache(BaseCache): async def async_set_cache(self, key, value, **kwargs): self.set_cache(key=key, value=value, **kwargs) - async def async_set_cache_pipeline(self, cache_list, ttl=None): + async def async_set_cache_pipeline(self, cache_list, **kwargs): for cache_key, cache_value in cache_list: - if ttl is not None: - self.set_cache(key=cache_key, value=cache_value, ttl=ttl) + if "ttl" in kwargs: + self.set_cache(key=cache_key, value=cache_value, ttl=kwargs["ttl"]) else: self.set_cache(key=cache_key, value=cache_value) diff --git a/litellm/caching/dual_cache.py b/litellm/caching/dual_cache.py index ddcd02abe..a6c218c01 100644 --- a/litellm/caching/dual_cache.py +++ b/litellm/caching/dual_cache.py @@ -314,7 +314,8 @@ class DualCache(BaseCache): f"LiteLLM Cache: Excepton async add_cache: {str(e)}" ) - async def async_batch_set_cache( + # async_batch_set_cache + async def async_set_cache_pipeline( self, cache_list: list, local_only: bool = False, **kwargs ): """ diff --git a/litellm/caching/qdrant_semantic_cache.py b/litellm/caching/qdrant_semantic_cache.py index be67001f6..acaa8e918 100644 --- a/litellm/caching/qdrant_semantic_cache.py +++ b/litellm/caching/qdrant_semantic_cache.py @@ -9,6 +9,7 @@ Has 4 methods: """ import ast +import asyncio import json from typing import Any @@ -422,3 +423,9 @@ class QdrantSemanticCache(BaseCache): async def _collection_info(self): return self.collection_info + + async def async_set_cache_pipeline(self, cache_list, **kwargs): + tasks = [] + for val in cache_list: + tasks.append(self.async_set_cache(val[0], val[1], **kwargs)) + await asyncio.gather(*tasks) diff --git a/litellm/caching/redis_cache.py b/litellm/caching/redis_cache.py index 40bb49f44..e15a3f83d 100644 --- a/litellm/caching/redis_cache.py +++ b/litellm/caching/redis_cache.py @@ -404,7 +404,7 @@ class RedisCache(BaseCache): parent_otel_span=_get_parent_otel_span_from_kwargs(kwargs), ) ) - return results + return None except Exception as e: ## LOGGING ## end_time = time.time() diff --git a/litellm/caching/redis_semantic_cache.py b/litellm/caching/redis_semantic_cache.py index 444a3259f..e3098f085 100644 --- a/litellm/caching/redis_semantic_cache.py +++ b/litellm/caching/redis_semantic_cache.py @@ -9,6 +9,7 @@ Has 4 methods: """ import ast +import asyncio import json from typing import Any @@ -331,3 +332,9 @@ class RedisSemanticCache(BaseCache): async def _index_info(self): return await self.index.ainfo() + + async def async_set_cache_pipeline(self, cache_list, **kwargs): + tasks = [] + for val in cache_list: + tasks.append(self.async_set_cache(val[0], val[1], **kwargs)) + await asyncio.gather(*tasks) diff --git a/litellm/caching/s3_cache.py b/litellm/caching/s3_cache.py index c22347a7f..6be16e289 100644 --- a/litellm/caching/s3_cache.py +++ b/litellm/caching/s3_cache.py @@ -10,6 +10,7 @@ Has 4 methods: """ import ast +import asyncio import json from typing import Any, Optional @@ -153,3 +154,9 @@ class S3Cache(BaseCache): async def disconnect(self): pass + + async def async_set_cache_pipeline(self, cache_list, **kwargs): + tasks = [] + for val in cache_list: + tasks.append(self.async_set_cache(val[0], val[1], **kwargs)) + await asyncio.gather(*tasks) diff --git a/litellm/integrations/SlackAlerting/slack_alerting.py b/litellm/integrations/SlackAlerting/slack_alerting.py index 85d54a337..d585e235b 100644 --- a/litellm/integrations/SlackAlerting/slack_alerting.py +++ b/litellm/integrations/SlackAlerting/slack_alerting.py @@ -423,7 +423,7 @@ class SlackAlerting(CustomBatchLogger): latency_cache_keys = [(key, 0) for key in latency_keys] failed_request_cache_keys = [(key, 0) for key in failed_request_keys] combined_metrics_cache_keys = latency_cache_keys + failed_request_cache_keys - await self.internal_usage_cache.async_batch_set_cache( + await self.internal_usage_cache.async_set_cache_pipeline( cache_list=combined_metrics_cache_keys ) diff --git a/litellm/integrations/langfuse/langfuse.py b/litellm/integrations/langfuse/langfuse.py index 18892871e..73485a0bd 100644 --- a/litellm/integrations/langfuse/langfuse.py +++ b/litellm/integrations/langfuse/langfuse.py @@ -3,8 +3,9 @@ import copy import os import traceback +import types from collections.abc import MutableMapping, MutableSequence, MutableSet -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional, cast from packaging.version import Version from pydantic import BaseModel @@ -355,17 +356,28 @@ class LangFuseLogger: ) ) - def _prepare_metadata(self, metadata) -> Any: + def is_base_type(self, value: Any) -> bool: + # Check if the value is of a base type + base_types = (int, float, str, bool, list, dict, tuple) + return isinstance(value, base_types) + + def _prepare_metadata(self, metadata: Optional[dict]) -> Any: try: - return copy.deepcopy(metadata) # Avoid modifying the original metadata - except (TypeError, copy.Error) as e: - verbose_logger.warning(f"Langfuse Layer Error - {e}") + if metadata is None: + return None + + # Filter out function types from the metadata + sanitized_metadata = {k: v for k, v in metadata.items() if not callable(v)} + + return copy.deepcopy(sanitized_metadata) + except Exception as e: + verbose_logger.debug(f"Langfuse Layer Error - {e}, metadata: {metadata}") new_metadata: Dict[str, Any] = {} # if metadata is not a MutableMapping, return an empty dict since we can't call items() on it if not isinstance(metadata, MutableMapping): - verbose_logger.warning( + verbose_logger.debug( "Langfuse Layer Logging - metadata is not a MutableMapping, returning empty dict" ) return new_metadata @@ -373,25 +385,40 @@ class LangFuseLogger: for key, value in metadata.items(): try: if isinstance(value, MutableMapping): - new_metadata[key] = self._prepare_metadata(value) - elif isinstance(value, (MutableSequence, MutableSet)): - new_metadata[key] = type(value)( - *( - ( - self._prepare_metadata(v) - if isinstance(v, MutableMapping) - else copy.deepcopy(v) - ) - for v in value + new_metadata[key] = self._prepare_metadata(cast(dict, value)) + elif isinstance(value, MutableSequence): + # For lists or other mutable sequences + new_metadata[key] = list( + ( + self._prepare_metadata(cast(dict, v)) + if isinstance(v, MutableMapping) + else copy.deepcopy(v) ) + for v in value + ) + elif isinstance(value, MutableSet): + # For sets specifically, create a new set by passing an iterable + new_metadata[key] = set( + ( + self._prepare_metadata(cast(dict, v)) + if isinstance(v, MutableMapping) + else copy.deepcopy(v) + ) + for v in value ) elif isinstance(value, BaseModel): new_metadata[key] = value.model_dump() + elif self.is_base_type(value): + new_metadata[key] = value else: - new_metadata[key] = copy.deepcopy(value) + verbose_logger.debug( + f"Langfuse Layer Error - Unsupported metadata type: {type(value)} for key: {key}" + ) + continue + except (TypeError, copy.Error): - verbose_logger.warning( - f"Langfuse Layer Error - Couldn't copy metadata key: {key} - {traceback.format_exc()}" + verbose_logger.debug( + f"Langfuse Layer Error - Couldn't copy metadata key: {key}, type of key: {type(key)}, type of value: {type(value)} - {traceback.format_exc()}" ) return new_metadata diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index 2ab905e85..d2e65742c 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -2774,11 +2774,6 @@ def get_standard_logging_object_payload( metadata=metadata ) - if litellm.cache is not None: - cache_key = litellm.cache.get_cache_key(**kwargs) - else: - cache_key = None - saved_cache_cost: float = 0.0 if cache_hit is True: @@ -2820,7 +2815,7 @@ def get_standard_logging_object_payload( completionStartTime=completion_start_time_float, model=kwargs.get("model", "") or "", metadata=clean_metadata, - cache_key=cache_key, + cache_key=clean_hidden_params["cache_key"], response_cost=response_cost, total_tokens=usage.total_tokens, prompt_tokens=usage.prompt_tokens, diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index cd723275b..806b55994 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -1,12 +1,80 @@ model_list: - model_name: "*" litellm_params: - model: "*" + model: claude-3-5-sonnet-20240620 + api_key: os.environ/ANTHROPIC_API_KEY + - model_name: claude-3-5-sonnet-aihubmix + litellm_params: + model: openai/claude-3-5-sonnet-20240620 + input_cost_per_token: 0.000003 # 3$/M + output_cost_per_token: 0.000015 # 15$/M + api_base: "https://exampleopenaiendpoint-production.up.railway.app" + api_key: my-fake-key + - model_name: fake-openai-endpoint-2 + litellm_params: + model: openai/my-fake-model + api_key: my-fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ + stream_timeout: 0.001 + timeout: 1 + rpm: 1 + - model_name: fake-openai-endpoint + litellm_params: + model: openai/my-fake-model + api_key: my-fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ + ## bedrock chat completions + - model_name: "*anthropic.claude*" + litellm_params: + model: bedrock/*anthropic.claude* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + guardrailConfig: + "guardrailIdentifier": "h4dsqwhp6j66" + "guardrailVersion": "2" + "trace": "enabled" + +## bedrock embeddings + - model_name: "*amazon.titan-embed-*" + litellm_params: + model: bedrock/amazon.titan-embed-* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + - model_name: "*cohere.embed-*" + litellm_params: + model: bedrock/cohere.embed-* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + + - model_name: "bedrock/*" + litellm_params: + model: bedrock/* + aws_access_key_id: os.environ/BEDROCK_AWS_ACCESS_KEY_ID + aws_secret_access_key: os.environ/BEDROCK_AWS_SECRET_ACCESS_KEY + aws_region_name: os.environ/AWS_REGION_NAME + + - model_name: gpt-4 + litellm_params: + model: azure/chatgpt-v-2 + api_base: https://openai-gpt-4-test-v-1.openai.azure.com/ + api_version: "2023-05-15" + api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault + rpm: 480 + timeout: 300 + stream_timeout: 60 litellm_settings: fallbacks: [{ "claude-3-5-sonnet-20240620": ["claude-3-5-sonnet-aihubmix"] }] callbacks: ["otel", "prometheus"] default_redis_batch_cache_expiry: 10 + # default_team_settings: + # - team_id: "dbe2f686-a686-4896-864a-4c3924458709" + # success_callback: ["langfuse"] + # langfuse_public_key: os.environ/LANGFUSE_PUB_KEY_1 # Project 1 + # langfuse_secret: os.environ/LANGFUSE_PRIVATE_KEY_1 # Project 1 # litellm_settings: # cache: True diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 12e80876c..c9c6af77f 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -1308,7 +1308,7 @@ async def update_cache( # noqa: PLR0915 await _update_team_cache() asyncio.create_task( - user_api_key_cache.async_batch_set_cache( + user_api_key_cache.async_set_cache_pipeline( cache_list=values_to_update_in_cache, ttl=60, litellm_parent_otel_span=parent_otel_span, @@ -2978,7 +2978,7 @@ class ProxyStartupEvent: if ( proxy_logging_obj is not None - and proxy_logging_obj.slack_alerting_instance is not None + and proxy_logging_obj.slack_alerting_instance.alerting is not None and prisma_client is not None ): print("Alerting: Initializing Weekly/Monthly Spend Reports") # noqa diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 009d65873..c143d30e4 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -175,7 +175,7 @@ class InternalUsageCache: local_only: bool = False, **kwargs, ) -> None: - return await self.dual_cache.async_batch_set_cache( + return await self.dual_cache.async_set_cache_pipeline( cache_list=cache_list, local_only=local_only, litellm_parent_otel_span=litellm_parent_otel_span, diff --git a/litellm/router.py b/litellm/router.py index 0bdd1d1e0..4735d422b 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -339,11 +339,7 @@ class Router: cache_config: Dict[str, Any] = {} self.client_ttl = client_ttl - if redis_url is not None or ( - redis_host is not None - and redis_port is not None - and redis_password is not None - ): + if redis_url is not None or (redis_host is not None and redis_port is not None): cache_type = "redis" if redis_url is not None: diff --git a/litellm/utils.py b/litellm/utils.py index 1e8025be4..802bcfc04 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -796,7 +796,7 @@ def client(original_function): # noqa: PLR0915 and kwargs.get("_arealtime", False) is not True ): # allow users to control returning cached responses from the completion function # checking cache - print_verbose("INSIDE CHECKING CACHE") + verbose_logger.debug("INSIDE CHECKING SYNC CACHE") caching_handler_response: CachingHandlerResponse = ( _llm_caching_handler._sync_get_cache( model=model or "", @@ -808,6 +808,7 @@ def client(original_function): # noqa: PLR0915 args=args, ) ) + if caching_handler_response.cached_result is not None: return caching_handler_response.cached_result diff --git a/tests/local_testing/cache_unit_tests.py b/tests/local_testing/cache_unit_tests.py new file mode 100644 index 000000000..da56c773f --- /dev/null +++ b/tests/local_testing/cache_unit_tests.py @@ -0,0 +1,223 @@ +from abc import ABC, abstractmethod +from litellm.caching import LiteLLMCacheType +import os +import sys +import time +import traceback +import uuid + +from dotenv import load_dotenv +from test_rerank import assert_response_shape + +load_dotenv() +import os + +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +import asyncio +import hashlib +import random + +import pytest + +import litellm +from litellm.caching import Cache +from litellm import completion, embedding + + +class LLMCachingUnitTests(ABC): + + @abstractmethod + def get_cache_type(self) -> LiteLLMCacheType: + pass + + @pytest.mark.parametrize("sync_mode", [True, False]) + @pytest.mark.asyncio + async def test_cache_completion(self, sync_mode): + litellm._turn_on_debug() + + random_number = random.randint( + 1, 100000 + ) # add a random number to ensure it's always adding / reading from cache + messages = [ + { + "role": "user", + "content": f"write a one sentence poem about: {random_number}", + } + ] + + cache_type = self.get_cache_type() + litellm.cache = Cache( + type=cache_type, + ) + + if sync_mode: + response1 = completion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + max_tokens=20, + mock_response="This number is so great!", + ) + else: + response1 = await litellm.acompletion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + max_tokens=20, + mock_response="This number is so great!", + ) + # response2 is mocked to a different response from response1, + # but the completion from the cache should be used instead of the mock + # response since the input is the same as response1 + await asyncio.sleep(0.5) + if sync_mode: + response2 = completion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + max_tokens=20, + mock_response="This number is great!", + ) + else: + response2 = await litellm.acompletion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + max_tokens=20, + mock_response="This number is great!", + ) + if ( + response1["choices"][0]["message"]["content"] + != response2["choices"][0]["message"]["content"] + ): # 1 and 2 should be the same + # 1&2 have the exact same input params. This MUST Be a CACHE HIT + print(f"response1: {response1}") + print(f"response2: {response2}") + pytest.fail( + f"Error occurred: response1 - {response1['choices'][0]['message']['content']} != response2 - {response2['choices'][0]['message']['content']}" + ) + # Since the parameters are not the same as response1, response3 should actually + # be the mock response + if sync_mode: + response3 = completion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + temperature=0.5, + mock_response="This number is awful!", + ) + else: + response3 = await litellm.acompletion( + "gpt-3.5-turbo", + messages=messages, + caching=True, + temperature=0.5, + mock_response="This number is awful!", + ) + + print("\nresponse 1", response1) + print("\nresponse 2", response2) + print("\nresponse 3", response3) + # print("\nresponse 4", response4) + litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] + + # 1 & 2 should be exactly the same + # 1 & 3 should be different, since input params are diff + + if ( + response1["choices"][0]["message"]["content"] + == response3["choices"][0]["message"]["content"] + ): + # if input params like max_tokens, temperature are diff it should NOT be a cache hit + print(f"response1: {response1}") + print(f"response3: {response3}") + pytest.fail( + f"Response 1 == response 3. Same model, diff params shoudl not cache Error" + f" occurred:" + ) + + assert response1.id == response2.id + assert response1.created == response2.created + assert ( + response1.choices[0].message.content == response2.choices[0].message.content + ) + + @pytest.mark.parametrize("sync_mode", [True, False]) + @pytest.mark.asyncio + async def test_disk_cache_embedding(self, sync_mode): + litellm._turn_on_debug() + + random_number = random.randint( + 1, 100000 + ) # add a random number to ensure it's always adding / reading from cache + input = [f"hello {random_number}"] + litellm.cache = Cache( + type="disk", + ) + + if sync_mode: + response1 = embedding( + "openai/text-embedding-ada-002", + input=input, + caching=True, + ) + else: + response1 = await litellm.aembedding( + "openai/text-embedding-ada-002", + input=input, + caching=True, + ) + # response2 is mocked to a different response from response1, + # but the completion from the cache should be used instead of the mock + # response since the input is the same as response1 + await asyncio.sleep(0.5) + if sync_mode: + response2 = embedding( + "openai/text-embedding-ada-002", + input=input, + caching=True, + ) + else: + response2 = await litellm.aembedding( + "openai/text-embedding-ada-002", + input=input, + caching=True, + ) + + if response2._hidden_params["cache_hit"] is not True: + pytest.fail("Cache hit should be True") + + # Since the parameters are not the same as response1, response3 should actually + # be the mock response + if sync_mode: + response3 = embedding( + "openai/text-embedding-ada-002", + input=input, + user="charlie", + caching=True, + ) + else: + response3 = await litellm.aembedding( + "openai/text-embedding-ada-002", + input=input, + caching=True, + user="charlie", + ) + + print("\nresponse 1", response1) + print("\nresponse 2", response2) + print("\nresponse 3", response3) + # print("\nresponse 4", response4) + litellm.cache = None + litellm.success_callback = [] + litellm._async_success_callback = [] + + # 1 & 2 should be exactly the same + # 1 & 3 should be different, since input params are diff + + if response3._hidden_params.get("cache_hit") is True: + pytest.fail("Cache hit should not be True") diff --git a/tests/local_testing/test_alerting.py b/tests/local_testing/test_alerting.py index b79438ffc..cc668801f 100644 --- a/tests/local_testing/test_alerting.py +++ b/tests/local_testing/test_alerting.py @@ -438,7 +438,7 @@ async def test_send_daily_reports_ignores_zero_values(): slack_alerting.internal_usage_cache.async_batch_get_cache = AsyncMock( return_value=[None, 0, 10, 0, 0, None] ) - slack_alerting.internal_usage_cache.async_batch_set_cache = AsyncMock() + slack_alerting.internal_usage_cache.async_set_cache_pipeline = AsyncMock() router.get_model_info.side_effect = lambda x: {"litellm_params": {"model": x}} diff --git a/tests/local_testing/test_caching.py b/tests/local_testing/test_caching.py index 479c1204e..222013a86 100644 --- a/tests/local_testing/test_caching.py +++ b/tests/local_testing/test_caching.py @@ -1103,81 +1103,6 @@ async def test_redis_cache_acompletion_stream_bedrock(): raise e -def test_disk_cache_completion(): - litellm.set_verbose = False - - random_number = random.randint( - 1, 100000 - ) # add a random number to ensure it's always adding / reading from cache - messages = [ - {"role": "user", "content": f"write a one sentence poem about: {random_number}"} - ] - litellm.cache = Cache( - type="disk", - ) - - response1 = completion( - model="gpt-3.5-turbo", - messages=messages, - caching=True, - max_tokens=20, - mock_response="This number is so great!", - ) - # response2 is mocked to a different response from response1, - # but the completion from the cache should be used instead of the mock - # response since the input is the same as response1 - response2 = completion( - model="gpt-3.5-turbo", - messages=messages, - caching=True, - max_tokens=20, - mock_response="This number is awful!", - ) - # Since the parameters are not the same as response1, response3 should actually - # be the mock response - response3 = completion( - model="gpt-3.5-turbo", - messages=messages, - caching=True, - temperature=0.5, - mock_response="This number is awful!", - ) - - print("\nresponse 1", response1) - print("\nresponse 2", response2) - print("\nresponse 3", response3) - # print("\nresponse 4", response4) - litellm.cache = None - litellm.success_callback = [] - litellm._async_success_callback = [] - - # 1 & 2 should be exactly the same - # 1 & 3 should be different, since input params are diff - if ( - response1["choices"][0]["message"]["content"] - != response2["choices"][0]["message"]["content"] - ): # 1 and 2 should be the same - # 1&2 have the exact same input params. This MUST Be a CACHE HIT - print(f"response1: {response1}") - print(f"response2: {response2}") - pytest.fail(f"Error occurred:") - if ( - response1["choices"][0]["message"]["content"] - == response3["choices"][0]["message"]["content"] - ): - # if input params like max_tokens, temperature are diff it should NOT be a cache hit - print(f"response1: {response1}") - print(f"response3: {response3}") - pytest.fail( - f"Response 1 == response 3. Same model, diff params shoudl not cache Error" - f" occurred:" - ) - - assert response1.id == response2.id - assert response1.created == response2.created - assert response1.choices[0].message.content == response2.choices[0].message.content - - # @pytest.mark.skip(reason="AWS Suspended Account") @pytest.mark.parametrize("sync_mode", [True, False]) @pytest.mark.asyncio diff --git a/tests/local_testing/test_disk_cache_unit_tests.py b/tests/local_testing/test_disk_cache_unit_tests.py new file mode 100644 index 000000000..c777d04ec --- /dev/null +++ b/tests/local_testing/test_disk_cache_unit_tests.py @@ -0,0 +1,11 @@ +from cache_unit_tests import LLMCachingUnitTests +from litellm.caching import LiteLLMCacheType + + +class TestDiskCacheUnitTests(LLMCachingUnitTests): + def get_cache_type(self) -> LiteLLMCacheType: + return LiteLLMCacheType.DISK + + +# if __name__ == "__main__": +# pytest.main([__file__, "-v", "-s"]) diff --git a/tests/local_testing/test_dual_cache.py b/tests/local_testing/test_dual_cache.py index c3f3216d5..e81424a9f 100644 --- a/tests/local_testing/test_dual_cache.py +++ b/tests/local_testing/test_dual_cache.py @@ -146,7 +146,7 @@ async def test_dual_cache_batch_operations(is_async): # Set values if is_async: - await dual_cache.async_batch_set_cache(cache_list) + await dual_cache.async_set_cache_pipeline(cache_list) else: for key, value in cache_list: dual_cache.set_cache(key, value) diff --git a/tests/logging_callback_tests/test_langfuse_unit_tests.py b/tests/logging_callback_tests/test_langfuse_unit_tests.py index 20b33f81b..c10b6110c 100644 --- a/tests/logging_callback_tests/test_langfuse_unit_tests.py +++ b/tests/logging_callback_tests/test_langfuse_unit_tests.py @@ -212,26 +212,48 @@ def test_get_langfuse_logger_for_request_with_cached_logger(): assert result == cached_logger mock_cache.get_cache.assert_called_once() -@pytest.mark.parametrize("metadata", [ - {'a': 1, 'b': 2, 'c': 3}, - {'a': {'nested_a': 1}, 'b': {'nested_b': 2}}, - {'a': [1, 2, 3], 'b': {4, 5, 6}}, - {'a': (1, 2), 'b': frozenset([3, 4]), 'c': {'d': [5, 6]}}, - {'lock': threading.Lock()}, - {'func': lambda x: x + 1}, - { - 'int': 42, - 'str': 'hello', - 'list': [1, 2, 3], - 'set': {4, 5}, - 'dict': {'nested': 'value'}, - 'non_copyable': threading.Lock(), - 'function': print - }, - ['list', 'not', 'a', 'dict'], - {'timestamp': datetime.now()}, - {}, - None, -]) -def test_langfuse_logger_prepare_metadata(metadata): - global_langfuse_logger._prepare_metadata(metadata) + +@pytest.mark.parametrize( + "metadata, expected_metadata", + [ + ({"a": 1, "b": 2, "c": 3}, {"a": 1, "b": 2, "c": 3}), + ( + {"a": {"nested_a": 1}, "b": {"nested_b": 2}}, + {"a": {"nested_a": 1}, "b": {"nested_b": 2}}, + ), + ({"a": [1, 2, 3], "b": {4, 5, 6}}, {"a": [1, 2, 3], "b": {4, 5, 6}}), + ( + {"a": (1, 2), "b": frozenset([3, 4]), "c": {"d": [5, 6]}}, + {"a": (1, 2), "b": frozenset([3, 4]), "c": {"d": [5, 6]}}, + ), + ({"lock": threading.Lock()}, {}), + ({"func": lambda x: x + 1}, {}), + ( + { + "int": 42, + "str": "hello", + "list": [1, 2, 3], + "set": {4, 5}, + "dict": {"nested": "value"}, + "non_copyable": threading.Lock(), + "function": print, + }, + { + "int": 42, + "str": "hello", + "list": [1, 2, 3], + "set": {4, 5}, + "dict": {"nested": "value"}, + }, + ), + ( + {"list": ["list", "not", "a", "dict"]}, + {"list": ["list", "not", "a", "dict"]}, + ), + ({}, {}), + (None, None), + ], +) +def test_langfuse_logger_prepare_metadata(metadata, expected_metadata): + result = global_langfuse_logger._prepare_metadata(metadata) + assert result == expected_metadata From 5081b912eb31533c674f4a021e4c1e8edbfe138c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 12 Nov 2024 23:53:07 +0530 Subject: [PATCH 67/67] =?UTF-8?q?bump:=20version=201.52.5=20=E2=86=92=201.?= =?UTF-8?q?52.6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5a5363a1d..17d37c0ce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.52.5" +version = "1.52.6" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -91,7 +91,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.52.5" +version = "1.52.6" version_files = [ "pyproject.toml:^version" ]

ATdn&eCTgasHa1yEfYFPS@cn@tzbSDPnnYk6)rgekT{Th1-JRub+J&4($2gpymFas_<)7D~*^t|E`L`6~5Y5LH|GnQw1ifuT|6J^9B_NFfD zAWJJwRr4GleNIgwkuHWT*ux~wVyv%o30|SAJIwuij-ZG1DCl>RaS@^mW)CDS2 zE6#@Pji&yZg+720bh+H7-y=x;$erGZHb7-8@-mrA-K;Ny0Q^EMHr=eTEJ!th9u$B2}o!+J@l-=A1x^j!jhyCE)E8r** z^bP$X!dpq_m5(dXo^#pSj5EF)-RXgInBR#`bD*G?qNjrO*N2^vUq43vd>ib2D{RLm z`Wfa^9yM0c|5#TZA!n2qfhOm)^-;1oH#pxpv!kAv&;7|g(62No%nSEPc(_|!FHlpg z$?U`FPOl@83cv&TF}iERh^7b8H*pw7+923alfbn)>5rIHx(@Zg>8Pik@eWh7QM0qb@Rs(e}A zj832+_k#K4f#vj$7;6FE>k|Dw?@Vwq=0;v-qUsTH<@scf{a_dF<0C()2lb#fx{jQB zEe!F2uwl>R4@7QOH1^~kmN_@NR?T4e?;=7-N8Pm?igIn?{x6Vjqj9$qcDhd+%p(?K zie5%k=0c@=U@(oizaPK=Hd0Ifh)r9?^Q^%G?!eDoCL_;@5?L@^>`j@CoCQYBYtWQl z)bqwuO=@Ojg`@GB+O6-c9Q7+FYW7E%sak|-m}m8MdLQ_bx2ZX)u)t@5W%T0f71YwN z>!*3HEqZUg1I(M!R0Qv$-hT`p)O+x7g^KAYb%?fE4>5-c8R+R9f;ROMF}vij4O0w9 z1ao=at`yfNcMrc=ekIx8*X|td+^)3FXh#{xX-9WwE~f`v#*%NqFBnWe(G5_pbH;YK zgpV~pR&_5-&ih&)aF_yQ;F21z_S4dX_uT}MD1nE1haC;0i{To4-U>vxZ-wRJC=@Lt zK5f~+vJ-{0x0m8qUL<}O50`>ajHdKsv6*Sy_sAH^OINVJ#ptuE z2P3{db|np6MJeq49x|En#0Vxa_g=b&chg_>k-0Q*0i+Bta?hfa7!Ue4M_Mh7l6pu@ zrQc}6HAl_92GQ9OJhPWPW)obxl5iKVlYQ@i1GgH>6l1S(lb?78CQVClpEoEv zrqc&-9s-ym(M3>Daa{xkeL{XC&$OMfA5t3boxfJet0NKJqGV@ZVMIXd;?C*YT(aY!OkwB+;c6#*+5f9L1PsDJT*s~12eiC&)pYmT5BCRsq?@rWXcXK-};;M?EZo%Sb z{-1)X+Hqp1bHq(dDIwN+%SS`#yibeQoK6ho#AkdWlKX^(KTTA7j|kMKUecOODVmt4 zG4}Z!NJm#XRUdGxW#)RSU_qbh8*ng7D>JjPz7t9GAd9$92akp2?~mVnO%8ID*_4sw z*?G)wc%HTFQb$gvPgSBJJbgQu`ay2)O7?LP4BWg}-b38&CE)l&sXex37aRXQ*OQnc z{*KN8$xH#ADL{`%bI_Zebd;3iv=8Df3bl&Ed`%dumX2u4pPMomuQH7;o?hrTw~+m? zi&3`8wq>^0wjuU{%5L!gp^jmW5suD|ct?KcB9q>qIJ?XP@5P5 zmhNN%bpTbS{>%)yt%YETM&OT+p`LmbwptW9+e)eyX^1G_5tU3Jhx$%L6Gk1TAssLi zxS^+s&%9vnnb8idNqx?OFuKtK9uM>8{2XNM2LvFgT5$ryh6T6pf2 zsSYh9`hQOD@*E$1kn{fu9B&-nbU(YjnrP!OyQVN(ISuoof9caYNp zHx;d3VA6G1iFw)??H0Q3C*h=i)^dZxcVhx`X}+T<^SoE-pQz|mV0RmF5?XO;CNXD9 zq$^|`-0o&{O|7P1@+haV8}*%bXaf8|Bi4gTYzummY)l%?=iKW|bXv|-=QU;!Z*)Fk zR`g5f0u(XYI+ns%JwZQH6Lz=1?HEcRYamk#V8y@_|VFCY-n5Vjwrq z6jd}HE)%~GX1_|XUkTJ>P7?D^;C1&TLvDao*vjrt<@`4yUrSH#T?g{7L-YgKi05^z zz!@;CO<+|+h`fBcT2@d89|Qac(dtyrM^kzgD`9cGU?0=Xp0Ji$plp(YN14g=;7sTg zyv3(Z*9S7kyA}5Of7F_@=nAv6vop)#xONsbn-C^y59401V7B=kcGFJ8F_f5M3jI3+ zVTAYKo?pa922nR@&0AG`{}0$X{jnN}rcCx!oA}@oyrmA{Q+e?vHKl*0LUM|H$=1|< zg`LiUqV#qp12d)V&JNt%6vuAv^Dsv&|6X-Wb39eLD{Jhb=uN+{eX}iM()4WkBz(X) zv`KqX8T${E;SM?8DR9!7<|}OX2)yMjo@)qIo5IA*IrO)j*x8)b^>kt!(crBgPheO;=Ti3*9cn`X#D{_e*}g&2b?$$ z$odQH&rZ63Iub#@!k0cn2kASy2)~$!@)lj9m;C#Xk6qEXVWZdTmv}FU=_S5N&5eyn zy5P34Y8U9z%Fh#zr$_oV9#01;bAm>F;QwjKl%|m<%qBPAORV=0TNH!`EeBpx1DvHM z^^&P%4>8<^Sa8&yyz*zD09)}XrKLtp^VrMO;>EJtHpsTxcFK0!_RLnAUX8c*3e-vl zDkV^^IAh<1D*FTKyIt+U_8Ye0wp_%&6NrAZ%E#$RyZ~D?1Rmca*5nsejrvr~u7UD) zrM~c)smf`LZQR*bdP8{GPNrVm$1-ivmTSv+JIG!y$Bv)ZzG+^1E2?0**Ai3yz&e#9 zlbVa3j|3;P0Sb|C&5BgLACY;M;Wi$Dxta-UJPMui^Y~4l)^clUg0!1me8k?}!fqXr zwzAWk*xO-LRk}&7q_V`zzc~*Fu?CB<{N1q0f51G)_ zG64*HiMfLA$NQj4zt}?s{NX02_&8WeC6J~5c&7oN4KY}-heWIU$P|5@wco&9d^t!58k(OZFdG!vRplIG%Ad(d7j0VmRu#`G{H~ zK=sdKo8su3jwWLq09qHzv&V8iCh`6>bZT~kBi+R2`7}JUU<3QnDYFAiQp7j9Kr;Nu zv2xI^zpsuT)XO(8J1nnXy0z`K_*67~&TGJ>Y_P1KfRMZ+Qu_reK92jjfw*rDi1T&gh1Rf=3)30Cn!Eb|gs?k! z?BQIi0#>dYh-n+^n3)yiArfq-1su>%W^*_exA1Lg zdDhi*8C)|rGC_GEr#itb3Hv<_7)}Cc?NTiDPF_JIc*G%|FoIVeB(}jt7(7`T< zS-|R|m|A`g_ZC+9JA9cg;D`0C9aP{C;ssaIk^O=!qL*39I!BFOvIc?|$yA4$ zTKmm<@Q~(OYs{TIeH{C^k{uZc;uQxjSed7+3-0dgjBgAZq$XT)6C`T{=wn)**J39y!PGzz9lkZ{f{N;kWg z`*c)zF4i-@z+bB@c7ayeH5}k(C=^nfZi9=0%tXbInyCd4+@&Dk+PwbN;Y^>T&x{(fNOZ@9ROI*)DLA ztYR^4Mx(#k@JQlDm-JZ>ta?o3Z6w#AhY7tzfv#!p-ysym4qI!@j^YWSI=?f4N^5Cm zKi9${HDfYx4p6hzRw}P9I~?l!+_|x^V{X9)y34(5hwUFF(A&>i<`D{W?hi7nu`;Kq zF*mF(td&aO2$#v`b}+T3rP!PNt&6ygyIKuS^hc24iQMj2+}}VkopD46lbVCntuh`< zm#si!7E$AJc$%xsTtuXk@GnDPML(h@GXORQ>i*(Cpg=9TufK#PW~#6hHf9(&Uv5$0 zR-ZGQ;JN3(SKMIE5sQG6m!cZ7h}-xCe%NwzGS!`MaNclpAy2oLSNNSOb)b}Aj5gxT zNx~f|lle?Pg0-$~Z8rWe(S1KWx~)V{v%v@(dp;K-d2deJ#Q2mbE?A=Qk9tD8=A z!{D_Ehs0m(+hfr7{oul_tSRPdaMVs@<{iam)L-+O>&ap?vjiuggLF!`Vk~F+Y*pdD z(Ne6;bkHW&b8@Yw#(pca_z5pIoU>j)Z0f75@_WNYhh>A+nub1xdEAif#&5BsG>*8n z4=On=`F)R!%0gAChFB4sdyY6G3;S4&c>FY#2E%&8t=M5?6LLtEh4)5jvo)u3HC`hp z6$yL=6Qi+;@bz}l@l==I>rwo!ci4kV?BXl)7jxWqGrK!4aeq4Mj)ORBy9D%4#52M? zQsdFZuF}))NgleidnX_lX(5(T({VIKJx=;t!UTr4F z?uE%)my;DKg|PBFnJ679FvnC%v^p}+tD)4xe1OhCNh=>*&mGiP@>?b4S)k}wO(&Ss zY5ge5C5xp5W)z(g_gg>3gT@^C4X+z_g^Ct>Y&?l>{)auyC|nU28q-igm}@rT9yG(^ z78iF~Hw>5AN@^%JG*@!3J9B$$z!1M;ZWr2!$MF@>)+1>KUCq-u=bt#~qnPkLT3X2D z_;px}=kN(fh@*@))@C78K4?BOo{1r3-B*~`+lK0Y5Ag7j;%4~F&**@7DWVlADliYr zVMz|c9BxA8DlL<-i-C0(f<1E)^`%cjZRwyjgrDG&=FmU9mZ{|`6{p7ByJ+((UDoZz z7G{``SpC58)`RD3h0b7U>xXcL`?gnTWj%$rS{hE)G`xB` zCQ=Wv$oJu&_JS8%P~_GNPtC5r+Om)$_Aq|KN^UMbgx$Ck25uR$?M700VXxlbdMlMP zdl2zu6E_J(jW}V2lvSu~Ocx5l2VQ7wGWSa5zy!8)dsZ;DTL}I1eJD#*rYh5x3d<4V z{;t+(X^#2E@E5v^w}fE5m^GhF%tn-}izw97O+Si>-lxcv8dE>3ZOju3a8eJ$Qz<09 zrcb^+`*s1`VXd_l+h5LfNpqF6*_bG9w@$(Uh%t{?)1@$Bx0#H6>xZ?eNVFe>XFn|NloHL}*rtP09!p?u zc~v3S^rG<5N;qqvlq~fYs-pVvgDC7RHu3>h?TmFmTtGx$3mY}riZ%>5$OFw6_z&5N z65>qfM%TDF}{oc03`onaD)<(J3)r7%6X}!|JBg zfZpU3PSGc^1W&gDUU><=b1dijk?9d?i5tll-By&e6MIlWIKT;QX1ym~@YUPT5Gme) zjT;6h{2LZ`xU_*ie{FuYE{ccX0q@}4sMZ>z8gXPSXTKX>u|M~H7Iv~6?DofapbpY} zYcjZCC^=R=BRzI2RLm?aHg^+;%_B!^O)tk5@gQuBSHw+mL}ts;*jaA%r()z&J^rcx zv38J6A2rJvOk#jXyN;&3K7fqlrv%|<`EkY+5gY@ zgR=C09OUL_Afo^m#jmF!2YQ328A#MQlKN0M3Jn8@GkU|BUC4LkBxW?%Kp6^94K8i@p&NG$9HePUg_iT8llL!1*eKmL7LTYzc?2Jr(Z}*rHy%*TMb zmSb-APBeV-5sy;IMU`bTu|@^((qUMT7(C1xUVS>O`v5qi53z(Lh&tm?pU^>Ju*dA~ zEn?d1<`sCKRmDt1OA(yJ@%-c_WN=S;r9sp;@PXt>#aVA(zs5b3i7bB4JWkXLo*!AA}v2J1#=OB-I-QX=k|+~91iQX1-K(fGD9 z8aPM6b8GNkdG+5vxi731-S@93_1Lri|1%;E+bTvy3LM{whksgFD+ zCs~3|_{3BGB=RZFF3H4Vqge+hv43Z>%@<@QFUgAiIZ5@{dtdeADp}$ctp93m@*b+% zgL%FL(5eWS(9P*%s`R&~%z<_2zzQb80YA=9Y0Z;FVh> zh0^h5P@22+8d&@(NKD^>=rNQSdm0~cutUS}{WbBaBgrVeC~CZ+TF^u~%pBy6QX{#C zZH=uAEWw)g4E7Lv5qlf>yc6vU>_2VOY>jNax=qm(jTfd?*ftb`{u2Z!oW3VE(7cPkZqHVO0LVf=ZNR4JVOh|DbXe$^A=1w%D4S zyc#FdhwHkq^}oQC7J^#kA!b?y_wbLA!`uueyo^fa2oUBS^d>|bMT~mj{=rnsK2y`H z36j1Ygk~Kb!*jqn3enNAocGFu-6m80Nd%#stw-x`P!w$mN9&t06iA_XK<-}bm4fN#h;x$o) z+{Dja&-}m`DqXu(hq@V7OdU@FR_ROX^3;i`+foOoo=fSQ;!24~nU^9kQMgv>`qW*i zIXw}~UC*u$HfM>2?dzNy{XPd23wj-tB~9_P&(oF2@Hs>OjF&UKNdG3?rgZbu)=g79 zxNG1z|EjK>%4#`G=xKPh3+ip}K2OfnFUhNt1ZFPAB%VxspLi~DX`(wRTXLS1WvTyo zcW5a_b)kkdLf*+-`Uu7C$V9y_pR0_kg6oSbi@O6;FcVx`Tz8#`j%P}O?J0TuduFZ& z>#tx|XY$tayhx2tU6?vB^=!(pl=dkx{M|5R8eFI_&tjC>cj+C?^W=J;#0GL#+jm=I z`(RjOeU)X3p$uWq8#@m;MOUCJ(z(>}kMhM9YCA8*fJywO3tBcRz*MV6kH7+EPT%o{ zc`JAhr>;xwo%%j?il?Esu^ObgK%6Ua>T`lH&Sn>;F#q(BlqmIsiK>8(Z-=RN%~l!B z$+U7gX{k5@2Gvk=I+L#h;Cqxq@pTw{vp5v`&v}-?sI29w=ZW+*_YCo@^N8Ns-cm3F zf1=gB&3nNc3SX|SnnR6;DLslQ*@@~G?YLgZ>MgH#B>80t${)Nk=uqIupm}LFr?X`U z$Z$BFH%(}oAHi3H3j_}e?BKuM-O^RkvCAflul1u|HFYT}_T5r{Bs-JxCmIPy5*H<9 zO>UGNn!F=fOZk=hz*B&e-N^_MD$ALaKaPy92Ci1FwC*VP7k9j0H@}}$<}bQBy4E{G zoLd|kJc&&97VD6 zL}E|Ev?=i1ezP`nyp6mSy;;3uypO%Fy?>Z_aN7GF9^5tct`=;Bn2UfXE>e?-)5d83Xiv~u|K#o9je~bK);m-?XBLqS#|=Lv z@M&P0pawzxgX^ZXr!SM?X!@S%9x?y=Sdf2E@xXimNBwrXySrLBrrWj%eRYTTLrR*I zqsf<&MkSv3)9QDQ-vfUS{xdP5Wn$K(V#%jd@_HJ;6q}&sHJHXHJ-3&2E_2mn@@7Fl zKmRZOQv+|YNWQ|<}C9Bd)Kn-8{nq*s=ldhTmcLF8`b_i+|{3uPmbZOJaq$`v5EPIn7Xj0&Z0I&Z6 zzX9%|u6>UA_D<3nbAi^<8{~PAGB(*Ssa1mipL4&n{W=D`b?RXL6<>F$tltJyHpCtiYBP zbe3|Kbjr?ej<H!Zh!wg0rvyW1ojI4n5Ja9 zp6QOHy_DuvaJ}IFg3<@22AKXK{?j<^nOxhHy7F`Dmu{-dyrVsznZ(>DF@m|p)e}Z1 zY)rVG5S{oTsb)&|R0S)0#Pi(yS!)5VG#WMj-T&w4EWo1Lx-dL*&Y5KBuscw(ySuKf z*xh1dcVf4{-Hol-b?xrBcDEu(OxKzD-^0Vhy?}6H@3mKc>syjd(^dCZ-;^mRADE7P z#q`89-?TNLx3RHdwEn41W=?V^>4M;ZC8vz<7c#gTn-JmZg{HIFhuST+=eFARANF&O z9nP7qV)!^mJZauo-&wy+^-6tAh=aAe1NPuAb@D@+n%cYCFkPhX1U;|aG)w7lZwViI zeWtCqh9R#ZeOFtVi5o6|qmHfdZlhc5DZOGhT#wK~7WOIy{omAm!aW|2iX-xHUrTC6 zSI9zL0WdVKsZNLa{|0C?fEKL>>$10?xFG% z)j_G0exmV2z(7-cVC&$zp^d{UMp(j2hPMeT9acEZ99lor7hE%_exTFT+BDLbQ#V3% z`Pa(>y^B0i?(L2!TW!lI^BD6!^HQ_PoRnFNiQEgUU2PL>J#0^G`5lW~vEG@!2V|>W zt2>K5HN$lVgWs^+ST&%8={DWd3rv!!PQYnnEVF(D!y4UBOvi-EKLz9oU zjVGpY*tlE}0)Q zUuV|Llrr~bZm>+X%g)>GxgL>8&9C4gt0tyMm6!u?PhZ=Z6tL3t(6q)>!&J^R)HKnQ z$MnhgkKvTAt#%_l`VOhCWERG$163bD2jB9p@^o=magBElaCX8IN_X^j6m(2+41uel ziMt~lF$u0|E)#Zrh)3{lrh7ZVcibNZ>ed8{!&&C2&!bcK2I#><+LmC5j%m7R&Dw@K zmo{8G5KHx$&d)C7%KnqVHF~1mC0#k3xg5{!6Y)ysxn{U8xd*Uw!AMJCcQ5xG_Y8N0 zCjp=Pu&g0pvRZvncq_W3wM1!gQX8?h(2+S>6|sD|)lbPnGzUNH2WK$Ge}KO9Io<=F zVeTTX6^=C9Nox-4H%pMUyfxCg+|u9j#4^a*#J1n=b-wa6^Sgvt&2a5Rn97&x3K&xZ zV?t|($A|wH_D|U9u=MaJ5%!4k5ouwL(5s;XLkoml2%KshtXrYU34-RNlqwwb2YRcy zRyl6ktJ??J3R`Vvjd@+>{>)#Qe=|R`M?K6VEE}vpZCCB39RnTj9D%Mc?l$rb|30;v z$s^Nwo3Gju`cT6Fe7o$1NJAIHKEnw^f+5BrGA|?55N=##$fnPwts%VxQ8|GB7g3T4|s_)$IXza-E7>jo3=o$eoRLf+PG2pG5QF}W?o}nl4 zI7dR4r+x_J*fUS0=a4dSG;jS*5?zpyv_Jbx< zDlHyYU6G4;W@qt|Y&rY+bY@~+8)_69Z~R>)c4Hu zew9nmEtRfn2U?>6NQK4fM&Rj&NOd)9H48LzHA$LQS}jZoOLZpQ56xw%h15y9Ar(PS z_10)$xvwW_q&wn7@g_U~=fV5u!8hCNn?`-l4K6o<`384juqXniF9$Ja0ncUkakmas z&0uitD?orfVw%=V*4_vneunQI+|rw|FqNn@UIs(Zh5eaIopA(Ngl_a&OosERD7ck4 zdLcVAqrfA-1AjE1Oo1Mznr<*g9rg_-m-CnGN-`PF9Atbxk|X*`rX?R$h-RFe6=ahB zM;2x;d9KxDOUCj2cyb+9y7}gl)0jwxp(A+R+;j%$=s4(34`V&@H!D=5)B}W6a67e# zufo7tnP3Tj0CM^)-QF|k)%_{%W!A-35Lx@FKlkTz5fO7qw09?XXnuq3>q2FvJN@J_ zbZnHRzP$)L`WgJn3i2-d)nQoE8q6xOf(U#G(yt_3F@4C&XoOFoZ|{N?`^x?}s6DI5 z44xz-KZl%83R#)+AXe7GLNJTI<*9VnJ_WU}Ohz6K{^KRNy1r!FHj=%KAxHL+oL~#G zdHwildA{GwHA1OWo&y#38T9cY*0UW92a#l8n!rxJ4qV%45MOn{)NcUqB(gT$$xEH^ zePUf!fDh1t<7!Ris~I(cl4J!pf^&OIkH>R5VDEwj89^>N4UP4k>i;M@!H$p}?Ze-c zBdZokrT7k4=}-S)Zb;%TkTUm~=G&Yaq7R0dH9XlL@{nFQCG#Qk zqu9ev!U=F$xx_)7zHIQFg-KfJ9laGQ__dl#t)<3NG(Ulqn(rft-HSS<~f6Z$Hk&d*2pjBq%%!*+E|E!O89cp}z)RpQGfr zic=e#0n&09Xrq!;IocuH&FM^;jGWivT&O{o&qc;7qD$%$%?x4A*&KNFI)Lq(!2aB% zH|7_$%sk*@oAd4`naa?DyYD3z_nMl2B>+INA`QmG5Vwh|1g-$Tz$B3Yc# zRJ^K?&2_Ruk?f071sKIgU$X8C=d{I;e4Y0cAQE@9qmE$R$1}m6$*bttP0C5-`bzxFlT zd<44tp|pgYrb^l;_7^LQ+39(HLRZ3Mp%q!M06J05!|ITYirs$R>k3$&FW_evlcjH* z)rT8OCU-NowHAI>Iue#aPE+~nI|%KybhAhJlEBebgdM8_JRcSKD!-fyv~xbN`))7L zB)qTOV3rrk7v!sO^c6&(+#?rSiEOAct#~UsX9k!nFiY^WO&~Y_FL<_>)QdGBi1IQ= z?-+Kg8#T4fWb0S3%JacTE+K>66K!VU>?%{rZoI6xFca1J(u8oa^pd?Yt-1D8(fnvW3HUC*1zpw5L?tU8HmAC9S8{st@4% zX0l$B(G?|mo{r?BtAG}hz))&I*#-0evvg=EQ+$e}F&2@V-T_MXI@ah3dH3s}>t3OC z)+3W~AObs+>s$;{?*+)a`(QBdAtm8t@8Rx+%Tr6m^d5hAA6~?8q-Z=ysids;)zAqX ziD!_O^<6R6`xZ#aR&?MD$>3JC<5AxLlow*H3Xu_LKKnw0?8uC@5Ya_U?rc_A% z$nzpl|8}rux8Ob zCy3R(^v~9W^`;HpVNPsSeYu`o6P$jA_cDm=KE(RN;5gkwoK=jvOfoC53eS3#x*D7a z5yAv=*W=awg!Z7x)5z3Wgc(@qE?})kB45)$#WX;ZZK48po743_>N>MQ*XE_0EZy6d z*;>1}R|rqlp+kT&#G*t*aKL8VQ@ z116cO297Z$8rSPT=zcIm>a#f7SHbngzQI}Fn@N|$Ro6UgYQ}^Nf94O%XzOarYjb`} zlC`@%!Lf_`UU4jOz4SCkGOoio*1}&*tfAXt_+VJ1&!yY0?WQ}We_>c<%*9OC#sOxd z3SP|Rx~oj3Y6eH;3bC_#H}V;XL{;@ah8fQ66dX5g9c)*L9Je^;!^h@uE^;OlE4Oj( zam9NI%5!{!;3!_BE-1c~g0(A|Id+bzV;8j+?N;4({Zqpl<5=TR!z-OedqH}J6)r3; z7Pd2`hUo^rf4$Q^1>N_Y=NuM$oIQ^{%~sw1*?!s4-dWvw*-_M?cMNk3a^82vdTx4O z!&ToGjk=Fo^lthUW9fklhPP+ExQWS4=fqi}QOqa!@lb4dJl|jyjh5?pqdYxaM;+1j zrPh^}?dFQ+66VI{f6eF3c5^k$O3QHTE!$>ie>qAF*55UB&}WciwCUo4enyth)i3w5 zoIkSdiyDwEe-0sMM2^oLLUe~PQ0byDW_%p;aR)>76H znIHeEQ__=1{JE2o$YEZ3z41HpQtW@_c>U4pvkW{1-uP>7`!s% zpODtUErV(V#+yC_To3pfa4z73F0Yd_x23-w$6}ZXt zgR9S^FY>vew&9Jwxc;!VjMQFz!}pA=Pfhn7XGO;)+Y53A8?Do<&n$Y&60^ZF#OkuO zcT91aTQS<*Y) zKTGgSt#tzosFP4t*ZnDNt*?tc{k+3i(x=xgztr1ofaJaqbIrYnodrVz}t_Ej> zlnkpMo*14tJT^2VsH>@kv7Eu7zoV_}9WqglE) zZC!em%z>7>Hk)In>l*9$(ibCyYR4PYrrv?i0uKkq2KEcQ8F(Y;Y;cW`Bf*7(-UZY& z{Lp@r@`};mE3gm(`h*}dYN6;nGTn$k9QGh z$eH3UO>y06{e7csIutY}cx7;#;EKT`g55!<13Lt?(O1T6L7__Bd}NGygXvT+68LmxsrWF+j(s`}~~<p!|3sLgl?|Du-$6M-UW9ZF4GGH^-X)?{Wb4S{5l2IvfvXKgHA6w4 zr+G%X?${-3wanvb%Tmw&shzy$chc{KDH`#YD@+XT+oenNuOP8V&=XZp{SXsTqo98f>trqO7;t#75XX|$RT zqE@)<*UJyx1zh3Iv5tfGBev((Cf3H**VZ34$+6IJ!tvf=bdGlZbRLJPx3I_Ox$G?< z@AgjfymA$AwxgC^*}2#G$&uUs*Rs^y#WKqw_$TVuhn|Uwj+z}3ZMZFkYr_MVM&-+U zF@K-D&2m(TkV4agPMPW&wn=~dPR}#vPn+9(J|ik)VrEg`rY@-CkqQAY*8ruo0nGgFS(h zOq~Mq8;9tNX|IS2z^)IKqdaq*!FIiMs5vlmdwR~a=c$uZhyFdD{@&ccKH8P(`7Xnv zq8hJG5++FJwJiCzo4rZ*v+QOKiuji>-gHS8QMH6`dof^;Y)0@f3&S*Ww*PRNod1 zXJz8W4DedbKu5jvU6gltH+#~_AWw76ayD~h*p689S{`Rs%ee>W@&IAziW_Awa(1{yx;Zfjp^4x?$C zs+WR{TMQC3(!Wez>RIRN<{a$o#8l@3?sA?J-m+vFs*>?2Nw?Axe9oSdA9j^)n%SD$ zniy>l?HSDiX`c92XeNB2{&))?bF23v^9wGxJGik62`Ji)}bBgn(bF6Ee zyCKZ9y*x>tP2Q#0JstXVx2y&qvsKPV4q-fYvy!~g`=954`;@bzeUEjFrKF{XwYELV zRnXT)^VC!`Y*~1@kio_=nq`_C0XM_?$$78tFN<|bHD44ceDDS_O`J_;Hkjvrpw01`pP=Jc9y0UvzZH1SE?)Z6$=Xc!KOa+ z&U3$WXl&cfT{F96t}-vSF0|{Nsm{DEuk#vfcEj1)HOD>Co9sKJo+ioKaKka<%YbF3 zO{U_eCIPvO4%kzw==L(-d8Dqa-lw~$Ehc4Cckul~4QCMfdG3cct}{L-a>cA4NwG?y&6w(cf4ymeDAC6 zi8h06we^42@77+n5c_+3nj_u0#wEIoxQD{1dES%m)sdM@AunkJpI!v4dUv1MYxLNi zZ|!PZOY0=-ciSpwKKYH%N59c@IPk17S#wdnQ+-lX$22qSY-D_7uJDqfA)%R}Tf(%V z>82yPg+f#RFEDRvc{136Ekq;^-y832=U?-Kw1mH5>FJplt<~*j`)kJ>_hjEvVS(nX z)~Q)7b%d#Wig-b6C~eU6(AP2L4k;GaGBhnP*04m=Nt_AiN(H8={ZC#?_im!Rg#1N{ zJkfj4UC*`8@!M7ti5X`ZY^&`oVE2`>o#g@YPN}GsTT&)-`Sqt&G(=?_4wm?yhC93e#Df6%fwbzXCqkIW8KR=b>+PN z!>Shi_E?a%d13qMDda*=7S@&2cQ)iQRx*AubgWM!rkMO8x0dEzPk(t~&;xy#sC?C10q%!o^fSRv%+!&|a)kFK z=$Hf4@V2{a(Xl-a{(z*`MlQLS_oGP)oc^hT$_EZ>z{v{vvmA z491e&CH@61sq1Xzktw_HKe_hrEY* zrBQTN)CE(T7aZ{=kk3(ItM95x&;va`tDiX&4ilBwMKCk{E4!QqpM>n`!@3-U$@nBY zebSrD+ZMLhxp0cNf|-A^*Nf*(4u`&t>uAeqbO0{yN7z!#1HX8=>-t10(~Ss>oK8_Y%vcp<7|6D9i1<9toPzyINMuO`>mg|4AF!g6sG z=(mS(iRTl?!8fE4o*|v1!7@K38)fo6f=QCmv&rqJz!y-Q zyxJbRmAX>9DupfjAN_KosYr zD%=d&2?Kv$4TSp&o-7y~cMEXoi(!w*4r|MKbqBbh4wCJj&$CP=58NLt_H@3#%zI`M z|23p*p=j2uvDz?n4FjLxRU4UEu@T(OV{-l@$W0HXDpv+#}7yNXFE~nC%4W^(du^7A?FTgf*;H{p+Nq2*f z6?BR>hMA!>7^1#R18oGa$_75kw0V;~?>%e1gPD?Df!%8hkI+>r42f`^iR78@gUjCu zi@-qY98EYq*T}{e<1D|XPPLVa!UXUTC1IAz&5FN(g`zfb=LP1Ugi?Q)O5XCL>}3AM zENWcs!Y-|De2pUK|_!9a=ik zzhEOD2>HY&VnulQ&%$vtg3P)RzMJZN_km1kCVBpjV6Coka(c_9<>%hXoX1?=Xzay4 z*tS$mQrECzU8%h1Q>l55*;yR4qR^0*2(!4K>U$xLPue5(f|Wjxv_)(zhKm{8bvkT1 z4$i?dIy0Wbq0>rU=rwq6dhSwXf8+7Pk`>{F7T)9bgnRQc)nX8=S(m)Y-aOP_PRk`> zD>%lPEah{-`d^NUZGUq2HIa-y)IZN6cV&EY<^SL&>EoFL4$bc@;LPVpv=6sewujg& z+8f(9*+)C~d6U%Hwfpd34{3IDR@ZA(0b4^>M&ym!7r8d#On8s5x*>^y)dQS{gN7tS zHbXXD38^z3ORae3Wj@K<-Z|d7AhS$H@eD)eFmo>JZ`&8gAXk1E_U|&YMC~f&Ddf|t zr;DvXXXnwC*4H)+g-J{YI;Bg1!!Sax*LBve(GJvEbkB4zv~Q(s!dIWubIH};>2b7z zIB*ja270-MJ4ZYI!yD=4hydRh&ism_t_|*R??+IXqo8>$0@`bl6o4eu6$?nGG^2Eh zdW~V3eu1u%_Jg!coFHsgD;+1LRqM!~pOkNUo1k0U!DTbYGuhqVwbX zO=}A^s}O1=o9Uk}qHZIMCbp@lZKZ9aovr<=Eez(M3-WzYTMDe_O|UM0>M+%W^XgLS zN-DYzLEnhj%2288)@Zq$kVmu(PS~hMcF$ z@_`Q1O7P5_fGI0Ueg)2bGbeSSd|S@WxlID2c8yGt5A>J?{8(Lgq`QOb1qi-bjwnYd z`!8EDdo@QDS4D4t`mScYp;JI)z(swM=CqVkH!t8rNRLQW_IcU!WP2MSgt>xrrZa}4 z`sw;E`Ww2c+9x1ZzxsxeduQH{tDd8uEx{UVy>8uNJ8s|aC(^!1kG zz2Tl#u5yn2wwabK=IiFymgTl34xjUaYm3X}G&kfJ?8W0z$y2>X?-1`LIXB(O&DDQ|@35e$U^z4CQjo$k@X@~oA9Mq}`f6P{U844Z z<`8etS}id1I|H4xjh$IYy=Atyho^$Of@`Joq_e2&F>}WngPL0C-UDJP!(9gEHVv3d zKiJ-J@(v=N|KVqxq=vYVo~nPTx))WaQgt3pMbkr7P7BAuJ2*#z;cmF=OP2Rg|JVjr z-R>Ifayw<-pdJz*e%A=>FDNs<$UP=&-Yr$r~Oyw)ZUdIP$Aqc{m_pHY!>=6 zyiLTVuE`K}kWC0=pUKYO{$+U{o$M)7Xmt7OWcR>+Q|$e(i{} zFSTv44W!<2n=_Z;D&;QauH`Pl#FK~M6olz!QF!S zIDMEvbH>wF-r`GuSL1P3-7X2d%1`=^BGkEW-hMW>e1Z&wamU7oqGgIVc|AT^GuhA{R02mS7Q zvEw?J8m@ESbnHPf_=x&|;dw_?Q`NT(oIyD63L!Rk?sw8(R z4ku49YNB63C}amUoPf7@pQ^WvFDUrG`AWl*To%ki4rMkbQP*TNen~2FJ(SypBj8RV%BBgzrt<;D{YJ;>-HE)R8C7n~ZK`-l{>$Q5X_L}A$lY?GLOEp!reYHn4 z4W;?yO56G8vL9`W8ALU7)Et;VFHGdCm(bdo2F407vE{_;3~?ady1T zLezd=sq&IXzmCVUj?ee_vrI3i2J#0EuxRq@e^iZ_Akb1cCD`fB+5(@=dRFWUOd?I_ znmfncdclFxpS3EVWj&`-0J#KvX$%#~Hu(FZ|35tZM)<4)eP4V7$smou11U}1)<@k2 zUbz<3$QMvAzYnem&Ia`(vLB(~buWSzO~|@-YZx&efk_GTJ;WxogQxL^_dj^9j={ti zjXwT~E@%KosJeVZu8l7?jQ+U~eD0vu`;rQ08Bh!FU;|2p+hsCuX=aw}a@c>4z-Kv< zDXuH=iN=9SjZk-kA!r(0K0m;h6hS5pbQlg`4b$n5tfwvwC))$AP?LVyH!$|t=`l@$ zP33?1j7j{r2m0hY9xXZzrkPsm9sGPMygTvqT6P9^Vx;capLbfsdi`Q1(^jH@DzGEX z1fTI;e#?4of|0C0c$7-`lS7fvRPbWEv-o{Zu$y+(Sy&ZzGJowzp< z4kn=&$S5_~jA@*(f3e&N^oGvo_j+;erx4v;r{-d#(rE;1QHajQJlKohyyahh7DYEo zH&7uv>5SfkhhLWy=!fUm#e~=9^iB5X*>q@-7I4eF%JR9Nfqi%eH{3g44(@n_=jcKl zV&(gfAU#~ZhD0$FRR(Z2Gmx?>X!{pnhSq`>8i=g>RBhSq9<0=SqLp1dbtqcv9Vm)u zP9h!+%oypu_t=Yyy!8#9qbLZDzR0~Y!EPwA&np;tTHt9`1TAxxzPZlyD-P%CD_|C? zM&Ici&V<5VwVulSMq>Fi_;VB%a1Hk53bAi-g#(vZq&6$>V-;Q_RX@R>lw;k8u@8at zb`Awm7Ef&WBWop6`1cP)r*G&vRoLo(!FOGPh-)H~F}`EbauKKf;0fi#}%yU-&Ula!dXE&?7mEW7j(~d@Flfm&Na+>#Z-8XdIKH0i2>=T~)6%3+=EcsH=E4+`$uPs^EFEDDI@ICf9K>_3k zd(#AYsX_iMFl&~@5G3ddea+3tO*Cg`E)dII10{5u$p9C@_8kI)k&Asf1eVZ3jCug% zmJ+XiMJwE4wZEx!$XFjZ@G251TfkBsN3upB5pzHq9!D-B+0WBtKmG?Aaw44F|6|3E zB58v`XB6h1J?PqvXAN&43mQ0i`f^4`vvQ?amkf5|Ga1D+kWF8Bo;{qs0sMXkVt`KQ zm$Bd)*72T`*oB=u^)FDTm66BF?06B*S`4eJobaNYvIzF90eh!m*OkcuZ&*?Dq}!e8cIgd0v4| zf5y43#7S(-lja4BWzITlZ;|sfdj9o1O(pbSCp^;_@HzKb|6}aJAAe1rXFMLtG&G__ zKff~Lt_XYCk^9%;jf?Z{?b(ZotacYF2}YO$E~BFrTlkgiOfnxAIps6SP;NrT=kZJr zIqj#B#U*%V%0!RxNL4csV~w-WY03piNt1cX z)9lk4{*A0LXXQ&4HV-%>EBGGmr(>0yaPFsyuebXiXJ&Bxz_APxCBGqqXA1t1jL zf^;;Yi=$ZQp5RQGrH-WSX8)HVZ>!jiUPxbl^l(+U7)tTmZgkWi?){$q`NG#>+{wcK zf1uf1?5=^k=0;8;KqqSXJLQ}}5`A5Rg z5()Coro;z6n>+zL;Y3z{D7}}@b?k6gBs_|4ABD4f4N?`&Px^D_Y^>~8 zxUc>}_T$N@Rp+M({C!T4gj@J62a-9G-ycLw5P;sB3eV6%{OFe8J&V9PGL`*qhb->R z!im;LkHzzbAJ}b8mgQ5iGBONeQ_(9E*zxI{y>0wUdu&)k{>=`rM=KZ&V&Hq&%;`{A z<*y;D%AArW3KI_!R)D!wWV(>sQ%KcjPR~K^k%zZw1RC`obL5AQhkDPB?KO!5HxIg)^s)b`t^A6KHnf@=P{AZ zCHx&7AHRI%c?yZ^4TAZw9oPH8xp(vV9&Ip#b5It$P?&7?3oz;m`nfWm=ycYsE2sAl z*5)$n^U`0Me=7-}%sA{+Bdl)@-f$e88pZf0cuhu&z&oc1BSU0dY4EYGIqsd6%pB+a)74dr1S#_~1~ z$yHmB-Ce9)J*4b4YxtQR4&Vfp;tZ$o2D`Cgq3{GeLmu;RqF0g=mx+R^!YMQ#87z+O zSb(kyhjplak2JJJraz!l?txmW#d-N3Kgo`L+``H% zK#Mj+Mt5LYmEF>^t3B|)Y7@(T!$YdT%Kw7b($8mJyy1bwtS>;~NZES4rWdeX$duH@eW(WC`=voKEDMC7|9r+gGV zze?m5}> z_C0oYKX20(8LY{|wQGz9?ZH_n#QVhXRE>$d@^YGDS@T-NT?Xtze(oE9)>UR%9%fHR za)y)8cRA1t|FBv*WP1@S7zQ@FDR0z+Rhmr4N~8L*F!F&FkaSzvayb z@g_6zg1_M#*XJn({?5T9@N`b$4OagH>#OHECvy5{v0m?y*){BSE-Xt9-a84IxrQBA zJokU`2o@nB|6wZ&;YS&HlLp8_MRq?3Ng2u6eMe49=E|$^HTz?Wl?h^BxZ49H-G&Bw z!>KEZ2NlZdedfd}6OxwWKXgL_^<%{~FgA_D&pC)Mt7V<@qq%m`7tw-!9gno;;4GBj zJwBjWw{q$(a?OHhs94s=&brUy9aoyqEX)Sv0Jy&_e->%`=qlq9^V>@&4oXt6x&-jga{_+gNnrB#6X_aR$ql^A3wHDuS~4B)2d)V`+Aheq z!nj)+-$(I`O#D}wv~!Ac6^GTDiWTTh+^k2_tLWG`2BXhNsw&;+DVq=9PeWgMs-Fdz z+|e1f_Az)aF;rHSuE}U-w`^jf(;FgoFHznX&=C<>%*M!4Zn_tVKr!)H>D6bg$=#7Sh&PO0mQ#WX*(xecw1 z!;LA1Vfv!Fx%4(R(62GHH@-7;*I(9l*OZgii1AF_ybiV^kN8-<7|yW{bSvGX%R%Wy z{RPKOORtlu2GO1SgMl1UWhxZ8sUR{3*w-H${y*$fPIWah0kL>1Qr4W5 zkHTxRd2N^sy8yO{c&cS1$OB$NBShkz74>hWviy^~?V;nV2NMvQP${WH^_pZRUti&^ zN>N>1O#UUDoYGSEFPf~EGG%rnoZ%PMf#jmr2veBsxP+;pB3-UI1wWnoYsvZ@!*XY$ zsm~H~j9~TBK(DBL2bh*K9**!O-1QgzeM+AH5}mX|kSUKZ50-8M`xbz&HXJ$LgdR-9 zswJRr3nE4Tankm2iU$(W|D>iqfiASC@F`OHfn{Yg$*1p03bIg>NouQJgBa0B$uVmjB_5wXoeMk;4zy3QfD z52*5Oq`p~@^=e1ISX(sk3-tp!SVd7o_sB?LgrwIEGF&$vMS@dP;h%Zk0#J^o|%0_zcew&pUSww;FbY zukLf6Ti$(48|g&XhlX7D8M;pUsz$?Ix=vWh#KeNEdM=>|GdNR)9^w${t4yg7F2Qqt z7oD&QpL#id(+aZOu~cmP($|+yE&{KOH~lt-Js#q#XK3yD$b{G-SQ zZ}8P3OZ1icuNvEQniKcUHw5m)mT;Wv=rGDop1L-Ft9TlEByOld+$?oZtpGNwR-N;rQa50PEIf(W0?5T5PZO35REOk ze>wI!1`DFFyAQw{%T!fIeh)Ey@;kNTvE*|vkR6+ftU1X5M{x!{h>x6CnsVDO2KVa+xLN)RsGNn6F3={@C$3H_NWcg5bEbAG&#k5APjCv zdP8T^l)zOcjj@L=4lGp}&2{Z%eIDZPhmhl(oo1JGvaLzN^^b{!E__5|4^b@h+2x zYS1e>f~j17u>u{W$xQ9~&2-=mFyDVsrI97Sh?nsIUwAJvQJ7OykF4%&GMIBgQS?V6 zj^?ba^WFAISil#=eG8GJ-FU<=VFryuj~trr(7d91CvT4wuAN z&UI#J6&XxqrAKn62HOs!=pEZ}gs7?gV!| z&mnJ5-!-__FOfa6AoWF=&NN9lCC-O?sG#OAEH~e<{ezg_mO>S|339PRyurl!7i2}w zbJ~@eU>ThI-K=S(e40t-ci|sM@qPrk@|ph|S!o~t4MoeQ%S(N=k$DFm&U<{A6GX*{ zs_E)rG+QA724uR;H!^j>L0z#XA13y$3;Bx6=-K-Cc;|@MJCT#k02{UrJjNH+au+?h zn?S;hqUxh)p_;z4avYeFF1|@HMsH*;)H`(-;S5apUBGsY5{in^(m3fK>62JZOb}ip zm8VeOUg}K0h{D3LZo}{+KG9>|6`kz^(RH0laXxRTH`1F5KD{lvPb>FC?+u~zpb99M zKKK)3h#wjd`F3 z;?vM=G88@iQhttQ>I;r#64v|?A0o3Eldx6IsR{MLZb#$y(Nc8|1H*rC27T+);@oC>f7gd6?ZhEgu(l(g4$(~Z~1>l^3_Y2FCG z;m(Z4XD>~Lp&A$flkW_CU*$bvo|o?B?jGQWpVB8Y1sqLT?+DLhn0%&t?s$j$o{&8_ zi*2pW$%w%puc2NcOr=xtHItkVV^zYK74lW+EAFCGtudCRo^TQ#=x5X(TI1gpAwRj; z@1cjHGS=r6(9*f_CK~gu9kBQ><+^D8E%ay(!MM#*h2WMFq zI3ddj|5BNIt?nz_=B)3h()ECx$wd6ny+m`<$y&EUTBg&{eVTfjhZ#wI`Iy1P7!9^; zF}>|cXv7xS#}H(v3q1X^)V1ie)C*6L#FfD9pmoCrY-mcy{)Sr)mY?zG=&{C&_j~sT0b-tDG?*`&; zH^zS+hW9%k`;x961Li10ctr28PMCt0$&SsrPtWaMaw1zc`DqP2RS_*sbJWMwasL4 z<`MgRBu~=>A8-y>#iQbNskbItQ%`CthKiM>OwDTD41F*CNL_94G?g{YH9a)*K%TFn z!$VKk+W|5hgNag8$r%^*pJ7)06W02=H^4jJQ^E7goeoD$LDnOu{GGW)My6AB^;QIP zVIr2(5TB-!YpljIWLF>Hq&^nR0=-9cOsd38)?$?~75rUiVJJ)!I$@xC1sJl?$bUSM z;%c%qC-Lzo$#0o`rS#N>b4He6H@46TIgtA4QM~`@zCzro5_WtHQDA+%nmBa#FeX?P zLtYxddZGBa${fKqRJPBc$-&sG*O3n;ONP&`u!-&?ewl?{I4$R3#S@sH^%hB;f*w!7 znm?06KoNwJ6_`$xnF59NiA&>UosKtfpAPbCRNesH5w_EnTdm_zZGNl z&Lk%AsMW1iQ~e7;ryc-NbOC>*I-SxFutZn*_}9CieLdlQ=Z)gsqIsg};1&jeA4|cD zXobHc`==58KcM>bib`L7I)~@euRUEoPd!sznJM(k$E@O?8Wo>igtvb`u{fj2a!t zi^P)UJ)kP6u8uU<;C^+O9VIYrv9VMTey9UtRbjCDm3pkWMpIe$PFG!bMUxKZFj9Cf z)RT5<+G}%a{h&m02-(zEh}4@h&#fmJ!BQ|jeZ-^4OQbXbTj z`+T(W7cBNmZ1o~4C-_uvPl5)V{G@&d^R zg%izA!rN@lPbA_#CZ3QZ7=$z{EJ#MO2N$y9h1TfTMO;0Ci2en+|GVVy&XB{h!K8eg z?9yUlmlU!C*T_1ML+4o)6v9k6Sl;mOSMiOF)M-W77fj%Kb|K43O(_pL@ga9yNj_m& zR@557EZK(ON0aGtAA>h~i50ox8w;mJZcw~m_~}+Sa6y&xPFkwiapVqiVEGS`e@P%$ zeHuhad-RPH-n3kB?LOj6uFqms{S2Pv0F#fafvA2*>{Ekm^;OmGi3iu~yrD)hTpv03a@D>5hf$%CchO&q5hqQ|ZdU@bkY z@MWIQ4!`nZys|0CXm4u!{m2W=Cr`8u>ve}~-X}Aa!sjEh^S{9}KLqXZ0zAcU&hlt{ zQi+UEVfIt0<~8D-+mlI#{EPh4cs$#Ee^}O zmm~P}bJ(rp{LV?%QsG}IKrY=$E+-Nl(~%YK$6CdZS1Q71Q?hMc@ZTD);^*ZiN=!Y`Ufef%e|YBnO5R7C0ZNKcXiQOamd3YGRs~0O(jch zAbvMd1=QlTtFY)r`0gF+w16jC&gnYDYVIYob%JMHOci1U=WRVdy~yu8BnvH)vv@>S zZY{d_Fj@N$B_^S>5y0uV(Tao$(;{;DNnmLe+FuVSy?eD8JTZau>3X zIq45+i|5e}zhwefNvBsZmTNT6s@7b>t{TbeB@_P(XvUr7cQ5c=4&)>U>(UchzJ&}M zk@S9?#tX>B5~{=d@d)Q3hkbZQJ$lGX42~j~pPLNe1wPDJ_wM}7CU*D$ zCrHuhp`2|qSJq;wx+3*sS^oyqC)%P9_VNT>IZMmHv@B!iZgQr+f(ILdf3*l?*;p*} zR3Z)^{8`n6szf^N;Pxmkd}EU4EBwHpAnlfu*QkyiTt)U_BC=YXER%=d!o)TYta$3<$kp`>g9}Z5lDcPjf zbZyKhN7_JMN@vI_Q08AftzakFMqcqOEM{Y1gX#@nCDw8L6(KEH-7ZkiCoJiuTF z4t+2V3v>;pA!WktPpVQ+(NzVAxkiyUstfPYM)H>@IX%0O&u`Q{67d$kkQEw@r@w&r z-3My9A6``v@^=YvI=y5^Yw!`_FNxKB$XyEK`E0@tD3c`qN9D^x?jwR2qXHjA!1?7P z=lTJ=y&4Va!~3$3$5tj!WN=RAaFuMF;Bd}d4Es?sYhNn!EbY)360H0c(Y)30&o?rw z`Z7{8L^#W}jl?f*^-!|A0bQ|AI z=I4r?&||Zo;QQP~?_5TcD>kG*=E2t;h)D5Uz#E|usVad9QeY2s{R-!GJP~*LbgpA>@4&qOhg3F8E zWqG1JfPB|TdT$odTQix=-V}1^`-thM^8H$((kR~VAV|&rXqeo*^97>6O6d3>Nb6c+ znQ7?subiS#yskLx!!S-n%PhZd5OyvI|9BCecs8U&VQ%b#?&*z|+C-JGHE%Wtzg$bM zWfN z1`e@kX`hrVJ&;C7vba+8pmUz1p=!geS&houV{GpuJp7Mna2s!0k0^XO{eya+kIti8 zXq7HhURq-%p4wdA(!tt=;So#3u%+>& zwiAE0<+(?aDH%-0z9*P>wfZu8W)A+&KNnY#crxr3 z`f|$%X-wp`5gYCx@=!1kKk)-PP#?OE2ebfLKa7kHB>v1sJ}(Eha4fdsHIliRlh_3B zPT>Qcgx~&%TD~2xF*|-$DLxgnK^df`EY_|A^3s!7<^-Nb8RSb@k$s$iL3o9^m>P2s z2G+{-UB?o;)g?R9hOdUf$hv{MtmY{e^WI8@IUYYFmFPf0K+or{E4izl41k{;u^n&Y z2m0zh_?J*Tt`JahKG0IrIFnIKVcV_tQ<-UiC0-(Y6v~N<(J`swC2@l|9tmrX))^>v zh1)Y&d@gJP%O{gto`zN^gMTm&FZ2}|nrX;zP5h^8JX=?ELP6>e$`tI;RHmnp(;3YZ zKc=TC7H{ed8Z-#+?YZUUvEzK83(@$s8rqnqLx58Uicojb%Pi$ETtNfM-PwC~_$(5>L=>y@KY{GRXU?)4`<%e*hzLV#B$$Ne!W3!9A zPApQdWN~`ooh_oyw~+5!gJ+mZ0c`ZEcfqW=BY9aZY1Jnsuv5%|CEBwTUMBuMTM7kf4kA1wyBanJga2h{F z9(M6Qikyr@iUuP?W69Mh$m`o^Nm^UCvJy@B=F_68qekIHxuCxhZ?9Ctqh1jnYFj#-6 zitFh(2~}IT!g%r=ZOFTqC(6vuo8}}cH{!J%AudypO)GfrT}0O7S)1(mWDX)dBh@Vf zHEu69=^dT7(~!O4ph`xfwI-lrSNV<*|6V{gzY-ZLGrMw;X>j3xOL*mZxQ-Gz=H!gV zu@i%k>-xm^YI3wD{xt>!qQZ?*h}9X&37v+9KaNLiCR$vBemKH@-oStGP@iCa6YH=G z89a=wKg*u%<>N5_KZ~s0AcJ@rH1rEHG6qm{Pm#Gz*xbRaV=1Iv04cVaGccD3w*pVl zBx}vb;kR^U{Y&$l#dwaYWVFhHY^slxC|v+!*}rB)z@4!5Yk8u1$W9wjz03*a%|Mi7R81Y3r*OCru>G|?Gs9ME!tm+9qLW& zx*N%y&exS#_a0n-Gd6q}a->L8PxeO9mt*;zd+5<;cp`@$9mJ+O0MOLL5TCq94+BCHG19s;$tFj(Rd&()Yp$9)_lY^LS`R;_GP%x%T)jM%L!~R=82ge! zWO*LjF_TF19J^;jhrP!hD3&^zPcNU@@liBnnp+@1+)u^d-# z#apyumqYl;8?tMk@vD?f+9A&ML9E?2R$({mwiWw-6|1Kp@z3KK+#|=cA?ws7WLdD; zS+V~{G|)`+<_JFL@>j}C#lu*ug4fqu!_@)A*dqLo&~-bd0@a?{<dXB`q5W3z(@l6Eld$en zvb>P(ypzJfy&k<3$5Zy;_qTF|8`!W*Tv^E$DjmkFCMlBsBOT{EHRD7ay@ve!PNA^qmI%XUFf{h;83Z zrbCH~lkpRBsU7&WH}GzmW6e{|!Qx&b-~JTe;w?}7nJ2i-^WRkEA-{B#XVa)JVI$tF z@)JwvQvc#^gNTW;b0+JsW-oZ6|Fd)!U{aM|1E0Bbr#D%arD5srSh_(%Lg@wpL9i)7 zP(fM*0TEE7B&EAUI+pJ4USf0R)_lJ+|L>V+VRvTkz3+Rv-gA<{`avlFH*Cbe;gzFs zo*xL72C~C72(PhJklHB zY8_A5&7DVr_5NYKJ`9X^MU!;py(zpVpOjnZILch!r}X`~3#LWN=#Me}2t7ATM_A3o!Bk z{*zwq5BOWoOWuQ?g6vsNRAsU1O+5c2UY|qb<9XI!Q1)t8HH$kpg?EzRxKH4<-pJdE z$n2a%2dkkmKZLtvKE@m@tshx|aL8v!>sh?UvWpqOVIC6iP?#b_HaU@AC5U9iVt2Mf zDt5ynk(u)jWLj}DkRJABva%TX{}I>iU~LCz7)7E2M;BL?Vvi`r9E=G zD3-zlIPWR7&~@P>iPDzgo<;brJ=Wmo{Cvmra+0I+a%Dy2dv!j~ft-GVCOnK3dxz{! z;F+cI+?kIJ#FrqCFY>9xz;D4Ba^C1Q`mHs*SdLh)X49Z&pwVh`V=}2me^}Zx; z=nYFBfl@Bk;{xzJV5KWGPz9RE z2Pf48YR!-{_0S{9?EQ8)JE|hv-msUaK=%>wkTW`Ofya7Y|52+i(-Br-?F|Lza<1(z z_Q+FgXwH5?O}E(p6X?ecuB)Qy>wx{jVE3ai-N@O{jy$g#bkc$+%UUE3A(M~qA!kd5 zGLlK8w5dv3v<%Jc0~VBf~mz2K!GQbbO^4hv@t znuPUuVV)=FWP0=TkD)%9uG|c$)nf-SoXd^pIypn<2FH=$wHEx+md{<}*dKx)i#v-C zmw+X97nr;OQVLhfDdL>u#xKDwOW>A`Xt0gp3fFDGYCG89jU8|ntc!j;1!S`5ztEYL zv_)&zhNAO|hW!!E?^)1_9eQqr1wI_VXau@MqWt;bifXK?Gv412{%r*(M1z-1 z?20Z4v>oxK1^cLOJKhQAKqs}J^z06RI)cai6&0mV%4bPoRf6F!x*{cBk5dGP)t z*2zr%I*Xi-<0-|UMd{*jgC{>`brpbjWv~^5R>zu@O{?1b<6rkt0NoL?kXpPZs1h-9Dd z;jHI;GnQ{A@gsimAvobYoOTDuCPRapprt8z4M(8kRKEQZoZJT5H=sF@q9Vz2b5F71 zGq~;*aEjzF7Z$Yi#}FLZkwB05w;Gzc73*+<<2J~+)w==cB6iknRIEzC93oMdE^;xn~q`ErERP3$)}Md=6;T&wC!A$jA)nxGeW8 z0H4M4$t!4V5BCv&>H*k&2ko8*rw_ngi24l4x0L034z83w01?2k9GDmdq^t3}8n#jH zb6pCUKF6xIA~k=-!x5>v9Qu8S9C!~6TxMq?xl>rP^sXoYP3c^53YysnO-_b}7lkc_ zZD^Ub(DyHJ;wWt0zme4!p(J{=vb#KJlXLLG!tiT8&)5ldFU5Y8`l?}EwU@Q$2V(j8 zwg>LZ;Mv=t%VX?Ms+Dd*8Ta5x6M78tX(k+a4!NcRW6_+su_=ZEy(-AFBH+3?6jd9o zA4FzL?bSNgcbGAOQRlX&Do64RQypJR7EtxyY84EdO{+d(jjyOJUbg1aRhw% zd0HCx-N%}Cu(D&UOXkRUcs~mHTM)Sr&%d!y)hnKUf_+>7jym-rIiRmPP<(0bFQ=@_ z!C_5#r#e(5=ZXaz&-ihp4XXUVmJ@qds7fNC{owP~Xb7n;FUkKEx#J_`mCSRx6@G>t zIKKjho4~u&&$-z}O?bL0_fLRIr7lgwdMg57^g+@JS2W=+8Q@!Fm*@>2lqdeoUHhOv5xp<6ZdJDE`I|shwd8Whm7zyWPtu3IH zR#-xffPh$H2}qXg?4tq{^da_ISF~hzR#S^lCAL!fVPhTKrufI;mr)l~u_ZTLMWFqU{u z4t8CTy_ZDFK8G8ZpyhVMx6;q#KF{0)=S)MNAH#aN!uzL?G*ToLdg)k1G%N8c&P%$#G-PE`wdZDUG8^O_@W}+5f$)9{fNML~I)RT9T_M`ejkGBo zrppo7Cu72rx+?38MJK6Hlvw%F%VaBjxed;|jVws#jww9jt4J7}l{@J{5}5(_SY4Iwyt0+hwVb3mEVtW+jE zNzKGFWP#4}Zu2?|)c!_B?MJE~05j4_L~ykg8E}z3%Ngv8Jn<-yTMixV0@hF2hgcbJ zfM5iapAGtxJ|!jjKRftPc~4f53%Cha7`zh+G^N+t3i3Bgi3PQR>ip;enTC0b-9HV> zdFku)8tB@A_XBWujuix1??W(}%O;op5k5k{j)W+9a% zzxs|H{Ks!s!1uGTJ`rzp6Oez&JzcC+D(h}?rRXuC>;O_;=4zeh4u60pv6}uwJ|5#) z4y1VslJy;svsgtL=&=%pb1;6MRf&ha72I54?=mYhnZK{_4e`dV z0-@{RI~`dpR`zS&6=^Q`-T~iFXXj7BmP*b0Ht;O)NeD|^(G-dN7RU1vSYK7>z5?&W zgFCTBW1-%X$erBKVL^5x-_wXzh_*C%UNkHHfE|B<9rYzPM8)uToLE!Gps9Q0xbnbD z@xZ~!{WuWE|90%KldSnBd)N(K-30Qor{}Dy5U^_n{Y%Cu2Xqw8)s=x!9iAewv3lqM zAJmi&xaVPwCUDolPCS2A=X-LZd?@Sa&9`M*VRK|$S7@&`{2}$BAF&%Zc+Z3a5?Omv z*r)ygJT&BcJ=sGa?opE6Nl&a+cp`nM&RK*%{Uy4l9{OQ4_y3&Lw1+37p#6M&*FpEF zN7$@lflGEodoW}8~$bnACZ;291+}H`$uL2Q?_ne1TZ@{nDS*J+e5S%1dLjeB21SC$f zUXk3=<7)vcIRM=*#HJa|YSyreRjm64*QB6{vVfx06pCcO&K3XiogY}OVe-lk_lVUZRPGA%O%d*%i}z}WCA9c322$%W(oA^w z0T74)#}4i(5jHQ>Ecq9e-}1vrWq8#D`vww6V_gY6zbCZa5xVY%e38CwF-Q-wHp)WZ z?V#>DtgAJVpK-*0<`UWdmhZRW(>z!Qjp2}nTqhNio$)N&^HZ0*#PgDRYUxNNejP2T zS>qw7WEYr|999O}#*IJs8a)1mY+MHn_d{3vp`$BMmqe9kV26&!nw|mvB+e)lZiH*P zgZNb0*roV(5{Z#sRw_^m@)QO56y#Hd{fiA2fsV5w;iV_L~w7B`2_v*lRr8*a4__0J`UFH~+zF6;g8-ob@-~7(vEo7P@aEe6j|f7CkO@u4L<)LbLhUg%9k#02?`= zC+RyBjcm=wnq}=nh^36DSJ5bBrr3j3!k%79WMEBbs|*xX0?1oP@+9yhwx&d;Lg=p& zd|nfplKyF8CrLcxJ(Bzm>wE|`iMM2h3UbFH;PVyFf6dNsK^Z5Zt@B8a$KXZkD1`qd zp0)?B-@wl2!qF3vLfheYiPcAOe;;%sJ;bELrF5E;x&pCc3-Wx4waB*uVEQ)SaYHo) zS$lcDA-h$I~~2S1(Zl>6p?HKB|ZnU7DTV5AR2E zhB44#8t`3=y}6E6CjhY@kq$+}9=7B@S3!4IS%Jp0_OOf3p~?x!pZ8pC5&4v}DNfcI zfXZ{judVo7ddt-b%Z}naPhgxqjGabE`3B(X1r%{9dwPkNHm{Fj|8u_M@7F7@Fr$!nB{%Zfuc-MDfvaBdC$D)8L~NZ8K6p*(cflE1_%@^aTG=&=;{?7`ED z<6|WAdd6NQGEtH)AG3k_di2{NUXOU!gvwHQNmk-2a<~b1lM0kv(5qO~g?L_fR+pQV zRSeT+d9e6BkrlC~rNiHM$j#+cp?%M_BAX=ggQusxq!bttOeyMS9t1Uu)Yj^ zB|Q-z0L@C=U$R|6XzC<9{SdAYJy3}IJmUKS*5wW3Cl7M>EL1-OPXCR29Ot`2JsOx1 z85zxrb0FnaDEJmMlN|{o)|vF%yT^0iLl5te(9$Pq39@-EE07G|GA!LY&_e;fuVWSF z0IJ2gx&Sz62v%#cuT-ey8IV%o9f3<#K1)ZUONUQ`Youq`|6r7hmAvJ<;vq%xl1_ob zy$|>%^VaCvQxbetf?smOA$g&Ll4zE4?D0N)E|$hsbYd<(HQ_wzhqRR|@AK^U{Cx?} zTp}bI{85W_%3W+|>ziRN^|DgY1>ytTXI**0NdaIG3sfX-B6*pW-0vE=&jy?*;fLb= z{Qm|Ge-()k>>)PF@aAUYxM$QKiHP6t@Jj( zgyi{OUNn_=B-3~R?tX!O%nmlBJCs1b2=}SUT4aVwY5s2j_8P-W1%Y#6tXt`$+W`J? za8)@lERl#zb`lNmhtMrzA4+swdQ8Uh3LrfME3#L~1d8AJ2pDBR1%kU6JlR*s7O72B z{@)%+2Sz&9O>N}CN8rbgtoS`#-6T?QKX|#xkI>Dp#MB0btIF2$9{t+E*jA+Acl>OI z9;Ft;!zwbt+eYlPsnF?B?w$dUNVZU7!|Sjgc7J?X<)5t>SYing*Y>4&_V=NxBsYoNOMXsG|qIhSmva8eK%A^8UJ zf+bri^3Q?1kkf~wh`Dw|28e}Ff)$B&6q_oFJ;^CV@sxdhCU`3Z^mFmIV96eqni3)N zphHgrCCN#j<^F}ic5&7xxE0Hs9v;A@I?}Q*@3^6#2!3)x#r@Ik?SVr{b|f*4eC)Iy zzgh5vbccLHuAvMvs}XmR$cy+Se%=x7UJZPugUg3dw#4Z2qpb>X4~g{`Wj{T!HOJv? zeH6w|E}mBc%`u*+`*iY39l@K(^lDINd8k&R0n!I|C)j$#?j-x3$djb^>Pu)y>T@KY zEBQImi)DFL2RH4Zb%8;4R^#J;iBFYewRy3iZi1WT;N=o4%*$tze^2DkITi%|WF@O9!niLANFRnPhC1y_ic ze})7}1u8zCF7;tmfmi@4eFV*l^vi-D#QrJFPkC_o0s9j_Fq5aH@u|cX5AcltS;G=N zmP%FWX(&0cYskM{!2SsLIsz?kLLy#ZN0Ntm&FXIO9O_D*l^v1DCwD#fk&m=>p%p3pApEy-15n;6dtTGehx6samYJ2KX$`-dcw( zQK?(V2|Y!#PvL;P@OINMe%d10TC;!gDGl5Knr zj3kd7fo`}8b-!fSQaPZ5wSTdJdXuA*lfB8T(GPbNbMVpA8a`XQwbo(zzO0jIc=n#o;OYiDaU?r9J zq{rt9@Ol=?dm7f~QSj_F-kXPRl&J^T!RH<8UQ&g3o^Oh!ew4d0eF=z&w3Gabq#I=_|&cT)542pnGmyU)3&MAvTcjUcoX1m?Yv>d`@=jYc0>pCK@GkcPaI6T=)d8<4)XEh)1769qEBD*-U~k6 z$PN~Q!)rW$e;5`L*n{+sp7zA9~m^6>~I-8$MfApD6~9u zU!DI8aedyflzfB4d(GX`Sxbl?>FM_Xj5@-+B^p~eRYMj?)I+6)=@NT735>H?p@Yv~ z@y=7Oje$m9vC`}8`VQaO5~dc3H9bbgNG*og2xr)tbWWBk%ERnaa%^k}HI* z0A729efdu4T;b82(0Unu6A3Mv>@64&ZQubia?cF3fyB!9ai@E%S7kMs@aGnEs`M-u zUtTne3rM{JyBbog1p5(fBwfv~vVX~*%bHf=LrPVxL=r_iN)|%0f+u)tJg_c>yf4jt zoMDM5c7*J!29i>;S7M=x7imKhUSl=luir+lNlrqdQTM{ME|`b{Ub3IJNIy5Qm;93+ z#=g|fh=a&U{ILP2`cJ_W7oJzhy zGOhno5ix5E3xv@tX1sy|3&A-UM>txiQOJWW?Zba^T09_2*`TVv9Khb zBpGS3E99w_kUdS=b#|_ed@o`Fm8kcGN*nXi%?$<`AH6z8sXC`-ERN`6pga&!VF^?;XPB7vt# zoKK=OXSv5^_WJ}|xj3{Wd4UZ6J^*AMgA1v=5IZpr+RDS7BsZtATd8r00Gc9qg!WTe zyL5aNjlx(jup0@kWU7apw1^2)ekr6?e^w}dv&7WJB9z#RoL3gAO@SV7az}}i=7UxW zLnY3zB#PnLCHPycU5h9EuR^5|?@8T{^cs+yj>MDCfX!XpN36KJ>~0^QoZ(;Tk1-F4 zI-O|RcX$)u5-s@?FJvMWv{EB28T=i{m&fc)s+w*>bvk^1hWp+JQ?L2#->~dX20GVy z?=4a$2P-NLjyn)L{*L;Q{zx0C!Rt(C8{8KAMtO+9+Wt)$gf23m&Utdl}aC&1ME*CwVT5K zN*{!I>}@R^FZrQlIBPAkeHGGuBYTkS(o5czIV)0KnasZ@S&PgAITw~)674t%{G>8c zEQuRo&OF4r1lqa#e{NPfuKw`C5fsUGoD_nfBh+AasM zD#|t4kW=?rO}GF58)??V>JS&HCTsu)-|98p-_n<6#@q0 zm&%-mT0lUcQW4BaUA@FQ-mnr6*GpfEK}e~b>@o@5+KHjxWt~sJ#dTIEH3?GZ{1k}C z@D$OiURIF>?nB@}`jyCjb8<3N{I}dZwFERMvofN1Z#Po^A`sgj#{WtvVI@|@vT)w# zUnt@LSBV5T0mhGnDb5EUrvXQ?Q+3uTkp$89QcHac8M%`+slco#P?t&~$@T>3R-c#8 z?}vui5*#@c{OZsuPFA;L+Fg6PxJKCVQ8?kX*J_D8ky_Te*eT~ZwNryQLkH^q#{lJc z*x)&Vqy=n!*w(Sg$o^!ng>$R2chORf ziSCU<&Ypwb&jG0iKu@as?;(RyxlVH0|C0<0kSmfES-`z_bH#kP#)ka8d<$?=Cx*eadXFCdkHtVJ&6jZx=d8@Ij?z&3{z|W8ZPvBoSv|ox0bO|)qQ7SDLu+;ZcR=VnXdw?={g~){ zHRz`er{3!Elxj?t_?+{(muz*FJ+^Md)PCaha52_2m9vIFb9G&6H6?%67^>`sEOGH~ z2_Sch6DyxWflRLj>T*hGH)q#c(T`&R_(+8-Kcqrfs)X|cfgZdnlPO$?UYY~W*TGjU zIDgiP^MMW7+gVQKl&2rR51LCuA`V7I*X13J*j)qiZ8{jZ$l7P%R~Mxl#ZY!zh<@Rb z%!GSk)#NPWX-FC?Yna4o27H7X@f=*3h?X8fSO_BQ((R0d^ z5ZTcVI+(+05b5aI1sojU_Z8@wy!3G}=;pDMouo69vM7|ej=$uTMkl<^+3cn>@^=7| zVmtS*#k>8`w!_ggQ-RbAaCZ}))95VP53Mth6(k^m^F!zJ$nVF~UuO{bnHGlgKTyG8 zQ#^r%lbENnOoEf?c3`bGjKN-pz$(crt_u3-=ngPQH++XUoA3z1d zLFKqs&INqK$-|1!hL=6x;(X#eItav}uk*qK%qMQ@B9pMO0wr_mS6g zcvm7UjnVe!z(s5FfV_a+0JRnp?s$x=VP;TcDev_DCTj}@@MRkZ^LyIzoNl2f#xo(Q?Y)&Vs)p$ z=}@SvHSm87Ee+;LqtSAcSao-s^oiSxNV8>2(qT#^Cr0;H1IB+jqo=n~Q zXY9Bj`L5CY{w2EXEZ5Wpnq}dSj=-iHy0$j&q`~9Ik&E}>!WcL}{N3_Evo5=;hBTc7 zK3DOq;(SxiwRAw@t^~h%nZPpzsQio0nS^Z22@Y%V)XqeQCvp0#4&8KR*1-X^Z*5>H z=Y&6nJMG}<9(G!9=<+{kunUrHHZnO4&iW8Zejv>EC$T|)M63MO7`H#bq&>GTW5|Hagw}h@?SvZkXj&Z~=)0r#w2wU$S zy73{hsUNbaJzD4@>m1J>(%_3)R5(B9Y@FY|0&aSZEXdEvJ|7*jYGBzaU@`$17s3)) zz-hA}9W^_n4S#~C9-@;kAPsASw=+E9L$qxieJFO&FTA?_OK`W2v;5ELGFQP)6*Zj- zX49K)8?sY+8!m%ZOF(svprr(OG6Knd0Qy=+RkdWwVvu0%z|$tEBN|CKjh+i-$%bA+ zawmpl2Jj|&JIxLCu^j}%E5Jcpw7>V3t-lEr9V)@F<#4d2c zN66o$V7Mgb3-3cUo#5Px;OI|kIOMct8tWej?zdtaEW~n~1GH+>$>kn>w`w9?rP}%k z=gr^J;bJQGRGRe@ny3!aZvb?35-RSGEp?dFm+d&qpO5Ya-R;Yu%Z8kA^w3}62)S&T z{ulx`DywV*+?GQjauTf~`ga`9xJ+lwo3@?yyL1IDVoyS&Zo|@S#)<8o^i60_AEANR zB-4>7=XvrpPEKBgb5?W0xwd^iy|`bpuR7dsIQIQzPBqVgdn0URLIvp4UKmTppr`F| zba4>ryb`&QY$eg%=N?@`MxhVlLVwalSvteEW&(F5x_I2SzM>yR3wqiaNM@egytXthO@F0$_vLqHNx44&Se)Jebof#K<8R#F4uGC zF14ehgVKpJuJ?fXDLBY$AH@y|VY!Szdi)4q9pTi%b6aux1Yf1!U?ckyp4t+wXooE7 z4F0F0sVanIhQeR;63mNaIgX4<#M%q6le2VSh=n>{FcoD4`Mv>koVY`mhMM%Bt4v?= z$Mk1v2HrlVDn-toPNyztg$60M{hwZx@dz`C1d;NPOPWs<@*LsS1x_Ec^Cq{i3{eATO$nCyl zS3Spz&=LCm%`rdY=@qR`<~bwX$O(-WVCG5zYhh@x-RYRFUUmNNn#t_)a_TGSaUhd1 z+R+=T67xWI+wzCnn)#T%-C4`8_0jD5L4C9FgZYN8R|m|&W?lMQyQ~jF7j4ay=FDn{ zaa87n^g8G<2FY=p^C!E|PTkSc{h2;di+(nPtuu75%8Tag2i47nr$+$8lThbqPCs`+ zuI`{O@*w+V@^saqfZJHCBk+_i(QzajXPMukr^-OPKVsKE!_FCn1oqg5hECB5a5A_n zPbar;=`sE_Gd7ZqN%Uy`of#XcMoId0U8fkXAbpn_(kG@5eVKC5nYcXtJN8<6u-@t- zsY@fZp3%K22AMn$Ykp{0Dt`wR4Trw=a?L>c@7y!<(YrmT=`fEQZOtNdT7PRcf*Zy% z8%8tJm;|#Job|T0nw^MOScm}U-1?pRB%`_c(wu4)!G14n|4~`uD5H*1C#$>E$Err*@glXFI-JQ6 z&$!bv^vMf6l!Br4^s73{#GJD9J95LPUm;yyhdxF--Gg4bq4zh#zXmXGLFX)uuIVk1 zhZBI}$3|VFrcoz652Fa($eO`lQFO!@f(}@V<+c>;mXlE0987o~Mq)iN#2x@`yL66o)Pd46|!;nx>^gK_s_SvdBt~jfCo_ZVl zdU>;Xesa}uR#$J+U5k0<_6N3Ap?cOE;}2ccehqZXnwr@rQ_K7+@TE4(7-Ag=4Zylz zL3iZe?LC+ekW0<({KvV+Iof$$UFfK(T(AuYWwR2^*2WwCxV}lBpug4HXj_8Cg9iii z0;d9_gQK-%Jv;cwj@*~J{vp&BAHveiMJIvF_OIxDW+@94Mft$K8e4TL{ahNrryiy< z%~W-!9s63br!Tr!ut#8*uEbLkY<&nlSz9l zm}Z<>T!Y2;HQ(-yt?>*?PB~+kK1$1_ z9SS}Ob`5q9E)Jg1zBAqSIj&g|onqI-nb}@Or$)T>ed>Q1**!KrZdLZkxO>ri{j1%D z)w=d^);Yab@aL?Znf0=E2JUIEjK*{T3eh(*(Wq=pFw;X%l%vjT-p-)}g&f!HcZ6kTFdADzGqfc*b`bJu*XCB{f~2Z2o5Tz-EcDzp!sq20KnW zBGm%C+NiVD?do{-l4FkY+*TwfF~eSC9!)&as$$qvcdrq1kFWZeZ z^wM`RYhx(gD7%qd1OEhC zYPXDlt&n>{l;JI49ae#guJgUd3T zXEhG4(XX37g(jk9CWJn*=9&A)}{m@;*{jKwmay9gw+0d4lMN3)9I!=(RgS4V_vbk*c7FgBc1sp`5i5kOZG?h0m@S)pM%bw zN)vluJc?SOZ9ue?={25dnb2Uv;KtymTD1NrQe`N1M+!3!rkHol-1NBp7~8TwHaVWG zy_WK+(q6es=dA^Jk3HzCHVDh>XCr)IA1YUCY$=eu@?#QhN(%l z?aV>tUZ0q^jAcOjmQm2$Y}R5%Ty89)<@jWYL>G43iqh%X&3uS^Oo-?R)tn@Xl4Vu5 zrkTaeE6nw%Y24L+V~X2Mtv-9tuC>(uLa&X}D;XEiu^pLnkqADoGCd^#C7#5JozC2m z`}!c=u5Z$6Xf|zbFnjPq;6|WT@ItVrHeIWxeG&XIkT*~*&`M)Ao!ZA&GrE3k-NWIj`hDYL>sw-{ zxtO|p3k$rYd7d7yHRuNW*vv*>x%|Y??Dj&+6~)W!j7ViZowcSA2l@~Cj%5}@7BS=3 z^aM*b^Oz%zv-+2MydI_3)8{gurxOs!%~YV$$i7s|%eqo5Kepou=2C8^w^Ua;7w({A z%W8Y79ez_(M;S+Hhu=}i@rmQUqmw!v-p{M{aExR&TMYi{EWEEAwi94Xdbw@D%aL4% zR4WCDO4Y|p%4^?j&&~v+()4GYPam6&$kx{QhS`W+ZzXP%N*Ch|*tnaFHcUS`uCLWM z>z8$xQHZHRY3RAG`UiR|eI&16^?&rEdQsyOW2&*%IAdIcZe|%-ukj!+}s^ z6{czYYfds3n}5^Yx{7%L%~;eZ$ZVfq;kbNeWxU6DGstwg<=9DOc~!)M$!}ycY({o0 zs3FXc`i40`<&B_zS5ISR$qoIG-e1qqMrlc!Pm9!QYjd?2{iN%3oizxF=!Zu716H;EV?;f?s*zrgpv`;oV}w-G=4JO`N^n8nn# zj~rzkwH>7#E0{8Km-u2^G=gE3C-N(O_v6X9l%resZD!W|Y@MRR^;0m}Pc5RJr`u^JUAYe``>=~TDW#MbOeyI}$H5oGN`~SK<|Lwi zljz@%blJ3P?t{NpA`u+3)Ys0^?);v`o)?~?-m%`Cz8Ag`{)PT({$aj_-k@iV=LgRW&k|1` z&t-RYcT-nOXD9WhV+|T4nh807+LMWjOE1ZPu(Z43#dZRkCz!DL0`56MtYS9aU}GkQ zot8>h0HP57F9iNU@jhch27nJaV*@2@33V@=uP!Y+E-eXb~Lyr z`%>R#SY~dhF$PKALn-Wd;8?G=a4If``@DOs=c4C~XQStiXS#QjZ;}6;zmos0ccEvV zyM%kF`+$3iJKo*S^|A9S)vMN1`>HL~4EjF*rKCceW3kEPjMqjyt_-^H7Ny5?EdMqI zCziRzoQa^6Gtj1l%P`2(TUE(haD*n-SMAeqN4_S@+NZvMqsJr!OEIV zH&C1X9?__+=*iE>#=f|`60^l(R(E~T@cu&{& zyU^KuqHs%~+Xm(XV+4}(EL!qceTM$C-d!)CU(hCKZM8;P3#|$A@?>ya@WWu0U=GIe z<_<0oF4gYn`OT7+4=r`bp4$ zK?nRnFY*KCKg{QQi2vgu24S#|`{=P!Xs_yUTmdGgUSnRt0pwi__>*a(Rf)dNBzBS; zk1K<`PDA+RW1{RofVI4#`$*y|*eiGFSl<~cxCx#AjwEWqOoeXD{%Z`q#}fs~FdW#0 z&6r!;4U2s%k&l_y8><`f*gE7A?$9B(8#6Tm_Sjw;M-{7|i~)>M+(#rd<%`P1s0=-N_`Da4krp!$qZnx#2M%GefmZwUJTYNGH3FXwobdMDcEQcdbA$U z4r<@w3tnYP>F2?^!36yCTY=+&xZsOmXT6%)oKm=NiEFG=S34KG{&4s6qp(6I@&13r zHuhq7mLz_5g6gYg=&ldQD`vCp$AfM~Y_c@*--Te$Z5>BK_C@csGrQBxei+jt>oNs1 zlb-(-&_JV@99h?lLhICnI~$;@@?v4_qh_rck>N&k*B*v1aECc~WgUYZ2OSA&TeZ8| zLmi|JR)0``RtKs*!Du@$8li5(Yn-4wq#v)-zK7UcA*gF5lAtO2xDa&J4V(1}nY$Cj zYm%U{HP&?Os*z~M;$ZL{ytgK-oj3FEQsOnuv2Hq}`O9IMZiM%WVr8z<7HRvnTTFsX z)pF?-cvV8K{-A%4#VQ_Ry0$?ZsWsQiXce>~V77qP4!^3ZG2Sd3+GC4B4^~neI6Jx) zxQ}~|co+LR`!D%pBXUF(iFogyjV>(ZZ{Tn4PxMcOC%^G5a!0u*x!kTE&X3i?j#tF3 zrx3?_O6K}Lv5Zkn9mv7-Y0IpQRy&D(UmQ9;O*~*QcH=mDP;bKeSdFEagRI2>WTjLH zv?GSn0@-u|I1hok2az>PBLyi_?o&hpN zqfFE-r*=|@s^6<4)Nj=$Ob5ttc(89;W2a69yM@&gOwN3xOjLAwou6l|6Y%8M&{sZ* ziNS5r51)}+_!6IH9~5d3?aqf}dcyBd>5e>{_+AJbdpeU6x?$ziz$VRaJ~37p{qQm_ z=;P6bHhr%)NgJpAL)4-bR!yd!!$@Lg;xOiM{9(-HXNB>XG02ECe$(^mSMadcV4qgd z-Ug2aUj|ESd$nbHsxim<%vK2QG}QX85$@9-hcD89-k%gPHezGMy9j^e%ZM)`&iE($ zfA){`$NBU7)_SVC4QB`EMX2_WQ@+;z)_sIIy0#=FEZ{dkLoz04- z+k9o%nZmOe9h!lk;XoheM=pn;bD7au7TzlZ2P*JkJLbn`5ZU|-XzF;et;ldoAJ*H{ z7!C^eX}UoMyA}54dwUgSD)X}YFasn7$vRw}t1eZStNYb=>POD0&R?BFoOPYC&YR4V zX+cc)Uq@ZXF{T!Lk8RT(&XkG~jjrmmq1HY`NmoECQp@loQB*mLzk=$9M)*_C&^4^r z&e#H5(TSDKbYh|%@Hw*Z8#ZBu{;ki{r-IdA^;!J>pMFulqZ@j(QJ6@{G%Q<%Yq}$M zIzkB%NFFy+-**}_@ft7bFc*dXu{U-sRG&GEx&QRs z^;Y(`k9ZPMEV4pmeB`l+QV~lyj6Kqq*SE#n*}KTI$GzIM#2Mv0iG^<9J!ey*h<@j! zS|wX(0Q&DM^i*!5$p0CC8;i&Xd;!0VfHU`F2?(c+#{>8Y+p#S^NlRc}3r_YCi7Y^j z^7C-U=O*y^DJ-|IATgchNoHX#WP)gW;{*K5zlnK7qY(=-gMO~5o1O614$~9-KfH^x z{67QVtqxX3SuCrnOjurv9{YnTkJ{wePm%fPX}@BRQfex#m1asMB|*tgq^O=UKv_dX zAeW;NHe5$0%(i19(l{bL=N)sHj(v|Qv%fLH{2kGQ{>&q|PekDxJbmf$Rv?^N{hNH3 zg${3rE$7Ety@@sVoYmQ^YhinD9+RK0g*DDuvX0}?@ZHc8J<;dYnQ{3FXzd_-av!d^ z!z}7#LUI`c_?VIpiN zBcHKNZ$}KMmG(S1GdNzWVBEBdDs!EQo-N)%zD<62)asZPaZ|E)$v!dL>gXoB49 zQ{`vn2C`0b3`Iv3R`WR?+ILWQ^2oYk{%YjVX9Oz+s%D+abY*SG+7s|-W%VQ@J67BL z&^uc}c&diO?HH^)klCU3@#L7_;fcnX1BpE6G#Z2LL@X~SQ!EQH<*Eo7zYmdlZ^^VZ zG@D>4#em=LQYA;v;EIj{YNS(lo^#cA|KuL;PIS+59fv0!YH3GfrJa2ocW+8B*+lAy z){$qfj{kT9kL@6nYW`GCDSs&{v+$llCC7;I*CWoe0{Sc!axiNtf{7q)=@WhjDm#w$ zc!agk5PRVds?Fpy!#Bhd_ENFXnkb%39-fE%UWfPeo1roPs}G)I4AQlc{+WJ8uWf8K zDq%}z$G*6TAJvX5Z=MiwWW2RENTR!N%1Ly}2&lI&^B_j!168!nV>9F@uegB-!!ErE z`gs!^+*vEHtqpz=tQAZN?8GWg(sCJBtVl<1_i+E9s1-5(Y&+um=Sa)hH@A_ieZru) z`%yLg<2*Z@wUty;4K7aK`TqBJ%ifiGAAJ8bbwGOdtOwc>>$QED`mgJjC(V}}u|Bd& zg`%7@Uk{y<@Orw#$GPoiTA2^y-C#!pA`;61+*V5Z(l*%lc zH7d|a+hz0&9ku`K(AA=@j_!$`&fY=Z@t&Ek;c91Py6uV8$XuXb4~`4$%lbZRZ`Q|w zJi&rYt2d3I*6h$PWYKFXtFVP;;(>hVc%iIU_A=|apraSQ$##cR9fbs5tX5UmJKi!8 zet^9*@yMRm7-j;uhX)E7RhWl7+MHlr3u(4%_6}rQb1~0+26NlSDPP+Q*{)#^<~1XU zq)ukqe8AjBbixdE;G5@_-j1_8qm|$>aGShSmndoQC>sn0osxh8e3nsH&$HIIT{?V~EzaU#O+K4jF>-Y3$!~$>ViAd)f z#P^wfMr1fAp4f4GX_>c^!`_oFYe}K+&;}!rc594U#9(jekC6GtiA%iIpTncw^ji9L ztt_1WPtY4Y5ZD|T5p1V74YgMrdXpmtNB54c8aFS;&$+wgdzWu}p0f#Nwi8ij{Zl-x z)lJs!K=rh*-Y!m=lJZ|l(wlm32aqO*?rMj3ABtVHM!tirbBEtAoC6`3bC!d%1z zrJ77qIRlq2!aDm2ep(Uw%~qBvz>Aq?TUq&6sg6Wn<7nepPCdwTDkDC}+K;DNaWy_p zrdb0^Zg`0KA@;(=LU$^Alwxkd}ocOmA*?g0+jt-tXv z63l5tuMGVQDjLoN6ScEiHuDGjQs)ZqPXAw#YV?tqxa_YJp5?lct9!zXxH-{%BU<>T zyYo9Xo5QjiruyCI6n-_024r@k{!N^puP|S@D4#fz^Q{fp3Fjw2OKLbAy#+ zTVaoPj8%_1-@7V#T6h&-PhTG2J5OErVrO1;k@Cp4&T<(IgISrIGMbaAOUoFWnK!F+ z;BK&xvCe8?U*wqT^t$6b%RJjXO+4f96zUMO&Z{)E4X|8hb1b9>`c`bCsroyz3>qGY zjo7Y(T-;fERi!_#mP&CYPBOgA1l)oiUx&YZg2`Dq?UV55%2F{m2RXkOUtTwhV{@-% zLeF4o1J~Mqvrix*a^LZ-x=fv-Hdl`_EASPyJx4=7TFuS6Mj?HS=F!rE4}-owWd5ZR_!b1S*0de}3c7*mYhWQYbqO*@&oJs9cV zmX$~+mo0b`-KbI7O1-Si%9%jbZ+EH}kCSsRf|n;V`|?qfW@1$hA{Re}`nwL)yS-+1 z$3UVmX;5^3o|Ml>(i64qflgUpWqy>IGfNNlwW>OPaQE=tk60TuKlXBbz1%1BrsZv% zJ0i!Z*g#}e|0B1qBpZ7&p1&>p`s~ZSFM}`3y!z(VuGhEUZcQJeoeF)aCV49QFGc2w z=@2_O_WPLEQ6(cQ`rmqvIo(PlYmj~;SS=VEoEzwnm6lOFV^&7qtSfcJ4*aNVSeq*0#XhrWFfR%50O- zE#qCr&zXVDV6~0^8P`@z#=9vi`>8!WJ7h`t!E<}G=`PdAqVeAQ)E$H zw%%$+#{RPHbNg+(OL=cU3C-?d@=kB6b`otpLbI8#=D-_mK!kWCe#~TSh+n8ny=5C= zKg=}QV>AJcQ*tuxzN|8l9W@D!C(6Iwa2kiTKEb?!PqJ=i*3LYbk&^K;^GvXlr&U&@{^{AACDbG{( zyczKRLB`kmD%*3lndg`B!3I4}+(uF7kiWE;Lpp*C zbkWcxbd=p(iPv41%7x#t@rzOtLoG)4&hA8 za_rQhd`HgC3?z!RnricL)c?$+?(iA@b{*>n^9B5}PWvI)DDXP-LPoueMHy=|Uj|E9 zr5#J%b^Os$gJPP+l})&p`%T`}dB^A4pZ!|&2LG>~GETaWXkVwDO!?|%p_iRs`BH|w zY4P^ZyX9$L2Bus6)Uw{@5k_Q8v_IxSj6b$}%%Z5*{`Vfc%k5}tyJ_Ck2WrcLNx@rz zHi2KV6so4WWi<}e4=%%=bp^8m3xfeI(|B%m#YXS0oO0}T9(6DAp7Djy9reAB-NT69 zo>m6f)|r{w)WE9D+8MpmtEcZwe~~dPD_>C4@|r({x+@uKUH4WrPRLWv)5iU+YnU@Z zt*@v&Kheme@1i~;gw5$THX47JTP;pU*b|jBrM08J!|phs^aA@AnQ~PepQbz~bE{CV z6Kk(c_QGR7M(%J7yk@6X=N|Pn=aIKh%?fCalf-Gh=Bz^ze1i7)7b$k9a-4}{x$W!8 zyS$|a?f|yNEF!0$!s#>2L*`y;ulAT%!2MYw%CF7y=1FL)BeiuOGh_BoGH5~kgZsdW;}(Mhlv$(7)tDJbu*_Ld-U4+WNlOM`#_hhA(;g-Kg;|fYp+&2 z^oiQU+aNMB=03f*%JPYqRw2#u9TfHr9%gjl}*?lzcAA# zoqGhxp_U|`w+`*{9X{VSDk3|Dr?q^6maj;a?YC3|WiuyZ{l}3X@f*4E@b?;BsD@0p z)>2I`9f9gnPyH>nK}9NDUsKKXipP1H*FJh_mUxa1QpDNH^XpiUieN=s4=j_LG zVrgIFd#a&J);;E^e!&{knMK=$97O}FWjm7BIZu3hje$n5Pv!0H}my(s7 zl@!Pk{911mI^?M2x$nOgwJIhvc6Z#``1}c75<0|}i>)5f*WJu)qOv zCMiqaWP86O?NP>@thC?^)2saH8tgkAu{iRh$RiOYA_n{W`bK$TT+FS6rp=F^l_}`u;&< z^l>@V9rlkyMa@LLUGQAirm0spGIrdwO(43UeK6i8dgQ3feV0N8LY|m$}Z}?Y)V)1(Ce5|lty*X zGBCS~OyUXZ@@A4nn#V~ICDavc%re^(>G_OEZCNz*i z6(_ZPp`F$+GD)$-JLXgEdzg#XxQHE~J zG!w8h_ES5bZp%p}e<$p}iJYF?Xf?8y67PIz6ee@kin)#>sMeotD?q*Bbo-A?IIIL6 zp5Z)Ye(aZt)Uy9b=HNysi4y~VkB#JikJFl@c2C`ywk+d#ma5e^R$1TJr>i5~bG@T|pZgAYo!&N{G479Do7G!NcKg40 zL-);^R0Gc^ZoQS7@fv1|QNk#y_oYIrqc%tTMVmrgq?7)yvD!Lh+o1GNw>sas-lGB1 zJPSNOx;Huhc1*Aj41I1c(er6pfhqwt&@WIaI7Ca+51Q9Qw~;K=0Xh2+(Jk!UL;kv- zGSha?`U*d;iBX#z$snTT?~M88D(g6PP&MqsnI$GH128V&Iolw4Wer ze-~`@#Lmx6Y+(wqVuKSdlfleC7GSL1XzFUA(0UuI>_A_ypHP}gC zTFcDU#uhkaowi&%prvaqsNooHj4^i-Rmf)dD=`j-dYn@es&lD&-SJAf$&T~bT82Ki zW<%FM8EuImohEXfk2w8K&ddaD&6qkn-+tVljMq@cJ|AxR9p0X5)h9055E*@#Z@wqe zoP@Ofo%0gXO)VxI9Zn#|PzTvOm%DVbFSi$=j^#R0)ay!$a#Hz3xd#PhvWrQ08WXYR ze+Pc;;ni2B7hkkL`>D!_=T7*6TR6F~1}v1u5~vFca9~rTQWUv!>dO9)c zheTETVCUAP=Rs9Y{~W{vdBzOF`}}Ki4(2Tp(gkF5a&vO|JbRTh;#bMVOu@<=NR@H( z@QID4)CYCLHXTBQ|1r9)qD4%{Iz-HN685cSp0lom@=;Hjp#JW3yAwT`*l0DpHt!5_ z^NH@3uEx$xD(i>XyV=GPS$}1HNS>q+QCT_3QJr3tekPBuwEqchS7bKqOrrZMSXU(V zn#IDCEkm4%SV-MwUQQ&&ggOw7O*Q%&ZYm^$dTF%TM&dTx&F)rCc2E}?XMp2hsHc{S z>bIN#?ZCODjy&f%pWLNFWfk9%NpKItXD!A6{jEsL2Gk|Gu)$Q~Lg$!jy$(C@Co5`}fodA)Vk_!IA5T4p!g<-e&4=N2D2BruR=u5JCO^UNGxeHk;l?x#+Q&yEI|(RH_q$bC$AQ(BgC3Mz#->rT9(ab(=m$;dUQDkXyRNLzUJ2BK@R@bi!Ocy~F6TLFG<4@VEi z-|ElnLt^!#IjRP+f6rQEj#Mfqm!psvGJB~ec$6NbOQ5;Gp^Trg)|x~6Z8<4DlXt&| zHoBo@s`2jEXo4C1TOYX~J^f_Tb}hK%9(7#9=w zco%VMAO7rq>apjejlSh%)>Qtwh}5pl{P3TtDnHICo0oVCD(BIYkTe0Lot)&jNB-gr z)R9IOqaiyvg?>ta0_$RpE&(n&GlU=UTR*Vgf|Zr!vq(;3dWnOyXVt6N+do9CKA~^P z3GCggRzGw}2rp*J{jtl;>?ame9=26{mS zKdB>mEmd!+&((mMNrm=8^>_61(unFQ_1LxxbC@Ue7~R1 z`|}>J_xQX&9yj5_S$1C@qWh{OYT?7n=)~cPk7%;SiGS=|d`?YF3-PBMeI7=-&l)TG zgqg(dtH#;f))9O8u43(Hy5r08S4q+EYuJ(lyofOcAFAPeMbz>+Re@7gkDZGGKjGwv zJ#UcJ{?(OM(|+I5ehdJ4mgtilrCRh*vQ(;lYE-IOdRn?{ZoAy(xm9v=)aoBim(DFKdp$ATBz?P@ zw!4$n=!_4b;TkI1hm)%FXwjGXcLO}jf#BaJ!)M#ER=uzY^z@r*sPkd!HZnXNWFkA$ zALq0GAF7C~z~aRzw$EqxJU)jXqC3xGR9#B@MgNxQEOxV*4k5+w;EyTpcpG_-?uT(| z?Qdl0B=;DJl8bOw8M0Q?eH-GC+f^rg<%)57&m=p!A7^R1u(;7(`fhi<4=v)v^=>%t zbGEunqFv%<*l(ul&B(*vxk7pHE7xB$c;||1_sD;xyOj7ZT?K#95s?X`&O< z@x>{8(e-B?xqI2&>RXMaMmErjJa3G@$bGGmX&FftHnGs#$w5X`;VtyIi?lrFag97# zJ5h_CUU^2&@O`n7eB5@WM?bNsp|oV2EFEW`6=fe<(`?sg_rrFj>t3J*ql0Li>$cYD zCcswx+0^sUWEw91NY&3gJ5K*#o!-TB_p@ZP*|H7z@fY`c)mXk&HS(+s?cSVKiM|lm zH_4Zi zV~LSa&+VdxxiI|_nS);-jR&kiD_j|8KzAWo<8&HsFs1sPC%RPC^8*qbfQ6@l3iZ`Zybd;cJWw0U}IX-qm5~jo9Ne)x&uXT%Ad`A zh=^Z?V_PJ-}?vi=lmgwa}&*Qq3}p+2N4I=pE&%f%$%5vzGQO?Mgj{*+|PSEB6M zg|ndb#%d8?fXNE=D(z9;D2o*SqLQy_W{GI)H3dCXP9HAVkU1yk+r%stDhE{o^tUtf_tc8?=G^%z z8ru}DRrGrG5a;ARkZx%2?RoYNHc5Z0PWr6mlZmC!%~JEMN#CTiIuAIW0JaMj#e8A%BbiP-q`le`SK5bGQ z-soUgO&9v4O5!{IR-wB0H~}_#Ydv9|`dRy1*{GLfAEL+kPvqm9EP_62CtRE;Q4A0L z*XJ{Gp3zBZh^$8C!X>QCEhOg*nj)E5Z~wzIcxnS zstMUyLsPWyh<)tM)ppg%G__ZDnCiO6RLh>Hit=#&cLm23_DM8L-IRVUeQ9b+V!RyV z$*IY?9gEH=R#!=4V=vj+njS7>VKj3?rHTw zpB5a0yB>rAI#|CGR6F(H+3)7Z3}?}=g1x3giKmNTchb3I1Dx<6FSjk(Ib8+vIV$*v z^Y`Ai%XbFPc%4d$@%VKi`+h6@dyKlx)3W?o7mD2CeA8<5(Q_njKMS##A6Sw-sg$@W z(Ld1^AKsOi2Uq1JGW61oqR+ipk;nJ}5t;lPp4rGVTqIA@fd-sLJM={Jv+0QOP;O)6 zzmr~jp0&BacVDBCrm3SpmsBqz9YZ{x@rXV_Wl6`cto#(#<8!~?!xQ(@Cm-+`M$tt3 z@J4tp``ztpGFQd7;^e(%Jdw$yt0sMLrJPv3tj9HiB*n>`34Bq475tRfxKlLXJU-nF z{1M~qL9ZSMH|G^@B{y$FyVWw=c}({~$-k)SI0@dVl$j;UR<7XN{QFg3UL)i8P(d|g zJWyCAIV}BR-mtuNYM^H1e3w|B9#{0y5|@^2Q=(I`SBo^tOXSW@eJw`xd@7lHLf)9% zFH+SL>B75sS=XW3Sk>$o(wV>MCbE`Rdy<}fh0YiSgS0KYC1*}zP4c+Z@yT~{F4xuO z%S=_Wb``66gStf#ADr{P_~{n~5Arfb(#Gf0zT4rGl7+kJl~3T6o_3dv%&D2U-p;Ih zcuP8;pIbP< z&XV$~xeHX)eVa2!AD3$4jL#&V#nE@t9Cab@v>i_m@^R+VQsr<;oLh7i4D>ynRi|(j zZr@?NFS9diRp|Cc|9*Hnx^gXpsv^g?getpf|TQ%>j^ zHs)JgaSP1;F6-Te?f%Bt=6nBA9JzySugIS}wy-n`52ka*Lo)q6ZpvzxGOYVJ8g~`$ zT+Gf*7b(pZnQ6-M^b(^fVn0l8S%XK#0$XPK=d@10P^3%IPI-?fUzMTxEmghPypk=xpwRWp@X z_~!}_*yVR1|55uNirIOzSzNX-@osWi;(Q%2#$_JYU9M>24S0Jf&*ct?>kB9}S=b4O z&k>U>R4;u%OthtV@ar<3`Qp5#GbQ<@Kj+j)l+zFCId&(PMz6uAkBCN`RbPtl8fU`K zf^beR9HCE44fM^=#MwA!7cP*~yqKQb2+5vD8%$yQ+OZ@Zv%d1>e8msR|1bEclAJ}M z{OoSl=2>>*46ASw3sg@{$NT(-BVuh8q4ERtN;8%|mrebRt?b4c#R;(2^R8M!Dg|P? zQ^fKn;^>j#|!}9_X*>bYTTEd_>)8L;C`*;&q;mw=)F^am?dAuPNVq zGGuZe>06<9(P`q05!q=Edq(%v-Z0j4@cvfy0dc;0oQ%{NdP|6~&$1R%MRPw;-SZxQ z^A4zb1$)`d_t895}?)PBEXijH8~2L=kl#-R$O=qN2TsYWAnp z8J5kgrGrWqY|UR>@MuoS^jCQ^@;0Tm6h2?DCG%BkY0=9|+*AC#BHyO(PB+gxM=e#k zBHy!Eqf!UbGjglvKAYN`xG%9bacc7H#Pm$t{L)9a9c`C+R4u{$!msos>70K{!3VTy zi%iRcGU`+M>6UdrZGH|s5xwWW7c;!U*zd`?TGqLD;w4=eN{eZ96#=M#o|n>^SBeDm zwN{;Umgz~3pSCg){i<&z!h)<2+a6eWl=dD&9*fIh-3aAeSJ=9+JYQ!I&37_9RI%_6 zp6IDidlwpdl<3AckbPs`(NfXvm!Z?@?%zAn#`UfhEji%T-yqp56X!!IyGZYFeq43Y z!5_%&D)#0c-+IjdSK{k3aDrZLJgybSb&vIVNmQ?ai0WTv@~5BAt?HL>%nbZIP=v5C zPWf-=LT}jiBGp1y8}-xVy_Xq3u9s6EeDOrKPG=}9^$`R(5FQ-n*;H}C+PHp;oZGqX zd<|sQ8WnHLSqpK!2h~0*?pzo0K7>2(g8g^%R1S-n+`~Tp%rASK{XR_79ab;4owg}j zI0{nUOqx=KFOsxf1r=zxhe%mxwxg$UJO+)P&gW`wFXz|n;y&`y4ehQWy=P?4JQ$jD zcjBVtA{F14t8%(M*($X?)h^u;);--$`5L*Ca)09){g_UtC+od%OuAjVnZ3Fvq#lwJ zot^kfhI_oc*va_(w4C=zMomaC_Bk9^ctvKBdWTcoceNdI=jz(ET7CEFyuz&orTl*? znJVS^YM7{@C`(s$ls#G5r6SZ1*bVtq)(0LymM=p2JFVZbG=5bb=^n-{c|72U@YGTj z4STKE9u(Qai;nXThl^@Y<2BTvS#GnUH)k{5=ZZR=Xx2SMWEQYdkK(`*Jc{T(9p_cW zsSrt;CC;rYV4WJ$N^uTuS>Jg!hXq$fel{IajQ(gRGH?&9PzWj9$Sdz)EbnAF;hIRM^^9tz`e{lA$eqCNMrS{Kj&XQ18jFkcN>hD@eJ z_OG&}p(z|PgqDeH!Dz_3GMw}^-|Z=p);Lx7Noc((&#ftKQ#x^mjN;>Rd-)0-G}b9;DPA=*|n_qX(~>A9k@i}fjLoc>n@#oeh>Q#rcqT%x+_ zMfl=)+;NE<(sOJ=vhWi=acS7M5}&Uxd$7G=q8j;4tV=~c^bGZpZ`gyrO>B6)=g)Wy zm1X>{V6i>W-C&nhy!_AD^YQR*e_SzNoUA}*C(eJLLCd~|A`^4|p?&v~tYs=~C$j<1 z$ywCq8RVeDDXjS~qWj~`^IZPZbu`CPF@>~P$U_iDpwT<5@RNMa$iKaqbC89tlvs}L zZ_y}E`qX{wNf#)$t~E&07;#RB?soJ_6Y-R)aszF>e-)Hc7NT56CT8<^;CFU#7nyh( zub&NT7i4!Y?KkhI&GS39x`wI%J*4bSTMBP=Bg*Bv;&6i$z>{!4V?Jl?FUWjXsCxan3vRr z_49KtE!>%w`%Ww;;wJs@aSs--wy4I3Wb+mGOVVItS>!eL5hqhuq+UwBm-=67v(5|y z(i78Lb*!tz&eYCL<}OXoOn;evPsF}Mx^(&zJ67AJN~gBdH|He3<6EwWu5KVt)#WEA z(WRZBs>!_OF1WN29Z`;@8>4dSZuL>^3tHPL+ef8bFMmI&vTl|Ns!B$7sVL7xe(edO z5Ld9uLqr1OjQLqOqm&hh^MKCc`$S*%v!MNJ$j~h)(3{6{8_RJn`H%BW;?&nG#TtrQ zv%~aFee!jbCHPl$W=)Z&*pXJz_g=zLAL6NJ@Je)ie+Q2|k8^$!wJNO_;X3b+5;vKq zPv|>1W<1_}-}`-eU$yZ@Yu-_FS*z_(YYw#j18v?4rdY*~Sz(p`f`d2n*|*{MmuZiL z{RPpHp)|>@o;gewepD$oyFf*ZJXR}v5c)x~??SvU!?kP0?5oitIfZ*g6S}ZrU%*$bNOm5y z){c(c<9^X2?mO9|Zwh`_qd!+BeIzZ`Mg9CW@>-MR`9}ENN8WKLL|Y!N8;PPhsx&VU zL7&HBtb}BKMvracZU<=nj1_N=8@u9zW~4Z}w8zQ8rQLlC?GWeY#iF@tuxH8TT zj|j;|-p+^C?A>e>CC(BLPkHjXaNDyOEB2 zkNr5HBB?Cjehb|?1qzvoN2Z8DO~BWWlAXJ7@;}z*Whg4Tsb9x_l@oPJit5B3z3Ysu zFa6OUfBc;}lVtv)`(!g3_DAUEnZ)kI`N=m$>yIScrQS*HNHtP-e`UIR`l56XzpwLG zb-NGO+SC4)-FoMyTBpuWbxhSvElA#D7uN(la%$oz+3H44zf}JgeM+{77u~_aRkt&3 z9G-ZB)vGBnESn~!UL?!a9C#)3T$CDOW~JVevZNQ?dXU& zQ+7Chc@1~>!qeSJ$|&F?czyy zhESHsAa<7t|DIl|u1mwXL>X(nga-L1SvGZ({qn8TpRqAzbC0SQI35PMCHJA+2lVKw zBB!(3PO&S}RnvR%(i&OgyG3ee>Vt5Fm|RhNuczB{RW&gMqPw1KFEXlU&8@F-RV-Xj zqYV+|x!jJ@O0v_%?a0c<84>5X9c~)~b9@PX%;#xVh0j_;Z5Q+NuU8H80L}Q5HTj0@ z4)NCmbnRkui8CCcTW^8X&nQ3(7`^!(E&XW^eX;r29|C!F6b13wQTpXC7g%xmihm2Z9wGl1ILJvrF2+b5{ zZY(kWCM4ie2x1sS@i@yiC1<2kv$qK1%l2q=fLTk4Ty0V{Gny?O$SQZ>qn$#(R-kj&;Q5#A(zt^M`nVX& z3+!4`C?)n$=7{P?RaTm2E5;D}9VHR?xk= zf_&e_?teCXw}J&6L`pAV_kYK&^>ic{ONYHgdtE6%Q2{^9ml^-oF2aus?y*m+Ou-5} zb;sEO@IiT1xJrkp3;jPh|MUDsx_i_tXfEn=7rb3q&_ty5 z;>_J_=nLd;qL}|`(W^4%^CuLrfGk!d*B9~?d&0W^Bm2c*yUtk+(wkp15T!=a#ZzgH z6{2>FMctOD#W+rf>W--2N0wj^3?6%+{z=%&WH-wp-RP3Z0^QM8t6&`=_II(G##Vf| zz4Ah{#VbUQNJnRSC%COG{T1huwn4>C5OQ@LD`%0D0?Gv?e&@sG?YV6eti0ei3810Vl z)3gKW-s|bA6QHg*kGnE!S6n1A&M*#)e*=GNm^^epkDKA*Cp?Dw=4cphg*@0%{zM7! zr_&O*q31{T4i&Q_=sNuz9@kIix?~qWk0&40Ip^GfK{?t!Ps8Us3Ror|n z4X~5693%daA?K6mfj7PLfoDshi{&x_i}@KX@!SAW(bq)?`;)sK;-B64xgXOk?~0SY zL~9PooJcb);Gw?g^TXKTnI5xgifuGeoUb*~ryrp|-iJ6wLFRAy)KF2RCtPnh z8}^7-Mr3W;7ryzuh|4lNN9xzPe;3;UR_?*1y-}amAcKE4P|wnAPFyK zz4D>3>=+rs=y(65ccVM@pQif^S&I`C9 z&T)^k`pVcH7Pce@ zHR;>@{Ha_1YqI%|;{LW~ae6kgzQ#P4!V%lV*S2QsI9K^=5o|w=L{EUU#UaH+{W&Cs}`q?2Z)2>F9b5vdEw^A9E(3^KEi9)mdr$?UUQAW$)D7M16YvBUM9{Q+xIBtzS0gE|#%}qn&P|)l*0G zXP&3}Y+$lxae{KpP>-HiZXi)BYkS4Is%fx-tIFSN#%aZ{lkl(u-=k zzo&_PZl$ZABS)v9?J&co#;|#CA)e+A}&o`mMtNdI}=6YH8 z7Up59XUqTnyw{gG# zqidpeWD5K>oIg9-diUqmbk<+5j+|r*-c3^(l4hy}`^sqT%XFl3dgt7mc*w54`6~b0 zr)Q+E&HXF4Ltfv!F?sv*9xQUONYkQ+ii|DtQIQoz$`(DY=$AzjMSAEZ-8Xk*`keI0 z)M0(q&P`5)1WPzoY%UHy-X4Pu_G1k`dY*j-(~k7hhok9{YmVHg$Jtf7Vb#>L?tQ&I zud@reh)(oXSm7GRxixc_*wgJetQPA(-d==Wx+Z?Ezs$;HL9)KCGqsUdRfhSQ;_jbk6m{8$9;DT<%XgdA*g|oID?I9OAoORroDVtg05HXTe-uf2-s#x4Zk~quY+G zI5JP)rMr(DcVz0}{~ca(xQ@ML^N*A|dWW6J_t?MnAVhbaO2P42Nxw^^Z6$j&j7_hR zyh=4dO;NJpsqv}rQ+sqiIZ0>KL%K|LQ-k)Q_lt^|-UuoD!jg328P0+?-i0BX7yivV zy;>#j3J9@-UV=3WO4tjwG=H5w;KvuVw9oinJ9d63$P;(|iu_hYqIuKa_4`S?xTbwA{@icX31HK)_s!&r@0G{bs1 zjp_8>tD*@#$?550Q5|HY`iek34Sl{V8udAUb2XivQMV73VHdoXka`A^UXV64l z$o&=ccMsh6HSabmMwg3DKTiWShZUmYaVD(z8XNI=7FCX9^JcMmah~H&6-H}$!g)|y z^i+5n_Kzyxw;;s(AiOxIJcn#eH0qmp@4;zDaL8Vx*`KLu9({4)8hf;c^AYb){HD+A z<+_O^t-)woC6hW%B>QGn=;PCq(<|&$nwS1I{gy6otwgHQ>196EBb6)raaXdwo&1BL zx~Y7r4DK!tv2+)uTcgtYVHE&fRlT2Umqsi5%bV&H^&)AiBzM@8rdw-;T8cZp%?8%s zgFPaKd_>*X3-uYJU0>v(!0B$96z*;Slye}VkhGa%X1n-GldXiQl@e$9P%>+H60?qLlh{^-z~r+AFw#lck&@o7U$ae z7yaL;e~94-WcL@1Fd2$iAu6$!)c%!SkDM$vDMbTCe5EIz=;rtRP~~-EUgP1+wd}`g z)+J7Wic{Y6#m83oYdKC>n~ke%5`Bq?*;3YL5yUxGobqLU$20Kh!>(AzJ@&faE?Qz1 z-8PWsIs@0XWK~j`A4S-|jvXV7mj4v-qeA5gTD8tF3s0hl^j1Hqgm$Xa24B{5BVTuM9CoU)KuI z>eVtkbHrcH<|{mF|Jr0#;AQ3C-cHU+{;tdVTDvHIPX4I(*?q}}?Ws7^F7=n-mou4mA=!Lcp4l% z6b0Ja7q%W34&iNe!nG&!y&A(CRmGB;@#s=Ib8Mx-zVjFdt#@OC%Ck~=su)VaK-KA) zCh$#bQqZ0hyiC*F3z#!(~JI(YynUY`GZWsnF**naXRR!`d<^W0QqWy!evNTIt+`8tHoW zQEg}UK1e;S_g#_H+~jk~Hp$A#EqsetS(VeF$3^aYFAZ75Ihx~Twu=`oP~Y`54IZ7z zGTGgpkLr5znvCs%g0swL0KL5`Q;+>#jT5^W;}RKzV`QRhiP}6SzBJn2y>a%VPxCVl zRviZ6+%4KyJn@HGjmhTqwp>&#(d~73WH233ob8_|vvHH$=gFCEVcapEoD3 zf8OfcTDiBTr=^OgE>8|I$8+VSt|xFuR7GCTq-<}c zRiy5W#0MfKWpeJ7jVoXH9-gkvuDvef_LRpB@W$nENHZrt)TPhrv2*=oTgJ*;{!vgx zu4SGmXM5hqU_RNMc&41F$AdVeAkm&RdluUHC^>|6dqMZcd=frR-0d8?WgYLRF8loo zxxA4^Xjym^E?pyf{S02bgytxbIjW*0I%`*?yKc3zE5y5s^H_WEUq5CG%F_xFC0cE- z$jLZ=fJn-#iTCXNm_rsm5Sebrh90pW^stKPLYbIvMU6iZAAB*pFXD2Zd`mgp;Mu>$ z>W`|sE`(El%W9K2L;eN!ZWJ6b8bAES!-+G#qwB^QV&*MKYMjcOl%tRDM*e*c8?{=* z-~+nmPI{*dEcF%sd4m3X6(>G~|IW`=)0LwaPqQ2OF7olXxbYx3cP5)z0+MNBhPS}E zV_CY-jOH-fri`p4&3C*k-LFv8SYCW*5%LAN-KlnTZ!6mVfIV=7WKPepYC~0DWD4q& zk5h2OEBw%5&R@79RV%FuDEI8#QMre5JLlcar>dW~J@-fZ=YN%Nc{p_VM9fpQ_&BUkJ3Q-t!;flt;6@ll$m)b|>HFTe+!vBz}+x!aH_d zzM+R_cX6mQTy+GGQ_|tyw!3W*ps&=r?IlRU;SxfUW$@>NlJ&7(KE>c-her24U&z)J3MTw&jd2<@_ zPQ8L!Cy#}#KF2FLi5c=8?a*^CtP$CsC+L)iV98!^NoQy!q7j#Qb&%Ju=Bd0cei0|y zZ*az9Ykt*_a9n#;(_?AI@16M6-pLh@LULd6#3niY={6o{A>aNN9NAS?V!f)9LG*rg zJ$%NhyE~he`b&=QJ{qZ~*lr6EhEi%qYRXep#wFdeTr+}ZTS%uZhpJ2Y#;K6=oibg6 zMRl%)HEw0W9)X1Vup$x7sjY5$PSzrSY^^R;e{mguVu*a#1aurtBgT&Bne^HSS>@q0 z=O_|)BkfpEbmP)YY5dkp_A(`!bs$**D!w##YwoJN9YvNE-Bt9pqCJatD>^`Dh8jgv zMYiYW=3bT_n0h36apEuWg$Zh-`W3vW6V0hdTOaLk^a4G_e>$?{NLBr(habJkxi6iL z-hXuU(OPmV4V(h-e!l*9eBDb){$uvRokDtd$Rp>eP zc81oVTh^-98fOjP(`n%q>(hcae{$*>7IkrInChqtM2JeHK24sQ{6A_WpfH;~-6Vu6+DjUG#?0{eRh6SteC3RV;NZ3-F^I zat)Ik(78!sHOah8=JGd@?qSeV2XUg8JByWz8a;?J?$;Jb+&OS$ygkHtJL{EZso(Q-gf_0@saQ zV$};bqxY*KOv7OR$is~#13Tf(eVHPp`5IDlK2PXs6dPtw$y#T^&dfO>@wyz^t!fsg zriQ26=l+meBCmX2v%IVG2IURNtDEegnQ zwou=?kz(DKiogz6D_2V2lD%|wz04MwuAAgn8x=I<8TaNHe=1UXq~L6p@CO;vI%e>; z?7>Vt_zR9Yfqon>?|BlGFp8dDl-MIbS2cNF@`7ZGWG!=;AXj*ftj`v4(H?B?4-iB< z9FxZ@jy~&4$;Mxq3b?I2T6Uz($B3;A_wy4QT`ZfquOtIHfM*@}{A1O*IrPb&BJs=k z!L!w|U7+$~r%xa9oo8t3bLiEIY;8FdZ-s|CYpKjrm{~{ zU9VE2kxW%bkE`W1uNVF82#u^$pVdbkpt;EPYP;hLcrZV+GMBs0JtXTQ($bqYzaN*^ zQD4!JcU_!ZJi<2JVzuL(?btI_oE>RpRiXm!S~%uh8IE$+cAHGijlS2|3f2;rkDPxa zW4+N8K1A!0DmdOjjqgRN`;x#G?9hKZ)iy$}hcZzOakihH#?gbc4DkC37P+nK)Hkl@ zAe%wFoFCcQzeJF#^UKbcC9Y&X5uL8d=h)8!_<=^bo<<3svO#>PzIf31!n@eyV|cNr zt28`8e~9WNY_m#_r_>wvPVW+Nc|JK>CTxx@NRgbo#RXqry;||*yV|2tCbO}i82_a= z526d()`|6+o+)Z%&5d%Voq3h{5PS7*JCzr)EO{0QJBfUqll;^Usx2y73Q^!1t9O>D z<66~J4O!e)kmY{1wW^3lAk0Iw-`^sTn=@st!V+F_8xQA+@>K7_lZ*LfTV&xgtbV7& zTyk+peB~0_>3e=|89NfckqDzBSX_%6?3@9ShIaFs7n*BhD95dk@FluX1D?>;DKZ<#BrTZnWzGcRVJtI}if+ zpQy=N*ElL-6n#pI@b61|R+}8JLWzpx>0;M9QLR@+eH?108|BW)ZJIYKFP9%vzQ~_> z59ck(?U?(%eYP9LHh*xQ{VYD;m+B)T?` z3fHl7>opxHz7PdFT44VTfBZPI^tT>K)oF}#*}?YqkMB#Jl&+Ots+MSj^Hh4XfR{Tt z{0|(Kmzk=2-yI%3=<{n`?|IRpMMii$-n$i_edC%1nJY-m$!ZXG<=m6V(Qo+oWa-r9 zFvG2>YjE1{x&zITZ|!eaKy*vFiWgG~N>0=0%}{;>DlcPEXUX3GirSe>_3V!6Um^Sc zIN~ekYOPN`#b3NF{iL%e_CU#ZCErebMY8)t#Dn+@-_xNTWe-lw)<=G22jLA?bCj#@ zCJ8APdn2p)o;w`K#4d%UqK-GnQm>V0aE#$PM*z5G%oxd1shq7^1S&ztn=YaKlQC&=iH)$*L#kD%bkc8A2in*#Zo z22j>YS?#OEtg49=?=GmobGi&SuMwgDg3Nu)U+)Yrp32)x!MgW}sSjf7meU9y(l`HR z+Os<&JU(T0Qr4@E%p0r#VIzH$p6#u-QuRbByR_G&2CE^?PcLx>*?YXg;>li+N_0kjl5OiM=2tXl zIZJy1Z~76QUvWL<){EUnlRREn$%(PCtmV5nCjWNsfa`k5w!9 z0G~Zy#qiQ>J<9=nF~jI?gAy9id3(v_1halcoUb#@>eTwo-LTJn@W|~rE|X{shn$`5 zFZ!@-^IK$M9#8CI($tj)Hq;uv%?>oe(OcQPKUux4ca-5|d>$O5{v~Pj7=UdO$ASAb<*x_%AHgKr_G1sGbm~iybFdvawe| zE49VVW~gJDC_;CydaB;mv!m5KKJgd&-5_r8hRDhQRTXn!%9~^-?vy_o4oQBQJ-MuW zwi|IjeDi0frF*a78O5%ccg3Ms>!nqYQ&ctIN6`F0@_)TKZ#U07;P71{8;|iv+VLF+ zS=$;$Js7@-n!acFs1Nw-L!7co-k~&){A&ocnCw$CC}^`)x=*z2Y%!D*#VOl+)ws<^MSPh2BTcCp!=?ES55>Wd-^<3xgw@Dtj*V<$9e<|6JLHxG^?|y51d_ZMN1qGh=aY($>5=Yi{UyAHX1JiF`o~*I z;V?S(Q#$ZV(Sj9ZvzGPx&3vL0)H5F3eqT6*9ecwzy(x;eY1(beou z^#_B|e5Sn!9jx))qTU~?DE|bCAIZahgBDmJI=3IkM?bHUvLqYDW-eDrSU+2tRSB(z z!$ZfYeTvsPPfr^4b)5P&{F-9tnq=#G|B@Sv{8(_v z87$rrvA)-7Eq4qmc$LJ!@ibV z8U<+%CDljC!}<1D4>g9^do+qJc$qdh8BTu-;$AO4ITBW{Te!?SBg7lOqG^{vpI4yD zR&m5FG~`1(gPkl@4$acY3Vv!e_sPb@F1n1|+aXtZjYskp-dtiwMO4kzrpYg`B0JD> z8qHJ4c673G{mpx{XzIl%b_yA+f`6Z}p0ngTnkDKbqLO$CJ@f!d-9f+I!6uAR$96z8 zGARSU1fA|B!BK~|61_{h>Tp;k!-hyR&8kJo+2S0D0%ry?Ow(zM%nMgsHElqvM3zXGy|ic&jIW>N>pl zDsOfn@2G_PS4O+atY8Kn--gz=6rRBLFNLaiWJ=Rc*OH!mp367l)79nLimO2y%<47D z?myTiFOh?)(c7>SFR!uK;X8P^WZ|l86mMUqsTDmI*G+{5!J9CsNjHyPKSDkj&E zCb$9@59GCt!SR!=-w5*D&Fkm0%pIYQ`{}b!%;{PF!wUPDj@W(s5*ePU5^;qZwU21b zbNM3wsQmd8=kG$v42jQG)n470Vi(9_YkI^8in2Xrt?f^AY)jvW+OLDW!Vb9Q9-I=n zyl!-EB{W${Z@h{kQJeO#dCf4}s;odn(!XX|Up20g=<|;8-auyGHHX{D<3JkeIg!r? zy#J~7-NrAulI#schpXU)ZfL$w4E`SL(M5c0W>ycRvNgm1kiQ?y^hzuD1nriCci$v& zwaJ&WU=!bn?f&AO&}{8k^Sf#N9fjS=@&P(3dMD7VL$XE~)9-Z6X|#Gb`aVgjA0xTld@riR?_|y&aQ_(#Za%6`xdc>`b~6r&^!nD#(gL@RJ499WVet+$DWOi#<-h~ z>152e&{xqrtO`WGH~Y>G`Y+D7%*)nL=Na8r68}A0IY%Fe4J2keZE%dN?@nv=t1GT$ zohSKOL!xGog#F&FqDCVs$q##ND(;%?vA}i9kr2Xk7f{p^xBfp2uC z`0Tlb|H}N&k&P;E#XmOcicm}!lshAXru^6|Yx@@?o)^ z488P*$8;;$g7iGju6<6%KcWYZx0C)!K0p&$m6mYFMwVn0O*qfU-tz6M@aa0=f7fG} z_5B$oZl^;>vy6Ap1ILn&L+EhS%0~6hHe=ZAGa+F&8{wnu&-F%s2c2~fT~Lp1U1wEa z!IzE1OB;~3@A*AlP=AzrPNy+GWs?W7WwHOKFN&T`-fyJIhroS(%=ZwP|3}Oz7pE4* zB>_p$j7F_$5uyB#5(?uR6b`V z=b_ntt8|L@Izj{;t#jz~z`*rfa}SD6ca<-VVH;FZ8S<%!E>+DadUwP(E3?ptvJ!m| z{R+_K2O}JXSEl*qEShRK`o$j6Y3{czyN4)g%???I+0e&FW;MyYzBcA>eQpA+{~^pc z-Zx)1&JkpK9ywoZWlO-C=Nf11t-TdLolTDF8fOY`7bUYP@_y8OE@B;DkokWV|2*u| z7Z~d)yu{N;ZcV%poucN@28DJoACD_apvoTf4C_%D!aK=lkGCRqaKRbbl?n8GHm%XX zJ&ySHTw|Ys(tGgJIwPNrPN7#L_j(+Di(bY{XuKoVrWSgY$Xcb8ZrD@k+R;{E9Quzn z>yJEE(B=s?wS=*i5K~&`yR-1fZZvOCOFxgF`j|&VR2$%f>`Sk_(3x@&a9tjYGI(J1zcmG)|`^{ZmeCHx_`&}#!-YMKD7vd0?@C9EXNNHyhu=$}QS1_nK3VURji*T1 zo8}yQ{6@kli)rzt)^x4=ta6`4cr>Eb6VdSp--7npGQ~HW zX2%pA7s}%KN+^De*;TSW=hKjF>HU51b*#!7e6z`{x1nytjH=`9>qzHw^hrO@TGK?a z2eJ`5L=U>zxb1(&w3k+?g&wh&@+{OV#~S4Mc4<<%!zkvV|6Y<4dDk`Sj>GOQfeEIP z(wXXhCYtX{#{HSUqmC?Te3gwhWsSCyxn0@yu7XyzQG31n&T-XQWB&jF+1HG?H7>92%B8c&_z(Bm>6(*W ze+_Oa1`&jQX>6@dc9r^BE7X<-C~6hs5j@h2dL?ls2$UuU-cu2J6WHLS@Mu5lH;-rnyfR;h$FJ8BMju3L)K9d*UMbi-%l zAa?c4M!%`%{j;mh#VxT{{-DQZ8hVB~9JWLD7^AAmBAn$afBViV^jksNX1dOBD>%jD zJKv7|*Wnv3z*XO(!2)`0x$A!GbJKk03*X)3+R;z9idp`P?^n_7yU_eE*ISH-mYeG~ zcMZLAg7N43dzE*h|G{x)cq}au-Dc19INeoBlBTL=7G6t9f92rDysWOQZ@jHYQzPH0 z>v614pN!I{d-pUn`OAF|*wGff-V1TqA#^_IzPa9SfzILkEikLGc-n@iGZv0HVs@Bd>>7Fz3%>Bx`C(j0eNWbR8* zemrC}27mnP&P|OgFyKZKw%fP%)3q6&`NaFbX0hgWqge0tU0&VfGlx8n3f?WQIT>%w z!7+<`vbt+EHq#U=t)yT?t&c@-^q8S3>Sai`Nj$D?<3>z`*OWAE09 zWVP#05i^Re0Y%`tUDj*~X${Rfj-T>5K3hl|Mm6pd7Gtv4hLMJmR(>)mpMXmy z(<497(+5#4EZ;x$)n>Bus~Ht{&3&+b#@$L~?;V}66Gm1@jsi=T&8}J-t5?_i!Lhqh zb`2U9H@9l8xDNOHhQlM;cFu}zTY;^<^}bJT zH=;C|XlRaec}Ne_`_GxdM|d|pr0{aq`A*uXQtV5hx+U)U84a+Iv_>U*8m*4Ek`2&2 zEPVqsJq=ac`oFamEWrwg_BiNXp=bA!qYdU*9`BVfx*Ayy3RIAAr84GK+f@pAca^P7 zCw@w2{LvDcYDfRnF!P8)U4VP8!MSI8=TbOq0BnCd`DlX|cW0$6;aO!|5dFr>qH-mp ztb;#phS|>ce5cj<&|E_2e}`@{zVLu2(Z|1|X*E<2-z<=23ICsw<(7_Q@meFg!u^{V zNo8Y>owntSyByoSk4^~Bud=(I;M+~i@)*=DZ3U0vZ&~fK z$elM^slBKjo#yiK$#U=QvSxXxvIoWgunH?^se{(%SJE)s3f)T>W+FXA?NwK&#y4X-Cq68XFl?&K-GU*=N(2K=NgoT%fcp{Ktp%U>g{u^L3_3{ zI)*f4@tUAxV4d)9Q!GXTXI3Dm%iSsZiv8|tfd?DWEj4`O3_lI6Wf?rY13&%YKKoEO zH1pqV*qbCM<^A#~av{Ds2Ce?Gh6}v6)<^@zuJ*|i?oxvOKF+DiCyFfj`aQ7x&uYbKw&O zGCg99p$9k6fYC*~w)Lz@s-osT`X1!a4ZF$2Y_$B+_fyt1hwi9p=4Cxfn|Uj9YVLP^ z+$!A#75o}{`#+x{cApmUnPcgy zH7FEXFk&s;ab_>8)YhGj&EEG6cZ@tjbv&2ru7Rl?it9TIQROTD&qCSoRS%j;8TSa! z>Y!J4dL_<%NxM=7^QcOaTN-^&eBa*hOWZHKo`_v!jAt*Nn+5ZHKsQdKS-y1riRd~T zk8VW0qE|MXW|xg^;Bk zWV4QWpJwKTzDoprBlxDkszlfJ3C8y~y>kr8l_MLw&EPk>HhP@Tb;sXv=58|jul3q) z-L~U{EoQaawfB;V_5M%!Zs6m{MeN73+kEe9*O*6&{|AXaDt7r2To$NwxVg-R7ebZ; zfiJSc%W0I~y)qlW{9vZPxOQ<=j`_q`3*9>x7gonnw8~Mx=hHSH`{okz zbHG~dCV6o>%3*w5$!aI*>(JZLXRlD)wJv>@@x3%Ds)+v^uvdNfe1SGEkqH|h$`l!^ zoABf-*{(9@v2my1j4J$zzs)dgWYicW&@8&>hp+dq(fs4Szw;lyF}Gztx!pL|(&J;v z-S6lTG0Mf>nQx^(@%YAE|LdL-_BSGwe_5mbzPHUBLweWv^pB_=cA}QY$#^z!c^VI& zU`!VobKuX$v{A$?BQsM6m5O)-syYduU6#d=O=+JxJ{2dYbY?X!_pA~r+hr9)+7G#R z*oT_vQHKNt=56eG3uvJu9&heN4+(u&qF?dr3# zQI9XPp3bLYLT})%MR;PFxvuxKz+Z*v96r@nI%JFc#;lWOU+DQ!ug|CXXS>oIvUZez z5uDtT<__BzzDy;bEa|5x`G_uaVQ;^Ol1H!^tI)a_%I;(bLwjw-;qkNC`b2bbgAp(F zxd&AM46#aMeeY{>A5rzNnR(`%V-zvtLs^N79w-G?V4v|XbIqbwuO>?Fp)tRL`DS~y z0KMwr)#_F_bn+=ys}=ipu9Y~=tJQIKc-#B&LC`B~N$Ak<1OiEvK<$&P$AwwW??IaT zlko1OrI%L%G1kdy|A^y7>>?uk(Pga>nwZL}5a<+q?QnWMd{S+esEGx%#m*crI%FjM_h%e+gqNZJc3ozV*q7uK$HEB5E`n zMI&mm2Ce6t;|}jEB15CC&`PiU0v){~ClDtxzAS@uuh{;3^z^&_dRzoN>W-J;oC$2j zG;2DZKL63*KluHa&qNi#D4DI-Y4wO^eac69)ARS;<3qWyS3RGIA13>Mq~~wC+q-Pr zqoSN+J&zvigZO7(8|xgr{fruj-ZDz<)hpaCkM*#-JSErh5iGa>pUpG7z;}zyb&|0~ z-e#Wh{Ddcqlh){nS<3o!x5^Q-?BrQj{zy+4qZb}{R^IGmaj+?>&Nu5TmnVB!SNDRZ zb^!IXbMP5^#6DE1J9*ls>b^QSDWq(oz<#yZhk2s9gRXWSx3L4YjjGEoa`;E& zfS1bcR#A0P-wH$*r!GF%&F3yqZFHe)w9t>IqMEIBMzhl>R?Da!Rx4Uht;VTpk!pC< z^4->YY_#>gD{w>b!}-S1S{CmF72Rc>b5kO5K&?ojJ;{gcKKwIhrF`~W^@T%ZvWCj@ z#EB}g8##QeC6LPJbn6_FGElzu5n8jSTJ1)%uk}$eUtQQ8*61P?Z(Xh35Or?@)cti- zmwv!bzW1G;_CLLtUUX{NDeCJh6;vxYL8bbMc2CtQD6TT{Z;##ihn()-*#3@d?emBl z{|gFkR4@OgeO7ndb$6}bW3p>~xS)}%zichzgr(@gw@m%lZ1rD1;;-_un}^lo?8vmH zBZiUk!Lqi`sK$7V2AR#;{7E~^rX8Yl>#gSVs!zSgZhTJLtfSk1r{}}djBb@Dvn`u& z$j9FQkXP^)RQMQOFos51z~Tgg2;8K;-(ctJ19n6GsG@bNz7!+8I@&SXFl?@wk!wUOoz7_>(LQ7Z5dIsT(Z{QoeNuE2Z zd?~6@CwdXCmJOb(GIK_DZ`cga*Q&=Wtq@~Nc6Zcp^?fg89#BzztDQYJ+s9koqrPg| zugL!<)w4%4B?^m>$LJN*R4qU|8QXfQymM4r9`M*|zd@c#jN|Q$IYkGC%dP7`djAdj zJo0GstXWz1t2s*+F@y}iuDs`sVWmb{v`~kAiWO}NA-3~4KkIWw58H?lm1kLd;riFD z%_H)%!(@UU@_bM>)7pZcP~0_RH~+O&V;z#^8h8-J#KkxU6 zzA7<#WNS#O>)W5B683oXi4%lss*Bx?K5@p_L6z7QQLu@sqgvjnM8`Gs*$OC|=aq(j zVwZMPRcp7fSdaLLZe<^k?7%%gEM&9cG zUvcbsH3u)K0vSk;-EOS0+UHsIY7o;wDDq^;qz)cg$hv>bO7};H7tQ}?bf3Uh-KIt+ z_AH#KBBh5s{w?Z;+Sx18TD?dwYwJ;YE;y?RqgnEU|>Vu0U{`G%YhOiC2*o_u+c6a(< zAWQwcz2Vof04M0v-+)Cq-MekriF6|7y(A|GJ&#T7RjK-Uwuba^8P=CoDO^DY8pEZ< zviLO*uC0SJE0c%FeMDAwChi*!7rn;ceGso*rGoYt)zN9Zaym|m{#2XT?UnXdd`+gm z)_Y>Qdd62(r9MyUSEvYCrIz}8`yOs*2hLYz{y2;N0(<+CeFtOgejKC5c^$n_P8Iih zc6p-Dj~{S65prl)4YeNn;iF}WXz`+A#6>`<+17gaespAI@jVn1o#&W{*Z``~DM7RQVeXV^7Hj-#C&vMh#^pub05ZozU(o zH2aw(??jP^%mpGZ@GK$;5m!6~ZmUl|a`NRQ8`=F?Eiq;OM4c*087xOk?+mt|72*D zD!8yJNvY3k4gV@S2h|Cy_6ohl-G2D z`(0+eBMTgv?uaCuYAh{1q6cD}2^~JxxklBMwu+Nu8;NFR%rSDH5eryml_IKekhKfn zEQ1CcXu@CUi0?@FAN1jFSEFeR}c>WA&zs;;}!FT=DGW7HAFo=CRO*0dE{~UIYt{AWIC>~Hl@(|5^E)5mg zpEz+evO7^7($$^o(Gbz|ZL#^5@GRdPXPSR>E&K{n`VbC!LXPKiaygm?9gy|A!f%>@ z5>tF;0zElS1nEa~`+#PB*>@w7`vcxujnh`5>wdQJPpi8Pg=V5{WO=voiB7P74U8fx zRpNZsh?GQT@oe7Fxj41~PKZu}EwigWz`RD7|J~+uH%{!DokMpNjdLun_Kl{la}v(H zJZr12z&Vi}jM~WZxO1nut+75UtXjU6-$g>F`77e_k^R}kHwa(wHIL}W`M%i-=gg*RE(;D zMQB?AAC*(vyU1%x`8RvbD(znNva}1@R&=$5pU74g^(>+jOIgE@ShR7jx5QN<3bqOV zZn3UY{mgNXs8ieLy{P!gC*3*ZAhI`U*QrX%nvssku@typL`CXa)05$aYw6u5;jGT0 zbLV6^En+_rg*eT+U1Fv^MK$lVo^SJdN6?~IW!F3;IjV_6GaPo`17uQnOV~a7H++Dz zq7p0YTXct>gwpf8I||B=v-a+%3D4!B-@@h(vr@NOsfTFb*F+JXwQkRQ{c?7`EBiN$ zWtj)hN4EYMoIQ|zAMg3weh;_0|M790&9js?pNgL&PrngOB0e0Ij*-pWfg0;k=rA-| zQ9R~vqlvnKxqOVM7^{whBaR<&ld#PZ^({jq?xQ6ls$Wip&mx+53eNl4cZ#8C6WTJe z=MiU1`U$yBI@dDdT#lEqdif|&6s4QGLLKyu^^Hux4p({$-wtxE$iN@5QjtZ>LCgKD zd~t=Wh6xWx)a9ahv@MQ-&>D_qL>PN$LY z%&u_Mciv3?E+My370}&xZlHNyQ`!5G$80EQG?X2+{MXRktz0|uDp5Dyi4@=JE`gik z^r5b_T-4QehCYYz$)l70ZQdVAtBp_-dOLl1J^gs1$Z=Oc5%sQS%neZ?bXyaT zmhY7UEAwlX4$J)huP)+sQa8_?qjGN->gT%q7P@YQ_t&#CKN`m`-reiHdggkOpXfqc zLQTR}D^nC_#?J45(LMUlTx_lv`9IDeC~M4-9}Ah?;3wk4G1k3gBSkJM4VIC}+j1VIyN0tTKhR$>mtW!Y zKE^yBDxR+1S2u|2Qo2(=C_tJ3B``!a|*hn;; z1b2Q+3P$6-UE=61aBC^_EuHfYPOD-UU{pM=MB&}&9Vdg%bFD=aY03M zpO0Vn(VzJ`a_kq;Y3TLkR&bLQK0!~3P59(Q+#Ffi-m;M*By$sc6m&ybzT&?TqAj+O<*kJ|*mYKGjvD zI;J1FYOe}C0ZFzd_dR$Zcay%i;KGigjf3Fwp)77E^tpySG-Ok6@`xyNPt@$o*7oHK zO`}^o!A8BUL0?$%7Mk^II`$Rbe1F#X4LIvN|M&2z`yuT+Vaj)TQV;R<`|$eT^^LZ! z{SaL<#CuP;M^Blz`)Hlm5&D$zHSua|ueG2dPl3of!!$Q}y&0*D8t7-xsSB;&hYpHb zotDO3pCn#NW^&Q-68baFfe3jGZLt_PMs2`ovNDb@I29L0HP>AH^qsX{h_1g`@!zan zAHGmvfq}5-aCZGAyPP_ajFAxCmp(fY7e;1afh#X|{k=G7lUMG;L9h8%RKgCxt8>U` z$if2eE%e!)Bxs#aeC)b+`_5pJF;H~nK7AP;gft$@M!+A>#vlj4Af0^bCVNkY8~G&P zEl*FhpjR?v`X4`$0ceSDqAokoev&PVij7;W^7*J2b?=$NK*t@tAEz}PAW6r0cEtXl zvUF_Jm6U@v*Wm4zvIkGIkKI|<$bh!NhZjR(^|R<;4{rXMRF+o#6OK~r$7)ylFY0zsg+atV%FuA(_xw()y@)z* zv$!k0GMV(po|aAYPMlu9lFpb&MjxT4hG%utCs}>6*=HA9%?t@BXLf;Xq9Up&`Kd+@ zS{YR(6$5`--#n5OxxN1?19xPtP}H$4VIA^~HO^R?@P9R(c~F(t8HXpHPsf4 zkp@&06afKQ1limWMG|xSJfH2{xh(g7-}%mY_w%0Ldk!4fh&|ZK|FwZFsKa{gM)!8W z(Gsp`hX(D8##1{IgJ(>F?qhkL>&D}t`b|dJjeqbIku&JNXytV1em7UA;M@GpU8}Ou ztrc+b0Y2}*)@KHu(#U=|*YyV;RO@*RSrx(QxmKP@CL;Vewl|HjhM~=0$q2fx^%k~F ztUwzSd6%m`VyxYGDrc)R?{{itDa=s2yN@()KJDW9eH?~ z9*XC&+9Zs9n~Q8XJ5NW$()c-&c;N?V++yTy06u)mh@BzDAg>ppp~%F45J7E+dwb|x zHBY;M1vLNSO5(SXI8Ou{%`^p;_=PS zVa4;0K$osV`$Sq?id9^RwYd*|iRu=0@DTT|#sbdbUSqs;+IfQLQwx;?mE0fBIZXm> zHDF(y2bV+lFnd*GKrt($2O+7G89^rPYKc5Mt`Dw>x)T@TX-Bbq>q*MJ$i94YFV;Tp zG}~~KNT`Dzi>Zi4-d!O*jf_@ujp&4SWX^LUoX1U|Cq}w)jLY+KoO5JA#WjpWA36*l zxe_~(1xG}niZ(ObVa(l$2?V!b?BU(}3Ow3Z$USrPmF)fSJm;iNippEi|2j6rYq!|6)?yTb*yQA|%V&-gX4JC$cVjE_j+Q*)i-?9>Wlygp>snuC+je#rmwEr%GDv(=$+ zEs`v>2rbte)Q4)Q;*3B<&~SPY&C^*TgObXc7Vj)=nb8!_=gc4;d5Yw_I~LJXp2Vhg zK}BPPICi8b2R!G zvGc@wwAnSN2xMI}rWFSrjK`XUc%scpM*2Dti+iqYVP)eN$nUSQ>7J2!g1eo0UE?)? z4mvAs!0w3AvHHuZq|cEDtxFs+fmX}i$M4}uF-V_#O{`}g|6^W7Smt?*EfHJ#II-P* z$c;FGD8@LSxbzvWSjosn(NANVc;rGXSsneTq*dkEJg75Z>#*Ftd=QeMok-=k_@62~ z)On;x`RhhMx8vri zTNKj2_~aD4t3>m6py8gGEiXHv$|dsY8E`ZYjYy#P_abA)vNMo}82Tl}+j&+J9CNmk ziX2A3agj6Pg2e+`6_bwjb-vsUcj|asyJDs@j%z#NmT2BKdT*Y>Q_`w=zIe4u^kGoo zC$uQ8;_YEHb&OGapze$Nb5D#pyyxQ)MB6Tb3v=PegK)(>%|@cj`RMh0^#6H2b8Ro1 zvCPCSh(CG^-Co5#t{kOfb-u%_<4K~(N0H8I@vg>KsBskFL@b_;0L)tk*o(Tmuqb;!v)+8M{Q zzRulx^&V)`51pjO0Mu@W0;TY>7MgTHC3RZ7spvzsClxu;V#tHr=!RS>g}aSNKm-(X z))|X8(brnL)y`G1#K@KCn`nM>5n3Jdn;#IJ?L>>jbvyU0=K8moGyEA%EaSiPfd^8< zwQ0RboET5Dof8>nBGO<5%r;`w_sQoRLe{icV#qUSU+k7~LK*$ok4#&Aw1bw<2ALdqxdY*n%VfWvouiMZB<=Xt^UfQgpM|~iNe*I$ewfd!D=_D6Ccq5r5qc^f zSGrpJwhRAR%rm;6fa_3K0*YCKVD*`qoO1!a%Fs!(UnR79gnPyLTkUiM+meT5WMa9@ zxSpZyPI_{cpOyT5l9sx#PGg|%_mDhSrH$$~v%3E@aZw?3%0ULkB5B5JdWsfWSN}zg z{a;r{pvANBWg)cBR~PbSMT`hupIi~ho8H3ex;R7AZuf3*V?5t zw73u*w%Wod}xX=SjxA)4>Y zXM{$c)`N*Xw%$3CD_wU>2*#RBdsbYTk#FYvJK58dw4^3lvnDFjnXVC+I$VvtH%o7Q ztFvkmvGy&Fk<7ztnN`*ITc2FWc;;}Ok&0FLXKA+?&WUE#bL~g|kMcUm+*h>WZgj;J zq>@0Uoi$q}ZMxuQBX3Uh*Wp=#b~<;yI@6?=kV$? z)9|XEC%6DQixcUT`oOJCyHx> z3u(dM&bu;sdET@u7S0ZqK-C#MaSWa!#LU1NM03o}#tP_%wH7z&ZzB|_!#-I{cpOc0 z)?C2d1<2$Lo?-Q+GLgdl=IeW))+r?4*i>0RAIzgaGF$e_a`#%pI$+J3k@@T8`u_;t-2bhgzH9OHWJ-4*e|cJN5^Z~$jF@q;mf^q}$jKBW zKwn}VnDS(`#4#*ED#3>T^d*t2v#(%<^Q2gD;PyI>I>iJ_f|YpDNh@W)R~iV&Ga+u za5;bJyN~n#zp>Eb%?>aMPf7_x>m2Nu>ypOiMle?Rq@g>;L@R=9f^`^+kqmcp7>;zh zGV?8LTMXlIW-|iWv(hw|&)nz0)t*==n+~-Hp)Dy$jysV(#GU4paNiD9gq%RrVq zW!+eBOkkT`YfYeSt%GtTsv;ajg04VoO~he{g0eQ!)hTNT#dnG6a1D1DGMr719tmQ+ zM~U^!D6gT<)?$pr0%Y_2QQXxGe_D~UR>qNx)@9R!1$=M4-+cVdIwHyCjLr@K(DfNr)3;)B=i%3^qXd!yIg?%NP!^hZb@tyF_(D?9njOiJo&15p+k!bZgC_R|*&P3mj1abRnzE8!jJ_Vg} zuuHY{xs%AD2M(3sGpvOCjQSA2@x;_BG9`u7NnXZYIRA}70DTh!GV*&5d~3vGmwEQ9ZniWggaw z)LK1j)q=5uS(3}#@0xEX-xaeKbPzxEGBUppI*Uknf&VrT;a$W=x>wKh;qRlnf8gqU zLDVLu;TSKo>CTVLSGtB*K_9im<-E-HAj_gLBJ~9{cFJLOLtNZyX`F&UDO{}Io+=VU3qYtx)WAZ^)Tt)(}a8_CoS_AiAgeJw@ zU&a+zpyl1n8*JYyZP%==`Y{J)+-t679OH1FYVOZ$%~kTJg=q9@#vn?e4vJsqvnlBJ HLiFXo&2)c3 literal 0 HcmV?d00001 diff --git a/tests/proxy_unit_tests/large_text.py b/tests/proxy_unit_tests/large_text.py new file mode 100644 index 000000000..86904a6d1 --- /dev/null +++ b/tests/proxy_unit_tests/large_text.py @@ -0,0 +1,112 @@ +text = """ +Alexander the Great +This article is about the ancient king of Macedonia. For other uses, see Alexander the Great (disambiguation). +Alexander III of Macedon (Ancient Greek: Ἀλέξανδρος, romanized: Alexandros; 20/21 July 356 BC – 10/11 June 323 BC), most commonly known as Alexander the Great,[c] was a king of the ancient Greek kingdom of Macedon.[d] He succeeded his father Philip II to the throne in 336 BC at the age of 20 and spent most of his ruling years conducting a lengthy military campaign throughout Western Asia, Central Asia, parts of South Asia, and Egypt. By the age of 30, he had created one of the largest empires in history, stretching from Greece to northwestern India.[1] He was undefeated in battle and is widely considered to be one of history's greatest and most successful military commanders.[2][3] + +Until the age of 16, Alexander was tutored by Aristotle. In 335 BC, shortly after his assumption of kingship over Macedon, he campaigned in the Balkans and reasserted control over Thrace and parts of Illyria before marching on the city of Thebes, which was subsequently destroyed in battle. Alexander then led the League of Corinth, and used his authority to launch the pan-Hellenic project envisaged by his father, assuming leadership over all Greeks in their conquest of Persia.[4][5] + +In 334 BC, he invaded the Achaemenid Persian Empire and began a series of campaigns that lasted for 10 years. Following his conquest of Asia Minor, Alexander broke the power of Achaemenid Persia in a series of decisive battles, including those at Issus and Gaugamela; he subsequently overthrew Darius III and conquered the Achaemenid Empire in its entirety.[e] After the fall of Persia, the Macedonian Empire held a vast swath of territory between the Adriatic Sea and the Indus River. Alexander endeavored to reach the "ends of the world and the Great Outer Sea" and invaded India in 326 BC, achieving an important victory over Porus, an ancient Indian king of present-day Punjab, at the Battle of the Hydaspes. Due to the demand of his homesick troops, he eventually turned back at the Beas River and later died in 323 BC in Babylon, the city of Mesopotamia that he had planned to establish as his empire's capital. Alexander's death left unexecuted an additional series of planned military and mercantile campaigns that would have begun with a Greek invasion of Arabia. In the years following his death, a series of civil wars broke out across the Macedonian Empire, eventually leading to its disintegration at the hands of the Diadochi. + +With his death marking the start of the Hellenistic period, Alexander's legacy includes the cultural diffusion and syncretism that his conquests engendered, such as Greco-Buddhism and Hellenistic Judaism. He founded more than twenty cities, with the most prominent being the city of Alexandria in Egypt. Alexander's settlement of Greek colonists and the resulting spread of Greek culture led to the overwhelming dominance of Hellenistic civilization and influence as far east as the Indian subcontinent. The Hellenistic period developed through the Roman Empire into modern Western culture; the Greek language became the lingua franca of the region and was the predominant language of the Byzantine Empire up until its collapse in the mid-15th century AD. Alexander became legendary as a classical hero in the mould of Achilles, featuring prominently in the historical and mythical traditions of both Greek and non-Greek cultures. His military achievements and unprecedented enduring successes in battle made him the measure against which many later military leaders would compare themselves,[f] and his tactics remain a significant subject of study in military academies worldwide.[6] Legends of Alexander's exploits coalesced into the third-century Alexander Romance which, in the premodern period, went through over one hundred recensions, translations, and derivations and was translated into almost every European vernacular and every language of the Islamic world.[7] After the Bible, it was the most popular form of European literature.[8] + +Early life + +Lineage and childhood + +Alexander III was born in Pella, the capital of the Kingdom of Macedon,[9] on the sixth day of the ancient Greek month of Hekatombaion, which probably corresponds to 20 July 356 BC (although the exact date is uncertain).[10][11] He was the son of the erstwhile king of Macedon, Philip II, and his fourth wife, Olympias (daughter of Neoptolemus I, king of Epirus).[12][g] Although Philip had seven or eight wives, Olympias was his principal wife for some time, likely because she gave birth to Alexander.[13] + +Several legends surround Alexander's birth and childhood.[14] According to the ancient Greek biographer Plutarch, on the eve of the consummation of her marriage to Philip, Olympias dreamed that her womb was struck by a thunderbolt that caused a flame to spread "far and wide" before dying away. Sometime after the wedding, Philip is said to have seen himself, in a dream, securing his wife's womb with a seal engraved with a lion's image.[15] Plutarch offered a variety of interpretations for these dreams: that Olympias was pregnant before her marriage, indicated by the sealing of her womb; or that Alexander's father was Zeus. Ancient commentators were divided about whether the ambitious Olympias promulgated the story of Alexander's divine parentage, variously claiming that she had told Alexander, or that she dismissed the suggestion as impious.[15] + +On the day Alexander was born, Philip was preparing a siege on the city of Potidea on the peninsula of Chalcidice. That same day, Philip received news that his general Parmenion had defeated the combined Illyrian and Paeonian armies and that his horses had won at the Olympic Games. It was also said that on this day, the Temple of Artemis in Ephesus, one of the Seven Wonders of the World, burnt down. This led Hegesias of Magnesia to say that it had burnt down because Artemis was away, attending the birth of Alexander.[16] Such legends may have emerged when Alexander was king, and possibly at his instigation, to show that he was superhuman and destined for greatness from conception.[14] + +In his early years, Alexander was raised by a nurse, Lanike, sister of Alexander's future general Cleitus the Black. Later in his childhood, Alexander was tutored by the strict Leonidas, a relative of his mother, and by Lysimachus of Acarnania.[17] Alexander was raised in the manner of noble Macedonian youths, learning to read, play the lyre, ride, fight, and hunt.[18] When Alexander was ten years old, a trader from Thessaly brought Philip a horse, which he offered to sell for thirteen talents. The horse refused to be mounted, and Philip ordered it away. Alexander, however, detecting the horse's fear of its own shadow, asked to tame the horse, which he eventually managed.[14] Plutarch stated that Philip, overjoyed at this display of courage and ambition, kissed his son tearfully, declaring: "My boy, you must find a kingdom big enough for your ambitions. Macedon is too small for you", and bought the horse for him.[19] Alexander named it Bucephalas, meaning "ox-head". Bucephalas carried Alexander as far as India. When the animal died (because of old age, according to Plutarch, at age 30), Alexander named a city after him, Bucephala.[20] + +Education + +When Alexander was 13, Philip began to search for a tutor, and considered such academics as Isocrates and Speusippus, the latter offering to resign from his stewardship of the Academy to take up the post. In the end, Philip chose Aristotle and provided the Temple of the Nymphs at Mieza as a classroom. In return for teaching Alexander, Philip agreed to rebuild Aristotle's hometown of Stageira, which Philip had razed, and to repopulate it by buying and freeing the ex-citizens who were slaves, or pardoning those who were in exile.[21] + +Mieza was like a boarding school for Alexander and the children of Macedonian nobles, such as Ptolemy, Hephaistion, and Cassander. Many of these students would become his friends and future generals, and are often known as the "Companions". Aristotle taught Alexander and his companions about medicine, philosophy, morals, religion, logic, and art. Under Aristotle's tutelage, Alexander developed a passion for the works of Homer, and in particular the Iliad; Aristotle gave him an annotated copy, which Alexander later carried on his campaigns.[22] Alexander was able to quote Euripides from memory.[23] + +During his youth, Alexander was also acquainted with Persian exiles at the Macedonian court, who received the protection of Philip II for several years as they opposed Artaxerxes III.[24][25][26] Among them were Artabazos II and his daughter Barsine, possible future mistress of Alexander, who resided at the Macedonian court from 352 to 342 BC, as well as Amminapes, future satrap of Alexander, or a Persian nobleman named Sisines.[24][27][28][29] This gave the Macedonian court a good knowledge of Persian issues, and may even have influenced some of the innovations in the management of the Macedonian state.[27] + +Suda writes that Anaximenes of Lampsacus was one of Alexander's teachers, and that Anaximenes also accompanied Alexander on his campaigns.[30] + +Heir of Philip II + +Regency and ascent of Macedon + +Main articles: Philip II of Macedon and Rise of Macedon +Further information: History of Macedonia (ancient kingdom) +At the age of 16, Alexander's education under Aristotle ended. Philip II had waged war against the Thracians to the north, which left Alexander in charge as regent and heir apparent.[14] During Philip's absence, the Thracian tribe of Maedi revolted against Macedonia. Alexander responded quickly and drove them from their territory. The territory was colonized, and a city, named Alexandropolis, was founded.[31] + +Upon Philip's return, Alexander was dispatched with a small force to subdue the revolts in southern Thrace. Campaigning against the Greek city of Perinthus, Alexander reportedly saved his father's life. Meanwhile, the city of Amphissa began to work lands that were sacred to Apollo near Delphi, a sacrilege that gave Philip the opportunity to further intervene in Greek affairs. While Philip was occupied in Thrace, Alexander was ordered to muster an army for a campaign in southern Greece. Concerned that other Greek states might intervene, Alexander made it look as though he was preparing to attack Illyria instead. During this turmoil, the Illyrians invaded Macedonia, only to be repelled by Alexander.[32] + +Philip and his army joined his son in 338 BC, and they marched south through Thermopylae, taking it after stubborn resistance from its Theban garrison. They went on to occupy the city of Elatea, only a few days' march from both Athens and Thebes. The Athenians, led by Demosthenes, voted to seek alliance with Thebes against Macedonia. Both Athens and Philip sent embassies to win Thebes's favour, but Athens won the contest.[33] Philip marched on Amphissa (ostensibly acting on the request of the Amphictyonic League), capturing the mercenaries sent there by Demosthenes and accepting the city's surrender. Philip then returned to Elatea, sending a final offer of peace to Athens and Thebes, who both rejected it.[34] + +As Philip marched south, his opponents blocked him near Chaeronea, Boeotia. During the ensuing Battle of Chaeronea, Philip commanded the right wing and Alexander the left, accompanied by a group of Philip's trusted generals. According to the ancient sources, the two sides fought bitterly for some time. Philip deliberately commanded his troops to retreat, counting on the untested Athenian hoplites to follow, thus breaking their line. Alexander was the first to break the Theban lines, followed by Philip's generals. Having damaged the enemy's cohesion, Philip ordered his troops to press forward and quickly routed them. With the Athenians lost, the Thebans were surrounded. Left to fight alone, they were defeated.[35] + +After the victory at Chaeronea, Philip and Alexander marched unopposed into the Peloponnese, welcomed by all cities; however, when they reached Sparta, they were refused, but did not resort to war.[36] At Corinth, Philip established a "Hellenic Alliance" (modelled on the old anti-Persian alliance of the Greco-Persian Wars), which included most Greek city-states except Sparta. Philip was then named Hegemon (often translated as "Supreme Commander") of this league (known by modern scholars as the League of Corinth), and announced his plans to attack the Persian Empire.[37][38] + +Exile and return + +When Philip returned to Pella, he fell in love with and married Cleopatra Eurydice in 338 BC,[39] the niece of his general Attalus.[40] The marriage made Alexander's position as heir less secure, since any son of Cleopatra Eurydice would be a fully Macedonian heir, while Alexander was only half-Macedonian.[41] During the wedding banquet, a drunken Attalus publicly prayed to the gods that the union would produce a legitimate heir.[40] + +At the wedding of Cleopatra, whom Philip fell in love with and married, she being much too young for him, her uncle Attalus in his drink desired the Macedonians would implore the gods to give them a lawful successor to the kingdom by his niece. This so irritated Alexander, that throwing one of the cups at his head, "You villain," said he, "what, am I then a bastard?" Then Philip, taking Attalus's part, rose up and would have run his son through; but by good fortune for them both, either his over-hasty rage, or the wine he had drunk, made his foot slip, so that he fell down on the floor. At which Alexander reproachfully insulted over him: "See there," said he, "the man who makes preparations to pass out of Europe into Asia, overturned in passing from one seat to another." + +— Plutarch, describing the feud at Philip's wedding.[42]none +In 337 BC, Alexander fled Macedon with his mother, dropping her off with her brother, King Alexander I of Epirus in Dodona, capital of the Molossians.[43] He continued to Illyria,[43] where he sought refuge with one or more Illyrian kings, perhaps with Glaucias, and was treated as a guest, despite having defeated them in battle a few years before.[44] However, it appears Philip never intended to disown his politically and militarily trained son.[43] Accordingly, Alexander returned to Macedon after six months due to the efforts of a family friend, Demaratus, who mediated between the two parties.[45] + +In the following year, the Persian satrap (governor) of Caria, Pixodarus, offered his eldest daughter to Alexander's half-brother, Philip Arrhidaeus.[43] Olympias and several of Alexander's friends suggested this showed Philip intended to make Arrhidaeus his heir.[43] Alexander reacted by sending an actor, Thessalus of Corinth, to tell Pixodarus that he should not offer his daughter's hand to an illegitimate son, but instead to Alexander. When Philip heard of this, he stopped the negotiations and scolded Alexander for wishing to marry the daughter of a Carian, explaining that he wanted a better bride for him.[43] Philip exiled four of Alexander's friends, Harpalus, Nearchus, Ptolemy and Erigyius, and had the Corinthians bring Thessalus to him in chains.[46] + +King of Macedon + +Accession + +Further information: Government of Macedonia (ancient kingdom) +In summer 336 BC, while at Aegae attending the wedding of his daughter Cleopatra to Olympias's brother, Alexander I of Epirus, Philip was assassinated by the captain of his bodyguards, Pausanias.[h] As Pausanias tried to escape, he tripped over a vine and was killed by his pursuers, including two of Alexander's companions, Perdiccas and Leonnatus. Alexander was proclaimed king on the spot by the nobles and army at the age of 20.[47][48][49] + +Consolidation of power + +Alexander began his reign by eliminating potential rivals to the throne. He had his cousin, the former Amyntas IV, executed.[51] He also had two Macedonian princes from the region of Lyncestis killed for having been involved in his father's assassination, but spared a third, Alexander Lyncestes. Olympias had Cleopatra Eurydice, and Europa, her daughter by Philip, burned alive. When Alexander learned about this, he was furious. Alexander also ordered the murder of Attalus,[51] who was in command of the advance guard of the army in Asia Minor and Cleopatra's uncle.[52] + +Attalus was at that time corresponding with Demosthenes, regarding the possibility of defecting to Athens. Attalus also had severely insulted Alexander, and following Cleopatra's murder, Alexander may have considered him too dangerous to be left alive.[52] Alexander spared Arrhidaeus, who was by all accounts mentally disabled, possibly as a result of poisoning by Olympias.[47][49][53] + +News of Philip's death roused many states into revolt, including Thebes, Athens, Thessaly, and the Thracian tribes north of Macedon. When news of the revolts reached Alexander, he responded quickly. Though advised to use diplomacy, Alexander mustered 3,000 Macedonian cavalry and rode south towards Thessaly. He found the Thessalian army occupying the pass between Mount Olympus and Mount Ossa, and ordered his men to ride over Mount Ossa. When the Thessalians awoke the next day, they found Alexander in their rear and promptly surrendered, adding their cavalry to Alexander's force. He then continued south towards the Peloponnese.[54] + +Alexander stopped at Thermopylae, where he was recognized as the leader of the Amphictyonic League before heading south to Corinth. Athens sued for peace and Alexander pardoned the rebels. The famous encounter between Alexander and Diogenes the Cynic occurred during Alexander's stay in Corinth. When Alexander asked Diogenes what he could do for him, the philosopher disdainfully asked Alexander to stand a little to the side, as he was blocking the sunlight.[55] This reply apparently delighted Alexander, who is reported to have said "But verily, if I were not Alexander, I would like to be Diogenes."[56] At Corinth, Alexander took the title of Hegemon ("leader") and, like Philip, was appointed commander for the coming war against Persia. He also received news of a Thracian uprising.[57] + +Balkan campaign + +Main article: Alexander's Balkan campaign +Before crossing to Asia, Alexander wanted to safeguard his northern borders. In the spring of 335 BC, he advanced to suppress several revolts. Starting from Amphipolis, he travelled east into the country of the "Independent Thracians"; and at Mount Haemus, the Macedonian army attacked and defeated the Thracian forces manning the heights.[58] The Macedonians marched into the country of the Triballi, and defeated their army near the Lyginus river[59] (a tributary of the Danube). Alexander then marched for three days to the Danube, encountering the Getae tribe on the opposite shore. Crossing the river at night, he surprised them and forced their army to retreat after the first cavalry skirmish.[60] + +News then reached Alexander that the Illyrian chieftain Cleitus and King Glaukias of the Taulantii were in open revolt against his authority. Marching west into Illyria, Alexander defeated each in turn, forcing the two rulers to flee with their troops. With these victories, he secured his northern frontier.[61] + +Destruction of Thebes + +While Alexander campaigned north, the Thebans and Athenians rebelled once again. Alexander immediately headed south.[62] While the other cities again hesitated, Thebes decided to fight. The Theban resistance was ineffective, and Alexander razed the city and divided its territory between the other Boeotian cities. The end of Thebes cowed Athens, leaving all of Greece temporarily at peace.[62] Alexander then set out on his Asian campaign, leaving Antipater as regent.[63] + +Conquest of the Achaemenid Persian Empire + +Main articles: Wars of Alexander the Great and Chronology of the expedition of Alexander the Great into Asia +Asia Minor + +Further information: Battle of the Granicus, Siege of Halicarnassus, and Siege of Miletus +After his victory at the Battle of Chaeronea (338 BC), Philip II began the work of establishing himself as hēgemṓn (Greek: ἡγεμών) of a league which according to Diodorus was to wage a campaign against the Persians for the sundry grievances Greece suffered in 480 and free the Greek cities of the western coast and islands from Achaemenid rule. In 336 he sent Parmenion, Amyntas, Andromenes, Attalus, and an army of 10,000 men into Anatolia to make preparations for an invasion.[64][65] At first, all went well. The Greek cities on the western coast of Anatolia revolted until the news arrived that Philip had been murdered and had been succeeded by his young son Alexander. The Macedonians were demoralized by Philip's death and were subsequently defeated near Magnesia by the Achaemenids under the command of the mercenary Memnon of Rhodes.[64][65] + +Taking over the invasion project of Philip II, Alexander's army crossed the Hellespont in 334 BC with approximately 48,100 soldiers, 6,100 cavalry and a fleet of 120 ships with crews numbering 38,000,[62] drawn from Macedon and various Greek city-states, mercenaries, and feudally raised soldiers from Thrace, Paionia, and Illyria.[66][i] He showed his intent to conquer the entirety of the Persian Empire by throwing a spear into Asian soil and saying he accepted Asia as a gift from the gods. This also showed Alexander's eagerness to fight, in contrast to his father's preference for diplomacy.[62] + +After an initial victory against Persian forces at the Battle of the Granicus, Alexander accepted the surrender of the Persian provincial capital and treasury of Sardis; he then proceeded along the Ionian coast, granting autonomy and democracy to the cities. Miletus, held by Achaemenid forces, required a delicate siege operation, with Persian naval forces nearby. Further south, at Halicarnassus, in Caria, Alexander successfully waged his first large-scale siege, eventually forcing his opponents, the mercenary captain Memnon of Rhodes and the Persian satrap of Caria, Orontobates, to withdraw by sea.[67] Alexander left the government of Caria to a member of the Hecatomnid dynasty, Ada, who adopted Alexander.[68] + +From Halicarnassus, Alexander proceeded into mountainous Lycia and the Pamphylian plain, asserting control over all coastal cities to deny the Persians naval bases. From Pamphylia onwards the coast held no major ports and Alexander moved inland. At Termessos, Alexander humbled but did not storm the Pisidian city.[69] At the ancient Phrygian capital of Gordium, Alexander "undid" the hitherto unsolvable Gordian Knot, a feat said to await the future "king of Asia".[70] According to the story, Alexander proclaimed that it did not matter how the knot was undone and hacked it apart with his sword.[71] + +The Levant and Syria + +Further information: Battle of Issus and Siege of Tyre (332 BC) +In spring 333 BC, Alexander crossed the Taurus into Cilicia. After a long pause due to an illness, he marched on towards Syria. Though outmanoeuvered by Darius's significantly larger army, he marched back to Cilicia, where he defeated Darius at Issus. Darius fled the battle, causing his army to collapse, and left behind his wife, his two daughters, his mother Sisygambis, and a fabulous treasure.[72] He offered a peace treaty that included the lands he had already lost, and a ransom of 10,000 talents for his family. Alexander replied that since he was now king of Asia, it was he alone who decided territorial divisions.[73] Alexander proceeded to take possession of Syria, and most of the coast of the Levant.[68] In the following year, 332 BC, he was forced to attack Tyre, which he captured after a long and difficult siege.[74][75] The men of military age were massacred and the women and children sold into slavery.[76] + +Egypt + +Further information: Siege of Gaza (332 BCE) +When Alexander destroyed Tyre, most of the towns on the route to Egypt quickly capitulated. However, Alexander was met with resistance at Gaza. The stronghold was heavily fortified and built on a hill, requiring a siege. When "his engineers pointed out to him that because of the height of the mound it would be impossible... this encouraged Alexander all the more to make the attempt".[77] After three unsuccessful assaults, the stronghold fell, but not before Alexander had received a serious shoulder wound. As in Tyre, men of military age were put to the sword and the women and children were sold into slavery.[78] +""" diff --git a/tests/proxy_unit_tests/log.txt b/tests/proxy_unit_tests/log.txt new file mode 100644 index 000000000..9b8654df0 --- /dev/null +++ b/tests/proxy_unit_tests/log.txt @@ -0,0 +1,104 @@ +============================= test session starts ============================== +platform darwin -- Python 3.11.4, pytest-8.3.2, pluggy-1.5.0 -- /Users/krrishdholakia/Documents/litellm/myenv/bin/python3.11 +cachedir: .pytest_cache +rootdir: /Users/krrishdholakia/Documents/litellm +configfile: pyproject.toml +plugins: asyncio-0.23.8, respx-0.21.1, anyio-4.6.0 +asyncio: mode=Mode.STRICT +collecting ... collected 1 item + +test_function_calling.py::test_aaparallel_function_call[claude-3-haiku-20240307] + + +Request to litellm: +litellm.completion(model='claude-3-haiku-20240307', messages=[{'role': 'user', 'content': "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses"}], tools=[{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}], tool_choice='auto') + + +SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False +Final returned optional params: {'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}], 'tool_choice': {'type': 'auto'}} +optional_params: {'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}], 'tool_choice': {'type': 'auto'}} +SENT optional_params: {'tools': [{'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}}], 'tool_choice': {'type': 'auto'}, 'max_tokens': 4096} +tool: {'type': 'function', 'function': {'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}} + + +POST Request Sent from LiteLLM: +curl -X POST \ +https://api.anthropic.com/v1/messages \ +-H 'accept: *****' -H 'anthropic-version: *****' -H 'content-type: *****' -H 'x-api-key: sk-ant-api03-bJf1M8qp-JDptRcZRE5ve5efAfSIaL5u-SZ9vItIkvuFcV5cUsd********************************************' -H 'anthropic-beta: *****' \ +-d '{'messages': [{'role': 'user', 'content': [{'type': 'text', 'text': "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses"}]}], 'tools': [{'name': 'get_current_weather', 'description': 'Get the current weather in a given location', 'input_schema': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}], 'tool_choice': {'type': 'auto'}, 'max_tokens': 4096, 'model': 'claude-3-haiku-20240307'}' + + +_is_function_call: False +RAW RESPONSE: +{"id":"msg_01HRugqzL4WmcxMmbvDheTph","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[{"type":"text","text":"Okay, let's check the current weather in those three cities:"},{"type":"tool_use","id":"toolu_016U6G3kpxjHSiJLwVCrrScz","name":"get_current_weather","input":{"location":"San Francisco","unit":"celsius"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":379,"output_tokens":87}} + + +raw model_response: {"id":"msg_01HRugqzL4WmcxMmbvDheTph","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[{"type":"text","text":"Okay, let's check the current weather in those three cities:"},{"type":"tool_use","id":"toolu_016U6G3kpxjHSiJLwVCrrScz","name":"get_current_weather","input":{"location":"San Francisco","unit":"celsius"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":379,"output_tokens":87}} +Logging Details LiteLLM-Success Call: Cache_hit=None +Looking up model=claude-3-haiku-20240307 in model_cost_map +Looking up model=claude-3-haiku-20240307 in model_cost_map +Response + ModelResponse(id='chatcmpl-7222f6c2-962a-4776-8639-576723466cb7', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content="Okay, let's check the current weather in those three cities:", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "San Francisco", "unit": "celsius"}', name='get_current_weather'), id='toolu_016U6G3kpxjHSiJLwVCrrScz', type='function')], function_call=None))], created=1727897483, model='claude-3-haiku-20240307', object='chat.completion', system_fingerprint=None, usage=Usage(completion_tokens=87, prompt_tokens=379, total_tokens=466, completion_tokens_details=None)) +length of tool calls 1 +Expecting there to be 3 tool calls +tool_calls: [ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "San Francisco", "unit": "celsius"}', name='get_current_weather'), id='toolu_016U6G3kpxjHSiJLwVCrrScz', type='function')] +Response message + Message(content="Okay, let's check the current weather in those three cities:", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "San Francisco", "unit": "celsius"}', name='get_current_weather'), id='toolu_016U6G3kpxjHSiJLwVCrrScz', type='function')], function_call=None) +messages: [{'role': 'user', 'content': "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses"}, Message(content="Okay, let's check the current weather in those three cities:", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "San Francisco", "unit": "celsius"}', name='get_current_weather'), id='toolu_016U6G3kpxjHSiJLwVCrrScz', type='function')], function_call=None), {'tool_call_id': 'toolu_016U6G3kpxjHSiJLwVCrrScz', 'role': 'tool', 'name': 'get_current_weather', 'content': '{"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}'}] + + +Request to litellm: +litellm.completion(model='claude-3-haiku-20240307', messages=[{'role': 'user', 'content': "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses"}, Message(content="Okay, let's check the current weather in those three cities:", role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "San Francisco", "unit": "celsius"}', name='get_current_weather'), id='toolu_016U6G3kpxjHSiJLwVCrrScz', type='function')], function_call=None), {'tool_call_id': 'toolu_016U6G3kpxjHSiJLwVCrrScz', 'role': 'tool', 'name': 'get_current_weather', 'content': '{"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}'}], temperature=0.2, seed=22, drop_params=True) + + +SYNC kwargs[caching]: False; litellm.cache: None; kwargs.get('cache')['no-cache']: False +Final returned optional params: {'temperature': 0.2, 'tools': [{'type': 'function', 'function': {'name': 'dummy-tool', 'description': '', 'parameters': {'type': 'object', 'properties': {}}}}]} +optional_params: {'temperature': 0.2, 'tools': [{'type': 'function', 'function': {'name': 'dummy-tool', 'description': '', 'parameters': {'type': 'object', 'properties': {}}}}]} +SENT optional_params: {'temperature': 0.2, 'tools': [{'type': 'function', 'function': {'name': 'dummy-tool', 'description': '', 'parameters': {'type': 'object', 'properties': {}}}}], 'max_tokens': 4096} +tool: {'type': 'function', 'function': {'name': 'dummy-tool', 'description': '', 'parameters': {'type': 'object', 'properties': {}}}} + + +POST Request Sent from LiteLLM: +curl -X POST \ +https://api.anthropic.com/v1/messages \ +-H 'accept: *****' -H 'anthropic-version: *****' -H 'content-type: *****' -H 'x-api-key: sk-ant-api03-bJf1M8qp-JDptRcZRE5ve5efAfSIaL5u-SZ9vItIkvuFcV5cUsd********************************************' -H 'anthropic-beta: *****' \ +-d '{'messages': [{'role': 'user', 'content': [{'type': 'text', 'text': "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses"}]}, {'role': 'assistant', 'content': [{'type': 'tool_use', 'id': 'toolu_016U6G3kpxjHSiJLwVCrrScz', 'name': 'get_current_weather', 'input': {'location': 'San Francisco', 'unit': 'celsius'}}]}, {'role': 'user', 'content': [{'type': 'tool_result', 'tool_use_id': 'toolu_016U6G3kpxjHSiJLwVCrrScz', 'content': '{"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}'}]}], 'temperature': 0.2, 'tools': [{'name': 'dummy-tool', 'description': '', 'input_schema': {'type': 'object', 'properties': {}}}], 'max_tokens': 4096, 'model': 'claude-3-haiku-20240307'}' + + +_is_function_call: False +RAW RESPONSE: +{"id":"msg_01Wp8NVScugz6yAGsmB5trpZ","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[{"type":"text","text":"The current weather in San Francisco is 72°F (22°C)."},{"type":"tool_use","id":"toolu_01HTXEYDX4MspM76STtJqs1n","name":"get_current_weather","input":{"location":"Tokyo","unit":"celsius"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":426,"output_tokens":90}} + + +raw model_response: {"id":"msg_01Wp8NVScugz6yAGsmB5trpZ","type":"message","role":"assistant","model":"claude-3-haiku-20240307","content":[{"type":"text","text":"The current weather in San Francisco is 72°F (22°C)."},{"type":"tool_use","id":"toolu_01HTXEYDX4MspM76STtJqs1n","name":"get_current_weather","input":{"location":"Tokyo","unit":"celsius"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":426,"output_tokens":90}} +Logging Details LiteLLM-Success Call: Cache_hit=None +Looking up model=claude-3-haiku-20240307 in model_cost_map +Looking up model=claude-3-haiku-20240307 in model_cost_map +second response + ModelResponse(id='chatcmpl-c4ed5c25-ba7c-49e5-a6be-5720ab25fff0', choices=[Choices(finish_reason='tool_calls', index=0, message=Message(content='The current weather in San Francisco is 72°F (22°C).', role='assistant', tool_calls=[ChatCompletionMessageToolCall(index=1, function=Function(arguments='{"location": "Tokyo", "unit": "celsius"}', name='get_current_weather'), id='toolu_01HTXEYDX4MspM76STtJqs1n', type='function')], function_call=None))], created=1727897484, model='claude-3-haiku-20240307', object='chat.completion', system_fingerprint=None, usage=Usage(completion_tokens=90, prompt_tokens=426, total_tokens=516, completion_tokens_details=None)) +PASSED + +=============================== warnings summary =============================== +../../myenv/lib/python3.11/site-packages/pydantic/_internal/_config.py:284 + /Users/krrishdholakia/Documents/litellm/myenv/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ + warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning) + +../../litellm/utils.py:17 + /Users/krrishdholakia/Documents/litellm/litellm/utils.py:17: DeprecationWarning: 'imghdr' is deprecated and slated for removal in Python 3.13 + import imghdr + +../../litellm/utils.py:124 + /Users/krrishdholakia/Documents/litellm/litellm/utils.py:124: DeprecationWarning: open_text is deprecated. Use files() instead. Refer to https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy for migration advice. + with resources.open_text("litellm.llms.tokenizers", "anthropic_tokenizer.json") as f: + +test_function_calling.py:56 + /Users/krrishdholakia/Documents/litellm/tests/local_testing/test_function_calling.py:56: PytestUnknownMarkWarning: Unknown pytest.mark.flaky - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/stable/how-to/mark.html + @pytest.mark.flaky(retries=3, delay=1) + +tests/local_testing/test_function_calling.py::test_aaparallel_function_call[claude-3-haiku-20240307] +tests/local_testing/test_function_calling.py::test_aaparallel_function_call[claude-3-haiku-20240307] + /Users/krrishdholakia/Documents/litellm/myenv/lib/python3.11/site-packages/httpx/_content.py:202: DeprecationWarning: Use 'content=<...>' to upload raw bytes/text content. + warnings.warn(message, DeprecationWarning) + +-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html +======================== 1 passed, 6 warnings in 1.89s ========================= diff --git a/tests/proxy_unit_tests/messages_with_counts.py b/tests/proxy_unit_tests/messages_with_counts.py new file mode 100644 index 000000000..da27a9755 --- /dev/null +++ b/tests/proxy_unit_tests/messages_with_counts.py @@ -0,0 +1,733 @@ +system_message_short = { + "message": { + "role": "system", + "content": "You are a bot.", + }, + "count": 12, +} + +system_message = { + "message": { + "role": "system", + "content": "You are a helpful, pattern-following assistant that translates corporate jargon into plain English.", + }, + "count": 25, +} + +system_message_long = { + "message": { + "role": "system", + "content": "Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.", + }, + "count": 31, +} + +system_message_unicode = { + "message": { + "role": "system", + "content": "á", + }, + "count": 8, +} + +system_message_with_name = { + "message": { + "role": "system", + "name": "example_user", + "content": "New synergies will help drive top-line growth.", + }, + "count": 20, +} + +user_message = { + "message": { + "role": "user", + "content": "Hello, how are you?", + }, + "count": 13, +} + +user_message_unicode = { + "message": { + "role": "user", + "content": "á", + }, + "count": 8, +} + +user_message_perf = { + "message": { + "role": "user", + "content": "What happens in a performance review?", + }, + "count": 14, +} + +assistant_message_perf = { + "message": { + "role": "assistant", + "content": "During the performance review at Contoso Electronics, the supervisor will discuss the employee's performance over the past year and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals and objectives for the upcoming year [employee_handbook-3.pdf].", + }, + "count": 106, +} + +assistant_message_perf_short = { + "message": { + "role": "assistant", + "content": "The supervisor will discuss the employee's performance and provide feedback on areas for improvement. They will also provide an opportunity for the employee to discuss their goals and objectives for the upcoming year. The review is a two-way dialogue between managers and employees, and employees will receive a written summary of their performance review which will include a rating of their performance, feedback, and goals for the upcoming year [employee_handbook-3.pdf].", + }, + "count": 91, +} + +user_message_dresscode = { + "message": { + "role": "user", + "content": "Is there a dress code?", + }, + "count": 13, +} + +assistant_message_dresscode = { + "message": { + "role": "assistant", + "content": "Yes, there is a dress code at Contoso Electronics. Look sharp! [employee_handbook-1.pdf]", + }, + "count": 30, +} + +user_message_pm = { + "message": { + "role": "user", + "content": "What does a Product Manager do?", + }, + "count": 14, +} + +text_and_image_message = { + "message": { + "role": "user", + "content": [ + {"type": "text", "text": "Describe this picture:"}, + { + "type": "image_url", + "image_url": { + "url": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z/C/HgAGgwJ/lK3Q6wAAAABJRU5ErkJggg==", + "detail": "high", + }, + }, + ], + }, + "count": 266, +} + + +search_sources_toolchoice_auto = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + "parameters": { + "type": "object", + "properties": { + "search_query": { + "type": "string", + "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", + } + }, + "required": ["search_query"], + }, + }, + } + ], + "tool_choice": "auto", + "count": 66, +} + +search_sources_toolchoice_none = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + "parameters": { + "type": "object", + "properties": { + "search_query": { + "type": "string", + "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", + } + }, + "required": ["search_query"], + }, + }, + } + ], + "tool_choice": "none", + "count": 67, +} + +search_sources_toolchoice_name = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + "parameters": { + "type": "object", + "properties": { + "search_query": { + "type": "string", + "description": "Query string to retrieve documents from azure search eg: 'Health care plan'", + } + }, + "required": ["search_query"], + }, + }, + } + ], + "tool_choice": {"type": "function", "function": {"name": "search_sources"}}, + "count": 75, +} + +integer_enum = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "integer_enum": {"type": "integer", "enum": [-1, 1]} + }, + }, + }, + } + ], + "tool_choice": "none", + "count": 54, +} + + +integer_enum_tool_choice_name = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "integer_enum": {"type": "integer", "enum": [-1, 1]} + }, + }, + }, + } + ], + "tool_choice": { + "type": "function", + "function": {"name": "data_demonstration"}, + }, # 4 tokens for "data_demonstration" + "count": 64, +} + +no_parameters = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + }, + } + ], + "tool_choice": "auto", + "count": 42, +} + +no_parameters_tool_choice_name = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + }, + } + ], + "tool_choice": { + "type": "function", + "function": {"name": "search_sources"}, + }, # 2 tokens for "search_sources" + "count": 51, +} + +no_parameter_description_or_required = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + "parameters": { + "type": "object", + "properties": {"search_query": {"type": "string"}}, + }, + }, + } + ], + "tool_choice": "auto", + "count": 49, +} + +no_parameter_description = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "search_sources", + "description": "Retrieve sources from the Azure AI Search index", + "parameters": { + "type": "object", + "properties": {"search_query": {"type": "string"}}, + "required": ["search_query"], + }, + }, + } + ], + "tool_choice": "auto", + "count": 49, +} + +string_enum = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "summarize_order", + "description": "Summarize the customer order request", + "parameters": { + "type": "object", + "properties": { + "product_name": { + "type": "string", + "description": "Product name ordered by customer", + }, + "quantity": { + "type": "integer", + "description": "Quantity ordered by customer", + }, + "unit": { + "type": "string", + "enum": ["meals", "days"], + "description": "unit of measurement of the customer order", + }, + }, + "required": ["product_name", "quantity", "unit"], + }, + }, + } + ], + "tool_choice": "none", + "count": 86, +} + +inner_object = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "object_1": { + "type": "object", + "description": "The object data type as a property", + "properties": { + "string1": {"type": "string"}, + }, + } + }, + "required": ["object_1"], + }, + }, + } + ], + "tool_choice": "none", + "count": 65, # counted 67, over by 2 +} +""" +namespace functions { + +// This is the main function description +type data_demonstration = (_: { +// The object data type as a property +object_1: { + string1?: string, +}, +}) => any; + +} // namespace functions +""" + +inner_object_with_enum_only = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "object_1": { + "type": "object", + "description": "The object data type as a property", + "properties": { + "string_2a": { + "type": "string", + "enum": ["Happy", "Sad"], + } + }, + } + }, + "required": ["object_1"], + }, + }, + } + ], + "tool_choice": "none", + "count": 73, # counted 74, over by 1 +} +""" +namespace functions { + +// This is the main function description +type data_demonstration = (_: { +// The object data type as a property +object_1: { + string_2a?: "Happy" | "Sad", +}, +}) => any; + +} // namespace functions +""" + +inner_object_with_enum = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "object_1": { + "type": "object", + "description": "The object data type as a property", + "properties": { + "string_2a": { + "type": "string", + "enum": ["Happy", "Sad"], + }, + "string_2b": { + "type": "string", + "description": "Description in a second object is lost", + }, + }, + } + }, + "required": ["object_1"], + }, + }, + } + ], + "tool_choice": "none", + "count": 89, # counted 92, over by 3 +} +""" +namespace functions { + +// This is the main function description +type data_demonstration = (_: { +// The object data type as a property +object_1: { + string_2a?: "Happy" | "Sad", + // Description in a second object is lost + string_2b?: string, +}, +}) => any; + +} // namespace functions +""" + +inner_object_and_string = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "data_demonstration", + "description": "This is the main function description", + "parameters": { + "type": "object", + "properties": { + "object_1": { + "type": "object", + "description": "The object data type as a property", + "properties": { + "string_2a": { + "type": "string", + "enum": ["Happy", "Sad"], + }, + "string_2b": { + "type": "string", + "description": "Description in a second object is lost", + }, + }, + }, + "string_1": { + "type": "string", + "description": "Not required gets a question mark", + }, + }, + "required": ["object_1"], + }, + }, + } + ], + "tool_choice": "none", + "count": 103, # counted 106, over by 3 +} +""" +namespace functions { + +// This is the main function description +type data_demonstration = (_: { +// The object data type as a property +object_1: { + string_2a?: "Happy" | "Sad", + // Description in a second object is lost + string_2b?: string, +}, +// Not required gets a question mark +string_1?: string, +}) => any; + +} // namespace functions +""" + +boolean = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "human_escalation", + "description": "Check if user wants to escalate to a human", + "parameters": { + "type": "object", + "properties": { + "requires_escalation": { + "type": "boolean", + "description": "If user is showing signs of frustration or anger in the query. Also if the user says they want to talk to a real person and not a chat bot.", + } + }, + "required": ["requires_escalation"], + }, + }, + } + ], + "tool_choice": "none", + "count": 89, # over by 3 +} + +array = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_coordinates", + "description": "Get the latitude and longitude of multiple mailing addresses", + "parameters": { + "type": "object", + "properties": { + "addresses": { + "type": "array", + "description": "The mailing addresses to be located", + "items": {"type": "string"}, + } + }, + "required": ["addresses"], + }, + }, + } + ], + "tool_choice": "none", + "count": 59, +} + +null = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_null", + "description": "Get the null value", + "parameters": { + "type": "object", + "properties": { + "null_value": { + "type": "null", + "description": "The null value to be returned", + } + }, + "required": ["null_value"], + }, + }, + } + ], + "tool_choice": "none", + "count": 55, +} + +no_type = { + "system_message": { + "role": "system", + "content": "You are a bot.", + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_no_type", + "description": "Get the no type value", + "parameters": { + "type": "object", + "properties": { + "no_type_value": { + "description": "The no type value to be returned", + } + }, + "required": ["no_type_value"], + }, + }, + } + ], + "tool_choice": "none", + "count": 59, +} + +MESSAGES_TEXT = [ + system_message, + system_message_short, + system_message_long, + system_message_unicode, + system_message_with_name, + user_message, + user_message_unicode, + user_message_perf, + user_message_dresscode, + user_message_pm, + assistant_message_perf, + assistant_message_perf_short, + assistant_message_dresscode, +] + +MESSAGES_WITH_IMAGES = [text_and_image_message] + +MESSAGES_WITH_TOOLS = [ + inner_object, + inner_object_and_string, + inner_object_with_enum_only, + inner_object_with_enum, + search_sources_toolchoice_auto, + search_sources_toolchoice_none, + search_sources_toolchoice_name, + integer_enum, + integer_enum_tool_choice_name, + no_parameters, + no_parameters_tool_choice_name, + no_parameter_description_or_required, + no_parameter_description, + string_enum, + boolean, + array, + no_type, + null, +] diff --git a/tests/proxy_unit_tests/model_cost.json b/tests/proxy_unit_tests/model_cost.json new file mode 100644 index 000000000..8d6f6851e --- /dev/null +++ b/tests/proxy_unit_tests/model_cost.json @@ -0,0 +1,3 @@ +{ + "gpt-3.5-turbo": 7.7e-05 +} \ No newline at end of file diff --git a/tests/proxy_unit_tests/openai_batch_completions.jsonl b/tests/proxy_unit_tests/openai_batch_completions.jsonl new file mode 100644 index 000000000..05448952a --- /dev/null +++ b/tests/proxy_unit_tests/openai_batch_completions.jsonl @@ -0,0 +1,2 @@ +{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}} +{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo-0125", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}} \ No newline at end of file diff --git a/tests/proxy_unit_tests/openai_batch_completions_router.jsonl b/tests/proxy_unit_tests/openai_batch_completions_router.jsonl new file mode 100644 index 000000000..8a4c99ca8 --- /dev/null +++ b/tests/proxy_unit_tests/openai_batch_completions_router.jsonl @@ -0,0 +1,3 @@ +{"custom_id": "task-0", "method": "POST", "url": "/chat/completions", "body": {"model": "my-custom-name", "messages": [{"role": "system", "content": "You are an AI assistant that helps people find information."}, {"role": "user", "content": "When was Microsoft founded?"}]}} +{"custom_id": "task-1", "method": "POST", "url": "/chat/completions", "body": {"model": "my-custom-name", "messages": [{"role": "system", "content": "You are an AI assistant that helps people find information."}, {"role": "user", "content": "When was the first XBOX released?"}]}} +{"custom_id": "task-2", "method": "POST", "url": "/chat/completions", "body": {"model": "my-custom-name", "messages": [{"role": "system", "content": "You are an AI assistant that helps people find information."}, {"role": "user", "content": "What is Altair Basic?"}]}} \ No newline at end of file diff --git a/tests/proxy_unit_tests/speech_vertex.mp3 b/tests/proxy_unit_tests/speech_vertex.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c676110335e39090b8d5fa8f65a8c6c9b9a2f70e GIT binary patch literal 133244 zcmXUs1H4?j(>Y%6-rc*`wrcy;wr$(CUv2Z%ezjli-uCvk*S6O<%$N56{qhS=GBeRk zPBNMF>)NT)0vQelw;j}R(#!=RIsgEM+>4O_ta$9`*ac(zAegP77w7?+gO;Ev zXpK_6z(_C^%m?$pdaxKQ1S^qSf&2n66HErvzy#!GqVhE;Z#CEl4uHSG7O(?s0~-+h z3a|_PEq6z@d-ACx)-XsF zK=@lxYCX#PJzi`7l>W}!fW9^%h~Fc>0%5KBLw7TRSb@fT)t~&ue_FNrPx)qqvkB2V zgnF?9_3k)|kDR5yNu^GXyLbd;H&jwWI41_ZUj0PjX0MH+C$z+5*3}KEy;cptF5XC8|rE?LTxu|qD z3MV0~2`K#=b37^^_b2_k&P9mkDl}#jQTZfTeP(9=RU zrXY$_{y>aJ(1TH}qfqWh6b}SL5X3;lyX`?I&<(T$jgk8ceYHY--4e7xsdlKOGb-zd zTvyNs^hI$8l-C`7bwseO(N{~*5Yz`PkZX!kwLx9v>Z7<0s0SMSq1g~M0C5LuTF?YM^sx2&;&_T>;I3Nqjd8>G=5{YL3Oo7t@#b#8HK%&>x(G#M&bWb zy-|(!AgFGr zuCAz#-y_ffG(x@^qSXY2%@J-R6#njWr$6wc5&i%)lM~R0jzE44nv=1AW@Q2jhojN% zhT!@jm_ew9erR@vpqzex=H@r-R5WXo(R|NF9JCDa&sro4OHkfaMDh1}F$Zzn@3|X~ z=6fE3TY}tb#N{iIUyr2kZ{+tQ2|a|Qb~pOkiKJvJa;uSBj(BC>AMX6kwbTD_*KaPH zfv7G*^{quPzh!Mcl7-*(%t9rz{_x-L+D4+*k3oI{YV*iHqcj+e)Iiks{-_T_{^bAW zr!lB>Fv{zL!rwFc+jT?Z(+-VND>NT}p?Uv3uT2miG(oA~bMkv$fAh)jxv2vZKs*Qs zVaSJoNZ<$jfhTg_zys)k92n7>s{gU?Xl zfv4bUcpRlpq3`4H1Uw6m!b|AuGRixL!vDG32=`xj6S=#H(p?naKsDS$HQYfo|3&^L zynv`(KseX_P&$t2oPfvR9=IKDgNKmcirf;o0WO7W;4HWZE{8MWDwJA)QtMIfIuve( zyHUIYu0^=N@%F$Oh|cf)<0yY0+=TGg!bxy0oDLTwKMI9i;6ykWHit2P=)}S;PzHM< z$R2P$tc_qM!r7?iZtw`a4nM#eRKrxLfGQXc>!Ng1*b9wi7jOv#0xj$#b(eJDE_NM{ z!e@cq(hl*1_*GhhO~Ka_FR(gNW6@8zBhAGQ;|iiZHXSC58A5+?6%4={;c3`?7%lY@ zouV4r;X1@6Hbj2_41=815!E>awR$Nmg(jE^C&TU1cIl-w6AnjQ83g9R=hAxVh*Tpj zhCI}OMA#T6N~fe((rDDmeQ1OkI1d&{sZvwe0G>y)xEj%#k4Eq%dYBT1Z({xf3`@Bd!ufB%a% zq55Ydxm}D_@8AFXy?-F4qBUb3lEqaBYB-Xb_DIGWBPw$c_97&~osj%&Kr*=lJU}qF z5#~s=HpQd0rV*0Wx+v@fYLFaM!z$F<-|KEa)Vh8MdpF1iLD*1i2eu#6;!p8fIN#mgx8r|j6XyLWkSW*+=ve&Y|nREDJ@e6-SXfK7} zkI8z}4)Q$w%9XfRxd#g$AcwWW2Y`I(sZd{>DEOJ%*sP$5hVH6fiVXEW&mDgH;H=OUfm;p!@>rsj*e)wo zJy#x~D?lOZw0T8XnZ`y%KlbQ^AHWQ zy}Z14`;7FR~rY;3R3QlFAvq~GEn;Br35m1_y8#>=J@?#^qH10R6kxfUZs@}q1+INSKRYmiygUkh3%E;W%Znj{uTEt z?o~B5#aUiiJ?$2|!r8@ri|Y%25aXE%iq*+_Zkuz1?N!Z%ioD`Z`7bl0Q_lQ&_ANi@!B0!d zrWF65hm+!d4oWv=iaDO?LsF+@$CNCyhlxi-$$i4^W4p+zNPmz20tSY?3%TJ-d4_xR z_3;Vco3OWjlRB|6_kvgZ{A(QP738}kU`ZhFJ4qiW7r_r!UT2+oJKT#H0vZq8=5|SUDH#NI;`mpq^SyUcVl3i9+&?AdUFU$xo+*0Ztcm5u_DxI!;tddt;{M#>)QyV~)F2S%MwWA85By}d3P-+4Xs+30)CH`y0> z4Kdu+IkcR5fqI+rm243;9*dO*x|5tet&2=zMW^D1IbNv?lTBaSeXIPu?{nZ6=8HS= zT=K@`=^tKy@O;zw3zmDWB)2lB_;b#X?4t#}D~__Q=qOc;$79t@swZVrUGg~)W3TZ=Q9T1ox5FYeRsLbk-6 zWZz&uU(u*!b#Y1Y%<}T83)P)#zEnq6_pe@JGFgnSYdj9>5>~t$TJH?9^~xzai|0`9 zUA`B5jlOw4qOU3-IdFUMz2K;zhW@L3PIx7I`gxwvuhAV+kC2y=G>qV*T`%mPtu<98 zrLFVoq_t1pkQABt{>$Sp>0ge08emR_aJAX-G+w8Te`6>C?s^an1 zmi!8`AM+glf{UWBJju8%As);ZU(U6YUo4ddEI#0SB^1tH@@ zr-Zf-IN^CqwVM8j^#s8nQ<4dx?v1v|)vwDNmyIg(E^k!%sd8tXtzDEBTk=*Lz)aT9q+x#XW z(Jw0@$2)abvi4_MvXs@dysPyr_Z%FQ3fOHzIB`)u!hcoxt%z+=nDo@DGJJAwe3Ay{Ato2k46R>H2KLQhkwUM{k|q z*nqSEh5sI(EnW+ZT|K`Ty6G>g3zY|$vqUMp%NIIYnZ2vd7mm!^m`VLg{~nzTla)Vn zQg?rO`tst-v9Di!KAzS;DuzthZ3G$daQIQezB6~&N4kLr__`UR;tC^xaNe?5>kPR@c z*noXw?{EHGv!!Z5<=m>S)rYIO>ep2l%D-0Xt3R1$JL_i+CwTrvrV}|CQ=KqLUxIx zjwQL`ukuO7Uo#`qZE58xAbVYw>09ReDR1AtxtwTDJC(BN*TtXnzSaGH|5sSXn}Us| zOvfZ!fIY`@%r+lH%J1tJ`}GaX2rLau3R)7>C2D=`h4s=J&8fd2UKu+mx@D9mx?4-e`nKY zuz(Fv%ocp*rvT^80vQ_;7I_6_C9H0vt#*^y$!WJ;#x)Zi@6g0 zHTrq%p2)+YZvyg+MY=-mLhV?!pK37E86%}-><_2b@zOre9_mSIzmz3L;*{a#<^P2jak($YB57h~IJkvy0LeSV| zSeO4`3$i_{>0DA%R6qAy`h?7VISsQrXY;>aeEj=k<4=muOVT!_&H2?eJv_O`w|zh3 zGRtxkYrfe|n!DOZTc+4{icP4^n&Dnw{Ga*`3@`*l`VWZ8iTPOPNB#G8-Y2|^9Tzhx zvU_Z!n1<1wF*8Cx`S~o)Ub#$L{d$>JaBkTjM8I~1R zfBQyfp}UgX!E1z*{8k}ZT!D?H{$b)2_2uW}?PZgh0rF(|Q$?Igq1q0 zw7*udqa2+rhb!+E*t18a%QGM4HZ80vc#yvI+mQF`-!D%*nLIk%nMJ05OwIV-H|g0= z--4@^rPd7AVmW0=v@$LcJXUTqPVk!;i1{z{t>TloFpk74Pra5Pt4o4o3EDALT zoba7uR2UEHx9cP|uCOy}DKoJG;xJd3BKV0lTpDj;hjFpoVs0bXo4+k~k}ks9;3Mn= zreQur92rkBR10bdNfI0Jb-;wqeQ&VKu$cSk3bjPk+^^bP;ZxyL(llS2(=MYy*4vz8 zS=ZC9{XCa=G4WCI%9J`8C$r+RoT+=$PNfgeOfFhhUB}+m+1jOZ8l3rJ9L2YtDWGM*rm)nAQ*l>otxgzMYfmf@yDvI7x_MNe$bFG_Lr#V^4K(`h^D%l(@K~qz zR}WUu@=h`>8HQh!YDw*c;ldtqv-n)xBJ36JaJ6|sh!R`F{^(qug!I2gyeTGNDm(qdp9*AopR#XfrDuEPT+A%_iKobt-u;M8 zk4-zA{UPH-wwNWRpUM&PSCxM-jd#v-_Hu|^zF-1ga&K(|{Z8+r{@?w!2HAqXgnC4! zME8%a8}lHxTkMY*Iwm}FQ+UtNH6izd2L|2?@bbIkWi;&9zE!T5UzC54wUiB^Hxiq{ z66tTTu^2C|kaoiosev?7^cG{qMv^zI2b9PA;PT*0!G;KPM5ib!GBx6K z#Hp~f(1*d@0y+O&|4F{>eKNgjds;jcniSKW?e@U{8-`@i*12$1_v_8;YcDxfVoS#I-7@-Z5H zJhSzzCP}wc7p3Z_bjcsehEv;#*Mth6i+{sDfY%^h%0qgf*V0DRQa>>S1R>3tgmiP! z#9FKqY=|HzagZ289>ixu9KVV|_PDzx`_e9OSA=tJ+BwK+wuzPPO?@jzSFSK0skl-S zS-GU@TYg-zyyk7$^3p36=PGKI#FwwCnOJqZX11-9HO_k2+0HS;VdJJCPM!o*L=U2x zjFOF0tW@?;P1ok=-s-+-{?<}SYsF``b+=tGlgtEg#BuN>NN2h_Xx{U*BbFl%{$W-x5CoGyt1aR#bNcZ zo;Tevy>f}Rd!|0NXVycue{Fs3lep`w$<>K(CT{1exclxf<>HW@?3#2GZWOjN-xZr+2sVzcPql{Qz&4zrM^Yv-PJUN*51&nr#BWM-NPnD{ z8mqoDBd|&uqn1G)Sg3zoZrqzW2}0?Puqzw{(y>FLFIuGo1qb{hP3H4u?}Su)d+IAp zcGcnUfWPoN?q+PFx|chzx>ET>n!$Z??ShM_uJ)6X16z!BWWA-$6yX>tO{7}cR$zOG zTt_6F3?^ew*thO)%69A3OZJX)wfm~f#OSN9%uskCLj zzv#WDx3W7-ZF?=GIX@^SI34(KcPO@9_JaQ3DpdJC4dDHgH|=YWZ4Whs~&a0(`2M8$5y5 zwnRHy+|TxBk|ifw7yDQCl|9dGk{gH`^KBwnbJco*EkPQqeU%Ns0Iv;|8NxMvoZ~UK zj@m2Ua^IITylDSNz5xdEGsPw{qd3tyS-paAS)x=4)Fg8;u~K)(+6Hf?#hgc=TV`V? zSf|KulAj$fhLv7) zuXhc`Rmv^=WTAwvhDU8Stq1qolCB9QN!u5ChGwQE54)^Pw&YP4i2^YlKPz;T1+gLc zTKG4cLjUKs^CGd8^dh`?jNYqGa=wPoh_2FlJ`sE*zOso#4&KR?NuL!{_|uq^{ffnt zfr3lez?2DN;4QK<)|vZ(-Ga^ea)#rVKplL8=QD)sFd<;a@dMH**GG??LV+to7a{#3 z!}z-Rc?D-54Oc2&y1jT>+d{bGoGNcEW=T!>3yLt;OZJg|fW00ziT>z1#K_r9Y8-s& zx~y>6Dv3740-+w2f^^3!u^-tOd&`E(r+^aJ0@P40*FKD*<1L3YBe}2QK2@ddxNHmF zNgTxVwHow4Yzf#@svn0l25W!K3@(RVMDGyxF)7?9kV|_=A~p`BlZVB&;G{s(-K7O= zJz$qxCAR8^kB76^*3aA1ycp}O_|BW)QYqR~S2>l~q`qr=3*Ml!ZXa8WZi#TzdD3T* zopFygWVz;{o2VAd0in6useA643;xE_F)yfeT{V2b4yQrtJu1sawhacA%o$H<3y_4;A*@pV5{1xjx zufe>x`Mt3O%ddVE>@gm-KNVI z>f84i+jH+6sk$TV1F!%e$~@x#mToH^+6w8uN_TY&*+`|2a~ZZAj%TWci@3m)IlNH!lI&nMuk$e+S_*;xT_Jrn_$zpcOg0lm-m zPT>!q$TGPvLZpGO(5f!c+Z`f35x<6MxJA@8=5OaKY^REHwBdT{i%la*Kk6DcQnl7T zm-Yw$38&O%%VEZk*z7KlujD%8?ddVr76#VU#434rvkk@^hz+XC?##@!=sfKF8 z^h=o{+!bxacIiLH%{GKbHA%KP#2~7ct1(%FkCgmjQ{pk!T>3(p@nY^f*^x@M7t1ts zq=RFM2vUG}S9F)x3HGJpz$mtnVhJxpe*`=9qHglJUG)W20X_(R0sG(z zsu;TY$;<+&4G`c&<_cR^nk#?Gbr5Q+da{`GPPyJTfeKY@x8J1>kb}iZa0yn7-xdpq z|HRF>H~r0(OuwSqxtCJ^5$D`Z$q(>=bQ=qykHaP0Az2LN<5(?Q!nAalRr{rEp^S_p z2Dwvkll-{#E1gFSaDQcTq_NTy(#_W=d*V8wGvmfexnk;)kPD{ZpWIjI&%zy{D|1*F z2%j($xRulbw6|F%Ysy_Adx1n^4nE7x$c}=oZZF~(748nfqp2fYUqXvB;ueO+S2&y+ zy|_(yPPKB|=>1@xP(%2`@7M*9D7D0N_+HABdyiong}tLxq1+=*;)mc3=o0n~If%D| zX>^ifvOEd1@%^Zs!h7-)+0xlb#xwI=&A<%xB$q`zAm6}#hfzdR7!EvyuJZc=Ei6_v z7owzh)Ml)lqh#G-XW=$E2A?4W<2T5TQi9ZuYDd0yEGL)9rrV!mYv~qJ4POnLtFPK3 zV610rODGp*m}#FP+*5eK^8y4fvFEUZSOW)QhlQa;H_GW6LlQDA+eu87CyNa^A7V5u z7i8{JddZO}9hG0;%77c5$BM-tQZV^YYQtYw?hqF_s|>k(JI5wv3HHqKjq0KpZi!)5 zD!*Gg6X|3N>@P~$-J=HCwWBO2Y5Tnrulpg{MM3 z-4@;PI>AiX8_Sbck`&rAJ*DEEUU)nCGxrv0wLIF@2E3vrXB^oFUnedBuQ5sLEkR-- zcAhnnO3+Ps53b_FSsY6xr@5cv-eit6mG_rvFq~~dn5g003V4%Qz^|8%k#B{wa2xUa zPJ2JGRy-w2U?Uz2{^O#UZ}6zg&GZ8M?GcJ)l1BJ|)u&RptFEV-AlG2_ojgSdU>7ov zv6ikEvd7>uS58%llo&!n;ia?*za#Dh-dH|1SG-F$=OZwN?%`TZWRWd60~k#c;yE@# z@kD%Vlj)|4JIu?JiDZCzDz#14!;%9X>RFCW?mUn2Y?3Qcr3W^Ln|e&0bFLH{(NDxn z?sV#-WMyMzi?D5cJ8BJ{?+&2rh*t$CJ)UiXmBD7@d;S8qP@Ttq<<2uBh#c2E2$d)7 za`CvTiSsgFpq$3maT_)Mu%Fea?n&;m-J@E|ra0D#Ym`^qBG1Zexed|~x)t2zo`OH6 z<~wWCLqL0}4f%wPZJ5O z6=U?>nL{Nps>Yf-=zlv|LHSyAI1kIaNqU#0unX1fblGU3ns<}eutJxTy2_L|he(8cA+L9LQ#&06(lO?_ zqc1a9g2g{W2D-ibFKRGc4({<& zNh3MWHc9>kPq2QM-vRU4S7@j9Ksw`op~UT-Aygfs227JQu6p=t>tlU^D43wxu+fsbD{V)7TK{IbTkU!BbodfS>0ak*s{6saY*(~{;WS5~ya(>@yhz?4ud}u2d0?Hi3=_B%=DU#N zZYiGtZn*^*pq@~3op`RSs9DVL(4C64#mzC0pC?&`Hc0hZot7%H&r_uc^PKY_5a}l34By z!6W{CjkPBxif`b2#1jLBJb6b}iFdoo#8)@{<1{i z=2XOJ;(%~V@?{dZv!E@r(cKNlh>3iE@+g+g?x4>BLf8yulGoU!L?r%^Ux6V;} zy`<~m{?9dAUAU25Iz(8yo(GQ4{8L!3%U_>56DH?A_(tV&#Dagdy{*Wi$Mb}xBNygz#+~hKtuO&Ur;_eMeslN0NG989 z$D1;>>{CIEs*7WQRH;3~Dmgn9NrrRdgnya2!d(0xG`p8XH~2172|Vr^s};*M}u zioge8l#ocBlo|ssqP8$pD#QceXs#o<6>cY*5jE0OYylBS;BY;@7~76?vKwd%vS(UK zEJ0`BD(Mm|BzGX|rz{Z9+;rZ7^_8B&U*ZaUySUY*pp;-K{}$Q!MB^X0F))d4Ey&pc z^gW`hb0W2fz@;p59+VSd0jFB z$l>@IX|)tdrilMa3#m`wyn7m+kAD)*;zPkzsV`O~E(2erDZD>E9At`v;d?kt{2RZF z`G9-G4DmT}TAa@vkd;X<`M-#T)Icy+e1T8Ht?1944(^L9!E7EN@g?ET8mV zaUk&)uSea+#*3ry9bk@Nk_5CnjFGzHlksj+2TU&Q73&h6u)mOY-$aDq1Nf%cFp38) zgb=J9-d_v?*O0A?o|+8YVhkKbMB{7uaaa*yk`jcGpf-6K_7JvVM(R885~jdA_&E?O zAsrJLfFBa-N^glc;wd*w>Oo(?27}&WTWlopMNk7H(GyRV{*i{`76RFIiyG`-awyhH z918OB-N?c*5Zg+82GM9H1Iha6JaPiY;Fm$JG#^`s>~;fCuf2f{{41;!&3F&2gkJ}S z6Z63@sWZHfA$wi1J^Y1vVV~h(aE+Lb=OJAu2OFXF=NyzHc{qe`!Zrg)yvLe>P55$T z*N0&WtT!AA3h=Wygq3K0YzlJ3>F^B3g2B>#*a+8Rr4SZ$F0Y93AMqSp-{wOt28G%ryKL+3dd-Vx?DB<=6+z6RU^&;jb_or~u2cg~+;hG_u1@hXCt> zMM4p*#(qk<(jH{D8ZV8J&LX?Rv9MZdhHOgZFkJ#t2k8*(EL|4s!vtg{`dR!A9nxQ- zC~gs5qFbV*C*m2HCoPjAq&kStf~Ah)Y^f*A5+6%Wkb`dT?t%*Vk8}gwY|)6vIBXYq z4@}r-JOhit^+YOO5ATg95&i^&?jVCH8?l+(PsNad^H~=HAY;~dCKuf-p-ZpXS;tH+lNe7l!>`g+npRqG&m>(V*#M6RM$eF-_-k>F zSTb&3w103;@QuI^0V5&{!bS!M`0etlZLH;W#yDNIjv?Sa{yaa2E#eRJ1-y}cY$3mFxa#lHAIZ&yP50Twayi~N*{jCdBt)rCO6ia*+UD>91 zZ}GXR_cguQRQKlc`gvl-!+<{&;dvVP77wB55w z;IOcEA^!zG4a39Tv=l8?}L+@R{;?opm( z$(N2QFM2*zbmH%jwRJHeOXFIH-3UqusUNmDcumNXpv_^U>u1*uh(6-KNIOA0*gMT@ zm}Y`(5lj)v1j+T}Z)-5? z_ZgNMSi>}9wTGw1$TSiwoj%r!rgl}Qii^{0|2k2$q+*_Fn`LzB>d%{gd`L6rJ}A>w zwW+zuEf5pUGfaDJiEy|lW?U*O$J1#>e<$)&#IJxgK_~nJLn1?x0>=fOh9Xe~dDc#hz(yJt|U`oM1tKkpA1n(y|KArt+t2kq!iAoUv@+juuIA@r`)qeBubOJaJ^Ir=r?i*whg7NNU649#lW#NM`933q@`LsT zt_W-qHm}zF`0|iho`;nqHM|$;WB1suxB`OsJI)&G2eZzy%-YcOxm+k3UAU}tam`k1 zCwmvieS1H5GnhkTn$w0I#+QbnhPnC|hGZk~{$On2@kw4saydh-|5bzP?BWNhZ@xdy zs8`x$&2g-c9hZ5&q@I*IYDU-{m0%CS(51spZ@m0cki*?G^l0nZFc zh7;Z|1Ga`74%!rOBcON8!MIgn&3tn-TeP*j2m9SJCTg0HU-?4kJ8Ob1*q-ZHVkIgY zlsqmNSzKOOYW`|J=|1Q#a5ocMH(;y$h+`;Qec7drQSypY{6ncndCq%OP8+rpbJj5gw&JzJb;L-+ey#oDUruIV`+=Fy{X` zpl!&mpo`vR+Q!TsELGYm8l}Nf03T=5R~#-rrS_E3%fEqdUE>tlkVx`urZGKQoEseK;l52)wK5~)b$@hZiOOKV>MymSq3q%9>MetyU*;kn3kWFA;+dg}5`vS)@*E{zx z_f2=YkV8CDKG!@`K&pTkOnf4mU>}5Hu8XV2{+d0%c; zZgg>m>8ZFI>r5`ERubM)rRA?`Xq!t+@R*?8O-^*7e}JQeuYgx&wC8=_5&mw^Cfe=V z&VD&jUlO|4GR6{N_XA43mg}GD0}a#l_tg3HayXXT;L5cvvf1qyol&m#4!Jed8sJ>c zzT(>mvG6~FkXsc2OfV@Dhsmqtd7$AX*DXgg`^=iNCGYe7vc{x)W=-%U3zmZR70w-F5jA-s=8hm0OzI zL)j6+QFLB-MWxCPGl%eC;DbT5H+=+OvLo$JEpyEqDv08Cc@r|4X6lRFWwuJIg+pvd&VOUAwWfrcZVhdc?7V~Q#~TwLaAU|;2|%PkO+1<_q?zi(gR z8qZD?&Hz1GOEyTJO|AsH;52XxtAOKqw_~AIZysIwt+;3Y?#vVEUvsw>H!k~H?OE}r zAf&iunN)q>S(QHuMMAK1f;H3n(b-G54Q9bULRUVUy~Z8lec&EunaT%%_+1Le)BZP*Fvo!CY?EcwDJDMWIJ z2heN4RhX9apsN`N^O9PLZhk3HBOc~&I1{Z+OtmWCmDJ>q$^jXt(z|3m%xa%|IX^Y8 zSH8J0xukD-u!*wuck%3fx0kcf(%W{)9^rn=cjI3RLwU%Z<083^@D^pzq!=!EmH7Pf z?HTkn)E)Ufc76QRTBG9+#{`FG1*Q1k@iBT|GB|bHRLL?Yslqye5V#+`CA^E?XJViT z`~v;(KIC`mG*dyZquk)9VjWxu?gU%Le&AX?Y!;sLN_@(*#}$}EFn88`s$8(7Wi5M zd_y;cS47>3i%-~6>q)|-xZG$WVrfW&fGxh0y@nZ{>bk2kCVDv&!qk>Qx+9%Wh7%94P^cCv+%N617XPXvW!^>m@(yHQNDoeLpBk9Trz}j} zm^upC()wgS$(0or6n7{;S81-iTK%|sxaqY?Z!uYR+D1DrJ9@jOxF2vP^a5oQvq81m zgZ4h+Hz{~Rm=t*?wrPB`TH6x#CxEy+FbLZfnbq0){KP`3WN_J6^fo4^vP1Qr8l_3F_*kFY*E+U4*SYOnsuYgfm?Fg} zk0HiYzFC3Zp%WtRM8A*i8Sjy>C;qRvr7^!E*M~)ioD1mahkM6(Zu0Qf&R36BE>z5t z=gJ6qnM{(^MlUsI$-U$o(Adi93+RS?HCQ69;Oe33`-MDqg4547*i=xtqHKS0X8z1v zG3$Bew~T}7ThbS%Elt0i{ygJ$=AG=%x$gY>MNf*Rm257ZSvIm_SLNrbOcP^iY`bqS zbpCYL5j@cg(e|>T%9*-Wp2^blv9<{Ha9nSVxC|Ao&vAJz@ov$ z&x+@l#+Eg!I8@oN`h%&K<(;j@QG;%aN$5)qq`ef|)x2(nXSDZpzYhT!LC-^;g$@kc z5LP$re(1Z9+98!e<$(*)d&{oA|3}hUKu3|ZU%!3aB{P}WM2P^w-F-?&<2Pr=EN7ulBfI&_C0>> zh!`)XrP~z|s)?G9+B5n`Zu5-1F~C&f{@&f-o@sb!nB}&{ZN6Tu|E%@UN;Nap$5maG zHHzQzVRBTqPofa76SfxI!7(fWp+It6!QEgy>Eq-PSBAsekzgBRYi~VlAuK^H(w2-S zwrN?DtMN#qv2kXjXJh+DS>yOdbK`)<L0+MT zGsjpx-yihN2cXrMACXJ^6d=M>VWY4_)L!f*`678Jc_djRxha_-87x^L*&~UUAks&Y z^OCL-U&&eVdU2!ZjcAN$v#>~TfoMzI!~USThy@OSfgg!K&SkP{_9?TLPN5>GGgLKM zOpXOzwM5r*R~y$&=S$~S=W1t}Bgx@(d~=L-xI5DAD;==2)EVUpCiju=$X(PS;AuX= zz6L#3Cm<6v^Pd10ppkpZ#dEK?{(KUs8*E%S5Ajn#GXFAE0J^L#$OTl0iLs5?3QU9j zhq|Lcc}C>G_dOc)w|l^y;RPURun=U_Hv$Gs7BmUC_KV?0*axT$=ixlSd`RYJ@XcHW z+sw{l?=u$qC_Rb(N>Nm2`WZc#Y0V5{k{OJh%MN8rK<%;$TumGo%YEe%x#iqdE*z+G z*&r#9#k^y3m>_mI8_6C7KhciY0tMa=B=nyGC6|B>Lc zo=^z%3EX86xS|wr?c*T>plb96*CvEocppGWuz_sGQou=B4qVrV0Ttl|xY}9>0d?qQy@sNpSST5a0(Z3wkPQ|>W(Wmj4?UnSv;kTL1HM5kFNK_76wU{n zp`MTtkPZSNF(iZD1470Fz{WTUm=_Ph-EHP)0)E3oJ_9^S0?-Wl{*Sk?4lorK{*OB{ z2rw2#0_uS$MDt~U^YIw`cZxs5M*+gdH$I+E0-uWoW8f%wCT9WdVj<9`RshPxH1M5m z<)?%HPX)A%OMw3I5&U#9`1FhaQ9fRPCmjQxd=dD~)!^9+0NX(d_#^GXH!uWzE0X}r zVH!}=r+{ZS`F|{xo!|-mAIV|E|5#2x!Mo4|k&pv?M|nV1i-vNcufTWz|G7>7#~+#l zJ~tBFdpB?gJ^shUF$0>35OVM>;J2oL>y8H36b0@e3;ex?hXDD+52%O}z;9azxGrkA zJ=}z>!TW<6FcN9Uo7tQE3R${hm0&B^mA=gd(nnmUnR|fu@GrNUy+e&>4E!2)4wc|Y zr#zsJY#f}3L)cTU#3d%poCsM;XaqR+6&cS3^Shu}R7jk~c+3smggzAHNvkB?L{Y>~ zXcJIhq*$=vJJFMXk$&7DZVmJTwZJcQgY^th8lq4sj&jeT^Y|ESfMB`sJ9B}_J8gowp@3kHb8MI!|Zg$d%#LZR@9=&Wpz zVw3!Ybh3D%c$RRL@D7+SS4jJd=L-$OJH$?6Fi4W$!%Lv=>^%AcyB$h_YmgeG5qSg| zxGL@x?7+4PTCj0g5UK;Z+Ei>Oz6l=&_G!h?bhr&N20e|nAriq|9O6Tv09Xn~!K1-G zQ_LIKWBeQN2B>%$H?mb(~O{aH~Ni@WMVwZrO&ptMdtp&cRiKNrn zkI_+jY7Uj^Omdbx+cG|MFJ=s#;iQ0vVmNi)wUqoy|8R98MWl(OK~gfuImbmaDV&kn z;uJbMvBTj&_y|`^wWDeBBejFM3wLIRQJJnCj(x5(_IYG9cL%!3RFHS6&TI$jgJT-C zmikQfrEk*r$@kS13ieI1Z_h^upWBMqqNb*(F>3vXbV)rbYO2Y-I;o5 zFH!{6a8djNZZc;BO6qHV9h`}T@xBaLU(hf3PHY3Vn(s?G$cOw7^f7iHWbFv93;U4n zz?@_IKzGq*ptz>7lUkp{2M6L|_-f1se`4Cv4&aU%j*iC9!CuS&SFCeAdjWRf zB2g+9%B`VIOlQ6nheh`UHgqG`h1y2lXpskL{iK?K$d^pN_~CpZ^hhh_*u@OS)g?jUe0_COa40)$=g zSMYjfFg1wk#g5>&K-aJ~r~n>_GnkQ#6)Gj%1PieuWH+eQqxpMC z5vU6;!`I;6@L41j?F(k_nP^wIm{~w?r%X&Ms6Dy}y$e?NL;QLMp?|Q+$W(kfUVtbV zA$gD5!JOKcmcD;9&D!D$SJ-$HZ1n`1?GVx0*Ga)HCBOU^3bH(m-H zPmO3BWEar*UvdXoHFq5BJYQn%P#Mry&oS-k4eWDh2G*NcN>pKYft%wq>&LC)=R$AL zGTa~SiqX&<_5<^PEri;l{~)1|7aI-IvnQEB{0Jl%L6Ir&LQwMzTb=pkfaLD^466fV{$D;d5L7M}wYO9se0F1HJ(-z5}y_q8#H~<5_nw z6O=-y;QJu8x|-j}bm05&`N%*bf{=^$pi>|>emmQpb%7L;8}Owp3h<0N@u7fy)dsRa)$jwX3-Ja| zLALT8?KXVkiU+ z$0rIdVx9S|%y{N2cMRe{C7I8?W-um{sbDSv&%+q*47(L%rSI|!kf$J5Sq*n&FOc5U zC-xEYnkWkc$tr;pBL+SRPvD0!)97Sc&gH{DkV9ZiuEA2UW+ag>0raf~=qr2;@UVKY z0o+QYEB+5Uoj=87(I2>PU<~a>uOlTq2JUw$aEG3zw{izz0(l7Cho*sE<5cb^Sp6oz z5)^?exQomxE(@eZfrA8kz#jnanD)pDu9z9Xq_ZK=Amk=|6x7Gd`45}`c(l%9U5N^O zJSycDu><)4?mpKV)V3$N59~Wu2-%^}=u}Jsa>LmSMRfzNy>IAtbOjv952JgMeW-ug zU0{CMih!I3UjsEGYJ3*a4(|*PW2~f+de44DWW*+X3(}U2B44{QsZ8KHc?CUzdmwr! zl|9Y&;8Iv`t{b3ywdcD*I=B|T4}IlHZV9i4c5#8gr4j|+z8F3eS`79@o4MVbjGxUf zh9<(>Ac6~FKkxzIy{L!$f$RALplqiBhzY#~6x!LQkQ$^UJW!7=*-v9moOZ57o$QV;oc?1wWtBY`FY$(`Wv-_4dEZao8TIdJig6t<{ME3$QEvdJ~N)A)hPzu+AwAW_n2!B zB_bP%>w;tG-|#QyGUZ7nFrPt}A{`rt#_+|glj+C};|(ASe-u{m|I+34ZxO>Qa{3bD4a0ab`SoQ|hitJ0arxW->cp3Z--3FL{_hBa##4V%q=_s~{ zPed08_6isRMbGh4u93w!l%ERMp*U8B4!~@PA0JO%a;|rFVD5nI$uE2`vJZ3}9#T`; z?(k3eKDw8v5{wmW7VO85!Oh$Q#zpm{V(Dlw&*gIlkY^7ptDod+2l;@@QoTGuSPJ+nanv~G3RHkxf^PBinVqhg&Q7j@^gwn69}RX;mCyk6 z2?h}`QHIPwG}sy9Jk}A~!*q16v7dGhrEzEqrp6r5I?fC1TEgMKQ58A?Ud|i19sCQ# zi7N$@iII3GSiuY?{eV(p15P$KL6Yz{@de8R{)iAZp8Z5#0FLYF;3`HFy(K4v2auC| z3#*1+;s-?o#8ZWvVTf5nhB)h8G2~(P4oDldXZ_e){BrEK@T{moBog%}V9bE1;czGr ziA7H%S2-cw!S&7Y-f_oy%W=Z~%b8Br(_i3!1zwT|(w%@Fw_2=_G)X`z06oFIrKSQU zr_@nwJ8WrZwOS&qlSvusE|DnKsamTLX z@Es8io+1Dd4{4S3w2;CW&YyZgEo0K5ANUwldWsWlxO2L3>k?&MQDCDo4`b}Tlm4C({lss4t#du*dacT_5gjIJn#m0=D^MezKYQ3DPpDY zm(X2MffRCm=|7|w1+3c8zkt9L#m|K=@}22<RtOg0NpKSP zjM~C}hxUQ$+RXoC%b7}UJCq5!C}a6&EDR_lrD%iT5g_Bqhr76s0<`aFKIn7n^BavQ2E#U!llQyUURzR(}YU&^QH8sW+z`TM75MM<-#8-(o z&^_i1c+M2FoHwx~6TwY_cq~}#D~5CqT2%-RMGi40vO{fqj{VUCJ(IHvkQ|+1}RC znR*E_w-fN6!gyh@C|b~r_aUq(zy@%2q|7zmwVKRjj<9u{0I}lJg?oS(J%N8hM}i(= z3*C>d;%pFLeS(@}5c&_e&;86_O`=-yEYVYZ4rJnYa`%`u>>l