From c8e0fc1a7d86fc877fb6b771f7007cc3801eb4b3 Mon Sep 17 00:00:00 2001 From: Alexey Rybak Date: Mon, 22 Sep 2025 13:43:53 -0700 Subject: [PATCH] old md files deprecation --- docs/source/advanced_apis/eval/index.md | 6 - .../eval/inline_meta-reference.md | 25 - .../advanced_apis/eval/remote_nvidia.md | 23 - .../advanced_apis/evaluation_concepts.md | 77 -- docs/source/advanced_apis/index.md | 33 - .../post_training/huggingface.md | 122 --- .../advanced_apis/post_training/index.md | 7 - .../post_training/inline_huggingface.md | 40 - .../post_training/inline_torchtune.md | 25 - .../post_training/nvidia_nemo.md | 163 ---- .../post_training/remote_nvidia.md | 32 - .../advanced_apis/post_training/torchtune.md | 125 --- docs/source/advanced_apis/scoring/index.md | 7 - .../advanced_apis/scoring/inline_basic.md | 17 - .../scoring/inline_braintrust.md | 23 - .../scoring/inline_llm-as-judge.md | 17 - docs/source/apis/api_leveling.md | 94 -- docs/source/apis/external.md | 392 --------- docs/source/building_applications/agent.md | 92 -- .../agent_execution_loop.md | 139 --- docs/source/building_applications/evals.md | 125 --- docs/source/building_applications/index.md | 33 - .../building_applications/playground/index.md | 107 --- docs/source/building_applications/rag.md | 280 ------ docs/source/building_applications/rag.png | Bin 148579 -> 0 bytes .../responses_vs_agents.md | 179 ---- docs/source/building_applications/safety.md | 17 - .../source/building_applications/telemetry.md | 143 ---- docs/source/building_applications/tools.md | 264 ------ docs/source/concepts/api_providers.md | 12 - docs/source/concepts/apis.md | 21 - docs/source/concepts/architecture.md | 70 -- docs/source/concepts/distributions.md | 9 - docs/source/concepts/index.md | 23 - docs/source/concepts/resources.md | 19 - docs/source/conf.py | 156 ---- docs/source/contributing/index.md | 39 - docs/source/contributing/new_api_provider.md | 90 -- .../contributing/new_vector_database.md | 75 -- 
.../contributing/testing/record-replay.md | 231 ----- docs/source/deploying/index.md | 4 - .../source/deploying/kubernetes_deployment.md | 247 ------ docs/source/distributions/building_distro.md | 443 ---------- docs/source/distributions/configuration.md | 802 ------------------ .../distributions/customizing_run_yaml.md | 40 - docs/source/distributions/eks/apply.sh | 19 - .../distributions/eks/gp3-topology-aware.yaml | 15 - .../distributions/importing_as_library.md | 34 - docs/source/distributions/index.md | 15 - docs/source/distributions/k8s/apply.sh | 63 -- .../k8s/chroma-k8s.yaml.template | 66 -- .../k8s/hf-token-secret.yaml.template | 7 - .../k8s/ingress-k8s.yaml.template | 17 - .../k8s/postgres-k8s.yaml.template | 66 -- .../distributions/k8s/stack-configmap.yaml | 56 -- .../distributions/k8s/stack-k8s.yaml.template | 69 -- .../distributions/k8s/stack_run_config.yaml | 140 --- .../distributions/k8s/ui-k8s.yaml.template | 68 -- .../distributions/k8s/vllm-k8s.yaml.template | 70 -- .../k8s/vllm-safety-k8s.yaml.template | 71 -- .../distributions/list_of_distributions.md | 127 --- .../ondevice_distro/android_sdk.md | 262 ------ .../distributions/ondevice_distro/ios_sdk.md | 134 --- .../remote_hosted_distro/index.md | 20 - .../remote_hosted_distro/watsonx.md | 78 -- .../self_hosted_distro/dell-tgi.md | 78 -- .../distributions/self_hosted_distro/dell.md | 190 ----- .../self_hosted_distro/meta-reference-gpu.md | 125 --- .../self_hosted_distro/nvidia.md | 171 ---- .../self_hosted_distro/passthrough.md | 42 - .../self_hosted_distro/starter.md | 232 ----- .../starting_llama_stack_server.md | 25 - docs/source/getting_started/demo_script.py | 68 -- .../getting_started/detailed_tutorial.md | 553 ------------ docs/source/getting_started/index.md | 13 - docs/source/getting_started/libraries.md | 10 - docs/source/getting_started/quickstart.md | 77 -- docs/source/index.md | 133 --- docs/source/providers/agents/index.md | 22 - .../providers/agents/inline_meta-reference.md | 25 - 
docs/source/providers/batches/index.md | 24 - .../providers/batches/inline_reference.md | 23 - docs/source/providers/datasetio/index.md | 15 - .../providers/datasetio/inline_localfs.md | 21 - .../providers/datasetio/remote_huggingface.md | 21 - .../providers/datasetio/remote_nvidia.md | 25 - docs/source/providers/eval/index.md | 16 - .../providers/eval/inline_meta-reference.md | 21 - docs/source/providers/eval/remote_nvidia.md | 19 - .../external/external-providers-guide.md | 286 ------- .../external/external-providers-list.md | 11 - docs/source/providers/external/index.md | 13 - docs/source/providers/files/index.md | 14 - docs/source/providers/files/inline_localfs.md | 24 - docs/source/providers/files/remote_s3.md | 33 - docs/source/providers/index.md | 28 - docs/source/providers/inference/index.md | 42 - .../inference/inline_meta-reference.md | 32 - .../inference/inline_sentence-transformers.md | 13 - .../providers/inference/remote_anthropic.md | 19 - .../providers/inference/remote_azure.md | 29 - .../providers/inference/remote_bedrock.md | 28 - .../providers/inference/remote_cerebras.md | 21 - .../providers/inference/remote_databricks.md | 21 - .../providers/inference/remote_fireworks.md | 22 - .../providers/inference/remote_gemini.md | 19 - .../source/providers/inference/remote_groq.md | 21 - .../providers/inference/remote_hf_endpoint.md | 21 - .../inference/remote_hf_serverless.md | 21 - .../inference/remote_llama-openai-compat.md | 21 - .../providers/inference/remote_nvidia.md | 24 - .../providers/inference/remote_ollama.md | 20 - .../providers/inference/remote_openai.md | 21 - .../providers/inference/remote_passthrough.md | 21 - .../providers/inference/remote_runpod.md | 21 - .../remote_sambanova-openai-compat.md | 21 - .../providers/inference/remote_sambanova.md | 21 - docs/source/providers/inference/remote_tgi.md | 19 - .../providers/inference/remote_together.md | 22 - .../providers/inference/remote_vertexai.md | 40 - 
.../source/providers/inference/remote_vllm.md | 26 - .../providers/inference/remote_watsonx.md | 24 - docs/source/providers/openai.md | 193 ----- docs/source/providers/post_training/index.md | 16 - .../post_training/inline_huggingface-cpu.md | 41 - .../post_training/inline_huggingface-gpu.md | 41 - .../post_training/inline_huggingface.md | 41 - .../post_training/inline_torchtune-cpu.md | 20 - .../post_training/inline_torchtune-gpu.md | 20 - .../post_training/inline_torchtune.md | 20 - .../providers/post_training/remote_nvidia.md | 28 - docs/source/providers/safety/index.md | 18 - .../providers/safety/inline_code-scanner.md | 13 - .../providers/safety/inline_llama-guard.md | 19 - .../providers/safety/inline_prompt-guard.md | 19 - .../source/providers/safety/remote_bedrock.md | 28 - docs/source/providers/safety/remote_nvidia.md | 21 - .../providers/safety/remote_sambanova.md | 21 - docs/source/providers/scoring/index.md | 15 - docs/source/providers/scoring/inline_basic.md | 13 - .../providers/scoring/inline_braintrust.md | 19 - .../providers/scoring/inline_llm-as-judge.md | 13 - docs/source/providers/telemetry/index.md | 13 - .../telemetry/inline_meta-reference.md | 25 - docs/source/providers/tool_runtime/index.md | 18 - .../tool_runtime/inline_rag-runtime.md | 13 - .../tool_runtime/remote_bing-search.md | 20 - .../tool_runtime/remote_brave-search.md | 21 - .../remote_model-context-protocol.md | 13 - .../tool_runtime/remote_tavily-search.md | 21 - .../tool_runtime/remote_wolfram-alpha.md | 19 - docs/source/providers/vector_io/index.md | 24 - .../providers/vector_io/inline_chromadb.md | 56 -- .../providers/vector_io/inline_faiss.md | 62 -- .../vector_io/inline_meta-reference.md | 27 - .../providers/vector_io/inline_milvus.md | 26 - .../providers/vector_io/inline_qdrant.md | 65 -- .../providers/vector_io/inline_sqlite-vec.md | 220 ----- .../providers/vector_io/inline_sqlite_vec.md | 31 - .../providers/vector_io/remote_chromadb.md | 55 -- 
.../providers/vector_io/remote_milvus.md | 228 ----- .../providers/vector_io/remote_pgvector.md | 131 --- .../providers/vector_io/remote_qdrant.md | 34 - .../providers/vector_io/remote_weaviate.md | 55 -- docs/source/references/api_reference/index.md | 6 - .../references/evals_reference/index.md | 390 --------- .../resources/eval-concept.png | Bin 69484 -> 0 bytes .../evals_reference/resources/eval-flow.png | Bin 255305 -> 0 bytes docs/source/references/index.md | 18 - .../llama_cli_reference/download_models.md | 165 ---- .../references/llama_cli_reference/index.md | 276 ------ .../llama_stack_client_cli_reference.md | 589 ------------- .../references/python_sdk_reference/index.md | 462 ---------- 173 files changed, 12955 deletions(-) delete mode 100644 docs/source/advanced_apis/eval/index.md delete mode 100644 docs/source/advanced_apis/eval/inline_meta-reference.md delete mode 100644 docs/source/advanced_apis/eval/remote_nvidia.md delete mode 100644 docs/source/advanced_apis/evaluation_concepts.md delete mode 100644 docs/source/advanced_apis/index.md delete mode 100644 docs/source/advanced_apis/post_training/huggingface.md delete mode 100644 docs/source/advanced_apis/post_training/index.md delete mode 100644 docs/source/advanced_apis/post_training/inline_huggingface.md delete mode 100644 docs/source/advanced_apis/post_training/inline_torchtune.md delete mode 100644 docs/source/advanced_apis/post_training/nvidia_nemo.md delete mode 100644 docs/source/advanced_apis/post_training/remote_nvidia.md delete mode 100644 docs/source/advanced_apis/post_training/torchtune.md delete mode 100644 docs/source/advanced_apis/scoring/index.md delete mode 100644 docs/source/advanced_apis/scoring/inline_basic.md delete mode 100644 docs/source/advanced_apis/scoring/inline_braintrust.md delete mode 100644 docs/source/advanced_apis/scoring/inline_llm-as-judge.md delete mode 100644 docs/source/apis/api_leveling.md delete mode 100644 docs/source/apis/external.md delete mode 100644 
docs/source/building_applications/agent.md delete mode 100644 docs/source/building_applications/agent_execution_loop.md delete mode 100644 docs/source/building_applications/evals.md delete mode 100644 docs/source/building_applications/index.md delete mode 100644 docs/source/building_applications/playground/index.md delete mode 100644 docs/source/building_applications/rag.md delete mode 100644 docs/source/building_applications/rag.png delete mode 100644 docs/source/building_applications/responses_vs_agents.md delete mode 100644 docs/source/building_applications/safety.md delete mode 100644 docs/source/building_applications/telemetry.md delete mode 100644 docs/source/building_applications/tools.md delete mode 100644 docs/source/concepts/api_providers.md delete mode 100644 docs/source/concepts/apis.md delete mode 100644 docs/source/concepts/architecture.md delete mode 100644 docs/source/concepts/distributions.md delete mode 100644 docs/source/concepts/index.md delete mode 100644 docs/source/concepts/resources.md delete mode 100644 docs/source/conf.py delete mode 100644 docs/source/contributing/index.md delete mode 100644 docs/source/contributing/new_api_provider.md delete mode 100644 docs/source/contributing/new_vector_database.md delete mode 100644 docs/source/contributing/testing/record-replay.md delete mode 100644 docs/source/deploying/index.md delete mode 100644 docs/source/deploying/kubernetes_deployment.md delete mode 100644 docs/source/distributions/building_distro.md delete mode 100644 docs/source/distributions/configuration.md delete mode 100644 docs/source/distributions/customizing_run_yaml.md delete mode 100755 docs/source/distributions/eks/apply.sh delete mode 100644 docs/source/distributions/eks/gp3-topology-aware.yaml delete mode 100644 docs/source/distributions/importing_as_library.md delete mode 100644 docs/source/distributions/index.md delete mode 100755 docs/source/distributions/k8s/apply.sh delete mode 100644 
docs/source/distributions/k8s/chroma-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/hf-token-secret.yaml.template delete mode 100644 docs/source/distributions/k8s/ingress-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/postgres-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/stack-configmap.yaml delete mode 100644 docs/source/distributions/k8s/stack-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/stack_run_config.yaml delete mode 100644 docs/source/distributions/k8s/ui-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/vllm-k8s.yaml.template delete mode 100644 docs/source/distributions/k8s/vllm-safety-k8s.yaml.template delete mode 100644 docs/source/distributions/list_of_distributions.md delete mode 100644 docs/source/distributions/ondevice_distro/android_sdk.md delete mode 100644 docs/source/distributions/ondevice_distro/ios_sdk.md delete mode 100644 docs/source/distributions/remote_hosted_distro/index.md delete mode 100644 docs/source/distributions/remote_hosted_distro/watsonx.md delete mode 100644 docs/source/distributions/self_hosted_distro/dell-tgi.md delete mode 100644 docs/source/distributions/self_hosted_distro/dell.md delete mode 100644 docs/source/distributions/self_hosted_distro/meta-reference-gpu.md delete mode 100644 docs/source/distributions/self_hosted_distro/nvidia.md delete mode 100644 docs/source/distributions/self_hosted_distro/passthrough.md delete mode 100644 docs/source/distributions/self_hosted_distro/starter.md delete mode 100644 docs/source/distributions/starting_llama_stack_server.md delete mode 100644 docs/source/getting_started/demo_script.py delete mode 100644 docs/source/getting_started/detailed_tutorial.md delete mode 100644 docs/source/getting_started/index.md delete mode 100644 docs/source/getting_started/libraries.md delete mode 100644 docs/source/getting_started/quickstart.md delete mode 100644 docs/source/index.md delete mode 100644 
docs/source/providers/agents/index.md delete mode 100644 docs/source/providers/agents/inline_meta-reference.md delete mode 100644 docs/source/providers/batches/index.md delete mode 100644 docs/source/providers/batches/inline_reference.md delete mode 100644 docs/source/providers/datasetio/index.md delete mode 100644 docs/source/providers/datasetio/inline_localfs.md delete mode 100644 docs/source/providers/datasetio/remote_huggingface.md delete mode 100644 docs/source/providers/datasetio/remote_nvidia.md delete mode 100644 docs/source/providers/eval/index.md delete mode 100644 docs/source/providers/eval/inline_meta-reference.md delete mode 100644 docs/source/providers/eval/remote_nvidia.md delete mode 100644 docs/source/providers/external/external-providers-guide.md delete mode 100644 docs/source/providers/external/external-providers-list.md delete mode 100644 docs/source/providers/external/index.md delete mode 100644 docs/source/providers/files/index.md delete mode 100644 docs/source/providers/files/inline_localfs.md delete mode 100644 docs/source/providers/files/remote_s3.md delete mode 100644 docs/source/providers/index.md delete mode 100644 docs/source/providers/inference/index.md delete mode 100644 docs/source/providers/inference/inline_meta-reference.md delete mode 100644 docs/source/providers/inference/inline_sentence-transformers.md delete mode 100644 docs/source/providers/inference/remote_anthropic.md delete mode 100644 docs/source/providers/inference/remote_azure.md delete mode 100644 docs/source/providers/inference/remote_bedrock.md delete mode 100644 docs/source/providers/inference/remote_cerebras.md delete mode 100644 docs/source/providers/inference/remote_databricks.md delete mode 100644 docs/source/providers/inference/remote_fireworks.md delete mode 100644 docs/source/providers/inference/remote_gemini.md delete mode 100644 docs/source/providers/inference/remote_groq.md delete mode 100644 docs/source/providers/inference/remote_hf_endpoint.md delete mode 
100644 docs/source/providers/inference/remote_hf_serverless.md delete mode 100644 docs/source/providers/inference/remote_llama-openai-compat.md delete mode 100644 docs/source/providers/inference/remote_nvidia.md delete mode 100644 docs/source/providers/inference/remote_ollama.md delete mode 100644 docs/source/providers/inference/remote_openai.md delete mode 100644 docs/source/providers/inference/remote_passthrough.md delete mode 100644 docs/source/providers/inference/remote_runpod.md delete mode 100644 docs/source/providers/inference/remote_sambanova-openai-compat.md delete mode 100644 docs/source/providers/inference/remote_sambanova.md delete mode 100644 docs/source/providers/inference/remote_tgi.md delete mode 100644 docs/source/providers/inference/remote_together.md delete mode 100644 docs/source/providers/inference/remote_vertexai.md delete mode 100644 docs/source/providers/inference/remote_vllm.md delete mode 100644 docs/source/providers/inference/remote_watsonx.md delete mode 100644 docs/source/providers/openai.md delete mode 100644 docs/source/providers/post_training/index.md delete mode 100644 docs/source/providers/post_training/inline_huggingface-cpu.md delete mode 100644 docs/source/providers/post_training/inline_huggingface-gpu.md delete mode 100644 docs/source/providers/post_training/inline_huggingface.md delete mode 100644 docs/source/providers/post_training/inline_torchtune-cpu.md delete mode 100644 docs/source/providers/post_training/inline_torchtune-gpu.md delete mode 100644 docs/source/providers/post_training/inline_torchtune.md delete mode 100644 docs/source/providers/post_training/remote_nvidia.md delete mode 100644 docs/source/providers/safety/index.md delete mode 100644 docs/source/providers/safety/inline_code-scanner.md delete mode 100644 docs/source/providers/safety/inline_llama-guard.md delete mode 100644 docs/source/providers/safety/inline_prompt-guard.md delete mode 100644 docs/source/providers/safety/remote_bedrock.md delete mode 100644 
docs/source/providers/safety/remote_nvidia.md delete mode 100644 docs/source/providers/safety/remote_sambanova.md delete mode 100644 docs/source/providers/scoring/index.md delete mode 100644 docs/source/providers/scoring/inline_basic.md delete mode 100644 docs/source/providers/scoring/inline_braintrust.md delete mode 100644 docs/source/providers/scoring/inline_llm-as-judge.md delete mode 100644 docs/source/providers/telemetry/index.md delete mode 100644 docs/source/providers/telemetry/inline_meta-reference.md delete mode 100644 docs/source/providers/tool_runtime/index.md delete mode 100644 docs/source/providers/tool_runtime/inline_rag-runtime.md delete mode 100644 docs/source/providers/tool_runtime/remote_bing-search.md delete mode 100644 docs/source/providers/tool_runtime/remote_brave-search.md delete mode 100644 docs/source/providers/tool_runtime/remote_model-context-protocol.md delete mode 100644 docs/source/providers/tool_runtime/remote_tavily-search.md delete mode 100644 docs/source/providers/tool_runtime/remote_wolfram-alpha.md delete mode 100644 docs/source/providers/vector_io/index.md delete mode 100644 docs/source/providers/vector_io/inline_chromadb.md delete mode 100644 docs/source/providers/vector_io/inline_faiss.md delete mode 100644 docs/source/providers/vector_io/inline_meta-reference.md delete mode 100644 docs/source/providers/vector_io/inline_milvus.md delete mode 100644 docs/source/providers/vector_io/inline_qdrant.md delete mode 100644 docs/source/providers/vector_io/inline_sqlite-vec.md delete mode 100644 docs/source/providers/vector_io/inline_sqlite_vec.md delete mode 100644 docs/source/providers/vector_io/remote_chromadb.md delete mode 100644 docs/source/providers/vector_io/remote_milvus.md delete mode 100644 docs/source/providers/vector_io/remote_pgvector.md delete mode 100644 docs/source/providers/vector_io/remote_qdrant.md delete mode 100644 docs/source/providers/vector_io/remote_weaviate.md delete mode 100644 
docs/source/references/api_reference/index.md delete mode 100644 docs/source/references/evals_reference/index.md delete mode 100644 docs/source/references/evals_reference/resources/eval-concept.png delete mode 100644 docs/source/references/evals_reference/resources/eval-flow.png delete mode 100644 docs/source/references/index.md delete mode 100644 docs/source/references/llama_cli_reference/download_models.md delete mode 100644 docs/source/references/llama_cli_reference/index.md delete mode 100644 docs/source/references/llama_stack_client_cli_reference.md delete mode 100644 docs/source/references/python_sdk_reference/index.md diff --git a/docs/source/advanced_apis/eval/index.md b/docs/source/advanced_apis/eval/index.md deleted file mode 100644 index 330380670..000000000 --- a/docs/source/advanced_apis/eval/index.md +++ /dev/null @@ -1,6 +0,0 @@ -# Eval Providers - -This section contains documentation for all available providers for the **eval** API. - -- [inline::meta-reference](inline_meta-reference.md) -- [remote::nvidia](remote_nvidia.md) \ No newline at end of file diff --git a/docs/source/advanced_apis/eval/inline_meta-reference.md b/docs/source/advanced_apis/eval/inline_meta-reference.md deleted file mode 100644 index 5bec89cfc..000000000 --- a/docs/source/advanced_apis/eval/inline_meta-reference.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -orphan: true ---- - -# inline::meta-reference - -## Description - -Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/meta_reference_eval.db - -``` - diff --git a/docs/source/advanced_apis/eval/remote_nvidia.md b/docs/source/advanced_apis/eval/remote_nvidia.md deleted file mode 100644 index ab91767d6..000000000 --- a/docs/source/advanced_apis/eval/remote_nvidia.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -orphan: true ---- - -# remote::nvidia - -## Description - -NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `evaluator_url` | `` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service | - -## Sample Configuration - -```yaml -evaluator_url: ${env.NVIDIA_EVALUATOR_URL:=http://localhost:7331} - -``` - diff --git a/docs/source/advanced_apis/evaluation_concepts.md b/docs/source/advanced_apis/evaluation_concepts.md deleted file mode 100644 index 52ad53ece..000000000 --- a/docs/source/advanced_apis/evaluation_concepts.md +++ /dev/null @@ -1,77 +0,0 @@ -## Evaluation Concepts - -The Llama Stack Evaluation flow allows you to run evaluations on your GenAI application datasets or pre-registered benchmarks. - -We introduce a set of APIs in Llama Stack for supporting running evaluations of LLM applications. -- `/datasetio` + `/datasets` API -- `/scoring` + `/scoring_functions` API -- `/eval` + `/benchmarks` API - -This guide goes over the sets of APIs and developer experience flow of using Llama Stack to run evaluations for different use cases. 
Checkout our Colab notebook on working examples with evaluations [here](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing). - - -The Evaluation APIs are associated with a set of Resources. Please visit the Resources section in our [Core Concepts](../concepts/index.md) guide for better high-level understanding. - -- **DatasetIO**: defines interface with datasets and data loaders. - - Associated with `Dataset` resource. -- **Scoring**: evaluate outputs of the system. - - Associated with `ScoringFunction` resource. We provide a suite of out-of-the box scoring functions and also the ability for you to add custom evaluators. These scoring functions are the core part of defining an evaluation task to output evaluation metrics. -- **Eval**: generate outputs (via Inference or Agents) and perform scoring. - - Associated with `Benchmark` resource. - - -### Open-benchmark Eval - -#### List of open-benchmarks Llama Stack support - -Llama stack pre-registers several popular open-benchmarks to easily evaluate model perfomance via CLI. - -The list of open-benchmarks we currently support: -- [MMLU-COT](https://arxiv.org/abs/2009.03300) (Measuring Massive Multitask Language Understanding): Benchmark designed to comprehensively evaluate the breadth and depth of a model's academic and professional understanding -- [GPQA-COT](https://arxiv.org/abs/2311.12022) (A Graduate-Level Google-Proof Q&A Benchmark): A challenging benchmark of 448 multiple-choice questions written by domain experts in biology, physics, and chemistry. -- [SimpleQA](https://openai.com/index/introducing-simpleqa/): Benchmark designed to access models to answer short, fact-seeking questions. -- [MMMU](https://arxiv.org/abs/2311.16502) (A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI)]: Benchmark designed to evaluate multimodal models. 
- - -You can follow this [contributing guide](../references/evals_reference/index.md#open-benchmark-contributing-guide) to add more open-benchmarks to Llama Stack - -#### Run evaluation on open-benchmarks via CLI - -We have built-in functionality to run the supported open-benckmarks using llama-stack-client CLI - -#### Spin up Llama Stack server - -Spin up llama stack server with 'open-benchmark' template -``` -llama stack run llama_stack/distributions/open-benchmark/run.yaml - -``` - -#### Run eval CLI -There are 3 necessary inputs to run a benchmark eval -- `list of benchmark_ids`: The list of benchmark ids to run evaluation on -- `model-id`: The model id to evaluate on -- `output_dir`: Path to store the evaluate results -``` -llama-stack-client eval run-benchmark ... \ ---model_id \ ---output_dir \ -``` - -You can run -``` -llama-stack-client eval run-benchmark help -``` -to see the description of all the flags that eval run-benchmark has - - -In the output log, you can find the file path that has your evaluation results. Open that file and you can see you aggregate -evaluation results over there. - - - -#### What's Next? - -- Check out our Colab notebook on working examples with running benchmark evaluations [here](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb#scrollTo=mxLCsP4MvFqP). -- Check out our [Building Applications - Evaluation](../building_applications/evals.md) guide for more details on how to use the Evaluation APIs to evaluate your applications. -- Check out our [Evaluation Reference](../references/evals_reference/index.md) for more details on the APIs. diff --git a/docs/source/advanced_apis/index.md b/docs/source/advanced_apis/index.md deleted file mode 100644 index b10672c29..000000000 --- a/docs/source/advanced_apis/index.md +++ /dev/null @@ -1,33 +0,0 @@ -# Advanced APIs - -## Post-training -Fine-tunes a model. 
- -```{toctree} -:maxdepth: 1 - -post_training/index -``` - -## Eval -Generates outputs (via Inference or Agents) and perform scoring. - -```{toctree} -:maxdepth: 1 - -eval/index -``` - -```{include} evaluation_concepts.md -:start-after: ## Evaluation Concepts -``` - -## Scoring -Evaluates the outputs of the system. - -```{toctree} -:maxdepth: 1 - -scoring/index -``` - diff --git a/docs/source/advanced_apis/post_training/huggingface.md b/docs/source/advanced_apis/post_training/huggingface.md deleted file mode 100644 index a7609d6da..000000000 --- a/docs/source/advanced_apis/post_training/huggingface.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -orphan: true ---- -# HuggingFace SFTTrainer - -[HuggingFace SFTTrainer](https://huggingface.co/docs/trl/en/sft_trainer) is an inline post training provider for Llama Stack. It allows you to run supervised fine tuning on a variety of models using many datasets - -## Features - -- Simple access through the post_training API -- Fully integrated with Llama Stack -- GPU support, CPU support, and MPS support (MacOS Metal Performance Shaders) - -## Usage - -To use the HF SFTTrainer in your Llama Stack project, follow these steps: - -1. Configure your Llama Stack project to use this provider. -2. Kick off a SFT job using the Llama Stack post_training API. 
- -## Setup - -You can access the HuggingFace trainer via the `ollama` distribution: - -```bash -llama stack build --distro starter --image-type venv -llama stack run --image-type venv ~/.llama/distributions/ollama/ollama-run.yaml -``` - -## Run Training - -You can access the provider and the `supervised_fine_tune` method via the post_training API: - -```python -import time -import uuid - - -from llama_stack_client.types import ( - post_training_supervised_fine_tune_params, - algorithm_config_param, -) - - -def create_http_client(): - from llama_stack_client import LlamaStackClient - - return LlamaStackClient(base_url="http://localhost:8321") - - -client = create_http_client() - -# Example Dataset -client.datasets.register( - purpose="post-training/messages", - source={ - "type": "uri", - "uri": "huggingface://datasets/llamastack/simpleqa?split=train", - }, - dataset_id="simpleqa", -) - -training_config = post_training_supervised_fine_tune_params.TrainingConfig( - data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig( - batch_size=32, - data_format="instruct", - dataset_id="simpleqa", - shuffle=True, - ), - gradient_accumulation_steps=1, - max_steps_per_epoch=0, - max_validation_steps=1, - n_epochs=4, -) - -algorithm_config = algorithm_config_param.LoraFinetuningConfig( # this config is also currently mandatory but should not be - alpha=1, - apply_lora_to_mlp=True, - apply_lora_to_output=False, - lora_attn_modules=["q_proj"], - rank=1, - type="LoRA", -) - -job_uuid = f"test-job{uuid.uuid4()}" - -# Example Model -training_model = "ibm-granite/granite-3.3-8b-instruct" - -start_time = time.time() -response = client.post_training.supervised_fine_tune( - job_uuid=job_uuid, - logger_config={}, - model=training_model, - hyperparam_search_config={}, - training_config=training_config, - algorithm_config=algorithm_config, - checkpoint_dir="output", -) -print("Job: ", job_uuid) - - -# Wait for the job to complete! 
-while True: - status = client.post_training.job.status(job_uuid=job_uuid) - if not status: - print("Job not found") - break - - print(status) - if status.status == "completed": - break - - print("Waiting for job to complete...") - time.sleep(5) - -end_time = time.time() -print("Job completed in", end_time - start_time, "seconds!") - -print("Artifacts:") -print(client.post_training.job.artifacts(job_uuid=job_uuid)) -``` diff --git a/docs/source/advanced_apis/post_training/index.md b/docs/source/advanced_apis/post_training/index.md deleted file mode 100644 index 35d10d14b..000000000 --- a/docs/source/advanced_apis/post_training/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# Post_Training Providers - -This section contains documentation for all available providers for the **post_training** API. - -- [inline::huggingface](inline_huggingface.md) -- [inline::torchtune](inline_torchtune.md) -- [remote::nvidia](remote_nvidia.md) \ No newline at end of file diff --git a/docs/source/advanced_apis/post_training/inline_huggingface.md b/docs/source/advanced_apis/post_training/inline_huggingface.md deleted file mode 100644 index 6536b4f8c..000000000 --- a/docs/source/advanced_apis/post_training/inline_huggingface.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -orphan: true ---- - -# inline::huggingface - -## Description - -HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `device` | `` | No | cuda | | -| `distributed_backend` | `Literal['fsdp', 'deepspeed'` | No | | | -| `checkpoint_format` | `Literal['full_state', 'huggingface'` | No | huggingface | | -| `chat_template` | `` | No | | -| `model_specific_config` | `` | No | {'trust_remote_code': True, 'attn_implementation': 'sdpa'} | | -| `max_seq_length` | `` | No | 2048 | | -| `gradient_checkpointing` | `` | No | False | | -| `save_total_limit` | `` | No | 3 | | -| `logging_steps` | `` | No | 10 | | -| `warmup_ratio` | `` | No | 0.1 | | -| `weight_decay` | `` | No | 0.01 | | -| `dataloader_num_workers` | `` | No | 4 | | -| `dataloader_pin_memory` | `` | No | True | | - -## Sample Configuration - -```yaml -checkpoint_format: huggingface -distributed_backend: null -device: cpu - -``` - -[Find more detailed information here!](huggingface.md) - - diff --git a/docs/source/advanced_apis/post_training/inline_torchtune.md b/docs/source/advanced_apis/post_training/inline_torchtune.md deleted file mode 100644 index 617975b0d..000000000 --- a/docs/source/advanced_apis/post_training/inline_torchtune.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -orphan: true ---- - -# inline::torchtune - -## Description - -TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `torch_seed` | `int \| None` | No | | | -| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | | - -## Sample Configuration - -```yaml -checkpoint_format: meta - -``` - -[Find more detailed information here!](torchtune.md) diff --git a/docs/source/advanced_apis/post_training/nvidia_nemo.md b/docs/source/advanced_apis/post_training/nvidia_nemo.md deleted file mode 100644 index 1a7adbe16..000000000 --- a/docs/source/advanced_apis/post_training/nvidia_nemo.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -orphan: true ---- -# NVIDIA NEMO - -[NVIDIA NEMO](https://developer.nvidia.com/nemo-framework) is a remote post training provider for Llama Stack. It provides enterprise-grade fine-tuning capabilities through NVIDIA's NeMo Customizer service. - -## Features - -- Enterprise-grade fine-tuning capabilities -- Support for LoRA and SFT fine-tuning -- Integration with NVIDIA's NeMo Customizer service -- Support for various NVIDIA-optimized models -- Efficient training with NVIDIA hardware acceleration - -## Usage - -To use NVIDIA NEMO in your Llama Stack project, follow these steps: - -1. Configure your Llama Stack project to use this provider. -2. Set up your NVIDIA API credentials. -3. Kick off a fine-tuning job using the Llama Stack post_training API. 
- -## Setup - -You'll need to set the following environment variables: - -```bash -export NVIDIA_API_KEY="your-api-key" -export NVIDIA_DATASET_NAMESPACE="default" -export NVIDIA_CUSTOMIZER_URL="your-customizer-url" -export NVIDIA_PROJECT_ID="your-project-id" -export NVIDIA_OUTPUT_MODEL_DIR="your-output-model-dir" -``` - -## Run Training - -You can access the provider and the `supervised_fine_tune` method via the post_training API: - -```python -import time -import uuid - -from llama_stack_client.types import ( - post_training_supervised_fine_tune_params, - algorithm_config_param, -) - - -def create_http_client(): - from llama_stack_client import LlamaStackClient - - return LlamaStackClient(base_url="http://localhost:8321") - - -client = create_http_client() - -# Example Dataset -client.datasets.register( - purpose="post-training/messages", - source={ - "type": "uri", - "uri": "huggingface://datasets/llamastack/simpleqa?split=train", - }, - dataset_id="simpleqa", -) - -training_config = post_training_supervised_fine_tune_params.TrainingConfig( - data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig( - batch_size=8, # Default batch size for NEMO - data_format="instruct", - dataset_id="simpleqa", - shuffle=True, - ), - n_epochs=50, # Default epochs for NEMO - optimizer_config=post_training_supervised_fine_tune_params.TrainingConfigOptimizerConfig( - lr=0.0001, # Default learning rate - weight_decay=0.01, # NEMO-specific parameter - ), - # NEMO-specific parameters - log_every_n_steps=None, - val_check_interval=0.25, - sequence_packing_enabled=False, - hidden_dropout=None, - attention_dropout=None, - ffn_dropout=None, -) - -algorithm_config = algorithm_config_param.LoraFinetuningConfig( - alpha=16, # Default alpha for NEMO - type="LoRA", -) - -job_uuid = f"test-job{uuid.uuid4()}" - -# Example Model - must be a supported NEMO model -training_model = "meta/llama-3.1-8b-instruct" - -start_time = time.time() -response = 
client.post_training.supervised_fine_tune( - job_uuid=job_uuid, - logger_config={}, - model=training_model, - hyperparam_search_config={}, - training_config=training_config, - algorithm_config=algorithm_config, - checkpoint_dir="output", -) -print("Job: ", job_uuid) - -# Wait for the job to complete! -while True: - status = client.post_training.job.status(job_uuid=job_uuid) - if not status: - print("Job not found") - break - - print(status) - if status.status == "completed": - break - - print("Waiting for job to complete...") - time.sleep(5) - -end_time = time.time() -print("Job completed in", end_time - start_time, "seconds!") - -print("Artifacts:") -print(client.post_training.job.artifacts(job_uuid=job_uuid)) -``` - -## Supported Models - -Currently supports the following models: -- meta/llama-3.1-8b-instruct -- meta/llama-3.2-1b-instruct - -## Supported Parameters - -### TrainingConfig -- n_epochs (default: 50) -- data_config -- optimizer_config -- log_every_n_steps -- val_check_interval (default: 0.25) -- sequence_packing_enabled (default: False) -- hidden_dropout (0.0-1.0) -- attention_dropout (0.0-1.0) -- ffn_dropout (0.0-1.0) - -### DataConfig -- dataset_id -- batch_size (default: 8) - -### OptimizerConfig -- lr (default: 0.0001) -- weight_decay (default: 0.01) - -### LoRA Config -- alpha (default: 16) -- type (must be "LoRA") - -Note: Some parameters from the standard Llama Stack API are not supported and will be ignored with a warning. diff --git a/docs/source/advanced_apis/post_training/remote_nvidia.md b/docs/source/advanced_apis/post_training/remote_nvidia.md deleted file mode 100644 index 9840fa3c4..000000000 --- a/docs/source/advanced_apis/post_training/remote_nvidia.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -orphan: true ---- - -# remote::nvidia - -## Description - -NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The NVIDIA API key. | -| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. | -| `project_id` | `str \| None` | No | test-example-model@v1 | The NVIDIA project ID. | -| `customizer_url` | `str \| None` | No | | Base URL for the NeMo Customizer API | -| `timeout` | `` | No | 300 | Timeout for the NVIDIA Post Training API | -| `max_retries` | `` | No | 3 | Maximum number of retries for the NVIDIA Post Training API | -| `output_model_dir` | `` | No | test-example-model@v1 | Directory to save the output model | - -## Sample Configuration - -```yaml -api_key: ${env.NVIDIA_API_KEY:=} -dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:=default} -project_id: ${env.NVIDIA_PROJECT_ID:=test-project} -customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:=http://nemo.test} - -``` - diff --git a/docs/source/advanced_apis/post_training/torchtune.md b/docs/source/advanced_apis/post_training/torchtune.md deleted file mode 100644 index ef72505b1..000000000 --- a/docs/source/advanced_apis/post_training/torchtune.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -orphan: true ---- -# TorchTune - -[TorchTune](https://github.com/pytorch/torchtune) is an inline post training provider for Llama Stack. It provides a simple and efficient way to fine-tune language models using PyTorch. - -## Features - -- Simple access through the post_training API -- Fully integrated with Llama Stack -- GPU support and single device capabilities. -- Support for LoRA - -## Usage - -To use TorchTune in your Llama Stack project, follow these steps: - -1. Configure your Llama Stack project to use this provider. -2. Kick off a fine-tuning job using the Llama Stack post_training API. 
- -## Setup - -You can access the TorchTune trainer by writing your own yaml pointing to the provider: - -```yaml -post_training: - - provider_id: torchtune - provider_type: inline::torchtune - config: {} -``` - -you can then build and run your own stack with this provider. - -## Run Training - -You can access the provider and the `supervised_fine_tune` method via the post_training API: - -```python -import time -import uuid - -from llama_stack_client.types import ( - post_training_supervised_fine_tune_params, - algorithm_config_param, -) - - -def create_http_client(): - from llama_stack_client import LlamaStackClient - - return LlamaStackClient(base_url="http://localhost:8321") - - -client = create_http_client() - -# Example Dataset -client.datasets.register( - purpose="post-training/messages", - source={ - "type": "uri", - "uri": "huggingface://datasets/llamastack/simpleqa?split=train", - }, - dataset_id="simpleqa", -) - -training_config = post_training_supervised_fine_tune_params.TrainingConfig( - data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig( - batch_size=32, - data_format="instruct", - dataset_id="simpleqa", - shuffle=True, - ), - gradient_accumulation_steps=1, - max_steps_per_epoch=0, - max_validation_steps=1, - n_epochs=4, -) - -algorithm_config = algorithm_config_param.LoraFinetuningConfig( - alpha=1, - apply_lora_to_mlp=True, - apply_lora_to_output=False, - lora_attn_modules=["q_proj"], - rank=1, - type="LoRA", -) - -job_uuid = f"test-job{uuid.uuid4()}" - -# Example Model -training_model = "meta-llama/Llama-2-7b-hf" - -start_time = time.time() -response = client.post_training.supervised_fine_tune( - job_uuid=job_uuid, - logger_config={}, - model=training_model, - hyperparam_search_config={}, - training_config=training_config, - algorithm_config=algorithm_config, - checkpoint_dir="output", -) -print("Job: ", job_uuid) - -# Wait for the job to complete! 
-while True: - status = client.post_training.job.status(job_uuid=job_uuid) - if not status: - print("Job not found") - break - - print(status) - if status.status == "completed": - break - - print("Waiting for job to complete...") - time.sleep(5) - -end_time = time.time() -print("Job completed in", end_time - start_time, "seconds!") - -print("Artifacts:") -print(client.post_training.job.artifacts(job_uuid=job_uuid)) -``` diff --git a/docs/source/advanced_apis/scoring/index.md b/docs/source/advanced_apis/scoring/index.md deleted file mode 100644 index 3cf7af537..000000000 --- a/docs/source/advanced_apis/scoring/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# Scoring Providers - -This section contains documentation for all available providers for the **scoring** API. - -- [inline::basic](inline_basic.md) -- [inline::braintrust](inline_braintrust.md) -- [inline::llm-as-judge](inline_llm-as-judge.md) \ No newline at end of file diff --git a/docs/source/advanced_apis/scoring/inline_basic.md b/docs/source/advanced_apis/scoring/inline_basic.md deleted file mode 100644 index b56b36013..000000000 --- a/docs/source/advanced_apis/scoring/inline_basic.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -orphan: true ---- - -# inline::basic - -## Description - -Basic scoring provider for simple evaluation metrics and scoring functions. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/advanced_apis/scoring/inline_braintrust.md b/docs/source/advanced_apis/scoring/inline_braintrust.md deleted file mode 100644 index d1278217c..000000000 --- a/docs/source/advanced_apis/scoring/inline_braintrust.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -orphan: true ---- - -# inline::braintrust - -## Description - -Braintrust scoring provider for evaluation and scoring using the Braintrust platform. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `openai_api_key` | `str \| None` | No | | The OpenAI API Key | - -## Sample Configuration - -```yaml -openai_api_key: ${env.OPENAI_API_KEY:=} - -``` - diff --git a/docs/source/advanced_apis/scoring/inline_llm-as-judge.md b/docs/source/advanced_apis/scoring/inline_llm-as-judge.md deleted file mode 100644 index c7fcddf37..000000000 --- a/docs/source/advanced_apis/scoring/inline_llm-as-judge.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -orphan: true ---- - -# inline::llm-as-judge - -## Description - -LLM-as-judge scoring provider that uses language models to evaluate and score responses. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/apis/api_leveling.md b/docs/source/apis/api_leveling.md deleted file mode 100644 index bb012030f..000000000 --- a/docs/source/apis/api_leveling.md +++ /dev/null @@ -1,94 +0,0 @@ -# Llama Stack API Stability Leveling - -In order to provide a stable experience in Llama Stack, the various APIs need different stability levels indicating the level of support, backwards compatability, and overall production readiness. - -## Different Levels - -### v1alpha - -- Little to no expectation of support between versions -- Breaking changes are permitted -- Datatypes and parameters can break -- Routes can be added and removed - -#### Graduation Criteria - -- an API can graduate from `v1alpha` to `v1beta` if the team has identified the extent of the non-optional routes and the shape of their parameters/return types for the API eg. `/v1/openai/chat/completions`. Optional types can change. -- CRUD must stay stable once in `v1beta`. This is a commitment to backward compatibility, guaranteeing that most code you write against the v1beta version will not break during future updates. 
We may make additive changes (like adding a new, optional field to a response), but we will not make breaking changes (like renaming an existing "modelName" field to "name", changing an ID's data type from an integer to a string, or altering an endpoint URL). -- for OpenAI APIs, a comparison to the OpenAI spec for the specific API can be done to ensure completeness. - -### v1beta - -- API routes remain consistent between versions -- Parameters and return types are not ensured between versions -- API, besides minor fixes and adjustments, should be _almost_ v1. Changes should not be drastic. - -#### Graduation Criteria - -- an API can graduate from `v1beta` to `v1` if the API surface and datatypes are complete as identified by the team. The parameters and return types that are mandatory for each route are stable. All aspects of graduating from `v1alpha1` to `v1beta` apply as well. -- Optional parameters, routes, or parts of the return type can be added after graduating to `v1` - -### v1 (stable) - -- Considered stable -- Backwards compatible between Z-streams - - Y-stream breaking changes must go through the proper approval and announcement process. -- Datatypes for a route and its return types cannot change between Z-streams - - Y-stream datatype changes should be sparing, unless the changes are additional net-new parameters -- Must have proper conformance testing as outlined in https://github.com/llamastack/llama-stack/issues/3237 - -### v2+ (Major Versions) - -Introducing a new major version like `/v2` is a significant and disruptive event that should be treated as a last resort. It is reserved for essential changes to a stable `/v1` API that are fundamentally backward-incompatible and cannot be implemented through additive, non-breaking changes or breaking changes across X/Y-Stream releases (x.y.z). 
- -If a `/v2` version is deemed absolutely necessary, it must adhere to the following protocol to ensure a sane and predictable transition for users: - -#### Lifecycle Progression - - A new major version must follow the same stability lifecycle as `/v1`. It will be introduced as `/v2alpha`, mature to `/v2beta`, and finally become stable as `/v2`. - -#### Coexistence: - -The new `/v2` API must be introduced alongside the existing `/v1` API and run in parallel. It must not replace the `/v1` API immediately. - -#### Deprecation Policy: - -When a `/v2` API is introduced, a clear and generous deprecation policy for the `/v1` API must be published simultaneously. This policy must outline the timeline for the eventual removal of the `/v1` API, giving users ample time to migrate. - -### API Stability vs. Provider Stability - -The leveling introduced in this document relates to the stability of the API and not specifically the providers within the API. - -Providers can iterate as much as they want on functionality as long as they work within the bounds of an API. If they need to change the API, then the API should not be `/v1`, or those breaking changes can only happen on a y-stream release basis. - -### Approval and Announcement Process for Breaking Changes - -- **PR Labeling**: Any pull request that introduces a breaking API change must be clearly labeled with `breaking-change`. -- **PR Title/Commit**: Any pull request that introduces a breaking API change must contain `BREAKING CHANGE` in the title and commit footer. Alternatively, the commit can include `!`, eg. `feat(api)!: title goes here` This is outlined in the [conventional commits documentation](https://www.conventionalcommits.org/en/v1.0.0/#specification) -- **Maintainer Review**: At least one maintainer must explicitly acknowledge the breaking change during review by applying the `breaking-change` label. An approval must come with this label or the acknowledgement this label has already been applied. 
-- **Announcement**: Breaking changes require inclusion in release notes and, if applicable, a separate communication (e.g., Discord, Github Issues, or GitHub Discussions) prior to release. - -If a PR has proper approvals, labels, and commit/title hygiene, the failing API conformance tests will be bypassed. - - -## Enforcement - -### Migration of API routes under `/v1alpha`, `/v1beta`, and `/v1` - -Instead of placing every API under `/v1`, any API that is not fully stable or complete should go under `/v1alpha` or `/v1beta`. For example, at the time of this writing, `post_training` belongs here, as well as any OpenAI-compatible API whose surface does not exactly match the upstream OpenAI API it mimics. - -This migration is crucial as we get Llama Stack in the hands of users who intend to productize various APIs. A clear view of what is stable and what is actively being developed will enable users to pick and choose various APIs to build their products on. - -This migration will be a breaking change for any API moving out of `/v1`. Ideally, this should happen before 0.3.0 and especially 1.0.0. - -### `x-stability` tags in the OpenAPI spec for oasdiff - -`x-stability` tags allow tools like oasdiff to enforce different rules for different stability levels; these tags should match the routes: [oasdiff stability](https://github.com/oasdiff/oasdiff/blob/main/docs/STABILITY.md) - -### Testing - -The testing of each stable API is already outlined in [issue #3237](https://github.com/llamastack/llama-stack/issues/3237) and is being worked on. These sorts of conformance tests should apply primarily to `/v1` APIs only, with `/v1alpha` and `/v1beta` having any tests the maintainers see fit as well as basic testing to ensure the routing works properly. 
- -### New APIs going forward - -Any subsequently introduced APIs should be introduced as `/v1alpha` \ No newline at end of file diff --git a/docs/source/apis/external.md b/docs/source/apis/external.md deleted file mode 100644 index 5831990b0..000000000 --- a/docs/source/apis/external.md +++ /dev/null @@ -1,392 +0,0 @@ -# External APIs - -Llama Stack supports external APIs that live outside of the main codebase. This allows you to: -- Create and maintain your own APIs independently -- Share APIs with others without contributing to the main codebase -- Keep API-specific code separate from the core Llama Stack code - -## Configuration - -To enable external APIs, you need to configure the `external_apis_dir` in your Llama Stack configuration. This directory should contain your external API specifications: - -```yaml -external_apis_dir: ~/.llama/apis.d/ -``` - -## Directory Structure - -The external APIs directory should follow this structure: - -``` -apis.d/ - custom_api1.yaml - custom_api2.yaml -``` - -Each YAML file in these directories defines an API specification. 
- -## API Specification - -Here's an example of an external API specification for a weather API: - -```yaml -module: weather -api_dependencies: - - inference -protocol: WeatherAPI -name: weather -pip_packages: - - llama-stack-api-weather -``` - -### API Specification Fields - -- `module`: Python module containing the API implementation -- `protocol`: Name of the protocol class for the API -- `name`: Name of the API -- `pip_packages`: List of pip packages to install the API, typically a single package - -## Required Implementation - -External APIs must expose a `available_providers()` function in their module that returns a list of provider names: - -```python -# llama_stack_api_weather/api.py -from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec - - -def available_providers() -> list[ProviderSpec]: - return [ - InlineProviderSpec( - api=Api.weather, - provider_type="inline::darksky", - pip_packages=[], - module="llama_stack_provider_darksky", - config_class="llama_stack_provider_darksky.DarkSkyWeatherImplConfig", - ), - ] -``` - -A Protocol class like so: - -```python -# llama_stack_api_weather/api.py -from typing import Protocol - -from llama_stack.schema_utils import webmethod - - -class WeatherAPI(Protocol): - """ - A protocol for the Weather API. - """ - - @webmethod(route="/locations", method="GET") - async def get_available_locations() -> dict[str, list[str]]: - """ - Get the available locations. - """ - ... -``` - -## Example: Custom API - -Here's a complete example of creating and using a custom API: - -1. First, create the API package: - -```bash -mkdir -p llama-stack-api-weather -cd llama-stack-api-weather -mkdir src/llama_stack_api_weather -git init -uv init -``` - -2. 
Edit `pyproject.toml`: - -```toml -[project] -name = "llama-stack-api-weather" -version = "0.1.0" -description = "Weather API for Llama Stack" -readme = "README.md" -requires-python = ">=3.12" -dependencies = ["llama-stack", "pydantic"] - -[build-system] -requires = ["setuptools"] -build-backend = "setuptools.build_meta" - -[tool.setuptools.packages.find] -where = ["src"] -include = ["llama_stack_api_weather", "llama_stack_api_weather.*"] -``` - -3. Create the initial files: - -```bash -touch src/llama_stack_api_weather/__init__.py -touch src/llama_stack_api_weather/api.py -``` - -```python -# llama-stack-api-weather/src/llama_stack_api_weather/__init__.py -"""Weather API for Llama Stack.""" - -from .api import WeatherAPI, available_providers - -__all__ = ["WeatherAPI", "available_providers"] -``` - -4. Create the API implementation: - -```python -# llama-stack-api-weather/src/llama_stack_api_weather/weather.py -from typing import Protocol - -from llama_stack.providers.datatypes import ( - AdapterSpec, - Api, - ProviderSpec, - RemoteProviderSpec, -) -from llama_stack.schema_utils import webmethod - - -def available_providers() -> list[ProviderSpec]: - return [ - RemoteProviderSpec( - api=Api.weather, - provider_type="remote::kaze", - config_class="llama_stack_provider_kaze.KazeProviderConfig", - adapter=AdapterSpec( - adapter_type="kaze", - module="llama_stack_provider_kaze", - pip_packages=["llama_stack_provider_kaze"], - config_class="llama_stack_provider_kaze.KazeProviderConfig", - ), - ), - ] - - -class WeatherProvider(Protocol): - """ - A protocol for the Weather API. - """ - - @webmethod(route="/weather/locations", method="GET") - async def get_available_locations() -> dict[str, list[str]]: - """ - Get the available locations. - """ - ... -``` - -5. Create the API specification: - -```yaml -# ~/.llama/apis.d/weather.yaml -module: llama_stack_api_weather -name: weather -pip_packages: ["llama-stack-api-weather"] -protocol: WeatherProvider - -``` - -6. 
Install the API package: - -```bash -uv pip install -e . -``` - -7. Configure Llama Stack to use external APIs: - -```yaml -version: "2" -image_name: "llama-stack-api-weather" -apis: - - weather -providers: {} -external_apis_dir: ~/.llama/apis.d -``` - -The API will now be available at `/v1/weather/locations`. - -## Example: custom provider for the weather API - -1. Create the provider package: - -```bash -mkdir -p llama-stack-provider-kaze -cd llama-stack-provider-kaze -uv init -``` - -2. Edit `pyproject.toml`: - -```toml -[project] -name = "llama-stack-provider-kaze" -version = "0.1.0" -description = "Kaze weather provider for Llama Stack" -readme = "README.md" -requires-python = ">=3.12" -dependencies = ["llama-stack", "pydantic", "aiohttp"] - -[build-system] -requires = ["setuptools"] -build-backend = "setuptools.build_meta" - -[tool.setuptools.packages.find] -where = ["src"] -include = ["llama_stack_provider_kaze", "llama_stack_provider_kaze.*"] -``` - -3. Create the initial files: - -```bash -touch src/llama_stack_provider_kaze/__init__.py -touch src/llama_stack_provider_kaze/kaze.py -``` - -4. 
Create the provider implementation: - - -Initialization function: - -```python -# llama-stack-provider-kaze/src/llama_stack_provider_kaze/__init__.py -"""Kaze weather provider for Llama Stack.""" - -from .config import KazeProviderConfig -from .kaze import WeatherKazeAdapter - -__all__ = ["KazeProviderConfig", "WeatherKazeAdapter"] - - -async def get_adapter_impl(config: KazeProviderConfig, _deps): - from .kaze import WeatherKazeAdapter - - impl = WeatherKazeAdapter(config) - await impl.initialize() - return impl -``` - -Configuration: - -```python -# llama-stack-provider-kaze/src/llama_stack_provider_kaze/config.py -from pydantic import BaseModel, Field - - -class KazeProviderConfig(BaseModel): - """Configuration for the Kaze weather provider.""" - - base_url: str = Field( - "https://api.kaze.io/v1", - description="Base URL for the Kaze weather API", - ) -``` - -Main implementation: - -```python -# llama-stack-provider-kaze/src/llama_stack_provider_kaze/kaze.py -from llama_stack_api_weather.api import WeatherProvider - -from .config import KazeProviderConfig - - -class WeatherKazeAdapter(WeatherProvider): - """Kaze weather provider implementation.""" - - def __init__( - self, - config: KazeProviderConfig, - ) -> None: - self.config = config - - async def initialize(self) -> None: - pass - - async def get_available_locations(self) -> dict[str, list[str]]: - """Get available weather locations.""" - return {"locations": ["Paris", "Tokyo"]} -``` - -5. Create the provider specification: - -```yaml -# ~/.llama/providers.d/remote/weather/kaze.yaml -adapter: - adapter_type: kaze - pip_packages: ["llama_stack_provider_kaze"] - config_class: llama_stack_provider_kaze.config.KazeProviderConfig - module: llama_stack_provider_kaze -optional_api_dependencies: [] -``` - -6. Install the provider package: - -```bash -uv pip install -e . -``` - -7. 
Configure Llama Stack to use the provider: - -```yaml -# ~/.llama/run-byoa.yaml -version: "2" -image_name: "llama-stack-api-weather" -apis: - - weather -providers: - weather: - - provider_id: kaze - provider_type: remote::kaze - config: {} -external_apis_dir: ~/.llama/apis.d -external_providers_dir: ~/.llama/providers.d -server: - port: 8321 -``` - -8. Run the server: - -```bash -python -m llama_stack.core.server.server --yaml-config ~/.llama/run-byoa.yaml -``` - -9. Test the API: - -```bash -curl -sSf http://127.0.0.1:8321/v1/weather/locations -{"locations":["Paris","Tokyo"]}% -``` - -## Best Practices - -1. **Package Naming**: Use a clear and descriptive name for your API package. - -2. **Version Management**: Keep your API package versioned and compatible with the Llama Stack version you're using. - -3. **Dependencies**: Only include the minimum required dependencies in your API package. - -4. **Documentation**: Include clear documentation in your API package about: - - Installation requirements - - Configuration options - - API endpoints and usage - - Any limitations or known issues - -5. **Testing**: Include tests in your API package to ensure it works correctly with Llama Stack. - -## Troubleshooting - -If your external API isn't being loaded: - -1. Check that the `external_apis_dir` path is correct and accessible. -2. Verify that the YAML files are properly formatted. -3. Ensure all required Python packages are installed. -4. Check the Llama Stack server logs for any error messages - turn on debug logging to get more information using `LLAMA_STACK_LOGGING=all=debug`. -5. Verify that the API package is installed in your Python environment. 
diff --git a/docs/source/building_applications/agent.md b/docs/source/building_applications/agent.md deleted file mode 100644 index 6fcc46152..000000000 --- a/docs/source/building_applications/agent.md +++ /dev/null @@ -1,92 +0,0 @@ -# Agents - -An Agent in Llama Stack is a powerful abstraction that allows you to build complex AI applications. - -The Llama Stack agent framework is built on a modular architecture that allows for flexible and powerful AI -applications. This document explains the key components and how they work together. - -## Core Concepts - -### 1. Agent Configuration - -Agents are configured using the `AgentConfig` class, which includes: - -- **Model**: The underlying LLM to power the agent -- **Instructions**: System prompt that defines the agent's behavior -- **Tools**: Capabilities the agent can use to interact with external systems -- **Safety Shields**: Guardrails to ensure responsible AI behavior - -```python -from llama_stack_client import Agent - - -# Create the agent -agent = Agent( - llama_stack_client, - model="meta-llama/Llama-3-70b-chat", - instructions="You are a helpful assistant that can use tools to answer questions.", - tools=["builtin::code_interpreter", "builtin::rag/knowledge_search"], -) -``` - -### 2. Sessions - -Agents maintain state through sessions, which represent a conversation thread: - -```python -# Create a session -session_id = agent.create_session(session_name="My conversation") -``` - -### 3. Turns - -Each interaction with an agent is called a "turn" and consists of: - -- **Input Messages**: What the user sends to the agent -- **Steps**: The agent's internal processing (inference, tool execution, etc.) 
-- **Output Message**: The agent's response - -```python -from llama_stack_client import AgentEventLogger - -# Create a turn with streaming response -turn_response = agent.create_turn( - session_id=session_id, - messages=[{"role": "user", "content": "Tell me about Llama models"}], -) -for log in AgentEventLogger().log(turn_response): - log.print() -``` -### Non-Streaming - - - -```python -from rich.pretty import pprint - -# Non-streaming API -response = agent.create_turn( - session_id=session_id, - messages=[{"role": "user", "content": "Tell me about Llama models"}], - stream=False, -) -print("Inputs:") -pprint(response.input_messages) -print("Output:") -pprint(response.output_message.content) -print("Steps:") -pprint(response.steps) -``` - -### 4. Steps - -Each turn consists of multiple steps that represent the agent's thought process: - -- **Inference Steps**: The agent generating text responses -- **Tool Execution Steps**: The agent using tools to gather information -- **Shield Call Steps**: Safety checks being performed - -## Agent Execution Loop - - -Refer to the [Agent Execution Loop](agent_execution_loop) for more details on what happens within an agent turn. diff --git a/docs/source/building_applications/agent_execution_loop.md b/docs/source/building_applications/agent_execution_loop.md deleted file mode 100644 index d66448449..000000000 --- a/docs/source/building_applications/agent_execution_loop.md +++ /dev/null @@ -1,139 +0,0 @@ -## Agent Execution Loop - -Agents are the heart of Llama Stack applications. They combine inference, memory, safety, and tool usage into coherent -workflows. At its core, an agent follows a sophisticated execution loop that enables multi-step reasoning, tool usage, -and safety checks. - -### Steps in the Agent Workflow - -Each agent turn follows these key steps: - -1. **Initial Safety Check**: The user's input is first screened through configured safety shields - -2. 
**Context Retrieval**: - - If RAG is enabled, the agent can choose to query relevant documents from memory banks. You can use the `instructions` field to steer the agent. - - For new documents, they are first inserted into the memory bank. - - Retrieved context is provided to the LLM as a tool response in the message history. - -3. **Inference Loop**: The agent enters its main execution loop: - - The LLM receives a user prompt (with previous tool outputs) - - The LLM generates a response, potentially with [tool calls](tools) - - If tool calls are present: - - Tool inputs are safety-checked - - Tools are executed (e.g., web search, code execution) - - Tool responses are fed back to the LLM for synthesis - - The loop continues until: - - The LLM provides a final response without tool calls - - Maximum iterations are reached - - Token limit is exceeded - -4. **Final Safety Check**: The agent's final response is screened through safety shields - -```{mermaid} -sequenceDiagram - participant U as User - participant E as Executor - participant M as Memory Bank - participant L as LLM - participant T as Tools - participant S as Safety Shield - - Note over U,S: Agent Turn Start - U->>S: 1. Submit Prompt - activate S - S->>E: Input Safety Check - deactivate S - - loop Inference Loop - E->>L: 2.1 Augment with Context - L-->>E: 2.2 Response (with/without tool calls) - - alt Has Tool Calls - E->>S: Check Tool Input - S->>T: 3.1 Execute Tool - T-->>E: 3.2 Tool Response - E->>L: 4.1 Tool Response - L-->>E: 4.2 Synthesized Response - end - - opt Stop Conditions - Note over E: Break if: - Note over E: - No tool calls - Note over E: - Max iterations reached - Note over E: - Token limit exceeded - end - end - - E->>S: Output Safety Check - S->>U: 5. Final Response -``` - -Each step in this process can be monitored and controlled through configurations. 
- -### Agent Execution Loop Example -Here's an example that demonstrates monitoring the agent's execution: - -```python -from llama_stack_client import LlamaStackClient, Agent, AgentEventLogger -from rich.pretty import pprint - -# Replace host and port -client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}") - -agent = Agent( - client, - # Check with `llama-stack-client models list` - model="Llama3.2-3B-Instruct", - instructions="You are a helpful assistant", - # Enable both RAG and tool usage - tools=[ - { - "name": "builtin::rag/knowledge_search", - "args": {"vector_db_ids": ["my_docs"]}, - }, - "builtin::code_interpreter", - ], - # Configure safety (optional) - input_shields=["llama_guard"], - output_shields=["llama_guard"], - # Control the inference loop - max_infer_iters=5, - sampling_params={ - "strategy": {"type": "top_p", "temperature": 0.7, "top_p": 0.95}, - "max_tokens": 2048, - }, -) -session_id = agent.create_session("monitored_session") - -# Stream the agent's execution steps -response = agent.create_turn( - messages=[{"role": "user", "content": "Analyze this code and run it"}], - documents=[ - { - "content": "https://raw.githubusercontent.com/example/code.py", - "mime_type": "text/plain", - } - ], - session_id=session_id, -) - -# Monitor each step of execution -for log in AgentEventLogger().log(response): - log.print() - -# Using non-streaming API, the response contains input, steps, and output. 
-response = agent.create_turn( - messages=[{"role": "user", "content": "Analyze this code and run it"}], - documents=[ - { - "content": "https://raw.githubusercontent.com/example/code.py", - "mime_type": "text/plain", - } - ], - session_id=session_id, -) - -pprint(f"Input: {response.input_messages}") -pprint(f"Output: {response.output_message.content}") -pprint(f"Steps: {response.steps}") -``` diff --git a/docs/source/building_applications/evals.md b/docs/source/building_applications/evals.md deleted file mode 100644 index ded62cebb..000000000 --- a/docs/source/building_applications/evals.md +++ /dev/null @@ -1,125 +0,0 @@ -# Evaluations - -The Llama Stack provides a set of APIs in Llama Stack for supporting running evaluations of LLM applications. -- `/datasetio` + `/datasets` API -- `/scoring` + `/scoring_functions` API -- `/eval` + `/benchmarks` API - - - -This guides walks you through the process of evaluating an LLM application built using Llama Stack. Checkout the [Evaluation Reference](../references/evals_reference/index.md) guide goes over the sets of APIs and developer experience flow of using Llama Stack to run evaluations for benchmark and application use cases. Checkout our Colab notebook on working examples with evaluations [here](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing). - - -## Application Evaluation - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) - -Llama Stack offers a library of scoring functions and the `/scoring` API, allowing you to run evaluations on your pre-annotated AI application datasets. - -In this example, we will show you how to: -1. Build an Agent with Llama Stack -2. Query the agent's sessions, turns, and steps -3. Evaluate the results. 
- -##### Building a Search Agent -```python -from llama_stack_client import LlamaStackClient, Agent, AgentEventLogger - -client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}") - -agent = Agent( - client, - model="meta-llama/Llama-3.3-70B-Instruct", - instructions="You are a helpful assistant. Use search tool to answer the questions. ", - tools=["builtin::websearch"], -) -user_prompts = [ - "Which teams played in the NBA Western Conference Finals of 2024. Search the web for the answer.", - "In which episode and season of South Park does Bill Cosby (BSM-471) first appear? Give me the number and title. Search the web for the answer.", - "What is the British-American kickboxer Andrew Tate's kickboxing name? Search the web for the answer.", -] - -session_id = agent.create_session("test-session") - -for prompt in user_prompts: - response = agent.create_turn( - messages=[ - { - "role": "user", - "content": prompt, - } - ], - session_id=session_id, - ) - - for log in AgentEventLogger().log(response): - log.print() -``` - - -##### Query Agent Execution Steps - -Now, let's look deeper into the agent's execution steps and see if how well our agent performs. -```python -# query the agents session -from rich.pretty import pprint - -session_response = client.agents.session.retrieve( - session_id=session_id, - agent_id=agent.agent_id, -) - -pprint(session_response) -``` - -As a sanity check, we will first check if all user prompts is followed by a tool call to `brave_search`. -```python -num_tool_call = 0 -for turn in session_response.turns: - for step in turn.steps: - if ( - step.step_type == "tool_execution" - and step.tool_calls[0].tool_name == "brave_search" - ): - num_tool_call += 1 - -print( - f"{num_tool_call}/{len(session_response.turns)} user prompts are followed by a tool call to `brave_search`" -) -``` - -##### Evaluate Agent Responses -Now, we want to evaluate the agent's responses to the user prompts. - -1. 
First, we will process the agent's execution history into a list of rows that can be used for evaluation. -2. Next, we will label the rows with the expected answer. -3. Finally, we will use the `/scoring` API to score the agent's responses. - -```python -eval_rows = [] - -expected_answers = [ - "Dallas Mavericks and the Minnesota Timberwolves", - "Season 4, Episode 12", - "King Cobra", -] - -for i, turn in enumerate(session_response.turns): - eval_rows.append( - { - "input_query": turn.input_messages[0].content, - "generated_answer": turn.output_message.content, - "expected_answer": expected_answers[i], - } - ) - -pprint(eval_rows) - -scoring_params = { - "basic::subset_of": None, -} -scoring_response = client.scoring.score( - input_rows=eval_rows, scoring_functions=scoring_params -) -pprint(scoring_response) -``` diff --git a/docs/source/building_applications/index.md b/docs/source/building_applications/index.md deleted file mode 100644 index fddd957ed..000000000 --- a/docs/source/building_applications/index.md +++ /dev/null @@ -1,33 +0,0 @@ -# AI Application Examples - -Llama Stack provides all the building blocks needed to create sophisticated AI applications. - -The best way to get started is to look at this notebook which walks through the various APIs (from basic inference, to RAG agents) and how to use them. - -**Notebook**: [Building AI Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) - -Here are some key topics that will help you build effective agents: - -- **[RAG (Retrieval-Augmented Generation)](rag)**: Learn how to enhance your agents with external knowledge through retrieval mechanisms. -- **[Agent](agent)**: Understand the components and design patterns of the Llama Stack agent framework. -- **[Agent Execution Loop](agent_execution_loop)**: Understand how agents process information, make decisions, and execute actions in a continuous loop. 
-- **[Agents vs Responses API](responses_vs_agents)**: Learn the differences between the Agents API and Responses API, and when to use each one. -- **[Tools](tools)**: Extend your agents' capabilities by integrating with external tools and APIs. -- **[Evals](evals)**: Evaluate your agents' effectiveness and identify areas for improvement. -- **[Telemetry](telemetry)**: Monitor and analyze your agents' performance and behavior. -- **[Safety](safety)**: Implement guardrails and safety measures to ensure responsible AI behavior. - -```{toctree} -:hidden: -:maxdepth: 1 - -rag -agent -agent_execution_loop -responses_vs_agents -tools -evals -telemetry -safety -playground/index -``` \ No newline at end of file diff --git a/docs/source/building_applications/playground/index.md b/docs/source/building_applications/playground/index.md deleted file mode 100644 index 2390c422f..000000000 --- a/docs/source/building_applications/playground/index.md +++ /dev/null @@ -1,107 +0,0 @@ -## Llama Stack Playground - -```{note} -The Llama Stack Playground is currently experimental and subject to change. We welcome feedback and contributions to help improve it. -``` - -The Llama Stack Playground is an simple interface which aims to: -- Showcase **capabilities** and **concepts** of Llama Stack in an interactive environment -- Demo **end-to-end** application code to help users get started to build their own applications -- Provide an **UI** to help users inspect and understand Llama Stack API providers and resources - -### Key Features - -#### Playground -Interactive pages for users to play with and explore Llama Stack API capabilities. - -##### Chatbot -```{eval-rst} -.. video:: https://github.com/user-attachments/assets/8d2ef802-5812-4a28-96e1-316038c84cbf - :autoplay: - :playsinline: - :muted: - :loop: - :width: 100% -``` -- **Chat**: Chat with Llama models. - - This page is a simple chatbot that allows you to chat with Llama models. 
Under the hood, it uses the `/inference/chat-completion` streaming API to send messages to the model and receive responses. -- **RAG**: Uploading documents to memory_banks and chat with RAG agent - - This page allows you to upload documents as a `memory_bank` and then chat with a RAG agent to query information about the uploaded documents. - - Under the hood, it uses Llama Stack's `/agents` API to define and create a RAG agent and chat with it in a session. - -##### Evaluations -```{eval-rst} -.. video:: https://github.com/user-attachments/assets/6cc1659f-eba4-49ca-a0a5-7c243557b4f5 - :autoplay: - :playsinline: - :muted: - :loop: - :width: 100% -``` -- **Evaluations (Scoring)**: Run evaluations on your AI application datasets. - - This page demonstrates the flow evaluation API to run evaluations on your custom AI application datasets. You may upload your own evaluation datasets and run evaluations using available scoring functions. - - Under the hood, it uses Llama Stack's `/scoring` API to run evaluations on selected scoring functions. - -```{eval-rst} -.. video:: https://github.com/user-attachments/assets/345845c7-2a2b-4095-960a-9ae40f6a93cf - :autoplay: - :playsinline: - :muted: - :loop: - :width: 100% -``` -- **Evaluations (Generation + Scoring)**: Use pre-registered evaluation tasks to evaluate an model or agent candidate - - This page demonstrates the flow for evaluation API to evaluate an model or agent candidate on pre-defined evaluation tasks. An evaluation task is a combination of dataset and scoring functions. - - Under the hood, it uses Llama Stack's `/eval` API to run generations and scorings on specified evaluation configs. - - In order to run this page, you may need to register evaluation tasks and datasets as resources first through the following commands. 
- ```bash - $ llama-stack-client datasets register \ - --dataset-id "mmlu" \ - --provider-id "huggingface" \ - --url "https://huggingface.co/datasets/llamastack/evals" \ - --metadata '{"path": "llamastack/evals", "name": "evals__mmlu__details", "split": "train"}' \ - --schema '{"input_query": {"type": "string"}, "expected_answer": {"type": "string"}, "chat_completion_input": {"type": "string"}}' - ``` - - ```bash - $ llama-stack-client benchmarks register \ - --eval-task-id meta-reference-mmlu \ - --provider-id meta-reference \ - --dataset-id mmlu \ - --scoring-functions basic::regex_parser_multiple_choice_answer - ``` - - -##### Inspect -```{eval-rst} -.. video:: https://github.com/user-attachments/assets/01d52b2d-92af-4e3a-b623-a9b8ba22ba99 - :autoplay: - :playsinline: - :muted: - :loop: - :width: 100% -``` -- **API Providers**: Inspect Llama Stack API providers - - This page allows you to inspect Llama Stack API providers and resources. - - Under the hood, it uses Llama Stack's `/providers` API to get information about the providers. - -- **API Resources**: Inspect Llama Stack API resources - - This page allows you to inspect Llama Stack API resources (`models`, `datasets`, `memory_banks`, `benchmarks`, `shields`). - - Under the hood, it uses Llama Stack's `//list` API to get information about each resources. - - Please visit [Core Concepts](../../concepts/index.md) for more details about the resources. - -### Starting the Llama Stack Playground - -To start the Llama Stack Playground, run the following commands: - -1. Start up the Llama Stack API server - -```bash -llama stack build --distro together --image-type venv -llama stack run together -``` - -2. 
Start Streamlit UI -```bash -uv run --with ".[ui]" streamlit run llama_stack.core/ui/app.py -``` diff --git a/docs/source/building_applications/rag.md b/docs/source/building_applications/rag.md deleted file mode 100644 index 802859e87..000000000 --- a/docs/source/building_applications/rag.md +++ /dev/null @@ -1,280 +0,0 @@ -## Retrieval Augmented Generation (RAG) - -RAG enables your applications to reference and recall information from previous interactions or external documents. - -Llama Stack organizes the APIs that enable RAG into three layers: -1. The lowermost APIs deal with raw storage and retrieval. These include Vector IO, KeyValue IO (coming soon) and Relational IO (also coming soon.). -2. The next is the "Rag Tool", a first-class tool as part of the [Tools API](tools.md) that allows you to ingest documents (from URLs, files, etc) with various chunking strategies and query them smartly. -3. Finally, it all comes together with the top-level ["Agents" API](agent.md) that allows you to create agents that can use the tools to answer questions, perform tasks, and more. - -RAG System - -The RAG system uses lower-level storage for different types of data: -* **Vector IO**: For semantic search and retrieval -* **Key-Value and Relational IO**: For structured data storage - -We may add more storage types like Graph IO in the future. - -### Setting up Vector DBs - -For this guide, we will use [Ollama](https://ollama.com/) as the inference provider. -Ollama is an LLM runtime that allows you to run Llama models locally. 
- -Here's how to set up a vector database for RAG: - -```python -# Create http client -import os -from llama_stack_client import LlamaStackClient - -client = LlamaStackClient(base_url=f"http://localhost:{os.environ['LLAMA_STACK_PORT']}") - - -# Register a vector db -vector_db_id = "my_documents" -response = client.vector_dbs.register( - vector_db_id=vector_db_id, - embedding_model="all-MiniLM-L6-v2", - embedding_dimension=384, - provider_id="faiss", -) -``` - -### Ingesting Documents -You can ingest documents into the vector database using two methods: directly inserting pre-chunked -documents or using the RAG Tool. -```python -# You can insert a pre-chunked document directly into the vector db -chunks = [ - { - "content": "Your document text here", - "mime_type": "text/plain", - "metadata": { - "document_id": "doc1", - "author": "Jane Doe", - }, - }, -] -client.vector_io.insert(vector_db_id=vector_db_id, chunks=chunks) -``` - -#### Using Precomputed Embeddings -If you decide to precompute embeddings for your documents, you can insert them directly into the vector database by -including the embedding vectors in the chunk data. This is useful if you have a separate embedding service or if you -want to customize the ingestion process. -```python -chunks_with_embeddings = [ - { - "content": "First chunk of text", - "mime_type": "text/plain", - "embedding": [0.1, 0.2, 0.3, ...], # Your precomputed embedding vector - "metadata": {"document_id": "doc1", "section": "introduction"}, - }, - { - "content": "Second chunk of text", - "mime_type": "text/plain", - "embedding": [0.2, 0.3, 0.4, ...], # Your precomputed embedding vector - "metadata": {"document_id": "doc1", "section": "methodology"}, - }, -] -client.vector_io.insert(vector_db_id=vector_db_id, chunks=chunks_with_embeddings) -``` -When providing precomputed embeddings, ensure the embedding dimension matches the embedding_dimension specified when -registering the vector database. 
- -### Retrieval -You can query the vector database to retrieve documents based on their embeddings. -```python -# You can then query for these chunks -chunks_response = client.vector_io.query( - vector_db_id=vector_db_id, query="What do you know about..." -) -``` - -### Using the RAG Tool - -> **โš ๏ธ DEPRECATION NOTICE**: The RAG Tool is being deprecated in favor of directly using the OpenAI-compatible Search -> API. We recommend migrating to the OpenAI APIs for better compatibility and future support. - -A better way to ingest documents is to use the RAG Tool. This tool allows you to ingest documents from URLs, files, etc. -and automatically chunks them into smaller pieces. More examples for how to format a RAGDocument can be found in the -[appendix](#more-ragdocument-examples). - -#### OpenAI API Integration & Migration - -The RAG tool has been updated to use OpenAI-compatible APIs. This provides several benefits: - -- **Files API Integration**: Documents are now uploaded using OpenAI's file upload endpoints -- **Vector Stores API**: Vector storage operations use OpenAI's vector store format with configurable chunking strategies -- **Error Resilience:** When processing multiple documents, individual failures are logged but don't crash the operation. Failed documents are skipped while successful ones continue processing. - -**Migration Path:** -We recommend migrating to the OpenAI-compatible Search API for: -1. **Better OpenAI Ecosystem Integration**: Direct compatibility with OpenAI tools and workflows including the Responses API -2**Future-Proof**: Continued support and feature development -3**Full OpenAI Compatibility**: Vector Stores, Files, and Search APIs are fully compatible with OpenAI's Responses API - -The OpenAI APIs are used under the hood, so you can continue to use your existing RAG Tool code with minimal changes. -However, we recommend updating your code to use the new OpenAI-compatible APIs for better long-term support. 
If any -documents fail to process, they will be logged in the response but will not cause the entire operation to fail. - -```python -from llama_stack_client import RAGDocument - -urls = ["memory_optimizations.rst", "chat.rst", "llama3.rst"] -documents = [ - RAGDocument( - document_id=f"num-{i}", - content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}", - mime_type="text/plain", - metadata={}, - ) - for i, url in enumerate(urls) -] - -client.tool_runtime.rag_tool.insert( - documents=documents, - vector_db_id=vector_db_id, - chunk_size_in_tokens=512, -) - -# Query documents -results = client.tool_runtime.rag_tool.query( - vector_db_ids=[vector_db_id], - content="What do you know about...", -) -``` - -You can configure how the RAG tool adds metadata to the context if you find it useful for your application. Simply add: -```python -# Query documents -results = client.tool_runtime.rag_tool.query( - vector_db_ids=[vector_db_id], - content="What do you know about...", - query_config={ - "chunk_template": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n", - }, -) -``` -### Building RAG-Enhanced Agents - -One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example: - -```python -from llama_stack_client import Agent - -# Create agent with memory -agent = Agent( - client, - model="meta-llama/Llama-3.3-70B-Instruct", - instructions="You are a helpful assistant", - tools=[ - { - "name": "builtin::rag/knowledge_search", - "args": { - "vector_db_ids": [vector_db_id], - # Defaults - "query_config": { - "chunk_size_in_tokens": 512, - "chunk_overlap_in_tokens": 0, - "chunk_template": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n", - }, - }, - } - ], -) -session_id = agent.create_session("rag_session") - - -# Ask questions about documents in the vector db, and the agent will query the db to answer the question. 
-response = agent.create_turn( - messages=[{"role": "user", "content": "How to optimize memory in PyTorch?"}], - session_id=session_id, -) -``` - -> **NOTE:** the `instructions` field in the `AgentConfig` can be used to guide the agent's behavior. It is important to experiment with different instructions to see what works best for your use case. - - -You can also pass documents along with the user's message and ask questions about them. -```python -# Initial document ingestion -response = agent.create_turn( - messages=[ - {"role": "user", "content": "I am providing some documents for reference."} - ], - documents=[ - { - "content": "https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/memory_optimizations.rst", - "mime_type": "text/plain", - } - ], - session_id=session_id, -) - -# Query with RAG -response = agent.create_turn( - messages=[{"role": "user", "content": "What are the key topics in the documents?"}], - session_id=session_id, -) -``` - -You can print the response with below. 
-```python -from llama_stack_client import AgentEventLogger - -for log in AgentEventLogger().log(response): - log.print() -``` - -### Unregistering Vector DBs - -If you need to clean up and unregister vector databases, you can do so as follows: - -```python -# Unregister a specified vector database -vector_db_id = "my_vector_db_id" -print(f"Unregistering vector database: {vector_db_id}") -client.vector_dbs.unregister(vector_db_id=vector_db_id) - - -# Unregister all vector databases -for vector_db_id in client.vector_dbs.list(): - print(f"Unregistering vector database: {vector_db_id.identifier}") - client.vector_dbs.unregister(vector_db_id=vector_db_id.identifier) -``` - -### Appendix - -#### More RAGDocument Examples -```python -from llama_stack_client import RAGDocument -import base64 - -RAGDocument(document_id="num-0", content={"uri": "file://path/to/file"}) -RAGDocument(document_id="num-1", content="plain text") -RAGDocument( - document_id="num-2", - content={ - "type": "text", - "text": "plain text input", - }, # for inputs that should be treated as text explicitly -) -RAGDocument( - document_id="num-3", - content={ - "type": "image", - "image": {"url": {"uri": "https://mywebsite.com/image.jpg"}}, - }, -) -B64_ENCODED_IMAGE = base64.b64encode( - requests.get( - "https://raw.githubusercontent.com/meta-llama/llama-stack/refs/heads/main/docs/_static/llama-stack.png" - ).content -) -RAGDocuemnt( - document_id="num-4", - content={"type": "image", "image": {"data": B64_ENCODED_IMAGE}}, -) -``` -for more strongly typed interaction use the typed dicts found [here](https://github.com/meta-llama/llama-stack-client-python/blob/38cd91c9e396f2be0bec1ee96a19771582ba6f17/src/llama_stack_client/types/shared_params/document.py). 
diff --git a/docs/source/building_applications/rag.png b/docs/source/building_applications/rag.png deleted file mode 100644 index a5e5b8cdbe7857339f98f0196bda72f78dc06e32..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 148579 zcmeFZWmr^Q-#1K33kEHv0!k~=HHe~sgo@G)(jeV2Dxe}E(v3<;4UIICqLkDybT%Om#@0aI&JPuAad+)W@KY#yNYoG5F=Q{khbp%F{*o%Afleu&M8xy}dz3_G5rt|AInS?JJ@XB)UX* zzhyZx3fb5vP$7WqI}q7uF0pLJ^farw;Omvef);T#;`;5@Fxt4lhxJ4lxKn5>Tv)|d zG8G#MiHWh_%5GKk`nWvg)h#ry%irJI^5Eg2)%t~vhW~&+f_ePKRabm8ikGz)Dba4c zfm-=L5^g3YNsKCF+S^UG(Y?H-8v)a0&*oD9*g}4eWu$HHMy)#Kvb9_bky5mdATz(~ zd-2PG=mJCN=sz)RhkP7wl3+3nd8$)-yfbr^NN2OkycZbB^o9GQT77cvef8c5TO1)A zL$o^N;u!0)Kd%)ZL(kfDcMl7iuzY}@UrdBQwgbjXVLUY5e%nS}bhOkxG74 zdY2rCFyBJ3w*-wa-LwhWJc(YSaa3Y%qIr-IRzdDOL?b{a{|bFXk1n81sZ4|qxzJ5m z@cMQZ`5cZ<<>QwacRX{4u71a`@RWN=E_|*(p12gFdI*!kiyjx}BLn_dB1%Y7AkP@u zEqczMbogFZ6R>XJNqgN3yrOz8!|Q##ZVF)!&NK1)g!b0^Q&b(;B5$1I#i!U;u_b?8 z567SV@nVrJ9G!cZgr7+H%}MPh?g9BC8`7wu1Bxs$V1q zuXW>ko}1N5XX?k@b=5N zMQKOOQcO{_xVPlP@FAH+pQXvA!o|JK$FEp^1P{sKUR%U1dL#P=^9@s#>|HvY;1tO_ z%+43Tyqm7+ta+kKv~+t3Zz(1yDClEQ)RQ+s5aW<1-`Z{^rb&xP$4SjxXT9iqC+<44 z`E{D>JfCQT20k|EbLS8Xd}O<3d0puv=f}8@VmF@#n};BN36F>T8vnKSt1wtAjYRR$ zvv3xIXUmGxigKxaij+?nQzF0RC<>&qD*k>ZpEjUG^Y}{&BF*cYL8`feP`bUWIfN$J zB548E{Io@B5aupb7WV2h#Z%?W90u&;XydmN1KuY_K1Focnp{}Bk^fpSJt1@W>xH!J zdtcN?qb2!>?sI?1Hon`#)PvU}(nBsQBAdjdWmERTZ-TukK$SbsHE&bZOEq8BA-~+H zJ%2BsJFn1)&q%COd{%k--qhIC+@GR!?mMpqtM$$~NDhCv#7#S0Rs6j8W3i0g#%#f? 
z|J3v>_RPCA`x%Wk>bB(RovGwm%XTrCm=WaG;%yQNGw$!&i$R&# z5?xbc(@0}i1W!0oxM@Ug(@JAnQ=D=jdnr4anxb-Fc8%(7Hf}ZtHG9<`&zqkcKOcY2 zm!V@yZVFwqn9Ym`kFemr&&~Q`ppXHoL_c%|J9hwq~g?FnD1Pj^pk);09JL*~^_L%D06{8t1g z$tEp1GC3}WQH6;sU4mbSYh-RvDoXO}VUDI!*EPY-6MIz(TNXGLZf{&#@Nn*S)OWnH zP7!V{8YQ+#qKh`Q!I%HHn7|j~NUYS|l>=GA78{3<>_O@L%Jzg%3um~8C3Nc*BS9v6hsq6gM~jcY zt~P)EBo+Sk``2nIEdiZq{N{(Zg`AX@xx#Auf4J4I44aPN*lm)Oq{g8=<7CTpV z&HH+iT+4Q!ZIy{(?JH=+tJB~7UeT=AKS1|K4BvfD8Ts9PavSMYS~Yv${yyVIV(`$!&3Uyh&zr-D5e%EidL?{F!i3D=oaj@|?Aqb)jF-&~%S?yP(P(S;*Pro>B8n zA9;S{{FeVhTWecg43iKaa#uCWc$v?Gx)O!+kM-^ z-g({&b#Ll2+kYghMG8mGG!m%HcGk*xX_yP2RHSFC-1Lr<+|~^e@FU_HFftAA@c@lT#>8g2G?E3 z5I&8>U+4(Lj6DQbBbJKhW%|PQ|3t&S!x9JCDtO$8xAyhfY|I`tetgNKihlX_Q1E56 z0a`52XpnPGGrV14a+$+>&2&J(guBZeJ6Fxzi0w&L<5&0~{RNsoL$w(x8jC2}heF9e zqGzIrFpxcpXk=NGkD8l3c(NfTqL8xRbU;8fNODa;?bRAc zW#1bpOTCblMPmWixM*1DRA|`X3LX52qEnwis&_ftJ<0|N^iBTHN7Y;F`) zcrPES*r1`2F`|CZrJh`01oOj<6_st3WgiLZS(?jB>%?)gI z=pD_?ENlcFMXnycAqcKfw>huUAHHI1Dsoj>R)Jo^(%OKYmxGIg>#8U|Jw3g!wf+mi zC-)?ehl9UFt{U0ez7*u-bZ~IsaNy>!v^M0tB_JTc$#t9a_HB0X2D^=ug{_VwyM@iQ zqe)KYxo2RbXKnn_*4Wa59yPCyuBDx=$knT;g--t+(P`jl{C6b_n`5>BgPf=*oVPf* zI8WyWLxoXy1r>}P4a`*T8Jh!{fi*-0ZgB}8zW+a-{9W(9grzxH(^mAazK#c}@u}1VU4S017~hju z1iyimq5h%Qga6z(`i;7Vp+$sz1kWL8Quo9a9nt6d@uHs@?BNV4={xAyJiu1MQru5s z(vL7e=oa1c9*;!4@YQ|p9U0>C?YqH3QqtW_mB{(TknR^rZ&PkY-uV3XEe8I~K{*a` zxR2X`_r*e^M(obkRJ6^4%D{$G3)f;9a<`eql1%f5FB&=q7A^@r#Pi>aJKrl=Fjmf0 zVA{t0`{3l&i|EiIYUuy|4EHh?)`SJ-|2i=Vy#fhIcnQ(}vL#LH70WviConT<+TDHuVdiV*J`wdM;Sl#|~l) zteGeA?70oT366xUSfh4M)KixMv&GNo_u1YvIGdt7?>gNv_!Vqnp5}Y$gawkR6u`rm zSN_ZZs=csOBOlXByrT%L4+|{4i#e2sT1DYG{l4pa*qlU?uMmEd@5S;{{yS2^*yj)A zO7KWhOrd-0y)ZL&gsuX|b=d4MTmbOb@djcw;Uvnxk(uH+l@w9=IC72S{z@!NBm&w6Z5XG=NgbzQC0whR#dOa_PHX8fR>c z8Q7W-`k$L9^<99vDsw1Ly-m_X@fu%({VRmRW#k=mafwQ%{Arwvm&L$X;k|`;2ka)~ z($7%c$KHw7&CQQcFK93a#u+T2$7>iOrBB(WxkM7~(Ae?^+?WdVM2ypn*l?ljSTWm^ z_)VJQ%R7ExW z1H|}m2}FV$tn7ir7dtq#jHPi&xWc5PF{Y`m4$*F^u5-Bl%4ULe1Wiz)b9& 
z;E0loC<<)-Ap3?fDia!`3oNGtSUKsrw48(mx_ZW6k#P$s9Jm1H_=ug8+vo=A`|0qi{KSw= zB!Kr)kA4jSMi=3^J#NPvUo3%=XA}OI*!AbH(p6Y2|IQ5sbK{>AzXUv880xCMpwvN) zq;1s$4BUAS5#`$8hCP33$QvsqvCc5Zr_h@x1|yAilo zl*}gMt2l6KMN=<;7Wjxge_x_+4aY#>xJ!b+^#*X9Wr3keUii8HBBZkPv82X-S%Ny8 z;Jk?|7tf*FtCIcv#4 zT;{1_ZzonG_z=ar0O0$44a>2ydqSS!<2I&QE8lsK26YC|0N=r#r9(X&!(N7hDJQ(r zPYeWC@ikC7m83X87$bf#O*K)`dGIiP7#nMX$b$gwM7SMqzsE>}9bXtcja`*r#u^Tm*0b z(Kf;d8ombJ#+vZsUU!y8>4x;@!Q7>AzL}W#!DD}e{^#6S6Lx~F#rIL-`v$yI%T+=u zC%FDF4j!@;f}&ra{?2POXx$|gK=v3Di^?3iA|wU~c-B_i?xd_xFa+K&NUhM%0ExE; z8qMc^t_lc2$Dkp>k$L;lkn$a02`lO5$(P=5k_kM-JIyhb658NyR+iD9MZLaNIfUISiX6`KLVzpoP%|Uo9Q#XcRbAMz z9{XF18x;p4W+n3lI#aC)8FZ}>mp&I{S;B(=TAjx5TSOhE8jP)q^gFqn<*ZzmCi(=Y zjBWr%OW=c@cqB&Jhm%?kQ76b1?{AI<_=v1`Un^by#VQrxzSBGC&0f&`S8WoPL(1aY};E^_Jr3~c5% zuWhmFv#R$>gOv*wH46P9gFoAi6g2-?iW%$!O!YjpbXfE%sCW|Fzl#d0;R4tSsV>4lmRZ*7Zl>1vP9^N;f zCcWvktPqJQqixCWF-h}E%PeM@%c?#_^4sSam)D=NzaM0|ZD_JG|Wb8`)+rCRdpjKTq`%iuYhcpt?G<`)&p z6x6FsJM`Nreczghsx~j|f4;0~&vk0pwtX$H>97 zJm{&-A#(fSHfT_3JlsZB#@W)phRg^InN4bv0T{9naIE>t9Met$b(i`4VYR{S=Tbr* zD^Y`Q*bDn|^wmsRv-h{g!?b911;PqS2R6z#ynb_(r0c^et`6F^%Sy$U{y0Z{U1xc3 zW01~$Eln$A4&L@l(`BBtS!AOx)E>`fvYBYKK7iP5dlhdo#zFHFG2wfhdX;6X3Fjrd zCLZ`3hw_z}+D;}dJw${`?hCI6v`*KBx2L`|Jlf>{T~6_xPar;o$STw$dLDT{UKNGw zw=edHANd#X^k|?c0m@#WCk1h+HPk7(lghBs`9caKRC!=b1nDGydq<#m!7%_Ow zZQ|h6D8BP;)~&}lIV4!ac-^y#+DdXKzgRs&O1@SOeXp9em(u!?ehH5*oH%Tg<4 znnKT%!uGCk1(bS&E)G z+cv4;da-@0&D}{cIwsjj#f8h$1)OfvGQ;E*oA-+c`f#v3c1P)|f8fwX&+)xz4FB5E z2xLS%vcAcn;_{+AnVglartQ`(!hi!{_Ra}f74ABn&y<=LE{^z6X16?`u~TfYXT>Uh z3ew$}?M#rD9AQ~283>JP!E+#Yg35s)<#0Qt-OjhtrbUE&U~j=8EQa(D(`ZGU^SU#Y zO~xuQiP;21o23JC3kP=wHVPcgY%D_w8b?1YDgSoanJ?s|&M|Buo%J*{^fVm&&EYXj zpoLqpJI>iI+Z3W2A?#$$zcZKnX%B^4U;7%h4z}To4f5`{MGE@v64vO^tf6C`dm~^o zah>=E`@&@5+~SBTFkMIy8Kh|uY)2-tax4a*D@W-ShvtdXpoK% zZoe+)*Aw2Eysu`qGMO(IABnXLbuv?9dpC9%c@O>kjITf>@bodWbnBa_*rhjB2{4ut zVzYy#m-6drUEcIsl(4Iqvv-tj&!kmb)zc^m-`S$7UV*vIn`Sgia|)z1{(6N?P*u=t 
z5mnya)y6fAZm#S&>_vW;jq;OQZ4UE;D^~aHpT0HF)PeTV$6E1f3tsz>_==wskx7bCwx-)!=hxMrWg7!gcetCG4(2UAs1H+xKLPG{`k!ZB zmxzfC5no;c0aKl$}e!kB`b}1t)Mh?x&G( z8)I;**`bP5N&RE&?gO#y$4@w}tMOhx*9W|=qRs6+0VLpw^k|kq;tYn)$svpJT*<(1Y@@#a9{CfFK8{ zXbBpEQBY$@sbc%tX5XY}SS+8K_jT&kw!E@XJexU^kf7cWw%XKpH=2fy?hX}Q4rQ;fQEU8Sk15QHpLK8WVBg`1;Kac` z@<7_Z7|iwwvA{?@sQNB<0f(42I=%Kbr&X8JlHEqXgAWmJcl`IBOqkf-w0t!P5o==< zP^XfqIdw1KAuLxe%bs=_Zh(eH*h^(p@P?y!s6#vxlpg}w!J`+C>M6UHNHLLWmSsGv z$U!lcmaf?W5T&c>_7{EhU6wqEMOGCq%8|ocM0e)0z<(|)$&{Bb)lm|NA9Ov`9R9T&bLV4dODn`207SyWFT9Iw@_L_YPb}+UWwC>FvSJqXg(|8Xvi<1)= z4}SUQJ~^YMUEac`EMNa+Cb{VFXc=wIj`jTKpy0_DwC*{O5pOtqnXbq(Dg|!k{ha#zuxH%?alhN z^60i(eX0_CXd^o$wG2X@MV1bR5^ZfyWfSAjxxS=|M3(O?3h`uBkxp& z$9x?w$Cqa_9I9r4hXOlm#UkQsJx+opm!_TrUJ92>_)qW1`kACx7*|7HcEDve^*r>2EyXl+liU# z!ckj$#?}mstx920T*qI*H~V{-$WO`_68b=plpFG}p-NX@z?%lv{B0Vt?-P-xRbQn( z^|;0IuLy~Da5v8^v?t3ug0%M&&T}&P%Z71UAjM5^Kn~Q(vb(#1luE*N$f!WmNqDGt zaNmBj(NOdgtq@4>QE9%uNHgysfSmRX!D15FC5h$D?Y6M;;9&QL(9iFBM+p}}lw#vx zg1N?P^$o<9ERn;lQYxbsB;g8A3zeCF*H{m`Fm$ zBIvQdtMBc~wvywxG%_~)Wi~xm?g@e0d_MeqlzD;5ver@!2qa$~1jxz!s3{&Qw#@>yQTT7+_K65+fD*OFNccmw7jFIU@6OrZu9imvNa!1VCNqiMQVb&W26~0|| zG>|(jko?3smraDRvOs|cH`pv~z*I(vZqwqN zOSL!A?@W86Exw8^*=|&5wW(3*UlLDPBJI7>tCa3VOSeEtw;bV7ON^mg6bS;+#Gna$ z8-!=d^TLajtKkJ59`X(akoaF%%VGyu6VG@dk%xUV6i*~z(+>mG%(kx~<=&=duPRHa z&s8!02IodPpD}c?x0H`Onl>*$FhwBx(v)CxYSY$)jlRUAyK{zOYRiEwBXWELj(Yqv zDPP{F(QHS?mC?Xrh~|ZCm#cm(NxZ(LS9d-69kIaP2zf;~Tfh~8+W$a{_b1dZ*63 z*St`JACz>Tp0EspME6tw2piHd*36MkCey5ZYoaM8sbQVMwceUzHlt8e8TisCm}y4- z^W_A{t{^C|SHs_3V)j<{W-R}Fzx>{#bo1DZ#PYQ+HJ`Y>IYXLQ4O&oCC*J0}JYGbmq+7}~W@dLPvwFj8Cmvi&kKwNDiF4OBC^~=uC~h)qoTL{)0`{4xghIgT&vSlpoRB7Wf=$I zUru6KnRfi^Cpi;qKV?DC!MZ6m3C(3m zyd75(Tp=~Is~-|9SH<^>1G(e%^zzxvk}6Mh%&O;V=AFZff})Vs58TZUGu;5X`4GDf zVK!JR?IBSK&GK+9@PnqW6D~>`a64Sv0?^DA1D#8WU83`x$8rW~M<F|eql zK}=An)_58NGCUUvHrPd()VvK+!*}&6LpBqQc&H#$^6N|9nrgPNRP+|D;X8+M)~*Ku z=&q&f{z41gAz}9H7;LAFJvm%6M;9zKuH3v6vojHL8V?ZPXK^f&UAd! 
znd{EBLny_ZCNn9rfGBhSsOOjH6-GfA0yK|erf#A13Yx};OGI_g-8)?Zl?`#!*xv+8 zTn2qtsFoDxugychTw@D7*ka~_P5$ZPJCTh!kd3KLUtS~{l!_d1fw4kGr_gk`g~N+C z=CLobRCCE(uvzZdi~yy@C>=d(%S6?rAAm+d{CmlExIfX44+h{h&f2HgeK<}h6>10I=kyB>TP9)K%&My`%ZjvnZvnJ;_O8A>SME8F9O4S2s+Ja!#$AK zur$=%+CV3m#O0YM{cICdse27GF#=E;2I{l&x~GY34&bog5cvyi zZe)QMp*UJ_AdS{a@ZLFESB;a*qT?-iD0xnr8}z+7L9ccp!ElYI!1-tsg#a|{iAya8 z8iY=1?FaaU6m?Nzg2Y2nEp*|{VmY9obD(L!w>UI=(jSkP1k3Z>R*01Hgt*~7iWL@! zc3VeCB{y=xVA-A2C`sJlJn1d2$`Y5#o!tcb2xxkLPU0yLz~3wt;#p}8&W#A2?!m>A zp@IRIbk9RiNIyW)S&N5uZ&4an1!;L9#+dn$_5gLMV1GlYmdgkWwO2xa$0+XtTnqJZ zF}FqJxY`9>D*_-IRSHJ)>yY>btmT9s9p9c-gQR6PGfwcx>fZxHax)1hH*h_vkx5ce zTQWR;-UUFkTo_xez+|~3U|&$GQ$`otp|YQ{i#{>YrzjJp;_zdH#A~AF|86x}6@2sQ zOrp#7Q5P)|cg$mbE9*PnN1{`CKcfe>@k+cE3)sd}?d~icO=JodGV;>k8V)06ugK8!6{d^7`Ue8B zIBD{niIy|>foIga-~9cA^Gg~XN_yWsOmAoqietBM*?2 zjJt3kG-^#Af z&hT)(j6B>x$pF|_7ojo050X;Dg4G5cQ4;U zJpX+yb^e6mI^Ya^b^No8AV6~f`en>=v=e^e$gQB}i%$pJ>%t2uS7cN;s86&24mJF> zjRMcs((HOIBub*37)@T|qF9+>7rMYXghP5DZUn#+U0m1v0nlGG;?RDpL2|pJ6=H(g zy`xg5*UBw@I6SHRxJv1-K>x-{BF0wrR)u>c{FfOIeGF=H@h#nUEP#$-u+PBJ%a*A8 zE2sNV5QqHzt;plEu_IZVo+w_3BtIUyKxV{#BVayC(9VPcoTqT$>s&i!2scj(h>VFz z2(__nimgh}q-U!8l2!|B^FJ!zrh`gee9I7{w1$U!Kh5$HOXueTtnsBD(XosUTz<~u zpx_Bf1n?6QWQU5vwhHn2%WQ~tf|gvW{*t~3aSNM7qUT9I8z95~b)_pwPcS#eyf{9F zN>01)yA^ByWQ=`}aiYKZED!a##H*1wbM_F81_^@SD_6TtX*2;x=rjqweDwnknGSKe zyK?mjq0@Wx$Lb52T*(xfmwP*7VXV}Z_L4VBbKn;82O#=GM)C5zS~}PFizs?2s+XHh<_^#U2j>bNY8$*-aeb;0N2rBTU zp&2Rw+sq2-LewsrgzZA9t(0~@gnGO)May&Nii`VciZo3VsRXSTRD}yN*1o2Fv-IQj z?pL7`+A%ce>BEd!Gkm_^vg0@~=SEt=`F^&4T8n6+=Y)QGg$ls`HjSDH*h)MG>a!P6 zdyswgLJdx>^+fwe@1|%0INS5jh&@5zQ=dF?hT|*!0LRoVsmD0(g$DpwWc2Hr1*`3@cZuswT zBOgE-jk5i5pet6=9=OTuoycwY#@%J%(zy?Vg)Y>iGyklt-~h-irhYf^qeGgZ z=K-<^J(|KgS{0&%+bFR4lrij9b?KXGut$D|7+ch);U$uoSI1f^g}8ecRhBMKWd5UD zNr2>hJ#S}^Tsq}3)Qrf6crX2rc!$~9*%XVwY`<_G9s(o?hfko4+_Z`%rNK48Fwd?U z1m62ZALD(Zz`wkr?wFu(y>~Qu?aYu91P3B$l>*|Edu2;?cFMocQ_O)n%>&}PPQt{$ z1_5CP36Kz8oAJN5Pbi5mKz08_+*SXcM*4@inCl>`yZ$iw)UW@s7HAL35!W}3MgHOU 
zlsMae8Ba;CEPX0RDDuE8L|wmf(~Z^Tmf>H(0UCgMUj~97%bLA&IA01o6q`?zj%?7` zrfo{|nxr?wCx2MRc_UCko)OiV0~&xhh#o-=VqC_s`Qe3gv^MHfLe`4K7N?ov5pTBM zKp)(fV_qLyK}u!xYq4k6VE?AHK)?TAiy@gJ1oAb!A&7IdsKR6DE};$7>3&hnN&a?d zIz+&2EV~onR)?f12w@s7K=Rd@RqRNv~ng83X0I|<|!cEfec}Xc0iGG@Y zql5@a1lkE9vv)ts@2PZ2Qt$w2vV@-`&(40j%>O;3SK)ofL(x9 z2f>ln;21XN_Z6iMx=NONg6s~DMV(O_H&`GhVd~0}?LqDX-H?vkS^uLOM;UNV>iIEc zXOEDc5}QH*l%_%WMeb0K&~37ChhOh>rW+}xn0Mxj0&qOKHeSwxb3lJByjT{bB<8Ce zhXEHHFnd5Ry2)L>{eg;R+y6>uti=3jb$>)`5Aa#bOP^x?i^mTDy4OUjwjcbploQKD z-7yDkn!<^C%zz7&+7F?EROh$P3V>w?_Z;1ld_WzzG`%zA+WeCd&h|kCy{- zuoKKWIthg}BXw#owkP&7`^{zh?mtpGXXaN!5!xL6ae?bNJhj;o{8b%_ z()j<6?B-_|SAd8H@|WpsNwxF8%IpUXEhL#e`Eii$VI`4RG2o`dPn+k~+*@8BLHzs8 z$G-*xZN7Np+#mpHQdBh0l~Y?6KJ`9QI7=_u-}L{Gy(FMC9v*e7BP%pf(_VVrUj3?F zUQXy15DAN*nrriqIB8g?Y0XbA{kEm);?rKpk(vWRmOpv_q*NmXoveuv!t52h7T^x@ zwT5|0gw5|xbKk=iWx$FWRdZL59r3dAdSBn-?wpVN-@gZX zR3zGjTroO5T6*NM`oKsi34M?C4aWoeb{r3V5mB$;ifHty0*bSIQXwpcSvDBElczkA zIO{z9_kcBO)PJ0Numfjli`DQO)n=Ucg|{fD*lA`({!ux-0!UttYfNC-#p?88zg-Os zy$xG(b0(*jn@i~bfi=Y#Ao>~ad#?{o1fqv`SteukN_NO?7C)X-%l^*OpJyUsQ>UiTn9Jb;DX-iTVxE*zYc$i3Jve=@w6qL>5mZXSQtQnm9m{FlU zX6~t;Mu+-8;z_v+R6%y|ee#KcveaqNywVSXf~`{-D@>z-Tkxc1e@tjFfU+=$?BgOg1p~?- zwOiXR$I=@Ft-li1EXY~%8gxL;SPvvV1_;;w6(Z({nJHDM0FFoM9Eb)6DrP5bwQ*iMJld<9vjzt8Bj3;kO{C{vFQ?fGYo%oT}96t$!{^ zL5ZbA(LMc-(Vo8X|AWzrQI&bd?Wq}3QZA^bb*hF|%3X)gA+>m~G}d`pCn$Diy2m_3W9_)rfi-V8GswoNlT1AnbA zF_vDxqWDbEN^LO(c5YDvPdbXvzCZ{YP=|WH8doiuTwyT(8@UzwF~FJHuS2E9J`ZOP zO&SsgdKx9seb_=*6$l%8sRPM9`tvqt!x6lf4Zj)|c8*xP$eLsa^qd-NYZ*pug_46M zYp9mUj>FET!?ypfoAOIdqHC!{hwQ}*0z2!xbzb$)`^vF~Pwj}Yj5(?it&veJM_*22 zeHQ5sI9!)9c6;Voy-WK?j~n*8GtI5fq0U?JnHhWesLtolbB=x;$jm8I9_;*(DOg3W zy}!w>knR#vg7wm8`4~YS3g2c*0>h(LK|Tw>m(Gu-p7w=yyiUCI8)3cYzTdJv{J!Ie zLm{k@v8YX!P#d#(h1@2!OUp;ONs1*v9=8C?ruEEx%JJGjb%enF+M!O=fpd4k7d!-Po0DQy zZIf{XnDz5loHL7FJS)tPq}r%!OB^mIUWmN&ZKKt%dMYh5Eos771{(x#(i*#&WsI$g zm-xSb3n9+5H6a@W;RXYHdhys;xap;prYjR(({bJ00^%({OufvL`YugY_J{H3Wb|vWZ+${s!=g?6>F>*?aKNQ7Jxsyz+Ikyt$20s zz)+ia=+( 
zoKALQaQQQ9wMmp^Y$kl~ZpdJ7$SQfb(i3=*3^?v}yOD!)IQvYI4Iw2MI}p(ebs{AW zP2#zryjzprYPd&qH(;PFtYQP4g8cTIlgO9OiJ!20yg%VVORUZxkj>{{6Wk4O->X)2 zaaBb4GreyzyP-U=v+uxYnb}k5L9y4a22`EV)!3|2vGqfqjzrUKd1jO>k4C6r&t)=dCdqPSe%m5nJ1pl zc2!G|+>jv_CJ8O);v{t4k$XnE8g{1`lNC|vx;9_$&0g;DBC4feYN@`uvHJYOjlpei ztv8yl&n3?GXOr~iZDt>McjDVk?O1T-<;bRtxRm73a4IS}=PstM-j9xhTn1(Luk7!f ze_@a$g%RCA#l$!-j|YE@ZnK*h=j63jcX(j1_4D zArF?ogWrjj!im|ZPy~`KBHS^<2vivQKqFLePBrtLABzG9mwCfg@0V%kRan!vGBv7; zUFK<=VYwDEB`%!!V|Q!)0>o|-BGyEn>2Mz`RCue@yOn)qua+0!ChRfZic>p4jP4Du zG5WS`;J6vVxOS&3zs{~@fpZ!zutFUQx26tW2h$%FNTL9@`QOv~y?YCs)7B1b!p^&2 zvld|<(ws4_Kdtt?ylzYHwVB5hD*z^v*K->iuqF%?%u+u<)}e7Td(Y~+RnCTmXw()v zGV+&-kj*4sO%R(e?WX)x?w_nyo1cMMn+cS|Av^mYHR+rAY4cWude^vWR<#LaF;?=| z`^C~rkjzy<$uE4A;@nwE_uHznq>-BMEL+51SI1|M7C)LV(i$L(6&wu?nl*PC4yZO! zGy7=fqFu2hE4@OvH}gj)rfmJDKvJku4%BI`K4sPdAI7PAMbmA@W4O~WjwnIF@ymlN zad+qUdFnsh+zK@MA$D+qcBk%w)P~D>O;$6Ty_Vt--L42R+!f1;#IHoA3pr}tb8nC# zX0v%A>e`yJfc>@Xh)Z~lQPAw773>`FNv6AfIgI;E`x+chuMkjk(o+I^1q2TJH-(q_ zrKa!K?Jg6u(w>X7$Zt)BB((HRsKAaviztnGa7?zTQb zUEMwz_srVw5BxxQl~x#h&M>PZX4U3U6{Hf7v_sgpcTMM$r8}zztkVmw<-}OJzH6h? 
zET4IvbAR8T*Y3-4fRWjV@o358GHrtgt?@^>j2S2;A?@m`a>BIE=gNsC>W4brf3-{X zy^wVtz(HQtN;0TE&U_#QVj&MI3Hk$VpHrk)!x5k(NfP4et_rF$8zoPcB|w1~o6~NE z4#K9zo7Oa)@*FZ-N!g>yossHRp6jkqy-yf&Ppx6SAk)mbXN*g`zOc}4r+@4ieLw##`k_*L~Vl5k}?Di&s zFUk{FuqOI!^&BUBejC!jmi!%jn&jEo5r3SNW5v1Qw`L@(t_ z<%3;%cMl$ak*FAsy1Y+KyG=wr(8dt6JusIBBed}-9bpKYr$NFeMCfQEHbWMB=9sIU z;{=4$55DbvT4#5ePF$R0=Og{KBTO-%zV19#GW|kl?bZSLIX{;Nh;>f2?_9zj^_LfSV@M|Vn7=+L${XF1j@p@sDtKYP zE{JisT*O`muBJAXyTp{>aV748nwRJ-jIN8K%udSqb5Y@*aHHsK-q@Y-xlmznlG72c zkuX__uCrJ&K;sNP4cKKQpnFt9xo0&v;5CV*IGUEWFz=|f#ui*@{3k663Qlneqxsox zfzN2K%Uts-J+mvCLI<*py$=Ro*2g@^wJ2iaobB57^2O{Pb}w?rnP0k3DCM#`{b@H$ zguZ#oZ9i$Y`t$UL7qrC9$Vngo^-);c+;Y`)I|0K?detKYAAW9RA>kfhFI#$Bu3_%Z z&lKJW603ptH#!^s=Q-O&l7%0 z9#DUhGtpqf?BgU>lF+x%P*5J_hie)z$d|5(tF;3nE8g3v{-RF;;5rnFXp=HV7Dka{IODcSDN( zHOtUBKDPtB>9WFRhByKVV&U&&dn3|>U36P*lTUK(#P+Zj|x~OibT(n=*ub&l=mLcz9l9H7m$`E9Cycp5^ACfFTou%X*o++6Tg@nC;i& zx!wdiv(#P(&Z`SC{frv1Hlswt5d=)hv*Tpmu4WZkOJU_tmY+w@3y-#9R^^b#aw2Gz zwgYG{Z4(aODRK`e->WkDB1uY%wdKLMQ)^9eAhNu82Tjwhq%bVlu%WqzLm&%-)i*Bu z*~@74WfC~QWLiMK0HO2Xf{qSDU#!FE`^diDzTMwdK4_)|MU03_;jYDjxmE5e2^I88w1q0de-$QcVliJ49mB zIKK@G$!UkSHjR_tkbAhezd!0W5)v|_@`<1`$%nUzS(ZQ_c-Ueu5;ekk#@m0&W2lTXOKTi zgTV%mv5hAAadkoIlGM&|{_q#f$Doqj(kHp4qK5G$9bvI_?l(#(Llj~J5RX|&E@Bcw zz#F>lwX2!h1Z{i-`dwB2(mm^U8FE|cVq1xsd|O43rYwuKJjFOXtKxpj5j=G0U{KeU zzH!_LWGX81Cs593fd3Sovq*;}+GOW}s>`-@EOH5g%nCQhgnRn>gMN$*ZjJWU*DeYX zsg&)8{np4E^}4(7^r6&vld7ICO%0d7XeM0UPWt<|PWHbWdrW~|!P?&cY3i)6Ia~7b z2QAGhw6veA>y4^qb$H0^bl_CJ-&N!)DynkTR?l_6x-=q!8T3S6`{bN5KhO2CqfV8* zG-a=LmpEz6)RoY#6XWPtrEH;NgX0>S-Z7O0ys{p?!kcxE6zV+owlY~sMYVwy+Lt%* z7~8g@qW~X8az|1GT;D7)LsX=w8lz;frW7F|Agx=W%ODt&>8|6m+?V*=+`HC>vXJ>sf7FDe;D}vHjh?Y_UZklWvnmvOUtb9 z5A6!3G_IWdd`-JQaGfLv~qmM5U?q1*pUiSFLqz5L{Pn(kLKIVsCRr(t8>*YQsY#i8EM2flv(@nv9xmujB>c1mpk5> z^d==}9A+`*jZ83k=;tKA+>WyzUUo7}s(Tbp$j+VC(_vGbfip5h+Vox$SFAifF{47v z=&_m)pbwmFp`|y{Dk_LxaOT_vnQ_p5q{RU6wCHC4S&N;;m@u|Wykia9dud;i)R9aR z2Na88)s7_LJiQ7_z@hJ7r9meIXZA3`N}#&@<^KzNf}d_)w8ADc^f=m1S9u&2&5#xL 
zGd}*5uXMJA?ec5OiO7ib=>;y1-oI}xGA6TDwlts8_LzTJO{-x$PcfVy>Fv}d_NUAv z+g0O|qf4dcd3NQPO{`3r<>um4W>YLe+6%ErGHq=;Hs$t3jP@6pl zxoFOEH>@6t;XY-jPF60aVN7a-G!)IBb$wlaGVI^_4E_yFOUK%>l*F(sdL#V2H&wC+ zj{9f-pFxK-&LHsntC#?rjd-!ug}qoxIM3U|AC@sablhPT4^K4|zvzW$3I?JsF=mTj zH`cEcRMp-7FdNi7h<8_Y>|*OX+8&Ql>+Xz4=-)WTX3m$F21&|0bESPxGBMnp*DBIK znL8o#@lO3=mXjjkb`6)-m0X1zWQmj8bNq^lE<8|5zw%ad=eGuwB34}Y~FtS6>YF})J&`JJ^c61^PTx*<8)uG$&{LyfJMjo>f?zJw!6|AIxEVob7#1+Vk&Q1i@uVli#E=D&a#)kJ6dLCfDRigeXJ;blJ8`T3?-Ho!=#aMDRwSij)8gOQyPGp` zR*0`eGQaP|&jLXh2VdCzJBx;tQ0;wIT3CaSU+b1O27-n)_wvlj!8bcpkOxsC2%IZV z(F9^aoo4ULb;Q#*O_%q6ek}d(U8?}>m)Vi|W3pSDQ_810*UL;QCO*DhJ==Bi$oF*$ldr1gw$?3IHjAq$9In%<1*Vx}IRnd&vx-unLv1a%9V+gq*#kGU*(-E! z98~<8GbrJ$8lf&IQ+|k9KOEQSI&ml{sBr7SWI3bY2g2^YHZBD(v&%mofyBCg1G20^ zyKOUudV}PO^j(v7@*&Okd^Xc`ta05=CbJlee zUR%k0&c*Q%MuoiZaYi}rw0;XP20={CXH!!9Mo(QHUhug7w^VQ!;M*MR4|<*MY#)~f zvY!ws%iPDe zNP~GQ89;iHt>i&y-_AqraQzcXY1tA=d>9(ZP!1;YVNSr4Pmy)n1jhieC5SZU_3pR7 z3*<*Q^xSsoMj z67l!_+nBj*0#<%_a8_L-sz+(YTMy{XE3-bN_oCW}-PBwq zx#pi8yv*Xvpj+}m0d>m^wX^XGA0?sLWnyc0$seOo0ngV43-!Gq73qvU-MxOU?oSyw zXALUuP1mZ-E63D=?}{J`R1Lo^Xl&n`ps3zH7bMt}>H8;X2xIW6x^^5B^a7IsU*Wgt zTUJ!-<4`;^wUQTx5%a;;xl67ry6Syx`28BBNSuFKZt8qOT1lIqB^KAQ?Ja+hB{inH zb^zCJaxoTZHoZ?>* zyGQp!2t!AE%IVx`j4N^VXzA(Hb9Lp%FUwE-$zluBtktcHpDR2^ z$EUohWrJ)?uI?u_E}a)9(gLaJV&-J-A!Uru!RU6UbhMiC?BUDH+ib|Ud-5f#w2MwD z8Ha%i!NTEPJB{v=m5YQ0K~SZ9aSGT6(j0_mD80cmR_`?5orT$+w}^7?V6izVHIysr z8eb=CUo7qQeK?cRm^2i%X;9}~3ma~fmS1cTGe^|sU|oewIq>AUl66u|?TKtr{ggwO z-~_uAFcnbRbC3gp`1s=O26D5{4{p%Qpkup^+?-i0TefMOSCU{ zvv#zzMPmtWVV|Yf$;n`*);9!!dA82Q;9!YHpE>ueWx|8D_AA6oDw)6aD16$fW&>Yj z=6;f;njd|5W)K{R>G5)n5(O12D{$|o>clD{$Z6Ivy>;26?T#PEv+bs!_EB? 
zR%TMqg|ZJTC!ZjJ$@nz0&av_6ElKapB>+JvdQRS-hdTzDRKPKx1})}X5D|f4vJ+70 ziCYbZ7wgT;8tyfKeLi8akzo2AixS#dr0!(u;bgFHYc_vawPKZuA0F3g7niaC0AHvm z*+L1)6POM^X*!HaoF4ul2hJcQ#X7geMZqb=Q&GKblnDx5V~7ZAi|KjQ_nkQrbb2)> zfU9{#SKCgP%|sphki#3#a$=GSI2wJF(FcRwF?^_;9oTjCz~S4N617r>9zkK;asE?+ zXaWx@rNZF)?7^Y~r1k`fCwCeDqMvrC5+mPmRI$)y#pNC`yS(ys@Day~Gh?P@O6)?>yibKnbjD3ElIYq6 z7))#dCJ*ljN?%V=x^^HnzRYK3zlsrkjMLY&8S5ugwp!x&gFrKRhi<(F<4{5yTf(`m zNgc`cvzrEom@u}FtEH&irB65aMf~TFVx7BfcQW-a4v$}&+`aiB50DG9?-}$3UMXKs zS&cYLtu^&4QHhb~6H52euK<@=sn=Ut+&;O{v`CoKw*QhY@nX|n7 z!@oh2R%VPlXsIwF38;*@YosvtCVW*@DDx@w=;r`!a!D!fjBMN7E`pZ7#!`YXAO*&{ z%7fj-^-5;mC|(68&-tAD`oe5DHt$c+AZXW5E6@LEWQ4;7WA2kwih{vvQ~$@Th$5dW zF17)sHk@@sM6YIV(pj=3d+XuqvwkJz7Ayl;L-F}vuxQ|e$~I=cBuZcpUVq&>#5X4l zrpVfPlA~ju@&CJFrN?t^Zk~NHq6~#mlOj%sDg7nKpScX*nNPNa&7U+X8W0*2rP|8^ zwiU>UJ--6y`xL|YkstvXr|!s-%Z02w(FS-{<$gNmNyXw!n6T?rVvIO~S8>JB{+j`w zYts41O}6hi`;!i~=+;6jblKvD{qJu0tjtAF(Xe@qurUv&?+oUgS$tVc<;!yVX=?O4Uq8mcYvR9Ant^-ML7N=OlJ zEd(64a?BTs7T)Tc-Q5?6p9F9{b~sqV_spWCZe&D`+3E>yMT|TxK6J3~?6Q*n`mi)3 zHWD3aMu#NQh8ZHeZ9^2rm+Qq=WA`FmtFhSMj;@!XW2U8Dk0uSVnzKSqpLo(Twi^es zT`}&iwNPV=6&D>|Qf^DD0HpM$EK^EZkO4K0EbW`Ta;%79wSl>5xIae{j*b!d%azEz z;(ODxq(roObd=@6_I$UkSVzSJV^g;e_yYLoGji-V`RlLQh@W`xe6;RP{W#i2jNtLq zFtCk@xuLIY!QtbxJRi{td@MVdw3pdZsI7opXsk_K__{pH>B9Anv)c2t1aq530oRGT zp_R$=g;}Z>rEk(tl~SJO38l~}9_>hMb9=Q2SZs@IiSWb*L{)CA&IafjY$t(Q)53N^ zj#~&bot4!rW&MEQ$161$&wy!hedxiYjKxY89pC1|6Dj4D%>hG9VH?D2X*L?O1qqa` z-Zs7zBoGhwtMK`|?|3Y3*O}qZ#semY?9(Rf_qvz>J-v4WvrUc6XYKBmp#G1-Z0O!1 zyK;*6pcUIE>9$#8z+~NsWNYxBwLhg0mxiqEZ~ZMrleZls0@GaIT+G!MjenIK*Yneq ze3w^~Cw203lDaml@*kc?|2(X-sn2nI+|cfQS`2Z%tgAL!*njPxXDKQCNQe`MRpN|5 z<)g&;iM3CF9UcDYUp-)9D0ZqVMftBt33c8rmn(k4i9HUdP;T;3sTPoTcxfuhcRC#g?kcVuOHLO&j{3h zgVwnaLQ|(*xQ(K!SCix@9@u3UBekrh@BjV%L9CGqa zNi|Ik>Tpi-`$?2us>SRC@t@iMdwliV`VLHC=! 
zy$jK%maV^1trVQ)Z9#C1=+2Yy&uRC+8`yM}cV#kLzrW=sW64xX8#i~#;g6ZTz9;u( zG87A$YyK(Ar zLr~7Vy{O18rEuImGjJ&h_Z#?YfH%5pCGfLX?_XQlvox%%Pgc_*ErzzR1e3*uNKq!< z!zQ|ie)6X_U z;||~R7gySXzx+7&;)M+DgWsRt)0}_7@*?)?VoIOe!`=K(!Ao3;>6I7FP>t_giMSq@ zey8LNSdd~2h`XS^AV_k06RErRt{vgdhoZMbE^WTUZ)d9~B|d)lkcn7h+jY&*!_k|p zH%N-g>6*}=Nt#r-wM~Oy6Z+QH=;}_IwCllsA3iISGeQ6J%-OwL(P|2t*U#<+$d<|7 zpt*P}_-PBh9QAnb@%uj)5g5QgvhdV;iJo}br~= z_PgkJWx>#!Gzlfx+oQ!VczSCB|5~YXlN+E0_=WBqFBGEZKKSaZiy_O}a(rk-YA4?D zyOHPe#EvW9bJV)k%d<2q?3z#F7-+<8dcO6+Kp^iG$4I90pws`QhsM81QC7-zRtV30 z&k|`U^-u)dV2~(m%moRvtHEF>lHlf4YVDPm+Veh#w@=bt_nRvb8(3w#EK!= z@u^AeE8nix^F4-t#LXH;A^W#$0?AQDT#q{;{SLJlfdr6O#XoBx>o*v%yZNX)*{_%k zD2kB2gZSk$A-5Q~rr^pe9t{KQ??*GBIe!!MWjb*85`ks=!%_NiQ~2!OM{Zf!=rc5D zS$#J0^OgXA(MtbugtsgFa3L~BDfEI)r_qlGqP5zJ6g@uz&R*IO8Q_~D1Ic9(G7u9* zJT-oFQSN9dZWplfosbl*U%SID>7aMnJL?j#5#JhUx~_?hx5*HDGM1O_^$WFqxzOv~ zOVU&`w3oVbek_j2-5OWeR04-st744B&Ep*)xm*S{dQ2r|izbe;p7ks>fwfl3x89u7 z!K6OGcI4#!aFE?pcATGZ|2zaGvEO;xBzNv(>-5!I<{6-pY1XsnVs*`_CzK(ed{M?6 zH0!&ndayB?^CI%bP>r*(s;Vlzu}Gd481O&Z-K@pP)(%r-JvW|o$dp%lV&54eyo$N1 z+`&?gV;Wyr1Y)n(YHs*nW`F#=J&Mzov&t0%2K%k&{36WFUwwKe#O{3TZS#`mT}{`< z#E|eXIq`39cP@pDibf^&4)fU?@$}vL*5J z!LJKtoKj9}`3jC>LIO#>DJ&68+_8Xzof_bfw?^OYs5~_bsJ=PR!Y*Za$$Zk9%kam4 z2aJp-5=t6&)%Y`gJ-o9d{%2pVh?z~~C5cx~%MS^oNX^hr1)4VuxH9DDGMlA1cel-g zGS8G0&cv{|vutjn0Vd_@-uZ)ziNVBVyfq)N?Bd)d%Rpq^ZSA=7sG?$Bo4FiDv+n*~)J|HqfnD6WX;O&h7e0f8DA(QTQt2iFwl*_+m8Z4!D1v z0kG#wGz+Rm+8`a1C{C%MK)_bN0!sFpv|mO~ms>_aH@8sK8;k_3^cjgHGGGCDTXs?L zQyNJ#Q(*>g1UOPIh0yU_d@sFz{d5%#LY^}xJCfoLN~7Yg2^*prB%RM+VBDs*G-5y+GB#-V-#{@)bhQHHR*fu%$6M4kcso`Ebr9z&N)C zUANZ<%5lXY;Zr=)Dre4~+f94=^FmK+j^XF{Boo?|dVBEhE2V>1iw+wb2X=cYws|Er zwi_zw5=04J#TZk?>1gUXCT!%0(4nCYG^}93fp1Qx%D@jj*ZC4eHj|sQmB-@@&Aau@ z)Cvy3B0AZLBb>*-xtuA^e-i6}^LyPJcdcL*_~}r$CPZ`boWUI6h?{UiA%o=89Vy?l zDpQoDVVgzbO_)Hk^Pp^&ahjC#Qi49XJajs1{FtlMp!`{%+@6N^pi#9$7ZDS*C6hE6 zav`hZEBjrb1>MP}O>W$yy+-6+imq?ZflT1Zku6$nuhdqma%T8yCY_R|_YfRnOHMg> z#N)ekm7GSW!29_PyPURwG%zs9XwZgmo3fsot?Y%Qjyy2Ss3?y#?-Gge#_fr!apXdvYmhP6h0#?)(B 
z;!Z#P;?nQa8?d}&^Zne#WR=)?xzpnZYw8w73B@Roe(S2nRH~87efMlYPfmbbD|_If z!gc{B>si1eY%~Iy?N=%VH!cBgjs%uba*70R27?6?;MT@Dn9`edb|X@x{*?&o*xi1# zAc4zs3IV>* zy^l}tTjwIfB6m`{R&)A)LX;w${atdZ$sT<$OjZ+RH0rQoC$+EBU#fP2b&r zmmwNhhUpK>CAWdIp#1d7bKn5JXBjx2#Rm*4M=&M0%+w#%r{!`CHca7f9RTN5#2H<4 z@D{^D0^CrR0Bcjph`z$UC*vp9)s294=q}*-i`LbkU6LJ!zsY`?u?PR~L!f1!n)Dbg zacj27P~4w@ToKjQ)U1wloMMN;&FVeYdkW}IrtT-1yw6J=jE?42oF(3=2niCC{dBrZ z-=99d&_g*FeD^$>G7`liN6^)ekk>&6+O(?_VDmO{`yi0#-db@+vCR#_rZ8QiP%KFs zb?ZKdbl+Ix@2{tykOO?dh7qlWnv2Q&J9B$$Kx)gTQKan0Q6$|U!+Dv3MnEF5;@$We zKR;BNTtVVU9lAetUV^xZ=C_4;CSHH)g+Tx-@z`(QjwsNN%>C?(EAM!2+lHt`;Oo`> zi1^KvtW9LbFd2V5vogzZQ4zpsO8_Kfqc}i&Qzd)vgRH;GVspjkRH8VRP1HxYxs@qr zUetQe`l@KjuK#)tWg$fY|L30IDwI7MGeZ#CtOZ_=ywN{nJ33RnnwmEcPs?0-FFAFlnFQOQrR_^V`rQJ zBRMY-Kj;ku7TO(m_r=fEy}%o<>{HT#!cK}3|-;=poWSYr8wP>r9=JsGqE|S?#`pQ^4RS>0 z)@MC(J=(soE(2k^!k_4pB5oUf{g&ji#PX*;5kgw)aRhJRw-QrC>V`W8N;C`-Wx1ls zkZnvRY`&m9oUZ{qjn#l0F0&eY!5ia;6_x?~%VCiE?EYG3v=l-cm`=&^hoJXmelAiJ zd4+KS8^LUNzW6{n?CRZ@6UEbBXy?anb4|aEN1px#j7Z<-K?aXEZH7lgr|5bifq-NE zksp}C7ZSMrBQoIxxO9;Ax0;g0Q7$XSbh)sB@u_M@P@Y7a#%RG?n5Eb0YM!gO@oGQV zcTD?cpj163U?C8%opSD486VgnviteWybL#}^Atw=Y34y6gmouGl6aJUmu&`Ny1zsW z1yTilbsY1DHf06CrARLBHmk67pXk=Htpf(vEdg?pNR_L8R=5Bk? 
z-AR`u&L!YQ)SgN9a=S~&fY}4`0oJyE4U%NR3qbj&w;V(6t0zw6D{H7S`e1u@?1n$4 z1LL=T+P?%0A1z%H*0BX|3zz<35V>M1rYj9=E?GDAAXL*2ZD@EhibS~7*ZmTC3~W*J zYsLwLwCUV%V{krr{20d}G0whA!jKNI{{paZM$Y=el?|l-I|=|BUHXTJ=pVURgK|5e zbj*Dfe*vRprwL+)QI}Mfe69E$~8~N$qMW6?)sbgB0n5zfDsI;fW2$) z?b3f!lft=YRHSMZ`-%e3qxV{Fioo*&xT5tnW%EV4tu9h|cFrL^HTFWx<;a3ze6a6g z`So8u6w;efX0`eAjjjn$h83H&=-gBRAQ!U~>+1ilvA_GG&^G7;>2g$^7A_T}pSa&R zDx*ulG5o^+pi5s&pBQAvk&8a_L@@JwyEC?TZzF>FeWy9J?o1zH6D=>`pD1Fw;b?=%Bs@4JPxm69u3@C-Pl!^3!WvrJ08U@= zWl@Z@^Lyy_ZGfiUZFLroLiZ2MrLg7bL8z#h8|f3M|LG40f>=&jK3 zlV_?;a>zv`ERb93Xew=bW4lGvsT<3{Yr^NYeAus>;KrdqLx>{mLXzZ!X|a(csHrI5 z=o0J0C6UtZ=QuNr;@Ay8`hzMw}K#8RPJA3KHbka zI`EdG4X!Z3#isd5u8@SZamIoeECM+7ZDM_Ki6&5XMb&Q_s@0hiH;eBHydLKLK>!t2jANxZ=sTm zs*BoyF8WlP=eF5-xw+x+4f(gFVEH2;;9})d)Kz5F0{|aW!pwp4XJA~0tWOy5(o!@o}A3BwJcGTNQWV#9|v;vr6!8z%$LY=3H}N(i*Rhowoo))Zy}Gj&#^ z2!+w#eKB^KRjxMTo5_}7fTx~@?28m~JB8X|INld@U*ZRk>~+)3*ts_gG1VY*aF@|> zHAz#N9QgaZyniRMZEtf|a_gv8?+L1Lt6oW0 zJlY@MWXAxo`gzDH&vj|od70-3Oc^Bf+?Xx12!)L;b?^px{9TLg`!Z%;n(MlQu)p=z zRy3_)IVkJ&_t+%t!ei@a%H~>p?sqa!a9SJd2if6v*@s9k0&D1J${1JJt zUs1Uba*8W&eG5AcQe8X%UJ&la;hux^=$q5XEx z0|A@!QH^cK?wv&6q;%m5O+5M%L7PU%1Q=KaLlJ%IAPz9n(+rBBwY z|HhaI_F0^4;qme&06 z*nR1hS+9dy0`O*aOoY>g0If^A%qgYO{#CVG)1GFfz5f^D10AY$M!A2TPOu)XXfkMs z<0u<%9wZk2&r&vClbWpHWp{m2hU(|`sJPGkS(ypJ zzR#G=7CgXg;SkqzJu@7*Qwn!-{*-rW|2PDuLnVor)_-ga>3fDB%#K){QsswP<(>p+%*fLRLcvZ^^ky1`Ey$*@{Gra zH@(A+4sVT4)OEK2DjK`eo0isU0`**V8)u&bW;hW?Ge8mW%;{BBLF-TOQO13BtHI*+>k?|!GP)dck9&NPJzh-4aoRT+*Z9dddOW`*Y>m8dIH;eI zAyivNwf%$k|BDRUd(1+tyofE43F_Pp@%FVzU=z|;a@a>0>uAXfx5~@RYtL#-no>i+&ECG3+O>iI{TLTlU`?~4Dmg6&NGD4_o8%mGBk5l{ ziW@GEjD_8jl#c+-6^X}7Ca5p}O=udlh} zB(-&57{s3xyez_0_pe=O3j6!z4wrQ(pZ%QUz^4|XUD0V;tW(^;6NNO zvBp?T*rbtmOmyZ{v(QKuYlb_~Op>xbxribUe^Ltaoh8^B(341W$^bLSqfj?5n&J>> z=XWzseOA8Jq_jU%(d-MGsLPk#FQ*Z0PWhIEdEdudymlpbD*I$cWeV(;+e)s!06XQ~ z0m+D(pSRVI@qHtOqP)`}GWC8yF05az*jyyy{NHYlniY7|-^nGn7Y@x+$ktvFko1-A z3~0y*_kJ)mz-J3A@cP%M&OsH>xUx6FSp4g%nSTOAHu1cKbl`SI5mY3AX{X(i|J4WB 
zc^(V&@iJQlsXRhnUmfONFaqB?D_~F%zZwAM<*LxR>XNpTjZ2~CaT%1GFqIhHR$&!* zc;6`FUNsq$;O*N~Ogt9BZY9A`J@J(8BtxojLMy^cE$i>woDI#|S1Ca-T-Kn;Za1Fu zR*Ar|@jRrsxnNuVk6A^ad z(B9=uEy>udr%ah{LBVtU%m{n1?Z>pqc|R@12LtQ`QiBcJm2z9kc}fD*8!6&faH=nE zjsqIaY#{8BWjLk4(pieJDdy&*mdfDY7rI3o_jM4TQgTX^{AW&yX`ZP!6QuEmOV7R> zC$Rg$PpyDb$am+8`fDPdkz*Di14T>jR8q7ds;lCx_o2%x-KuyyuXGm2f!AO33Po~K zr4B|+4~{DYhqil9+7kn^iYj*t4{hqaPgSOtAa-)2GqhWRA_ zt&YgXgKC$}1S!aL>W*oh8e{&@;yZyL8)J}U6hasVLz33#Gg9US*i7zt;*sN~LcEl5q3NxP(z)V$I2PyYpP~|ii<#xp~%s{qWz{I`7Li}ey#M@+N>4ky%ynh zBF_g^>}=+1xscs@?TzAIRbmd5KW&cv`Tmb}_bd2te>_c!CaLV7d5*cUimb|d=FREb z*xBS;AmnimJ7xfB$3(R3K&{I(mT8Mlf?R7>O@7hBy3Bqa?dTj&*y#N7^As5T{bB5= zw;0tS6+}tE!4$PmW^2v9jQtkK(I&Cwo7}m*y5LK*0{_%qJbeCQa-MNn1aQf$ROSE$ zL#Xu4bOaXB0e?4vq@Mv$0Ffbva1TMreGREGYQ^fuN2pudAi&u0_X3DQh$h5I(;DtdsuBEhBMt(v5x%c{M|>))^c zRQOqp{$922M%Y`LR@<88yIYf-uo{9kUW1n&8k+8DO6FN*v#iy6T|zYx#;M1&Oqoh{lKXqEiFF`k>(^8uqxi;eWhESzf<#0=;bcp}C` zDh|-DR=wL<78pgmf4qEN<47YFgV%k$RWFi~``{t8T}B5#y@w;JRz(JFM9^bjDsIVT zA}q*gM%v<_w3LACX44D`j^T<}Kk~Ns_U)%l({$HfoC-LZG{r{Mu_GgTh^juW@$-); zPC=A|q_t=9srcv8#d!yw!*cVk$P|UpJB3xkUiSffEV7=lS$xzjIipsmX{&=3azv%| zMI!<3jWYzy8BXA)!i6Jzfm_^~@PXhX0JypKR}mr1!i7h36erTzuw1RdN0(~Xdje;s z1>9N^fGZwnTI+VSaL{_4aVm{{Q#Nj<$DvkmhxT7JM;!EsbPPx zxnd7d)z;e!eP$+}mc?ZxRk)}qTLqyhOsK~QwG|t-2wpg%Lf>tbiVtF$*c0tZ@dJTH zekbgXUMCW(Rla=Qvf1sW4FYY&DF=8>O)74vYrPU&IJ?D@ZzYUhpA_1Yfr)svzoXq{ zEvbKbDK|dh?il8>iXrBcT%CQ~=<0>5w*LNjpn`EA+<89~pJ1v;{gxHqw;(dMeN*tf znzc;5-6H|L{GWPu-htunehXXj{!20HeUK9Cnv2-~l@}^Iy)#G` z`lEWcH5vLD?{F0btvai6%Rg=ztr}hyMHx995U7k-Cz@_i#L82uZ}|`u3u0wKEN`muGP`{NTN}Mt7-xosK>eN=UJ7 z_j;#kKkIAyK#%RIq>_U-ti~BV#Q@I$#>!C3H6B z{eZ-3Q@5g!C1A84dS+%H`ip3(d&YBjvk7w@v`glNAQWA8l z!}m%TH7%+WEMHWv<-9W>PZa|E^@G>U!12myGr}_Sv|vPyOHC1gVC(a9a-gHChIb|lUh*r9PFWEdqrBpbX2lZR`=76)ux!Flu0xa%j z<0Knwi}4VDq6BOg$x`zA(XaS}qweO=-8{+b(=glVo|k3=*c;lZT*W4P8A0ms%Kn5R zyuw%>>Vu^;16z-zf3C)m zw-woSHk?lS^W5!-JXq!L>gmMESUSA7%^sWMv|hAtGfBD#t7u3u-RaS-*vuxT1M>{z z#st!zgUUMpO%HX3enUdpW;l58psF*S7?hJo+#qd$9;h5`T=(7lTlyl@5} 
zMzvoRhOqG;wS8B{4QELfN(m3&;>L?M!VpNy4kpljNt8@vJFRTzpVq*MJ?+T9-6;snEeKFMw zGM-bpE;i0Ao~jwh-w1m^^g32hPn=1RBIFkI9hP{G&sfmT=4z~AYqn5vtjrkO%)TOR zY;nAHzGbF&f#Z*(iLR|F23UBv5*RY4tTS&Sn6vaoMaJ{z1x#AJF zcQ-(MxPClViPn0&=c~O=db=q|Gf%7EuUSq1Xkcc5MSP_`cO&23m*4DYcqU}~m1Th2 z@JmBCiew)h)0EbzG5*`bF%J5A`(Qs+)NG%Bmx4Xz!E9BS9-`V?V_k2>#5Z4@p@9&p zoHL!`-0NH!;Msp}yy{7mS<2i8Fqz~t`DC7TRY4eaI7^8bnRxv zbP`GxAscI2p|%~J-%-q6Lm+?uzO>NGL`k;TLqKl0#vBM%kUg!R<_iC3&Hr8aE1jA^ zwuk3;H-_FTTP_r}9xaQ&iM#+ojCEfW@tpiN)wfM%bDijz4D& zt;^mb{`fGR79-5^b~DOq52P!%F8!F11te_UcwH7zJ0vOn>VR{2&&)B{zgIoms+-Q$ z-z9hb<%YbN!6k#p29?U2q+W)jlX@Y8OV$kB%biY>1#@LT?v}6tc)}jXb zsvA#cyzhCA3~Ny=!La1zJK3ph$KMi$R)Cznyw&KxJdiQ-?n~OtnDm(pw}1M+>x4XE zJen*gq2@!0Rx^9{0%vF5?w_!hHokdi=4Y5|7oifx3a+~t+hL*OCMcl*_ zb(`H;i(EN1AUX+lhA>mq?oL+UKLvPFY94CiKw7mcXpG6-EV62gDcQeWNGB|)Z_BXm z^^Jr2L0vVpsHd7^Au7xYos{WMG9WZW{=zrfi{oHwCBbkcZvn;&aS+nv+-F^xO)(<2 zbYA9VNd~%_lOMh)d@CK%V03zwIG5^!SMo=>;CM|`NvRpy_TSuidOiN#s%x46o&6bg zlM?l|38rlv8q=Ex{WkwD^`%v9wU4dcrEA_zCZU^j|hskFh;kk-HtA0?0sjF3OWia3dC8gR$Tb5p6QR|nnY-yOjQ^% z37K7cG?6lNqLVK^$1_Q2rCPOx5@m^E3}LeT$2bG={&6jz-Y)slxBIlg7g~pgb`Rk% zY^?ViD^|j6TSd+k^qXXLSL?tn`DN;#f4*G%Vbxaj6lGJhZf886*P%SWY;N42zaB2t zKu|Lnzb32|TQCNsLrKNH;g@xxzVDFxp^o~KNiBf}mW>g$y~mSUg}S-R^nJq9mEEwo zt6m-i-?(v%8r9q4FPvt@PmOAt;(1o%yIbj7S1f-TA96*`AEuHSA3x^Y!m2q10aBpX z+8(}{K6w=;DSZ31$Kv0F1MqA7DXNEB8eI$z_HTaapS#ns9_ix1?D>5Fu5a?5u4*Py zcca?vtcHB4Qe%5|yV|^(eLjIuVsM+mRjq#!$Fz?(MVj0otmTg4G}&CIWhpadO3m?+ zvTDU>a(M5g^M=Y^L1ihmZb>drt?pJa{CB=W;;es?Z7$-14Ay}6QvBaL%NBJh?JG#b z#*L!I4SAOu+Pw%beA%w4*_z%Udq-4;Z?iaJ3Vzc@rNiQ&QoDOrG4Q~o_mTzy2Kt z3Hn;S=EMi@iZQ@GTBl>=%+rbm>Ysia*pLf-j?;tMU&f2m=2Ke2PDJ)^ zo1vHXRFMlKja1O8GGZ!P(Wd*X;47%zxQoEMyy$q@k}yefO%*v&reX_hCBA?3jB|iR zq&tFX=hwNx#|N1_!a2rO7lmDTx6BYPZ(sro_iz>r6v$%_LK(ViB+XmXwqN5!r&ud# ziWZ;nOh6#_6LIuX9X?gjOv-p&=h+@O?Vk><0JP!9q4e&2eM$5-9$jHQ_3Ls7q*oA3 z9qfR8@C-sLp-aUNKd8!VdVIFJ>8!BBT^Se$wAJnXQhL;HFM4@OgwlE)0ydkmH=p*! 
zYo>%ji^HC*p1AOivCBTwF{-Z7q4(Wel=gK7N=Jt365A;6t~)XSM{))i`0n*=-aXgj zD8K!?2Y)iO2hyDaFnE+XkS{XTP51t|VQX6fM~K$af=Z2w;?5A?Xb z2Er20`(Ztk^GrqB35uogmUZPgdfi`k2ONDnYK|{=$JVzdI#Tr;@X*k7M?4G1id90I zpXzAIdSzGd5vz90H)mdyE$@x|xBAiCnTbDN*t&?;J!)+5C&2U5(n=wv_@y3>T3g8o zRn)-MBEi9wMgtO}ZVec*6qUzedVHgK#8CuCfQW}x&MiFTNX^;{n zRTz*il@#d?1(goz6cMCD1*K~M>5z_ZZP4rXx{vpHzxNM#j$_6OGk$06wPp|0 zlt~O$>oSUQwPh@e$q#fb8IW>FMUXCJMz1e{fcI8=fcRTS)7+x?}^X@2nz*=K{I3Z%aKQQ&O@U`@d%jjOzR|iR& zV%WDU)@6iZb_P=L8?-{Vx4V;;zx1VmXGLm@HFJ4w8?8a@+dNXmJ+Tr8u(x9Km^mMP z2^VL33R03STnij~2sS$k%PB%sd(>97+wQU6Y&6_V7b@$=tXeIsxbhW!)0X${8~$mp z0qeB3MAm!@yMctQuvD*aWB;!blO2fCuG)M2*8LHs$bBwE)%t)M)1xir9PdX#63ka} z%ACVTn~WrEv1b+aX7R}!+v8J=HqoD>Bp3J%lWwKhWAi^^S*h)&O-U~~?@cYfqZu?& zS{1Q*534|+@lLv%)0{S^82-VIlV$>h$@BDioC7(hRB@TV6xQR zy~Y}G-?1BmV@xqT4YGJ-oEMA7Rj{Sl#2OCT>mv^QvF1K~yJa)HV%N7ar)|IZ$f2Hr z`cuPQp+d~jAK#{8CO2yA?j(H8GIoUZ-f8M=|8^ws?L%79n7gVg|NZ?j-UTDsbA#DM z*`|9(hy2TOTqgNFDVmdq+vt+HN5g7T?TM3eDLclm94_X0IYr<1I^e3Yzf-V&U9(90 z%J-W433)Vv^4o=a<|Z8AIS|ceKgTWV43W;GY=u7Wm$$-qcrjf_qg}B(@pbYpx7Kb{ zlX2?z+m0WZ#owW!jI#fHCH@tZOFf*}+$aW)oC+$dNoSTcN+`%cyj>dzMl} zMs8Wx<{m;K2Ng>#wW!)q0?L-wu?6R6!i%lKzysAgtPc&Y+N3WfuGe|~kVtafq)hAh z*@`XJ^2_)v!f}2}s`pKLF|{Sca&WcuZqJhbt(t__a!&T~pTj-2i?VTS4@c&2-tge5 z@~>GjT{srVwe;G_l?XgS;5Z-8JRRXZz5@mC1-?9 zU%&aj-msdt#e3q^ej9A@{8G7Kb)Y8&*LOat`*!hU%5X5nIqqT9QAcccj^w3{fLF=x zi8-NkmFLjt%y7GQk>$L=JBuQivvM`A9b>Om?ha5Fx)T^YIgF7|8UONC*GT-~GXFwI z=DQ?&7B>faJ*l5k674s-Txvziv_5w>6qLH;iCv*IX8BsM$Q{UWyQF7v^WKDKT#R3v zm&plVN4>)=wfl|t_JemqaA;!nmv6bQOeR-!b9ztleLvn)+J3KSVH`(7NF_Ym8GbNs zQ(7VQJh{46TRBq~JTSU8wEpA^*rVl@An`dX7o z9C^wwuA~O$eu>;yljH)i-;UwqIiW(YIqA1wCe3?(^`3LxCokpAF#3e|gU4gW?ECt1 zT!zllM4et~5F?l4e9kC=(4&;zgb5D1+MZjJ8awMbue3L(4RuS$jHDjCdsV-c7cH`B z_;$ziPOo>%OSQK0o@}w0tcB@h4xV3}Za==jG)>YwVa(Tmy(aScuiSs#Yo9! 
zq=*;8VOokYgUZ`Hl2mP1H z^9Q7<4ETre_2?z%vv1YpA8sk+-hQ@A_r2!fTTa^E*5jRlhwTU52aWYIQ70e@m~rr_ zb$&Q+V}+nTRy+77I)2u8-4v>Z^;lB^dvUx zQ=&OD_G;udZU=J|?!DgO)x>07IO_fDblK!_;ft6G;#F260A4(YiHzxe39<7ZN}*eEWw zNX=GSRt0vQ@Zy+}{Oq2h;eGY=kxSM;@KK7xKVl2(qf;6V)1u%1%+U+6L2qSaj`&@f z&BU8g8o*BZT5I<6?sqZo0vf{_cDb(~zS`+e@A6#n__<)~i{_NH!A+SoT3vsk`N`eE z>iq)2l4{zMyPj_AuW9tEg_{qqTcar74olxidmI!kw%J)EGC2CcW@UHzr+E~Rz193`#58pbHZoaGZxJn}pJe1GNz7i;GdT?KzJ7&c-nituQz!_*Wjkt6+F1!TMi=1xaGRPw%!^$7&nC+*O?aZW$^Jh#i zP5NbklNeHCnoyz%&Y!#yn=_u#_8;jQFYh1y$UAUnbK>OvgQR&O5kuI;>M)JR)6J0GXn~_yK(4OZo+a?mEY4`BI!%pyW;?2XWQR3KO zli@obG)>yTx@^vYj>mQksDRGWl=@3Q2Dodj6j5D|wHw$kM2>a35$ku#fSiCGdXlbT zW_ke1SFz3xHibP-EnO%h%5M?v0H?E%k>HJ5=0+K5d>~}V)a<+tRC2~WL^Y)ZXKYQ-96(mRv5oudA9%&YP1|M!ZieDcAk1$(3 zaiVh0y|)_#bXCC+Yphd2XP(V!m7^o>$)TG_Wb$`x52uPal(|65Zz7 z({`@q_r8gO+1$eVJK!zrtOr%N@vNh3n~i^sHbd>(>(G0(S$v#;i^%VkVjl$WoNA;s z3~Z=f{FZOIG4mPqO_1>Kum}St!aR{`);%sL^19vgj6FEuO^BPOp>mpAekSZM&ErwZ ze~DwG4#RtR=70f>c`D>6Jz#}U#Y;-GK^y&Ec=_+arR{OnDpc*?IIEov;0K<}iC>(K zB&(ZjhONw!Yw0k5{9UK< zO-7h{W+$u9(48>>t_4WpZDlsue(r;+r@2iB?Lzon)A4;p3Wj?q*M08v00$Fz(ZMDY z)smYO*c9m&fw6r0>7SCrEu1d}M#ELAzggDJ_eW-tLy)*U4oTka{5kjNnR?aodD*?M z?IKp#|CtW_PZ_+l8vTs-#uoAE$G$^A<66;*`4^#oeH6+6Aq{+u6Ad(6pp%#A%UbWH zvoA`ZGVO4{2$BlZ%3MB6LH}Rv6Qyn5eFlfFu7k6G{@+xgLkp)o;KKvTPaJ-)7`I6H z)_wagvVR$}uPFd4qEEuT_-8{ogVPNt`_InfML<2ODD;3r4>>3QjT@M7KQ_d^5TPAE zmQ(aRbkqzedog5Rqv45sI05JbMpL?Tmv#Oy8CodP9=3Ar}SL3;MLlveX1ZN z)YYr9W%BLy5h>-neBFuelq^YfA+pTH4bPJCC`edr@CQ z^1SrBzhVp@X+yZ|IrO0fX`ETQ#_tvBfE5*NIBz<_Ivd^H(5{stI- zf(v|+w?!ha8kuN^p_rs)I~h1hrSn1j9PR1Gu|Jk@$OXC`O4N(fo-HK<=(L=IaJQCk zuRi*f)n_d)s()G8P51|_j`zZ(o>yh2NzS_9Le+^TY~H>adi&TaNzw)%mi@MV@4tv6 zF+lc@v%iYEx#aYE=WKa*nL#HGhJv|j=~w#HhnEuLk30E$+Q4bu=nRp1+$7%s(z79F z#DD?r*(;Oo_+G~9`t=ManRTVI-{r6FBnrs>3X}IIhpJI*S;|spPU|1N!^XNii}O&Z ziRZ07;r72A8Y}#2l1{0eAV6d`ugmG59K~2xxMziQL;NEl-&d4Rb=|Y_yjmacs0<9p zTR2Cv9vM(zoHY;!Op~CY0V8MP#kVApyg%6zGeYJ{Jswagfoz*8!UuPWxrdbX(@PQ? 
z!eiltapFt46A|PRXAp;E1O5H4l5Cys#P|y*|1n}@0cbJZt~ULzb^NWdGTqT{DqyL9laKIb5H`7DmNB=y0_!~{34o`DIi^`1w;lE7fU-L<;1Le)z{;>hzW&am7{xZ$~RgM3D)lla3 z{aPx>tQX-@Un5{}yK^teS?MB};oD;!{Lhy+TD6`;AKvZf**8cCB!2 zs2*j!ojiH9@?OSW(S>MOW%reo|eFOELkro%aqE zQ!_vI(WP8^6NaDMx9v2>v~tk6wmZnDt7enGVfGDh7)mV)W)K~|cgY>GKdG8JYWz(5 z+}`BE7-<0udq>B?l75RQJ_( z5Z-c5>h42c9~jk5A>W67^O!Wq>eEyk0bZ$wcFM|`bcFF!I^eq|x^Vlxq|UAkA5y^Z zO{VCTM%lwZKcdLD7k&Ay6YT%>TYuL`aTjxQjBwe)xGt`X;Gy4K24kdQD-}|RzqOQ_ zhA72pF*S_tp$_Rt=<75V9VHvlM7@<5`kzewmBE`AJbC6f@9vBTm2VcVge{|S(u1|; zXih`W65x^4B3}sl&X~m7{~Dbhk6l?a63q2;iAA(^w?AF9g>_MXluGxOJ-W(A)O!6_ z2QK}MkDYNvdhbai~kEA1Y4>)|{89^B;C&NxSM0cE;Huwh<(1pfTPDOavUJ!#E! zcIhLXZiMdjmZW75BT~hD79<_Eo3W<51U#8}SHCF__i*YhbJKD=F4xmtdHSGzMw@qd zgomzhKlR#b(-OQF5M!~mk=>=|KSPC-nhh#TD29L9w92z#=EL%$5gK3FJ*i8L?QUDu zir?bu6yM=cJWauTq#F18&T$AJawlb`x+>wjWp<#-?$g~WNEG= zD^bB7=0~+P@DO38GIQ9@G8>ADO@0!_(%^BcJZX3bZzm%d7e4oRxNmIyNHVOvAU0yo zWB5s=nWV*kSP-Z$#Ga$64-)y$4FR03~O~|uUcS|grCQIcYlg7CANzRAp(wm4`g07if ziu0h?7-P|c4dNdH%kjUWDqzTF|hk9^7fbd`vytBz^|Yno=U-! 
ziM!WJX}!^^^`uYjuM*D4H16Q6rJ1EJJ)1jJ(48$%@N{sjv_OEy;EKeN=8ZFTyS%8( z7OcF2+dh8|K1GDdKP3{dS((7&VJ@=6?#*9DSgSs5bI~Ag}ys{#~^6+zr zr>^iN+&M(o^ytIMAc$WW@MI)L(+I9!`OW2LcWb+bQZ12D-~ED}5o>a=?Nr<~!JXrP zVUm7iGAd1?091&sH-77>KBhwq7^*RCcP~G){b|&gYsl=&h_lOrsU$V$b_t!d`Ml;l z;ES&`D;TMNi)o;NZ+3Pig7S%TMiBUTGDuV0i_mH%8ffmfa0a`+ozu@!LOgYV(>z0q(+sQy znY306Zx0_7vnIdw4;p-X1Ez#nqlcBt@=mIdoFrI;!XXC10x{q&lvY2K4>fCib*rF8 z7?=q@`EmDgzB6e#^PGITYq!e$basVL!49#;3=5Zcd16ZRH4^ z%!$(>QI(0rhLmTDN)AJGro1{|JaXNX7aJ4Ni{Nz+novjrTftZrWiNenj#JMh#@W4w znPu%OjufZ7OO@I)Ow#^^LxxlL`UDQ_Sk2Tpjq9PUNwifuqDw&75q7335kOU3&7JR~ zo^Eg>-9#`+vXO`hI5N@WG3G_wyNDL?j*E4^K0L&{?ksIP5pL+Kggs~_-3odRI;T*H z4(d8z=_jnFOo@y%T2ImhM(9uZivh{;*(3Fg@jG&MPiKSl)Nx=)(_o($HsNcWkA1$s z?GHAW8Nw1PQpbW}$~LCjgj)x=Qut=6b0R+1Hq+MmosBs?tN+)SaF>^MjmjfastmD7 za2$TXQx4ev9G)QTz3fbpG|zK03#s5XQ}9b}a;A+Nz7NH(esHW^yDIc?$~l9vn8xg) zp<+l7a)J`Rl_`>n>%|AvUV$#`3Si=(S-|gY-g~Qxw13Ow2**S?#`qlX`OQ5?MY$Iz zSIDzbY#y58!hEHrPq3bgTDc|UGKssCQ9j4|5N~}kixRPB2~#|vp2#INN!1t;I~`OC z40-{r$7A&wjgy8PkfsAbqXQfLw4CA3>hKJ`>#UGol+=fBhG<-xl4Zt$Gx;99z_y*= zXbQ3jj;msz$ib&Y#W8OA4Jh|vzz?H*j`e+2eU6+H`Pd-zSfd82Kzs8wR}+n2>^a9b zz;<|58*SvwY9mV{dfIAers6!U-eCDkGn;Z;!>?Cf?|km#T4gPqNr}LL!}fbQ*RB!; z=(k>!f$&cV$iIT$0nxGCC_@fRLkV}ju9G;iMbg+UZg|fB4w#S&^N8Pn(7fjy7o%?X z7G0JzJx{6DFWhpYn!CI*6aQoaZI;$H1vnl|C5(efczT(4P#|@qmV<`DoZsxFrl*TNP4_ z&2G=9UjPfF!}PSFs5LEB!hZx|Bk&Bn^!e0s71NB( zo-a~|!;ljiuzf=5Ny~y3{8eWzqGZ4bVWq~l4jME4WQ#XZ2iLB3T6@y%3T`XmOgwlj<^gWwc4kOcthgph^fZV^e1H6s{g&jCW!%3>I!Kj}>x z*>_^S1{~=o?)6BJ<8=+EuPA4ZZ>||Z#oA!Pd~Zn8$6^_yeXB`$?V)8pO@kxj7tvT_ zv!Y00EO1W>+oyEh{Sb)bhf3yDFeHN?h67dofc>?; zvs;kxJanbObsu@d$>~k*SbJS!qmUtFYWr{*`L- zloK~eK-ZV)1fBPB(Q5<(K7p439W`)7YzsG?w%@5=_{advmgGsDbW83H3T3sI?lCX& zBEdQ_e1{m29tqg{rONcOn0~0GJQ+hQ_#r?EW}2z^q%_+)B{qj=L$m0;CK9-`vZ%<{$UG0|f$XhSEpWl(*d1`pv zz#Cw|*fI5a=PZ49zlKQC;7`Tn=AMHv#1&_LIv%LvO|4bjc7sOhFML9JKo^Es(}{is z6y9Fil;t|L5uY7^Ti&0ClzE9}*09G++$&+gk}iQYJVVAO>0c?r1UMe+W6ayyA>0ZN zgS&jBe%wh&0p{JtMTS>-O1^;v3v}2MbTPRq^ka=5At7SnjXU)mYJ)Rxg2tK(5%(N| 
zf4o#)VXlr1REq#Jz{5Ex_80m|t$?^piUp1x7{Lzo&re7{_59$0*C|ydJn`7hc@vdJ zQI(>As+$;oA}=^?oCFb_v*do_;1e`a>5Y$1E8_nbDu+5p56}% z3oV>D=+rwp^Vo1{AJSea3uOGTF{PrP2%KxU_7u8kK=ljd_0uy#nKy5E4i?Ucfy1l;*}@jZN|H%qReU_=(8n zjcfDtiVN4(w>v^?tiWkYBEkN#d7a0~^PA(vmAI`o$x3i%0V`&hG?986@$40u-?A&qwli?8FWj{jr8IxC!s zOBO*jNUg^R#{_nmlns02Phbe1W~@ ztfpSa-|iyLLzw8LOP>cH%fV;D0@uky9=vhq!>2=+>t{fpqG0>h1cKy`QyK06se{Wc z91QnKlP5l6SwH9lUiJ=+|)GY3Vh^rNj3#4F+qiS zq{|7QW%}uD@IAOv88DiXoxX%FvBaya8D@wgS6 zslAqI+vdsP(je3Zeo5#TV7HSW6n8Ab6?IM6sG6G%Pk8`FLqddG_geBj?~y^G96mKA zHZudsCo>_XvXBr)IPW#mfJrd{Sr;(s@?xGhL`u1Ill4vCTaCKm|tvQjR2O2xPp6$D<5y zAPF$_H;jNk8XyWf8(Ig)p3R@i4CY^tZh3R-*QR>U`8^4hOLeD#8#R2!S{vEQ8ys9k z9fG3@xPPAnm?LReHcU45JoYUh^jc|*o=ZH^3UOycaTEKPb{Bm%Cxg4heIup6_Tnr>h%3v*5m7WB=S}j06kju$kyH*t_BH|j3^cAB7Z)CTS)0>G1bS0}buH{21;g zSYxSxbr0gS^(6e(i$PIY4)76lIlhEUQJtdmT$jf{*DrrGn5I}7cq^K~tFk2@KbSW& zOCuX`=CL*#s<{bxVd!8EeEnH5^6Yk_kVFRpXk-MhjI>ub&@fB_mNO!aAvtCMoPGZ* zFdQ&T91JJ|cOl|re258RGimAL=YhI%yh1)(CCU^f02t8$R1k@{Z9SK8&;>5?r^8}k zLcYzKfFZu7A+q-zko^uYgkXT)UjtKRkVTMEOP!QNVl|D33+aK-C7u;2WevRE-1*=A zdrp}p%e!;!UQiqCm zyrTf3^MQ*j;i*j`<7>Jva6xzrRs(EZWE!_=q%;H%QV+LSz(TipkH-obLSPj!;4UU$ zI*T)2h~6mBImpU+1cw6R3$QU9^sPp}Q^WG{(S;W0#t`rEC@jMT^b-R2U~y;?S?G=$ zAjmL=g*rjg2nHS$tBT(M5Q%C989ci0xbM;_FoPvS8vDY})c$c*BM9=5+@~C(qg=c& z2=|pn@|x~Ri_%?U2znmL2@BK#xJ*t$1{p;>Ad={ug6o;QxFJ6E0;NJ7r85Af;v|04 z>yTebVjhoA)&|*?j{zVjsTCV=ngil=rK0KZ3}czy55B&ObZ8jX)w6MmArF7|9mLJ+0Sg`az51L%SY4a0F<=Zxs;jQSweh)rb0W+^i%59yRKXL+!TP`U9 zzo~!o6lp!+cbjUpj>Nx|%6P3Bank+bY^4l}Cy!t`Ujc)8$pR?Ydj_2SSy236&K`1M zm$blGCVKNwu2Kq?(A+#y%5f?*N65hyo&#v04+X1@0^r_gI_QrdfFP0&DDYljARVN@ zkjUXKJq2uA=b@ymmai%S!?#cmj+fgRST|^Ud|;q}+U(O|v$kPz3MYVsnkGW}Db!7-Pg;OyM>EMUSU|ttepf@zE z5L#C%fw4UfdjZx!p2-FBOtHYdRiyWl0>QdM3KLxAdYi%>%eN6i`;kLnnXR}GCsqK6 zt~EVuSqa1nr+p{z?j9tXOCt2hg1f(8u1s`}+{j3S9K|nPmql_Ryh!)6CL9zkT4tpIa2}%1*HfjZ|Tfluv!<$A9*}oNsM4VOS+2JAHPIkuw!Dt$T?ByC9N`BQ&c`qXloE_Yvt?B*=7(vwQGWb2?w|vh+3V@N1HBPa$hcq%xt9C~^)U3Oeuqjzf 
z8Y&4uu!_DS%PZ7_7YitcRB1(2tPF&oz&EXNLQ0#%DIdcbnac8ueOkXAk`u5o5I*I& z=^{Eu;m;wbhY13QsyzUwe;^J-(N&8}y=M~o&B+o3a5km*1gEec;6FyLt!xua%lkf$8q*Mm-BXf`+{q2K$j4&ji*aAY=>yQ$$=|2TH z?mXKFvSs9L;0mvJnc<*|pVL!9Y?A{`y42W#sPiCEp8`TbsoWn&2cnT~w{}?&ZS-*S zLKp?0bi?KO4o=euzhNJUW2GnlJW@blOqg-2>sf-RiZ1Q8n>wV$3}AE#I0>YA;Hp#w z$|%Q%52D~vW372ek?`R&b7ZSPOq9qhFsHB2Wq?ppcEX`Iu;U2W&Rb!N74}e6m+{=- z10WVlwFDTP4DnLmya?nnkQ^u=IplH5%kb`e)0Arf9A5!3u~scT4tk|m-T-QY1nc)A z2VVXEDo8NE5=_nk43NqIi7*0?-b@aKe{d)vL(l!+mC2F;4^qa9r^-m+o5E_ZvAXUd zZ;kkqo-{H;thVs~WAG!4yw&@%eVDS+xM$7~!0%Ns5kVzqyvwne;@9w5+s4^baFqlf zQ%HJ03fohP_B21stPg+*4$mR4+2+_h*?SP$5xV8u07P|4G0~OuIeYy88;Ei}QWd=!trk*O#r2MKro@@Qy!vKxBG)HAhO6=#FFWU%L`VERY6C@}r-fA( z+RH=LWl9C5j&qh&l8aCKbsg26ZzQuZNVpw_+K+gdSDX;tp(wwSDCI?^>TgY(t$I-K zeOWHQGfqMvZ0XK(zk%jOL(8c!pq!U+&|`Y~^Q3SXlnQNba^vg$SAOH@=W|#)*@cW3 z!z9W+vpBlb30OUZfEa&^elUcx{~`0KetqpKuc}9 zMS~~a;jo;AStSZ-Jd%`^ROx)cCvaq*%gEon-}~W`%?GiJ+mF=Jl(bqkPS_dl<*_f5 zF_~gc98_@27V5THwc_ajxWw9w!s;J*WxT#P`%=uJ^eY9(Roc>_+@BV9z(~~JJL@lC z^yI9(^Y%*fvp^qCQyJdYcaYhzK$cZ_u4yExtWW67?QlU>0TQnASf!hLuNr##KU(I< z%Y5xmm=ErL{jo{|J(e}6Z9So=5yNH%OKb_)HoWvQq+~a;4rWbI7~_BbgC z39oIx@ZvZ&yT=O8MFj@ln@PIY*c<;MQz8Pc3`BxN+^i45LowAxW!_dw&hoebXeAY0 zxB`IxE2Qy**7!z0D49j?Nmk>5=i5x(FF+{(G8*5Z(_ju*k=t8O$pf7H-MJFpnf2U3 zFCC*~YD+8odg-h=dP*Hej{tRY3Nf*I<~q}3RPdDcHESx-77H&EZ)LSq1uadMtE|?1 zA3|o8RT)qNEx8PxW9~1k@}$%f^o66ro7(zDgL`Zan&M5aE(T(0jeo*o%OV>e`G&FO zlKsOe!}ABWnKqjT`+H>(XG!{_$C2G8RfSjRf4qf%CHkBR1Oqt*P`L3X&%g#$Ly?Bd zBJ`)$6vX@g?c1U7OHY>9wKl!GRF&Sqt4h_P^1h7@-i-&tq&1V22J$oy_~{lByDcVX zh=*%8ex|UNnfH(DwO1qJsleCKWGlC0F-Z)0Y)xj?%7^&{5y_jBI|;WB9R(cERnGN`r!cRApI zL#dk;a5_RN?dBjU6frIWDFRfn{3~{w1FpiF6NH5Z>dkZXS%FDBcorVUAKeR=r6luA zXHz&upU-x_2-%q*b@G_+o1VL6l7Qb7B2_4x&_Cp*G4U`*^Bnh`aqFEJ3I9biyDg_K z+PWcio|A>p=B=Fu-7i}0@!VI!mWgldm5PSz^iV%KvK68gePmF^lg!F>bnGSK6dw8y zoArwtAUHmbwd`gvy0V_>5jiuk4`-^_X9!`d;0Lp3bF;86!QWAu}-By?&e|zmVwc&a1 z#Iy5LnrbdO-PeIk@cxMX47Vt1vOrVUCr~J1y<)p*!Rqs89b$#k!oN!sNtV%*ZN3zVp>9P6Cw|;3-wd`=T9>uz&X~b7bhPDT{us!)(c0FOImEA 
zM4i?pnN`|9e3-eZ`b)3!W&h1c!5bs>(;b9&L^_rk?JgzQwqG7Hd}QX5o&9jB{)9>| zL6ceK4TXsKm?ij4VS>?jDB|Lcj(gFPmL$Os%uJ!GD{x-7Zzr*iYbTt%Plu__D9~nnWldf+Nn;_3NKd^krm6K z(OQ{$&(r)y`q6zW`e7|tlX&^FY4<<_@UW@oug>ldf~$t&Z%Seq zDD!Ggb_>}~$_;Gsc<7kAXFE$NPx^Rr)doTZ<_LremcUz`>$IfOMn3XNJvtKEN3CCP zuEu=uT(h(&-sJJ(g&*>+^y2(wvw@I@8RWQ|RkAmrLBT3pm|VZ_L$OkMQ!HXM6S($g z$9q;epiNdbXRO<@raqg~2VMD;nN!cQcWhUahS^9rAcSQea z$NI3~Pv!YQFH<-YvPr1}%5CGEy;TQtLMVHl)MiOM>J}?UU+&9pEeVPa6=6fR@W4)P zO570h5_>nIZLDQ3%m_&NPZo9>UT!OyD2BGs*A@HE3d&6w%tfE}~VO zl)S4E;lW;bm`TOwImqtBv*gOU^!gktCoDatsRLGzJD1xzJi_Kwh~8mc&Ure(mmwXx zR6nnmOciivKfZVY7N&3dJv17>M}J5$;e|$oJpcC^1F|2VYfY!)8v~Zdb<^HkSiZ%( zaN`zfe48!VIim|Gtp-_M9ZTw07g8U$S)~mteX|PYYi8pNoGU`R)IK|oj7`1PRRp8N=r0Yye^Olio4@RHq$6tAHx-3PVC zPk-q2T#H|zJLTII2_hv1k~*ZJha+ntIn`ff=eeASR+ zIi~EiqB(FL8p$eqIL!QdQSiNs*0>_pOo!g=YH{3!N9XOyX19&iaTuu+R;G8JQX-#} z_Uh9IeJ_$P<^W4)WD4O}Wa<48`u%FSAe})LL5=CR%&jiF=5vIYpX{JQ7-^(&;%=1b z@zOf}9Z?(XlyQ`5)xnP=IW{F_mK#Z2Sas$zmZo(6!?9}gb?Iq~Jj6$3Nw-7%HWCj! zwXQicH+wS+Iu<#;&Xu+)osDZQ^Z0~o`DU-VR3j^6*GRB@4m?V_PulwHtd4{4#xbh20U zLhVyym&0FW3@}EEQ}#n)!{=OYr{V*y^3OAU{H|XvPswh1bm3_(vWvveA4f|jJqmbC z6R!D_7A2;;KQ78p;{Ck}9^#p*uw}SVZfn*&a*9%@2Q%Rg+d==vceB#yHMyD=Ho;j_v=0JnPe}-|R}> zhWNI}rrEH3RW5be#(RyVz$Fcn+qD{utiK#?D{qCIx;yN0)8qN?jMY0^l9sm;J|DHr zv`Rdw@q#al*C|{wtM3e4IQTTazi!h-w7fkn7QGe&j~2DPw;6Ugq)ET>jEI~eEtGX6 z4!()+p|=}f&o^oDg_v0_E}$gXN>jYrCE)HWLrZ?G4@~Ioc#Q+co!zx>q=`dcN*Z9R zxy#Xs(N`MQy{@eohg3B5)TbJ5b{Wsk`c@RqK065G@yw(5jJ$AYxp(KP$$Qg#hdP16 zHSg!=PWqY+BCaqWPze^8?0#f{gNny>>OoM}0RU8_0OAcs5VC&T;N^DUH)l<$f2gL- zO@BM=YW59TrFd7A_2-&e&CkgFb*@E6{w^$efg;HbI+f>SXJ=lI|Z^wtGEJmB|5~8)OmH8Z>C2(1S0qbp z3pIS=XH_W0!^v7y2-~%KtDFku)D7@_OC{aj>S??T1RbD@Lzpp1suiahK0Hn&F^~1R+bISHln|EI5 zE{}cMoS!RuQOE0cct~vL5gu+Va#L6DVBV(V#Kph=ZpKCxyZWf?BR1E4!^a03b2p$& zd=My)ze;z1_3HZumG4xxlhq<`q{Hq<7drDYE6;dRF4epsvc0kRb?UKbSDwm(vv7k+ z=zOCLy5E=j+G<_bbDi#zgg1L{*e~2kPbAel>Y@`Z8QD0v|KhyJ+hgfv-GYc!iPzRd zCh9*qKVuaw=B)I_t_NSyYATC+Gog=r>Cf3=y}MQh~{;; 
z|Hwkae#>i9_Dgx9V4lkO`1ZiIKgFt(y#>e0?!a8%&G-{Fw*nV~EGB$3JxK7nIIM)O zma;FdO><9*bhj+W$mM>o`xbVf_V)9fK}(0+_Xwdy@t)9!IjyV zVlP@tEakb@FUvZxrgiXjekD|zbyq?iUrk?pWAuSjYyWQ9T}SffpqUZZl@DL-6);HBQ!uMi#p59tFoDJv&2i}lJ?1M%g${3Q$H@;yB$L(sIZy4Q`i?tE;qkFs$ApwCPoMUTV`sE zhkCy}A5YR(QEw?wPLgV_XNphz?iEPkeHgLV#HYfo7*y7&#!NMvLAFr0Q|dUqB#}7A z_emz>AXB%_T>@;OkscfW(g|J+su8hp z3fw6^Ozm#{q_7-#RDN~!s5P3;gv#>Rn3rd*lnxn@Kb`|HjGN{K^e zl{y*ABX?s^$1`Myb~%IVsREQc-v?!(eBqUJ)@|OYgb4I22)h}J*KyxhTm_Y zoul@=ec)J!Gt59W)VAixG@yUe=_orxN00tpu=}#J{=(wp6%=cG>Xaf(V#nRYV}hNu znN;JKxy+QkfoFxB+lP*+3SQneg2YQQc8~QxhH_qdIWQ#?L(UpAkszaa1VuIIb; zp}pF?Jx%KNPw$#K{8AfMUXOMr1+vopu);dH*O;d(7GW^{tJS;Z$(rPZQTG)$Nl-uq zis0Wu-~WA)HAW$d&CEP;6jdt5O?G%kLvS>*NT5J z-H2{HrsV6p&su+BV3=cURGeZ}sUe7_s2ARym68OnK2Xw6n7Yc)XbORe3_+wgr9t2WTOEE%t{?8#1LR&%J7Vs0umrGPzk3vctIzA&!g#0tLXQwtbxc9&MPKo^ zberv;I~qQ5ODPNtChK&gulkBnl2)Uln$y-)MWh^PH&x!7ryqp08as&J&&BD`6jr;l ze{|&hoV+miut~Hp(%(d5C=}a1|w*z3MrD+P9s=?F+X~yxk>pZXAbmQsG=<2rDSL@swIymeLItsg*|28JLwpbvg#&H9@^5NlB zJ8@r$!PnHSdz0={BW$J9hur)2UAmd$C9&P9JA6J=v} zPd7VEY!vmHD?(}9$z6rC#)GqZESa(U+n`mdp;Kvu-wiU3oiT=1$ul+2K}-3Z~tUZyFgh2xxWtRsWi; z3?<3ur$FzJQ##CLUd>kaVv^^9TYJ-oNwWMnf4m^;ijt9oLE9hKe(a+<2{%jDIQG7) z(9_wSd<(=TYx+Rw)bm(mUG=)jL9z%%%X(1xh6L(z!|72DE;O?&^x&Cw*dGJ;_rpYU zsF0~h4GQ2Y_Fc!h*KB?=GzX1vJUJiI^&ncXXlRr zTry7!_Z?-F!smwlJ&I~nE6K|dwr|xxq@*^6meab$Y%#6U|}Az90L*Co!VMa*(zD#X{VD z|Nf3a`g`x%DY~SmtAp=&5~uThc-8^Zj)REwU!P{;)uvH5Hyn(q3-l?)J3kNR*7rs` zneA~<)QEdJFUGx~YSt59*tgbI#d)!T;uv6kzdu}ePUMrYH|Cm4uE`a-=Ub15g|+s& zM_d(@qjsmBIXpa3iO5T54yMoyik68M(V=i}6nr@9VV}dBf2WA$5$@v6g(#6RGIc9< zQPWFwg-_&P65q~ner&uOr|59;E%8uQkz{p?aC@w%-pU7uO#o_Y`2f^Pta<5E5+YeO za#nk#v?BW662`{eXD9b$yYPM_C99UfmYw3~tHs0fD-Y+2@{M^N4pp)6+LL1KjcV0g zFVpwpZ+l#!v>;_xZEGJ;N6W^ZRL~q9!1Af84F;qd#=E`0@y})y*v|X2y0|~ET5smc zD0Z8Fnfc54xrU(>DoHRxEYDFnNrIgDt_co}p_`)HGRJ_gEn)A5))fM9_y^nTv&IP!z!53~24J+s!eu654!y>Xp`^RPEZRc747Tz+;h`yIxT z+(mTcj^$!$ZR)Sj8T&g0nW#o>8Xmzl-&S)a6>f{QJ~`@1r9hR@_S%u}cIjFTGBJPg 
zaOFr$s!ksjx%eY*pe+>DPYg2xfHmT34Gx~Sg<{ep-ac$_^oljxBhENxkX5M`U)=c% zas@^tFgXQi_`|{NBu{Pc5GjOmmnPAyP?yNa#u(Bn@^k66ygxmNi(0kCV zeGC8Aty) z%4U-t&RV}VOit&WFLAwg==kuqib&MFk{H+aX70o08r})3z3+r0V0br+@g8U0&<>#7 zL7vQfHocSgtw*2-M5%THjE5Jw` zZ;KmmwqjvEE0|7GsK#g-!1vsbWYtTws6nrKuia*v9{03)d?+p^37Eut8IQ^kxp=F( zP<~9fyI8$6`14x!O$Tjx`voYtb8oi!E=)SGF-!?ODsh++&Id;!(f>0iQ8EWr8t4TE zWC&7pdMFFXqPMw7+o1*U?hPysb2w05)2p_xAWMBB zet_BwYcw^>=3^Htt}FEwqts`of|!x=bKPv;9)RJK#5wI4z01Ym({yNRH`AZ}J-ic9 zw5ct{Ki|R7Zr-d>85LMd@=*?)EchZCoHBPK)k%!5zjb@I2X?79W?H3|PycDR>TTo^ zjY@Bt_o<>>;Iqk}<;nBtB@=N2fdl)^(>ca=$**8QF=b6SCUz(9Sn1SC zk&wdIvBEuAe?6enrBOg$7cU5IbSaR|c^NIy#(!$|9s`k}bu1m)x$OKfMSJJ7R^XhF z=QFd`$ouwuzUj;3XgUdjBJ`S|SF*!iaqDsnXO;SuvvG~*8C^$^)OerYF-I-a87gMp z!UNOT!pTqRSRJ2W4wBC?M3aF_ZLEcK+epYYrpGQfp{Pj~H|99Xg48He6JGI0Ye^Z$ z>70Ml!kElHi5*K?bw039;i8dF-i)=R%6|b~M~lghP5Ki?(+Z&UsT7(%XV<%FLxtT! zR@VUa*Chm|%n1|6$tHJm9q;m9%n&^adRfU*G7>k5T@2lD-?qHiHah5%TO4a!Ut5N(i6dnm}gu_7V7Wp2P4uR%O&#LjF=k(V+e<-nOxup##K)P&-SSouab zbY0>8E+T=ut)fE=#XQ^4He9)oL)tbad)fch#aJoAEpv?KcQaw#w>$NjG(3TIKQfC( zvxSJEqt2CR|D&|(#4dL)-$#X+QIFzCTl2=L45>z~}X4pe@H$n5l2*4MTZ<1hH!1QLw9Ej!ZLs zm@{Wy^wLVc3(8Z~Mf<4Q_@IPwK~_t)zj9r^qewL69p4Hpd>6NFW8P|T6mTb&Qq+*w zbEHXPARk;MK5h(Y&S2MLD*ago*ffC~O=}nwEl3uSR-?)SH%K;|TZbD>=>~U*(-mvo z9+*bOoP#HdT;Kc}A4+@-4t>X;LZKNwnO%^Dg!15LcAgdoZtgUDa%5?>dte*9hKdc1 zPAuBP!hX2wjDK%(ZzIsp-#P%6c(sbg^Eo>icC!OG27Z9eEvAzFQ=OzpW^~mAY9KFG z%-8o~YpQVBvE^i6mzwAGFlX>{WK_p>4~QL`J+MpSjG z1(;ZJS0rMt_O3SF;69sLET8D1uz!@Y9Zcr0Z#UC$9qOEypq|yo+?MaC_yVNoeVnP> zB_lDt7$ZW0oLM+I(EpRBg%Ojxc3?JK2@#0EUpY9|?@Jv7HuBhWHX8tgS2x=9qLD!s zLeQ|;qI_x0Nt9#c3vz9Pg7nV0ax&Lq46L~{7`Qv2d>)j*b=)c0c%tR=CxNiYIBUvm zDBe$W!MwR$f5aY-ZR`2ncKrtw);!$)ZJ)!yFYB<=gH(1Kxkl6z0sDsO=@K`l&sWXu zKR;t>WE9fUkI^J?)&dI|ehtA&AB?6U7RK3RoT&!6Ym#3>)E+ntbj=sbM>ZSbgD zXe*VwyKyPy67CpjMzWi3dL=hdqFqf^s!GRhs(_NCYf4Ng1+RUFP=J+S_nZw`cfpeP z@W@?{V;z+F+MU;zDQi+T8UysrZ?B3W#xyzvcUk_+VuL*8lp*+ff44@aaVmWr)^dKpBbis(6?hdW=h6zU{cueNdvAxf$A{Vt- 
zbK8B|Y=g&o%_g-Gl*rS#6FM51$vSa|iFHna$aHIWcj6Pl=aA}Ae^vAPg9N6&VA8pL z+`-hvnw?iCaUhd1p}G)(mslzZfCO6ha8kSYhj{^ioa3}IHSd{g9lF@&Uoj~v4G+_j zx3+iU>oZJ*iQiU_{11GrU_R;~px){HW@p{KL`4nokgbe;>8=}-k4(LWkCHxhDDgqj%g)^OehR_~FL;4)xur~pYa+J#&el>^_|26k29p@E;{(A04F;BT zR_(Yqnmr(=ILf3MPicGz7l%KVGrB}^MY5u(Dd=`vDIMm= z(u;trpGG=u(j=u&!w?JeWW&zQ)%IJZ8|cL!c9~Q{UhhUhGO_M+p-44lT_j)d?6-utZ%Z}|8Qc6pq7tdrK$ ziH>LyS+j?ttQaR|<)`>&u^U&;pW-b53f*NIcw^cxB9M;(j>_{X4vV^{nQhCrq(WZM zA^ocDyi-hwU|fVrCMS$Udn1tA^pR{R^HXyIbz{Zd4ool-*qUWX7~#{S{{auIleBa$ zuhyC79*=B|T>LFcE#5!SkGBtyfmsXf-S#ix!)l1R4@NW~BF4wPjW)j zxgBXav6#b`wb5}^Gx&unNDTCHUh+;t8~=r!RWkBET9+eh-ZM6#sF}eJ8YI-oJD?=) zbGt!XpRy=31yiEx!u8z~VMJ+^bQ}iWeekWr%3Q-f*UWoZC$+d$RctCA5h!|GWY?K5 zbG@etzjWdqbB=+_LkuemEr|=QMHhxma^VKd5}>>hjTtxZlp@$`$k~YM)X&7}Y8(D~ z8RzNFSL13Ofb}%@b9CCMNx$xJ%UhvNS~EcTpt4+c;PL32qAUhNVd_dENqw=yPujI=7ImZQ z;z%yrt=!`_!L$l-k7ijClDzJddRkYUqai6?6S;*3o4j84u%Qu-8|O#;BF0TLcGk6X zQ90ECg@`V&csW4Px??pj`5c5q_fx}0e*i^CzF6VUaNe?NX!tVdk?*tqi}`~j`=Eu> z@N;s>mQ}as;YXw3E8T5&)wp$R>=PHJ#4#U`e=k;AblFrn--mVt%(?G`HWdysLlnn1 zu2%IXLSpmfC`T{xftw1)_4Z~IbBtIYrjApn<0~g$6=gHou&YxGzQb%pcZD#G?A9ry zyJxB=O`0jvj(YRv`9csf@Ag7Z`;)uy`^l3n6r#W^D#8-o{@Qzg@a3)PrR8GVNlSt| zH!SLdVUAcS4r=Z%QG6O)EdTZlGn*D+S$8&jNr<6dvxlUWGA;l1JDK&Pp# z3h$6g$zg$|de6HSA8r*6_Vf6}z)2?{{ls@|)*K^ovBVg#uKuW4kenr~6+KVN{vni$ z_)&*r0_1ie793$EnCMi{conc|s3WLz(|qcNL%h$%%MK}EUU>8Cw&}Q$bSE?F?s*Qgx#PpXi=+A!f{O^xyb%3UIQ$L*+Q}-E7a~D_x#@S)S*tMKU_dUO z(*AzUq1x?nGVE6l=9rJl)TZ*Qv$Mu|@&P|KEu>nV{% z?wxoQoY?{-btB)1$;!Pxi>0bW+^>?CZ4oFH@|{(3_+M=tc_lJB#B}>1X&FvC@ztET zp;qkc5&^73%gqDXx*^ZlDULfgL zeYc%=LvmHo#SDv!gQrP6n$cf7Y8atr&r4^g_|)<%8k}zad^WdSfi2>SOELquxH@9z zgq#tQ&EW9*lf_;b(cO!v(juCBbI6kWM#wt{HWCd?AMw^#C$z4qi-yBrA;c`Qpnr=P zASjY~`%Tq-1UVBpgzpwqrIjz(uDPw}T|L7KD?H}O^G7#p{v+l3Rt2nU*rkEv)2e4MmPm$-$x(ssdRICo(qy&HA||L95X$*kAs(It&t zar<}=J>H$w?F=)QkFb>MSxhEoB{NxYgNBcjh`A#nq`u0i zu)S&mqHLR1y>Y!_`H@LY!rZPQWcwwlvvWx{I7z#synHz?8BjlAIPC+F};B$YsA>KD|YF z)bX(vWLJYZ)Ew-JP4xgVVa8~30A21f@-^);WGuKzI7{axA+xV8lC|yWJm_X9ly_3< 
zGg%PSVllP38(f#JH?3^D!v@7tCkM|rVhvz$`H_pHx%44KDG&2}CP-Dg3UM!IZ)pN# zxJL6dcwKd`C*1fNH%ljjI-7vTF6u>#edfk+_P2gOuS~{eL~jHFfx(QZ_u1H8k|SL7 zk9wzg_(<1mh;9vpUEF>Y!tcB`{F z-KX_dR+%|iNf~9^W3BsZ2zH)4os=zsypBS-W*SZPOi^8$BrRs2mv^_snOkQkEntN` ze^eYxM_z4lw|%Hp?N}r-tb^`$6aRSoeqi#sORTMM3DxL^9%2QHYJWJo#i)Z`;LHSE zu+Aw;X5>cWf&)BbUPJrFNL5=c?ZXhkFl+qOt^~Y!DY~D9q zH5Vamahck{9}0wNWk4+2&SxIrM~IGs`-pvT*)?7A&?m*~U6Yf)-f{t}{XG`Ej_dWj zSor?ud=G5?AiJ?$A}CS$bURI;YsdoPK%qm3OC@w1~#LiEHt5*l(A`(xgnn~?yd%M%dP%sPd9qnLz+t9 zAN8-$cY03(VFjr--4d;0n&cM!Y~gdws|VUAmvF~+k&lVB=?JmU^TbKN5LPJEB@P#n z6> zDU7V2Dw{%dFJ_i|+NVz>1=V_%&7(8R^i+ycT(Qv{lz+2@3$7}dH|pqz1=gZU$ks+m z%(xtMdEd6LtnJe}K5seWPM`_3b5>lC%^(b}8&u?6ttHMB#~CL4ulDz;a-Amc9b~z3 zF10uQ%c9s99PegxpcL0?&j9A)VQetpO*?#TBhYX4r9nsHGBWPa8lGNCY@wVSR$2x6 z(Sff#oGuitgrlp0vTi?A$oRfBo1&i%kv?(tiA z5I_Tnes(nFV(WoD^={Ry#qqXsa@a=K-MkWGt^pxOy@EDnV(5}S6>$F9(78@LF%T*3 zw+EJQ(`4vt$h%pHTex`_p|zH!pWo0x#&-UDc4@DD=<@OwDuOClG=BIg)u{dZy?3p0 zd^B>W)ke-&&tac562ayw9NHWyA8YnN+fp0Gh;pI&7@tKJnfajYG8rvZ^8)bQPG}9z z_M%GKWJOPN>fYrXNYn(f0qrlOr~uT=WiAVEaz7MvDMx`9l7nihODsqX})DiehL8x3Zt}h}yo%pm>JJ+0C1Q_M*D|GJ&pcP0m@Ac`II_eQwS8 zgNSo=5Kd7IKrNhU98Rn{hfK{WW^iFl#E9lR!&p?jSt`@dXYM zQm_{|jdy_XwUIde`^3AS&~p@260^d~>wOv53+SVTO@%~FN$if};*#7uT>S&5ChT^p z`l9QB%q6o|(9W;vm*Jqi_`9QyU3HlleSUKdf1}sINm+TFoeT7(+He(rKZFt63=?+< z_iC*YIoSAu(?6wSqjuzf?WbTyQ5Ia9pZ~ew|L0D4%1{9~nq*9fF2--E|0RSREgr_j z(cZQPy%AkS{S7a_4)1L;TyZE$_{ol|;*8yMgr8I>F6Z6FwPdO6eX~aEv1p*s9a_`J zmFH96SC9{Dry9K^*=*Z@Ym=&6wy6h3rmwTkEIq7h2bjhjcx0l~sHUK&100>?6WWxi zahrXu`6E@Ra1K z4*6vUEK|jbzM-Wu)amx2tHb52S4-LSATWRhQKCyVTb|9bT#^pQEzq8;a`P=sEvoU{ zhapM`kfM3bSI!I0Nzs)8SQ+e)SwUG3-nEa;h{NRHJZ>1!*5Rp+ zxr# zPqUC-x!NhY+h!CMq=D^Ux+m z!)vyw5*2Q$3vQKB6EtW2Q>nfb@>#ykJF?mIUHmSi;eOC7DoV{WXZ+qz#gO+>x92oX_^vnqp-xQ%1j@x`PdZ|>9dXpdM7yiV<nFU=!RgM?d!fpSi(n-_FlP-sQ!%O5jF0-VKY)VD$-s$aL zV#?1HIE^3#xD!+It3uYd;NPiUIx*)OgLH_j-y7hdpVcYJ<>~6cD^G*V%jNlJ zaiL`W=lR72!g8SH6=`RHs7K>?2eNG|%KibfTP*HXQG=3`4wOPlU6`rlp6DUxsKJRv 
ze(S8&y;;@P75C=`eC@nclo2duDY>Z5%I%7S{SxHN!9S1>R08O#g`8p~?y1&T^|6gT z2G%7n2z>`%)ORQxXYCjOA`1~jm}!s4&MS31?BYau)^@jhu95u)5rijJ{X{2BG;K#! zF=2BA3e);h)tLbBb(}bEbWlE_^{ENwJ!#u1?T!D6izC&(+pVK$VKjw3Sb}fwQco;2 zcz=yZ-Kx6v8@3qYLbI$U+smRY`bhgY99Np3<1RKQ`Kb6eouz*8I(_BWjHWI-2NGTWs2 zbnA3iJ;Io|?)2Vy)y$?Wi&kg<+r)kMXZYT5{pKKVXHn4SRONk>lHICy=KvTV8WdBX zEWKBH-Czrq(-aLlIDCw-@k=~bnlPuY zO(6yV9ol^Zs2Ije!P`!Y{o5TK7udS(NCA*k!e3UoS$8`GWgAckH@spCgykvPYaU2k zqRhD&rE!tq&_jGLbEST1W>TksK9=W6r{euofJQV`}lE1~+t z;y43x8rX#fhAGzcD`W!#jY-#N;2BIIZ1MZh{bv9^l_Bl!c{HZQ38 zPqUy&+Hb)Yc)Uk{dO4U)i3jwY4Cd@KWvbn~v3Gz)3R*4IN?@kAev=P?z-K5LFU;cU z8Q9zT^3#ztjZObAy!PjPyq$#HfE%cCe34zOY&wF3Yyy1!R1ho;)>%S z#BIZes+|08o2EWamZL{w#F)dIwcx=XQi*3;3(sG5Ud7%foxVw?+o_}+(VKda_L%C{#J?0s`;Uxdrz&s!yVynnXN%*!AL?| zyhxvG)ErZ1pNg(pd|Kt4p|>_U2pX-p z|20AP{gcNx+BT?|irtU5amGHke`pJps4A;PjJ>sd2Q0I(I)b3N=H!=PmPrIjkw|{N z1=h_oKu<940{Sxo!R3(LU#VBjd}4$?zXH6Kha#euUv{kTY}3XD?)>c2hI>O|;6bw# zX^lzU(=^o+=F>SQC9%Jn4&5zgl0dZk`P-&x4?Z4C-YBEvy~>H8K@GkT?WjoPK5%=c zuXPFhU1;Na^#E4wh{%bacQFw8&w&TsQdFS*UJmM-lNbE6HmgYz_&a>%d}lvC5_mvZ zz}wEHx+NluR4&PoFYAM|7)Y&nHWgjAfR;e(eltACX=mUij6pwEJ%2QZDq#ce?=`3w zGR*NLuoAlzFXvOV{tECmSA8-b<``qd8Yu8ANqIFQI;+>rkzfdR%Ci?g5PU}7H1mH^ z91D(+B=U<6NrVRb?$sY%W-A25E7=fWXOJ@FxI{JHLO~Bc8a?FDIm_2T8C?d*k~00e z`g#m*OSWdUH{*1yTCc2KoeVixp1NJ36G(YB_V8&$^i^AkkBGT_M&3&VbMpjY_T9gA z1k>w~Qh>Vm}L*d{U#^s*;3v3z3l|wU5+k7*P!W&wORwl3H)iHL@fi ztJFvXZUDz1#n0p!OInC_(tTDt~)q{rKkQoeLsuQ>#x)B-S_8)M%jq%crDU|WU9g#P^Cy+v7%1>rzX$#G#T*=17pw1n(~gdQYD zX#$lAT%bd~PQikAHHUk@xjWJ?uOvTxgIe#ZR!cS)U%kyqJb+7aw9Zjg-%y~9O`JvZ z-BP(&>}hG=tq3ZQiUpru98NeZ_p;lwumavc&La?^j}J87-Y{F94Cg` zKzOUhXJ3DP?@G1b$CE>U1=xDO)_=|xPC9CU#6f^#R+}X@gx75kqq}rEbWuFp)IWSm zTo1A-Ti3MVGPH9`_fW${_Je>CP_6o(%?PLieGRO`3KV6V-m<-e)C#R!bUK0Z$$9Lo z``!y6!aNv$@AW^v0rx-6HvdVpz3bfo>-h<1V~h)xA-=7?n;!`{sM7S#S@tO8_`{L2 zJ*MIRm8d!%;U~_gwI^9jT}-gdqviiCTJv7#CMBnTSfF5Lek)b-Fd%%rmVa#KM(el!5oi3>X;h+9HEJf)gYXK%Xy$9G!U)P9#HJZ+(bz8 z-}VUG@&WEbn@9d777oSXpq%Iv0OH7B5FBBU(pbqKiissz_AvS^S{9UHHv8GG`|i48 
zhQN@z?+ntQ%d-ktXCc;g(6%ilYF5&|IWp>+(yG`Z84JI)R|PaTv#iQ^i(aD@g&&V!a3jhiu^Y2%evbsMNiol^w|A5 zIRd4tp1Ay^VbWTJmNhjnl2r07@-QgJG=LGL%<|}~v)6TeFzq64I@o=y8CP_DhHB_u zBLo;=CfE3ImlTCKNnU?+zu!Re6+7A;$AYR#;S0<8th)FV@5rAl(!+Cx8j<=q4nMB9 zdcSZb!S=3R4-v>FQXv-%rt2=4a480s$A8kK(;0zRBFJ@X^0aUMQT_yS zb8L15ukH~boPX8NqWixI-3(h&9}zy5g~H$y7b*zTJipF+2amWDR5A0@_XXm9Zd-ot#V(Po7tZin}6JakqLJ7}$HdExkG%xa+$Pk5ArzOkb16 zYJ_+7T#Co@i|G%h^3m&cfVA+%j{NGem6wXq;(PI$33=I zfIEBupfS+KDAHL14KE$#?F93b&uPKxHf}C2V_oqQ{R0AgNNZ#mj zK}55Ps$b7l;2NU!Vt20__k#3pl3F?URXm-lld4g5h4m8M4y=dCkVmR5vh_YUOJeGH zfqM5-xJYKL+|P6D=O?h$dZ`PTJ7`a2+W6gIRuCIRULIx-DA-ZVRE`>#Q%km;-Vebz zc{{-Ry!~6I0A)y^NEkBmR@2)jBI?uU`%|Mgd(gkc;0^^v;E9lu`9IoTvPTjE5HrRj zc-ap=O+&L+X{h@X&s$(C;va0Tg=l;kS>ukjV?Xk0`d3i%Ep zLcRhsqu6sE!Be<{K{0{WJL5K@__B=%Q2TdUgtt-N&=2Fk3r%_=y?pl;VKHbwp{p?R zbt7$+xEwMscs^1glyPycEdo1>|Lu+vJ!ODsF>IB8r?is_5fI+obBJGRfu7={P@nnv zO;LxBHR2z@-uy}LpEE{)5honZ0OzaNp67?i1=5+4XJKw1d&PVeCmtnKTZIOxv;2KWElPLiIslUnSo*Un4?5B7u47eT`{XLts> zUhIvCcJ!M`zV3F`0X z^&J2O&12#xK7r^Ji#RppK1g$jn|kM;SfuMWliejJr;by!BSGdpZZ*|Er#AFZ-&`_ zvx)_q7)6rB3;JK@trx*t16b9E)$)iOM3XH?g(7bjNi?;+K7xQNdQH<_{2I+!v65!9 z0fg3LUiiC;1xp!|XhI0omVfMAm=+cQsHWKw1C8T0U{IGR?!D=yQmhmHrP% z7ar|*^Ae)9dY*RVvU!CyQeJ3WjqbavvA83lJd(ZfH^3gJhv*Vc-p+1Vb^XFnI4yUY zfgw#XS{7&6*-Tm*Dp;~RwRyHRBKCxQqn?XXHo!g6&xP$=VXFVH(g@~dBm2vYc$?C) zq!D0@he9(5VfQ}-R{9e#NctagAJPAG`VKL?_tst?hxysY&W+7aEU4fk+Reo<>(}!^ zwm!KRR+6~<)Vee$+@?adLUta~~H2Um|$!&2ADN4>pI1XLalIRt3f#mfI0gzACW+XU0=}W6?MQZ=MiL z5OdQ14-R`m<-qWBpLiOG&wu2|&9rZVpaTg4-jifAw^ojR*^}w9A_RT|`bz&7YAT)Y zF9pkf<3ClmrF=_x%rQ`6a2}2~)lm^~a9%vWl{V*FRIlzv?tpF9;e+?f#Szr%VLf$; zBN(wy=NcfOG51=3(=^=DSEF+C@)CyBn3Vhlizmj?lEl}@(9NH2;S>JgKVG6o(C^)S z6bY#XF~4FkUH}EQ@c${bPb4~g$Tem18t^)TfaPS-;`kk$&9yWu$&-r(XQ*n~M~+`% zIrp#208RNf&wAu2t|x{fH~w7N`vm&GOUt*9K0mpR>uqke1Sr|P*GlpuC+;NA@`{*g z?l(~fN2p|yzc)k`?-6p7ptvJH-B<3Uh5wa=HZTpiZiB+iXvv@GDZWqS_In#d()J&H z>j{Os%ue*g4trW*51v+7w=-!L()z7e@aE&TN5nhP0k;G5_Ey}HfkeIMo>N*>tFQ2 
zve$n<2!8)`W0{zURY%MYQzW3UQJjV;{?Xj6(8sY=_3!46HWR9p9!SqIF*%xAt$+Ug z$b76&-9Zj+|B@s6V}l7ydwh(u{E6~aBRZMz_tWE*td(KA2TEVEhq1P*!~Tqg1@<`a zft1!As>wk%3XBkWq%U2|=QHsE&D&q4WkbJ43;aH%I8 z%SOoB4lztl8NN8KHWWJnFMs}M`0wY`wi|sSMrj^jY_0uEUlZpE=(4*n_qJ7#3<8`@ zWTGV6!zJ%{gSq|;`lJ2j9KdqaUQYV(V{%Z8tXC8h63epVjjl}l*ZEmX)dPn8YNzOC z*T|{&u{MS)J#VEr*&t$&&D*kMn3~IFupgd@?um*2jh-GIeT`qG`=0(;yx#v^QQFfL ziQplwNOCiJ{Wy<%Fc)XQG4W3fe`ldc$8KWi)#gk4ky@J|58S6M@z88O$SOKqETSw^5?_K8@6pju@hw%U zdz-j`F`<qBCyROrg0 zF^ts^jJ>^D9m7I8%f@Ym?RaZuV=w68pRq%_UX#RZH__pJv;Pv4IwHKqbTU`!)#I=i zbgT0{I`OLd`XxhRlrd#}&sJ4EdFwcWsRJmIF*1A!xV#2>sNM%t-(V&?LRv+QFLXruH3)X`DEKCr(dte1l({xqivOWdPAOv z{??=*Lw5FK;`^y|$1EW6$h%V)7!jqvq1k4z?mjsr=(`!nrD)t+k&M;LF=ku-p~36) zoc)mR5pT?(J(wK`#vQeJ+xQ~NY`{Sf&q6{(!F$b17)pmf(W;?gy1Y{bXSHvn9$f>5 zqLIG%N#V^FKb$Gb>%g+}gLK(s!wW{tRwNtbml7BE>I>Ue?qjYfTBfOSls+h`*4-<< z`}y0YS2PI}p9SLD7yc%tDGL2A{+yK1H@Fu|gaVRiqa4aaiC(|EqS(iA=Hf5<%hFJ% zA8&#$epkYDL~qP@uv(mw8P&=%*fw`^g0g9P&_~uFk7dYgyuwLwq|rvM(pn>iekQOCeGPL z&(yvNl^fYNuf{K3fNNLmQ2K@%t}eiLpT@QtC=(z`perwYOu58Yq$$46j*b0#gBt5Nm~yBPohIkZw$q!CmHlqT|^{ zAL_f7FUgOS9Ju>Q4zoeP`*VRe%IKs-gEYoMnul|znCYCiIrSGh1MF50O}!%kv!oL_$ZuIWF^@~n zc+baA?&oL=`a$dC`C%=0#crxxYdtFD?Y!87hSB(Xm)duIl|oV{zRQ+J9b%7Eyqq<6 zut@G-M$t)p5l$7FOvG=v`g6)Q{k|l_?mkfyB*>uXn+ZNMYW z@YWvgGmWwGpkcou7b~Jg8&d4eMo5_>pu^)FhL;z^^zi-^`lU-(PaUX^(R6!F5M+-p z8XQuX-E8F@NMyIGekD~;Q-wpu0({KkadOTw;%yjxwdI0zTUqlB&k|Gq-&Ti0f!OMX zw2`3Hjrv9_ios{IQjdY!F*plK4YDLd&*Kr>9|{IKGPG1IoGeLaSK$lJ2jb1)9#|A& z30^)QK$iU%fqeae*m^ASBPrF?&>@n0KP&Qv{s@*tN~8^Mo)P|va@}Ox8-8=gaM>w9 z@aOvEckGuWHdcaYp|H1#Yj5y7jWDDl>{B$$%u!EE{n0E;M=53S(2TWrBvR68yBYc% zZ)Z_1$mAVkP8k$3d%@CnCL^EhSJLOj0KUCji|rDkNjpDN0g!f*j@d=1sr-*rj>llZ zlbARpWp&U!3D6cK01OvpavL=K;rabHG5_>%#Hbz9BA#->0^|X`8#kq`2h7WY!C(Zo zrO@6MiR}83^`f?1er>@#!A`E&K0CY0YD2gvqbPF%FbizUBHU3zvY3%<4-@B^t02{Q zqwt2=w~|zRb){8NV2dG^Dk^%#c!(Z&nRNp?{?^;8BNNl!U*tv?LM*Xtt_yJvM+JSJ+P~VEpV6%TVe+*{tdg9ZVCO>W`>s7!P_$?u)^Fb8-gkO%J-yUhhl7*J#!s;}aukI^3Uj6SGN~ 
zf2%rTiNJj>os5+!iAhI?1Rcmn?TCHhUTQh>SEc{wV9nnYx=$qOL{gC$!5ms)v@0lv z-Jx&iF9T22PKxwXz5}jxBt~lQE?X6g0S;M<<+>@98RJZ(VVEMsi9OHx%g}F_JnI#A zzdox3s#tcb5xOZ&r5-D0f>8%*zsjTOw~4hY2hZfXnVx&e%=z&-=~;8c?TKuiOOvPY z&M<68tP5?i8Dj(0OQjk-`tB)fS?i>k@*5RjUGV}42}WND{kYSU z3Dc2Eb_r8l@%ph!d2)UhmkBVQTt;Ar;5hoff7C|TJHn#eC5+C0<6wb+b42(}S=aRb zFR^qbutK(kKo`;WX}BZ1)^8}zs@(SVxd|7W%#^|h)hUNnu`C}0W>=GCwq*kibhc30 zq#DwG9rMkSG?g8ww7O_t6oBweja(!r+P-;dN_PQLnZHshoNV(Oxack5KagGs(F>^` zN7tmhK${?Ulre0<#@aS^kMwWdLFQmuB$7wDM=yN9Fud>kLCfP}9L?r(BI8}~#!LJ= zQJ}F5B1S)z*a!oG-Ma3H!2qpAGcr&%4v9`aRAEI= zc=<@=k7DlPVfD>Bg#B-m{zak6VNX%0 z&%cGgS^3JuV+gRtNW3dSG|8ll>`c{)78eo38oEUh7a_%*Tz57fg*@9OJ{tClPd4Ra z?%y;kqm@0!p{inuqd{YMm7QCy{!wbo&y?DO4`y*@EvZ!QOvQ0s-vrKmYR5`OSVsK| zBnGcIV{lV3Oe5Tk(we=pfIA%Tby4_@M9YMuJ)N&G^fc`cF*3~Cc~kHboB+SGjjhN@STe&LZ$oaca;0S z`E|Ot+Z0Qs?&g+9E|RQN^|~ie4(Hv4Nvz&Wo)CrC>QlRN6KNZNe8;ylHzP48`g`XY zN9LSxiheqeBTFZkoU&e;pkNAq@K=qfsY5nJtf*#1SwL!rCL_8Hc>T1&4xN1rr9Z}A zLWb&LtvFi76@AF=eT)xw&dhf7agju@+g`v%g}V1i-eR|2NT}rfA-!GuBt}e@NH}c! zOP;^jkx&hBX7cUmSh)TRRWE%W>x`_KP zyJ5V$*ZDl$ye4>l#pyX7c4U)BMWvZ^#i($*d8XX)C^&+pH4~w6bUj!@>Ob&O>*9mv z(h#nT@Clz*OjKKrzpT%R(4WY*Aqji)j!r_1*^)g^KbI;+ud`bS>wLKPQn8bW%W~wH zQT$Kj;|C-77|Q78cr&#O_87d8>!t_tWdwvFvXUHP#kt)dPlQ7ImzMX(Tj{FZ+HY6a zT$t@7(eQDXWo2+CE8M^BY68kgv={DPIipLNoe;ZErCac@3)>wbrk%mB%C%iY*e1JC+2fr|o)hc9bE4 zGGa~I^BtF~2Vlf1pPc*Vrx~eirA5D%mR((RWarGC(^TV_HCn}Z1>j-~{=Bnk8e1^r z^J2E)>a8*#DG+)pfynU_H)#>OH9Z#=1ja1TS8C@;+a){?5PoIq|kI#Uf~Mvk_1bn~p2Pkpac!3vJ4Z>t4+8>i(wrAR;<9laaCe-WD* z*;B+u{a?h^tBS^Awr6TP_sgdDA+$2*&G^;_@`)QXU@$IDTem-S&@9%8aiF7^Z!ecf z>Yy~`)Hsr1QFr=1=qSnngvzfds8r^ndBt<0$DcNOr^SgBDz0wZbCC7%OU=jE@f=iV zTFtgo{WV`~3BTMD9E~J^%}wDKozlij$sUTou7DXv3L(_5<~bI)p5otK{UM(U8 z8ig#il(1loiF^T@OmAu~EzUWJB$XB_%0`~^^*t7sr_#=u#VP!~YS(PxV`32zs+BU# zuTxsfx;srm+0@XVF$rx_koRJlD$p?VoUOxX8Qq6>ZE9~6tt_&{vdm@w+Lt@I^!^J! 
zj0zH2qwCC8lZ1;T&f>|lOLnzoEs@0!qhHV`^`}4h#fASmyVu@MtcP+7w;EMYK(EgA zF(MEvCF9$#l$A}IHdMTGmjb!{fyKMickS=laO5j+T<37z4UB<(-UNTNyv3EbisKc` zDNsCLMFaq&_0}jV<%!y3u#fiV#h}?=oV40l4buDwBmieu3>9jE+v%faZ@jpLh zzrq({h}ly|HSvRm+misQfiJc=X7`#%Y=zSZU6x|OmXd1bkJ%NSpWnwjQodrIjC=MXc&4Rq)m6 zFd?$DvjI|oTW1>DYBspX*la{R@;Z8pTwmlmHd`B+I>IVd=YSd$BB|#X6Fv+yD zYN!YJ|LA(lpuCzcSTsRGAhp;b&E!S1dEetnU2zuK~+J zqe8#5f!p9CioQpo&wl}yW@Oi|2N}uY zSVbRC|FUNy~=*n;FBIfRFz0E7lf_F|lG zS650nC9B?H%B^@R!3FR&3+PKR&9f@_H$6wiwagm|)rK4HUm-Dv)5cMUM5gvn)X6_B zC=K{MqV}E7RU4m?AeeH3`o0K4c=<>=8EuPB*=j^g^$j<+h>s`3*P3BWI6l-Ys@5$F z(9`b{aszi17tMRCn3nQu&k*-5a%4_4Y~Iqptnzu@CDZiBFdHY}N}xd9K1ITlgyt(v zOYZ63$)`V3IVZR15ru{wE66$rRbA6fT z%^*UCS>Z2!;R-rT&1K$r??P*Vt1TS)g0ws5=-();9d3~#BBIQS?G5Wp&@4B!+&B1& zJbcJ=j6%bl+4F=J({l)aryw`QdRrv0Vnx ztdyREym%kC2-3e}gp+(!sGsW~Y`jM7c{)-kv>5wrUR&|iViKZ3oYnZM{N$ z-c6pgj9beV6U>s^!fHw2>SCWKt<2C_88jVE5B0r5(FwUA7*y-iQGfOz{j4IzgV@6SsT z`;MU9GM}9@W_fZVEOTjI4fh|241@41-8$x_l;}Nt`A<}<;XFnC&R13uc*BD^2)b3) z=58+w1kC>UQ*a#>U(sMV(*bFwFJ%Q?47vL!!G2sTk@bs;HSgFpifQ;_x}Q3w= z0==Es_s`P!RFu~m7Yg)v=Y!Yg;mf^#Xg(0ld-ImCT!UNm1bS4fyE%}ghB8O>p223w zbX+@sSX||bzpuq1X6A9!hMUAdF~dMzFQqnIqobeJb&~zj;-L0t8Oh(eCx}F~lBWk> zP#FmZXS*iMG5~ zZNSMPk3FpPiCV0eEefy%mpxwG^z0U&)HM_h1TC^jBovGIuIlqvp?Is*r!|_Q!A3VAkTX*YTFL-(^aCFb3i95kPIU%2tg?xC~bIK(GOyQ(lL&z!MvY6kN zmxpVS58bWzBlN4{;vXOR?YM7!3XECG@HF}(gG&oR()OyJ*>wzFqpO=UkpXOHv#$1J zMu&iA2hZbI=5Mg{Q;T(05bVKoRXqwo_>SA#_W#KK-p@C(Po4QmcVzL~Rr(GCif@u| z^e^hj-1ZzbPHwumJ~L_Ia8qLb2bJ1yUlNvAj$|`2?#g28!Lf_c4>05K8u3QiX*%Z8~LiN-X+WugT z1iU*8h+!!_9`T_oj6e;>eo~nFdP_EZ14!g%mODEPaiVukSQc`*r`l5FDYF-&H$FPw z6m!=~N}na)b24&}b`QLe$vAGhY`l+j8QYWy0Jr)H#v61Z%gD&lSpS36(4VT&uQ`tZ zD_~+^u_sy}m?W2%ri+%vh2TY=H(&%s``BUn5$%Dt?uQ8K4<#wip4eHg4I$qKETmgc zGh?OU%&o5@fRry+0WrRlt5IUrxzx#6bx&ndkJ*n5!Hqis>)U_!xc2n(6WfU+u5;?@ z$q7>A|7HPXlzN|REC09SOOu!ZoyPjSEFuQ??s6lS)8+CAPd)-yV!a3H(Y-?h6nM$@66hQDu-1*I z!*!WXYqNLi_r@Uf(!h`omhsOB-?fCV5Y^l3c^LDDrWreH>vsOgI`d3_6(0jF_l{28 z=&+JVN46nf+EfO8^>BS1%5Rl&|E>Lapzc>bLl|oa;Nd}ZMV{Y()!;xMpnH^zPR)U| 
zGkel3d?-d{it5Mfg7}OsD&Ab2&q@)IAp6rq`L{eZz}>*Y_C#m#?|X=gL(u+bALc7*bWWs(|$^Cu5;G_@|IIu z9|Lg~0`DKURb9qc{TvLkUayKgh}%9qtW}*v|Rr=?~&V-dh+) zu{L}iB>vs&$^)Cvz7auoT5&iFBP#5T%k6oMU+s@KF%+i$^6AEHBMpwV+KIl>xvB65 zQqU8|@9RL_8P{Q~6WM&)mZ*@A-u)*i?X1Ep##-Mq6zEG^hcAj0Y2a*QmOxXnN&_=T zzsg^92q1~#6@M0w(})?%4+oTUOi*|UV{2bjEGza7E>B2z(o}!w*CLF+S6ypVJHl1Y zyz5$4V6)r(qTO=sBOnj8WjpY*B4J>RQ=8VDZgY?7sHsV)NvZ6f&35B+O7kZ>_8or- zaSQ{g1T31H1m9|e(hrezGKpj8pERgo7_KxpR8_rQiJitO?;h^Q9?2=lW?#YK#e5N_ zdIBI{wH6=%Go*vbQ_6)TT4h~F%EKqYw@#a>KFEch*Ymd-itTWw*Vk|5@{=mKu9w3L+5~ZH?vD=q;iGxn<%wfr1RJMm%O&n-lDYiyF0q{n zvVgenVJ%E$MmN{ZD=_CT>&sBtqG_mD!E6weKRckn&AmDNFj7n9ay9T#EsY+qPw}2h z+`XEi@U_q1qa5T{%n@1oMB7lrd?<9o?|8P)Z41j74zQKc_=1LOzu5Wvqibw;kfi73 zSr2Q~E_2PJOKW?_4rh!R57k~vbIo{H!2s~A1>3O*lz;nvnWR9e&oc~?_G3Q1NVEAn`QO$v+v`;@yO!6cxzVCPqS)%lbZX8un zfrKLA$dcs%Bn6>)Hzac-~&?S{(h5opvBMF!AUsmUUHaHkBJe zQl&BVUBWWV;Tgy2J@VobK|6)ex3DMI%NJtp3Bq@SIZf%jn5nG~KX(1%!b@A{ft$Iy z<%uN|3-=)*6_#C3=xabw%us^N8(nN{IpTf}@Ihnc!66&_!=A352VoqUE*=;ors3WV zsPX|!?%O{KmBc>yBM^68%Y~_prsjy3Ah)Oa@<2Xkc(_gFH4`61+pnptpm@CNFnAcD z-=01hxTeE%AxWnN4+cSbh>BeU9m&%|^}{3AmV}lvx`A7|e4dakO&0u?ymgWjl$PC9 z4J+OL5l+Hop1B$&hrTEuc=sDkmr$Qe8WX2MZ;GIy-(ppEiUuUQG|={rtIrMYT+*xT zxu}y1#%<9L3RAhVrJUAB?L`6#NQj7tnU*bAzvwd$$`CE<&L5(1;+t=OO1;FL;cgtL^BFc<=)m8cTM*3{|i6 z&RZD*9>NTM?alLR47TbgGOMGOyoki{CM*@Oy_Hbl5_~J(gdN@2&yQE!7N*a2u#O|Ylh^OQ0qozEAsSlES%1*v~SRW z>V$7rkpJ`gb6L}-SI)`|1^q(tAMCE4#IJz!81m`b3aBHzWp&e@=a)PO7;w8L1Q$_wzYZwS%?wjJnkR#7#qWw zUTrR%GV+KA!!{rKQI9x?`UcLtg>&DamazF~4Y{tM1F^?pHBc4edWE&M@9{j`Dm{CyM=fNraGFSdw_#SkFBOg*tKr zEmq2XCu4gbp^N1jkSA*P^WjQKkmy04PZHRGdN8h8yji^RR8kxKJ$*4MW!ole1b+g^ zlwd9@b@%Jm4auSH#o`Oy^G|zT&RjgSC^*QRd#;3Y-^G}@+suAKpw;c#Y3EN=k~yjB zpX^}KwId-Zy%wL>j2vOHMARtXIC{RsY-To)Pk}z0m)bE22m|Jyb(Rf-GjFhI4M=iS zG)upHFwrQgc1aQ)4e@jRmei%XV3xoT5KxN0UfbNdJ`$C#(@-Yjr2#6>kEl!AOK5TWB2F9NiLFoY}j|FyGn z2vCO2-JrardSg6Rg}r0O2W`eqeQ`cOjYu__4h*59GK$!d$v=-qBAEJqVv6uVqU^p# z2;bAgirHKi@ZfKtbW*CIWL0A-=BTn zwCsGgch9*+o>}h&Oa*$|#$MV8xL$!)b~b~-@$`-LZ}X=F(1!Tlr#|4S8Q 
z+4F_6r5D%890y9oBw+x{R@Q8I%=ff8sk4D(z`=do}k;pAs`Ti2Z;&AkDR<1Eky- zQ;s}8pML3|=yH8WjjoWwo>@N=ztN|@qSjcu+uy7DdWlp#gcVmI{YA#0MzrPt_S=`_ z)is~yC`pFj7Cj_R?L7&R@wlkcwDs_@T5^Ytmc=>5r)f)9?G-nV(M~r%q)(Cs$q}Ne z_0$cD*RmmQt40k%LWQIz6lJViyNn;W+0Y8T1M@6cPcZ#n!E5~<$kL)D-0 zNV?tTo6(!ahIfgYy~t=9qM1ZAu6QDj8#->MQC9a!wP_JtVqDqMBl4uWNoY~KE2tLd zc&1{Dk*xIs7ZBqc^Pw`ju<8S;d-Z$a?_+musw6%>W}j2=X?u{$X5(!uK}T*W_G*tF zQNQ%|;HcKmN?^GO7INtv&s!!ZQ3b%gpON0>#^7CXw#nu;Kamx|8;=3Yu5f_M!_D?= z`Qt2CO)*dcqKosZZ@70WInIaJ+p<$-8fq*(`7J^V-kQAjf;ZqG6qY>QLE?Gg<1ng0 zYYF*^K<4|acYl}VBPk>5ZV_mK+Pcuh-9AHq{^U<&RzZIIP%E)hXveQcGMO$I_NB4H zqi~}lqEc~^q5F~}($h<_F-Fkg#`t@1sL#}~B_A<{bN^EZ)ir9E>gkdVtdP4|f%_?} zk2|M4y}ussWb524Ed`i9N*!nBHL|S54Q)(Hl)oH(ElzC3jGy&bSAq1M{t(oSMXWm# z-jLh*p!(FMb89m5Exz=5zfAbWU#Zgr5cOg1kL5YwS_pLHp|0E?)E$J2snwUmIS+pX zwTY-`n>gx`1!!<65@6po`roQu2$H+gpL>42rv9!xBLIkBLKmrv@&=N@Zmlu?me>lC z5V2$l6hkqNOIV+no4p>;?QtwrLKb5BFBe*4%S2)PIrCj?TTZek`UH_lC9NY zp2q2)WrD-o2SJ}r&4V@{N*WVA!feyD=3Oy3g_DwVIyBJxmXb5wQ{Gx%F#Y#pe;osJ z;cp^BSQaH9B8*~gfetf&{$?2MOhiU`q&jVvXD%AV14)XC*4ZE`#}d$sulP-g)Cj?W zH7Un<&F3vIVSZLJ4+PvpL!`PTR_t=1Ux!AlX$X&kguQUz8y82p`}FHubM1#6lT~MO zBoeH?tVNG1A`P@sjudwcC`d3D>J-3DBlY6GbLjX@um1RDf__wBWve}hzR~L!&`8ax zciE}Hlr6Wm$&Hk^CK(E29Zo7ie26{`Lk&6#l~TWd?!7gUde}6RRfQ28rvaabOR2ql3wCEP&MLC zCdnuLaaRF2k_rh#T&f1Qn0LzDece5*S7@8bZi`g~T-A&l6J#>QLvHI{qbuful-VrN8#qfbCMkk|%3Qbf(TDGOFjR@hG8Ci!>kYvOgp@^HK?)|GJ6 zaQ5()&2_X=0@&SULsr%g?x>nKBDk+!PvwqxS>|$GD`v5^LYpQ8VxQ$7g{GLf&bVuS zlqF#V;*G{2@Kz{^OQhbw2i}qblk}BXZO^Qum@ozxs^WJGxMTO8txwK`7^^_Q$0#<) z*w+8+xbrc2Yi^j@W-{uItd!x2CJM#;T5GOyRh%y+v&cHvj#h~JC`buW;?nwUv7hYX zH#tkTkd;scX#u&ZrJrkO{k1~`LLmjTJz7M&hW{Q@xklWoO&Y7Jg&``XE=X>^NG&W=pe$~3?I)(SnqNe3 zvfL!6u6pl_iGzodMXK+#{b7Wcgtn8pS$gTV{1S|)i?6QkX2d=t zpDsgRZXjiK!QlOR4ql_Z@Fz;vRG|Q+WmHRosrQPQk!e2msETaU?DurF{d(T3m;dY} z-?y+Ceu`ur`M} zs?=sGK>w|RFN@0N*l?b(kRwnuUWGsomHlguLQ>`qHvAopMT^3x^nr)X9k4pPv)SwC z-y3h9FMDi5m64(*pPve}6qah5DRSmojUPBhq(@wA)CRorbtPcA2@G7 zbSkyWU7k_4Pc!`0qK}N9UjA^ZQ3k}W*TEFh7PTibQaW_iF3=WxeyE~c!T&XZu2@)A 
zINqpf{*jC^`{85dX5h}|tz?%NLX_-7*~7E8vpk|&GB5noF9Ft@uiSPV z+ud`CKH86(^(PY_sMuL#SoO$3t@-%APhlf-;-CJagyLUxtE|78>QtccISh%2>QDUS z(S542kMX%0j|=BmQncRxW(R97?&kZw@m(@-23t<@at$4Nc8#k>XT|&HvjVMIyIA*m zJR9xIR~(or@^E{Q7!%oe7+Mv1lupe)Vl-Ef98zR zn9RlR9Pxj0dE>v!IE|u&Bc^Q-LifhoJVM|dIZf*1a?K8bOHUp{Nh?S^_b4M~Oc+?IQ4L3hJ zOke_q<5ZMWWHtY`CkdF`ueN85Q(5n5iZL~?i40!4cIJZI%*N7xMLKX;rB-icJ;6+_ zp2nn}Z76Y?>Qlw-j8_qRqdG>m0$zO3rS`TPF(p!i8Frl^T?sFpYX^PMp^V#3ErE_g zQZ=PJ$Lj{m98p4Ce!y${N+Fr2rhPjtd)r>%@XwfX;-82ummMg)rN_nGA@mB7OskD? zCk40$IE!^2QCuxh5xcdCZ0~kY%sCwrPt7#w<0kIvJ7kQ0mW=S$e@q%FRg%OSH7t%Z z&wfzq@Hi80Cg(9P9yR3GZg1G=bHLQa<;ZP~4 z=ITbyN2i;P;=_ z-uM77#rXUx@u)QI3n4#^CBnCEe)e4W972r@fU0-rOf7 zmJFMHsGWAT9M*r@?atM7fg2=G;)enW7d9;B7T>!w9WzSyS9{X?@TNUE21EO~4$Vp1 z#UlI6{o)JA&$B&`MNn57_QPie>+ksEA^wjIcvFQ;xQTsAfwF(Y|KxGn69Uz>TLTT7 zpJb@$D5ob{Br&E2)C90{4-2fY>sN|g%$Wve`NqecTAcxVZTcTeB8@HEzOdxq>_f=4 zJ3AGATM{##k+izp*ZX0)heJmANk8%8SSTE~D^>VU?YJF|*R|M=0Y5@*_>}*#l*)J4(gzpgv(=gi57rl;-jv0|ZmlcgXL}_AE#^J-7V@PECFPI>C0Kkjnz}zm zCZt$m@ftN_09dQk>^W-CaD;G}CKQS&G<%TsS)hM`EwP~QCZscyX^fhEcz>adp-{aasJI`EeYjiKQ+%*4to9Ez39Tz(2>~)La zN4^q^$+8A*u`lX=)JA`CoQvHhsWz`yVZ_~ttAg2kK4yJ^o!0jvw!44&B^EH#r@mub z$l+$WG$wx$dJQs_Iu%Id8oEW1=*Tg^E@z*(h}{@OLEat0e_$3t7g>U^JzJZwPgeKP z`gBDhy%LzVr+Xr5;4+zjCTQQ(bo)VK@pO8oVYA_&9p7S)qi4()u!Qb)^qc-?3H5se z0Q97~_#sYJ9;iWlY(#B3PE9Vy2mxEVk4tR0d58vI{w|Cq)K}buAT5;|GbPEP5fcW^ zF1~1srjreCm)Cvnug1lGQ)0ee7hV&OOgjWgAtHiaA*OuOgkMryz6d@$+?b?ZcNSU4 ziq6}(GNR+B6fgZw(-&G)S}30X)N(m6;&Q)JFF%{BC^GR-Qd}(}Xf!{MrfTn2&vI38 zb6ECBc9;kSnl&SK?|N+V^!YLR9#hefPV;czG}Et6q8mr;^BRgSNpI?Zn^`sC9=M0p&`FTQ9fWNh%K%GAYN$C z_`Qmap4VC&LoieS#(*g1!0EdHV;yBtIecP6BaJFe*`P{sh7ZwHOH7mlj-!Pv=CdNR z+H0%Qk1-XIREDJ+lY`bjNVz63B&B>4)mcd|Qv>at8N1lsDJqL3abZ+x-QqFKKxo_E zupCg&e|98ryx2d46C=b3-w0dZsmvKNbb)_>cT=~NMLITWL3pNn4Zu^cKW?=W1$ZN+ zh#(mN_Lwe_rD>aya`(51M<2wubW6$PI=M^>{PxOf8WyrOM@uf-UUOgr5Y#P`WdX5Sm=aelL946pKFO@z`UzICrPJkonG9-qTGp%U znKf=xi$hy`AOZl=_MM6N}R_+7pSEhzt7q_CTsiL=Me;uyLwl_RM< 
zq+}@ju`&EKg3T-aX@LpmseX5WB_&0D%~olHix!dyPtCG!A-gfyhkr8C_)+KjWvW_3 zga7R#*D)jQypW`nr}sOhu$IZObmE#3MVi8H)dhSU`6TFIr&o^(zegcn{%6zE;}v>x ziKRfuVMP`^=twf)IxkkMcO5G(h9wr5)S8Znl*5nEy>kDs*>)9@Sj*`w%U#6^$L*u> z2#&xPRzbKB28Z>^O5m8dhXX~>d;AjtmC`G_=l* z#^4!pY|luBRq!p>&W=Ocp+P&IOaZ(>3S0+=1pTdR7lN+96a24hm+`l2Ck^NW4z~lq zt}j9atiKW+OLfP?ke(I*M0Wi?%aHihFp^FMP8BRi??$}OBJCH`Sk%|)(v{YpYh%{3 z70(#q2fI$>i7}&z#?Ffy5=uQ7hu-TEiis?ZgphT-{5iKoM7)fDXa_ckWt>TxV zQ>+r+7N2Jl_YIkACA(?9fQ@6+Z{8O#ytZ3AJccIM>3q$jQLHlb4ui5%vBJf>=H1Ck z<3DZIsNd69B=7bkg3Ts-!R7W%%Z`i?nD)bkPdi@)B) z9k)BwY2~PmVIc;xtnAc9^95m&xw_%rSz8>G-o(>Ya8jM)S_|@2Gy=GM)RlD0DI=jh z7Wwvf{%9?VNov+$7RL2~XPqU_Fd=FuTV*k35wyvjJ~WXX&7OXt*_awdwc|0p)Iv~r z@hgx?swpY*p!utlBH9D7NcLjDV5>jkAHx{)QI!(_Cx8TWZoJ0GlQp}bT_=+`W2UF) z#1J{r4(Wv295&4pt0_bgeu+S%cU7aC=v+CM#$53nXu}#8YDbwV^`Em(HD}ORJOxeP zEL1r2&L=H{alYyGhM$c@su*!%sXaXKUL_mprRNpSBPim=_3&q$AgDF7@F1)hNA=58 zp9%(E5REA{`RlAn zWnGTCADc)q5E*@=AnnCrba@17MbE3cOWc+mntIXkW0v%#M4&uoTDT5_>x$oxXr~($ zygU4nWDljH@l4)My%=hNOw0rMfrV{bCm@#GQuV~q!fe;bRnAw~LU&wLDe4gwJaLXA zD>A&BOB}8;XZiH(0FI9+jZdL12v1XAmzj;3K~h*2ASqXmWvD^U6NQDzt!j~|{cXpZ zSX8!;J4bGA%`W<_AUyjF!9(1J1DwYbA5sm}QFlxka#%f9RbfhR72--SB$kZ6JqQlHkk^nU@T&j>FP3Uizs7vyV%p8(IU&hzFZWob) z%>)8q1=x&MtE-;-VLXNIdx;?V&;6^1uR+m4$Bx&Ucfg1gWgSh7E_bfL4B2)PkJDgJ zITi)e;+er-oX%Q!pVG8_6t`l+c-YbW@WWTxFP8k3=#ugXgvFNndSERyu7<3A8sSZ| znDQsjXrAZgfwfEZNbvIi2*}Mh?v$qK)MU)71XRgLli0|}o z1&gJwv}eB-Qvrs-LG-1AkM_RX+s~}3V~VF?DMb46Oo{UFK??B(@}Yb5rXwr!6pSPa zTrFY8v9aJhUStVy8|s^TzXn{7^tTMX%yrU>#7*=ol3`Jn&ry1&CAnpIdUgoOeO>I$ zKlq1AsFDAws+NjUHLthdptmCM`tjt*NT{^4rS7T9U_86h^TIm{wGV0$HAA&bz{A@6 znyV3TqCc6AqBqUDt3jO)_6VCxThQ@ob&Raqd_qlg!A69h@pij|5~j`VciH`Khh6)z zS(vXa9&~3i*HQ~-JMgGgJ*psfhFchE8m4a4z+;`7E)Z!{IN?7|g7?NrNCDs8_T$5a zmQJXaBOIyyLYA=RVR z#zGw$Cp&hslI?E9gQ_}b zC-z6sG2ZH`d+zcVMr>ngo9Tr(t6P2Lt`|*S(-%<7N8MY+ytTsSNh=2ExKSJ?hUS1V1E3+b!X3$xw&W8RKYYdv@ea^0DgCh_ zrWg!bM-KLn_IHL035``hlKtU=tl`o$7U(C`W93&;T`6pIZ=xAkAMHMoD{>`eQV@Mftv 
zjv>EZL_A%VHGYHR);*y<8>I|lT1l`ZM+5TjRiC}7Lbu7{IzR~o)cz^~XoB@icqRjbW{eyB<(A2N1+hM8> z4kda$Tdx%}uS6$tyNow@m4_;I?jPKYOUzoO#}il=)D+bI@e1UfBl-~^hUs^4U)Aq% zjQG1(d>F9IJQX$PaZ*Udc_!UM#n%1$Ejqa1W~hxzjqr(~!*wO_ug1@uCG^MSV}FXJ z48uF^BU5U>;)*iA=jPS_9u>cOxk}MWTt%M;PCvsaqkSUJh0#Vid2%oHo4$>=ess8i8 zjyRSpYj`>*bE*xlHiAx;cD)1U9ABvl7JqH%c<0}l1ca8~X9`YgDhj1}ll!O8ELDC@ zQTNo)^S8d<5E2D%er{FZnIjiJR~qeDSzxiX?C{__Q)2y;Cyn=VobGxRfZnA5dzY{< z1I>v2iXvSZSl{`!iK;7=JCeuO(<28tkCniT3QXwfmt6&$q~Ir{9eclli!h%(@|JBQqN0$o?+os46>88m{ra^4lW zH3{h=oR+fg%&~_48%)2p?tnOYkIT=NJ^F`|e9{4H2)H<-*6Cuk(S7pp&syFy59;QC z02^D}#Eh~NIyUHT9sN1oj88I_%e1*J*lCZqDZ&($SodUi9P z{|Uq)YJ!1`@IO9?`Zqen1MK30ypo2v88}MtT~z34y*f>ik|tkj5vlh97e?isOZY+^ zhD&Q7GH3<$e;tY1jAG0@6#0f&w{l0rZTtV0{$G=9 zF|hQt54_z`fhN!+^??Q$`-Hz1hSIV13CH(yp(D;qQPY1he>7&}n_)-E=vSs!+~3+R z!E+m8&%^0s?}&6$IT<5KxQTDth`4re7ciNHaXaiU8rG}+8vNep88C$NS-7escynA< zgXmbpLrANOqK>^@se%FUU;qtLn-E&)dI#aL(kvK|V_B6jLpudQ{fTvQV26PkoprW_ z?XGfMFuRBb-$4{(yY2UwHe6 z`6$9_mwI0XY(BX&j_#EGkxE+AD|d-J(_zB(Oa|2=|7z4d`mrmcFLtpvHt(GsNL0(K z568QN9`edwGT|eRf3z}PaR2KRuItB94@tj!w}-DC6v?f@f&FusyZAUON`0p0F0}ys2^$tT zP(=ZgLTBpfl_7{0aRQDbF-J1>tGn=3E}^ePGn*b5y+n_5RwQqWTNT*X>mSrq zr%#MUC$_STg^Vo_3~^2q0~v;FXQrA-b$1It@-o5*mVs^n{<5;*h{#{OtV-~PshC+o zviE`UDFP6dlHv-rD;*nn&JGH{KUy#U7oi&PJcyA9@PYG1V8~69#QY@O;qfit$!0i- zBQ&dv=W={91d5aV)uIKAifgYEqWi_oqz=_O%iy1A4=dqu>mO~`-~xe?{GIdrcP9=))k7V6n878`meDn z5c{_T0fTZb{!W~+BFWGr(&c$trJQqGLJd_|7IyDxBogzjGo#lmx%TD`ew2BNE?-b` z=ASi7=rq*a{0uzzKLh2wiIL;B{%l&-E$_*IeWl{?_$p<$3v9!PO~7OI?tp!$MGDmP zr<|bkQB*mEe`(#!h}cbWh)h~AUhBOOP8qAOC>B};;0HS;D4ygvVa8-{a2zmu|N9cL zpUX~miO)FhRq2a4^O+dXoXfJ4loo&^=xdx7Pt`v_r2GbmVmn2@bpsXazl;ptG@jE_ z_<$M9Qpt4>e*4cnvindTIQw+9`9Nq~Ba$KWXa_R8706ormiClhuQS(8grqy$h&eKR z`Nf5FGyYy1_ntkjG1t+!y{lac%V4DPo`to@=>6I=F!*HV=ku}S$08FAdVP{*_MPRP zyAlwf?`sIK>j$;f5|sgQ7Al_dcljXp?-s-KqGl*9<>D=lUi3Mwz>3tM0EBPe!TQYo5=ERK8mGj zoSw&8RRS1r1NCPDRbc6UBnN=L6;=e1X*c%-y5uimg#q;$HT8cb8W=_5aocx0qT>6d z@$XN7F)^%f&*MA#?`2P?k=rg$sw~2)O)l!~zXqFbpA9JDd%Od30qs3rr}Bt~cDb-| 
z89Ed3TDkR|n=N7l+(kcso2MbfTy(#TViP-u1MZEIet`^;1$W=@d)P zt^V(|amJ_`HkA@=d2FmeRCvJD`-~Vht3BJ)E!0R6RKuAU3AM4&KVG>vWx1Ys?KG@hWRfp9q(I<6~IW&c$wUHh`lZtf2lf% zu%dJ$hsV)X7Gw2Yk0FdyH$hkCeg{jl8vWwBD_?|slik~ULX(=NU|F)}tS zh#FM9KEz|Z$!#iu|JNRz^DSiKIV}MDmO-W__aYL-*f*`^T<$q#3YYw!`lYUB1crpT z%6-v{5*RY{U+v-BdaFGhZ*hAOAQMGGLK=RoCumhmmG=I5C#Cl? z?ldN)b89Phz5P&PWMSW@Uc-vAru|u=x(-E{b+Fjn=%PZiwOM_6M2E7CMNSrhcILA+ z&v~6yiFzI_7MYF;EKN@2!|q9wATSXJ>FmBzZ+#-^VCkPtlRk@oPFfp&4c<|RF>rHPHjB0E zIU}&6&_dAk3Vs{w(D_Bh_(EETu}l#$S*W8)@@49-S<9>YW$twULCif@aOF?@Oyl`T)m@!ibz2Ro#*BX?aU3|^9Khy$_ zhm1u#Ca5+rT#Dn&{tCLM#h&pEr!tmRNYy<=I*HbCFcTlL0Y51tMQhV_t6t?$fN$HN zbBl?LAA8;qJo}kO3UtO*#4tVHd%*u_EGDkwHFT#-2Kf5CZ3q@F#eqo@ev-$)S;Gvf ztAJ%SHC+A}=;xZcfF+mXO$ZrL*w_?A$D0G*YiXmqRT9guLjLK;TW!XxC;s0K{|fBz zdyFZe(fvK)cmy>kEKV_G;Ce4$DhRA)@?_Ed(y}w1`6P95Sdcjx2cRr#R}3mdHm(Vm z@G|9RN?&w*fwD7^&-tvqcciuwx}__qm)1YbL7HOwu=5GG^-8g^R*uC#N31EeNr({S zryI7P+nO>aYM)iiBKk35vQ|4CYCp4(gewW1y!2*6kffAdkb!{Ny4t;zF^%%4B|w)Tr-+i?!55ACMDIKA|4 zPKB&%*F^yKSMT~k#y*~IpkC~8ykcIJ>Yzz+83!Ie$L4@t450>_V_n8dx6>ZSs!kLJ%smB>K|A%a%tQmvsxA|e>mIF(YvBP zo?j-Y0@y+wTwz)qlICp8{zID5QAl@*&1~0a4oA%m0#M&yu7v2C3SMNTj1j*94L`yk z#7sfiXetsL;_pO^NWt1a+nDT-CTqJ1m=13ogf3)B>hL!c1`Yfgc3KB#PY%rA270~; z^a-kl*RFp{I*`C7dG`uOJj);+=~a%oF=k>s2{Zi_;Z z)*!a}4-bhf!DH*Xp+g0&Hp%@&e74S%fi!R9A*^#<6n<^u9AlHpa8V-0Rhc)f{-Z6* zJpry*;mt8Uz_CClY~CDTk9`tOCZw5U~ZoDg|RpJMc03G6a; z^ZAHWtD#RR-7RX<+|)0RcFq$_PUL8lHtvrOcf)lDOHH6Y0RtL0+8|VxnYx-A^etRT z@@V1k<>t|P#Zza2Qco}J5}M=~SxKIAL$iaC7W%S}a0tp8%eN4L#!O=NKi3FlfQZp<*mAt> znziL0X1p2m`?Hbc8>imrXYaPxDBd0Bq}OTu`gQJ7Ypn3=!9^O=99Z2r64D)(lt=2$ z{uV&y`F~H<`F@__`ucl`NBp+3Kc!U}^akSjY5%XIz1drpAiV?|!#ns(cs$S`Nd zxH$U#iGWBY4Q2iDaKJo~oMjR9Zyah*OswRTPw==~gA5vx%ybr>LMV0HXJHv-R;M@d z*++d`MC?Q$G~cB=>0@v|xyu()ENJ@~nA5Xfv1+U^Eyi@~Wj^E^da<4<0#89=fyFU< zhtgEFN>-zXA{-9|vZFtNGFR2GC$%gfd@!9JTj~!WrKXKX zH$CV)dddb&YbE%ta8ytsk2gxtif`w}^+tvg zyd$Alv{oKPCXHV;doiGr94%;SbQoNRX8%R1XjyvuxXj~g)t2NF+&3V^&{FO(?>{TE z>;tY56RuH*yDlB&A?xywVwNEF(2m|8;d?a=IL{}Sf`?K6^u??g>S2J|F=W3{66rTh 
ziKtBf2}^T+BS&$&C>4MD0~%72?PDLf#Z&jyuiORM*KmeDS-}tG%I9xbAzfNXNc*n~ zAE34j0JXixsLCSNeK|}tzYEV6m|`mNWJ1!5><-k6KJ-bfiN%wfT2t7TUVz^Q4Vyg2X^SL^-46)i zMKVjtyF@uZ^mN`bWyW#R9P2O$_J^inB==_6=kf*%Tnb~5ckq8%?rVJ7%zIaym?O0m z=pZ9Ci;^AYxe>CGx+Vsd*6y90DuY1k|^HJ4B@yWO1(?w?Go=3EZ7R-%@aDQe6X)LclHS zPHt%+fol)e+B>%!TTE^r#jIdJ@m#OXTJM*dUa^Z?`fAF?)f4U22~{m@-s>ntt=vsn z{fY>x8WoU`JgHoYa1qVCS#KWD;ixK2h87zFu9ENeD?l8Cx zt~=!W{U3Ip_GLJShdI;T)z!CdRo!C8wq|3Y+-A*BkV-32JPZ^%enD4mimZ3s@bwA9 zM#`kWi#mV`lk%ozXpRT7JUggWJlGG}SHXuP_JSQD5N|K##~@C9psWH*UuzwM>yFrN zDsd~pz^_0l?aO*9N30-hMFq~$kBj`zly+wwC97Tks&tcChIJcbin}u1U|+LoIsi8x zBhHB*)~8G;WdF~R@Vq+`qzQ0iMwMJ))e+}2l;_gxv+u8_##Vvo!dC4!of1geT)t%O zS((quu(mfjJmv@o%(BiIL(?>#C^dC-@i1md*3pU)Lw zf{+KG$*pr2=zzf<2CG{!>w6-+c=WH75kFNZu`uvT!66}jyg|!%{O4tI8!?DUWlDh7 zA2G_YvSf$epAtNmM{APr0h>1^SFZ)~VOW{vOeq|0$1G(-FZ7dLf*bnGv3stP^Sww+ zn#M}>)968I)$$%#-epM$TRGgj67#NhD)X&soU11K8Wa=JabA+J@BJD*5+mP}B0&6P zbbmEw4)x^@8J$PxAecn%;RSx?%py3r>1yq3)hw7Kc2*}n;ot|aVE!Vj$M375LH z1b(7n* zT1k%bpMJw6BI&UwNEqC`Bq1Hd>ZV(@xKtUf=P-mN>0NPyEE{r$h+v+vV`Q736o4k| zp^tn74aa=%x*jgSx<_#Nzg@HLLt&uK(-pr)F&lP0nbgRP+)-5ANd(S6f#rc&jRw@R ziDbKQI5^W0M{i3Xh8bs@(JLWzyMW zoxFbU?dWPJT7P)5^ey2^hZ!n-th%U|-~hiLnGgeV$x_;mb(DbdACzU3kg(9|H~hg* z`#JOtdr!NPd9djy*u~pLOgW33>$}nYoDWBBzor#o0L$7sMgO3_wT|yY`+wNa3#_*V zVmT&Fn3)pk%c7!|d9`n9l#TYM{ z4G>J76(T`7ERH=h8VDcK1=Yxt0CrU{ALgP%q7T5DUtoUI3Ik_nUcc`#sCW`Fy)b93 zgLP5%VRMhB`Q-ow=H*#|jP!x{%jGtF0c>t|=jj)0yE`T1Ea?=JwO=Z{Cd$qo64DmY z-;J-<{Wxl}kkbQ-AUUC3v?Rmf`!sA?0z)x}LKpxuHIre_ z@t@u_wVeB>yFbhELAuOGdUALC669WdhuJGsY`&a^{5I%C>bLO|j~_%VvhNb;?PtOv zM{hA9M)a(#Ueog5t-S;UZ5ra^iUi>)`O<&H0+hn^IS(-E1+cf=M3K3 z+R>w8C*~^7)4;ujV`L{`I;|qeA^x#|{z?X&_NaM7Xk3r)1SmM1|lHlQQ{ulFa zOzl4H7n^ewKB(}*f%gbWhQGw8u_ZE0W4GKv@Pwsv2`Lxe_K|THIxj*$m)5Bu&#A*u z(cV?M9!}K$mOH4f`a0sQcdlRf9!K3@ZZmkZiRXU;Yb}3ST){_cvI3Tcp$@FTTHD)z zmB-Q1L1yvw#z@k-=|j@aQLj(U|#F|NKFv3)EGci5k~_bq3RO2W}tYD$@vc zoAaJxdBaI$jnBWMJ!0Yg3or~Iew~p2D~14zg1`6SbP3ZY`~-`Cm2BiH$&dncz*+!s 
z(}w2q$EOw~MzaZH_z@#<-IS*Lf0(~ZiEz$c%g*TjHt5x&izEh0c%se;)|dS@nrJKw zO_G2m=jIvTaE9afvaWLVJQ=!W2JkAihQ&5bA!?6$t8*tZR3p^8UQ|LyS5`jJRIbrD z>9||0Ca{*vpSss6V3UZU#y3$kJ_rBP`@AFYx1mS|JYwV<;L2^WsXY?Sr;bwO|&R%&^ zx|QHoY&1u(y1WJwwyriuJNg<%++)kAHS&hak+y%)ocq4xkn-#J1}6~+twbsD~1eA-JACXq9VziWx_kp{8C9!Z-+tG zDBhA#Z;a{A=#;#;vIA_;=*X2uL#mnVz=M_H1VMx=VY%o76(t(tosq@qd3{-ZS6cpyp1HnjsJcZ8QVuiJ|7WEJ`fH`zOQ|wMZ@o_9*gLDUz9H%sr5`I zFDlyKR&!0N-yL0^Plqp~zr1P|g|kKlJBqXW3M}jk$z3;RYvhifx}0BSoGV-H zKr$U=Q7Eo@F(~5pr8I*;2O=n@RW&yw>QveM)dr#%*vA+8)FTRUaP0j(=RPL_3EG}a zLHaV>y#k0$GS9pGKfbYl70mFA&Ghz_#^0CeeQsM8`lZmb3>0KuqG5&P4?k1%9<1%& z5WK{<)O?A*0BDHY>)b40MrmEF$}xsl(&3;3i+_pX1B>&n_q03Eq7pvW6V(oIcl)#` zVz>i!DONKN!-cxAqDga=+HcJbstXlk`1k7aCeVuFM7)!A*h zN?@Imr(f<|>@EUco=oh5i1KHyvXKfNKY&aKBTy5h1K~lYwLhixdrNfEvoyGI z-Q@rw7(_QD5bD}_2w96om6MFw8Tr4^(j1Z8hiviBhzJd^wv7WZdfHoC!a!aU*O}gz zYzAGPXbRz7#{R?|K2c0^{sOd=7{qd5aa231evo7OPkW$VK$8J-yzQuFE%l3|n-r^! z$jKxAleREj$M&eHJGgL#M|jJ`3AxiqMAt0ad5^!j^RZf8ys!`I%ys=LRa5cC=yYwW z(~5cIIp9slGUh(#V6FbcW6h6@$4UsB@-aESTmwv8=`|@)6)t>t!XA6_qO0N@o=fMj z#cruAQsTi~qgm^Tm}IywGSZ}_6t@LzPp$dvd~%~B?NcqtAN}=Ty{pf!kaSxyIneA@ z{&?9%ztT(C74U$D{RdD@?~77m;}H|@;r`hy3x6A~x19|RgC1=gU-6e*@~cOT{T$tH zy4)G~2>4K>8D-`z?ZCqvjTzbcI1~LwuH1oV=BBE@c-b=Ue-|3QO+*aYa=G_%9kB5| z%R)3U+F^*4&)dX?M+z_O>BBy$sPiuKc+MxEjJWWhUL{Omyp0c7*A$* zc;MY);GGaE^%t=o;ry|#&u-0~90y|w2!Lo81ZY;npvhph=V+|Vap*1d+uScrj_Wex zdxzfDhUZ|#`}1{?dyex%L{Iu|3dz+fnSDQ;~13HZo5~Z-YBtMm>Ks^p$l%zsc`|jdJ zu%2JI%Qs`kJtQ36>~J+Vtl(6UgphM4!j7nOV}gE8#M+#k*FA4jKD|&(UXZb8+?+>7 zFQFu!=1umY54^_2QyFacr+c`Jl4yM4?@f%Vx;8ck9P8MNVluqiwh*BAu=zKg*4Cre zVh@{E@1+=(Y*ByUfnvW zd8ssKlnXomoFI~!HFblmXyX1ri$=5MwIl1*Wt~O$FN4mSr8*VJfKr=2gyYYaDl)+} zwYmza&hCRgz@7;fDz?Mn`tCc$a=J{iALxVns*_geb52yL&+r!ON43!5y389IC{=-9 zEAe*Ao82FGvP6v2t=->1@;uUiBt9EQfEW+@KJ;Z&P-dr-nwD%`%P=njr6=6T;M-rcLs)X&h zZ?Ta%Mqndu$n?AVM7%kc-ua*#)A_KhoWuCH_*Mw91@FKYV_*9Lyou%G!oFIJ$vXv^dp4H&+b9gVM!bk8ZpWZAul-+*&*3XCW zLv%PQ)o=Hl=d&B!9^O@&hgD@ak{ox`GLTo+VwWVOmW+fn%{ZKn^})*EdkUFdLRf5J&X=bazv^FAdY9phLB 
zUU`nn23J5)w99vRY`o>@lk)E*pQ@C|1d$xa2vbSm(##i80-omXQDf4G8+~Acy9T2v zIG~l8Q$bI)b_{vPEjw)~PiLn?T|@hkWa@4P1$P&lRXcT#S@FluwX%t*&MV_#b-vp$ zIk!e|%)iZHDEf3EpTA7Y2{= z{&dR@>2`8H)FNXc(iqq(J?);gxPBaktTmmdbd%*Jkj;~R>U-3(i}f;|V=mPrAn2ky z1+d&1Er#Q+JE#=%E0e#5$^KSpTcj_(wN!@7Z0{9p!ckS)_7liz=+04SYG6d1;fS@+ zhG^|X^RcPf9dyk_p;W|(z#qIt*V8ikA5WEr^Ah)MO7YMYtPRK zrCx6GF6qnp>n?ObB@s`L-exhLX|u4jE=n<4ILPYNwnyZ>HmCA-akEt-A+<2wC3SYALPxUKYT{L^RBCMG!blhbC*T~+ zmbXL~Qz0)q?sK+x8jX4Piec^4;s$b+7r@ifICve-2x%>H)1f51CtZ(UAJ={o&_U&mno0Z8^ge?SoX>?x z0Njr>IWUird%HOmXO+ai4GJNc*(%O4@6aN6MKRDco-4N&1a^WeDxqnhaa?q9hh}`b zL+uy!V|f+{b=}h@HbY4Ygp60$b6pcUM{7E(IG`JlW)mWtn?S*D)Z5F6Q7VGj*#b|# z4JKheHC;HFUPG^<`b%+npBHk8Tle6wb;u-YrBa!O9L?KWhSR6iG#UoRl7c;Rk2Zcu zPf45{3K>0BWNUi5MUX1*1?;&JtNMSQE}Ew%7qnnXrsO8E_A3c-82PD2tiDPiB++YK zdYcN3xfmc5aWW7r|CK6mF%_*&5zwQyp?UaO5L@T~cjX>u3yXvp3-#&qa#+e$)^^a8 zt;9s!3a3oD_!;9v18S;vl2ud=R@<0p|M9xNsK48fm#h? ztn4xO)->iMf&7kW+&pw0?%Ob{LbzOKQQrX=t^9u!Sz2P8WzS^H_u3ID+?N=&wUgG(T=Kdk;6Am|m=s{(*CTBIL zDxWb4(?mnFed$dN4Ro=s4Ohv?JFN-FjnT&|oAl(iG6lw$$YiYI7m_c`)-QBI`~Ak3 zJbYk2Rb=X^>+-W)tE*9R>a<3fCTl zlWjo3(u@ESJ==EkH^H@awn3ol_O11E-8uT1K&))#MRz<_nxLEOIIaTT39_4e#)t=_ z;6V^hz`0T+dX{Y(GRguw*WNbeaZ24}wM$o=0%VrApN2W{wOS;1-M(uIDp~9rV)q-i zE@#mb_ZPyhr07$1@2p942zs6z7b<>8uEi~k4R80wb=A?UVG2-CypzovC;d(!FEMj3 zgWr55oTlq$jZNsdNf}}bxpG=R0y#h^j&;NR`~wI&$K;A0ACcVSMVhvPoc z63B4(r5+oOlkE#<`|&9?(anxF5perwH|;bl$Zy|p2s10mUi5U4D{qfKTo6XZtoEH#36 zLq$GlnZ4=B$H)3|4znmYOjcsb2B+HWgtm&x$hwY_oTs&K2r=D;lj}j#)6Ct;w}!Vp z4WZmJJ=bA;#t73GC=ck|14|sa+##A z%JpSG|Vnnrs3HV(f zWuM_a?2K|kDlxsnftQ{I7~|<&AGyp9T8&NIf7aSyv&5p2$Y?c{DVyQT#;okH$V$zW z#y>uLAX26+ftu-4U}+c!EQ2;v(ia7Y66>~b&~)kH+E<+qd8jKpnb7yg)7S4bi=X5* zwkf(7o~4Dk1#7+M<=kKeuIBKcOWw*Y{>}(p=E9Qp^1lZ2q~_t)iE_o4+B5Oc@!yp- zLwnB-(YhGSp~Ne*dEK?Yyx~VnB)8X?1}A(xSM?$bVU+##ExVpsRS4 z=V~;(Su)EncZfB}cK-6*FR{q8H+GG|^3m&u8pwD7# zR45g_#-|X~yszIQK<|dR3yL(!hp5>Jrn%_6ab?)1be*dciblHK7ny-^)ViYYeP=r?=FN}(1?tU3DS4?%zY%gVp-b)59d*mSij*Jo~6LVzwN zsloA?SlY3uM4PU(_9ea!O=Xh)(y~-Q{FuVamlhTV*{o@ky3G4=&&)bik{ 
zpW3MbAy(TbW%rOUfU4L2L0x`(Pnr|$I&IZ3s6R$8&e7q>Oj*6!P2n@X9{ZqWN;{%Gq>`Pd`FQ}jGnR`L z;u0H_<$4Fx2AfM?;`MsTe<9zOqW;XNrpR39LkC_`09qy-yU|1Jiv;E`*py;ZZmu*%^nA9v%p`)yut?Z*v{dPJwh7yPKmma!;)v~cd^ zk^JFQ=B1ch*=58y16P3sjq}dhGE$9}=z;5$nV-C7R;9A9{Gv@LShegAbyx{UdzZ8B zU0kpCelJ}4_Xbr1!^Y=~SD_fXbzZZ#ZwQHl#7uviob2QGwYN4ZNl1aqp_YH#)xQ5h z>VuX7cSpG1`J6tokvHn+Eg#WWv{TJrm5JA2Ky|T4V4X=D{!&c83c$4yhWgPo+WO+P&$F2`N$P7?qY% zv8qgn9HQIB0-I~m%pQjeOwxSs6_r1?_4y>4286TX{KS(3Lu|Y6WBtj@DqrlyaXwY1 zBEjJCWajvaj8vuG1y&a;GHUVv@JuV2^GGWz;#!`pipZk+c5VW+sr_?@4$Tjr^0*_U zEngP-oUALBGZw;V)uiDOyl>`OQS2pyzq1bRBz{yD%4B9wi|erKR&H-6;Pcnp83R?%z6_rnH@9vXWu4vUi5)h= z7^MmVNy%aZ^1Cst0cdjSZgCN6J#F(L@y1f|x;L&T8L8iM6v54_$zRfxI=yWkI!uCO z731Xo!vm!6cz`aPwJAeD8j}n<0UBa5FD$sxKl^Hlt*A^g0nqpNmsr|R9P56zZ(a9hX!eQnhF$c*>iSdX(!SRgr>ac>>P&uu+dk?qK$1|{CPpf zSvor4_%VWARlva$ijz9)prSX2GuK)FK1^^ZzU>WRJ9kCVo)7bZV}EA^y6c`SoLT;6 z6vle9&1wUes(`5`<=LSCR2#dkRnnLd`&A1ml3!65)D>Xh_ExAi-{OSkvo|GC2AEDK zTk#9F7iRSAr!u5&x+e;$!;a*bokn7)-Z`RHX12b2cS)txY!92Cbgy|R_82e0eMDVl zY0diym1IQ4GIMuqCrpZG)?SACgYp^ikN zM_YURp|kDQCvG;bFF&Zb4qEexYj?+RhvxHPPJV42x!Dt@`*A8X`E=0x>wPNH#x{hI zyTRN~(KIP?7a(!p>;yUj3(5?U5|iT5xK|1G<2$kcnprTIjk?D%b-CM^bsX{_yz?p5 z642qS3+0ID!1-nYONC(JZL(jK0RQ_xwC#U%462Jfy-AFh1W6!d;=r<@)iqyuXy_9~ zw*S1j$}I}0A4?B%zF-yUjnCX<^ z>C=gjFU}q0c~^1M@0=w+W+y%?>xs~6!<=Ws0Bu;-r>bE`T>oMU>c>IiS31pC zh$`fjHKc@{qD<6Y>W^-XP7fMJ7JFyK855Z343lI5J)d(1DLN z%JX1{q;EIYKQ~dce{u0U7h17~+Vii&x06(HcJqDp)qz8proD0(4KG|F{;i=xb>q)> zt|NtF5ROgCj8my=%U=dR`eRyoVlmUr zm^s?&>4mdXrB_9kt7Eq*YY?lh(&>_rr7Pa`Gq+qCVqK;d%!W!pH6u`|? 
zGdOBk0{jR(h9$;S5fmrbmDs7ls&}Ywh`s{PCp;(8(Q)_E{|CCt5#B*pnMiO8r>$%T z64}pt_*r7tPw3}@QczVydwBMAIl);i8LWzDJ*G_JX$&L6diXO&@kSV>rAapN3-O7W zW15B%5BLq<$Y(Xesn1UCH^P)Ng?|0lQ*)_$87{a>hS3U6xry6EDLzygv}h2-tZ|#@ z01nO043GML5#T_@tZeEreXH6)Tm#q?1}t~Uv)#|AR$W_Cf2(@egAkVp(!Kjx&Rf5d zZ!Q?DguCu*$ecFgbnqel@x0w~oQh+Q$0eQ_z4Y)}UB7G|3CuidonBhnStw4dAGU7D z1|uKe`@Gv*jpHs54{{|RGUwW#vJ$9H1)oVt|Ht9LZuMx8+Se>tSj1UMr-x@X*4A~Y zY$j&Gj_8pmKXg}_B_gm{@Xk{Lv$Ep)+>HZQN}=YRFo_Lao-5fBTn&ofCW9LATTzAh zN3WkP;4$|gl4xh+!Hx%xmNzn~5UZ-uY};xmtHq>VUFiCD56 zUz-hG; z==otL*l(ad?_p1^ly5oN28r3zDILAZkIc_O?mQfWd0;E2W?AZ(T( z_UNHZ#+N=~0vF{5*D?i3k)?-bI86Fya1WELU(%QwYC3MZ{ZUcU54@=h8j0aSugc3` zvDqn&IC@OSjToqIsA>L3B!tZ!lJxswP>C4|N&Ll=0z7dvB8pl+4w4cidC@ejQb!f) z70dgZGCMJk)f<#-`F&i`SQhbVSrlH~m=?4w{wm6rQ<2D)7Uj&b@tHTKN7R?j7@v)F z!LTt-O8b7GNOjhGd_8`mxIeWn{cR9){`a?(Z_DyYo8n)S=Vs4pbv{=It&*Jj?@FVinurv8V(RQ8c}(~tWW)|G`fZ-0R?p7nWUp;4s`Ohda|-`UR^z| zudLZG=Lh4w~GWERh6UBk#pM|N$aBceZ9J}ymeB8a=+b7ER+ z%Gil2AdzZb^U!D_*J!g78`jKlcVBn3zhS;C4!2`rjNd- z;7qDNNhZ@?i^X%snRU_i;BjURP`BLuWg{C~ra2;1I!noTvCx#kg>em4=oBxiq{_G6 zd@#2ZoRVr!@0Ux4oV&E0gHY)xns-_XNnZ4GEx?_bT1U7&=@#*Xr_lJqw1nTmaFbNG zU5Q$`9OkLc#kQ@*gnuk`A({?B082`G9iq-ebE22+v^Czw<$-rpBA((P<>&C%-d8BN9VKbh%j4GQ@j3LypR1aMY~HFfcr)gUPY0F4 zFMYNjQ91_A{rCcwp06Uh*&;I~c&#rc)H zW1cJ61l7Ah@k`Z|g2;i!Q!z9>IDvP|wY-I6_v@d{>?0XSWC;GFu?;>$ilten#KD7D zyhvtt5g;;!f53z8(SaOslRY)waP_zFG|F@AG+B(#1vj}4My9PQUjn&wM@?nei^sz` z!8I5=A#?dv;23+m1h(MDD{(AT*Dgr5q);JUoBpUazYcy?Yp8<^Nrgv=KJFPe!j7Re z{8v7|(oP{zl}gdBg+=e~Kyhkw{p!}z<*lIoZh3v%uzS1+zWB>ijPjq{Zh>VB;vDMm z8EtU_L!}%0BC<2q(sVXHiSVsvMkH|Nm>ajXc~|b6%ctogg=WQzmS}}IEWLrx%m0yG znz3Eu zgx@-UJrS~IEU-QGA`D)i)S#E=_G&82P*^DbU*LOyO72#%BY4HzxKndlf}UDq#~aO* zB?FJ3D=NNfbRsa&?NRdSFX+CVpt=4wpc1H(UT)xPoy&5o82RG+Gh!hweoAxJuIfT? 
z|B9RlxF9?anSOmc!f^jwurArBfPSq4mLAjIR~10H?O`A>@Cd08H4)fKMoH4YtiAJ& z6m$C8T>VI_@aij@q0l+Hr0g%I#cc{sN#?od#d8nNZ{FG&u-&9|X$0sb6Jki@whh|r ziy1mT;V#h$3LhV;#aHETRp~eT0Hzh527Hn-Zu0*QV*C$z+bkVA{)fEj-;p;XICHJu zz+%Y`u0{9-1%4@aB-3*Jz0g%}U;aQ%Es_%>yExU21R?`PFrTVUDTW7g)SmLiYs@!^ zt2-8a#tW6fqklco+E2NnFWfBj0d%T`Kc#e-BOGH6l?}`v|LW2Q9~bO9O+;zgno9@K zH&nFP-A7ikX>YYV-DP)Z_V4oVMkQ8`r(erHP20kdP3#XLoJPt0*(=`QBoohPsouc_ zdioBXx2{q(Tb#pDkL0!Ab0c-6SV?!t@w4KklH_8V>k>R$V?#G!gJx4k{K14l^TYF( z#G(jK^|Lv^(_7mPq^ZI=<#I;CCZ(T)7(8Sj)MdttC;BaXMsfz+`Bvil@|OY)EN8r-+L+y4?~=?vnxwimd1p zR$oAc4P;|mFAb-w#wnUS29!Ff6aEbk^q}TO5J({M8YW&DY*juk)4`zdJPE5JF}k16 zb5d_T=Iyr9%;$O3I4m$LVJLX^$`^c6*&(q`V1^PWX*E=7MIH|9yEgK~W$XNznjBPqP20&-V7NnnWvvT41PH~YKv^k_A za_{sbQ|!CA=yzUS!tuUPu8~y!cB&_5^$6y*R#?7wNN$e0?jEDk6=kVs)jv9zZu-j& zQtAESc5D3h?j0Y&Nxu2)oaBEms`c0R7u5>w^uKDeH9Gff(`m?-##b)hBJX}cFa z_qWjCuqKc645v;frcB*c%3HjSn_p&DCdBkh+BXyM(BsjEv&aTj(-N!g)3BdbhMV9` zG(&3H0&#hWd(T=St44Q5?i%EJgS?O50FY|yhHx0VkY+|h;*N!mkQ9N{p_XPOM)(W0 zsZCBO9{wqsvYz@)`m2L~qRK83;-6t^vA=fF`!026lFcjQ$*I~=Z0zSe5`@_e9YB`t z)2{U2O?25c52JG2Z}lO$rNsxnqzcM=?HW%ry6dkdU&KGX4q2nuE)RXQbV{dUOL!t| zzX{`>=5ZqG+MiU1$IN{Gqw1}1>Hc_$C&$Ms{^^j96$3Uhn7MZqBj+WOEJ0iajO5_R zw@2@5y>k6xPbns(LLURUoK$X>QAt*pA8@l4nU(smh?%Y^WCc%IJnT-cdwMpiqar1V}s-QO3;Ky6lk< z`i35_kWhfD^_$hkW7%zyjHQQsjdiKms%+kljNmECU#KwnVEAx1anNudb1bVs-eSYv zVyuDuCv)Dx{79JWRHK4Phksro^T;X2li{#Ny;P@BhEgPJVA;aZix&xWOsG6_d2*pz ztA5*8ued_RZ9T*Yk5DT<*nHK_euL-tOC$%ARI&T(lD zaxm1h1^ZYyK^oc(EMSCh4otbvSbN6|$M(|P6c#7a>R?sI(ybzA(f3pHO__Vtg0>R6z*Er#;wZ_9LK1)*b`Eh(oZx~zdP^A8O})zt2j6;L+k zInKJ?q+Lf!)}_7q!+3;kBZJueJ;eAv)VId48&s=<34GOc#yC3@YeFKdn5g43j;ed9 zUAj~(m_M!bVY4zDLRZLbL)kcHqxG*-_Z!J>JIVsLL9eMy<&z!^4Ru+In&?ZfYY1}8YE>s+ zYNM97C5n&!z})&Cgpz<$in%jgSnbJ#>jY2UlC7tO?;0M{KJ@S9#7@uB#sjsQwy8!S zK4rf)Gk+7bccpz|ac|HF^Fg)neto`>@7JdRTQR+s=VZ3xb=r8v;eJFQde@DN+~0BX zN~#N;8ELPuy3Z1+xq3Yk*o>S%Mg~7Q(uIb~33ZvqVt5T{s9#jUNwD?m;mszI;G$Gs zW={hQGvJ+rF2ZG~EU9oXjF>6%o+_`Gndcfm96cLq#)`VL6sKMpW~Oopnl2x8I);Cj 
z)bQRldR%H5GZn8)(E7>d?KQ*qWyWbN4qUSJS~UB|_>TPda*0P4aZ=8LBYnaH#! z8N;~9ds4=hPx2cT^8FA$bkQ$pp;PCD`azii@Lq&>Wh`em*(G1yKK)LCdPtMx;zQ>; zuARj%E7H8D1hb!k*9w;78H!Gy*SQa%6EP5f634)UcAI;z<^EM|Y6`vM@%eCg=i`m? z&wZ1kq*<223LAbT3mu{ts*>(Wqa_<iCRccjCi%r+3}tAE^aub z9nl`v|0~lrDebIys9{u<_QlQ0)C{^ID7$ap1?RjmYZCNoDAod3^nFfzKBI6(=fwxx^J()xpGs}x?(%#77ZE*mX3EYAo6+eHu#D)#DA{3+d87BN_u2kUr|)ML#T7fzJeoMYm&6xT$dE%lcT1cF zWz^5-3?I~8A-TWaeZv&D%UtBWSGA^GRMMuNUSSTtqI1b5`_%uEWnTa4`>@5*bS8@n z(5w$0A zVO?c|0{=Bw#*?0Mm^ z!+;KmDaqZ6yK%37B>vadz$!2tuWm9*$qT*+m_|7VnX$0ZIFO=^FYp^b!cH{owa;%F&XK?53{%7%_3r zQ;u+b+k8#kEe7*jlY0Zi_oMa2lbUc=E);fhfP9}cSgxSeKnO88(k6g)ANrz;=?=a$ zDMK!>m@zS5dSYR}{BliHvBv!whRFkrRSRy;a{y(6G3qT$7T{yvn=7%P`FG5`|Be|I ze9S|kg<6z^kl<4Jf;kqNp1n^=3s*+T%AYl+rTA#~MJ#u~`=9{U<<#vl-DBeO(GH0>t)GI|~`V#n`P2KTk#Wy@W3_ItrNDk6Z?; zgU#7&+K0L`e&P$7)9Xo{iM{5#mCDU;{1TA?lb+wRjkE(?#?vvub?ABG!7Y~DbQM6( zWx8P#ou7-%9d8J)PdE8Leof8~4sT{D>?WFQJI|*bmsK5kYIdu%>WR$aDh2Y7!H~0o zi&@luD5d-Z#a~IM-V?u2Jm|oP&b#m3l){_$6$`kX7k{uV&oVd#8e>Z{pzHF;#>K(l znH(^_#$lR!4-XJzLz6svr9lBSG-$XQl*6%9Zg8~_F{b(a3PYz-*j@ERv$dRvn@|>*Nh2{wlxJoggIBpTDFDo#Hv5W)eegvP zL+HvG9qjOp?!jAX+oqxg^a}R z4O>E5=@?D+15=N0++M5*iC_kzf-PIKdlA`^V^ix_742K)E!uC@>c-)fuYuqe6ZLfP zheJZedYJgud2!pWO)U-J#U94^<%h|(XQpB$m^IQ97v!O~v5iTzLWy8iq~EpF{YOm0 zL*TtL(^4OenC%f-S0EvwUOS`5+@S^v5>i`!^!~wozgKp11WD9k|HD;~5fS4hzck%; zAl|rJh;A;S*o&M$*)qj*KVQHD2_MD`HDYfSG-oXlRM@MWby~rf#xn|M6baXm zrN=ej*REUg^*7sJ(MkokyEnhK@D=YIsmWw&Un}IrHO3K0M{4Zm#>T;jkTNeqa==`; zQqKP@spj31g1Et!)N%8@y-)uw$+pD6vq(8{^sP9v?>Jzl3k1q*!{hy^aP*{Gkg3cEbH`yGt zzr1}RsAt&^Ng2p2l~4G5em6*hq`=b8)MZVZ*@Q8p@djauTVo&f-pWz|9!*`mRahVS zdXFGzkxN0&jbWF=W0=cRQOm1U1m)XT?F_&xlupIJkxUV93`|d$108zf>S&wz;!?VR zuLwd6>y6@jL<4>Xkl$?-9=y{wWlFwIVw+tIEUa!?(H0;5J4ZTX^7ZLbckh3XY^R5_ z()99c*_e94f9*yDJJ;2Eaq!3Itl>zo2{A^2*PSwu=tc4Fe8Kxb^8T%9;BS2e=3(cC zbr;~+$8$J)71qf5WO!{O#S_b zYJo$oCdQmuf`5Ez@Q(-ZQCIeqPp--~zmiIgCNn1CA$2iX2xiQZFOfPNJjw=3_z0Ip z^2C#eTn_R29dn}UdPfLj%W+twT+?cFBcRAwNXiJ{;`&NCwY`6A{P|pq zSOR@ycm>7VJj4<0l=V?1+UXdHMNoO+@j 
zz01^d-pNQ|_(R@{HaU7;105(MyiB=$SpEYJk+#zd;b`O2;Xrrb}PBkFa!g@zS4MzWGpw%AayF zitReM0X2F=2t0WaM$T~8vdGpQmIh_-{299Z3>IH*qy9Fuz`?Q&L-FFOw?+UQ0=Gwe zl@4@nvlY@1MJ;JI_1O*kK6}4mR8jf*+Q~o4UgfU05KmCWYCOfM%-J5q*Lek?x=N}h zhE6)C6J!9xDGS^0)ZhhPn7y+FmdF2|`*?Exb53qbvc`lE?$W{j;K-fJ(yk34Rc}P zXy&3hW(UGV?(NRZ#GJD~jSoma%7|yx9QLc;!}TX@Ao>0Nq4K~!Vm_@Wo#veah@4q~ z5)+_VYMUR2t7OT^@eFe}OkgZcFO;zd2Ken5Rhw%3&z06IQ>zo+3}jW@iK7bLF{=)PDX;mBrSpr1n?q#w7~b$ZrKan4FCD} z|7iiddskC1;Dk(h$0B3f(jDmU&PNzNE$-c60+Pf&f!(@&VX)oSc%g=uoKVyL&0$`|dSAkPZuL%YI z3MSJ5p6int;O=u39_kL4k9eP8jUF(B8@vppfGM0&u{_uN_@EIp!Rv9bRQfnLex-V_ z;hPzQ);snU_7`dm7+0DTnh-^|DAq>fR|QKsDoSgz9D`YPtI*JDT*R#!mb==qt_Z#0Ca*&I-zf`k;uKDB)2PZ0p*peN z4!#<7JJf9}Xp9YW8P++WH z6I;X_a5P~3R6Vq6%VMc=XUHE3D3a{d@S_Z6sO%s%yH@i)9k)u`(mg9I+InI&WoX5I zSzcSCE<60|W`322Dq#?467$;ye@m(nNp_hQ{eENYhvIYe->?=G=i&_J+1sA;KkcEO zoXt51FAB;iUHW?wKl3Etzbc=jl+9uqmi)`Zom@3=qlPt-rW$3PxH;f*ieaZY4^hxu zjSw~gOU^g!G|Z^$098vqYWr^idMCDR^#Z`E*M3Ak)rHkC=_ngCn_}ZulIS>3)%Uxk zYXVh&4hw@m0samYByikN`ErS6RCv1|>R=0F1^@z4yZE;{`Em0~L(0U@Ma?&T{DDAe zX=NLchl+SHL;P5^l%Eg0>f0}k2W#{qoifWm9JwKfK#e zxacMb_Nr+6zNJ(YKaB9(1QSFJuDwYQ^Z7mdnh(2K*9W3~Pbupx9D1QAw^Ez&QC(@R zn_K$HwvlPG!~-y&`K`Q@=I<{H1_TmReJ&vk;1*%6H~8p)Q+`1U0I*<7l1vI!Zzwyy z)bs#zHd&N%#%ZY6=|nhsknBrm-Ir6Ujp;DXRL6?dSdp zDvUG0J2b1;t4E4Ix@7q1{uu~6+*DugbEsZnPdNYaZV?)nbw-W6{}#klcYMZ|ZvzgS z-e}0TYm6b5_mQ0hfPLBHdxT=r8$=|Dy&hqv%?c^wznmI4OTV4PfMBxpbg5bFHbQvb zSRS))mfL%n{ZNX(9WOZFejGnimF9#zz$iD)O%xv&<}RfcCl3d2iYukgBy2w*#RC?J zQjjOzXEkX6o?cc+1Gc!Vq!?817}@t3&))F+k<^>uby-dfus0Gsd-Gty8|%=lOqkL9 zA%&ftqejou(b3HQvXuws?^uFR(v;@ zRm-g;okCjgVN#_WGE16-8@Otj4+l+sa{q zB(7|nuV(_=-xpEeks-RhAFPt&>MI9>1VIMkIk#`G{f2pJ&9IFxfI3GvYqf>bB+*H7 zLrXe|j0rUlE`5>#Kn+tP%O`kRA&c+L**Bx_m=lsSmBPnVd3;PYQ51jhZ%n=Hh->;M zrrPCnRkSQpg@0k^T0heZc#t}jrxCvDo?!acv~=&7eXoGMG$%q?%v>|7u@ifl2u-`aI%9H8s!+&67|6Mqh~TVb$HIhcq4^=Q*8;Z_WhfF zMnX6)qJrI~9=q%}3cD}xGbeZ{ANg@ufOnizqw9Sr@1R*`cbYGrN`hKhEb>H?T{cIV zTb~_d^5%7C?dN+Nl}KO-c?IEQzBF0tRL52tVgExd6sKrvf!a*0vfRHMQemPof%o`?e)(;k}u?-}|7ZD!|y`K!HT3 
z+nG-Gw9v!RS)7|3UC*S~-*s9-g&&Q;d%$P{l^TM+^u}#E3AeIU55Re1P{AhAw?N0@ zt*}p(fth0;9MWz|Wj3zIzM$7&&!!KqW%BVvlX zVO!D8y_xJ=_zP(~X^sc(H2af2-vE@#0Mk$S=c0)HZ$YZ0Uzf7UX@^;@uKrK2x7VD8 z4=$0SubaSjAZy*pVgUp+18H-7aoiIu9rq z)#BbzBwI{79aAP%yv%?Enl#NMnmFD{8rU5!RPUlpye)bMH*aK6RBW3% zZnR!!&FI>=^@j{P^MI|pgKFn;W(MO^b4HcO$U{0{g^u*Vh0`n9N0%h*F+Tq0-VgY% zce7iU`XTDCL-pcUPRFV`VNYyAe_B(-1rgJl%5WI^-G|>|0gxEzotELkT#u2SRjt;w z>!XceKbBIYJdiIabgceKOx9(zOXV9PVnn`5KEkhPUk5;`W*b>ve3p#Bd+oA~%zul* zFk|0ICPIuk`2M&scd7knPuB>kIHB?wg`P+~H)&``N{9y`J**ik#I~6oA;%ue!rAm=`Y)q(xv~%<-UMz(`=-(WH`k4|rWDD*24-7M~3rl#EDw3$HES z#Y2pgM`7GQr}vB$ALXxA;G?`4sT-K!ZaM0+q@TX4(xL60AkSpUk>&RIH=C*R^E^h= z-)=9RxiSc3dj=OFb}co{ES~JJ1;3YJB>$DbiIBTo21CbO!&HR1Jg^K{6yD0GM-iqov_1*MGdNB7W?*`TX zODV*VYWm^1ve&hMHpe;u_7-@DBYujkR+{*PEUoBp-Y?$`IuP%+0AimjgkmTfc)k)U zyzN3r{%hNfILa2)-W!zWZU>!Z#EBY77-6WlMKH>rCC$Pg3ZcF2{^Y4zxd^MmiDh1U zybwDudu6|VD7Vw!=5_zgjq24(MPRjQ(4~vILJ@g z$}iW9FJ0-o@QwN90;%jxg>9{_K{qD+g@0G(PHsUqAa< zw@Xpt>G>4^aMZ56DN_2Es8(KD4r@b<8D4i{0`fW0a6sL%{l{7)|8Ld;WO|-dy}G5o;4jO2 zN6CH3Lo}{R4cZ7kNpCRFIlWj6S6x;)y?JQA9X!l`;NzA(5pV=#iFo1lDe5u* zZvGlRL%9LMQ#>J46~Gt6@H7z5tjcmpZw0L21-PE6r2k(R@F|T;QZyI5q((<7e_{O= zA(U&k)y%I-`RJ0r-M>snqM!U%n;!x+61)QVBA~p9LCO55Mni*M9^j+EvJKGB`5)#R z@d@1=U#$GTDqVBFBJjoIfq&*q5kTqbLLxzgpHcU`KYe@KmJa4Iq!_~kj? 
z|K-}z6lw#!THeKJ>61z>@GoChTt)_^62>x*J9zV!BM1KXeSg9UBsJhlF$~rJ()7^F zE9zhLK=)^Qs4hVMUM>M834jTORgPOeeAOm!xkiyHVgP_$)t(@L>nyd?PcxJz#1LstQGKviT4G-Fo#uh-x6J&1qx%Gml6LTY>&35 zBY({Y9S}{;KXI_KPlMnMnbuP>I;fQeesQnpzWE^m$oguj0lz%K`vhn_jsyaEKDve| z5lALKRsXZ+unGz*nxL0Xmy0Vd{C{(HarO!I1=ybPzf2oid7!xr6w|znr76B1*m-&I zpT~wo0#?ya5^ye`wFsjzNU+|yAvp51A9=NOBMdMTuXXLwrQh!^!^W@YcuQs%z$+7o z&9}D#K=%WjPXhNaO-j1FfU{-2m#8;u(Vfsm#D>>BFeJ;*}KE0x+OeO+8NM-jk!L|>|W*e90(Q}#x&$I%iXTE*|-5Rkl^ zh~|3Irxp2Xek4{5YCrstzeRf$$%BDfcwk#UMd6XhygiJowmefUMv2JB2srV3K<&&I zJ6+y_^XKSmBAixCx*X;~!m$km(IcvaL9KE){^mTt?Xe%7;bm;GB4%47yC+J!z?O`^ zcQ79+)eV*bTeVB2g02K?#ANSnTVLT>cMTOBZC*Sl+cWXqD?W2beETa6L9w@Du=X&F z3&%4RWsV0u;K=al@KE>zqpZ0a0dbP&Uz>m<2GshM>fB|Cs5GwZc_RrY^B1q^IMw&r zg#WpTQ{tn6+dfcSV}5>6lL9zJl_n4&fV+?ZFVa7lLC-nK)4Nt<1#%~vM@zD4z<{P; zlfhi=Krz^Po53kGlnd8E@iqVQ0_G*pKfMCLw=28xiQjnsRT+<1!2Ga=>TZ)S@?Uai z4~FR^^CtNo4(kHNN&B8ab4w$Ux5M`R%)=sy`zqlA4?Xpi@rvuu%R4Mnvt}aZlaC?( z*6r2W=9Ns1`AAZrDU`js7-M3UV83eaMmy2Qhxce=rV4xo8$MWS<4xo*AQ5!f^KE4U zkRX9q09tK3=0Fp)z|>M(FhS>Uhjzr?%R@a>HCO6{G3%Uuc(@pY1W5pQqkjiAjw*)? zqBuWVW~vuk+f9hSxoL>AUEz2g*UdL~sOw9RHSZB6--}svr@wDB(rJ{hO=6yWoy zV$&|a#N&kV!BrAz(t#QCfz+$QbG-D4`igmB)0=+_92UJd84i-112lH;n%CCCZqU-_ zau;CTxSQ?S5S^(Cl_}mFNVh9eF`Ylm`Td!{RM_1~nY})p9tt1^BY~{>zg~MQ9Qgdi z1CZ~2`^shFH6RN*Lz|#8O&lWSmv;%dH7v&nOSjFCq^}(y525)x`vgvzQ0;NDNR1T{ zxU=4@_S|eB;qijuUDh>D$k7J_-C(7Hrza9&eKE0xNpP}zZfj&D4OwSa8@zWH_DT!W zM%@Gs%xi|#7KCpAccOoj7$ou2iXL5X5{}0x%MUy(dc)xVD5%*#P!zQLd3I*j>?NGF zc`;+b2O@X1&w|mbbTjPt-ectJ1yvw&XW;1SnR!h;00n;ak$3`N?y+ln!`(#I{JVGXp1>JPn31Qzt4Kwz|Bk*(2 z1|~ay6!}DAL$6ZZd#Gevh-J7?He-KsmpBRrM&Komzs?Upi z=W6bg>zdUhJqH~4(WL{s=r|(RLYOd)-Vl(FYU^*a98H1Kltt+h?~rVxk&*T48D?Ic z+d?_}bq9UA&*CfoSfbmi*y`%%qQx8^j9p(>%=tJ{EkD*_HTd7P%)(N_9)pHJV-29bx8H*C&rew=GROv#!RNJH zI~)5xTAlhqHCo=rqU8!IFfdhi(dIXQ93f|k^RQ=jTU9yiEnzlT!TrHq?wqrkEUK@J znonYDle!Lbc|UGRMKa~rJEtv}GaiVp^VUvV%)&kV?nok^luoJ{EOZ{|u@+A=3f`Rt<33EE! 
zB1ZmV)+uHI3yd)3d)z7ZjVg8bs;^T!#|LdkwXKdy2kv)U<}FX(Pc+6row3B=^Cqcp zYL5wMg8rVBzLblWXDu*ynkluuSCygOZR+hC>?e4wd@03KMw zTl`#2XMDx^uA5?_2tGk!Vt@CqF8s9*Z}V3Hc$Qi4e6iCKND*W3EBp&>`fWpA z5>>;XNAH_<$XHvQu*i)hOYyW!d+#p2MJ#`Gp;FqI8<7B3V3ys z*-V|)G-A;96dN>g6N0mT@4CVnubExMOhV!n12h#pp2j_CSqE<1D)y&x2V8rwK;cYlm&6k6qtQ7^Qmb@k=>>`fr@h1A8bZIoE03M)qt!b0r2$ds0R<3+N&~qJJgtapK7g6X{S}L?m@}%s_Bl6ik&4Q9glwj>LH?cm zHjwi|1wr=)w_A08nrDMa!6z`=xGx{s)s)V2Q0tzTMTe^o<84{r>&5n~3&NJdAc)kC z+zxJgq(@LIiS#biGnTuQei+E;4=OOoQk;y$`{aiQYzI#h{!{ckK&Nafr5Q2p@kWyl zT3r*g0FXeh@1&1_3a7>222&qRiffrQm~O*{!+=Isp88n!E!Nz#o4psm5CzVF;$56-(jeJtb~*il`3Vi>J}HW7=l;$TmC!qvm$&eh-uCxte*T+@y?%iwP*+rdJC+@x zC&Bof>R((?6FH^gJj+Og{Ra9Zm70>mk23yk^Ih@GQ9E+@{lk*Jl*gu@S-jya!}?)S z2(7gb#Mw*iw;4opUiOU=L{a>AnJHCf%|6E*Fu@8dE+KE7#p_H1wHFm_(P!{#WC`)> z^+aln)g|76we8JFGNPZ}C9eD2-)E}{{5kcCkR=hj8|6^yioc`g!RtSplb!cVc3p*{ zjYg`8zp_{a3$5mpGxvk*iIZK5CYrf!J6C+@OhBc}^kU%_+OG=D4dD zoM{}#3d%e1>Kts#^~4GZIJJM_KXG)zstB{~dJb4fw+wLhX>d}-v-Sr)WHDhpzEozO zO8;`E^94pmYW8J2h#w306fP33zYRLQ*OG(zQVsQu4ZC7JQAo)`kxO zDxsRTFZZ1O+CKtL03kmAAua^cz_#5SP5?66jPZv+4Gccg1KzC{T~)?_O6WWdUj9*~ z`|}G$4aY30{(z^#QIFr=v+uL^yFm`WO@0`HXYF<7UO*1F@1d`?Iff^ z>NvN@H*jKN=y4`7!-iGWf7I}-nt>_8)0VaduGPE zKXNqhJKXfrW6#?Znni=>^pfKl$s}YeEt@oWSpQcN#NW(a>P}Cf06D)HlBWlBj8B{Y zoUA8~7(;p={YtkEW;TG0OGvJG`X z0&m{_J!HQ$`oNdfztom>Ozl;c@t>sUpC3E)RQ0yPEMwFpgsxNg4in9q&A;ymi0v6b zU_wqdhI`_|j?mp{f75?r5*Z!G0v4q_o-Bp5g{Bo`A}gRf_sa`0Y~y@}ya`}W_0_Zboi*YOPyEQs*ONs3DSzg zPwPFCCD6Dr{udDF`7(rx8vOo_3bhM*+5qht37gtP-BfF%N}O!+x4a$%uj(6zS%C{C z*tKT16yY=L5tw++u%DcRTXe8N z-&~P0hHrw^y824zUle(PPq&G;0Tk)S=1SpFMCPBUH)rF?BqP`DasQqJ*Nth)TNkaR z(p7!-yVMvxpgp!^qPFXUybmz%5d z*U08@q-RC?cIq}%)s0D1wDEj!9mB=K#FXk_ynB8&GR*{1^%|x6f`t1i4vw$p98;jR zL7TQU&_|W4*thN?5l_5Yy+_Y?^Fv2mXQu{a9Y7ne(d`yuApQY!Oo!O~`nKTkq2C*iV<<(QT>a_!-BccBiELr@Dw+#lrh9yn zgnaj#nd~nP8Fd=$V|`hjP>$|>My?_sgBLf5=S#F+v5c`^Q@$hfmxbV7bvx{q^)5gcqPmlcFm}ygeR6(G4w$RlAySJvR%9 zhS3C*)QTA$=;nq^EyObW4kKss2=a7#`iIeeJ;Wsn#y;qZ&KcFo1|?;KG2&Y>q#~cO 
zb5qgL!aRO|t{V&nx7LXCZk(A~6nR@Fvd2)^<=)tR(u7EuJkgf$Zf^4L4zkquK)B$X zvF;D)hs0x{yo771D%ycp2#B72^|?xba&aa=t-yVrl3peJeCK}3y02l`Rrs{@@E+$( zp8kzKJ;uX{w{5#WK*c^SCn(23&kg1|@7_5k=cFMQch}G3%g~LT(tNQ9@h3vMXLqTu z`kWUCdY`v=46`wI4L>E=##+=KdY{ux*Lyck!Dyn&@(ekj_yyDsNml5cAZ@IZ8zmn+ zRWFX3gJZyuW^vTAfn@)(K`#j9s5ynmjTZ4UzR=*Y@|z)QSx!6oI!MwAMJ(LwaLt>^ z{>_IXlEjf*8U5g5dUiw)i6R;#VK-2Uwwh5fPjB<>i8E+g&KlkQAY6Miz2H=py5n6l zc)fENCD81@|C;Ota-E|$t=@S;e<6l}f@Z-$)y6MejCbFYQc`ed1iOP7Ncrepc&k2F z#ck)rau$PNO)8~>WE_S2S67PJtlCmq%7rvaDsK!VRnIC6rj?pPGeBlTYmXNE0#!ZV z-TOH+0UYkddamS$CYNKjVFSuKyQFl#8_|@HN3-n*hb_HoAu@x2%A<@O<%Odl4;(u6M@$eiy?^K4+Y5 zldAvF-d+!nMsvQ+Ptibk?rg7W$b0rm9M=dqeT-??Wn6bg%upZ9Y*KzlujZ=W=0l6; z#0%XyCOR(pyrw;GS(H9XazS^eb;RRX#zt={L1fd>-<#z;q!2TVw-?+XVt znk^1Q;i0#R_+earLa^#Fvp#jeg?V-)=|aQ&yYzz+ido^XleA8yCE_jo|G<{dE z@z8}>8X7hfY(TizivrdhBn}6ffMzU$;sEF5VAxin% z&uU0#&qg>ms)Ywc=({j02`e-cb(!i3 z9Pt5LCl+t+6!5yov@k7&+@?NSsKs1gaQCzB;9aoXmPXKW*3vrLbYZbyt|Q0?IB4O2 zONLqh93w4gss;^dp393D)H3Lp`dpWCUb&zD@b+(iJ9j-A7- zIN__Nv!K^eKZ!(j)>Cf{L}<+xZhMq6iImEOy=r^@O#|5Xbr;bdyj7Rggf?Ys`)xrp zi#|%mbn^)Z1@FpsIVu0Pntwwj>cP7cuH%?zOmpsqeB${@jbUopR|4uuf@BHrzq}G4 zM*56P&4P(*B>p@mknYpVmjEUz}pSTnv!lQ zR?8@kV#efpHeVl8Nz}KqL5z~$IG$Go?pe-O&$FT)3>Gi6iqWOGAj%ps8|wh4V96dj z{jKM^TwbeK&=3;g?^1sQ<&c)_yK4bfv{n;gJ!=8&9=$acpQEp^ z+7g2{kYJ`-A$^I={eaqGhn$nJXlyOc>t&3;YjSbM=$xLLL;R4aK);N0n%q_M15;OcJ|yGY@)TrGSuS*BjCFeBiQ$B)_{k{mFI*YdQf+U^tJ^|~ky@jHSY zc~+*s`! 
z1Sqk3oDees5#d7*qeB8p5%cn?2HD)(2V04D&8wdUmPslO&-sfM=7M5JuOUo*a3lUW z^jegyOMCK#1fg`rc(3mSa|oWuKuzn&TYTx^sVz&nc3w zv91v+hJHqJhJIt;?u~7PKH(-YpL!kxEA)y`bb_JeWo*+<=6TKz7OGk|MjN)0{ef0z z&~W4*VX8Z=G5qwEw94LYmi_T94zv`O4x*M*)H7ML+R=4$VwH6M5LY=@2XO3Lr|p#x z5yehU%G~`U*%IlI!4GSS@)Lp^KHr|c$*p?GpJz|5s=GSzx>l|1TzqHcn~9BRv7awe z?F?IAMe9Q4R{ETY1nN-`loUnj1bd9^y*V#s@Ns4(WlR01@hu$zXZ^027h50Of0;OU z&r0Af#MBJ4vLQ6<(EwX~_!KbQLmLG{@JD;=mRkhZ-+pQnLtb+}&r2BH<@P0A8qc>4 z*5B=dt6p1k4$h(^9YD|i8FXb8Okh&;o@1j}vajqk@-1KfMng&OUp*vWDVew^*}N;@ zE@3i>fkn9$5$pRwgK)L!AE$}m@N3hU4|F^PJ<*wU{pp#yB5krerJu1`Mutol;Mc8> z7%uQzSAsXb6R^{xEnlFOGY~(p)@d;+8eao8CAm^BbF&(UtYd0QmX>3SKWJo8anec{ zdvW~wP;zQGc~+vnAHll-WhwO$wb-Q|XdOX{;aZh4r z6uF?ty9^;k;vBo|sqC<2WV`VK{Paq+GBzixbKD18T@<`_W3<?(! z?;9TY%nBy$HF(StD5dOT?z+pzA^+4VfhCTVAdNTV-z96@a6L@I@WNY#W+xPJRZcTu~#H z7E}j>)R$=2Pe3zzNKO*&iC`|)KAi0*!ZSVn4R>kvclidEvU#B~6#PRmkCn@>6G2U8 ze_aRQ?&UL2Jf(vXuJt!=3OyhoGBx@GhOd=RKIs zJiML(PkA%pTa@1jmIP+(y%3~vZQge1u*UIqHQ64b4tc0VoGy0m$b0lgzMsYnuLc&;Ulcd5YUe3z|HYmLs>R`+x)s&Nh%ST9mdZ{-YP9@35=-VzYb(!8 zHME)x&hO0UADq@Pt@d8Gl9EHEajXWA-SC>N;_J^tf|n*?#{wy-6vKWxKU1GK-`vi%P2`88S{ zh?(n+ChyeB^i~NpKdnCSnwL%ZnbW4@SISIl6+L%{L|h6mK=1uW!SnNq!Nm;{u0|Dc z)R#4RBHWLY8M}O#NAVWNMS2eBtfb2yV$MycFGcUQ|HZa{bRNV!hGI9PU%fQ7t@sgrAj#Wd$XP;3e^jW2koj-^_OLWMZa6jwYb|a zaNtBe6UJ5LTTgJmyz`S|x0E#DTPON5Lmz9G+2pdfn=i&CJH)`^r)m!83gjxfn=W}E zJ@Ivad0~_!G5^tmfl~1axFE2|2PlW6sRGT2w`xFxm6xIW0flL1bfYioQNt zdz4voL8b?oObxLfz}Bu3#xcD#%(7Q8%xX%HrhoqFK`R;9C*O{>fBrdH%Cpm>-O<1Rx3SrWPYw&IXm6WJ}E*)A=)xN`UZ_T zkiLRmRL{=Y%DFbSTYH;0&T$c}#>VTQiH( zN>5g7h;NOI(odhGIfLZAszJ_d?J~N%v}(;vDpCns)jG?sI>0OS{lFr!N%p6DSS0Q^ z9=HlsLOkKP5 zL+t1F0Hij1cps_UP=E46~s>Q~XaqqVU7W`S5HI96gtKXH>6rXWIPa7R$?cZ~n{y7h zq|uYSD5%>>sgbp@dwq^)ew(KBA_$pHS3Gow3o||{SRI#C0E^My@qcbvfa6O(UKb3r zB!hK)rhVVHUaNN8(0CC+quyl8?I1h@O}n1HVn-I-sDX=R5@N8KTU8mX?eFdR(CS{b zStmNHh}w0{P{HkoNAx*6&tglD1|>g;;&?cG}(s`FR#td|c`6$k$GT1ZF(w%Pev_%fAfA_G#1 zS|F8ZO#ezx|LG-OUTFe`A(Zs#NPBrxC`zxKpnW%jLa@nTYkI-9W4G7YI(7E3a~JzZ 
z3tPnvV297URiw5H16O!;ozbr}M19~bzF-?_PfTw(oDNn`1>ZW4PGHvU(2qo0#U#{F zWtf5oJRCzJwlxzZ9|JE;C)8&*+aK4?8{ z4{oSG$(|ps2{-%sS`rmE-nsST3y8|iHbH5jrEgZv0xY)vs7SX#u}eh%m;M|@#g>;S z*OZO_5b2Z<++qcK`>G(FyrdxNabF>Vf;>u)yaD{A?;C3$4dm2SJk=g=>>NEnq2 zv#Q7Y5dL5DG^R|g-xWy-7rATmlcnq;Bli|eUL9nn9}Be~hwD({e7B;93s-xgg=RL- z>cnTJcRwhlzq9xFWcj$sbtpQey?l4v#L(Mnt~$TD@m37IQp-kDajb>u98_E$;r9TqS zBCcj`KUq_4_9R@PL|*qc+@U4=9z}aL`ek4~;5oIN!T4 zs?tjgzTcO6CBU2R*{haS zz4v{SNd3i3m%yJYtK}LrM@f>YsMNdX`}__%77hHO?Lh$r-V9PM^Y&!lzARi z(};DF2;mvug*3_4V>O52su=$-(HDMW6Ye0pSL1hrGuQ2E5KfY(N76hyknBvmCwnbU z@8y1#(I=g^j!V*gIuTLGb{s666Lk9q)kPOqzLzhLsoSHrSTOi0H&}6Zm;Vn{`3`bK zA4aQmygj>o@%NjVszeI6dOj$fQE4cKb~<%iou|0AG?8tgzH7EQk>86mkDl{|<^t-ieOdur={t7KcEU}y zenoAcTefq>rn zP9@Y$SDHXLMdFjZSTw4G5}36Vuxn`uA_z>#A@=Id;OguA z{?Gg<)15WVoKBJ&gbJ*8@DfVPo&_{7`n1RJ)HizU*`ogY3WK7O{bqu zk#38d>I@051~!W|CeriS;P~&i7?RX9H1JljqZV|`t*!YuaSGRlcdAxyaT}IrSbEOr zUz5I&4Y8N^etLb?5Rms5(r#J=rPdoKDf5E7HL0k%!Ij3;p7~R}#xd&dYKkK#3Mc}d z#-^`HWL}<2DAMl@HSV3>?r>AznLI}#Xm0U&CmR6@s;cX?5|V80?b^;;8`_e8*6+F! 
zihIh=aF3a$x?O;`!#ij5BxY3Um0#5Ee`W#X+Sh7X?=5V1HB|IR-l*+dgIVw=*cN|mBC)?iVq)c#D2P|zero{{>fw9K3c>_6Am@@LI?WcAvWCmM zv=IQ(_opia&x!7*6wp6N3Hc~EdYyo1B@F=Cg&S*LMmx#uXd5|%TWN^z>dk9SPR|-t z3rNuZ54qK~0yi65nwT{DZ`&2q2c4uEjuG>@W{Mwif46~1tD@fT%rq)3r0)k@)NS0f z2y#HeEM9kp6VnZ}+!X{7Mm_5BqRtzc6GzjeR@&y*&QMqHl!o%;!q+jWv|gYImvqbf zbB#OGPUz?2&0M;*o3GDiC0ggHzVE=sU_1=T&CcZ%Kkf-_iEP3Oi)S-Y^=4D#o;&M; zrdtN_Dxv!CqODybGraa#hBwR_7n@CXli|vR#qm^qh$folhS(_?!p;UT6ZQ4h@ek7O zrhYRsk^b32zQ2_9kBghcJuQ0%n4L8TG?T4Y@9|Y{P^J8;FXH13sHA?}*jPB5Qg4u& zwprTr5}=W+e}fo5YDj@Y#vNxG--ix>49zwhuSik#WE3!wr# z#)l=2eZ|HTP@ZicaF%em0o8)UH}JYU$je~(sgqY%i%7*q>cWUDW>=RMQ;wub2GlEI z{pnofPD?zunjwwBG%c7krB`6K$~K)ntw}K{J&sKjhI~NmR9)4-pnooNZRf#ixI$5Q zcU}W!o<78czi6gwqWT_JX-Xy7+_$1{VgQUxG#teaOmVV3LL?`qm9i?t=UR)<_aEiO z)28uoyG>Kn*3|eGdalVP!X-u7wjy|U+(y?#4{caOPHf2SG`bq9?+EtmQTVQx-cgV1 zwriT6z+tx*4KhDX>lb9}>NSn$4e=(r>2NuIiS#u5NfwS66m)&qByJUTkR39T2$$z5 z*YZZ`lFKW#PT1=70~tz_1Hs8EFHEHPBA^MJ9aEP5kpC+PxZ80b+QzzoA&X|&$9oXY z!$GXYy#UYetqfQ2i5So>)h3oXe-C=ct0+*cejG1mzIXv(?MXm=^ad6d6oo$M^m;Td zSA7QB+Oo4z*7nV%=o2XN`z1DK^0hTJOazzsU1K3(#5@{W#Q_VaPk+jmoJJ8fW2Tc+ zT`liJ`=PS>uGnIWgxMk4+sX&Q{ySk*_VtbK?sfioCaVt;XO6o+JZ zAxWu>*)2~?B;RF>ZJVNUyCe_#YH^A6Z0oP9uUWtDR3uiZPin4k?7dN>(GqzNmSaDB1k0Fva!lC!a3T$D@R@gs8g-BOCu8; zM39RL?!=KMN5wEVj*Ul}!bDa{$6J?z(CJ!s$#ypGBCew+Yz^5DIlx^(=jTfMW8E2R zvjW=*I|yZQXB7RXW8aN>6;s>;D+UERbVLPLchCf{JZD_z(4g#mvZI_8x%1>PabpJQ zLigZ9{^y>;(-2GYYSpn;wf=){lO&Ppde7Pt{a@3R`#TpKZz`)2iotT>dCA&gu5^0u zkzXnE$vRA;;jDs@kM(zu z%MfBE<@t5@q0miQdbxFLQ4Wf+67=)2;h?lq>#Bq&X2s|3BUL3vAK{C>O)cBWGUnDF z`m`(+;@$nIpGQnVM_zo-lex!D-#R;K&t?ZL&>MT9(MeGE%K!8+FOrQ?7!w#u5z=6w zsr0L=WOOEI#SZE_-O<>}#Gb}H678)%ki-r;$rgHJP5WkWo*_^27IfKw#@|EMkOt2F zNOvO?kW%E6_&2S#oTkJRl9|?EN23Sqs2&W*_=5i5rnyP~ryT9-m97jZimQovUZ`-L z5F9r1&S(Xmwtz!9B@7+`QrYY0c!4!S$}sDdJ3~R-0l~64B=M1X)!P%8W}b0s)u=p+ z|Jd0D;Vmc%kY&g9ROB&b-1Lpr6R*w4{v@%yXPnia2GLHcQ#zzmfXSypf0)~BX-htB ze*+bNX!I?$ohesPBxkb%N9)LjKU%EMPuA*0J<@wWP$u5^<3qobdi^^FjS;bf)qpdM 
zz%#BG|Ld5=M**HbmG7*|y%ugzI`*H2jteiJ(LbJi`aDkb&XX($oo4;xvA}uc6 z4^+ibbHcjqTa!5Gb1snfRI&rU#$bg4|4?kQhFJ#oI>Wq>>+jPgyTw;q!iKFLg5Jxt z0ObT4jL#w@V}=l9oX>ihxIm`?o{wv-aqejE+|P}C5^`md2Wealc_f6lOMrNIp(MIl z4L2M3T@RKfaE_rffLi+QIABHR*&dB;R+x{qsEzwL?q--SK77Un%9zS@hyoVz10PAz zkDha>RqMs06f^@s`DI@NI@di4PC7L0^7K1PY#w<>rS_L!2@nRTZ2G#OhSvE4Xm>A- zSY-9>dS3j(ZF9gt92B^n2`ktuQSobGL z7^s$g1WG9=b`c}B@&Ht-%L1m2Q3Rs<07ohY#pSO8!u9^I_TDnA%5B{n7X&315+X>W zNT<>rDxh>ommn>mq%dhjP*S=Ljk>TGVXN*j`*>Ck+NBn96wNHu`sq zx%Uq~Wtio9E|>8gEZ*J>3SaY71DF_CZJ_Kx*dLXK&dnUTp!%4cN!o5Q3pdj9EcxqB z7dd`Tt-HpVt#(y+w+1U;d(&Q^vH4;;Rl^qSh?O8`?(TkrxrL*gZ%ayxSb?bRe4o-F zs;J2*+Q_aiPQtE#KQ2)k+o7!HvCDrZel3jk^Fl=;LnG4#rjBh+3!3fZ4Ui&%*kyJ-p!1xpw&C^ z<`U)#rmIPDfy6rMu7Ctk2|An*!1w})@m&f_XvSlJV-0=4b2K?WKq{DTcewWnavE1B zczQd;*F0U5JUNfGcTI`qbs_~<5P4N-eL z$yq;_&0&TeyZn@`{WXzK9MrhK9 z*g3N2dzvU>Va;0dCcyjuQ4Mfb60 z+KHoIgRtw%(XhgzlSIK}t$ZCBk4nk!F;e;LIq%5Js+lZZd-k3<_J5zdeeK}K!=R3r zMV}rPn%x3rIABeMo1ds88y}ICByRE5Zf~DE>g{Tj+w=(l#|y*|;lmDW>i<_a=&vq{ zZ>F=X32g3cMltAX$F?6^&~O-^bvYlo_la1hXFfcLj@Vkc^P~%5IP-jK!Hd_`#Hi8n zk^4l$q0>YiP5Jj2e%&hT_ympksITvmCg&a^4o63_ooQe9G-(`=!8eZOieFhLJ)fwS zqa0@8yc)yCG;diLSCnf}D@xE<7Qk0%#I$LQRkPuAvRj0!fJlrOJlAU#AMGBaWh+{x z`N6f5sDOFE`r<&6EnG%WRGPEABa5oJr-MG8CS6mCNH{uK(2gR>9 z_DaiU!@{C@%szeXI4-JxhInJ>c-B2+UV)nuplCRy6)ndtbVq(j%+_gYdv2jVzaV9P z?c;gGrjS^FALFE-@sY>-hU2eo!Aj|KbBv5Wif@HFF-uJ(v-Z|VE0GnU4ri)(|XHD8_ zvam9~Lo__^?Y(c=`Ff=wb<5eg-eyNWeHiaBUC{j5o1?Cd)5B+i6I&=u<13Vlsx98K z*5uf~h0TYE5~e77K#ABMj0UvFGx9_@JirGgq5?dTn1Y84WG@}HKq7UiG_p6o(Ay;7 zwxkP6*NBRHzQh;KefY#};AGmq>$P*eSHt?rSoV;`s*{0uZJpexIUQNyG}D&SvA$N} z94kT*$LB?=qodx*s-NxiHGEm0lzO9F?pYR}WJk6qN-Oce8%KRk(j?WWo=Rrw(PbBG zj#AT}nIbZiRj91A(P>Rl(aqP+sdTn7xFy@&F=A6{-cFnUye~_>F7G0!TpySCQS39D zg;jeW6+GOT{1_TVWkxF|(-qG}wtsDCkTW7YWhpGUnrfwEPmar;RgSOZSxFxs3VBT9 zxUVo|ew4J4SUpmrHadLAfgPcE_A*uC#5_>!Q0rT|(uv#mc+&&XvLZ73en+OEDj6P(Tc9YM=k5Y~%WG%s-7 zcm z;qK#~lqSuvrJw3~>W}gyQIf{zb?R5HrO$_1ARWdKHWtQ3^F<7eg3qTom&6Y$3I(}$ 
zhw~o?l9WH)wcqmG7?@EGxP*Gn=QV5D)o?0gYd1Z5Ts|zlL`TgZ48Od~eZ}>C$W@it zCl-~Sl`C!YYn`#ovUF8*N4Z-KTf0q@Pq=Org#^2(_(&MbdhW)@+|4>kZ_W&()+{Jo zUOvE2OSz37Z}-;f#0nKRxF5+zytW?b&h)&=_VD?U?N=>_Dx-ESB{uo~@DcN<^HLu7 zK)7Ayk#vqsD4ixrqg+!$U0bc>bbvT-Mws(+g8{=m3FPT+$+Wm>wf4!(49`)%{pNGO z!%D#3Ks*nV;<*9TJqe@Zd^Sbkm2!c#dny6NmaPO+Y1;r$eXhu6M)#FcAU8V&xmki9 z{+s7Jd7;O1bh9&$Y@YPsR0Mg}>lhtB_j3~rtJ&bORxPT8ulFr~{OLp&_E)!UvTGi- z*8SYRIr?Zz%h&y7vKkR-)#{rx0#h4E@-CPC3~8Otx{#escC&521sa@NM4` z&8C_?;oRc&@w12+4y^}$_ZW5G$+GTe&TS-2eRB^=-}`dc?{S;Kaa+l~-F|{T@3B3b z+e>u1`kOvmZWQ}w->O_iIki1}j#gV4X1)gZM=07I%FP~$n;L52+x$KfPt6j*ou~bs z9&tX>#*B20>M{?3wHuVX)`MTwT_N5-axrt29fGCi9EP5S4r(y+sQkwNwgMW&O+-+1 zPy9Dv*)m8Co1cfy!GB%fsP`j=#E;5vgK%3KIVrh#I@l7{gt?4is*NTqB6QuA%oAmB zKFF77Axl|57YS?!yVy$|r!l~r-yT0F^ zsY=OL@~FRKLpxzcxF1;XTH99;j7A$Xg8%|L>)W?&RLLHBwD3)QlqG}2Q!|l zD7)U_FkLOjbXI2c#;b9hDzQY=>LcpVyeNBDngU@_M^Rrig?zTUb>Nho=&{K!maIrn zc*Ezk@!_$=?}`eh0+Xr9uZdNS{ldB7eEDij=rPh{K{w~g@ioiysz+R2$KQ z7cTE+J+5ROfIT=dFz@QwqZVTQb(QoiB*3lbD4xyefou|;4=rpfB6#Ci`-+P3p!%o8 zC=P5p$W6=uH!*o*@dR=cpnTb`{Xe*g;TD7Yzq9Nqmasz)aGs);cc}WTRvS_VeQeLF zpH=Mn*t~-uD_*aed*%;*g4V7pXV1eVO30eO7y$LlMb7E2XN8IOn={P|xrA7slC z+tHp`rG@fw_YA9j{p_&9vR@#}t(3`4<@M{Vwy$XdH?hi~qZ6qdDzx}JSNW)ssS+7+ zK8j2aPc3`A$;G}W*9XAvWDM1iCG<94BdB8mo>3(xRtZX?2 zt-u`D zn%b!OTWhDqfar#^ru>i8=a2e4~izoCrC_`f?1wR*U25+h%2=Ee2ZS}r?mnhy83bJ;P< z36xN~=u7Kb13)QCoIoDK-i-tvxB*lUB1%BXcP^L=xaTxqngsaPl%~kO1DVm%-fIX+ zUJMI6vxtVWb@&~9%x6b<@2bnWtxi@a4c{~8S|{!(TMh5mt6-^AZ!r}K>gHkHhWUoY z+!zHHH;uUTM%rVwxRQyq{N504cyu*Em*|?@d8Hz)m*7SaHqD2#MVF1n13|)6#_PFx zVX7r&-eT^k(ePOTw{Ns(o5e%@X>!9+;J40Ki|~N|6{)b0;E@cqI=X?%mc$ctkbxm# z$8hp&^An1kxsa9ydPBn|e2`PeLgCE)e+XySdd|#sPvPOs!@-SbRSsm`-RXIaq?&T7 za2rjdrsTRV`(M3N`vZ-!7T@R+B6u<2O*p)H(c&U{pNHtu#h5bLz1}aD!6V#2B-$T_ zC-2MYbB}vwY~6D7T(coQIVL|fH?ZetR8W>I%pF?)t#TfNY|m-*)YMk=JWcq>uy^J) z*{$7t&(J%qqEDCq@ST98PV~Ej#f05~)w~m9k6O>q$}9ZaSMvycR+1{tMRm{h%0?z! 
zhZ3)LCZ-NHMZ;QpEwWMh7RL3@Lh5_xv~MDVr{HJqAcx7EwG~WviA;IqBTnh!m;51$3|XuI;*?fD?h{C6Fg%z zgxx+CU7-B@rd_m{m}9Z$>DgSLfY+lX_REu?0I_=_Z2Rm({@j@lQ>jF@vM}>9rXkZuxZ`YD zf2RIDY|2#uS>J>_*d5zH5XkPgvO5S-5>Dc0dNum!^O4(@n89koz>$bJ5`pRNv~Fr? z8D8G^?rtvW=u^Fc)pmqQc z1FM|dMA8ALr(&y}7F|C13ANlrCjqh|?5TRtDNKlpS-Cbybhs825jFTVE8_5=`R3jb zOjMz2hwOl>|?SSGB*(oic4I za|H0oqon{g^Hq<|$qmQF47$dB276IOO3m(%iQa3;&7C#IL$a-IRz{D~8Y`>DMo~Kh zR_2s_RI+_AM1jKDCRa@UU`FO{ciP$rztt7mzH>CIdsXBDDm1<*$ZWhn zc{4gKz76Pr>^R2k z-CN&Js?vz3U{w+d$CaYorZo7F*SrmMyLE5{AOzM#^rWVjVNusIp@21)std=A`r(rX5{orp)zS@lZKo6GPK``orbA7FD%&EtU=OPE`7skaw&m2r03jcDmEVHR~vyRQ6v}wZGDjt3CP7#J9Gg z3yaB?O!r%^l{$0!xxXQBLWeO_DlXRI&xyh7Xg6~J6>b~Xc|P%AEq+#P+*neYv4fS% z(M+vv9IY9p-;(UpTPqqIXc9cIzs+1J{JmhJ@k{8tG>gZsr$TP2J-Q9LT8ZYI8athh zsSV_wliX{2BHgC40qewYIwnv7erJ`#C|Wn2qGt0zP!=EN^5x*F#a^xWO4AtO+f#`H zSCl8ehBsnTgUX$uVq?R*-|9o%jQ3t;tm9T`Xne`!RCyA&gY2a7SVdXN_<>Z(#0%ly ziBUG+Y{1G?~JHVIdAgrBIs;MklUOjE|xPCeN#CG|7B&MUBw@#_qXDiF5o1dA#M{{)ne_r6l zA+;To>&EVbE#gM*jkOGh?m2!**YT&dPO7{8_ja`}QI_@_HgxKmM>M%jk;bS%5%O(I zLxzEF69|9{IRHNJc!9gN%9VioUx1cg-!*eUXTwlxJjA(yR@vh!W9n$FgUlL=_owgN zA3d9=G{>CLrMza0oD5aW@-^Ei{IK@KLUXTHZdd^0d7+l|&;41SgHG>UHR6hhqh_gS z$wF7R!TAHBhBvGOgDtSuoO;g5B}pc6<8s%ab%Fgh6hEtDqWZ&}I#x;bI|b3zB7;eD zpTBXxcO~dnC<*5;`zvOfa#zpg^;B$s42h5Nf%GU-{X@{G28 zE{n8|)v}*|i+>Z+OWo@9DHh~@VQ9JfZOg@5DBlkAWYg}GxjU_1^f{a{84+v|%TTO+ z8`*)aVm_=l)=YNurL9~(#Zr)OiuFQCVd=+vDv1Ep4Dx*A?g*YQJm{pYr50s zTdjt=N1syBZQKab6LC;yKXo}1(g#I#B~XCXT6>PBkch}=N%42 zU``uR!konFwM6*sBIPFUC8MJLD2a`}AzdaYZz6?g4h1`;SOs3fe2aT{*~vRap(nbL z7^-$(wAMhi0BcdW`NKGX7>;tFqPs$j)8o{hXhbZP@d8xJuGY`~#{xtN%!}=_7f=KI zU7r5rQ&KPgsX831y?ta09?CVF+%KKm*Y|5f|)(+LS8r%Fkf8ZF?S2ca<=3~s zA>Z8gxHfcV^?K$I~UmH2*SQ*-Ecp-%r7Y}3&_LqHv0mT<-gg6GO z;tena?+ZgfHxySekKfpER7!06x!_|9k9$ar1BHjw zK3zfNa>(;a8bP}brdbFVA3#|KWPE)akmm-PYyjHA^DBgq1~qmr6F}Z- zTQUo82u8L}A40kKc18{gq666LOMfxyW>T%_VeXC#JO^Fv7eGXxUuHu}2cXFx8i1EL z{g!zZ>fv2b&>si+N62F!!LEOwW4$FM9)D^2y*S#|NkHdW@Xa|7Ft6WKA+&%fF$@$h 
zcoNe`_W*$(ow@vXKoAE1xWASLhOX+pW|uOt7dwNZNsS;yBjAxKaAx4iGz}g4#{mPY zW!K&P3;(A1Rp0A*d2Y0mdJDWX)qNkxBoD(L5?-VQkX5H~q9-E2j++?%g%{W>1%ErG z^WSf)`4q3mfzBnUNB?d+#zIc|KUfC_Lh294NS+;`+q0$EfCkRTK4=<*fu0IFqK}>j zzYZqCr0l^!EbvUV9~9GZ=v8$9UjqyQ{r|^Q{69MUf8O?>o@8_7w{PXA*|ymp&$>&} zWNOe4wNpT%h(`?0sN0zWEEV)7b9G2uVsPs;QE_-qKnpI{acaXN16e*B|Py)A-)PuIOT*tF<4LX{WHuP! zmC6wHyE`lx>aGvLCe(fu>=vCZYHl#f3!CRit(KKFIti#qBVJ5=r2`xUG~FEJ zD^HG={s-_tC5ySMy2&U4EXsfcJqVMdA7W3;vWI!zl0t_tog)FmFGa;*48hf`8VWWO zq}!5aES*tGQb`wi7ntm~7sBni-2qludg5VMj@JSue+}HeLlQdC5@h4+udz2ZkKLG}n?F zA@CPcT9}Q--LGCU`m{#HYED*Ppyv`7vdIfP6w-Vh^wBQ?Az>|IL|Q>$j6RYN&~hK# zv#dM`y9})J8Wx_DNwI-zMoxD_jLMA!pb2UiD1|^U3K>H3XL4pivBkT|ig1{xKal~3 z_VQ32$DSH6w>EJZ;7dojj8e(0Q@5SJPs<{KwjDx4}fT-D>( z*N)>&TOsjb)`P^3fwjIABEZZ;=qr02Z9xVAND5X4iidVp)rW)=1K94@b`Cjxgg_?v z7uyhQKlk{0mDK#@N+tijok_YBPOL>WUmXB&gvmc&o&fEPWU67i?{`)Iq`MOct2FM)%j*o0)HpPKX@j%CfPsp52?;cf zCc)Mgv*^$QYTh0KR~)k0y35!^1@RS}8zd*w<75XsY)E@lQYQl!fuSfBgUn29_!Qq48m-ap@Pd<^m0H1feIa;!LKoO~`{+ z7)~h%j3)(;^q^(hSU{R4pFd@&{_Po&loa-9{|ykuk%5MmHC}VMgZVxcKSYPXibTP; ze-QS#S3i(8y?|%xz|RuEX%Y&~7xL;D_iw-8fsCBpuZ=dL{{oPU`Ukg&`3HbWppIPN zq?Qs%sJ~z9p}?k`4vPb!L%-#``-U9|9#ii)(YDawKU@gA8y8gT~1RA*Ih@_DISKSNp! zp}MP9>lYdc4Zf{3(3GIQ1u*d-SrWk6Nr_n2{c4Rc>qkprk8o-Xa3z2`)B}7ONcW>W zZ;7xc-z-gBQV)2#Y~N<;5PL)|iS-w73mvd>2ONB;cSGuLk#{yzLf^mw3^GAwXy24e zV1Mhw_pvzZwLcpV(0^C#Q-sb(<1|BQVi+GwtJcBU_+<>NdsyHUr-;XI2tjcT=&C!q zZfSpYF_1s9D_K7>3urf#>@NEWXUWXZ2tSv*PWR*RH)oRi=-NlKv?@Y8c?k7{c`jk*4E5NIdd+;zP^j0_);mmvaArv%BaUS6J9EHT*)G561q@TcNdA{03{PJ{_(qH*r<6jl43n}9f z=6SRC0540S70+UsXU$ zBp>oXhWM)q`Zs%kFdjoU72Cwxrs-U3$%EAv(PT!(UP9=U4|Yxnh^}XDzUcdF0^hHV zjmXYf7@>o=Hq(NH$Ww=GxQ+=L$i!b`Q4V$Dt9f7sit|J~i3?O)ba#X~s$%VgUCQcqT2epiF6ZeIb4yvd;pfi; z5!xT&?;3j~0V(!5daZN-Cwg)SImn>JA^;tC#KVA1pWNp@8RY4j9yOuGz%uZ&>L-wm zuNQgibFWM6<$VvSj8a@66V~?s5cnhzc+;9$#Nr(uOJOPG&tY>#n|1&|*!d%k_r zZeVxQyciG6ulyk2zPi}3KExd}elP2yScjJdYz{xG8-L!g#SP6uaFKu7&FggEA0H-! 
zn12e)EsBMRlV@oL?{-;N`Et(F7B%E<#k>(P!>eq96yw2pm#U zKdY>6cMH;(0;oj`vgvT3FP92_#GaPYfV_ ze{rmyq4T|~$7dP79w;SEzAeeVce2yKvaFrD6$C=wf(nRub1u7Mf8}z75C(&x23A5}k=0zfw9?bJn1J0X8 zf!bojCBDITj97TdQd?slvYzH?xWv@8)jtTo*i;vDSc(lXj)lv$0|rR~Rk(sIJW}h8 zMK{T$=tn?I$uc1J(IvB!nfbw@S_mpL2E^Mat0mgkOOfpF4=5aBgfVop$rW%&$^5J& zbJgB4x@*rFCj}mV>8x0i$I1RARjJMqWA}KZ;Hx`2?0*D6B^3N1MO@(f)VmovAdUKf z581jx{BqAg7uPr^YpaPZPlQw}dGLYa58=q^HQFLR{8k;u5)8Tf^s!i(VO&c=m6~-j zDNvEZR|;T}T`M*VnyenPmQsLx91VRPIVKpw1(Jq0{XEbE>7)i2Z9BuY)dQIKWOj#Z zB~oxPJ4u2MIOsW4;w0Lk?3G9!y0P@e2na93F45%=6t+g&43L6?{v@;Lldb!CF%E6u>ZaSx&mkRY`8)a|nPC!Gd|d z6ztNnF7<+Utf8E&7B| zOH6ke_y>GyXsKn)GTHduYk?-i12Vj*wAYK?#BszObC_l4^-InB!?nc&MhETkf6n<7 zpC~Tq@4U(#DvZgAPgf(j;B6BSj65{H{LV54ge{31Fwtcn} zSzG@6crw`Va7z>oCje6NgxV@UOdnG1KYIK6(hEKi(pxY=glDqRPvck9#ViU%-d3ft zp3RiSw|>(2Fyq_`lq}uZBS$_u7U_B`Cp#T&2|IABO=q>^^}5LUSJim(Nzo&)OtBFj znR$ep992`Ak7}J3U!zYzgacJZrC29foMtxur~rmX1}+c)k-qN$qRftDCTc`p6YlW1 zvWCR-HVAgQV4-gfKX~~|^b0Gx*i0DsB67ZYV5uw67ofu3Yb|XI+512R1*n515Fo}( zX4t(vJ_RYUBp^dYKb@zwn^GG)3C|}ciSF=?UBm6G`1;rkXfM5yYB6(*54_f!!g1r$ zZ`tHEKhb@MINSC-GZ0joq_m`K559feSx+v%7zVJx12x$Ot}@v~4X#`C^u1uePnU9Z zGRqgby69VL-#CcuTe}$*r(8}2A+(E>W0%)G7*({UwS5IQ33g!cTMumQq_aPNe-Lx5=7nA&p;-}NCBiC*T z5gMjnBM#;wx^u@3`AoBxUG0U$eBk1-5`W~-TeYQYmr>BQ1j@)Y(v62{2YywjkK4VK z@}Jme@kBtmgev%%xt!OD>wT*i_fz+rq0f(}d0^w=WQ==3H_^RLIG#VQ2- z0ax7E=EJ2*W~%RH8RCx>wZVIHeMLRQM1R)6aAafZf!+Xg1vwDRU221$I#0=^wXg9^ zVEf515v?8lyr=r^Yivg{Sjgqr>KHa5fkCi8{iwTkJo?JoCMlf%+QQx%FswFR4^tRy= zzowhSl;%r`eLBGhds+)7vX?tb!2ltBqDCa~*G{qYX^}XX2mN~5&w)1{B}!N%#_-KtU;O{_Q9FXufL(_Q-Zl88kfGn9#2UORg00>c{u zwyeL?-7p*B9b>{WCL^uDp{Lgs=Z*=gL*)BJ6J_L`^Z)v5H}BFd_wxq(e8*Ay8FTUU zS5^HQh!kN$EcHh%G%h6!3p2!<2OayGBHDdWW&z~<40KnR=~JJGJ8i^W;=nL)eVE~k zv+VQ>Q5N!S^pxOkGjadB1U}Zr)}qcr=1!Kqm?q5>13a7>#*oT|P<@hbTpLlCc^#~v z_z7=#XBa2boTi_2o7tQdkBHDXhX}XkX$^|qZGCK5_aV<_{8QlkpiQB$gtdhwvx|i> zLF0$XK~#bj;_cG5gXwws%b|^HuVwvPebj`}Qr{H+hZ&dSM0qc(db-7E<{wwue}~`9 zj;o~-YhUYb7Ki(<>L6Y1Y5Gx(Iv_ce$3O-JbX2|NKJp2c&z3{Vg+4g$;Fk3Lf>)B| 
zh1A*qK12Lt;NaG2cy5~rL!CUhigHs#RM>?&I_2QCVS8%guwxUx_l3t8InMW5zp)d(8NCct-sNow5%Irqc zuUzr-_KEI|A^zFvZ5gf6HE@58$bM!laBmRuQ!|v>dB26X5sk0v6m(3H! z+R>q34t5k*A^<(Yay#1eyFt*MxPN*NJO+a!&?+&ux=70=tlq-uyZ+k34SH2nx;Bjs z)w}c?dwv@Y=>O>tDQX}BAt~XRLg`6v{Lv%tH|v)!U5$M7Kw7i=``wUvSLtA~_~0uz z*)dYC0gjJLvxgD$&;IMyfbIlv+Pl_m6GYCHwX{kBPN`Q?zJEVj0M}gP34j}L@o-*ejo6Lq*d zXOiQknZ~Ak_>gIx_EMV%_f;=)g#Ww-wAunUIMN*>Zi1iwan`@n<7hJICQCCH9%ih* z2|jnKh~Gc?n(S>@%4!rHyfHyBKviaT%lJEtNi_Q8vQnhi9_?TE5MRA07aFDo4#bO) z#k0w8ryZ~m#T+0V`wwC-{!i*0%sBREt>rTGx!|eKi_NmuwOV~|;Vozpd0)yi>t0Ud zSBN!zB2*x-S)GBbn=+EzIFaZ%nHJyb4lhY(mV7ob_ulUf93Lp#}OBYZ+B5jk~gMnko64A=XOFO&mvT6H@mN zjRc3k{^bO!@*uG6wg$S|rmy8snlq}e3_GdcX6QJ|TSfM+PJiY=z8`rBzR_oEoc))(zwG#5W4Q>AkH`;w@F^-{*)fJEFMBEMwyT{{i{_ B@T>p; diff --git a/docs/source/building_applications/responses_vs_agents.md b/docs/source/building_applications/responses_vs_agents.md deleted file mode 100644 index 63ff69e4f..000000000 --- a/docs/source/building_applications/responses_vs_agents.md +++ /dev/null @@ -1,179 +0,0 @@ -# Agents vs OpenAI Responses API - -Llama Stack (LLS) provides two different APIs for building AI applications with tool calling capabilities: the **Agents API** and the **OpenAI Responses API**. While both enable AI systems to use tools, and maintain full conversation history, they serve different use cases and have distinct characteristics. - -```{note} - **Note:** For simple and basic inferencing, you may want to use the [Chat Completions API](../providers/openai.md#chat-completions) directly, before progressing to Agents or Responses API. -``` - -## Overview - -### LLS Agents API -The Agents API is a full-featured, stateful system designed for complex, multi-turn conversations. It maintains conversation state through persistent sessions identified by a unique session ID. The API supports comprehensive agent lifecycle management, detailed execution tracking, and rich metadata about each interaction through a structured session/turn/step hierarchy. 
The API can orchestrate multiple tool calls within a single turn. - -### OpenAI Responses API -The OpenAI Responses API is a full-featured, stateful system designed for complex, multi-turn conversations, with direct compatibility with OpenAI's conversational patterns enhanced by LLama Stack's tool calling capabilities. It maintains conversation state by chaining responses through a `previous_response_id`, allowing interactions to branch or continue from any prior point. Each response can perform multiple tool calls within a single turn. - -### Key Differences -The LLS Agents API uses the Chat Completions API on the backend for inference as it's the industry standard for building AI applications and most LLM providers are compatible with this API. For a detailed comparison between Responses and Chat Completions, see [OpenAI's documentation](https://platform.openai.com/docs/guides/responses-vs-chat-completions). - -Additionally, Agents let you specify input/output shields whereas Responses do not (though support is planned). Agents use a linear conversation model referenced by a single session ID. Responses, on the other hand, support branching, where each response can serve as a fork point, and conversations are tracked by the latest response ID. Responses also lets you dynamically choose the model, vector store, files, MCP servers, and more on each inference call, enabling more complex workflows. Agents require a static configuration for these components at the start of the session. - -Today the Agents and Responses APIs can be used independently depending on the use case. But, it is also productive to treat the APIs as complementary. It is not currently supported, but it is planned for the LLS Agents API to alternatively use the Responses API as its backend instead of the default Chat Completions API, i.e., enabling a combination of the safety features of Agents with the dynamic configuration and branching capabilities of Responses. 
- -| Feature | LLS Agents API | OpenAI Responses API | -|---------|------------|---------------------| -| **Conversation Management** | Linear persistent sessions | Can branch from any previous response ID | -| **Input/Output Safety Shields** | Supported | Not yet supported | -| **Per-call Flexibility** | Static per-session configuration | Dynamic per-call configuration | - -## Use Case Example: Research with Multiple Search Methods - -Let's compare how both APIs handle a research task where we need to: -1. Search for current information and examples -2. Access different information sources dynamically -3. Continue the conversation based on search results - -### Agents API: Session-based configuration with safety shields - -```python -# Create agent with static session configuration -agent = Agent( - client, - model="Llama3.2-3B-Instruct", - instructions="You are a helpful coding assistant", - tools=[ - { - "name": "builtin::rag/knowledge_search", - "args": {"vector_db_ids": ["code_docs"]}, - }, - "builtin::code_interpreter", - ], - input_shields=["llama_guard"], - output_shields=["llama_guard"], -) - -session_id = agent.create_session("code_session") - -# First turn: Search and execute -response1 = agent.create_turn( - messages=[ - { - "role": "user", - "content": "Find examples of sorting algorithms and run a bubble sort on [3,1,4,1,5]", - }, - ], - session_id=session_id, -) - -# Continue conversation in same session -response2 = agent.create_turn( - messages=[ - { - "role": "user", - "content": "Now optimize that code and test it with a larger dataset", - }, - ], - session_id=session_id, # Same session, maintains full context -) - -# Agents API benefits: -# โœ… Safety shields protect against malicious code execution -# โœ… Session maintains context between code executions -# โœ… Consistent tool configuration throughout conversation -print(f"First result: {response1.output_message.content}") -print(f"Optimization: {response2.output_message.content}") -``` - -### 
Responses API: Dynamic per-call configuration with branching - -```python -# First response: Use web search for latest algorithms -response1 = client.responses.create( - model="Llama3.2-3B-Instruct", - input="Search for the latest efficient sorting algorithms and their performance comparisons", - tools=[ - { - "type": "web_search", - }, - ], # Web search for current information -) - -# Continue conversation: Switch to file search for local docs -response2 = client.responses.create( - model="Llama3.2-1B-Instruct", # Switch to faster model - input="Now search my uploaded files for existing sorting implementations", - tools=[ - { # Using Responses API built-in tools - "type": "file_search", - "vector_store_ids": ["vs_abc123"], # Vector store containing uploaded files - }, - ], - previous_response_id=response1.id, -) - -# Branch from first response: Try different search approach -response3 = client.responses.create( - model="Llama3.2-3B-Instruct", - input="Instead, search the web for Python-specific sorting best practices", - tools=[{"type": "web_search"}], # Different web search query - previous_response_id=response1.id, # Branch from response1 -) - -# Responses API benefits: -# โœ… Dynamic tool switching (web search โ†” file search per call) -# โœ… OpenAI-compatible tool patterns (web_search, file_search) -# โœ… Branch conversations to explore different information sources -# โœ… Model flexibility per search type -print(f"Web search results: {response1.output_message.content}") -print(f"File search results: {response2.output_message.content}") -print(f"Alternative web search: {response3.output_message.content}") -``` - -Both APIs demonstrate distinct strengths that make them valuable on their own for different scenarios. The Agents API excels in providing structured, safety-conscious workflows with persistent session management, while the Responses API offers flexibility through dynamic configuration and OpenAI compatible tool patterns. 
- -## Use Case Examples - -### 1. **Research and Analysis with Safety Controls** -**Best Choice: Agents API** - -**Scenario:** You're building a research assistant for a financial institution that needs to analyze market data, execute code to process financial models, and search through internal compliance documents. The system must ensure all interactions are logged for regulatory compliance and protected by safety shields to prevent malicious code execution or data leaks. - -**Why Agents API?** The Agents API provides persistent session management for iterative research workflows, built-in safety shields to protect against malicious code in financial models, and structured execution logs (session/turn/step) required for regulatory compliance. The static tool configuration ensures consistent access to your knowledge base and code interpreter throughout the entire research session. - -### 2. **Dynamic Information Gathering with Branching Exploration** -**Best Choice: Responses API** - -**Scenario:** You're building a competitive intelligence tool that helps businesses research market trends. Users need to dynamically switch between web search for current market data and file search through uploaded industry reports. They also want to branch conversations to explore different market segments simultaneously and experiment with different models for various analysis types. - -**Why Responses API?** The Responses API's branching capability lets users explore multiple market segments from any research point. Dynamic per-call configuration allows switching between web search and file search as needed, while experimenting with different models (faster models for quick searches, more powerful models for deep analysis). The OpenAI-compatible tool patterns make integration straightforward. - -### 3. 
**OpenAI Migration with Advanced Tool Capabilities** -**Best Choice: Responses API** - -**Scenario:** You have an existing application built with OpenAI's Assistants API that uses file search and web search capabilities. You want to migrate to Llama Stack for better performance and cost control while maintaining the same tool calling patterns and adding new capabilities like dynamic vector store selection. - -**Why Responses API?** The Responses API provides full OpenAI tool compatibility (`web_search`, `file_search`) with identical syntax, making migration seamless. The dynamic per-call configuration enables advanced features like switching vector stores per query or changing models based on query complexity - capabilities that extend beyond basic OpenAI functionality while maintaining compatibility. - -### 4. **Educational Programming Tutor** -**Best Choice: Agents API** - -**Scenario:** You're building a programming tutor that maintains student context across multiple sessions, safely executes code exercises, and tracks learning progress with audit trails for educators. - -**Why Agents API?** Persistent sessions remember student progress across multiple interactions, safety shields prevent malicious code execution while allowing legitimate programming exercises, and structured execution logs help educators track learning patterns. - -### 5. **Advanced Software Debugging Assistant** -**Best Choice: Agents API with Responses Backend** - -**Scenario:** You're building a debugging assistant that helps developers troubleshoot complex issues. It needs to maintain context throughout a debugging session, safely execute diagnostic code, switch between different analysis tools dynamically, and branch conversations to explore multiple potential causes simultaneously. - -**Why Agents + Responses?** The Agent provides safety shields for code execution and session management for the overall debugging workflow. 
The underlying Responses API enables dynamic model selection and flexible tool configuration per query, while branching lets you explore different theories (memory leak vs. concurrency issue) from the same debugging point and compare results. - -> **Note:** The ability to use Responses API as the backend for Agents is not yet implemented but is planned for a future release. Currently, Agents use Chat Completions API as their backend by default. - -## For More Information - -- **LLS Agents API**: For detailed information on creating and managing agents, see the [Agents documentation](agent.md) -- **OpenAI Responses API**: For information on using the OpenAI-compatible responses API, see the [OpenAI API documentation](https://platform.openai.com/docs/api-reference/responses) -- **Chat Completions API**: For the default backend API used by Agents, see the [Chat Completions providers documentation](../providers/openai.md#chat-completions) -- **Agent Execution Loop**: For understanding how agents process turns and steps in their execution, see the [Agent Execution Loop documentation](agent_execution_loop.md) diff --git a/docs/source/building_applications/safety.md b/docs/source/building_applications/safety.md deleted file mode 100644 index 30afe7ad2..000000000 --- a/docs/source/building_applications/safety.md +++ /dev/null @@ -1,17 +0,0 @@ -## Safety Guardrails - -Safety is a critical component of any AI application. 
Llama Stack provides a Shield system that can be applied at multiple touchpoints: - -```python -# Register a safety shield -shield_id = "content_safety" -client.shields.register(shield_id=shield_id, provider_shield_id="llama-guard-basic") - -# Run content through shield -response = client.safety.run_shield( - shield_id=shield_id, messages=[{"role": "user", "content": "User message here"}] -) - -if response.violation: - print(f"Safety violation detected: {response.violation.user_message}") -``` diff --git a/docs/source/building_applications/telemetry.md b/docs/source/building_applications/telemetry.md deleted file mode 100644 index d93242f75..000000000 --- a/docs/source/building_applications/telemetry.md +++ /dev/null @@ -1,143 +0,0 @@ -## Telemetry - -The Llama Stack telemetry system provides comprehensive tracing, metrics, and logging capabilities. It supports multiple sink types including OpenTelemetry, SQLite, and Console output. - -### Events -The telemetry system supports three main types of events: - -- **Unstructured Log Events**: Free-form log messages with severity levels -```python -unstructured_log_event = UnstructuredLogEvent( - message="This is a log message", severity=LogSeverity.INFO -) -``` -- **Metric Events**: Numerical measurements with units -```python -metric_event = MetricEvent(metric="my_metric", value=10, unit="count") -``` -- **Structured Log Events**: System events like span start/end. Extensible to add more structured log types. -```python -structured_log_event = SpanStartPayload(name="my_span", parent_span_id="parent_span_id") -``` - -### Spans and Traces -- **Spans**: Represent operations with timing and hierarchical relationships -- **Traces**: Collection of related spans forming a complete request flow - -### Metrics - -Llama Stack automatically generates metrics during inference operations. These metrics are aggregated at the **inference request level** and provide insights into token usage and model performance. 
- -#### Available Metrics - -The following metrics are automatically generated for each inference request: - -| Metric Name | Type | Unit | Description | Labels | -|-------------|------|------|-------------|--------| -| `llama_stack_prompt_tokens_total` | Counter | `tokens` | Number of tokens in the input prompt | `model_id`, `provider_id` | -| `llama_stack_completion_tokens_total` | Counter | `tokens` | Number of tokens in the generated response | `model_id`, `provider_id` | -| `llama_stack_tokens_total` | Counter | `tokens` | Total tokens used (prompt + completion) | `model_id`, `provider_id` | - -#### Metric Generation Flow - -1. **Token Counting**: During inference operations (chat completion, completion, etc.), the system counts tokens in both input prompts and generated responses -2. **Metric Construction**: For each request, `MetricEvent` objects are created with the token counts -3. **Telemetry Logging**: Metrics are sent to the configured telemetry sinks -4. **OpenTelemetry Export**: When OpenTelemetry is enabled, metrics are exposed as standard OpenTelemetry counters - -#### Metric Aggregation Level - -All metrics are generated and aggregated at the **inference request level**. This means: - -- Each individual inference request generates its own set of metrics -- Metrics are not pre-aggregated across multiple requests -- Aggregation (sums, averages, etc.) can be performed by your observability tools (Prometheus, Grafana, etc.) 
-- Each metric includes labels for `model_id` and `provider_id` to enable filtering and grouping - -#### Example Metric Event - -```python -MetricEvent( - trace_id="1234567890abcdef", - span_id="abcdef1234567890", - metric="total_tokens", - value=150, - timestamp=1703123456.789, - unit="tokens", - attributes={"model_id": "meta-llama/Llama-3.2-3B-Instruct", "provider_id": "tgi"}, -) -``` - -#### Querying Metrics - -When using the OpenTelemetry sink, metrics are exposed in standard OpenTelemetry format and can be queried through: - -- **Prometheus**: Scrape metrics from the OpenTelemetry Collector's metrics endpoint -- **Grafana**: Create dashboards using Prometheus as a data source -- **OpenTelemetry Collector**: Forward metrics to other observability systems - -Example Prometheus queries: -```promql -# Total tokens used across all models -sum(llama_stack_tokens_total) - -# Tokens per model -sum by (model_id) (llama_stack_tokens_total) - -# Average tokens per request -rate(llama_stack_tokens_total[5m]) -``` - -### Sinks -- **OpenTelemetry**: Send events to an OpenTelemetry Collector. This is useful for visualizing traces in a tool like Jaeger and collecting metrics for Prometheus. -- **SQLite**: Store events in a local SQLite database. This is needed if you want to query the events later through the Llama Stack API. -- **Console**: Print events to the console. - -### Providers - -#### Meta-Reference Provider -Currently, only the meta-reference provider is implemented. It can be configured to send events to multiple sink types: -1) OpenTelemetry Collector (traces and metrics) -2) SQLite (traces only) -3) Console (all events) - -#### Configuration - -Here's an example that sends telemetry signals to all sink types. Your configuration might use only one or a subset. 
- -```yaml - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "llama-stack-service" - sinks: ['console', 'sqlite', 'otel_trace', 'otel_metric'] - otel_exporter_otlp_endpoint: "http://localhost:4318" - sqlite_db_path: "/path/to/telemetry.db" -``` - -**Environment Variables:** -- `OTEL_EXPORTER_OTLP_ENDPOINT`: OpenTelemetry Collector endpoint (default: `http://localhost:4318`) -- `OTEL_SERVICE_NAME`: Service name for telemetry (default: empty string) -- `TELEMETRY_SINKS`: Comma-separated list of sinks (default: `console,sqlite`) - -### Jaeger to visualize traces - -The `otel_trace` sink works with any service compatible with the OpenTelemetry collector. Traces and metrics use separate endpoints but can share the same collector. - -Start a Jaeger instance with the OTLP HTTP endpoint at 4318 and the Jaeger UI at 16686 using the following command: - -```bash -$ docker run --pull always --rm --name jaeger \ - -p 16686:16686 -p 4318:4318 \ - jaegertracing/jaeger:2.1.0 -``` - -Once the Jaeger instance is running, you can visualize traces by navigating to http://localhost:16686/. - -### Querying Traces Stored in SQLite - -The `sqlite` sink allows you to query traces without an external system. Here are some example -queries. Refer to the notebook at [Llama Stack Building AI -Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) for -more examples on how to query traces and spans. diff --git a/docs/source/building_applications/tools.md b/docs/source/building_applications/tools.md deleted file mode 100644 index 8a54290ed..000000000 --- a/docs/source/building_applications/tools.md +++ /dev/null @@ -1,264 +0,0 @@ -# Tools - -Tools are functions that can be invoked by an agent to perform tasks. They are organized into tool groups and registered with specific providers. Each tool group represents a collection of related tools from a single provider. 
They are organized into groups so that state can be externalized: the collection operates on the same state typically. -An example of this would be a "db_access" tool group that contains tools for interacting with a database. "list_tables", "query_table", "insert_row" could be examples of tools in this group. - -Tools are treated as any other resource in llama stack like models. You can register them, have providers for them etc. - -When instantiating an agent, you can provide it a list of tool groups that it has access to. Agent gets the corresponding tool definitions for the specified tool groups and passes them along to the model. - -Refer to the [Building AI Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) notebook for more examples on how to use tools. - -## Server-side vs. client-side tool execution - -Llama Stack allows you to use both server-side and client-side tools. With server-side tools, `agent.create_turn` can perform execution of the tool calls emitted by the model -transparently giving the user the final answer desired. If client-side tools are provided, the tool call is sent back to the user for execution -and optional continuation using the `agent.resume_turn` method. - - -### Server-side tools - -Llama Stack provides built-in providers for some common tools. These include web search, math, and RAG capabilities. - -#### Web Search - -You have three providers to execute the web search tool calls generated by a model: Brave Search, Bing Search, and Tavily Search. - -To indicate that the web search tool calls should be executed by brave-search, you can point the "builtin::websearch" toolgroup to the "brave-search" provider. - -```python -client.toolgroups.register( - toolgroup_id="builtin::websearch", - provider_id="brave-search", - args={"max_results": 5}, -) -``` - -The tool requires an API key which can be provided either in the configuration or through the request header `X-LlamaStack-Provider-Data`. 
The format of the header is: -``` -{"_api_key": } -``` - - -#### Math - -The WolframAlpha tool provides access to computational knowledge through the WolframAlpha API. - -```python -client.toolgroups.register( - toolgroup_id="builtin::wolfram_alpha", provider_id="wolfram-alpha" -) -``` - -Example usage: -```python -result = client.tool_runtime.invoke_tool( - tool_name="wolfram_alpha", args={"query": "solve x^2 + 2x + 1 = 0"} -) -``` - -#### RAG - -The RAG tool enables retrieval of context from various types of memory banks (vector, key-value, keyword, and graph). - -```python -# Register Memory tool group -client.toolgroups.register( - toolgroup_id="builtin::rag", - provider_id="faiss", - args={"max_chunks": 5, "max_tokens_in_context": 4096}, -) -``` - -Features: -- Support for multiple memory bank types -- Configurable query generation -- Context retrieval with token limits - - -```{note} -By default, llama stack run.yaml defines toolgroups for web search, wolfram alpha and rag, that are provided by tavily-search, wolfram-alpha and rag providers. -``` - -## Model Context Protocol (MCP) - -[MCP](https://github.com/modelcontextprotocol) is an upcoming, popular standard for tool discovery and execution. It is a protocol that allows tools to be dynamically discovered -from an MCP endpoint and can be used to extend the agent's capabilities. - - -### Using Remote MCP Servers - -You can find some popular remote MCP servers [here](https://github.com/jaw9c/awesome-remote-mcp-servers). You can register them as toolgroups in the same way as local providers. - -```python -client.toolgroups.register( - toolgroup_id="mcp::deepwiki", - provider_id="model-context-protocol", - mcp_endpoint=URL(uri="https://mcp.deepwiki.com/sse"), -) -``` - -Note that most of the more useful MCP servers need you to authenticate with them. Many of them use OAuth2.0 for authentication. 
You can provide authorization headers to send to the MCP server -using the "Provider Data" abstraction provided by Llama Stack. When making an agent call, - -```python -agent = Agent( - ..., - tools=["mcp::deepwiki"], - extra_headers={ - "X-LlamaStack-Provider-Data": json.dumps( - { - "mcp_headers": { - "http://mcp.deepwiki.com/sse": { - "Authorization": "Bearer ", - }, - }, - } - ), - }, -) -agent.create_turn(...) -``` - -### Running your own MCP server - -Here's an example of how to run a simple MCP server that exposes a File System as a set of tools to the Llama Stack agent. - -```shell -# start your MCP server -mkdir /tmp/content -touch /tmp/content/foo -touch /tmp/content/bar -npx -y supergateway --port 8000 --stdio 'npx -y @modelcontextprotocol/server-filesystem /tmp/content' -``` - -Then register the MCP server as a tool group, -```python -client.toolgroups.register( - toolgroup_id="mcp::filesystem", - provider_id="model-context-protocol", - mcp_endpoint=URL(uri="http://localhost:8000/sse"), -) -``` - - - -## Adding Custom (Client-side) Tools - -When you want to use tools other than the built-in tools, you just need to implement a python function with a docstring. The content of the docstring will be used to describe the tool and the parameters and passed -along to the generative model. - -```python -# Example tool definition -def my_tool(input: int) -> int: - """ - Runs my awesome tool. - - :param input: some int parameter - """ - return input * 2 -``` -> **NOTE:** We employ python docstrings to describe the tool and the parameters. It is important to document the tool and the parameters so that the model can use the tool correctly. It is recommended to experiment with different docstrings to see how they affect the model's behavior. - -Once defined, simply pass the tool to the agent config. `Agent` will take care of the rest (calling the model with the tool definition, executing the tool, and returning the result to the model for the next iteration). 
-```python -# Example agent config with client provided tools -agent = Agent(client, ..., tools=[my_tool]) -``` - -Refer to [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/blob/main/examples/agents/e2e_loop_with_client_tools.py) for an example of how to use client provided tools. - - -## Tool Invocation - -Tools can be invoked using the `invoke_tool` method: - -```python -result = client.tool_runtime.invoke_tool( - tool_name="web_search", kwargs={"query": "What is the capital of France?"} -) -``` - -The result contains: -- `content`: The tool's output -- `error_message`: Optional error message if the tool failed -- `error_code`: Optional error code if the tool failed - -## Listing Available Tools - -You can list all available tools or filter by tool group: - -```python -# List all tools -all_tools = client.tools.list_tools() - -# List tools in a specific group -group_tools = client.tools.list_tools(toolgroup_id="search_tools") -``` - -## Simple Example 2: Using an Agent with the Web Search Tool -1. Start by registering a Tavily API key at [Tavily](https://tavily.com/). -2. [Optional] Provide the API key directly to the Llama Stack server -```bash -export TAVILY_SEARCH_API_KEY="your key" -``` -```bash ---env TAVILY_SEARCH_API_KEY=${TAVILY_SEARCH_API_KEY} -``` -3. Run the following script. -```python -from llama_stack_client.lib.agents.agent import Agent -from llama_stack_client.types.agent_create_params import AgentConfig -from llama_stack_client.lib.agents.event_logger import EventLogger -from llama_stack_client import LlamaStackClient - -client = LlamaStackClient( - base_url=f"http://localhost:8321", - provider_data={ - "tavily_search_api_key": "your_TAVILY_SEARCH_API_KEY" - }, # Set this from the client side. No need to provide it if it has already been configured on the Llama Stack server. 
-) - -agent = Agent( - client, - model="meta-llama/Llama-3.2-3B-Instruct", - instructions=( - "You are a web search assistant, must use websearch tool to look up the most current and precise information available. " - ), - tools=["builtin::websearch"], -) - -session_id = agent.create_session("websearch-session") - -response = agent.create_turn( - messages=[ - {"role": "user", "content": "How did the USA perform in the last Olympics?"} - ], - session_id=session_id, -) -for log in EventLogger().log(response): - log.print() -``` - -## Simple Example3: Using an Agent with the WolframAlpha Tool -1. Start by registering for a WolframAlpha API key at [WolframAlpha Developer Portal](https://developer.wolframalpha.com/access). -2. Provide the API key either when starting the Llama Stack server: - ```bash - --env WOLFRAM_ALPHA_API_KEY=${WOLFRAM_ALPHA_API_KEY} - ``` - or from the client side: - ```python - client = LlamaStackClient( - base_url="http://localhost:8321", - provider_data={"wolfram_alpha_api_key": wolfram_api_key}, - ) - ``` -3. Configure the tools in the Agent by setting `tools=["builtin::wolfram_alpha"]`. -4. Example user query: - ```python - response = agent.create_turn( - messages=[{"role": "user", "content": "Solve x^2 + 2x + 1 = 0 using WolframAlpha"}], - session_id=session_id, - ) - ``` -``` diff --git a/docs/source/concepts/api_providers.md b/docs/source/concepts/api_providers.md deleted file mode 100644 index 6e6502c0c..000000000 --- a/docs/source/concepts/api_providers.md +++ /dev/null @@ -1,12 +0,0 @@ -## API Providers - -The goal of Llama Stack is to build an ecosystem where users can easily swap out different implementations for the same API. Examples for these include: -- LLM inference providers (e.g., Fireworks, Together, AWS Bedrock, Groq, Cerebras, SambaNova, vLLM, etc.), -- Vector databases (e.g., ChromaDB, Weaviate, Qdrant, Milvus, FAISS, PGVector, etc.), -- Safety providers (e.g., Meta's Llama Guard, AWS Bedrock Guardrails, etc.) 
- -Providers come in two flavors: -- **Remote**: the provider runs as a separate service external to the Llama Stack codebase. Llama Stack contains a small amount of adapter code. -- **Inline**: the provider is fully specified and implemented within the Llama Stack codebase. It may be a simple wrapper around an existing library, or a full fledged implementation within Llama Stack. - -Most importantly, Llama Stack always strives to provide at least one fully inline provider for each API so you can iterate on a fully featured environment locally. diff --git a/docs/source/concepts/apis.md b/docs/source/concepts/apis.md deleted file mode 100644 index f8f73a928..000000000 --- a/docs/source/concepts/apis.md +++ /dev/null @@ -1,21 +0,0 @@ -## APIs - -A Llama Stack API is described as a collection of REST endpoints. We currently support the following APIs: - -- **Inference**: run inference with a LLM -- **Safety**: apply safety policies to the output at a Systems (not only model) level -- **Agents**: run multi-step agentic workflows with LLMs with tool usage, memory (RAG), etc. -- **DatasetIO**: interface with datasets and data loaders -- **Scoring**: evaluate outputs of the system -- **Eval**: generate outputs (via Inference or Agents) and perform scoring -- **VectorIO**: perform operations on vector stores, such as adding documents, searching, and deleting documents -- **Telemetry**: collect telemetry data from the system -- **Post Training**: fine-tune a model -- **Tool Runtime**: interact with various tools and protocols -- **Responses**: generate responses from an LLM using this OpenAI compatible API. - -We are working on adding a few more APIs to complete the application lifecycle. 
These will include: -- **Batch Inference**: run inference on a dataset of inputs -- **Batch Agents**: run agents on a dataset of inputs -- **Synthetic Data Generation**: generate synthetic data for model development -- **Batches**: OpenAI-compatible batch management for inference diff --git a/docs/source/concepts/architecture.md b/docs/source/concepts/architecture.md deleted file mode 100644 index 50cc62c7c..000000000 --- a/docs/source/concepts/architecture.md +++ /dev/null @@ -1,70 +0,0 @@ -## Llama Stack architecture - -Llama Stack allows you to build different layers of distributions for your AI workloads using various SDKs and API providers. - -```{image} ../../_static/llama-stack.png -:alt: Llama Stack -:width: 400px -``` - -### Benefits of Llama stack - -#### Current challenges in custom AI applications - -Building production AI applications today requires solving multiple challenges: - -**Infrastructure Complexity** - -- Running large language models efficiently requires specialized infrastructure. -- Different deployment scenarios (local development, cloud, edge) need different solutions. -- Moving from development to production often requires significant rework. - -**Essential Capabilities** - -- Safety guardrails and content filtering are necessary in an enterprise setting. -- Just model inference is not enough - Knowledge retrieval and RAG capabilities are required. -- Nearly any application needs composable multi-step workflows. -- Without monitoring, observability and evaluation, you end up operating in the dark. - -**Lack of Flexibility and Choice** - -- Directly integrating with multiple providers creates tight coupling. -- Different providers have different APIs and abstractions. -- Changing providers requires significant code changes. 
- -#### Our Solution: A Universal Stack - -Llama Stack addresses these challenges through a service-oriented, API-first approach: - -**Develop Anywhere, Deploy Everywhere** -- Start locally with CPU-only setups -- Move to GPU acceleration when needed -- Deploy to cloud or edge without code changes -- Same APIs and developer experience everywhere - -**Production-Ready Building Blocks** -- Pre-built safety guardrails and content filtering -- Built-in RAG and agent capabilities -- Comprehensive evaluation toolkit -- Full observability and monitoring - -**True Provider Independence** -- Swap providers without application changes -- Mix and match best-in-class implementations -- Federation and fallback support -- No vendor lock-in - -**Robust Ecosystem** -- Llama Stack is already integrated with distribution partners (cloud providers, hardware vendors, and AI-focused companies). -- Ecosystem offers tailored infrastructure, software, and services for deploying a variety of models. - - -### Our Philosophy - -- **Service-Oriented**: REST APIs enforce clean interfaces and enable seamless transitions across different environments. -- **Composability**: Every component is independent but works together seamlessly -- **Production Ready**: Built for real-world applications, not just demos -- **Turnkey Solutions**: Easy to deploy built in solutions for popular deployment scenarios - - -With Llama Stack, you can focus on building your application while we handle the infrastructure complexity, essential capabilities, and provider integrations. \ No newline at end of file diff --git a/docs/source/concepts/distributions.md b/docs/source/concepts/distributions.md deleted file mode 100644 index 8c63914d1..000000000 --- a/docs/source/concepts/distributions.md +++ /dev/null @@ -1,9 +0,0 @@ -## Distributions - -While there is a lot of flexibility to mix-and-match providers, often users will work with a specific set of providers (hardware support, contractual obligations, etc.) 
We therefore need to provide a _convenient shorthand_ for such collections. We call this shorthand a **Llama Stack Distribution** or a **Distro**. One can think of it as specific pre-packaged versions of the Llama Stack. Here are some examples: - -**Remotely Hosted Distro**: These are the simplest to consume from a user perspective. You can simply obtain the API key for these providers, point to a URL and have _all_ Llama Stack APIs working out of the box. Currently, [Fireworks](https://fireworks.ai/) and [Together](https://together.xyz/) provide such easy-to-consume Llama Stack distributions. - -**Locally Hosted Distro**: You may want to run Llama Stack on your own hardware. Typically though, you still need to use Inference via an external service. You can use providers like HuggingFace TGI, Fireworks, Together, etc. for this purpose. Or you may have access to GPUs and can run a [vLLM](https://github.com/vllm-project/vllm) or [NVIDIA NIM](https://build.nvidia.com/nim?filters=nimType%3Anim_type_run_anywhere&q=llama) instance. If you "just" have a regular desktop machine, you can use [Ollama](https://ollama.com/) for inference. To provide convenient quick access to these options, we provide a number of such pre-configured locally-hosted Distros. - -**On-device Distro**: To run Llama Stack directly on an edge device (mobile phone or a tablet), we provide Distros for [iOS](../distributions/ondevice_distro/ios_sdk.md) and [Android](../distributions/ondevice_distro/android_sdk.md) diff --git a/docs/source/concepts/index.md b/docs/source/concepts/index.md deleted file mode 100644 index a483132b8..000000000 --- a/docs/source/concepts/index.md +++ /dev/null @@ -1,23 +0,0 @@ -# Core Concepts - -Given Llama Stack's service-oriented philosophy, a few concepts and workflows arise which may not feel completely natural in the LLM landscape, especially if you are coming with a background in other frameworks. 
- -```{include} architecture.md -:start-after: ## Llama Stack architecture -``` - -```{include} apis.md -:start-after: ## APIs -``` - -```{include} api_providers.md -:start-after: ## API Providers -``` - -```{include} distributions.md -:start-after: ## Distributions -``` - -```{include} resources.md -:start-after: ## Resources -``` diff --git a/docs/source/concepts/resources.md b/docs/source/concepts/resources.md deleted file mode 100644 index 0cdc9a227..000000000 --- a/docs/source/concepts/resources.md +++ /dev/null @@ -1,19 +0,0 @@ -## Resources - -Some of these APIs are associated with a set of **Resources**. Here is the mapping of APIs to resources: - -- **Inference**, **Eval** and **Post Training** are associated with `Model` resources. -- **Safety** is associated with `Shield` resources. -- **Tool Runtime** is associated with `ToolGroup` resources. -- **DatasetIO** is associated with `Dataset` resources. -- **VectorIO** is associated with `VectorDB` resources. -- **Scoring** is associated with `ScoringFunction` resources. -- **Eval** is associated with `Model` and `Benchmark` resources. - -Furthermore, we allow these resources to be **federated** across multiple providers. For example, you may have some Llama models served by Fireworks while others are served by AWS Bedrock. Regardless, they will all work seamlessly with the same uniform Inference API provided by Llama Stack. - -```{admonition} Registering Resources -:class: tip - -Given this architecture, it is necessary for the Stack to know which provider to use for a given resource. This means you need to explicitly _register_ resources (including models) before you can use them with the associated APIs. -``` diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 0cbddef31..000000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -import json -from datetime import datetime -from pathlib import Path - -import requests -from docutils import nodes - -# Read version from pyproject.toml -with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") as f: - pypi_url = "https://pypi.org/pypi/llama-stack/json" - headers = { - 'User-Agent': 'pip/23.0.1 (python 3.11)', # Mimic pip's user agent - 'Accept': 'application/json' - } - version_tag = json.loads(requests.get(pypi_url, headers=headers).text)["info"]["version"] - print(f"{version_tag=}") - - # generate the full link including text and url here - llama_stack_version_url = ( - f"https://github.com/meta-llama/llama-stack/releases/tag/v{version_tag}" - ) - llama_stack_version_link = f"release notes" - -project = "llama-stack" -copyright = f"{datetime.now().year}, Meta" -author = "Meta" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = [ - "myst_parser", - "sphinx_copybutton", - "sphinx_design", - "sphinx_rtd_theme", - "sphinx_rtd_dark_mode", - "sphinx_tabs.tabs", - "sphinxcontrib.redoc", - "sphinxcontrib.mermaid", - "sphinxcontrib.video", - "sphinx_reredirects" -] - -redirects = { - "providers/post_training/index": "../../advanced_apis/post_training/index.html", - "providers/eval/index": "../../advanced_apis/eval/index.html", - "providers/scoring/index": "../../advanced_apis/scoring/index.html", - 
"playground/index": "../../building_applications/playground/index.html", - "openai/index": "../../providers/index.html#openai-api-compatibility", - "introduction/index": "../concepts/index.html#llama-stack-architecture" -} - -myst_enable_extensions = ["colon_fence"] - -html_theme = "sphinx_rtd_theme" -html_use_relative_paths = True -templates_path = ["_templates"] -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - -myst_enable_extensions = [ - "amsmath", - "attrs_inline", - "attrs_block", - "colon_fence", - "deflist", - "dollarmath", - "fieldlist", - "html_admonition", - "html_image", - # "linkify", - "replacements", - "smartquotes", - "strikethrough", - "substitution", - "tasklist", -] - -myst_substitutions = { - "docker_hub": "https://hub.docker.com/repository/docker/llamastack", - "llama_stack_version": version_tag, - "llama_stack_version_link": llama_stack_version_link, -} - -suppress_warnings = ["myst.header"] - -# Copy button settings -copybutton_prompt_text = "$ " # for bash prompts -copybutton_prompt_is_regexp = True -copybutton_remove_prompts = True -copybutton_line_continuation_character = "\\" - -# Source suffix -source_suffix = { - ".rst": "restructuredtext", - ".md": "markdown", -} - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -# html_theme = "alabaster" -html_theme_options = { - "canonical_url": "https://github.com/meta-llama/llama-stack", - "collapse_navigation": False, - # "style_nav_header_background": "#c3c9d4", - 'display_version': True, - 'version_selector': True, -} - -default_dark_mode = False - -html_static_path = ["../_static"] -# html_logo = "../_static/llama-stack-logo.png" -# html_style = "../_static/css/my_theme.css" - - -def setup(app): - app.add_css_file("css/my_theme.css") - app.add_js_file("js/detect_theme.js") - app.add_js_file("js/horizontal_nav.js") - app.add_js_file("js/keyboard_shortcuts.js") - - 
def dockerhub_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - url = f"https://hub.docker.com/r/llamastack/{text}" - node = nodes.reference(rawtext, text, refuri=url, **options) - return [node], [] - - def repopath_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - parts = text.split("::") - if len(parts) == 2: - link_text = parts[0] - url_path = parts[1] - else: - link_text = text - url_path = text - - url = f"https://github.com/meta-llama/llama-stack/tree/main/{url_path}" - node = nodes.reference(rawtext, link_text, refuri=url, **options) - return [node], [] - - app.add_role("dockerhub", dockerhub_role) - app.add_role("repopath", repopath_role) diff --git a/docs/source/contributing/index.md b/docs/source/contributing/index.md deleted file mode 100644 index 71c3bd5a6..000000000 --- a/docs/source/contributing/index.md +++ /dev/null @@ -1,39 +0,0 @@ - -```{include} ../../../CONTRIBUTING.md -``` - -## Adding a New Provider - -See: -- [Adding a New API Provider Page](new_api_provider.md) which describes how to add new API providers to the Stack. -- [Vector Database Page](new_vector_database.md) which describes how to add a new vector databases with Llama Stack. -- [External Provider Page](../providers/external/index.md) which describes how to add external providers to the Stack. 
- -```{toctree} -:maxdepth: 1 -:hidden: - -new_api_provider -new_vector_database -``` - -## Testing - - -```{include} ../../../tests/README.md -``` - -## Advanced Topics - -For developers who need deeper understanding of the testing system internals: - -```{toctree} -:maxdepth: 1 - -testing/record-replay -``` - -### Benchmarking - -```{include} ../../../benchmarking/k8s-benchmark/README.md -``` diff --git a/docs/source/contributing/new_api_provider.md b/docs/source/contributing/new_api_provider.md deleted file mode 100644 index 9a7a62a38..000000000 --- a/docs/source/contributing/new_api_provider.md +++ /dev/null @@ -1,90 +0,0 @@ -# Adding a New API Provider - -This guide will walk you through the process of adding a new API provider to Llama Stack. - - -- Begin by reviewing the [core concepts](../concepts/index.md) of Llama Stack and choose the API your provider belongs to (Inference, Safety, VectorIO, etc.) -- Determine the provider type ({repopath}`Remote::llama_stack/providers/remote` or {repopath}`Inline::llama_stack/providers/inline`). Remote providers make requests to external services, while inline providers execute implementation locally. -- Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`. Specify pip dependencies necessary. -- Update any distribution {repopath}`Templates::llama_stack/distributions/` `build.yaml` and `run.yaml` files if they should include your provider by default. Run {repopath}`./scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation. 
- - -Here are some example PRs to help you get started: - - [Grok Inference Implementation](https://github.com/meta-llama/llama-stack/pull/609) - - [Nvidia Inference Implementation](https://github.com/meta-llama/llama-stack/pull/355) - - [Model context protocol Tool Runtime](https://github.com/meta-llama/llama-stack/pull/665) - -## Guidelines for creating Internal or External Providers - -|**Type** |Internal (In-tree) |External (out-of-tree) -|---------|-------------------|---------------------| -|**Description** |A provider that is directly in the Llama Stack code|A provider that is outside of the Llama stack core codebase but is still accessible and usable by Llama Stack. -|**Benefits** |Ability to interact with the provider with minimal additional configurations or installations| Contributors do not have to add directly to the code to create providers accessible on Llama Stack. Keep provider-specific code separate from the core Llama Stack code. - -## Inference Provider Patterns - -When implementing Inference providers for OpenAI-compatible APIs, Llama Stack provides several mixin classes to simplify development and ensure consistent behavior across providers. - -### OpenAIMixin - -The `OpenAIMixin` class provides direct OpenAI API functionality for providers that work with OpenAI-compatible endpoints. 
It includes: - -#### Direct API Methods -- **`openai_completion()`**: Legacy text completion API with full parameter support -- **`openai_chat_completion()`**: Chat completion API supporting streaming, tools, and function calling -- **`openai_embeddings()`**: Text embeddings generation with customizable encoding and dimensions - -#### Model Management -- **`check_model_availability()`**: Queries the API endpoint to verify if a model exists and is accessible - -#### Client Management -- **`client` property**: Automatically creates and configures AsyncOpenAI client instances using your provider's credentials - -#### Required Implementation - -To use `OpenAIMixin`, your provider must implement these abstract methods: - -```python -@abstractmethod -def get_api_key(self) -> str: - """Return the API key for authentication""" - pass - - -@abstractmethod -def get_base_url(self) -> str: - """Return the OpenAI-compatible API base URL""" - pass -``` - -## Testing the Provider - -Before running tests, you must have required dependencies installed. This depends on the providers or distributions you are testing. For example, if you are testing the `together` distribution, you should install dependencies via `llama stack build --distro together`. - -### 1. Integration Testing - -Integration tests are located in {repopath}`tests/integration`. These tests use the python client-SDK APIs (from the `llama_stack_client` package) to test functionality. Since these tests use client APIs, they can be run either by pointing to an instance of the Llama Stack server or "inline" by using `LlamaStackAsLibraryClient`. - -Consult {repopath}`tests/integration/README.md` for more details on how to run the tests. - -Note that each provider's `sample_run_config()` method (in the configuration class for that provider) - typically references some environment variables for specifying API keys and the like. You can set these in the environment or pass these via the `--env` flag to the test command. 
- - -### 2. Unit Testing - -Unit tests are located in {repopath}`tests/unit`. Provider-specific unit tests are located in {repopath}`tests/unit/providers`. These tests are all run automatically as part of the CI process. - -Consult {repopath}`tests/unit/README.md` for more details on how to run the tests manually. - -### 3. Additional end-to-end testing - -1. Start a Llama Stack server with your new provider -2. Verify compatibility with existing client scripts in the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repository -3. Document which scripts are compatible with your provider - -## Submitting Your PR - -1. Ensure all tests pass -2. Include a comprehensive test plan in your PR summary -3. Document any known limitations or considerations diff --git a/docs/source/contributing/new_vector_database.md b/docs/source/contributing/new_vector_database.md deleted file mode 100644 index 83c0f55bc..000000000 --- a/docs/source/contributing/new_vector_database.md +++ /dev/null @@ -1,75 +0,0 @@ -# Adding a New Vector Database - -This guide will walk you through the process of adding a new vector database to Llama Stack. - -> **_NOTE:_** Here's an example Pull Request of the [Milvus Vector Database Provider](https://github.com/meta-llama/llama-stack/pull/1467). - -Vector Database providers are used to store and retrieve vector embeddings. Vector databases are not limited to vector -search but can support keyword and hybrid search. Additionally, vector database can also support operations like -filtering, sorting, and aggregating vectors. - -## Steps to Add a New Vector Database Provider -1. **Choose the Database Type**: Determine if your vector database is a remote service, inline, or both. - - Remote databases make requests to external services, while inline databases execute locally. Some providers support both. -2. 
**Implement the Provider**: Create a new provider class that inherits from `VectorDatabaseProvider` and implements the required methods. - - Implement methods for vector storage, retrieval, search, and any additional features your database supports. - - You will need to implement the following methods for `YourVectorIndex`: - - `YourVectorIndex.create()` - - `YourVectorIndex.initialize()` - - `YourVectorIndex.add_chunks()` - - `YourVectorIndex.delete_chunk()` - - `YourVectorIndex.query_vector()` - - `YourVectorIndex.query_keyword()` - - `YourVectorIndex.query_hybrid()` - - You will need to implement the following methods for `YourVectorIOAdapter`: - - `YourVectorIOAdapter.initialize()` - - `YourVectorIOAdapter.shutdown()` - - `YourVectorIOAdapter.list_vector_dbs()` - - `YourVectorIOAdapter.register_vector_db()` - - `YourVectorIOAdapter.unregister_vector_db()` - - `YourVectorIOAdapter.insert_chunks()` - - `YourVectorIOAdapter.query_chunks()` - - `YourVectorIOAdapter.delete_chunks()` -3. **Add to Registry**: Register your provider in the appropriate registry file. - - Update {repopath}`llama_stack/providers/registry/vector_io.py` to include your new provider. -```python -from llama_stack.providers.registry.specs import InlineProviderSpec -from llama_stack.providers.registry.api import Api - -InlineProviderSpec( - api=Api.vector_io, - provider_type="inline::milvus", - pip_packages=["pymilvus>=2.4.10"], - module="llama_stack.providers.inline.vector_io.milvus", - config_class="llama_stack.providers.inline.vector_io.milvus.MilvusVectorIOConfig", - api_dependencies=[Api.inference], - optional_api_dependencies=[Api.files], - description="", -), -``` -4. **Add Tests**: Create unit tests and integration tests for your provider in the `tests/` directory. - - Unit Tests - - By following the structure of the class methods, you will be able to easily run unit and integration tests for your database. - 1. 
You have to configure the tests for your provide in `/tests/unit/providers/vector_io/conftest.py`. - 2. Update the `vector_provider` fixture to include your provider if they are an inline provider. - 3. Create a `your_vectorprovider_index` fixture that initializes your vector index. - 4. Create a `your_vectorprovider_adapter` fixture that initializes your vector adapter. - 5. Add your provider to the `vector_io_providers` fixture dictionary. - - Please follow the naming convention of `your_vectorprovider_index` and `your_vectorprovider_adapter` as the tests require this to execute properly. - - Integration Tests - - Integration tests are located in {repopath}`tests/integration`. These tests use the python client-SDK APIs (from the `llama_stack_client` package) to test functionality. - - The two set of integration tests are: - - `tests/integration/vector_io/test_vector_io.py`: This file tests registration, insertion, and retrieval. - - `tests/integration/vector_io/test_openai_vector_stores.py`: These tests are for OpenAI-compatible vector stores and test the OpenAI API compatibility. - - You will need to update `skip_if_provider_doesnt_support_openai_vector_stores` to include your provider as well as `skip_if_provider_doesnt_support_openai_vector_stores_search` to test the appropriate search functionality. - - Running the tests in the GitHub CI - - You will need to update the `.github/workflows/integration-vector-io-tests.yml` file to include your provider. - - If your provider is a remote provider, you will also have to add a container to spin up and run it in the action. - - Updating the pyproject.yml - - If you are adding tests for the `inline` provider you will have to update the `unit` group. - - `uv add new_pip_package --group unit` - - If you are adding tests for the `remote` provider you will have to update the `test` group, which is used in the GitHub CI for integration tests. - - `uv add new_pip_package --group test` -5. 
**Update Documentation**: Please update the documentation for end users - - Generate the provider documentation by running {repopath}`./scripts/provider_codegen.py`. - - Update the autogenerated content in the registry/vector_io.py file with information about your provider. Please see other providers for examples. \ No newline at end of file diff --git a/docs/source/contributing/testing/record-replay.md b/docs/source/contributing/testing/record-replay.md deleted file mode 100644 index 7b0f345b0..000000000 --- a/docs/source/contributing/testing/record-replay.md +++ /dev/null @@ -1,231 +0,0 @@ -# Record-Replay System - -Understanding how Llama Stack captures and replays API interactions for testing. - -## Overview - -The record-replay system solves a fundamental challenge in AI testing: how do you test against expensive, non-deterministic APIs without breaking the bank or dealing with flaky tests? - -The solution: intercept API calls, store real responses, and replay them later. This gives you real API behavior without the cost or variability. - -## How It Works - -### Request Hashing - -Every API request gets converted to a deterministic hash for lookup: - -```python -def normalize_request(method: str, url: str, headers: dict, body: dict) -> str: - normalized = { - "method": method.upper(), - "endpoint": urlparse(url).path, # Just the path, not full URL - "body": body, # Request parameters - } - return hashlib.sha256(json.dumps(normalized, sort_keys=True).encode()).hexdigest() -``` - -**Key insight:** The hashing is intentionally precise. Different whitespace, float precision, or parameter order produces different hashes. This prevents subtle bugs from false cache hits. - -```python -# These produce DIFFERENT hashes: -{"content": "Hello world"} -{"content": "Hello world\n"} -{"temperature": 0.7} -{"temperature": 0.7000001} -``` - -### Client Interception - -The system patches OpenAI and Ollama client methods to intercept calls before they leave your application. 
This happens transparently - your test code doesn't change. - -### Storage Architecture - -Recordings are stored as JSON files in the recording directory. They are looked up by their request hash. - -``` -recordings/ -โ””โ”€โ”€ responses/ - โ”œโ”€โ”€ abc123def456.json # Individual response files - โ””โ”€โ”€ def789ghi012.json -``` - -**JSON files** store complete request/response pairs in human-readable format for debugging. - -## Recording Modes - -### LIVE Mode - -Direct API calls with no recording or replay: - -```python -with inference_recording(mode=InferenceMode.LIVE): - response = await client.chat.completions.create(...) -``` - -Use for initial development and debugging against real APIs. - -### RECORD Mode - -Captures API interactions while passing through real responses: - -```python -with inference_recording(mode=InferenceMode.RECORD, storage_dir="./recordings"): - response = await client.chat.completions.create(...) - # Real API call made, response captured AND returned -``` - -The recording process: -1. Request intercepted and hashed -2. Real API call executed -3. Response captured and serialized -4. Recording stored to disk -5. Original response returned to caller - -### REPLAY Mode - -Returns stored responses instead of making API calls: - -```python -with inference_recording(mode=InferenceMode.REPLAY, storage_dir="./recordings"): - response = await client.chat.completions.create(...) - # No API call made, cached response returned instantly -``` - -The replay process: -1. Request intercepted and hashed -2. Hash looked up in SQLite index -3. Response loaded from JSON file -4. Response deserialized and returned -5. Error if no recording found - -## Streaming Support - -Streaming APIs present a unique challenge: how do you capture an async generator? - -### The Problem - -```python -# How do you record this? 
-async for chunk in client.chat.completions.create(stream=True): - process(chunk) -``` - -### The Solution - -The system captures all chunks immediately before yielding any: - -```python -async def handle_streaming_record(response): - # Capture complete stream first - chunks = [] - async for chunk in response: - chunks.append(chunk) - - # Store complete recording - storage.store_recording( - request_hash, request_data, {"body": chunks, "is_streaming": True} - ) - - # Return generator that replays captured chunks - async def replay_stream(): - for chunk in chunks: - yield chunk - - return replay_stream() -``` - -This ensures: -- **Complete capture** - The entire stream is saved atomically -- **Interface preservation** - The returned object behaves like the original API -- **Deterministic replay** - Same chunks in the same order every time - -## Serialization - -API responses contain complex Pydantic objects that need careful serialization: - -```python -def _serialize_response(response): - if hasattr(response, "model_dump"): - # Preserve type information for proper deserialization - return { - "__type__": f"{response.__class__.__module__}.{response.__class__.__qualname__}", - "__data__": response.model_dump(mode="json"), - } - return response -``` - -This preserves type safety - when replayed, you get the same Pydantic objects with all their validation and methods. - -## Environment Integration - -### Environment Variables - -Control recording behavior globally: - -```bash -export LLAMA_STACK_TEST_INFERENCE_MODE=replay # this is the default -export LLAMA_STACK_TEST_RECORDING_DIR=/path/to/recordings # default is tests/integration/recordings -pytest tests/integration/ -``` - -### Pytest Integration - -The system integrates automatically based on environment variables, requiring no changes to test code. 
- -## Debugging Recordings - -### Inspecting Storage - -```bash -# See what's recorded -sqlite3 recordings/index.sqlite "SELECT endpoint, model, timestamp FROM recordings LIMIT 10;" - -# View specific response -cat recordings/responses/abc123def456.json | jq '.response.body' - -# Find recordings by endpoint -sqlite3 recordings/index.sqlite "SELECT * FROM recordings WHERE endpoint='/v1/chat/completions';" -``` - -### Common Issues - -**Hash mismatches:** Request parameters changed slightly between record and replay -```bash -# Compare request details -cat recordings/responses/abc123.json | jq '.request' -``` - -**Serialization errors:** Response types changed between versions -```bash -# Re-record with updated types -rm recordings/responses/failing_hash.json -LLAMA_STACK_TEST_INFERENCE_MODE=record pytest test_failing.py -``` - -**Missing recordings:** New test or changed parameters -```bash -# Record the missing interaction -LLAMA_STACK_TEST_INFERENCE_MODE=record pytest test_new.py -``` - -## Design Decisions - -### Why Not Mocks? - -Traditional mocking breaks down with AI APIs because: -- Response structures are complex and evolve frequently -- Streaming behavior is hard to mock correctly -- Edge cases in real APIs get missed -- Mocks become brittle maintenance burdens - -### Why Precise Hashing? - -Loose hashing (normalizing whitespace, rounding floats) seems convenient but hides bugs. If a test changes slightly, you want to know about it rather than accidentally getting the wrong cached response. - -### Why JSON + SQLite? - -- **JSON** - Human readable, diff-friendly, easy to inspect and modify -- **SQLite** - Fast indexed lookups without loading response bodies -- **Hybrid** - Best of both worlds for different use cases - -This system provides reliable, fast testing against real AI APIs while maintaining the ability to debug issues when they arise. 
\ No newline at end of file diff --git a/docs/source/deploying/index.md b/docs/source/deploying/index.md deleted file mode 100644 index 73b5bf4f5..000000000 --- a/docs/source/deploying/index.md +++ /dev/null @@ -1,4 +0,0 @@ -# Deployment Examples - -```{include} kubernetes_deployment.md -``` \ No newline at end of file diff --git a/docs/source/deploying/kubernetes_deployment.md b/docs/source/deploying/kubernetes_deployment.md deleted file mode 100644 index 4bdd87b24..000000000 --- a/docs/source/deploying/kubernetes_deployment.md +++ /dev/null @@ -1,247 +0,0 @@ -## Kubernetes Deployment Guide - -Instead of starting the Llama Stack and vLLM servers locally. We can deploy them in a Kubernetes cluster. - -### Prerequisites -In this guide, we'll use a local [Kind](https://kind.sigs.k8s.io/) cluster and a vLLM inference service in the same cluster for demonstration purposes. - -Note: You can also deploy the Llama Stack server in an AWS EKS cluster. See [Deploying Llama Stack Server in AWS EKS](#deploying-llama-stack-server-in-aws-eks) for more details. - -First, create a local Kubernetes cluster via Kind: - -``` -kind create cluster --image kindest/node:v1.32.0 --name llama-stack-test -``` - -First set your hugging face token as an environment variable. 
-``` -export HF_TOKEN=$(echo -n "your-hf-token" | base64) -``` - -Now create a Kubernetes PVC and Secret for downloading and storing Hugging Face model: - -``` -cat <$tmp_dir/Containerfile.llama-stack-run-k8s </api/auth/callback/` - - -Run the following script to deploy the Llama Stack server: -``` -export HF_TOKEN= -export GITHUB_CLIENT_ID= -export GITHUB_CLIENT_SECRET= -export LLAMA_STACK_UI_URL= - -cd docs/source/distributions/eks -./apply.sh -``` - -This script will: - -- Set up a default storage class for AWS EKS -- Deploy the Llama Stack server in a Kubernetes Pod and Service \ No newline at end of file diff --git a/docs/source/distributions/building_distro.md b/docs/source/distributions/building_distro.md deleted file mode 100644 index 24098708f..000000000 --- a/docs/source/distributions/building_distro.md +++ /dev/null @@ -1,443 +0,0 @@ -# Build your own Distribution - - -This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers. - - -### Setting your log level - -In order to specify the proper logging level users can apply the following environment variable `LLAMA_STACK_LOGGING` with the following format: - -`LLAMA_STACK_LOGGING=server=debug;core=info` - -Where each category in the following list: - -- all -- core -- server -- router -- inference -- agents -- safety -- eval -- tools -- client - -Can be set to any of the following log levels: - -- debug -- info -- warning -- error -- critical - -The default global log level is `info`. `all` sets the log level for all components. - -A user can also set `LLAMA_STACK_LOG_FILE` which will pipe the logs to the specified path as well as to the terminal. An example would be: `export LLAMA_STACK_LOG_FILE=server.log` - -### Llama Stack Build - -In order to build your own distribution, we recommend you clone the `llama-stack` repository. 
- - -``` -git clone git@github.com:meta-llama/llama-stack.git -cd llama-stack -pip install -e . -``` -Use the CLI to build your distribution. -The main points to consider are: -1. **Image Type** - Do you want a venv environment or a Container (eg. Docker) -2. **Template** - Do you want to use a template to build your distribution? or start from scratch ? -3. **Config** - Do you want to use a pre-existing config file to build your distribution? - -``` -llama stack build -h -usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--distro DISTRIBUTION] [--list-distros] [--image-type {container,venv}] [--image-name IMAGE_NAME] [--print-deps-only] - [--run] [--providers PROVIDERS] - -Build a Llama stack container - -options: - -h, --help show this help message and exit - --config CONFIG Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to - enter information interactively (default: None) - --template TEMPLATE (deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default: - None) - --distro DISTRIBUTION, --distribution DISTRIBUTION - Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default: None) - --list-distros, --list-distributions - Show the available distributions for building a Llama Stack distribution (default: False) - --image-type {container,venv} - Image Type to use for the build. If not specified, will use the image type from the template config. (default: None) - --image-name IMAGE_NAME - [for image-type=container|venv] Name of the virtual environment to use for the build. If not specified, currently active environment will be used if found. 
(default: - None) - --print-deps-only Print the dependencies for the stack only, without building the stack (default: False) - --run Run the stack after building using the same image type, name, and other applicable arguments (default: False) - --providers PROVIDERS - Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per - API. (default: None) -``` - -After this step is complete, a file named `-build.yaml` and template file `-run.yaml` will be generated and saved at the output file path specified at the end of the command. - -::::{tab-set} -:::{tab-item} Building from a template -To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers. - -The following command will allow you to see the available templates and their corresponding providers. -``` -llama stack build --list-templates -``` - -``` -------------------------------+-----------------------------------------------------------------------------+ -| Template Name | Description | -+------------------------------+-----------------------------------------------------------------------------+ -| watsonx | Use watsonx for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| vllm-gpu | Use a built-in vLLM engine for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| together | Use Together.AI for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| tgi | Use (an external) TGI server for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| starter | Quick start template 
for running Llama Stack with several popular providers | -+------------------------------+-----------------------------------------------------------------------------+ -| sambanova | Use SambaNova for running LLM inference and safety | -+------------------------------+-----------------------------------------------------------------------------+ -| remote-vllm | Use (an external) vLLM server for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| postgres-demo | Quick start template for running Llama Stack with several popular providers | -+------------------------------+-----------------------------------------------------------------------------+ -| passthrough | Use Passthrough hosted llama-stack endpoint for LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| open-benchmark | Distribution for running open benchmarks | -+------------------------------+-----------------------------------------------------------------------------+ -| ollama | Use (an external) Ollama server for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| nvidia | Use NVIDIA NIM for running LLM inference, evaluation and safety | -+------------------------------+-----------------------------------------------------------------------------+ -| meta-reference-gpu | Use Meta Reference for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| llama_api | Distribution for running e2e tests in CI | -+------------------------------+-----------------------------------------------------------------------------+ -| hf-serverless | Use (an external) Hugging Face Inference Endpoint for running LLM inference | 
-+------------------------------+-----------------------------------------------------------------------------+ -| hf-endpoint | Use (an external) Hugging Face Inference Endpoint for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| groq | Use Groq for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| fireworks | Use Fireworks.AI for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| experimental-post-training | Experimental template for post training | -+------------------------------+-----------------------------------------------------------------------------+ -| dell | Dell's distribution of Llama Stack. TGI inference via Dell's custom | -| | container | -+------------------------------+-----------------------------------------------------------------------------+ -| ci-tests | Distribution for running e2e tests in CI | -+------------------------------+-----------------------------------------------------------------------------+ -| cerebras | Use Cerebras for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ -| bedrock | Use AWS Bedrock for running LLM inference and safety | -+------------------------------+-----------------------------------------------------------------------------+ -``` - -You may then pick a template to build your distribution with providers fitted to your liking. - -For example, to build a distribution with TGI as the inference provider, you can run: -``` -$ llama stack build --distro starter -... 
-You can now edit ~/.llama/distributions/llamastack-starter/starter-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-starter/starter-run.yaml` -``` - -```{tip} -The generated `run.yaml` file is a starting point for your configuration. For comprehensive guidance on customizing it for your specific needs, infrastructure, and deployment scenarios, see [Customizing Your run.yaml Configuration](customizing_run_yaml.md). -``` -::: -:::{tab-item} Building from Scratch - -If the provided templates do not fit your use case, you could start off with running `llama stack build` which will allow you to a interactively enter wizard where you will be prompted to enter build configurations. - -It would be best to start with a template and understand the structure of the config file and the various concepts ( APIS, providers, resources, etc.) before starting from scratch. -``` -llama stack build - -> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack -> Enter the image type you want your Llama Stack to be built as (container or venv): venv - -Llama Stack is composed of several APIs working together. Let's select -the provider types (implementations) you want to use for these APIs. - -Tip: use to see options for the providers. 
- -> Enter provider for API inference: inline::meta-reference -> Enter provider for API safety: inline::llama-guard -> Enter provider for API agents: inline::meta-reference -> Enter provider for API memory: inline::faiss -> Enter provider for API datasetio: inline::meta-reference -> Enter provider for API scoring: inline::meta-reference -> Enter provider for API eval: inline::meta-reference -> Enter provider for API telemetry: inline::meta-reference - - > (Optional) Enter a short description for your Llama Stack: - -You can now edit ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml` -``` -::: - -:::{tab-item} Building from a pre-existing build config file -- In addition to templates, you may customize the build to your liking through editing config files and build from config files with the following command. - -- The config file will be of contents like the ones in `llama_stack/distributions/*build.yaml`. - -``` -llama stack build --config llama_stack/distributions/starter/build.yaml -``` -::: - -:::{tab-item} Building with External Providers - -Llama Stack supports external providers that live outside of the main codebase. This allows you to create and maintain your own providers independently or use community-provided providers. - -To build a distribution with external providers, you need to: - -1. 
Configure the `external_providers_dir` in your build configuration file: - -```yaml -# Example my-external-stack.yaml with external providers -version: '2' -distribution_spec: - description: Custom distro for CI tests - providers: - inference: - - remote::custom_ollama -# Add more providers as needed -image_type: container -image_name: ci-test -# Path to external provider implementations -external_providers_dir: ~/.llama/providers.d -``` - -Here's an example for a custom Ollama provider: - -```yaml -adapter: - adapter_type: custom_ollama - pip_packages: - - ollama - - aiohttp - - llama-stack-provider-ollama # This is the provider package - config_class: llama_stack_ollama_provider.config.OllamaImplConfig - module: llama_stack_ollama_provider -api_dependencies: [] -optional_api_dependencies: [] -``` - -The `pip_packages` section lists the Python packages required by the provider, as well as the -provider package itself. The package must be available on PyPI or can be provided from a local -directory or a git repository (git must be installed on the build environment). - -2. Build your distribution using the config file: - -``` -llama stack build --config my-external-stack.yaml -``` - -For more information on external providers, including directory structure, provider types, and implementation requirements, see the [External Providers documentation](../providers/external.md). -::: - -:::{tab-item} Building Container - -```{admonition} Podman Alternative -:class: tip - -Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman. -``` - -To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type. - -``` -llama stack build --distro starter --image-type container -``` - -``` -$ llama stack build --distro starter --image-type container -... 
-Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/ContainerfileFROM python:3.10-slim -... -``` - -You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml` -``` - -Now set some environment variables for the inference model ID and Llama Stack Port and create a local directory to mount into the container's file system. -``` -export INFERENCE_MODEL="llama3.2:3b" -export LLAMA_STACK_PORT=8321 -mkdir -p ~/.llama -``` - -After this step is successful, you should be able to find the built container image and test it with the below Docker command: - -``` -docker run -d \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - localhost/distribution-ollama:dev \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=$INFERENCE_MODEL \ - --env OLLAMA_URL=http://host.docker.internal:11434 -``` - -Here are the docker flags and their uses: - -* `-d`: Runs the container in the detached mode as a background process - -* `-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT`: Maps the container port to the host port for accessing the server - -* `-v ~/.llama:/root/.llama`: Mounts the local .llama directory to persist configurations and data - -* `localhost/distribution-ollama:dev`: The name and tag of the container image to run - -* `--port $LLAMA_STACK_PORT`: Port number for the server to listen on - -* `--env INFERENCE_MODEL=$INFERENCE_MODEL`: Sets the model to use for inference - -* `--env OLLAMA_URL=http://host.docker.internal:11434`: Configures the URL for the Ollama service - -::: - -:::: - - -### Running your Stack server -Now, let's start the Llama Stack Distribution Server. You will need the YAML configuration file which was written out at the end by the `llama stack build` step. 
- -``` -llama stack run -h -usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--env KEY=VALUE] - [--image-type {venv}] [--enable-ui] - [config | template] - -Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution. - -positional arguments: - config | template Path to config file to use for the run or name of known template (`llama stack list` for a list). (default: None) - -options: - -h, --help show this help message and exit - --port PORT Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321) - --image-name IMAGE_NAME - Name of the image to run. Defaults to the current environment (default: None) - --env KEY=VALUE Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times. (default: None) - --image-type {venv} - Image Type used during the build. This should be venv. (default: None) - --enable-ui Start the UI server (default: False) -``` - -**Note:** Container images built with `llama stack build --image-type container` cannot be run using `llama stack run`. Instead, they must be run directly using Docker or Podman commands as shown in the container building section above. - -``` -# Start using template name -llama stack run tgi - -# Start using config file -llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml - -# Start using a venv -llama stack run --image-type venv ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml -``` - -``` -$ llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml - -Serving API inspect - GET /health - GET /providers/list - GET /routes/list -Serving API inference - POST /inference/chat_completion - POST /inference/completion - POST /inference/embeddings -... 
-Serving API agents - POST /agents/create - POST /agents/session/create - POST /agents/turn/create - POST /agents/delete - POST /agents/session/delete - POST /agents/session/get - POST /agents/step/get - POST /agents/turn/get - -Listening on ['::', '0.0.0.0']:8321 -INFO: Started server process [2935911] -INFO: Waiting for application startup. -INFO: Application startup complete. -INFO: Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit) -INFO: 2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK -``` - -### Listing Distributions -Using the list command, you can view all existing Llama Stack distributions, including stacks built from templates, from scratch, or using custom configuration files. - -``` -llama stack list -h -usage: llama stack list [-h] - -list the build stacks - -options: - -h, --help show this help message and exit -``` - -Example Usage - -``` -llama stack list -``` - -``` -------------------------------+-----------------------------------------------------------------+--------------+------------+ -| Stack Name | Path | Build Config | Run Config | -+------------------------------+-----------------------------------------------------------------------------+--------------+ -| together | ~/.llama/distributions/together | Yes | No | -+------------------------------+-----------------------------------------------------------------------------+--------------+ -| bedrock | ~/.llama/distributions/bedrock | Yes | No | -+------------------------------+-----------------------------------------------------------------------------+--------------+ -| starter | ~/.llama/distributions/starter | Yes | Yes | -+------------------------------+-----------------------------------------------------------------------------+--------------+ -| remote-vllm | ~/.llama/distributions/remote-vllm | Yes | Yes | -+------------------------------+-----------------------------------------------------------------------------+--------------+ 
-``` - -### Removing a Distribution -Use the remove command to delete a distribution you've previously built. - -``` -llama stack rm -h -usage: llama stack rm [-h] [--all] [name] - -Remove the build stack - -positional arguments: - name Name of the stack to delete (default: None) - -options: - -h, --help show this help message and exit - --all, -a Delete all stacks (use with caution) (default: False) -``` - -Example -``` -llama stack rm llamastack-test -``` - -To keep your environment organized and avoid clutter, consider using `llama stack list` to review old or unused distributions and `llama stack rm ` to delete them when they're no longer needed. - -### Troubleshooting - -If you encounter any issues, ask questions in our discord or search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file an new issue. diff --git a/docs/source/distributions/configuration.md b/docs/source/distributions/configuration.md deleted file mode 100644 index 452c3d95f..000000000 --- a/docs/source/distributions/configuration.md +++ /dev/null @@ -1,802 +0,0 @@ -# Configuring a "Stack" - -The Llama Stack runtime configuration is specified as a YAML file. Here is a simplified version of an example configuration file for the Ollama distribution: - -```{note} -The default `run.yaml` files generated by templates are starting points for your configuration. For guidance on customizing these files for your specific needs, see [Customizing Your run.yaml Configuration](customizing_run_yaml.md). 
-``` - -```{dropdown} ๐Ÿ‘‹ Click here for a Sample Configuration File - -```yaml -version: 2 -apis: -- agents -- inference -- vector_io -- safety -- telemetry -providers: - inference: - - provider_id: ollama - provider_type: remote::ollama - config: - url: ${env.OLLAMA_URL:=http://localhost:11434} - vector_io: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - namespace: null - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/faiss_store.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: {} - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - namespace: null - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/agents_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: {} -metadata_store: - namespace: null - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/registry.db -models: -- metadata: {} - model_id: ${env.INFERENCE_MODEL} - provider_id: ollama - provider_model_id: null -shields: [] -server: - port: 8321 - auth: - provider_config: - type: "oauth2_token" - jwks: - uri: "https://my-token-issuing-svc.com/jwks" -``` - -Let's break this down into the different sections. The first section specifies the set of APIs that the stack server will serve: -```yaml -apis: -- agents -- inference -- vector_io -- safety -- telemetry -``` - -## Providers -Next up is the most critical part: the set of providers that the stack will use to serve the above APIs. Consider the `inference` API: -```yaml -providers: - inference: - # provider_id is a string you can choose freely - - provider_id: ollama - # provider_type is a string that specifies the type of provider. 
- # in this case, the provider for inference is ollama and it runs remotely (outside of the distribution) - provider_type: remote::ollama - # config is a dictionary that contains the configuration for the provider. - # in this case, the configuration is the url of the ollama server - config: - url: ${env.OLLAMA_URL:=http://localhost:11434} -``` -A few things to note: -- A _provider instance_ is identified with an (id, type, config) triplet. -- The id is a string you can choose freely. -- You can instantiate any number of provider instances of the same type. -- The configuration dictionary is provider-specific. -- Notice that configuration can reference environment variables (with default values), which are expanded at runtime. When you run a stack server (via docker or via `llama stack run`), you can specify `--env OLLAMA_URL=http://my-server:11434` to override the default value. - -### Environment Variable Substitution - -Llama Stack supports environment variable substitution in configuration values using the -`${env.VARIABLE_NAME}` syntax. This allows you to externalize configuration values and provide -different settings for different environments. The syntax is inspired by [bash parameter expansion](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html) -and follows similar patterns. - -#### Basic Syntax - -The basic syntax for environment variable substitution is: - -```yaml -config: - api_key: ${env.API_KEY} - url: ${env.SERVICE_URL} -``` - -If the environment variable is not set, the server will raise an error during startup. - -#### Default Values - -You can provide default values using the `:=` operator: - -```yaml -config: - url: ${env.OLLAMA_URL:=http://localhost:11434} - port: ${env.PORT:=8321} - timeout: ${env.TIMEOUT:=60} -``` - -If the environment variable is not set, the default value `http://localhost:11434` will be used. 
-Empty defaults are allowed so `url: ${env.OLLAMA_URL:=}` will be set to `None` if the environment variable is not set. - -#### Conditional Values - -You can use the `:+` operator to provide a value only when the environment variable is set: - -```yaml -config: - # Only include this field if ENVIRONMENT is set - environment: ${env.ENVIRONMENT:+production} -``` - -If the environment variable is set, the value after `:+` will be used. If it's not set, the field -will be omitted with a `None` value. - -Do not use conditional values (`${env.OLLAMA_URL:+}`) for empty defaults (`${env.OLLAMA_URL:=}`). -This will be set to `None` if the environment variable is not set. -Conditional must only be used when the environment variable is set. - -#### Examples - -Here are some common patterns: - -```yaml -# Required environment variable (will error if not set) -api_key: ${env.OPENAI_API_KEY} - -# Optional with default -base_url: ${env.API_BASE_URL:=https://api.openai.com/v1} - -# Conditional field -debug_mode: ${env.DEBUG:+true} - -# Optional field that becomes None if not set -optional_token: ${env.OPTIONAL_TOKEN:+} -``` - -#### Runtime Override - -You can override environment variables at runtime when starting the server: - -```bash -# Override specific environment variables -llama stack run --config run.yaml --env API_KEY=sk-123 --env BASE_URL=https://custom-api.com - -# Or set them in your shell -export API_KEY=sk-123 -export BASE_URL=https://custom-api.com -llama stack run --config run.yaml -``` - -#### Type Safety - -The environment variable substitution system is type-safe: - -- String values remain strings -- Empty defaults (`${env.VAR:+}`) are converted to `None` for fields that accept `str | None` -- Numeric defaults are properly typed (e.g., `${env.PORT:=8321}` becomes an integer) -- Boolean defaults work correctly (e.g., `${env.DEBUG:=false}` becomes a boolean) - -## Resources - -Let's look at the `models` section: - -```yaml -models: -- metadata: {} - model_id: 
${env.INFERENCE_MODEL} - provider_id: ollama - provider_model_id: null - model_type: llm -``` -A Model is an instance of a "Resource" (see [Concepts](../concepts/index)) and is associated with a specific inference provider (in this case, the provider with identifier `ollama`). This is an instance of a "pre-registered" model. While we always encourage the clients to register models before using them, some Stack servers may come up a list of "already known and available" models. - -What's with the `provider_model_id` field? This is an identifier for the model inside the provider's model catalog. Contrast it with `model_id` which is the identifier for the same model for Llama Stack's purposes. For example, you may want to name "llama3.2:vision-11b" as "image_captioning_model" when you use it in your Stack interactions. When omitted, the server will set `provider_model_id` to be the same as `model_id`. - -If you need to conditionally register a model in the configuration, such as only when specific environment variable(s) are set, this can be accomplished by utilizing a special `__disabled__` string as the default value of an environment variable substitution, as shown below: - -```yaml -models: -- metadata: {} - model_id: ${env.INFERENCE_MODEL:__disabled__} - provider_id: ollama - provider_model_id: ${env.INFERENCE_MODEL:__disabled__} -``` - -The snippet above will only register this model if the environment variable `INFERENCE_MODEL` is set and non-empty. If the environment variable is not set, the model will not get registered at all. 
- -## Server Configuration - -The `server` section configures the HTTP server that serves the Llama Stack APIs: - -```yaml -server: - port: 8321 # Port to listen on (default: 8321) - tls_certfile: "/path/to/cert.pem" # Optional: Path to TLS certificate for HTTPS - tls_keyfile: "/path/to/key.pem" # Optional: Path to TLS key for HTTPS - cors: true # Optional: Enable CORS (dev mode) or full config object -``` - -### CORS Configuration - -CORS (Cross-Origin Resource Sharing) can be configured in two ways: - -**Local development** (allows localhost origins only): -```yaml -server: - cors: true -``` - -**Explicit configuration** (custom origins and settings): -```yaml -server: - cors: - allow_origins: ["https://myapp.com", "https://app.example.com"] - allow_methods: ["GET", "POST", "PUT", "DELETE"] - allow_headers: ["Content-Type", "Authorization"] - allow_credentials: true - max_age: 3600 -``` - -When `cors: true`, the server enables secure localhost-only access for local development. For production, specify exact origins to maintain security. - -### Authentication Configuration - -> **Breaking Change (v0.2.14)**: The authentication configuration structure has changed. The previous format with `provider_type` and `config` fields has been replaced with a unified `provider_config` field that includes the `type` field. Update your configuration files accordingly. - -The `auth` section configures authentication for the server. 
When configured, all API requests must include a valid Bearer token in the Authorization header: - -``` -Authorization: Bearer -``` - -The server supports multiple authentication providers: - -#### OAuth 2.0/OpenID Connect Provider with Kubernetes - -The server can be configured to use service account tokens for authorization, validating these against the Kubernetes API server, e.g.: -```yaml -server: - auth: - provider_config: - type: "oauth2_token" - jwks: - uri: "https://kubernetes.default.svc:8443/openid/v1/jwks" - token: "${env.TOKEN:+}" - key_recheck_period: 3600 - tls_cafile: "/path/to/ca.crt" - issuer: "https://kubernetes.default.svc" - audience: "https://kubernetes.default.svc" -``` - -To find your cluster's jwks uri (from which the public key(s) to verify the token signature are obtained), run: -``` -kubectl get --raw /.well-known/openid-configuration| jq -r .jwks_uri -``` - -For the tls_cafile, you can use the CA certificate of the OIDC provider: -```bash -kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}' -``` - -For the issuer, you can use the OIDC provider's URL: -```bash -kubectl get --raw /.well-known/openid-configuration| jq .issuer -``` - -The audience can be obtained from a token, e.g. run: -```bash -kubectl create token default --duration=1h | cut -d. -f2 | base64 -d | jq .aud -``` - -The jwks token is used to authorize access to the jwks endpoint. You can obtain a token by running: - -```bash -kubectl create namespace llama-stack -kubectl create serviceaccount llama-stack-auth -n llama-stack -kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token -export TOKEN=$(cat llama-stack-auth-token) -``` - -Alternatively, you can configure the jwks endpoint to allow anonymous access. 
To do this, make sure -the `kube-apiserver` runs with `--anonymous-auth=true` to allow unauthenticated requests -and that the correct RoleBinding is created to allow the service account to access the necessary -resources. If that is not the case, you can create a RoleBinding for the service account to access -the necessary resources: - -```yaml -# allow-anonymous-openid.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: allow-anonymous-openid -rules: -- nonResourceURLs: ["/openid/v1/jwks"] - verbs: ["get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: allow-anonymous-openid -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: allow-anonymous-openid -subjects: -- kind: User - name: system:anonymous - apiGroup: rbac.authorization.k8s.io -``` - -And then apply the configuration: -```bash -kubectl apply -f allow-anonymous-openid.yaml -``` - -The provider extracts user information from the JWT token: -- Username from the `sub` claim becomes a role -- Kubernetes groups become teams - -You can easily validate a request by running: - -```bash -curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers -``` - -#### Kubernetes Authentication Provider - -The server can be configured to use Kubernetes SelfSubjectReview API to validate tokens directly against the Kubernetes API server: - -```yaml -server: - auth: - provider_config: - type: "kubernetes" - api_server_url: "https://kubernetes.default.svc" - claims_mapping: - username: "roles" - groups: "roles" - uid: "uid_attr" - verify_tls: true - tls_cafile: "/path/to/ca.crt" -``` - -Configuration options: -- `api_server_url`: The Kubernetes API server URL (e.g., https://kubernetes.default.svc:6443) -- `verify_tls`: Whether to verify TLS certificates (default: true) -- `tls_cafile`: Path to CA certificate file for TLS verification -- `claims_mapping`: Mapping of Kubernetes user claims 
to access attributes - -The provider validates tokens by sending a SelfSubjectReview request to the Kubernetes API server at `/apis/authentication.k8s.io/v1/selfsubjectreviews`. The provider extracts user information from the response: -- Username from the `userInfo.username` field -- Groups from the `userInfo.groups` field -- UID from the `userInfo.uid` field - -To obtain a token for testing: -```bash -kubectl create namespace llama-stack -kubectl create serviceaccount llama-stack-auth -n llama-stack -kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token -``` - -You can validate a request by running: -```bash -curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers -``` - -#### GitHub Token Provider -Validates GitHub personal access tokens or OAuth tokens directly: -```yaml -server: - auth: - provider_config: - type: "github_token" - github_api_base_url: "https://api.github.com" # Or GitHub Enterprise URL -``` - -The provider fetches user information from GitHub and maps it to access attributes based on the `claims_mapping` configuration. - -#### Custom Provider -Validates tokens against a custom authentication endpoint: -```yaml -server: - auth: - provider_config: - type: "custom" - endpoint: "https://auth.example.com/validate" # URL of the auth endpoint -``` - -The custom endpoint receives a POST request with: -```json -{ - "api_key": "", - "request": { - "path": "/api/v1/endpoint", - "headers": { - "content-type": "application/json", - "user-agent": "curl/7.64.1" - }, - "params": { - "key": ["value"] - } - } -} -``` - -And must respond with: -```json -{ - "access_attributes": { - "roles": ["admin", "user"], - "teams": ["ml-team", "nlp-team"], - "projects": ["llama-3", "project-x"], - "namespaces": ["research"] - }, - "message": "Authentication successful" -} -``` - -If no access attributes are returned, the token is used as a namespace. 
- -### Access control - -When authentication is enabled, access to resources is controlled -through the `access_policy` attribute of the auth config section under -server. The value for this is a list of access rules. - -Each access rule defines a list of actions either to permit or to -forbid. It may specify a principal or a resource that must match for -the rule to take effect. - -Valid actions are create, read, update, and delete. The resource to -match should be specified in the form of a type qualified identifier, -e.g. model::my-model or vector_db::some-db, or a wildcard for all -resources of a type, e.g. model::*. If the principal or resource are -not specified, they will match all requests. - -The valid resource types are model, shield, vector_db, dataset, -scoring_function, benchmark, tool, tool_group and session. - -A rule may also specify a condition, either a 'when' or an 'unless', -with additional constraints as to where the rule applies. The -constraints supported at present are: - - - 'user with in ' - - 'user with not in ' - - 'user is owner' - - 'user is not owner' - - 'user in owners ' - - 'user not in owners ' - -The attributes defined for a user will depend on how the auth -configuration is defined. - -When checking whether a particular action is allowed by the current -user for a resource, all the defined rules are tested in order to find -a match. If a match is found, the request is permitted or forbidden -depending on the type of rule. If no match is found, the request is -denied. - -If no explicit rules are specified, a default policy is defined with -which all users can access all resources defined in config but -resources created dynamically can only be accessed by the user that -created them. 
- -Examples: - -The following restricts access to particular github users: - -```yaml -server: - auth: - provider_config: - type: "github_token" - github_api_base_url: "https://api.github.com" - access_policy: - - permit: - principal: user-1 - actions: [create, read, delete] - description: user-1 has full access to all resources - - permit: - principal: user-2 - actions: [read] - resource: model::model-1 - description: user-2 has read access to model-1 only -``` - -Similarly, the following restricts access to particular kubernetes -service accounts: - -```yaml -server: - auth: - provider_config: - type: "oauth2_token" - audience: https://kubernetes.default.svc.cluster.local - issuer: https://kubernetes.default.svc.cluster.local - tls_cafile: /home/gsim/.minikube/ca.crt - jwks: - uri: https://kubernetes.default.svc.cluster.local:8443/openid/v1/jwks - token: ${env.TOKEN} - access_policy: - - permit: - principal: system:serviceaccount:my-namespace:my-serviceaccount - actions: [create, read, delete] - description: specific serviceaccount has full access to all resources - - permit: - principal: system:serviceaccount:default:default - actions: [read] - resource: model::model-1 - description: default account has read access to model-1 only -``` - -The following policy, which assumes that users are defined with roles -and teams by whichever authentication system is in use, allows any -user with a valid token to use models, create resources other than -models, read and delete resources they created and read resources -created by users sharing a team with them: - -``` - access_policy: - - permit: - actions: [read] - resource: model::* - description: all users have read access to models - - forbid: - actions: [create, delete] - resource: model::* - unless: user with admin in roles - description: only user with admin role can create or delete models - - permit: - actions: [create, read, delete] - when: user is owner - description: users can create resources other than models 
and read and delete those they own - - permit: - actions: [read] - when: user in owner teams - description: any user has read access to any resource created by a user with the same team -``` - -#### API Endpoint Authorization with Scopes - -In addition to resource-based access control, Llama Stack supports endpoint-level authorization using OAuth 2.0 style scopes. When authentication is enabled, specific API endpoints require users to have particular scopes in their authentication token. - -**Scope-Gated APIs:** -The following APIs are currently gated by scopes: - -- **Telemetry API** (scope: `telemetry.read`): - - `POST /telemetry/traces` - Query traces - - `GET /telemetry/traces/{trace_id}` - Get trace by ID - - `GET /telemetry/traces/{trace_id}/spans/{span_id}` - Get span by ID - - `POST /telemetry/spans/{span_id}/tree` - Get span tree - - `POST /telemetry/spans` - Query spans - - `POST /telemetry/metrics/{metric_name}` - Query metrics - -**Authentication Configuration:** - -For **JWT/OAuth2 providers**, scopes should be included in the JWT's claims: -```json -{ - "sub": "user123", - "scope": "telemetry.read", - "aud": "llama-stack" -} -``` - -For **custom authentication providers**, the endpoint must return user attributes including the `scopes` array: -```json -{ - "principal": "user123", - "attributes": { - "scopes": ["telemetry.read"] - } -} -``` - -**Behavior:** -- Users without the required scope receive a 403 Forbidden response -- When authentication is disabled, scope checks are bypassed -- Endpoints without `required_scope` work normally for all authenticated users - -### Quota Configuration - -The `quota` section allows you to enable server-side request throttling for both -authenticated and anonymous clients. This is useful for preventing abuse, enforcing -fairness across tenants, and controlling infrastructure costs without requiring -client-side rate limiting or external proxies. - -Quotas are disabled by default. 
When enabled, each client is tracked using either: - -* Their authenticated `client_id` (derived from the Bearer token), or -* Their IP address (fallback for anonymous requests) - -Quota state is stored in a SQLite-backed key-value store, and rate limits are applied -within a configurable time window (currently only `day` is supported). - -#### Example - -```yaml -server: - quota: - kvstore: - type: sqlite - db_path: ./quotas.db - anonymous_max_requests: 100 - authenticated_max_requests: 1000 - period: day -``` - -#### Configuration Options - -| Field | Description | -| ---------------------------- | -------------------------------------------------------------------------- | -| `kvstore` | Required. Backend storage config for tracking request counts. | -| `kvstore.type` | Must be `"sqlite"` for now. Other backends may be supported in the future. | -| `kvstore.db_path` | File path to the SQLite database. | -| `anonymous_max_requests` | Max requests per period for unauthenticated clients. | -| `authenticated_max_requests` | Max requests per period for authenticated clients. | -| `period` | Time window for quota enforcement. Only `"day"` is supported. | - -> Note: if `authenticated_max_requests` is set but no authentication provider is -configured, the server will fall back to applying `anonymous_max_requests` to all -clients. - -#### Example with Authentication Enabled - -```yaml -server: - port: 8321 - auth: - provider_config: - type: custom - endpoint: https://auth.example.com/validate - quota: - kvstore: - type: sqlite - db_path: ./quotas.db - anonymous_max_requests: 100 - authenticated_max_requests: 1000 - period: day -``` - -If a client exceeds their limit, the server responds with: - -```http -HTTP/1.1 429 Too Many Requests -Content-Type: application/json - -{ - "error": { - "message": "Quota exceeded" - } -} -``` - -### CORS Configuration - -Configure CORS to allow web browsers to make requests from different domains. Disabled by default. 
- -#### Quick Setup - -For development, use the simple boolean flag: - -```yaml -server: - cors: true # Auto-enables localhost with any port -``` - -This automatically allows `http://localhost:*` and `https://localhost:*` with secure defaults. - -#### Custom Configuration - -For specific origins and full control: - -```yaml -server: - cors: - allow_origins: ["https://myapp.com", "https://staging.myapp.com"] - allow_credentials: true - allow_methods: ["GET", "POST", "PUT", "DELETE"] - allow_headers: ["Content-Type", "Authorization"] - allow_origin_regex: "https://.*\\.example\\.com" # Optional regex pattern - expose_headers: ["X-Total-Count"] - max_age: 86400 -``` - -#### Configuration Options - -| Field | Description | Default | -| -------------------- | ---------------------------------------------- | ------- | -| `allow_origins` | List of allowed origins. Use `["*"]` for any. | `["*"]` | -| `allow_origin_regex` | Regex pattern for allowed origins (optional). | `None` | -| `allow_methods` | Allowed HTTP methods. | `["*"]` | -| `allow_headers` | Allowed headers. | `["*"]` | -| `allow_credentials` | Allow credentials (cookies, auth headers). | `false` | -| `expose_headers` | Headers exposed to browser. | `[]` | -| `max_age` | Preflight cache time (seconds). | `600` | - -**Security Notes**: -- `allow_credentials: true` requires explicit origins (no wildcards) -- `cors: true` enables localhost access only (secure for development) -- For public APIs, always specify exact allowed origins - -## Extending to handle Safety - -Configuring Safety can be a little involved so it is instructive to go through an example. - -The Safety API works with the associated Resource called a `Shield`. Providers can support various kinds of Shields. 
Good examples include the [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/) system-safety models, or [Bedrock Guardrails](https://aws.amazon.com/bedrock/guardrails/). - -To configure a Bedrock Shield, you would need to add: -- A Safety API provider instance with type `remote::bedrock` -- A Shield resource served by this provider. - -```yaml -... -providers: - safety: - - provider_id: bedrock - provider_type: remote::bedrock - config: - aws_access_key_id: ${env.AWS_ACCESS_KEY_ID} - aws_secret_access_key: ${env.AWS_SECRET_ACCESS_KEY} -... -shields: -- provider_id: bedrock - params: - guardrailVersion: ${env.GUARDRAIL_VERSION} - provider_shield_id: ${env.GUARDRAIL_ID} -... -``` - -The situation is more involved if the Shield needs _Inference_ of an associated model. This is the case with Llama Guard. In that case, you would need to add: -- A Safety API provider instance with type `inline::llama-guard` -- An Inference API provider instance for serving the model. -- A Model resource associated with this provider. -- A Shield resource served by the Safety provider. - -The yaml configuration for this setup, assuming you were using vLLM as your inference server, would look like: -```yaml -... -providers: - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: {} - inference: - # this vLLM server serves the "normal" inference model (e.g., llama3.2:3b) - - provider_id: vllm-0 - provider_type: remote::vllm - config: - url: ${env.VLLM_URL:=http://localhost:8000} - # this vLLM server serves the llama-guard model (e.g., llama-guard:3b) - - provider_id: vllm-1 - provider_type: remote::vllm - config: - url: ${env.SAFETY_VLLM_URL:=http://localhost:8001} -... 
-models: -- metadata: {} - model_id: ${env.INFERENCE_MODEL} - provider_id: vllm-0 - provider_model_id: null -- metadata: {} - model_id: ${env.SAFETY_MODEL} - provider_id: vllm-1 - provider_model_id: null -shields: -- provider_id: llama-guard - shield_id: ${env.SAFETY_MODEL} # Llama Guard shields are identified by the corresponding LlamaGuard model - provider_shield_id: null -... -``` diff --git a/docs/source/distributions/customizing_run_yaml.md b/docs/source/distributions/customizing_run_yaml.md deleted file mode 100644 index 10067bab7..000000000 --- a/docs/source/distributions/customizing_run_yaml.md +++ /dev/null @@ -1,40 +0,0 @@ -# Customizing run.yaml Files - -The `run.yaml` files generated by Llama Stack templates are **starting points** designed to be customized for your specific needs. They are not meant to be used as-is in production environments. - -## Key Points - -- **Templates are starting points**: Generated `run.yaml` files contain defaults for development/testing -- **Customization expected**: Update URLs, credentials, models, and settings for your environment -- **Version control separately**: Keep customized configs in your own repository -- **Environment-specific**: Create different configurations for dev, staging, production - -## What You Can Customize - -You can customize: -- **Provider endpoints**: Change `http://localhost:8000` to your actual servers -- **Swap providers**: Replace default providers (e.g., swap Tavily with Brave for search) -- **Storage paths**: Move from `/tmp/` to production directories -- **Authentication**: Add API keys, SSL, timeouts -- **Models**: Different model sizes for dev vs prod -- **Database settings**: Switch from SQLite to PostgreSQL -- **Tool configurations**: Add custom tools and integrations - -## Best Practices - -- Use environment variables for secrets and environment-specific values -- Create separate `run.yaml` files for different environments (dev, staging, prod) -- Document your changes with comments 
-- Test configurations before deployment -- Keep your customized configs in version control - -Example structure: -``` -your-project/ -โ”œโ”€โ”€ configs/ -โ”‚ โ”œโ”€โ”€ dev-run.yaml -โ”‚ โ”œโ”€โ”€ prod-run.yaml -โ””โ”€โ”€ README.md -``` - -The goal is to take the generated template and adapt it to your specific infrastructure and operational needs. \ No newline at end of file diff --git a/docs/source/distributions/eks/apply.sh b/docs/source/distributions/eks/apply.sh deleted file mode 100755 index 3ad3dd263..000000000 --- a/docs/source/distributions/eks/apply.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -set -euo pipefail - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -K8S_DIR="${SCRIPT_DIR}/../k8s" - -echo "Setting up AWS EKS-specific storage class..." -kubectl apply -f gp3-topology-aware.yaml - -echo "Running main Kubernetes deployment..." 
-cd "${K8S_DIR}" -./apply.sh "$@" diff --git a/docs/source/distributions/eks/gp3-topology-aware.yaml b/docs/source/distributions/eks/gp3-topology-aware.yaml deleted file mode 100644 index 1192ba18c..000000000 --- a/docs/source/distributions/eks/gp3-topology-aware.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Set up default storage class on AWS EKS -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: gp3-topology-aware - annotations: - storageclass.kubernetes.io/is-default-class: "true" -parameters: - type: gp3 - iops: "3000" - throughput: "125" -provisioner: ebs.csi.aws.com -reclaimPolicy: Delete -volumeBindingMode: WaitForFirstConsumer -allowVolumeExpansion: true diff --git a/docs/source/distributions/importing_as_library.md b/docs/source/distributions/importing_as_library.md deleted file mode 100644 index 9993be227..000000000 --- a/docs/source/distributions/importing_as_library.md +++ /dev/null @@ -1,34 +0,0 @@ -# Using Llama Stack as a Library - -## Setup Llama Stack without a Server -If you are planning to use an external service for Inference (even Ollama or TGI counts as external), it is often easier to use Llama Stack as a library. -This avoids the overhead of setting up a server. -```bash -# setup -uv pip install llama-stack -llama stack build --distro starter --image-type venv -``` - -```python -from llama_stack.core.library_client import LlamaStackAsLibraryClient - -client = LlamaStackAsLibraryClient( - "starter", - # provider_data is optional, but if you need to pass in any provider specific data, you can do so here. - provider_data={"tavily_search_api_key": os.environ["TAVILY_SEARCH_API_KEY"]}, -) -``` - -This will parse your config and set up any inline implementations and remote clients needed for your implementation. 
- -Then, you can access the APIs like `models` and `inference` on the client and call their methods directly: - -```python -response = client.models.list() -``` - -If you've created a [custom distribution](building_distro.md), you can also use the run.yaml configuration file directly: - -```python -client = LlamaStackAsLibraryClient(config_path) -``` diff --git a/docs/source/distributions/index.md b/docs/source/distributions/index.md deleted file mode 100644 index 2a702c282..000000000 --- a/docs/source/distributions/index.md +++ /dev/null @@ -1,15 +0,0 @@ -# Distributions Overview - -A distribution is a pre-packaged set of Llama Stack components that can be deployed together. - -This section provides an overview of the distributions available in Llama Stack. - -```{toctree} -:maxdepth: 3 -list_of_distributions -building_distro -customizing_run_yaml -starting_llama_stack_server -importing_as_library -configuration -``` diff --git a/docs/source/distributions/k8s/apply.sh b/docs/source/distributions/k8s/apply.sh deleted file mode 100755 index 1b5b26863..000000000 --- a/docs/source/distributions/k8s/apply.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -export POSTGRES_USER=llamastack -export POSTGRES_DB=llamastack -export POSTGRES_PASSWORD=llamastack - -export INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct -export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B - -# HF_TOKEN should be set by the user; base64 encode it for the secret -if [ -n "${HF_TOKEN:-}" ]; then - export HF_TOKEN_BASE64=$(echo -n "$HF_TOKEN" | base64) -else - echo "ERROR: HF_TOKEN not set. You need it for vLLM to download models from Hugging Face." - exit 1 -fi - -if [ -z "${GITHUB_CLIENT_ID:-}" ]; then - echo "ERROR: GITHUB_CLIENT_ID not set. You need it for Github login to work. 
See the Kubernetes Deployment Guide in the Llama Stack documentation." - exit 1 -fi - -if [ -z "${GITHUB_CLIENT_SECRET:-}" ]; then - echo "ERROR: GITHUB_CLIENT_SECRET not set. You need it for Github login to work. See the Kubernetes Deployment Guide in the Llama Stack documentation." - exit 1 -fi - -if [ -z "${LLAMA_STACK_UI_URL:-}" ]; then - echo "ERROR: LLAMA_STACK_UI_URL not set. Should be set to the external URL of the UI (excluding port). You need it for Github login to work. See the Kubernetes Deployment Guide in the Llama Stack documentation." - exit 1 -fi - - - - -set -euo pipefail -set -x - -# Apply the HF token secret if HF_TOKEN is provided -if [ -n "${HF_TOKEN:-}" ]; then - envsubst < ./hf-token-secret.yaml.template | kubectl apply -f - -fi - -envsubst < ./vllm-k8s.yaml.template | kubectl apply -f - -envsubst < ./vllm-safety-k8s.yaml.template | kubectl apply -f - -envsubst < ./postgres-k8s.yaml.template | kubectl apply -f - -envsubst < ./chroma-k8s.yaml.template | kubectl apply -f - - -kubectl create configmap llama-stack-config --from-file=stack_run_config.yaml \ - --dry-run=client -o yaml > stack-configmap.yaml - -kubectl apply -f stack-configmap.yaml - -envsubst < ./stack-k8s.yaml.template | kubectl apply -f - -envsubst < ./ingress-k8s.yaml.template | kubectl apply -f - - -envsubst < ./ui-k8s.yaml.template | kubectl apply -f - diff --git a/docs/source/distributions/k8s/chroma-k8s.yaml.template b/docs/source/distributions/k8s/chroma-k8s.yaml.template deleted file mode 100644 index a2a5e3be3..000000000 --- a/docs/source/distributions/k8s/chroma-k8s.yaml.template +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: chromadb-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 20Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: chromadb -spec: - replicas: 1 - selector: - matchLabels: - app: chromadb - template: - metadata: - labels: - app: chromadb - spec: - containers: 
- - name: chromadb - image: chromadb/chroma:latest - ports: - - containerPort: 6000 - env: - - name: CHROMA_HOST - value: "0.0.0.0" - - name: CHROMA_PORT - value: "6000" - - name: PERSIST_DIRECTORY - value: "/chroma/chroma" - - name: CHROMA_DB_IMPL - value: "duckdb+parquet" - resources: - requests: - memory: "512Mi" - cpu: "250m" - limits: - memory: "2Gi" - cpu: "1000m" - volumeMounts: - - name: chromadb-storage - mountPath: /chroma/chroma - volumes: - - name: chromadb-storage - persistentVolumeClaim: - claimName: chromadb-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: chromadb -spec: - selector: - app: chromadb - ports: - - protocol: TCP - port: 6000 - targetPort: 6000 - type: ClusterIP diff --git a/docs/source/distributions/k8s/hf-token-secret.yaml.template b/docs/source/distributions/k8s/hf-token-secret.yaml.template deleted file mode 100644 index b6db8e7bc..000000000 --- a/docs/source/distributions/k8s/hf-token-secret.yaml.template +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: hf-token-secret -type: Opaque -data: - token: ${HF_TOKEN_BASE64} diff --git a/docs/source/distributions/k8s/ingress-k8s.yaml.template b/docs/source/distributions/k8s/ingress-k8s.yaml.template deleted file mode 100644 index 9ebe86b69..000000000 --- a/docs/source/distributions/k8s/ingress-k8s.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: llama-stack-service -spec: - type: LoadBalancer - selector: - app.kubernetes.io/name: llama-stack - ports: - - name: llama-stack-api - port: 8321 - targetPort: 8321 - protocol: TCP - - name: llama-stack-ui - port: 8322 - targetPort: 8322 - protocol: TCP diff --git a/docs/source/distributions/k8s/postgres-k8s.yaml.template b/docs/source/distributions/k8s/postgres-k8s.yaml.template deleted file mode 100644 index 86a765652..000000000 --- a/docs/source/distributions/k8s/postgres-k8s.yaml.template +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: 
PersistentVolumeClaim -metadata: - name: postgres-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgres -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: postgres - template: - metadata: - labels: - app.kubernetes.io/name: postgres - spec: - containers: - - name: postgres - image: postgres:15 - env: - - name: POSTGRES_DB - value: "${POSTGRES_DB}" - - name: POSTGRES_USER - value: "${POSTGRES_USER}" - - name: POSTGRES_PASSWORD - value: "${POSTGRES_PASSWORD}" - - name: PGDATA - value: "/var/lib/postgresql/data/pgdata" - ports: - - containerPort: 5432 - resources: - requests: - memory: "512Mi" - cpu: "250m" - limits: - memory: "1Gi" - cpu: "500m" - volumeMounts: - - name: postgres-storage - mountPath: /var/lib/postgresql/data - volumes: - - name: postgres-storage - persistentVolumeClaim: - claimName: postgres-pvc ---- -apiVersion: v1 -kind: Service -metadata: - name: postgres-server -spec: - selector: - app.kubernetes.io/name: postgres - ports: - - protocol: TCP - port: 5432 - targetPort: 5432 - type: ClusterIP diff --git a/docs/source/distributions/k8s/stack-configmap.yaml b/docs/source/distributions/k8s/stack-configmap.yaml deleted file mode 100644 index 3dbb0da97..000000000 --- a/docs/source/distributions/k8s/stack-configmap.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: v1 -data: - stack_run_config.yaml: "version: '2'\nimage_name: kubernetes-demo\napis:\n- agents\n- - inference\n- files\n- safety\n- telemetry\n- tool_runtime\n- vector_io\nproviders:\n - \ inference:\n - provider_id: vllm-inference\n provider_type: remote::vllm\n - \ config:\n url: ${env.VLLM_URL:=http://localhost:8000/v1}\n max_tokens: - ${env.VLLM_MAX_TOKENS:=4096}\n api_token: ${env.VLLM_API_TOKEN:=fake}\n tls_verify: - ${env.VLLM_TLS_VERIFY:=true}\n - provider_id: vllm-safety\n provider_type: - remote::vllm\n config:\n url: 
${env.VLLM_SAFETY_URL:=http://localhost:8000/v1}\n - \ max_tokens: ${env.VLLM_MAX_TOKENS:=4096}\n api_token: ${env.VLLM_API_TOKEN:=fake}\n - \ tls_verify: ${env.VLLM_TLS_VERIFY:=true}\n - provider_id: sentence-transformers\n - \ provider_type: inline::sentence-transformers\n config: {}\n vector_io:\n - \ - provider_id: ${env.ENABLE_CHROMADB:+chromadb}\n provider_type: remote::chromadb\n - \ config:\n url: ${env.CHROMADB_URL:=}\n kvstore:\n type: postgres\n - \ host: ${env.POSTGRES_HOST:=localhost}\n port: ${env.POSTGRES_PORT:=5432}\n - \ db: ${env.POSTGRES_DB:=llamastack}\n user: ${env.POSTGRES_USER:=llamastack}\n - \ password: ${env.POSTGRES_PASSWORD:=llamastack}\n files:\n - provider_id: - meta-reference-files\n provider_type: inline::localfs\n config:\n storage_dir: - ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}\n metadata_store:\n - \ type: sqlite\n db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db - \ \n safety:\n - provider_id: llama-guard\n provider_type: inline::llama-guard\n - \ config:\n excluded_categories: []\n agents:\n - provider_id: meta-reference\n - \ provider_type: inline::meta-reference\n config:\n persistence_store:\n - \ type: postgres\n host: ${env.POSTGRES_HOST:=localhost}\n port: - ${env.POSTGRES_PORT:=5432}\n db: ${env.POSTGRES_DB:=llamastack}\n user: - ${env.POSTGRES_USER:=llamastack}\n password: ${env.POSTGRES_PASSWORD:=llamastack}\n - \ responses_store:\n type: postgres\n host: ${env.POSTGRES_HOST:=localhost}\n - \ port: ${env.POSTGRES_PORT:=5432}\n db: ${env.POSTGRES_DB:=llamastack}\n - \ user: ${env.POSTGRES_USER:=llamastack}\n password: ${env.POSTGRES_PASSWORD:=llamastack}\n - \ telemetry:\n - provider_id: meta-reference\n provider_type: inline::meta-reference\n - \ config:\n service_name: \"${env.OTEL_SERVICE_NAME:=\\u200B}\"\n sinks: - ${env.TELEMETRY_SINKS:=console}\n tool_runtime:\n - provider_id: brave-search\n - \ provider_type: remote::brave-search\n config:\n api_key: 
${env.BRAVE_SEARCH_API_KEY:+}\n - \ max_results: 3\n - provider_id: tavily-search\n provider_type: remote::tavily-search\n - \ config:\n api_key: ${env.TAVILY_SEARCH_API_KEY:+}\n max_results: - 3\n - provider_id: rag-runtime\n provider_type: inline::rag-runtime\n config: - {}\n - provider_id: model-context-protocol\n provider_type: remote::model-context-protocol\n - \ config: {}\nmetadata_store:\n type: postgres\n host: ${env.POSTGRES_HOST:=localhost}\n - \ port: ${env.POSTGRES_PORT:=5432}\n db: ${env.POSTGRES_DB:=llamastack}\n user: - ${env.POSTGRES_USER:=llamastack}\n password: ${env.POSTGRES_PASSWORD:=llamastack}\n - \ table_name: llamastack_kvstore\ninference_store:\n type: postgres\n host: - ${env.POSTGRES_HOST:=localhost}\n port: ${env.POSTGRES_PORT:=5432}\n db: ${env.POSTGRES_DB:=llamastack}\n - \ user: ${env.POSTGRES_USER:=llamastack}\n password: ${env.POSTGRES_PASSWORD:=llamastack}\nmodels:\n- - metadata:\n embedding_dimension: 384\n model_id: all-MiniLM-L6-v2\n provider_id: - sentence-transformers\n model_type: embedding\n- metadata: {}\n model_id: ${env.INFERENCE_MODEL}\n - \ provider_id: vllm-inference\n model_type: llm\n- metadata: {}\n model_id: - ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}\n provider_id: vllm-safety\n - \ model_type: llm\nshields:\n- shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}\nvector_dbs: - []\ndatasets: []\nscoring_fns: []\nbenchmarks: []\ntool_groups:\n- toolgroup_id: - builtin::websearch\n provider_id: tavily-search\n- toolgroup_id: builtin::rag\n - \ provider_id: rag-runtime\nserver:\n port: 8321\n auth:\n provider_config:\n - \ type: github_token\n" -kind: ConfigMap -metadata: - creationTimestamp: null - name: llama-stack-config diff --git a/docs/source/distributions/k8s/stack-k8s.yaml.template b/docs/source/distributions/k8s/stack-k8s.yaml.template deleted file mode 100644 index dfc049f4f..000000000 --- a/docs/source/distributions/k8s/stack-k8s.yaml.template +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: v1 
-kind: PersistentVolumeClaim -metadata: - name: llama-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: llama-stack-server -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: llama-stack - app.kubernetes.io/component: server - template: - metadata: - labels: - app.kubernetes.io/name: llama-stack - app.kubernetes.io/component: server - spec: - containers: - - name: llama-stack - image: llamastack/distribution-starter:latest - imagePullPolicy: Always # since we have specified latest instead of a version - env: - - name: ENABLE_CHROMADB - value: "true" - - name: CHROMADB_URL - value: http://chromadb.default.svc.cluster.local:6000 - - name: VLLM_URL - value: http://vllm-server.default.svc.cluster.local:8000/v1 - - name: VLLM_MAX_TOKENS - value: "3072" - - name: VLLM_SAFETY_URL - value: http://vllm-server-safety.default.svc.cluster.local:8001/v1 - - name: VLLM_TLS_VERIFY - value: "false" - - name: POSTGRES_HOST - value: postgres-server.default.svc.cluster.local - - name: POSTGRES_PORT - value: "5432" - - name: INFERENCE_MODEL - value: "${INFERENCE_MODEL}" - - name: SAFETY_MODEL - value: "${SAFETY_MODEL}" - - name: TAVILY_SEARCH_API_KEY - value: "${TAVILY_SEARCH_API_KEY}" - command: ["python", "-m", "llama_stack.core.server.server", "/etc/config/stack_run_config.yaml", "--port", "8321"] - ports: - - containerPort: 8321 - volumeMounts: - - name: llama-storage - mountPath: /root/.llama - - name: llama-config - mountPath: /etc/config - volumes: - - name: llama-storage - persistentVolumeClaim: - claimName: llama-pvc - - name: llama-config - configMap: - name: llama-stack-config diff --git a/docs/source/distributions/k8s/stack_run_config.yaml b/docs/source/distributions/k8s/stack_run_config.yaml deleted file mode 100644 index b841ab977..000000000 --- a/docs/source/distributions/k8s/stack_run_config.yaml +++ /dev/null @@ -1,140 +0,0 @@ -version: '2' 
-image_name: kubernetes-demo -apis: -- agents -- inference -- files -- safety -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: vllm-inference - provider_type: remote::vllm - config: - url: ${env.VLLM_URL:=http://localhost:8000/v1} - max_tokens: ${env.VLLM_MAX_TOKENS:=4096} - api_token: ${env.VLLM_API_TOKEN:=fake} - tls_verify: ${env.VLLM_TLS_VERIFY:=true} - - provider_id: vllm-safety - provider_type: remote::vllm - config: - url: ${env.VLLM_SAFETY_URL:=http://localhost:8000/v1} - max_tokens: ${env.VLLM_MAX_TOKENS:=4096} - api_token: ${env.VLLM_API_TOKEN:=fake} - tls_verify: ${env.VLLM_TLS_VERIFY:=true} - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - config: {} - vector_io: - - provider_id: ${env.ENABLE_CHROMADB:+chromadb} - provider_type: remote::chromadb - config: - url: ${env.CHROMADB_URL:=} - kvstore: - type: postgres - host: ${env.POSTGRES_HOST:=localhost} - port: ${env.POSTGRES_PORT:=5432} - db: ${env.POSTGRES_DB:=llamastack} - user: ${env.POSTGRES_USER:=llamastack} - password: ${env.POSTGRES_PASSWORD:=llamastack} - files: - - provider_id: meta-reference-files - provider_type: inline::localfs - config: - storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files} - metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: postgres - host: ${env.POSTGRES_HOST:=localhost} - port: ${env.POSTGRES_PORT:=5432} - db: ${env.POSTGRES_DB:=llamastack} - user: ${env.POSTGRES_USER:=llamastack} - password: ${env.POSTGRES_PASSWORD:=llamastack} - responses_store: - type: postgres - host: ${env.POSTGRES_HOST:=localhost} - port: ${env.POSTGRES_PORT:=5432} - db: ${env.POSTGRES_DB:=llamastack} - user: 
${env.POSTGRES_USER:=llamastack} - password: ${env.POSTGRES_PASSWORD:=llamastack} - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=console} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:+} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:+} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime - config: {} - - provider_id: model-context-protocol - provider_type: remote::model-context-protocol - config: {} -metadata_store: - type: postgres - host: ${env.POSTGRES_HOST:=localhost} - port: ${env.POSTGRES_PORT:=5432} - db: ${env.POSTGRES_DB:=llamastack} - user: ${env.POSTGRES_USER:=llamastack} - password: ${env.POSTGRES_PASSWORD:=llamastack} - table_name: llamastack_kvstore -inference_store: - type: postgres - host: ${env.POSTGRES_HOST:=localhost} - port: ${env.POSTGRES_PORT:=5432} - db: ${env.POSTGRES_DB:=llamastack} - user: ${env.POSTGRES_USER:=llamastack} - password: ${env.POSTGRES_PASSWORD:=llamastack} -models: -- metadata: - embedding_dimension: 384 - model_id: all-MiniLM-L6-v2 - provider_id: sentence-transformers - model_type: embedding -- metadata: {} - model_id: ${env.INFERENCE_MODEL} - provider_id: vllm-inference - model_type: llm -- metadata: {} - model_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B} - provider_id: vllm-safety - model_type: llm -shields: -- shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B} -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: tavily-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 - auth: - provider_config: - type: github_token diff --git 
a/docs/source/distributions/k8s/ui-k8s.yaml.template b/docs/source/distributions/k8s/ui-k8s.yaml.template deleted file mode 100644 index a6859cb86..000000000 --- a/docs/source/distributions/k8s/ui-k8s.yaml.template +++ /dev/null @@ -1,68 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: llama-stack-ui - labels: - app.kubernetes.io/name: llama-stack - app.kubernetes.io/component: ui -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: llama-stack - app.kubernetes.io/component: ui - template: - metadata: - labels: - app.kubernetes.io/name: llama-stack - app.kubernetes.io/component: ui - spec: - containers: - - name: llama-stack-ui - image: node:18-alpine - command: ["/bin/sh"] - env: - - name: LLAMA_STACK_BACKEND_URL - value: "http://llama-stack-service:8321" - - name: LLAMA_STACK_UI_PORT - value: "8322" - - name: GITHUB_CLIENT_ID - value: "${GITHUB_CLIENT_ID}" - - name: GITHUB_CLIENT_SECRET - value: "${GITHUB_CLIENT_SECRET}" - - name: NEXTAUTH_URL - value: "${LLAMA_STACK_UI_URL}:8322" - args: - - -c - - | - # Install git (not included in alpine by default) - apk add --no-cache git - - # Clone the repository - echo "Cloning repository..." - git clone https://github.com/meta-llama/llama-stack.git /app - - # Navigate to the UI directory - echo "Navigating to UI directory..." - cd /app/llama_stack/ui - - # Check if package.json exists - if [ ! -f "package.json" ]; then - echo "ERROR: package.json not found in $(pwd)" - ls -la - exit 1 - fi - - # Install dependencies with verbose output - echo "Installing dependencies..." - npm install --verbose - - # Verify next is installed - echo "Checking if next is installed..." - npx next --version || echo "Next.js not found, checking node_modules..." 
- ls -la node_modules/.bin/ | grep next || echo "No next binary found" - - npm run dev - ports: - - containerPort: 8322 - workingDir: /app diff --git a/docs/source/distributions/k8s/vllm-k8s.yaml.template b/docs/source/distributions/k8s/vllm-k8s.yaml.template deleted file mode 100644 index 22bee4bbc..000000000 --- a/docs/source/distributions/k8s/vllm-k8s.yaml.template +++ /dev/null @@ -1,70 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: vllm-models -spec: - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - resources: - requests: - storage: 50Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: vllm-server -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: vllm - template: - metadata: - labels: - app.kubernetes.io/name: vllm - workload-type: inference - spec: - nodeSelector: - eks.amazonaws.com/nodegroup: gpu - containers: - - name: vllm - image: vllm/vllm-openai:latest - command: ["/bin/sh", "-c"] - args: - - "vllm serve ${INFERENCE_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --gpu-memory-utilization 0.6 --enable-auto-tool-choice --tool-call-parser llama4_pythonic" - env: - - name: INFERENCE_MODEL - value: "${INFERENCE_MODEL}" - - name: HUGGING_FACE_HUB_TOKEN - valueFrom: - secretKeyRef: - name: hf-token-secret - key: token - ports: - - containerPort: 8000 - resources: - limits: - nvidia.com/gpu: 1 - requests: - nvidia.com/gpu: 1 - volumeMounts: - - name: llama-storage - mountPath: /root/.cache/huggingface - volumes: - - name: llama-storage - persistentVolumeClaim: - claimName: vllm-models ---- -apiVersion: v1 -kind: Service -metadata: - name: vllm-server -spec: - selector: - app.kubernetes.io/name: vllm - ports: - - protocol: TCP - port: 8000 - targetPort: 8000 - type: ClusterIP diff --git a/docs/source/distributions/k8s/vllm-safety-k8s.yaml.template b/docs/source/distributions/k8s/vllm-safety-k8s.yaml.template deleted file mode 100644 index 37b2b9a6b..000000000 --- 
a/docs/source/distributions/k8s/vllm-safety-k8s.yaml.template +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: vllm-models-safety -spec: - accessModes: - - ReadWriteOnce - volumeMode: Filesystem - resources: - requests: - storage: 30Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: vllm-server-safety -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: vllm-safety - template: - metadata: - labels: - app.kubernetes.io/name: vllm-safety - workload-type: inference - spec: - nodeSelector: - eks.amazonaws.com/nodegroup: gpu - containers: - - name: vllm-safety - image: vllm/vllm-openai:latest - command: ["/bin/sh", "-c"] - args: [ - "vllm serve ${SAFETY_MODEL} --dtype float16 --enforce-eager --max-model-len 4096 --port 8001 --gpu-memory-utilization 0.3" - ] - env: - - name: SAFETY_MODEL - value: "${SAFETY_MODEL}" - - name: HUGGING_FACE_HUB_TOKEN - valueFrom: - secretKeyRef: - name: hf-token-secret - key: token - ports: - - containerPort: 8001 - resources: - limits: - nvidia.com/gpu: 1 - requests: - nvidia.com/gpu: 1 - volumeMounts: - - name: llama-storage - mountPath: /root/.cache/huggingface - volumes: - - name: llama-storage - persistentVolumeClaim: - claimName: vllm-models-safety ---- -apiVersion: v1 -kind: Service -metadata: - name: vllm-server-safety -spec: - selector: - app.kubernetes.io/name: vllm-safety - ports: - - protocol: TCP - port: 8001 - targetPort: 8001 - type: ClusterIP diff --git a/docs/source/distributions/list_of_distributions.md b/docs/source/distributions/list_of_distributions.md deleted file mode 100644 index ee01c92c4..000000000 --- a/docs/source/distributions/list_of_distributions.md +++ /dev/null @@ -1,127 +0,0 @@ -# Available Distributions - -Llama Stack provides several pre-configured distributions to help you get started quickly. Choose the distribution that best fits your hardware and use case. 
- -## Quick Reference - -| Distribution | Use Case | Hardware Requirements | Provider | -|--------------|----------|----------------------|----------| -| `distribution-starter` | General purpose, prototyping | Any (CPU/GPU) | Ollama, Remote APIs | -| `distribution-meta-reference-gpu` | High-performance inference | GPU required | Local GPU inference | -| Remote-hosted | Production, managed service | None | Partner providers | -| iOS/Android SDK | Mobile applications | Mobile device | On-device inference | - -## Choose Your Distribution - -### ๐Ÿš€ Getting Started (Recommended for Beginners) - -**Use `distribution-starter` if you want to:** -- Prototype quickly without GPU requirements -- Use remote inference providers (Fireworks, Together, vLLM etc.) -- Run locally with Ollama for development - -```bash -docker pull llama-stack/distribution-starter -``` - -**Guides:** [Starter Distribution Guide](self_hosted_distro/starter) - -### ๐Ÿ–ฅ๏ธ Self-Hosted with GPU - -**Use `distribution-meta-reference-gpu` if you:** -- Have access to GPU hardware -- Want maximum performance and control -- Need to run inference locally - -```bash -docker pull llama-stack/distribution-meta-reference-gpu -``` - -**Guides:** [Meta Reference GPU Guide](self_hosted_distro/meta-reference-gpu) - -### ๐Ÿ–ฅ๏ธ Self-Hosted with NVIDA NeMo Microservices - -**Use `nvidia` if you:** -- Want to use Llama Stack with NVIDIA NeMo Microservices - -**Guides:** [NVIDIA Distribution Guide](self_hosted_distro/nvidia) - -### โ˜๏ธ Managed Hosting - -**Use remote-hosted endpoints if you:** -- Don't want to manage infrastructure -- Need production-ready reliability -- Prefer managed services - -**Partners:** [Fireworks.ai](https://fireworks.ai) and [Together.xyz](https://together.xyz) - -**Guides:** [Remote-Hosted Endpoints](remote_hosted_distro/index) - -### ๐Ÿ“ฑ Mobile Development - -**Use mobile SDKs if you:** -- Are building iOS or Android applications -- Need on-device inference capabilities -- Want offline 
functionality - -- [iOS SDK](ondevice_distro/ios_sdk) -- [Android SDK](ondevice_distro/android_sdk) - -### ๐Ÿ”ง Custom Solutions - -**Build your own distribution if:** -- None of the above fit your specific needs -- You need custom configurations -- You want to optimize for your specific use case - -**Guides:** [Building Custom Distributions](building_distro.md) - -## Detailed Documentation - -### Self-Hosted Distributions - -```{toctree} -:maxdepth: 1 - -self_hosted_distro/starter -self_hosted_distro/meta-reference-gpu -``` - -### Remote-Hosted Solutions - -```{toctree} -:maxdepth: 1 - -remote_hosted_distro/index -``` - -### Mobile SDKs - -```{toctree} -:maxdepth: 1 - -ondevice_distro/ios_sdk -ondevice_distro/android_sdk -``` - -## Decision Flow - -```mermaid -graph TD - A[What's your use case?] --> B{Need mobile app?} - B -->|Yes| C[Use Mobile SDKs] - B -->|No| D{Have GPU hardware?} - D -->|Yes| E[Use Meta Reference GPU] - D -->|No| F{Want managed hosting?} - F -->|Yes| G[Use Remote-Hosted] - F -->|No| H[Use Starter Distribution] -``` - -## Next Steps - -1. **Choose your distribution** from the options above -2. **Follow the setup guide** for your selected distribution -3. **Configure your providers** with API keys or local models -4. **Start building** with Llama Stack! - -For help choosing or troubleshooting, check our [Getting Started Guide](../getting_started/index.md) or [Community Support](https://github.com/llama-stack/llama-stack/discussions). diff --git a/docs/source/distributions/ondevice_distro/android_sdk.md b/docs/source/distributions/ondevice_distro/android_sdk.md deleted file mode 100644 index ad86fa5f3..000000000 --- a/docs/source/distributions/ondevice_distro/android_sdk.md +++ /dev/null @@ -1,262 +0,0 @@ -# Llama Stack Client Kotlin API Library - -We are excited to share a guide for a Kotlin Library that brings front the benefits of Llama Stack to your Android device. 
This library is a set of SDKs that provide a simple and effective way to integrate AI capabilities into your Android app whether it is local (on-device) or remote inference. - -Features: -- Local Inferencing: Run Llama models purely on-device with real-time processing. We currently utilize ExecuTorch as the local inference distributor and may support others in the future. - - [ExecuTorch](https://github.com/pytorch/executorch/tree/main) is a complete end-to-end solution within the PyTorch framework for inferencing capabilities on-device with high portability and seamless performance. -- Remote Inferencing: Perform inferencing tasks remotely with Llama models hosted on a remote connection (or serverless localhost). -- Simple Integration: With easy-to-use APIs, a developer can quickly integrate Llama Stack in their Android app. The difference with local vs remote inferencing is also minimal. - -Latest Release Notes: [link](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release) - -*Tagged releases are stable versions of the project. While we strive to maintain a stable main branch, it's not guaranteed to be free of bugs or issues.* - -## Android Demo App -Check out our demo app to see how to integrate Llama Stack into your Android app: [Android Demo App](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/examples/android_app) - -The key files in the app are `ExampleLlamaStackLocalInference.kt`, `ExampleLlamaStackRemoteInference.kts`, and `MainActivity.java`. With encompassed business logic, the app shows how to use Llama Stack for both the environments. 
- -## Quick Start - -### Add Dependencies -#### Kotlin Library -Add the following dependency in your `build.gradle.kts` file: -``` -dependencies { - implementation("com.llama.llamastack:llama-stack-client-kotlin:0.2.2") -} -``` -This will download jar files in your gradle cache in a directory like `~/.gradle/caches/modules-2/files-2.1/com.llama.llamastack/` - -If you plan on doing remote inferencing this is sufficient to get started. - -#### Dependency for Local - -For local inferencing, it is required to include the ExecuTorch library into your app. - -Include the ExecuTorch library by: -1. Download the `download-prebuilt-et-lib.sh` script file from the [llama-stack-client-kotlin-client-local](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/llama-stack-client-kotlin-client-local/download-prebuilt-et-lib.sh) directory to your local machine. -2. Move the script to the top level of your Android app where the `app` directory resides. -3. Run `sh download-prebuilt-et-lib.sh` to create an `app/libs` directory and download the `executorch.aar` in that path. This generates an ExecuTorch library for the XNNPACK delegate. -4. Add the `executorch.aar` dependency in your `build.gradle.kts` file: -``` -dependencies { - ... - implementation(files("libs/executorch.aar")) - ... -} -``` - -See other dependencies for the local RAG in Android app [README](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/examples/android_app#quick-start). - -## Llama Stack APIs in Your Android App -Breaking down the demo app, this section will show the core pieces that are used to initialize and run inference with Llama Stack using the Kotlin library. - -### Setup Remote Inferencing -Start a Llama Stack server on localhost. 
Here is an example of how you can do this using the firework.ai distribution: -``` -uv venv starter --python 3.12 -source starter/bin/activate # On Windows: starter\Scripts\activate -pip install --no-cache llama-stack==0.2.2 -llama stack build --distro starter --image-type venv -export FIREWORKS_API_KEY= -llama stack run starter --port 5050 -``` - -Ensure the Llama Stack server version is the same as the Kotlin SDK Library for maximum compatibility. - -Other inference providers: [Table](../../index.md#supported-llama-stack-implementations) - -How to set remote localhost in Demo App: [Settings](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/examples/android_app#settings) - -### Initialize the Client -A client serves as the primary interface for interacting with a specific inference type and its associated parameters. Only after client is initialized then you can configure and start inferences. - - - - - - - - - - -
Local InferenceRemote Inference
- -``` -client = LlamaStackClientLocalClient - .builder() - .modelPath(modelPath) - .tokenizerPath(tokenizerPath) - .temperature(temperature) - .build() -``` - - -``` -// remoteURL is a string like "http://localhost:5050" -client = LlamaStackClientOkHttpClient - .builder() - .baseUrl(remoteURL) - .build() -``` -
- - -### Run Inference -With the Kotlin Library managing all the major operational logic, there are minimal to no changes when running simple chat inference for local or remote: - -``` -val result = client!!.inference().chatCompletion( - InferenceChatCompletionParams.builder() - .modelId(modelName) - .messages(listOfMessages) - .build() - ) - -// response contains string with response from model -var response = result.asChatCompletionResponse().completionMessage().content().string(); -``` - -[Remote only] For inference with a streaming response: - -``` -val result = client!!.inference().chatCompletionStreaming( - InferenceChatCompletionParams.builder() - .modelId(modelName) - .messages(listOfMessages) - .build() - ) - -// Response can be received as a asChatCompletionResponseStreamChunk as part of a callback. -// See Android demo app for a detailed implementation example. -``` - -### Setup Custom Tool Calling - -Android demo app for more details: [Custom Tool Calling](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/examples/android_app#tool-calling) - -## Advanced Users - -The purpose of this section is to share more details with users that would like to dive deeper into the Llama Stack Kotlin Library. Whether youโ€™re interested in contributing to the open source library, debugging or just want to learn more, this section is for you! - -### Prerequisite - -You must complete the following steps: -1. Clone the repo (`git clone https://github.com/meta-llama/llama-stack-client-kotlin.git -b latest-release`) -2. Port the appropriate ExecuTorch libraries over into your Llama Stack Kotlin library environment. -``` -cd llama-stack-client-kotlin-client-local -sh download-prebuilt-et-lib.sh --unzip -``` - -Now you will notice that the `jni/` , `libs/`, and `AndroidManifest.xml` files from the `executorch.aar` file are present in the local module. This way the local client module will be able to realize the ExecuTorch SDK. 
- -### Building for Development/Debugging -If youโ€™d like to contribute to the Kotlin library via development, debug, or add play around with the library with various print statements, run the following command in your terminal under the llama-stack-client-kotlin directory. - -``` -sh build-libs.sh -``` - -Output: .jar files located in the build-jars directory - -Copy the .jar files over to the lib directory in your Android app. At the same time make sure to remove the llama-stack-client-kotlin dependency within your build.gradle.kts file in your app (or if you are using the demo app) to avoid having multiple llama stack client dependencies. - -### Additional Options for Local Inferencing -Currently we provide additional properties support with local inferencing. In order to get the tokens/sec metric for each inference call, add the following code in your Android app after you run your chatCompletion inference function. The Reference app has this implementation as well: -``` -var tps = (result.asChatCompletionResponse()._additionalProperties()["tps"] as JsonNumber).value as Float -``` -We will be adding more properties in the future. - -### Additional Options for Remote Inferencing - -#### Network options - -##### Retries - -Requests that experience certain errors are automatically retried 2 times by default, with a short exponential backoff. Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors will all be retried by default. -You can provide a `maxRetries` on the client builder to configure this: - -```kotlin -val client = LlamaStackClientOkHttpClient.builder() - .fromEnv() - .maxRetries(4) - .build() -``` - -##### Timeouts - -Requests time out after 1 minute by default. 
You can configure this on the client builder: - -```kotlin -val client = LlamaStackClientOkHttpClient.builder() - .fromEnv() - .timeout(Duration.ofSeconds(30)) - .build() -``` - -##### Proxies - -Requests can be routed through a proxy. You can configure this on the client builder: - -```kotlin -val client = LlamaStackClientOkHttpClient.builder() - .fromEnv() - .proxy(new Proxy( - Type.HTTP, - new InetSocketAddress("proxy.com", 8080) - )) - .build() -``` - -##### Environments - -Requests are made to the production environment by default. You can connect to other environments, like `sandbox`, via the client builder: - -```kotlin -val client = LlamaStackClientOkHttpClient.builder() - .fromEnv() - .sandbox() - .build() -``` - -### Error Handling -This library throws exceptions in a single hierarchy for easy handling: - -- **`LlamaStackClientException`** - Base exception for all exceptions - - - **`LlamaStackClientServiceException`** - HTTP errors with a well-formed response body we were able to parse. The exception message and the `.debuggingRequestId()` will be set by the server. - - | 400 | BadRequestException | - | ------ | ----------------------------- | - | 401 | AuthenticationException | - | 403 | PermissionDeniedException | - | 404 | NotFoundException | - | 422 | UnprocessableEntityException | - | 429 | RateLimitException | - | 5xx | InternalServerException | - | others | UnexpectedStatusCodeException | - - - **`LlamaStackClientIoException`** - I/O networking errors - - **`LlamaStackClientInvalidDataException`** - any other exceptions on the client side, e.g.: - - We failed to serialize the request body - - We failed to parse the response body (has access to response code and body) - -## Reporting Issues -If you encountered any bugs or issues following this guide please file a bug/issue on our [Github issue tracker](https://github.com/meta-llama/llama-stack-client-kotlin/issues). 
- -## Known Issues -We're aware of the following issues and are working to resolve them: -1. Streaming response is a work-in-progress for local and remote inference -2. Due to #1, agents are not supported at the time. LS agents only work in streaming mode -3. Changing to another model is a work in progress for local and remote platforms - -## Thanks -We'd like to extend our thanks to the ExecuTorch team for providing their support as we integrated ExecuTorch as one of the local inference distributors for Llama Stack. Checkout [ExecuTorch Github repo](https://github.com/pytorch/executorch/tree/main) for more information. - ---- - -The API interface is generated using the OpenAPI standard with [Stainless](https://www.stainlessapi.com/). diff --git a/docs/source/distributions/ondevice_distro/ios_sdk.md b/docs/source/distributions/ondevice_distro/ios_sdk.md deleted file mode 100644 index de4002eba..000000000 --- a/docs/source/distributions/ondevice_distro/ios_sdk.md +++ /dev/null @@ -1,134 +0,0 @@ -# iOS SDK - -We offer both remote and on-device use of Llama Stack in Swift via a single SDK [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift/) that contains two components: -1. LlamaStackClient for remote -2. Local Inference for on-device - -```{image} ../../../_static/remote_or_local.gif -:alt: Seamlessly switching between local, on-device inference and remote hosted inference -:width: 412px -:align: center -``` - -## Remote Only - -If you don't want to run inference on-device, then you can connect to any hosted Llama Stack distribution with #1. - -1. Add `https://github.com/meta-llama/llama-stack-client-swift/` as a Package Dependency in Xcode - -2. Add `LlamaStackClient` as a framework to your app target - -3. Call an API: - -```swift -import LlamaStackClient - -let agents = RemoteAgents(url: URL(string: "http://localhost:8321")!) 
-let request = Components.Schemas.CreateAgentTurnRequest( - agent_id: agentId, - messages: [ - .UserMessage(Components.Schemas.UserMessage( - content: .case1("Hello Llama!"), - role: .user - )) - ], - session_id: self.agenticSystemSessionId, - stream: true - ) - - for try await chunk in try await agents.createTurn(request: request) { - let payload = chunk.event.payload - // ... -``` - -Check out [iOSCalendarAssistant](https://github.com/meta-llama/llama-stack-client-swift/tree/main/examples/ios_calendar_assistant) for a complete app demo. - -## LocalInference - -LocalInference provides a local inference implementation powered by [executorch](https://github.com/pytorch/executorch/). - -Llama Stack currently supports on-device inference for iOS with Android coming soon. You can run on-device inference on Android today using [executorch](https://github.com/pytorch/executorch/tree/main/examples/demo-apps/android/LlamaDemo), PyTorchโ€™s on-device inference library. - -The APIs *work the same as remote* โ€“ย the only difference is you'll instead use the `LocalAgents` / `LocalInference` classes and pass in a `DispatchQueue`: - -```swift -private let runnerQueue = DispatchQueue(label: "org.llamastack.stacksummary") -let inference = LocalInference(queue: runnerQueue) -let agents = LocalAgents(inference: self.inference) -``` - -Check out [iOSCalendarAssistantWithLocalInf](https://github.com/meta-llama/llama-stack-client-swift/tree/main/examples/ios_calendar_assistant) for a complete app demo. - -### Installation - -We're working on making LocalInference easier to set up.ย For now, you'll need to import it via `.xcframework`: - -1. Clone the executorch submodule in this repo and its dependencies: `git submodule update --init --recursive` -1. Install [Cmake](https://cmake.org/) for the executorch build` -1. Drag `LocalInference.xcodeproj` into your project -1. Add `LocalInference` as a framework in your app target - -### Preparing a model - -1. 
Prepare a `.pte` file [following the executorch docs](https://github.com/pytorch/executorch/blob/main/examples/models/llama/README.md#step-2-prepare-model) -2. Bundle the `.pte` and `tokenizer.model` file into your app - -We now support models quantized using SpinQuant and QAT-LoRA which offer a significant performance boost (demo app on iPhone 13 Pro): - - -| Llama 3.2 1B | Tokens / Second (total) | | Time-to-First-Token (sec) | | -| :---- | :---- | :---- | :---- | :---- | -| | Haiku | Paragraph | Haiku | Paragraph | -| BF16 | 2.2 | 2.5 | 2.3 | 1.9 | -| QAT+LoRA | 7.1 | 3.3 | 0.37 | 0.24 | -| SpinQuant | 10.1 | 5.2 | 0.2 | 0.2 | - - -### Using LocalInference - -1. Instantiate LocalInference with a DispatchQueue. Optionally, pass it into your agents service: - -```swift - init () { - runnerQueue = DispatchQueue(label: "org.meta.llamastack") - inferenceService = LocalInferenceService(queue: runnerQueue) - agentsService = LocalAgentsService(inference: inferenceService) - } -``` - -2. Before making any inference calls, load your model from your bundle: - -```swift -let mainBundle = Bundle.main -inferenceService.loadModel( - modelPath: mainBundle.url(forResource: "llama32_1b_spinquant", withExtension: "pte"), - tokenizerPath: mainBundle.url(forResource: "tokenizer", withExtension: "model"), - completion: {_ in } // use to handle load failures -) -``` - -3. 
Make inference calls (or agents calls) as you normally would with LlamaStack: - -``` -for await chunk in try await agentsService.initAndCreateTurn( - messages: [ - .UserMessage(Components.Schemas.UserMessage( - content: .case1("Call functions as needed to handle any actions in the following text:\n\n" + text), - role: .user)) - ] -) { -``` - -### Troubleshooting - -If you receive errors like "missing package product" or "invalid checksum", try cleaning the build folder and resetting the Swift package cache: - -(Opt+Click) Product > Clean Build Folder Immediately - -``` -rm -rf \ - ~/Library/org.swift.swiftpm \ - ~/Library/Caches/org.swift.swiftpm \ - ~/Library/Caches/com.apple.dt.Xcode \ - ~/Library/Developer/Xcode/DerivedData -``` diff --git a/docs/source/distributions/remote_hosted_distro/index.md b/docs/source/distributions/remote_hosted_distro/index.md deleted file mode 100644 index ef5a83d8a..000000000 --- a/docs/source/distributions/remote_hosted_distro/index.md +++ /dev/null @@ -1,20 +0,0 @@ -# Remote-Hosted Distributions - -Remote-Hosted distributions are available endpoints serving Llama Stack API that you can directly connect to. - -| Distribution | Endpoint | Inference | Agents | Memory | Safety | Telemetry | -|-------------|----------|-----------|---------|---------|---------|------------| -| Together | [https://llama-stack.together.ai](https://llama-stack.together.ai) | remote::together | meta-reference | remote::weaviate | meta-reference | meta-reference | -| Fireworks | [https://llamastack-preview.fireworks.ai](https://llamastack-preview.fireworks.ai) | remote::fireworks | meta-reference | remote::weaviate | meta-reference | meta-reference | - -## Connecting to Remote-Hosted Distributions - -You can use `llama-stack-client` to interact with these endpoints. 
For example, to list the available models served by the Fireworks endpoint: - -```bash -$ pip install llama-stack-client -$ llama-stack-client configure --endpoint https://llamastack-preview.fireworks.ai -$ llama-stack-client models list -``` - -Checkout the [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python/blob/main/docs/cli_reference.md) repo for more details on how to use the `llama-stack-client` CLI. Checkout [llama-stack-app](https://github.com/meta-llama/llama-stack-apps/tree/main) for examples applications built on top of Llama Stack. diff --git a/docs/source/distributions/remote_hosted_distro/watsonx.md b/docs/source/distributions/remote_hosted_distro/watsonx.md deleted file mode 100644 index 977af90dd..000000000 --- a/docs/source/distributions/remote_hosted_distro/watsonx.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -orphan: true ---- - -# watsonx Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-watsonx` distribution consists of the following provider configurations. 
- -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| inference | `remote::watsonx`, `inline::sentence-transformers` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` | -| vector_io | `inline::faiss` | - - - -### Environment Variables - -The following environment variables can be configured: - -- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`) -- `WATSONX_API_KEY`: watsonx API Key (default: ``) -- `WATSONX_PROJECT_ID`: watsonx Project ID (default: ``) - -### Models - -The following models are available by default: - -- `meta-llama/llama-3-3-70b-instruct (aliases: meta-llama/Llama-3.3-70B-Instruct)` -- `meta-llama/llama-2-13b-chat (aliases: meta-llama/Llama-2-13b)` -- `meta-llama/llama-3-1-70b-instruct (aliases: meta-llama/Llama-3.1-70B-Instruct)` -- `meta-llama/llama-3-1-8b-instruct (aliases: meta-llama/Llama-3.1-8B-Instruct)` -- `meta-llama/llama-3-2-11b-vision-instruct (aliases: meta-llama/Llama-3.2-11B-Vision-Instruct)` -- `meta-llama/llama-3-2-1b-instruct (aliases: meta-llama/Llama-3.2-1B-Instruct)` -- `meta-llama/llama-3-2-3b-instruct (aliases: meta-llama/Llama-3.2-3B-Instruct)` -- `meta-llama/llama-3-2-90b-vision-instruct (aliases: meta-llama/Llama-3.2-90B-Vision-Instruct)` -- `meta-llama/llama-guard-3-11b-vision (aliases: meta-llama/Llama-Guard-3-11B-Vision)` - - -### Prerequisite: API Keys - -Make sure you have access to a watsonx API Key. You can get one by referring [watsonx.ai](https://www.ibm.com/docs/en/masv-and-l/maximo-manage/continuous-delivery?topic=setup-create-watsonx-api-key). 
- - -## Running Llama Stack with watsonx - -You can do this via venv or Docker which has a pre-built image. - -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. - -```bash -LLAMA_STACK_PORT=5001 -docker run \ - -it \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ./run.yaml:/root/my-run.yaml \ - llamastack/distribution-watsonx \ - --config /root/my-run.yaml \ - --port $LLAMA_STACK_PORT \ - --env WATSONX_API_KEY=$WATSONX_API_KEY \ - --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \ - --env WATSONX_BASE_URL=$WATSONX_BASE_URL -``` diff --git a/docs/source/distributions/self_hosted_distro/dell-tgi.md b/docs/source/distributions/self_hosted_distro/dell-tgi.md deleted file mode 100644 index 5fca297b0..000000000 --- a/docs/source/distributions/self_hosted_distro/dell-tgi.md +++ /dev/null @@ -1,78 +0,0 @@ ---- -orphan: true ---- -# Dell-TGI Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-tgi` distribution consists of the following provider configurations. - - -| **API** | **Inference** | **Agents** | **Memory** | **Safety** | **Telemetry** | -|----------------- |--------------- |---------------- |-------------------------------------------------- |---------------- |---------------- | -| **Provider(s)** | remote::tgi | meta-reference | meta-reference, remote::pgvector, remote::chroma | meta-reference | meta-reference | - - -The only difference vs. the `tgi` distribution is that it runs the Dell-TGI server for inference. - - -### Start the Distribution (Single Node GPU) - -> [!NOTE] -> This assumes you have access to GPU to start a TGI server with access to your GPU. - -``` -$ cd distributions/dell-tgi/ -$ ls -compose.yaml README.md run.yaml -$ docker compose up -``` - -The script will first start up TGI server, then start up Llama Stack distribution server hooking up to the remote TGI provider for inference. 
You should be able to see the following outputs -- -``` -[text-generation-inference] | 2024-10-15T18:56:33.810397Z INFO text_generation_router::server: router/src/server.rs:1813: Using config Some(Llama) -[text-generation-inference] | 2024-10-15T18:56:33.810448Z WARN text_generation_router::server: router/src/server.rs:1960: Invalid hostname, defaulting to 0.0.0.0 -[text-generation-inference] | 2024-10-15T18:56:33.864143Z INFO text_generation_router::server: router/src/server.rs:2353: Connected -INFO: Started server process [1] -INFO: Waiting for application startup. -INFO: Application startup complete. -INFO: Uvicorn running on http://[::]:8321 (Press CTRL+C to quit) -``` - -To kill the server -``` -docker compose down -``` - -### (Alternative) Dell-TGI server + llama stack run (Single Node GPU) - -#### Start Dell-TGI server locally -``` -docker run -it --pull always --shm-size 1g -p 80:80 --gpus 4 \ --e NUM_SHARD=4 --e MAX_BATCH_PREFILL_TOKENS=32768 \ --e MAX_INPUT_TOKENS=8000 \ --e MAX_TOTAL_TOKENS=8192 \ -registry.dell.huggingface.co/enterprise-dell-inference-meta-llama-meta-llama-3.1-8b-instruct -``` - - -#### Start Llama Stack server pointing to TGI server - -``` -docker run --pull always --network host -it -p 8321:8321 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-tgi --yaml_config /root/my-run.yaml -``` - -Make sure in your `run.yaml` file, your inference provider is pointing to the correct TGI server endpoint. E.g.
-``` -inference: - - provider_id: tgi0 - provider_type: remote::tgi - config: - url: http://127.0.0.1:5009 -``` diff --git a/docs/source/distributions/self_hosted_distro/dell.md b/docs/source/distributions/self_hosted_distro/dell.md deleted file mode 100644 index 68e7b6f58..000000000 --- a/docs/source/distributions/self_hosted_distro/dell.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -orphan: true ---- - - -# Dell Distribution of Llama Stack - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-dell` distribution consists of the following provider configurations. - -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| inference | `remote::tgi`, `inline::sentence-transformers` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime` | -| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | - - -You can use this distribution if you have GPUs and want to run an independent TGI or Dell Enterprise Hub container for running inference. - -### Environment Variables - -The following environment variables can be configured: - -- `DEH_URL`: URL for the Dell inference server (default: `http://0.0.0.0:8181`) -- `DEH_SAFETY_URL`: URL for the Dell safety inference server (default: `http://0.0.0.0:8282`) -- `CHROMA_URL`: URL for the Chroma server (default: `http://localhost:6601`) -- `INFERENCE_MODEL`: Inference model loaded into the TGI server (default: `meta-llama/Llama-3.2-3B-Instruct`) -- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`) - - -## Setting up Inference server using Dell Enterprise Hub's custom TGI container. 
- -NOTE: This is a placeholder to run inference with TGI. This will be updated to use [Dell Enterprise Hub's containers](https://dell.huggingface.co/authenticated/models) once verified. - -```bash -export INFERENCE_PORT=8181 -export DEH_URL=http://0.0.0.0:$INFERENCE_PORT -export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct -export CHROMADB_HOST=localhost -export CHROMADB_PORT=6601 -export CHROMA_URL=http://$CHROMADB_HOST:$CHROMADB_PORT -export CUDA_VISIBLE_DEVICES=0 -export LLAMA_STACK_PORT=8321 - -docker run --rm -it \ - --pull always \ - --network host \ - -v $HOME/.cache/huggingface:/data \ - -e HF_TOKEN=$HF_TOKEN \ - -p $INFERENCE_PORT:$INFERENCE_PORT \ - --gpus $CUDA_VISIBLE_DEVICES \ - ghcr.io/huggingface/text-generation-inference \ - --dtype bfloat16 \ - --usage-stats off \ - --sharded false \ - --cuda-memory-fraction 0.7 \ - --model-id $INFERENCE_MODEL \ - --port $INFERENCE_PORT --hostname 0.0.0.0 -``` - -If you are using Llama Stack Safety / Shield APIs, then you will need to also run another instance of a TGI with a corresponding safety model like `meta-llama/Llama-Guard-3-1B` using a script like: - -```bash -export SAFETY_INFERENCE_PORT=8282 -export DEH_SAFETY_URL=http://0.0.0.0:$SAFETY_INFERENCE_PORT -export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -export CUDA_VISIBLE_DEVICES=1 - -docker run --rm -it \ - --pull always \ - --network host \ - -v $HOME/.cache/huggingface:/data \ - -e HF_TOKEN=$HF_TOKEN \ - -p $SAFETY_INFERENCE_PORT:$SAFETY_INFERENCE_PORT \ - --gpus $CUDA_VISIBLE_DEVICES \ - ghcr.io/huggingface/text-generation-inference \ - --dtype bfloat16 \ - --usage-stats off \ - --sharded false \ - --cuda-memory-fraction 0.7 \ - --model-id $SAFETY_MODEL \ - --hostname 0.0.0.0 \ - --port $SAFETY_INFERENCE_PORT -``` - -## Dell distribution relies on ChromaDB for vector database usage - -You can start a chroma-db easily using docker. 
-```bash -# This is where the indices are persisted -mkdir -p $HOME/chromadb - -podman run --rm -it \ - --network host \ - --name chromadb \ - -v $HOME/chromadb:/chroma/chroma \ - -e IS_PERSISTENT=TRUE \ - chromadb/chroma:latest \ - --port $CHROMADB_PORT \ - --host $CHROMADB_HOST -``` - -## Running Llama Stack - -Now you are ready to run Llama Stack with TGI as the inference provider. You can do this via venv or Docker which has a pre-built image. - -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. - -```bash -docker run -it \ - --pull always \ - --network host \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v $HOME/.llama:/root/.llama \ - # NOTE: mount the llama-stack / llama-model directories if testing local changes else not needed - -v /home/hjshah/git/llama-stack:/app/llama-stack-source -v /home/hjshah/git/llama-models:/app/llama-models-source \ - # localhost/distribution-dell:dev if building / testing locally - llamastack/distribution-dell\ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=$INFERENCE_MODEL \ - --env DEH_URL=$DEH_URL \ - --env CHROMA_URL=$CHROMA_URL - -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -# You need a local checkout of llama-stack to run this, get it using -# git clone https://github.com/meta-llama/llama-stack.git -cd /path/to/llama-stack - -export SAFETY_INFERENCE_PORT=8282 -export DEH_SAFETY_URL=http://0.0.0.0:$SAFETY_INFERENCE_PORT -export SAFETY_MODEL=meta-llama/Llama-Guard-3-1B - -docker run \ - -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v $HOME/.llama:/root/.llama \ - -v ./llama_stack/distributions/tgi/run-with-safety.yaml:/root/my-run.yaml \ - llamastack/distribution-dell \ - --config /root/my-run.yaml \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=$INFERENCE_MODEL \ - --env DEH_URL=$DEH_URL \ - --env SAFETY_MODEL=$SAFETY_MODEL \ - --env DEH_SAFETY_URL=$DEH_SAFETY_URL \ - --env CHROMA_URL=$CHROMA_URL 
-``` - -### Via venv - -Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. - -```bash -llama stack build --distro dell --image-type venv -llama stack run dell - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=$INFERENCE_MODEL \ - --env DEH_URL=$DEH_URL \ - --env CHROMA_URL=$CHROMA_URL -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -llama stack run ./run-with-safety.yaml \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=$INFERENCE_MODEL \ - --env DEH_URL=$DEH_URL \ - --env SAFETY_MODEL=$SAFETY_MODEL \ - --env DEH_SAFETY_URL=$DEH_SAFETY_URL \ - --env CHROMA_URL=$CHROMA_URL -``` diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md deleted file mode 100644 index 84b85b91c..000000000 --- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -orphan: true ---- - -# Meta Reference GPU Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-meta-reference-gpu` distribution consists of the following provider configurations: - -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| inference | `inline::meta-reference` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` | -| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | - - -Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs. 
- -### Environment Variables - -The following environment variables can be configured: - -- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `8321`) -- `INFERENCE_MODEL`: Inference model loaded into the Meta Reference server (default: `meta-llama/Llama-3.2-3B-Instruct`) -- `INFERENCE_CHECKPOINT_DIR`: Directory containing the Meta Reference model checkpoint (default: `null`) -- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`) -- `SAFETY_CHECKPOINT_DIR`: Directory containing the Llama-Guard model checkpoint (default: `null`) - - -## Prerequisite: Downloading Models - -Please use `llama model list --downloaded` to check that you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](../../references/llama_cli_reference/download_models.md) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints. 
- -``` -$ llama model list --downloaded -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ Model โ”ƒ Size โ”ƒ Modified Time โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ Llama3.2-1B-Instruct:int4-qlora-eo8 โ”‚ 1.53 GB โ”‚ 2025-02-26 11:22:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B โ”‚ 2.31 GB โ”‚ 2025-02-18 21:48:52 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Prompt-Guard-86M โ”‚ 0.02 GB โ”‚ 2025-02-26 11:29:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B-Instruct:int4-spinquant-eo8 โ”‚ 3.69 GB โ”‚ 2025-02-26 11:37:41 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B โ”‚ 5.99 GB โ”‚ 2025-02-18 21:51:26 โ”‚ 
-โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.1-8B โ”‚ 14.97 GB โ”‚ 2025-02-16 10:36:37 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B-Instruct:int4-spinquant-eo8 โ”‚ 1.51 GB โ”‚ 2025-02-26 11:35:02 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B โ”‚ 2.80 GB โ”‚ 2025-02-26 11:20:46 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B:int4 โ”‚ 0.43 GB โ”‚ 2025-02-26 11:33:33 โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -## Running the Distribution - -You can do this via venv or Docker which has a pre-built image. - -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. 
- -```bash -LLAMA_STACK_PORT=8321 -docker run \ - -it \ - --pull always \ - --gpus all \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - llamastack/distribution-meta-reference-gpu \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -docker run \ - -it \ - --pull always \ - --gpus all \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - llamastack/distribution-meta-reference-gpu \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \ - --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -``` - -### Via venv - -Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available. - -```bash -llama stack build --distro meta-reference-gpu --image-type venv -llama stack run distributions/meta-reference-gpu/run.yaml \ - --port 8321 \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -llama stack run distributions/meta-reference-gpu/run-with-safety.yaml \ - --port 8321 \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \ - --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -``` diff --git a/docs/source/distributions/self_hosted_distro/nvidia.md b/docs/source/distributions/self_hosted_distro/nvidia.md deleted file mode 100644 index d4f070075..000000000 --- a/docs/source/distributions/self_hosted_distro/nvidia.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -orphan: true ---- - -# NVIDIA Distribution - -The `llamastack/distribution-nvidia` distribution consists of the following provider configurations.
- -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `inline::localfs`, `remote::nvidia` | -| eval | `remote::nvidia` | -| files | `inline::localfs` | -| inference | `remote::nvidia` | -| post_training | `remote::nvidia` | -| safety | `remote::nvidia` | -| scoring | `inline::basic` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `inline::rag-runtime` | -| vector_io | `inline::faiss` | - - -### Environment Variables - -The following environment variables can be configured: - -- `NVIDIA_API_KEY`: NVIDIA API Key (default: ``) -- `NVIDIA_APPEND_API_VERSION`: Whether to append the API version to the base_url (default: `True`) -- `NVIDIA_DATASET_NAMESPACE`: NVIDIA Dataset Namespace (default: `default`) -- `NVIDIA_PROJECT_ID`: NVIDIA Project ID (default: `test-project`) -- `NVIDIA_CUSTOMIZER_URL`: NVIDIA Customizer URL (default: `https://customizer.api.nvidia.com`) -- `NVIDIA_OUTPUT_MODEL_DIR`: NVIDIA Output Model Directory (default: `test-example-model@v1`) -- `GUARDRAILS_SERVICE_URL`: URL for the NeMo Guardrails Service (default: `http://0.0.0.0:7331`) -- `NVIDIA_GUARDRAILS_CONFIG_ID`: NVIDIA Guardrail Configuration ID (default: `self-check`) -- `NVIDIA_EVALUATOR_URL`: URL for the NeMo Evaluator Service (default: `http://0.0.0.0:7331`) -- `INFERENCE_MODEL`: Inference model (default: `Llama3.1-8B-Instruct`) -- `SAFETY_MODEL`: Name of the model to use for safety (default: `meta/llama-3.1-8b-instruct`) - -### Models - -The following models are available by default: - -- `meta/llama3-8b-instruct ` -- `meta/llama3-70b-instruct ` -- `meta/llama-3.1-8b-instruct ` -- `meta/llama-3.1-70b-instruct ` -- `meta/llama-3.1-405b-instruct ` -- `meta/llama-3.2-1b-instruct ` -- `meta/llama-3.2-3b-instruct ` -- `meta/llama-3.2-11b-vision-instruct ` -- `meta/llama-3.2-90b-vision-instruct ` -- `meta/llama-3.3-70b-instruct ` -- `nvidia/vila ` -- `nvidia/llama-3.2-nv-embedqa-1b-v2 ` -- `nvidia/nv-embedqa-e5-v5 ` -- 
`nvidia/nv-embedqa-mistral-7b-v2 ` -- `snowflake/arctic-embed-l ` - - -## Prerequisites -### NVIDIA API Keys - -Make sure you have access to an NVIDIA API Key. You can get one by visiting [https://build.nvidia.com/](https://build.nvidia.com/). Use this key for the `NVIDIA_API_KEY` environment variable. - -### Deploy NeMo Microservices Platform -The NVIDIA NeMo microservices platform supports end-to-end microservice deployment of a complete AI flywheel on your Kubernetes cluster through the NeMo Microservices Helm Chart. Please reference the [NVIDIA NeMo Microservices documentation](https://docs.nvidia.com/nemo/microservices/latest/about/index.html) for platform prerequisites and instructions to install and deploy the platform. - -## Supported Services -Each Llama Stack API corresponds to a specific NeMo microservice. The core microservices (Customizer, Evaluator, Guardrails) are exposed by the same endpoint. The platform components (Data Store) are each exposed by separate endpoints. - -### Inference: NVIDIA NIM -NVIDIA NIM is used for running inference with registered models. There are two ways to access NVIDIA NIMs: - 1. Hosted (default): Preview APIs hosted at https://integrate.api.nvidia.com (Requires an API key) - 2. Self-hosted: NVIDIA NIMs that run on your own infrastructure. - -The deployed platform includes the NIM Proxy microservice, which is the service that provides access to your NIMs (for example, to run inference on a model). Set the `NVIDIA_BASE_URL` environment variable to use your NVIDIA NIM Proxy deployment. - -### Datasetio API: NeMo Data Store -The NeMo Data Store microservice serves as the default file storage solution for the NeMo microservices platform. It exposes APIs compatible with the Hugging Face Hub client (`HfApi`), so you can use the client to interact with Data Store. The `NVIDIA_DATASETS_URL` environment variable should point to your NeMo Data Store endpoint.
- -See the {repopath}`NVIDIA Datasetio docs::llama_stack/providers/remote/datasetio/nvidia/README.md` for supported features and example usage. - -### Eval API: NeMo Evaluator -The NeMo Evaluator microservice supports evaluation of LLMs. Launching an Evaluation job with NeMo Evaluator requires an Evaluation Config (an object that contains metadata needed by the job). A Llama Stack Benchmark maps to an Evaluation Config, so registering a Benchmark creates an Evaluation Config in NeMo Evaluator. The `NVIDIA_EVALUATOR_URL` environment variable should point to your NeMo Microservices endpoint. - -See the {repopath}`NVIDIA Eval docs::llama_stack/providers/remote/eval/nvidia/README.md` for supported features and example usage. - -### Post-Training API: NeMo Customizer -The NeMo Customizer microservice supports fine-tuning models. You can reference {repopath}`this list of supported models::llama_stack/providers/remote/post_training/nvidia/models.py` that can be fine-tuned using Llama Stack. The `NVIDIA_CUSTOMIZER_URL` environment variable should point to your NeMo Microservices endpoint. - -See the {repopath}`NVIDIA Post-Training docs::llama_stack/providers/remote/post_training/nvidia/README.md` for supported features and example usage. - -### Safety API: NeMo Guardrails -The NeMo Guardrails microservice sits between your application and the LLM, and adds checks and content moderation to a model. The `GUARDRAILS_SERVICE_URL` environment variable should point to your NeMo Microservices endpoint. - -See the {repopath}`NVIDIA Safety docs::llama_stack/providers/remote/safety/nvidia/README.md` for supported features and example usage. - -## Deploying models -In order to use a registered model with the Llama Stack APIs, ensure the corresponding NIM is deployed to your environment. For example, you can use the NIM Proxy microservice to deploy `meta/llama-3.2-1b-instruct`. 
- -Note: For improved inference speeds, we need to use NIM with `fast_outlines` guided decoding system (specified in the request body). This is the default if you deployed the platform with the NeMo Microservices Helm Chart. -```sh -# URL to NeMo NIM Proxy service -export NEMO_URL="http://nemo.test" - -curl --location "$NEMO_URL/v1/deployment/model-deployments" \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -d '{ - "name": "llama-3.2-1b-instruct", - "namespace": "meta", - "config": { - "model": "meta/llama-3.2-1b-instruct", - "nim_deployment": { - "image_name": "nvcr.io/nim/meta/llama-3.2-1b-instruct", - "image_tag": "1.8.3", - "pvc_size": "25Gi", - "gpu": 1, - "additional_envs": { - "NIM_GUIDED_DECODING_BACKEND": "fast_outlines" - } - } - } - }' -``` -This NIM deployment should take approximately 10 minutes to go live. [See the docs](https://docs.nvidia.com/nemo/microservices/latest/get-started/tutorials/deploy-nims.html) for more information on how to deploy a NIM and verify it's available for inference. - -You can also remove a deployed NIM to free up GPU resources, if needed. -```sh -export NEMO_URL="http://nemo.test" - -curl -X DELETE "$NEMO_URL/v1/deployment/model-deployments/meta/llama-3.1-8b-instruct" -``` - -## Running Llama Stack with NVIDIA - -You can do this via venv (build code), or Docker which has a pre-built image. - -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. - -```bash -LLAMA_STACK_PORT=8321 -docker run \ - -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ./run.yaml:/root/my-run.yaml \ - llamastack/distribution-nvidia \ - --config /root/my-run.yaml \ - --port $LLAMA_STACK_PORT \ - --env NVIDIA_API_KEY=$NVIDIA_API_KEY -``` - -### Via venv - -If you've set up your local development environment, you can also build the image using your local virtual environment. 
- -```bash -INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct -llama stack build --distro nvidia --image-type venv -llama stack run ./run.yaml \ - --port 8321 \ - --env NVIDIA_API_KEY=$NVIDIA_API_KEY \ - --env INFERENCE_MODEL=$INFERENCE_MODEL -``` - -## Example Notebooks -For examples of how to use the NVIDIA Distribution to run inference, fine-tune, evaluate, and run safety checks on your LLMs, you can reference the example notebooks in {repopath}`docs/notebooks/nvidia`. diff --git a/docs/source/distributions/self_hosted_distro/passthrough.md b/docs/source/distributions/self_hosted_distro/passthrough.md deleted file mode 100644 index 39f076be4..000000000 --- a/docs/source/distributions/self_hosted_distro/passthrough.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -orphan: true ---- - -# Passthrough Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-passthrough` distribution consists of the following provider configurations. - -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| inference | `remote::passthrough`, `inline::sentence-transformers` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::rag-runtime`, `remote::model-context-protocol` | -| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | - - -### Environment Variables - -The following environment variables can be configured: - -- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `8321`) -- `PASSTHROUGH_API_KEY`: Passthrough API Key (default: ``) -- `PASSTHROUGH_URL`: Passthrough URL (default: ``) - -### Models - -The following models are available by default: - -- `llama3.1-8b-instruct ` -- 
`llama3.2-11b-vision-instruct ` diff --git a/docs/source/distributions/self_hosted_distro/starter.md b/docs/source/distributions/self_hosted_distro/starter.md deleted file mode 100644 index 9218f7f81..000000000 --- a/docs/source/distributions/self_hosted_distro/starter.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -orphan: true ---- - -# Starter Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-starter` distribution is a comprehensive, multi-provider distribution that includes most of the available inference providers in Llama Stack. It's designed to be a one-stop solution for developers who want to experiment with different AI providers without having to configure each one individually. - -## Provider Composition - -The starter distribution consists of the following provider configurations: - -| API | Provider(s) | -|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| files | `inline::localfs` | -| inference | `remote::openai`, `remote::fireworks`, `remote::together`, `remote::ollama`, `remote::anthropic`, `remote::gemini`, `remote::groq`, `remote::sambanova`, `remote::vllm`, `remote::tgi`, `remote::cerebras`, `remote::llama-openai-compat`, `remote::nvidia`, `remote::hf::serverless`, `remote::hf::endpoint`, `inline::sentence-transformers` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, 
`remote::model-context-protocol` | -| vector_io | `inline::faiss`, `inline::sqlite-vec`, `inline::milvus`, `remote::chromadb`, `remote::pgvector` | - -## Inference Providers - -The starter distribution includes a comprehensive set of inference providers: - -### Hosted Providers -- **[OpenAI](https://openai.com/api/)**: GPT-4, GPT-3.5, O1, O3, O4 models and text embeddings - - provider ID: `openai` - reference documentation: [openai](../../providers/inference/remote_openai.md) -- **[Fireworks](https://fireworks.ai/)**: Llama 3.1, 3.2, 3.3, 4 Scout, 4 Maverick models and - embeddings - provider ID: `fireworks` - reference documentation: [fireworks](../../providers/inference/remote_fireworks.md) -- **[Together](https://together.ai/)**: Llama 3.1, 3.2, 3.3, 4 Scout, 4 Maverick models and - embeddings - provider ID: `together` - reference documentation: [together](../../providers/inference/remote_together.md) -- **[Anthropic](https://www.anthropic.com/)**: Claude 3.5 Sonnet, Claude 3.7 Sonnet, Claude 3.5 Haiku, and Voyage embeddings - provider ID: `anthropic` - reference documentation: [anthropic](../../providers/inference/remote_anthropic.md) -- **[Gemini](https://gemini.google.com/)**: Gemini 1.5, 2.0, 2.5 models and text embeddings - provider ID: `gemini` - reference documentation: [gemini](../../providers/inference/remote_gemini.md) -- **[Groq](https://groq.com/)**: Fast Llama models (3.1, 3.2, 3.3, 4 Scout, 4 Maverick) - provider ID: `groq` - reference documentation: [groq](../../providers/inference/remote_groq.md) -- **[SambaNova](https://www.sambanova.ai/)**: Llama 3.1, 3.2, 3.3, 4 Scout, 4 Maverick models - provider ID: `sambanova` - reference documentation: [sambanova](../../providers/inference/remote_sambanova.md) -- **[Cerebras](https://www.cerebras.ai/)**: Cerebras AI models - provider ID: `cerebras` - reference documentation: [cerebras](../../providers/inference/remote_cerebras.md) -- **[NVIDIA](https://www.nvidia.com/)**: NVIDIA NIM - provider ID: `nvidia` 
- reference documentation: [nvidia](../../providers/inference/remote_nvidia.md) -- **[HuggingFace](https://huggingface.co/)**: Serverless and endpoint models - provider ID: `hf::serverless` and `hf::endpoint` - reference documentation: [huggingface-serverless](../../providers/inference/remote_hf_serverless.md) and [huggingface-endpoint](../../providers/inference/remote_hf_endpoint.md) -- **[Bedrock](https://aws.amazon.com/bedrock/)**: AWS Bedrock models - provider ID: `bedrock` - reference documentation: [bedrock](../../providers/inference/remote_bedrock.md) - -### Local/Remote Providers -- **[Ollama](https://ollama.ai/)**: Local Ollama models - provider ID: `ollama` - reference documentation: [ollama](../../providers/inference/remote_ollama.md) -- **[vLLM](https://docs.vllm.ai/en/latest/)**: Local or remote vLLM server - provider ID: `vllm` - reference documentation: [vllm](../../providers/inference/remote_vllm.md) -- **[TGI](https://github.com/huggingface/text-generation-inference)**: Text Generation Inference server - Dell Enterprise Hub's custom TGI container too (use `DEH_URL`) - provider ID: `tgi` - reference documentation: [tgi](../../providers/inference/remote_tgi.md) -- **[Sentence Transformers](https://www.sbert.net/)**: Local embedding models - provider ID: `sentence-transformers` - reference documentation: [sentence-transformers](../../providers/inference/inline_sentence-transformers.md) - -All providers are disabled by default. So you need to enable them by setting the environment variables. 
- -## Vector IO - -The starter distribution includes a comprehensive set of vector IO providers: - -- **[FAISS](https://github.com/facebookresearch/faiss)**: Local FAISS vector store - enabled by - default - provider ID: `faiss` -- **[SQLite](https://www.sqlite.org/index.html)**: Local SQLite vector store - disabled by default - provider ID: `sqlite-vec` -- **[ChromaDB](https://www.trychroma.com/)**: Remote ChromaDB vector store - disabled by default - provider ID: `chromadb` -- **[PGVector](https://github.com/pgvector/pgvector)**: PostgreSQL vector store - disabled by default - provider ID: `pgvector` -- **[Milvus](https://milvus.io/)**: Milvus vector store - disabled by default - provider ID: `milvus` - -## Environment Variables - -The following environment variables can be configured: - -### Server Configuration -- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `8321`) - -### API Keys for Hosted Providers -- `OPENAI_API_KEY`: OpenAI API key -- `FIREWORKS_API_KEY`: Fireworks API key -- `TOGETHER_API_KEY`: Together API key -- `ANTHROPIC_API_KEY`: Anthropic API key -- `GEMINI_API_KEY`: Google Gemini API key -- `GROQ_API_KEY`: Groq API key -- `SAMBANOVA_API_KEY`: SambaNova API key -- `CEREBRAS_API_KEY`: Cerebras API key -- `LLAMA_API_KEY`: Llama API key -- `NVIDIA_API_KEY`: NVIDIA API key -- `HF_API_TOKEN`: HuggingFace API token - -### Local Provider Configuration -- `OLLAMA_URL`: Ollama server URL (default: `http://localhost:11434`) -- `VLLM_URL`: vLLM server URL (default: `http://localhost:8000/v1`) -- `VLLM_MAX_TOKENS`: vLLM max tokens (default: `4096`) -- `VLLM_API_TOKEN`: vLLM API token (default: `fake`) -- `VLLM_TLS_VERIFY`: vLLM TLS verification (default: `true`) -- `TGI_URL`: TGI server URL - -### Model Configuration -- `INFERENCE_MODEL`: HuggingFace model for serverless inference -- `INFERENCE_ENDPOINT_NAME`: HuggingFace endpoint name - -### Vector Database Configuration -- `SQLITE_STORE_DIR`: SQLite store directory (default: 
`~/.llama/distributions/starter`) -- `ENABLE_SQLITE_VEC`: Enable SQLite vector provider -- `ENABLE_CHROMADB`: Enable ChromaDB provider -- `ENABLE_PGVECTOR`: Enable PGVector provider -- `CHROMADB_URL`: ChromaDB server URL -- `PGVECTOR_HOST`: PGVector host (default: `localhost`) -- `PGVECTOR_PORT`: PGVector port (default: `5432`) -- `PGVECTOR_DB`: PGVector database name -- `PGVECTOR_USER`: PGVector username -- `PGVECTOR_PASSWORD`: PGVector password - -### Tool Configuration -- `BRAVE_SEARCH_API_KEY`: Brave Search API key -- `TAVILY_SEARCH_API_KEY`: Tavily Search API key - -### Telemetry Configuration -- `OTEL_SERVICE_NAME`: OpenTelemetry service name -- `TELEMETRY_SINKS`: Telemetry sinks (default: `console,sqlite`) - -## Enabling Providers - -You can enable specific providers by setting appropriate environment variables. For example, - -```bash -# self-hosted -export OLLAMA_URL=http://localhost:11434 # enables the Ollama inference provider -export VLLM_URL=http://localhost:8000/v1 # enables the vLLM inference provider -export TGI_URL=http://localhost:8000/v1 # enables the TGI inference provider - -# cloud-hosted requiring API key configuration on the server -export CEREBRAS_API_KEY=your_cerebras_api_key # enables the Cerebras inference provider -export NVIDIA_API_KEY=your_nvidia_api_key # enables the NVIDIA inference provider - -# vector providers -export MILVUS_URL=http://localhost:19530 # enables the Milvus vector provider -export CHROMADB_URL=http://localhost:8000/v1 # enables the ChromaDB vector provider -export PGVECTOR_DB=llama_stack_db # enables the PGVector vector provider -``` - -This distribution comes with a default "llama-guard" shield that can be enabled by setting the `SAFETY_MODEL` environment variable to point to an appropriate Llama Guard model id. Use `llama-stack-client models list` to see the list of available models. - -## Running the Distribution - -You can run the starter distribution via Docker or venv. 
- -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. - -```bash -LLAMA_STACK_PORT=8321 -docker run \ - -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -e OPENAI_API_KEY=your_openai_key \ - -e FIREWORKS_API_KEY=your_fireworks_key \ - -e TOGETHER_API_KEY=your_together_key \ - llamastack/distribution-starter \ - --port $LLAMA_STACK_PORT -``` - -### Via venv - -Ensure you have configured the starter distribution using the environment variables explained above. - -```bash -uv run --with llama-stack llama stack build --distro starter --image-type venv --run -``` - -## Example Usage - -Once the distribution is running, you can use any of the available models. Here are some examples: - -### Using OpenAI Models -```bash -llama-stack-client --endpoint http://localhost:8321 \ -inference chat-completion \ ---model-id openai/gpt-4o \ ---message "Hello, how are you?" -``` - -### Using Fireworks Models -```bash -llama-stack-client --endpoint http://localhost:8321 \ -inference chat-completion \ ---model-id fireworks/meta-llama/Llama-3.2-3B-Instruct \ ---message "Write a short story about a robot." -``` - -### Using Local Ollama Models -```bash -# First, make sure Ollama is running and you have a model -ollama run llama3.2:3b - -# Then use it through Llama Stack -export OLLAMA_INFERENCE_MODEL=llama3.2:3b -llama-stack-client --endpoint http://localhost:8321 \ -inference chat-completion \ ---model-id ollama/llama3.2:3b \ ---message "Explain quantum computing in simple terms." 
-``` - -## Storage - -The starter distribution uses SQLite for local storage of various components: - -- **Metadata store**: `~/.llama/distributions/starter/registry.db` -- **Inference store**: `~/.llama/distributions/starter/inference_store.db` -- **FAISS store**: `~/.llama/distributions/starter/faiss_store.db` -- **SQLite vector store**: `~/.llama/distributions/starter/sqlite_vec.db` -- **Files metadata**: `~/.llama/distributions/starter/files_metadata.db` -- **Agents store**: `~/.llama/distributions/starter/agents_store.db` -- **Responses store**: `~/.llama/distributions/starter/responses_store.db` -- **Trace store**: `~/.llama/distributions/starter/trace_store.db` -- **Evaluation store**: `~/.llama/distributions/starter/meta_reference_eval.db` -- **Dataset I/O stores**: Various HuggingFace and local filesystem stores - -## Benefits of the Starter Distribution - -1. **Comprehensive Coverage**: Includes most popular AI providers in one distribution -2. **Flexible Configuration**: Easy to enable/disable providers based on your needs -3. **No Local GPU Required**: Most providers are cloud-based, making it accessible to developers without high-end hardware -4. **Easy Migration**: Start with hosted providers and gradually move to local ones as needed -5. **Production Ready**: Includes safety, evaluation, and telemetry components -6. **Tool Integration**: Comes with web search, RAG, and model context protocol tools - -The starter distribution is ideal for developers who want to experiment with different AI providers, build prototypes quickly, or create applications that can work with multiple AI backends. 
diff --git a/docs/source/distributions/starting_llama_stack_server.md b/docs/source/distributions/starting_llama_stack_server.md deleted file mode 100644 index 1a26694a6..000000000 --- a/docs/source/distributions/starting_llama_stack_server.md +++ /dev/null @@ -1,25 +0,0 @@ -# Starting a Llama Stack Server - -You can run a Llama Stack server in one of the following ways: - -## As a Library: - -This is the simplest way to get started. Using Llama Stack as a library means you do not need to start a server. This is especially useful when you are not running inference locally and relying on an external inference service (eg. fireworks, together, groq, etc.) See [Using Llama Stack as a Library](importing_as_library) - - -## Container: - -Another simple way to start interacting with Llama Stack is to just spin up a container (via Docker or Podman) which is pre-built with all the providers you need. We provide a number of pre-built images so you can start a Llama Stack server instantly. You can also build your own custom container. Which distribution to choose depends on the hardware you have. See [Selection of a Distribution](selection) for more details. - -## Kubernetes: - -If you have built a container image and want to deploy it in a Kubernetes cluster instead of starting the Llama Stack server locally. See [Kubernetes Deployment Guide](kubernetes_deployment) for more details. - - -```{toctree} -:maxdepth: 1 -:hidden: - -importing_as_library -configuration -``` diff --git a/docs/source/getting_started/demo_script.py b/docs/source/getting_started/demo_script.py deleted file mode 100644 index 2ea67739f..000000000 --- a/docs/source/getting_started/demo_script.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient - -vector_db_id = "my_demo_vector_db" -client = LlamaStackClient(base_url="http://localhost:8321") - -models = client.models.list() - -# Select the first LLM and first embedding models -model_id = next(m for m in models if m.model_type == "llm").identifier -embedding_model_id = ( - em := next(m for m in models if m.model_type == "embedding") -).identifier -embedding_dimension = em.metadata["embedding_dimension"] - -vector_db = client.vector_dbs.register( - vector_db_id=vector_db_id, - embedding_model=embedding_model_id, - embedding_dimension=embedding_dimension, - provider_id="faiss", -) -vector_db_id = vector_db.identifier -source = "https://www.paulgraham.com/greatwork.html" -print("rag_tool> Ingesting document:", source) -document = RAGDocument( - document_id="document_1", - content=source, - mime_type="text/html", - metadata={}, -) -client.tool_runtime.rag_tool.insert( - documents=[document], - vector_db_id=vector_db_id, - chunk_size_in_tokens=100, -) -agent = Agent( - client, - model=model_id, - instructions="You are a helpful assistant", - tools=[ - { - "name": "builtin::rag/knowledge_search", - "args": {"vector_db_ids": [vector_db_id]}, - } - ], -) - -prompt = "How do you do great work?" -print("prompt>", prompt) - -use_stream = True -response = agent.create_turn( - messages=[{"role": "user", "content": prompt}], - session_id=agent.create_session("rag_session"), - stream=use_stream, -) - -# Only call `AgentEventLogger().log(response)` for streaming responses. 
-if use_stream: - for log in AgentEventLogger().log(response): - log.print() -else: - print(response) diff --git a/docs/source/getting_started/detailed_tutorial.md b/docs/source/getting_started/detailed_tutorial.md deleted file mode 100644 index 77a899c48..000000000 --- a/docs/source/getting_started/detailed_tutorial.md +++ /dev/null @@ -1,553 +0,0 @@ -## Detailed Tutorial - -In this guide, we'll walk through how you can use the Llama Stack (server and client SDK) to test a simple agent. -A Llama Stack agent is a simple integrated system that can perform tasks by combining a Llama model for reasoning with -tools (e.g., RAG, web search, code execution, etc.) for taking actions. -In Llama Stack, we provide a server exposing multiple APIs. These APIs are backed by implementations from different providers. - -Llama Stack is a stateful service with REST APIs to support seamless transition of AI applications across different environments. The server can be run in a variety of ways, including as a standalone binary, Docker container, or hosted service. You can build and test using a local server first and deploy to a hosted endpoint for production. - -In this guide, we'll walk through how to build a RAG agent locally using Llama Stack with [Ollama](https://ollama.com/) -as the inference [provider](../providers/index.md#inference) for a Llama Model. - -### Step 1: Installation and Setup - -Install Ollama by following the instructions on the [Ollama website](https://ollama.com/download), then -download Llama 3.2 3B model, and then start the Ollama service. 
-```bash -ollama pull llama3.2:3b -ollama run llama3.2:3b --keepalive 60m -``` - -Install [uv](https://docs.astral.sh/uv/) to setup your virtual environment - -::::{tab-set} - -:::{tab-item} macOS and Linux -Use `curl` to download the script and execute it with `sh`: -```console -curl -LsSf https://astral.sh/uv/install.sh | sh -``` -::: - -:::{tab-item} Windows -Use `irm` to download the script and execute it with `iex`: - -```console -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" -``` -::: -:::: - -Setup your virtual environment. - -```bash -uv sync --python 3.12 -source .venv/bin/activate -``` -### Step 2: Run Llama Stack -Llama Stack is a server that exposes multiple APIs, you connect with it using the Llama Stack client SDK. - -::::{tab-set} - -:::{tab-item} Using `venv` -You can use Python to build and run the Llama Stack server, which is useful for testing and development. - -Llama Stack uses a [YAML configuration file](../distributions/configuration.md) to specify the stack setup, -which defines the providers and their settings. The generated configuration serves as a starting point that you can [customize for your specific needs](../distributions/customizing_run_yaml.md). -Now let's build and run the Llama Stack config for Ollama. -We use `starter` as template. By default all providers are disabled, this requires enable ollama by passing environment variables. - -```bash -llama stack build --distro starter --image-type venv --run -``` -::: -:::{tab-item} Using `venv` -You can use Python to build and run the Llama Stack server, which is useful for testing and development. - -Llama Stack uses a [YAML configuration file](../distributions/configuration.md) to specify the stack setup, -which defines the providers and their settings. -Now let's build and run the Llama Stack config for Ollama. 
- -```bash -llama stack build --distro starter --image-type venv --run -``` -::: -:::{tab-item} Using a Container -You can use a container image to run the Llama Stack server. We provide several container images for the server -component that works with different inference providers out of the box. For this guide, we will use -`llamastack/distribution-starter` as the container image. If you'd like to build your own image or customize the -configurations, please check out [this guide](../distributions/building_distro.md). -First lets setup some environment variables and create a local directory to mount into the containerโ€™s file system. -```bash -export LLAMA_STACK_PORT=8321 -mkdir -p ~/.llama -``` -Then start the server using the container tool of your choice. For example, if you are running Docker you can use the -following command: -```bash -docker run -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - llamastack/distribution-starter \ - --port $LLAMA_STACK_PORT \ - --env OLLAMA_URL=http://host.docker.internal:11434 -``` -Note to start the container with Podman, you can do the same but replace `docker` at the start of the command with -`podman`. If you are using `podman` older than `4.7.0`, please also replace `host.docker.internal` in the `OLLAMA_URL` -with `host.containers.internal`. - -The configuration YAML for the Ollama distribution is available at `distributions/ollama/run.yaml`. - -```{tip} - -Docker containers run in their own isolated network namespaces on Linux. To allow the container to communicate with services running on the host via `localhost`, you need `--network=host`. This makes the container use the hostโ€™s network directly so it can connect to Ollama running on `localhost:11434`. 
- -Linux users having issues running the above command should instead try the following: -```bash -docker run -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - --network=host \ - llamastack/distribution-starter \ - --port $LLAMA_STACK_PORT \ - --env OLLAMA_URL=http://localhost:11434 -``` -::: -:::: -You will see output like below: -``` -INFO: Application startup complete. -INFO: Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit) -``` - -Now you can use the Llama Stack client to run inference and build agents! - -You can reuse the server setup or use the [Llama Stack Client](https://github.com/meta-llama/llama-stack-client-python/). -Note that the client package is already included in the `llama-stack` package. - -### Step 3: Run Client CLI - -Open a new terminal and navigate to the same directory you started the server from. Then set up a new or activate your -existing server virtual environment. - -::::{tab-set} - -:::{tab-item} Reuse Server `venv` -```bash -# The client is included in the llama-stack package so we just activate the server venv -source .venv/bin/activate -``` -::: - -:::{tab-item} Install with `venv` -```bash -uv venv client --python 3.12 -source client/bin/activate -pip install llama-stack-client -``` -::: - - -:::: - -Now let's use the `llama-stack-client` [CLI](../references/llama_stack_client_cli_reference.md) to check the -connectivity to the server. - -```bash -llama-stack-client configure --endpoint http://localhost:8321 --api-key none -``` -You will see the below: -``` -Done! 
You can now use the Llama Stack Client CLI with endpoint http://localhost:8321 -``` - -List the models -```bash -llama-stack-client models list -Available Models - -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ model_type โ”ƒ identifier โ”ƒ provider_resource_id โ”ƒ metadata โ”ƒ provider_id โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ embedding โ”‚ ollama/all-minilm:l6-v2 โ”‚ all-minilm:l6-v2 โ”‚ {'embedding_dimension': 384.0} โ”‚ ollama โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ ... โ”‚ ... โ”‚ ... โ”‚ โ”‚ ... 
โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ llm โ”‚ ollama/Llama-3.2:3b โ”‚ llama3.2:3b โ”‚ โ”‚ ollama โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - -``` -You can test basic Llama inference completion using the CLI. 
- -```bash -llama-stack-client inference chat-completion --model-id "ollama/llama3.2:3b" --message "tell me a joke" - -``` -Sample output: -```python -OpenAIChatCompletion( - id="chatcmpl-08d7b2be-40f3-47ed-8f16-a6f29f2436af", - choices=[ - OpenAIChatCompletionChoice( - finish_reason="stop", - index=0, - message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam( - role="assistant", - content="Why couldn't the bicycle stand up by itself?\n\nBecause it was two-tired.", - name=None, - tool_calls=None, - refusal=None, - annotations=None, - audio=None, - function_call=None, - ), - logprobs=None, - ) - ], - created=1751725254, - model="llama3.2:3b", - object="chat.completion", - service_tier=None, - system_fingerprint="fp_ollama", - usage={ - "completion_tokens": 18, - "prompt_tokens": 29, - "total_tokens": 47, - "completion_tokens_details": None, - "prompt_tokens_details": None, - }, -) -``` - -### Step 4: Run the Demos - -Note that these demos show the [Python Client SDK](../references/python_sdk_reference/index.md). -Other SDKs are also available, please refer to the [Client SDK](../index.md#client-sdks) list for the complete options. - -::::{tab-set} - -:::{tab-item} Basic Inference -Now you can run inference using the Llama Stack client SDK. - -#### i. Create the Script - -Create a file `inference.py` and add the following code: -```python -from llama_stack_client import LlamaStackClient - -client = LlamaStackClient(base_url="http://localhost:8321") - -# List available models -models = client.models.list() - -# Select the first LLM -llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama") -model_id = llm.identifier - -print("Model:", model_id) - -response = client.chat.completions.create( - model=model_id, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Write a haiku about coding"}, - ], -) -print(response) -``` - -#### ii. 
Run the Script -Let's run the script using `uv` -```bash -uv run python inference.py -``` -Which will output: -``` -Model: ollama/llama3.2:3b -OpenAIChatCompletion(id='chatcmpl-30cd0f28-a2ad-4b6d-934b-13707fc60ebf', choices=[OpenAIChatCompletionChoice(finish_reason='stop', index=0, message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(role='assistant', content="Lines of code unfold\nAlgorithms dance with ease\nLogic's gentle kiss", name=None, tool_calls=None, refusal=None, annotations=None, audio=None, function_call=None), logprobs=None)], created=1751732480, model='llama3.2:3b', object='chat.completion', service_tier=None, system_fingerprint='fp_ollama', usage={'completion_tokens': 16, 'prompt_tokens': 37, 'total_tokens': 53, 'completion_tokens_details': None, 'prompt_tokens_details': None}) -``` -::: - -:::{tab-item} Build a Simple Agent -Next we can move beyond simple inference and build an agent that can perform tasks using the Llama Stack server. -#### i. Create the Script -Create a file `agent.py` and add the following code: - -```python -from llama_stack_client import LlamaStackClient -from llama_stack_client import Agent, AgentEventLogger -from rich.pretty import pprint -import uuid - -client = LlamaStackClient(base_url=f"http://localhost:8321") - -models = client.models.list() -llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama") -model_id = llm.identifier - -agent = Agent(client, model=model_id, instructions="You are a helpful assistant.") - -s_id = agent.create_session(session_name=f"s{uuid.uuid4().hex}") - -print("Non-streaming ...") -response = agent.create_turn( - messages=[{"role": "user", "content": "Who are you?"}], - session_id=s_id, - stream=False, -) -print("agent>", response.output_message.content) - -print("Streaming ...") -stream = agent.create_turn( - messages=[{"role": "user", "content": "Who are you?"}], session_id=s_id, stream=True -) -for event in stream: - pprint(event) - -print("Streaming 
with print helper...") -stream = agent.create_turn( - messages=[{"role": "user", "content": "Who are you?"}], session_id=s_id, stream=True -) -for event in AgentEventLogger().log(stream): - event.print() -``` -### ii. Run the Script -Let's run the script using `uv` -```bash -uv run python agent.py -``` - -```{dropdown} ๐Ÿ‘‹ Click here to see the sample output - Non-streaming ... - agent> I'm an artificial intelligence designed to assist and communicate with users like you. I don't have a personal identity, but I can provide information, answer questions, and help with tasks to the best of my abilities. - - I'm a large language model, which means I've been trained on a massive dataset of text from various sources, allowing me to understand and respond to a wide range of topics and questions. My purpose is to provide helpful and accurate information, and I'm constantly learning and improving my responses based on the interactions I have with users like you. - - I can help with: - - * Answering questions on various subjects - * Providing definitions and explanations - * Offering suggestions and ideas - * Assisting with language-related tasks, such as proofreading and editing - * Generating text and content - * And more! - - Feel free to ask me anything, and I'll do my best to help! - Streaming ... 
- AgentTurnResponseStreamChunk( - โ”‚ event=TurnResponseEvent( - โ”‚ โ”‚ payload=AgentTurnResponseStepStartPayload( - โ”‚ โ”‚ โ”‚ event_type='step_start', - โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ step_type='inference', - โ”‚ โ”‚ โ”‚ metadata={} - โ”‚ โ”‚ ) - โ”‚ ) - ) - AgentTurnResponseStreamChunk( - โ”‚ event=TurnResponseEvent( - โ”‚ โ”‚ payload=AgentTurnResponseStepProgressPayload( - โ”‚ โ”‚ โ”‚ delta=TextDelta(text='As', type='text'), - โ”‚ โ”‚ โ”‚ event_type='step_progress', - โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ step_type='inference' - โ”‚ โ”‚ ) - โ”‚ ) - ) - AgentTurnResponseStreamChunk( - โ”‚ event=TurnResponseEvent( - โ”‚ โ”‚ payload=AgentTurnResponseStepProgressPayload( - โ”‚ โ”‚ โ”‚ delta=TextDelta(text=' a', type='text'), - โ”‚ โ”‚ โ”‚ event_type='step_progress', - โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ step_type='inference' - โ”‚ โ”‚ ) - โ”‚ ) - ) - ... - AgentTurnResponseStreamChunk( - โ”‚ event=TurnResponseEvent( - โ”‚ โ”‚ payload=AgentTurnResponseStepCompletePayload( - โ”‚ โ”‚ โ”‚ event_type='step_complete', - โ”‚ โ”‚ โ”‚ step_details=InferenceStep( - โ”‚ โ”‚ โ”‚ โ”‚ api_model_response=CompletionMessage( - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ content='As a conversational AI, I don\'t have a personal identity in the classical sense. I exist as a program running on computer servers, designed to process and respond to text-based inputs.\n\nI\'m an instance of a type of artificial intelligence called a "language model," which is trained on vast amounts of text data to generate human-like responses. My primary function is to understand and respond to natural language inputs, like our conversation right now.\n\nThink of me as a virtual assistant, a chatbot, or a conversational interface โ€“ I\'m here to provide information, answer questions, and engage in conversation to the best of my abilities. 
I don\'t have feelings, emotions, or consciousness like humans do, but I\'m designed to simulate human-like interactions to make our conversations feel more natural and helpful.\n\nSo, that\'s me in a nutshell! What can I help you with today?', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ role='assistant', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ stop_reason='end_of_turn', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ tool_calls=[] - โ”‚ โ”‚ โ”‚ โ”‚ ), - โ”‚ โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ โ”‚ step_type='inference', - โ”‚ โ”‚ โ”‚ โ”‚ turn_id='8b360202-f7cb-4786-baa9-166a1b46e2ca', - โ”‚ โ”‚ โ”‚ โ”‚ completed_at=datetime.datetime(2025, 4, 3, 1, 15, 21, 716174, tzinfo=TzInfo(UTC)), - โ”‚ โ”‚ โ”‚ โ”‚ started_at=datetime.datetime(2025, 4, 3, 1, 15, 14, 28823, tzinfo=TzInfo(UTC)) - โ”‚ โ”‚ โ”‚ ), - โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ step_type='inference' - โ”‚ โ”‚ ) - โ”‚ ) - ) - AgentTurnResponseStreamChunk( - โ”‚ event=TurnResponseEvent( - โ”‚ โ”‚ payload=AgentTurnResponseTurnCompletePayload( - โ”‚ โ”‚ โ”‚ event_type='turn_complete', - โ”‚ โ”‚ โ”‚ turn=Turn( - โ”‚ โ”‚ โ”‚ โ”‚ input_messages=[UserMessage(content='Who are you?', role='user', context=None)], - โ”‚ โ”‚ โ”‚ โ”‚ output_message=CompletionMessage( - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ content='As a conversational AI, I don\'t have a personal identity in the classical sense. I exist as a program running on computer servers, designed to process and respond to text-based inputs.\n\nI\'m an instance of a type of artificial intelligence called a "language model," which is trained on vast amounts of text data to generate human-like responses. My primary function is to understand and respond to natural language inputs, like our conversation right now.\n\nThink of me as a virtual assistant, a chatbot, or a conversational interface โ€“ I\'m here to provide information, answer questions, and engage in conversation to the best of my abilities. 
I don\'t have feelings, emotions, or consciousness like humans do, but I\'m designed to simulate human-like interactions to make our conversations feel more natural and helpful.\n\nSo, that\'s me in a nutshell! What can I help you with today?', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ role='assistant', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ stop_reason='end_of_turn', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ tool_calls=[] - โ”‚ โ”‚ โ”‚ โ”‚ ), - โ”‚ โ”‚ โ”‚ โ”‚ session_id='abd4afea-4324-43f4-9513-cfe3970d92e8', - โ”‚ โ”‚ โ”‚ โ”‚ started_at=datetime.datetime(2025, 4, 3, 1, 15, 14, 28722, tzinfo=TzInfo(UTC)), - โ”‚ โ”‚ โ”‚ โ”‚ steps=[ - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ InferenceStep( - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ api_model_response=CompletionMessage( - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ content='As a conversational AI, I don\'t have a personal identity in the classical sense. I exist as a program running on computer servers, designed to process and respond to text-based inputs.\n\nI\'m an instance of a type of artificial intelligence called a "language model," which is trained on vast amounts of text data to generate human-like responses. My primary function is to understand and respond to natural language inputs, like our conversation right now.\n\nThink of me as a virtual assistant, a chatbot, or a conversational interface โ€“ I\'m here to provide information, answer questions, and engage in conversation to the best of my abilities. I don\'t have feelings, emotions, or consciousness like humans do, but I\'m designed to simulate human-like interactions to make our conversations feel more natural and helpful.\n\nSo, that\'s me in a nutshell! 
What can I help you with today?', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ role='assistant', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ stop_reason='end_of_turn', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ tool_calls=[] - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ ), - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ step_id='69831607-fa75-424a-949b-e2049e3129d1', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ step_type='inference', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ turn_id='8b360202-f7cb-4786-baa9-166a1b46e2ca', - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ completed_at=datetime.datetime(2025, 4, 3, 1, 15, 21, 716174, tzinfo=TzInfo(UTC)), - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ started_at=datetime.datetime(2025, 4, 3, 1, 15, 14, 28823, tzinfo=TzInfo(UTC)) - โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ ) - โ”‚ โ”‚ โ”‚ โ”‚ ], - โ”‚ โ”‚ โ”‚ โ”‚ turn_id='8b360202-f7cb-4786-baa9-166a1b46e2ca', - โ”‚ โ”‚ โ”‚ โ”‚ completed_at=datetime.datetime(2025, 4, 3, 1, 15, 21, 727364, tzinfo=TzInfo(UTC)), - โ”‚ โ”‚ โ”‚ โ”‚ output_attachments=[] - โ”‚ โ”‚ โ”‚ ) - โ”‚ โ”‚ ) - โ”‚ ) - ) - - - Streaming with print helper... - inference> Dรฉjร  vu! You're asking me again! - - As I mentioned earlier, I'm a computer program designed to simulate conversation and answer questions. I don't have a personal identity or consciousness like a human would. I exist solely as a digital entity, running on computer servers and responding to inputs from users like you. - - I'm a type of artificial intelligence (AI) called a large language model, which means I've been trained on a massive dataset of text from various sources. This training allows me to understand and respond to a wide range of questions and topics. - - My purpose is to provide helpful and accurate information, answer questions, and assist users like you with tasks and conversations. I don't have personal preferences, emotions, or opinions like humans do. My goal is to be informative, neutral, and respectful in my responses. - - So, that's me in a nutshell! 
-``` -::: - -:::{tab-item} Build a RAG Agent - -For our last demo, we can build a RAG agent that can answer questions about the Torchtune project using the documents -in a vector database. -#### i. Create the Script -Create a file `rag_agent.py` and add the following code: - -```python -from llama_stack_client import LlamaStackClient -from llama_stack_client import Agent, AgentEventLogger -from llama_stack_client.types import Document -import uuid - -client = LlamaStackClient(base_url="http://localhost:8321") - -# Create a vector database instance -embed_lm = next(m for m in client.models.list() if m.model_type == "embedding") -embedding_model = embed_lm.identifier -vector_db_id = f"v{uuid.uuid4().hex}" -# The VectorDB API is deprecated; the server now returns its own authoritative ID. -# We capture the correct ID from the response's .identifier attribute. -vector_db_id = client.vector_dbs.register( - vector_db_id=vector_db_id, - embedding_model=embedding_model, -).identifier - -# Create Documents -urls = [ - "memory_optimizations.rst", - "chat.rst", - "llama3.rst", - "qat_finetune.rst", - "lora_finetune.rst", -] -documents = [ - Document( - document_id=f"num-{i}", - content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}", - mime_type="text/plain", - metadata={}, - ) - for i, url in enumerate(urls) -] - -# Insert documents -client.tool_runtime.rag_tool.insert( - documents=documents, - vector_db_id=vector_db_id, - chunk_size_in_tokens=512, -) - -# Get the model being served -llm = next( - m - for m in client.models.list() - if m.model_type == "llm" and m.provider_id == "ollama" -) -model = llm.identifier - -# Create the RAG agent -rag_agent = Agent( - client, - model=model, - instructions="You are a helpful assistant. 
Use the RAG tool to answer questions as needed.", - tools=[ - { - "name": "builtin::rag/knowledge_search", - "args": {"vector_db_ids": [vector_db_id]}, - } - ], -) - -session_id = rag_agent.create_session(session_name=f"s{uuid.uuid4().hex}") - -turns = ["what is torchtune", "tell me about dora"] - -for t in turns: - print("user>", t) - stream = rag_agent.create_turn( - messages=[{"role": "user", "content": t}], session_id=session_id, stream=True - ) - for event in AgentEventLogger().log(stream): - event.print() -``` -#### ii. Run the Script -Let's run the script using `uv` -```bash -uv run python rag_agent.py -``` - -```{dropdown} ๐Ÿ‘‹ Click here to see the sample output - user> what is torchtune - inference> [knowledge_search(query='TorchTune')] - tool_execution> Tool:knowledge_search Args:{'query': 'TorchTune'} - tool_execution> Tool:knowledge_search Response:[TextContentItem(text='knowledge_search tool found 5 chunks:\nBEGIN of knowledge_search tool results.\n', type='text'), TextContentItem(text='Result 1:\nDocument_id:num-1\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. ..., type='text'), TextContentItem(text='END of knowledge_search tool results.\n', type='text')] - inference> Here is a high-level overview of the text: - - **LoRA Finetuning with PyTorch Tune** - - PyTorch Tune provides a recipe for LoRA (Low-Rank Adaptation) finetuning, which is a technique to adapt pre-trained models to new tasks. The recipe uses the `lora_finetune_distributed` command. - ... - Overall, DORA is a powerful reinforcement learning algorithm that can learn complex tasks from human demonstrations. However, it requires careful consideration of the challenges and limitations to achieve optimal results. -``` -::: - -:::: - -**You're Ready to Build Your Own Apps!** - -Congrats! ๐Ÿฅณ Now you're ready to [build your own Llama Stack applications](../building_applications/index)! 
๐Ÿš€ diff --git a/docs/source/getting_started/index.md b/docs/source/getting_started/index.md deleted file mode 100644 index e941534c2..000000000 --- a/docs/source/getting_started/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# Getting Started - -```{include} quickstart.md -:start-after: ## Quickstart -``` - -```{include} libraries.md -:start-after: ## Libraries (SDKs) -``` - -```{include} detailed_tutorial.md -:start-after: ## Detailed Tutorial -``` diff --git a/docs/source/getting_started/libraries.md b/docs/source/getting_started/libraries.md deleted file mode 100644 index a54a9b8d3..000000000 --- a/docs/source/getting_started/libraries.md +++ /dev/null @@ -1,10 +0,0 @@ -## Libraries (SDKs) - -We have a number of client-side SDKs available for different languages. - -| **Language** | **Client SDK** | **Package** | -| :----: | :----: | :----: | -| Python | [llama-stack-client-python](https://github.com/meta-llama/llama-stack-client-python) | [![PyPI version](https://img.shields.io/pypi/v/llama_stack_client.svg)](https://pypi.org/project/llama_stack_client/) -| Swift | [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift/tree/latest-release) | [![Swift Package Index](https://img.shields.io/endpoint?url=https%3A%2F%2Fswiftpackageindex.com%2Fapi%2Fpackages%2Fmeta-llama%2Fllama-stack-client-swift%2Fbadge%3Ftype%3Dswift-versions)](https://swiftpackageindex.com/meta-llama/llama-stack-client-swift) -| Node | [llama-stack-client-node](https://github.com/meta-llama/llama-stack-client-node) | [![NPM version](https://img.shields.io/npm/v/llama-stack-client.svg)](https://npmjs.org/package/llama-stack-client) -| Kotlin | [llama-stack-client-kotlin](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release) | [![Maven version](https://img.shields.io/maven-central/v/com.llama.llamastack/llama-stack-client-kotlin)](https://central.sonatype.com/artifact/com.llama.llamastack/llama-stack-client-kotlin) \ No newline at end of file diff --git 
a/docs/source/getting_started/quickstart.md b/docs/source/getting_started/quickstart.md deleted file mode 100644 index 0136a7fba..000000000 --- a/docs/source/getting_started/quickstart.md +++ /dev/null @@ -1,77 +0,0 @@ -## Quickstart - -Get started with Llama Stack in minutes! - -Llama Stack is a stateful service with REST APIs to support the seamless transition of AI applications across different -environments. You can build and test using a local server first and deploy to a hosted endpoint for production. - -In this guide, we'll walk through how to build a RAG application locally using Llama Stack with [Ollama](https://ollama.com/) -as the inference [provider](../providers/inference/index) for a Llama Model. - -**๐Ÿ’ก Notebook Version:** You can also follow this quickstart guide in a Jupyter notebook format: [quick_start.ipynb](https://github.com/meta-llama/llama-stack/blob/main/docs/quick_start.ipynb) - -#### Step 1: Install and setup -1. Install [uv](https://docs.astral.sh/uv/) -2. Run inference on a Llama model with [Ollama](https://ollama.com/download) -```bash -ollama run llama3.2:3b --keepalive 60m -``` - -#### Step 2: Run the Llama Stack server - -We will use `uv` to run the Llama Stack server. -```bash -OLLAMA_URL=http://localhost:11434 \ - uv run --with llama-stack llama stack build --distro starter --image-type venv --run -``` -#### Step 3: Run the demo -Now open up a new terminal and copy the following script into a file named `demo_script.py`. - -```{literalinclude} ./demo_script.py -:language: python -``` -We will use `uv` to run the script -``` -uv run --with llama-stack-client,fire,requests demo_script.py -``` -And you should see output like below. -``` -rag_tool> Ingesting document: https://www.paulgraham.com/greatwork.html - -prompt> How do you do great work? 
- -inference> [knowledge_search(query="What is the key to doing great work")] - -tool_execution> Tool:knowledge_search Args:{'query': 'What is the key to doing great work'} - -tool_execution> Tool:knowledge_search Response:[TextContentItem(text='knowledge_search tool found 5 chunks:\nBEGIN of knowledge_search tool results.\n', type='text'), TextContentItem(text="Result 1:\nDocument_id:docum\nContent: work. Doing great work means doing something important\nso well that you expand people's ideas of what's possible. But\nthere's no threshold for importance. It's a matter of degree, and\noften hard to judge at the time anyway.\n", type='text'), TextContentItem(text="Result 2:\nDocument_id:docum\nContent: work. Doing great work means doing something important\nso well that you expand people's ideas of what's possible. But\nthere's no threshold for importance. It's a matter of degree, and\noften hard to judge at the time anyway.\n", type='text'), TextContentItem(text="Result 3:\nDocument_id:docum\nContent: work. Doing great work means doing something important\nso well that you expand people's ideas of what's possible. But\nthere's no threshold for importance. It's a matter of degree, and\noften hard to judge at the time anyway.\n", type='text'), TextContentItem(text="Result 4:\nDocument_id:docum\nContent: work. Doing great work means doing something important\nso well that you expand people's ideas of what's possible. But\nthere's no threshold for importance. It's a matter of degree, and\noften hard to judge at the time anyway.\n", type='text'), TextContentItem(text="Result 5:\nDocument_id:docum\nContent: work. Doing great work means doing something important\nso well that you expand people's ideas of what's possible. But\nthere's no threshold for importance. 
It's a matter of degree, and\noften hard to judge at the time anyway.\n", type='text'), TextContentItem(text='END of knowledge_search tool results.\n', type='text')] - -inference> Based on the search results, it seems that doing great work means doing something important so well that you expand people's ideas of what's possible. However, there is no clear threshold for importance, and it can be difficult to judge at the time. - -To further clarify, I would suggest that doing great work involves: - -* Completing tasks with high quality and attention to detail -* Expanding on existing knowledge or ideas -* Making a positive impact on others through your work -* Striving for excellence and continuous improvement - -Ultimately, great work is about making a meaningful contribution and leaving a lasting impression. -``` -Congratulations! You've successfully built your first RAG application using Llama Stack! ๐ŸŽ‰๐Ÿฅณ - -```{admonition} HuggingFace access -:class: tip - -If you are getting a **401 Client Error** from HuggingFace for the **all-MiniLM-L6-v2** model, try setting **HF_TOKEN** to a valid HuggingFace token in your environment -``` - -### Next Steps - -Now you're ready to dive deeper into Llama Stack! -- Explore the [Detailed Tutorial](./detailed_tutorial.md). -- Try the [Getting Started Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb). -- Browse more [Notebooks on GitHub](https://github.com/meta-llama/llama-stack/tree/main/docs/notebooks). -- Learn about Llama Stack [Concepts](../concepts/index.md). -- Discover how to [Build Llama Stacks](../distributions/index.md). -- Refer to our [References](../references/index.md) for details on the Llama CLI and Python SDK. -- Check out the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repository for example applications and tutorials. 
diff --git a/docs/source/index.md b/docs/source/index.md deleted file mode 100644 index c824ce94a..000000000 --- a/docs/source/index.md +++ /dev/null @@ -1,133 +0,0 @@ -# Llama Stack -Welcome to Llama Stack, the open-source framework for building generative AI applications. -```{admonition} Llama 4 is here! -:class: tip - -Check out [Getting Started with Llama 4](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started_llama4.ipynb) -``` -```{admonition} News -:class: tip - -Llama Stack {{ llama_stack_version }} is now available! See the {{ llama_stack_version_link }} for more details. -``` - - -## What is Llama Stack? - -Llama Stack defines and standardizes the core building blocks needed to bring generative AI applications to market. It provides a unified set of APIs with implementations from leading service providers, enabling seamless transitions between development and production environments. More specifically, it provides - -- **Unified API layer** for Inference, RAG, Agents, Tools, Safety, Evals, and Telemetry. -- **Plugin architecture** to support the rich ecosystem of implementations of the different APIs in different environments like local development, on-premises, cloud, and mobile. -- **Prepackaged verified distributions** which offer a one-stop solution for developers to get started quickly and reliably in any environment -- **Multiple developer interfaces** like CLI and SDKs for Python, Node, iOS, and Android -- **Standalone applications** as examples for how to build production-grade AI applications with Llama Stack - -```{image} ../_static/llama-stack.png -:alt: Llama Stack -:width: 400px -``` - -Our goal is to provide pre-packaged implementations (aka "distributions") which can be run in a variety of deployment environments. LlamaStack can assist you in your entire app development lifecycle - start iterating on local, mobile or desktop and seamlessly transition to on-prem or public cloud deployments. 
At every point in this transition, the same set of APIs and the same developer experience is available. - -## How does Llama Stack work? -Llama Stack consists of a [server](./distributions/index.md) (with multiple pluggable API [providers](./providers/index.md)) and Client SDKs (see below) meant to -be used in your applications. The server can be run in a variety of environments, including local (inline) -development, on-premises, and cloud. The client SDKs are available for Python, Swift, Node, and -Kotlin. - -## Quick Links - -- Ready to build? Check out the [Quick Start](getting_started/index) to get started. -- Want to contribute? See the [Contributing](contributing/index) guide. - -## Supported Llama Stack Implementations - -A number of "adapters" are available for some popular Inference and Vector Store providers. For other APIs (particularly Safety and Agents), we provide *reference implementations* you can use to get started. We expect this list to grow over time. We are slowly onboarding more providers to the ecosystem as we get more confidence in the APIs. 
- -**Inference API** -| **Provider** | **Environments** | -| :----: | :----: | -| Meta Reference | Single Node | -| Ollama | Single Node | -| Fireworks | Hosted | -| Together | Hosted | -| NVIDIA NIM | Hosted and Single Node | -| vLLM | Hosted and Single Node | -| TGI | Hosted and Single Node | -| AWS Bedrock | Hosted | -| Cerebras | Hosted | -| Groq | Hosted | -| SambaNova | Hosted | -| PyTorch ExecuTorch | On-device iOS, Android | -| OpenAI | Hosted | -| Anthropic | Hosted | -| Gemini | Hosted | -| WatsonX | Hosted | - -**Agents API** -| **Provider** | **Environments** | -| :----: | :----: | -| Meta Reference | Single Node | -| Fireworks | Hosted | -| Together | Hosted | -| PyTorch ExecuTorch | On-device iOS | - -**Vector IO API** -| **Provider** | **Environments** | -| :----: | :----: | -| FAISS | Single Node | -| SQLite-Vec | Single Node | -| Chroma | Hosted and Single Node | -| Milvus | Hosted and Single Node | -| Postgres (PGVector) | Hosted and Single Node | -| Weaviate | Hosted | -| Qdrant | Hosted and Single Node | - -**Safety API** -| **Provider** | **Environments** | -| :----: | :----: | -| Llama Guard | Depends on Inference Provider | -| Prompt Guard | Single Node | -| Code Scanner | Single Node | -| AWS Bedrock | Hosted | - -**Post Training API** -| **Provider** | **Environments** | -| :----: | :----: | -| Meta Reference | Single Node | -| HuggingFace | Single Node | -| TorchTune | Single Node | -| NVIDIA NEMO | Hosted | - -**Eval API** -| **Provider** | **Environments** | -| :----: | :----: | -| Meta Reference | Single Node | -| NVIDIA NEMO | Hosted | - -**Telemetry API** -| **Provider** | **Environments** | -| :----: | :----: | -| Meta Reference | Single Node | - -**Tool Runtime API** -| **Provider** | **Environments** | -| :----: | :----: | -| Brave Search | Hosted | -| RAG Runtime | Single Node | - -```{toctree} -:hidden: -:maxdepth: 3 - -self -getting_started/index -concepts/index -providers/index -distributions/index -advanced_apis/index 
-building_applications/index -deploying/index -contributing/index -references/index -``` diff --git a/docs/source/providers/agents/index.md b/docs/source/providers/agents/index.md deleted file mode 100644 index a2c48d4b9..000000000 --- a/docs/source/providers/agents/index.md +++ /dev/null @@ -1,22 +0,0 @@ -# Agents - -## Overview - -Agents API for creating and interacting with agentic systems. - - Main functionalities provided by this API: - - Create agents with specific instructions and ability to use tools. - - Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn". - - Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details). - - Agents can be provided with various shields (see the Safety API for more details). - - Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details. - -This section contains documentation for all available providers for the **agents** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_meta-reference -``` diff --git a/docs/source/providers/agents/inline_meta-reference.md b/docs/source/providers/agents/inline_meta-reference.md deleted file mode 100644 index 5f64f79e1..000000000 --- a/docs/source/providers/agents/inline_meta-reference.md +++ /dev/null @@ -1,25 +0,0 @@ -# inline::meta-reference - -## Description - -Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `persistence_store` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | -| `responses_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/agents_store.db -responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/responses_store.db - -``` - diff --git a/docs/source/providers/batches/index.md b/docs/source/providers/batches/index.md deleted file mode 100644 index d6d2fa9a3..000000000 --- a/docs/source/providers/batches/index.md +++ /dev/null @@ -1,24 +0,0 @@ -# Batches - -## Overview - -The Batches API enables efficient processing of multiple requests in a single operation, - particularly useful for processing large datasets, batch evaluation workflows, and - cost-effective inference at scale. - - The API is designed to allow use of openai client libraries for seamless integration. - - This API provides the following extensions: - - idempotent batch creation - - Note: This API is currently under active development and may undergo changes. - -This section contains documentation for all available providers for the **batches** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_reference -``` diff --git a/docs/source/providers/batches/inline_reference.md b/docs/source/providers/batches/inline_reference.md deleted file mode 100644 index a58e5124d..000000000 --- a/docs/source/providers/batches/inline_reference.md +++ /dev/null @@ -1,23 +0,0 @@ -# inline::reference - -## Description - -Reference implementation of batches API with KVStore persistence. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Configuration for the key-value store backend. | -| `max_concurrent_batches` | `` | No | 1 | Maximum number of concurrent batches to process simultaneously. | -| `max_concurrent_requests_per_batch` | `` | No | 10 | Maximum number of concurrent requests to process per batch. | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/batches.db - -``` - diff --git a/docs/source/providers/datasetio/index.md b/docs/source/providers/datasetio/index.md deleted file mode 100644 index 94a97e2ed..000000000 --- a/docs/source/providers/datasetio/index.md +++ /dev/null @@ -1,15 +0,0 @@ -# Datasetio - -## Overview - -This section contains documentation for all available providers for the **datasetio** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_localfs -remote_huggingface -remote_nvidia -``` diff --git a/docs/source/providers/datasetio/inline_localfs.md b/docs/source/providers/datasetio/inline_localfs.md deleted file mode 100644 index 87a0c795c..000000000 --- a/docs/source/providers/datasetio/inline_localfs.md +++ /dev/null @@ -1,21 +0,0 @@ -# inline::localfs - -## Description - -Local filesystem-based dataset I/O provider for reading and writing datasets to local storage. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/localfs_datasetio.db - -``` - diff --git a/docs/source/providers/datasetio/remote_huggingface.md b/docs/source/providers/datasetio/remote_huggingface.md deleted file mode 100644 index 3711f7396..000000000 --- a/docs/source/providers/datasetio/remote_huggingface.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::huggingface - -## Description - -HuggingFace datasets provider for accessing and managing datasets from the HuggingFace Hub. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/huggingface_datasetio.db - -``` - diff --git a/docs/source/providers/datasetio/remote_nvidia.md b/docs/source/providers/datasetio/remote_nvidia.md deleted file mode 100644 index 1ad1cdb32..000000000 --- a/docs/source/providers/datasetio/remote_nvidia.md +++ /dev/null @@ -1,25 +0,0 @@ -# remote::nvidia - -## Description - -NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The NVIDIA API key. 
| -| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. | -| `project_id` | `str \| None` | No | test-project | The NVIDIA project ID. | -| `datasets_url` | `` | No | http://nemo.test | Base URL for the NeMo Dataset API | - -## Sample Configuration - -```yaml -api_key: ${env.NVIDIA_API_KEY:=} -dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:=default} -project_id: ${env.NVIDIA_PROJECT_ID:=test-project} -datasets_url: ${env.NVIDIA_DATASETS_URL:=http://nemo.test} - -``` - diff --git a/docs/source/providers/eval/index.md b/docs/source/providers/eval/index.md deleted file mode 100644 index a14fada1d..000000000 --- a/docs/source/providers/eval/index.md +++ /dev/null @@ -1,16 +0,0 @@ -# Eval - -## Overview - -Llama Stack Evaluation API for running evaluations on model and agent candidates. - -This section contains documentation for all available providers for the **eval** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_meta-reference -remote_nvidia -``` diff --git a/docs/source/providers/eval/inline_meta-reference.md b/docs/source/providers/eval/inline_meta-reference.md deleted file mode 100644 index 606883c72..000000000 --- a/docs/source/providers/eval/inline_meta-reference.md +++ /dev/null @@ -1,21 +0,0 @@ -# inline::meta-reference - -## Description - -Meta's reference implementation of evaluation tasks with support for multiple languages and evaluation metrics. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/meta_reference_eval.db - -``` - diff --git a/docs/source/providers/eval/remote_nvidia.md b/docs/source/providers/eval/remote_nvidia.md deleted file mode 100644 index cb764b511..000000000 --- a/docs/source/providers/eval/remote_nvidia.md +++ /dev/null @@ -1,19 +0,0 @@ -# remote::nvidia - -## Description - -NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `evaluator_url` | `` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service | - -## Sample Configuration - -```yaml -evaluator_url: ${env.NVIDIA_EVALUATOR_URL:=http://localhost:7331} - -``` - diff --git a/docs/source/providers/external/external-providers-guide.md b/docs/source/providers/external/external-providers-guide.md deleted file mode 100644 index e2d4ebea9..000000000 --- a/docs/source/providers/external/external-providers-guide.md +++ /dev/null @@ -1,286 +0,0 @@ -# Creating External Providers - -## Configuration - -To enable external providers, you need to add `module` into your build yaml, allowing Llama Stack to install the required package corresponding to the external provider. - -an example entry in your build.yaml should look like: - -``` -- provider_type: remote::ramalama - module: ramalama_stack -``` - -Additionally you can configure the `external_providers_dir` in your Llama Stack configuration. This method is in the process of being deprecated in favor of the `module` method. 
If using this method, the external provider directory should contain your external provider specifications: - -```yaml -external_providers_dir: ~/.llama/providers.d/ -``` - -## Directory Structure - -The external providers directory should follow this structure: - -``` -providers.d/ - remote/ - inference/ - custom_ollama.yaml - vllm.yaml - vector_io/ - qdrant.yaml - safety/ - llama-guard.yaml - inline/ - inference/ - custom_ollama.yaml - vllm.yaml - vector_io/ - qdrant.yaml - safety/ - llama-guard.yaml -``` - -Each YAML file in these directories defines a provider specification for that particular API. - -## Provider Types - -Llama Stack supports two types of external providers: - -1. **Remote Providers**: Providers that communicate with external services (e.g., cloud APIs) -2. **Inline Providers**: Providers that run locally within the Llama Stack process - -### Remote Provider Specification - -Remote providers are used when you need to communicate with external services. Here's an example for a custom Ollama provider: - -```yaml -adapter: - adapter_type: custom_ollama - pip_packages: - - ollama - - aiohttp - config_class: llama_stack_ollama_provider.config.OllamaImplConfig - module: llama_stack_ollama_provider -api_dependencies: [] -optional_api_dependencies: [] -``` - -#### Adapter Configuration - -The `adapter` section defines how to load and configure the provider: - -- `adapter_type`: A unique identifier for this adapter -- `pip_packages`: List of Python packages required by the provider -- `config_class`: The full path to the configuration class -- `module`: The Python module containing the provider implementation - -### Inline Provider Specification - -Inline providers run locally within the Llama Stack process. 
Here's an example for a custom vector store provider: - -```yaml -module: llama_stack_vector_provider -config_class: llama_stack_vector_provider.config.VectorStoreConfig -pip_packages: - - faiss-cpu - - numpy -api_dependencies: - - inference -optional_api_dependencies: - - vector_io -provider_data_validator: llama_stack_vector_provider.validator.VectorStoreValidator -container_image: custom-vector-store:latest # optional -``` - -#### Inline Provider Fields - -- `module`: The Python module containing the provider implementation -- `config_class`: The full path to the configuration class -- `pip_packages`: List of Python packages required by the provider -- `api_dependencies`: List of Llama Stack APIs that this provider depends on -- `optional_api_dependencies`: List of optional Llama Stack APIs that this provider can use -- `provider_data_validator`: Optional validator for provider data -- `container_image`: Optional container image to use instead of pip packages - -## Required Fields - -### All Providers - -All providers must contain a `get_provider_spec` function in their `provider` module. This is a standardized structure that Llama Stack expects and is necessary for getting things such as the config class. The `get_provider_spec` method returns a structure identical to the `adapter`. An example function may look like: - -```python -from llama_stack.providers.datatypes import ( - ProviderSpec, - Api, - AdapterSpec, - remote_provider_spec, -) - - -def get_provider_spec() -> ProviderSpec: - return remote_provider_spec( - api=Api.inference, - adapter=AdapterSpec( - adapter_type="ramalama", - pip_packages=["ramalama>=0.8.5", "pymilvus"], - config_class="ramalama_stack.config.RamalamaImplConfig", - module="ramalama_stack", - ), - ) -``` - -#### Remote Providers - -Remote providers must expose a `get_adapter_impl()` function in their module that takes two arguments: -1. `config`: An instance of the provider's config class -2. 
`deps`: A dictionary of API dependencies - -This function must return an instance of the provider's adapter class that implements the required protocol for the API. - -Example: -```python -async def get_adapter_impl( - config: OllamaImplConfig, deps: Dict[Api, Any] -) -> OllamaInferenceAdapter: - return OllamaInferenceAdapter(config) -``` - -#### Inline Providers - -Inline providers must expose a `get_provider_impl()` function in their module that takes two arguments: -1. `config`: An instance of the provider's config class -2. `deps`: A dictionary of API dependencies - -Example: -```python -async def get_provider_impl( - config: VectorStoreConfig, deps: Dict[Api, Any] -) -> VectorStoreImpl: - impl = VectorStoreImpl(config, deps[Api.inference]) - await impl.initialize() - return impl -``` - -## Dependencies - -The provider package must be installed on the system. For example: - -```bash -$ uv pip show llama-stack-ollama-provider -Name: llama-stack-ollama-provider -Version: 0.1.0 -Location: /path/to/venv/lib/python3.10/site-packages -``` - -## Best Practices - -1. **Package Naming**: Use the prefix `llama-stack-provider-` for your provider packages to make them easily identifiable. - -2. **Version Management**: Keep your provider package versioned and compatible with the Llama Stack version you're using. - -3. **Dependencies**: Only include the minimum required dependencies in your provider package. - -4. **Documentation**: Include clear documentation in your provider package about: - - Installation requirements - - Configuration options - - Usage examples - - Any limitations or known issues - -5. **Testing**: Include tests in your provider package to ensure it works correctly with Llama Stack. -You can refer to the [integration tests -guide](https://github.com/meta-llama/llama-stack/blob/main/tests/integration/README.md) for more -information. Execute the test for the Provider type you are developing. 
- -## Troubleshooting - -If your external provider isn't being loaded: - -1. Check that `module` points to a published pip package with a top level `provider` module including `get_provider_spec`. -1. Check that the `external_providers_dir` path is correct and accessible. -2. Verify that the YAML files are properly formatted. -3. Ensure all required Python packages are installed. -4. Check the Llama Stack server logs for any error messages - turn on debug logging to get more - information using `LLAMA_STACK_LOGGING=all=debug`. -5. Verify that the provider package is installed in your Python environment if using `external_providers_dir`. - -## Examples - -### Example using `external_providers_dir`: Custom Ollama Provider - -Here's a complete example of creating and using a custom Ollama provider: - -1. First, create the provider package: - -```bash -mkdir -p llama-stack-provider-ollama -cd llama-stack-provider-ollama -git init -uv init -``` - -2. Edit `pyproject.toml`: - -```toml -[project] -name = "llama-stack-provider-ollama" -version = "0.1.0" -description = "Ollama provider for Llama Stack" -requires-python = ">=3.12" -dependencies = ["llama-stack", "pydantic", "ollama", "aiohttp"] -``` - -3. Create the provider specification: - -```yaml -# ~/.llama/providers.d/remote/inference/custom_ollama.yaml -adapter: - adapter_type: custom_ollama - pip_packages: ["ollama", "aiohttp"] - config_class: llama_stack_provider_ollama.config.OllamaImplConfig - module: llama_stack_provider_ollama -api_dependencies: [] -optional_api_dependencies: [] -``` - -4. Install the provider: - -```bash -uv pip install -e . -``` - -5. Configure Llama Stack to use external providers: - -```yaml -external_providers_dir: ~/.llama/providers.d/ -``` - -The provider will now be available in Llama Stack with the type `remote::custom_ollama`. 
- - -### Example using `module`: ramalama-stack - -[ramalama-stack](https://github.com/containers/ramalama-stack) is a recognized external provider that supports installation via module. - -To install Llama Stack with this external provider a user can provider the following build.yaml: - -```yaml -version: 2 -distribution_spec: - description: Use (an external) Ramalama server for running LLM inference - container_image: null - providers: - inference: - - provider_type: remote::ramalama - module: ramalama_stack==0.3.0a0 -image_type: venv -image_name: null -external_providers_dir: null -additional_pip_packages: -- aiosqlite -- sqlalchemy[asyncio] -``` - -No other steps are required other than `llama stack build` and `llama stack run`. The build process will use `module` to install all of the provider dependencies, retrieve the spec, etc. - -The provider will now be available in Llama Stack with the type `remote::ramalama`. \ No newline at end of file diff --git a/docs/source/providers/external/external-providers-list.md b/docs/source/providers/external/external-providers-list.md deleted file mode 100644 index 45fcc50fb..000000000 --- a/docs/source/providers/external/external-providers-list.md +++ /dev/null @@ -1,11 +0,0 @@ -# Known External Providers - -Here's a list of known external providers that you can use with Llama Stack: - -| Name | Description | API | Type | Repository | -|------|-------------|-----|------|------------| -| KubeFlow Training | Train models with KubeFlow | Post Training | Remote | [llama-stack-provider-kft](https://github.com/opendatahub-io/llama-stack-provider-kft) | -| KubeFlow Pipelines | Train models with KubeFlow Pipelines | Post Training | Inline **and** Remote | [llama-stack-provider-kfp-trainer](https://github.com/opendatahub-io/llama-stack-provider-kfp-trainer) | -| RamaLama | Inference models with RamaLama | Inference | Remote | [ramalama-stack](https://github.com/containers/ramalama-stack) | -| TrustyAI LM-Eval | Evaluate models 
with TrustyAI LM-Eval | Eval | Remote | [llama-stack-provider-lmeval](https://github.com/trustyai-explainability/llama-stack-provider-lmeval) | -| MongoDB | VectorIO with MongoDB | Vector_IO | Remote | [mongodb-llama-stack](https://github.com/mongodb-partners/mongodb-llama-stack) | diff --git a/docs/source/providers/external/index.md b/docs/source/providers/external/index.md deleted file mode 100644 index 989a7f5b8..000000000 --- a/docs/source/providers/external/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# External Providers - -Llama Stack supports external providers that live outside of the main codebase. This allows you to: -- Create and maintain your own providers independently -- Share providers with others without contributing to the main codebase -- Keep provider-specific code separate from the core Llama Stack code - -```{toctree} -:maxdepth: 1 - -external-providers-list -external-providers-guide -``` \ No newline at end of file diff --git a/docs/source/providers/files/index.md b/docs/source/providers/files/index.md deleted file mode 100644 index 128953223..000000000 --- a/docs/source/providers/files/index.md +++ /dev/null @@ -1,14 +0,0 @@ -# Files - -## Overview - -This section contains documentation for all available providers for the **files** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_localfs -remote_s3 -``` diff --git a/docs/source/providers/files/inline_localfs.md b/docs/source/providers/files/inline_localfs.md deleted file mode 100644 index 09267b7d8..000000000 --- a/docs/source/providers/files/inline_localfs.md +++ /dev/null @@ -1,24 +0,0 @@ -# inline::localfs - -## Description - -Local filesystem-based file storage provider for managing files and documents locally. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `storage_dir` | `` | No | | Directory to store uploaded files | -| `metadata_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | SQL store configuration for file metadata | -| `ttl_secs` | `` | No | 31536000 | | - -## Sample Configuration - -```yaml -storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/dummy/files} -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/files_metadata.db - -``` - diff --git a/docs/source/providers/files/remote_s3.md b/docs/source/providers/files/remote_s3.md deleted file mode 100644 index 2e3cebabd..000000000 --- a/docs/source/providers/files/remote_s3.md +++ /dev/null @@ -1,33 +0,0 @@ -# remote::s3 - -## Description - -AWS S3-based file storage provider for scalable cloud file management with metadata persistence. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `bucket_name` | `` | No | | S3 bucket name to store files | -| `region` | `` | No | us-east-1 | AWS region where the bucket is located | -| `aws_access_key_id` | `str \| None` | No | | AWS access key ID (optional if using IAM roles) | -| `aws_secret_access_key` | `str \| None` | No | | AWS secret access key (optional if using IAM roles) | -| `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) 
| -| `auto_create_bucket` | `` | No | False | Automatically create the S3 bucket if it doesn't exist | -| `metadata_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | SQL store configuration for file metadata | - -## Sample Configuration - -```yaml -bucket_name: ${env.S3_BUCKET_NAME} -region: ${env.AWS_REGION:=us-east-1} -aws_access_key_id: ${env.AWS_ACCESS_KEY_ID:=} -aws_secret_access_key: ${env.AWS_SECRET_ACCESS_KEY:=} -endpoint_url: ${env.S3_ENDPOINT_URL:=} -auto_create_bucket: ${env.S3_AUTO_CREATE_BUCKET:=false} -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/s3_files_metadata.db - -``` - diff --git a/docs/source/providers/index.md b/docs/source/providers/index.md deleted file mode 100644 index 3f66ecd0c..000000000 --- a/docs/source/providers/index.md +++ /dev/null @@ -1,28 +0,0 @@ -# API Providers - -The goal of Llama Stack is to build an ecosystem where users can easily swap out different implementations for the same API. Examples for these include: -- LLM inference providers (e.g., Meta Reference, Ollama, Fireworks, Together, AWS Bedrock, Groq, Cerebras, SambaNova, vLLM, OpenAI, Anthropic, Gemini, WatsonX, etc.), -- Vector databases (e.g., FAISS, SQLite-Vec, ChromaDB, Weaviate, Qdrant, Milvus, PGVector, etc.), -- Safety providers (e.g., Meta's Llama Guard, Prompt Guard, Code Scanner, AWS Bedrock Guardrails, etc.), -- Tool Runtime providers (e.g., RAG Runtime, Brave Search, etc.) - -Providers come in two flavors: -- **Remote**: the provider runs as a separate service external to the Llama Stack codebase. Llama Stack contains a small amount of adapter code. -- **Inline**: the provider is fully specified and implemented within the Llama Stack codebase. It may be a simple wrapper around an existing library, or a full fledged implementation within Llama Stack. 
- -Importantly, Llama Stack always strives to provide at least one fully inline provider for each API so you can iterate on a fully featured environment locally. - -```{toctree} -:maxdepth: 1 - -external/index -openai -inference/index -agents/index -datasetio/index -safety/index -telemetry/index -vector_io/index -tool_runtime/index -files/index -``` diff --git a/docs/source/providers/inference/index.md b/docs/source/providers/inference/index.md deleted file mode 100644 index c5720daef..000000000 --- a/docs/source/providers/inference/index.md +++ /dev/null @@ -1,42 +0,0 @@ -# Inference - -## Overview - -Llama Stack Inference API for generating completions, chat completions, and embeddings. - - This API provides the raw interface to the underlying models. Two kinds of models are supported: - - LLM models: these models generate "raw" and "chat" (conversational) completions. - - Embedding models: these models generate embeddings to be used for semantic search. - -This section contains documentation for all available providers for the **inference** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_meta-reference -inline_sentence-transformers -remote_anthropic -remote_azure -remote_bedrock -remote_cerebras -remote_databricks -remote_fireworks -remote_gemini -remote_groq -remote_hf_endpoint -remote_hf_serverless -remote_llama-openai-compat -remote_nvidia -remote_ollama -remote_openai -remote_passthrough -remote_runpod -remote_sambanova -remote_tgi -remote_together -remote_vertexai -remote_vllm -remote_watsonx -``` diff --git a/docs/source/providers/inference/inline_meta-reference.md b/docs/source/providers/inference/inline_meta-reference.md deleted file mode 100644 index eca12a839..000000000 --- a/docs/source/providers/inference/inline_meta-reference.md +++ /dev/null @@ -1,32 +0,0 @@ -# inline::meta-reference - -## Description - -Meta's reference implementation of inference with support for various model formats and optimization techniques. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `model` | `str \| None` | No | | | -| `torch_seed` | `int \| None` | No | | | -| `max_seq_len` | `` | No | 4096 | | -| `max_batch_size` | `` | No | 1 | | -| `model_parallel_size` | `int \| None` | No | | | -| `create_distributed_process_group` | `` | No | True | | -| `checkpoint_dir` | `str \| None` | No | | | -| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig, annotation=NoneType, required=True, discriminator='type'` | No | | | - -## Sample Configuration - -```yaml -model: Llama3.2-3B-Instruct -checkpoint_dir: ${env.CHECKPOINT_DIR:=null} -quantization: - type: ${env.QUANTIZATION_TYPE:=bf16} -model_parallel_size: ${env.MODEL_PARALLEL_SIZE:=0} -max_batch_size: ${env.MAX_BATCH_SIZE:=1} -max_seq_len: ${env.MAX_SEQ_LEN:=4096} - -``` - diff --git a/docs/source/providers/inference/inline_sentence-transformers.md b/docs/source/providers/inference/inline_sentence-transformers.md deleted file mode 100644 index 57ec7f7d0..000000000 --- a/docs/source/providers/inference/inline_sentence-transformers.md +++ /dev/null @@ -1,13 +0,0 @@ -# inline::sentence-transformers - -## Description - -Sentence Transformers inference provider for text embeddings and similarity search. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/inference/remote_anthropic.md b/docs/source/providers/inference/remote_anthropic.md deleted file mode 100644 index 4680608b1..000000000 --- a/docs/source/providers/inference/remote_anthropic.md +++ /dev/null @@ -1,19 +0,0 @@ -# remote::anthropic - -## Description - -Anthropic inference provider for accessing Claude models and Anthropic's AI services. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | API key for Anthropic models | - -## Sample Configuration - -```yaml -api_key: ${env.ANTHROPIC_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_azure.md b/docs/source/providers/inference/remote_azure.md deleted file mode 100644 index 19f8f418b..000000000 --- a/docs/source/providers/inference/remote_azure.md +++ /dev/null @@ -1,29 +0,0 @@ -# remote::azure - -## Description - - -Azure OpenAI inference provider for accessing GPT models and other Azure services. -Provider documentation -https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `` | No | | Azure API key for Azure | -| `api_base` | `` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) | -| `api_version` | `str \| None` | No | | Azure API version for Azure (e.g., 2024-12-01-preview) | -| `api_type` | `str \| None` | No | azure | Azure API type for Azure (e.g., azure) | - -## Sample Configuration - -```yaml -api_key: ${env.AZURE_API_KEY:=} -api_base: ${env.AZURE_API_BASE:=} -api_version: ${env.AZURE_API_VERSION:=} -api_type: ${env.AZURE_API_TYPE:=} - -``` - diff --git a/docs/source/providers/inference/remote_bedrock.md b/docs/source/providers/inference/remote_bedrock.md deleted file mode 100644 index 216dd4adb..000000000 --- a/docs/source/providers/inference/remote_bedrock.md +++ /dev/null @@ -1,28 +0,0 @@ -# remote::bedrock - -## Description - -AWS Bedrock inference provider for accessing various AI models through AWS's managed service. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID | -| `aws_secret_access_key` | `str \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | -| `aws_session_token` | `str \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN | -| `region_name` | `str \| None` | No | | The default AWS Region to use, for example, us-west-1 or us-west-2.Default use environment variable: AWS_DEFAULT_REGION | -| `profile_name` | `str \| None` | No | | The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE | -| `total_max_attempts` | `int \| None` | No | | An integer representing the maximum number of attempts that will be made for a single request, including the initial attempt. Default use environment variable: AWS_MAX_ATTEMPTS | -| `retry_mode` | `str \| None` | No | | A string representing the type of retries Boto3 will perform.Default use environment variable: AWS_RETRY_MODE | -| `connect_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds. | -| `read_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to read from a connection.The default is 60 seconds. | -| `session_ttl` | `int \| None` | No | 3600 | The time in seconds till a session expires. The default is 3600 seconds (1 hour). 
| - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/inference/remote_cerebras.md b/docs/source/providers/inference/remote_cerebras.md deleted file mode 100644 index 7aa03dd0b..000000000 --- a/docs/source/providers/inference/remote_cerebras.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::cerebras - -## Description - -Cerebras inference provider for running models on Cerebras Cloud platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `base_url` | `` | No | https://api.cerebras.ai | Base URL for the Cerebras API | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | Cerebras API Key | - -## Sample Configuration - -```yaml -base_url: https://api.cerebras.ai -api_key: ${env.CEREBRAS_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_databricks.md b/docs/source/providers/inference/remote_databricks.md deleted file mode 100644 index d0ac89055..000000000 --- a/docs/source/providers/inference/remote_databricks.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::databricks - -## Description - -Databricks inference provider for running models on Databricks' unified analytics platform. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | | The URL for the Databricks model serving endpoint | -| `api_token` | `` | No | | The Databricks API token | - -## Sample Configuration - -```yaml -url: ${env.DATABRICKS_URL:=} -api_token: ${env.DATABRICKS_API_TOKEN:=} - -``` - diff --git a/docs/source/providers/inference/remote_fireworks.md b/docs/source/providers/inference/remote_fireworks.md deleted file mode 100644 index 28dbf1d3f..000000000 --- a/docs/source/providers/inference/remote_fireworks.md +++ /dev/null @@ -1,22 +0,0 @@ -# remote::fireworks - -## Description - -Fireworks AI inference provider for Llama models and other AI models on the Fireworks platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | -| `url` | `` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Fireworks.ai API Key | - -## Sample Configuration - -```yaml -url: https://api.fireworks.ai/inference/v1 -api_key: ${env.FIREWORKS_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_gemini.md b/docs/source/providers/inference/remote_gemini.md deleted file mode 100644 index 14b3223f2..000000000 --- a/docs/source/providers/inference/remote_gemini.md +++ /dev/null @@ -1,19 +0,0 @@ -# remote::gemini - -## Description - -Google Gemini inference provider for accessing Gemini models and Google's AI services. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | API key for Gemini models | - -## Sample Configuration - -```yaml -api_key: ${env.GEMINI_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_groq.md b/docs/source/providers/inference/remote_groq.md deleted file mode 100644 index 68bd4d5b3..000000000 --- a/docs/source/providers/inference/remote_groq.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::groq - -## Description - -Groq inference provider for ultra-fast inference using Groq's LPU technology. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The Groq API key | -| `url` | `` | No | https://api.groq.com | The URL for the Groq AI server | - -## Sample Configuration - -```yaml -url: https://api.groq.com -api_key: ${env.GROQ_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_hf_endpoint.md b/docs/source/providers/inference/remote_hf_endpoint.md deleted file mode 100644 index 8aaf13476..000000000 --- a/docs/source/providers/inference/remote_hf_endpoint.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::hf::endpoint - -## Description - -HuggingFace Inference Endpoints provider for dedicated model serving. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `endpoint_name` | `` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. 
| -| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) | - -## Sample Configuration - -```yaml -endpoint_name: ${env.INFERENCE_ENDPOINT_NAME} -api_token: ${env.HF_API_TOKEN} - -``` - diff --git a/docs/source/providers/inference/remote_hf_serverless.md b/docs/source/providers/inference/remote_hf_serverless.md deleted file mode 100644 index 6764590b8..000000000 --- a/docs/source/providers/inference/remote_hf_serverless.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::hf::serverless - -## Description - -HuggingFace Inference API serverless provider for on-demand model inference. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `huggingface_repo` | `` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') | -| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) | - -## Sample Configuration - -```yaml -huggingface_repo: ${env.INFERENCE_MODEL} -api_token: ${env.HF_API_TOKEN} - -``` - diff --git a/docs/source/providers/inference/remote_llama-openai-compat.md b/docs/source/providers/inference/remote_llama-openai-compat.md deleted file mode 100644 index 5c97aebc3..000000000 --- a/docs/source/providers/inference/remote_llama-openai-compat.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::llama-openai-compat - -## Description - -Llama OpenAI-compatible provider for using Llama models with OpenAI API format. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The Llama API key | -| `openai_compat_api_base` | `` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server | - -## Sample Configuration - -```yaml -openai_compat_api_base: https://api.llama.com/compat/v1/ -api_key: ${env.LLAMA_API_KEY} - -``` - diff --git a/docs/source/providers/inference/remote_nvidia.md b/docs/source/providers/inference/remote_nvidia.md deleted file mode 100644 index 1b12839df..000000000 --- a/docs/source/providers/inference/remote_nvidia.md +++ /dev/null @@ -1,24 +0,0 @@ -# remote::nvidia - -## Description - -NVIDIA inference provider for accessing NVIDIA NIM models and AI services. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The NVIDIA API key, only needed of using the hosted service | -| `timeout` | `` | No | 60 | Timeout for the HTTP requests | -| `append_api_version` | `` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. | - -## Sample Configuration - -```yaml -url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} -api_key: ${env.NVIDIA_API_KEY:=} -append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - -``` - diff --git a/docs/source/providers/inference/remote_ollama.md b/docs/source/providers/inference/remote_ollama.md deleted file mode 100644 index f9f0a7622..000000000 --- a/docs/source/providers/inference/remote_ollama.md +++ /dev/null @@ -1,20 +0,0 @@ -# remote::ollama - -## Description - -Ollama inference provider for running local models through the Ollama runtime. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | http://localhost:11434 | | -| `refresh_models` | `` | No | False | Whether to refresh models periodically | - -## Sample Configuration - -```yaml -url: ${env.OLLAMA_URL:=http://localhost:11434} - -``` - diff --git a/docs/source/providers/inference/remote_openai.md b/docs/source/providers/inference/remote_openai.md deleted file mode 100644 index 18a74caea..000000000 --- a/docs/source/providers/inference/remote_openai.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::openai - -## Description - -OpenAI inference provider for accessing GPT models and other OpenAI services. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | API key for OpenAI models | -| `base_url` | `` | No | https://api.openai.com/v1 | Base URL for OpenAI API | - -## Sample Configuration - -```yaml -api_key: ${env.OPENAI_API_KEY:=} -base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1} - -``` - diff --git a/docs/source/providers/inference/remote_passthrough.md b/docs/source/providers/inference/remote_passthrough.md deleted file mode 100644 index 9005e5339..000000000 --- a/docs/source/providers/inference/remote_passthrough.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::passthrough - -## Description - -Passthrough inference provider for connecting to any external inference service not directly supported. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | | The URL for the passthrough endpoint | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | API Key for the passthrouth endpoint | - -## Sample Configuration - -```yaml -url: ${env.PASSTHROUGH_URL} -api_key: ${env.PASSTHROUGH_API_KEY} - -``` - diff --git a/docs/source/providers/inference/remote_runpod.md b/docs/source/providers/inference/remote_runpod.md deleted file mode 100644 index ff1c0bcb6..000000000 --- a/docs/source/providers/inference/remote_runpod.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::runpod - -## Description - -RunPod inference provider for running models on RunPod's cloud GPU platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `str \| None` | No | | The URL for the Runpod model serving endpoint | -| `api_token` | `str \| None` | No | | The API token | - -## Sample Configuration - -```yaml -url: ${env.RUNPOD_URL:=} -api_token: ${env.RUNPOD_API_TOKEN} - -``` - diff --git a/docs/source/providers/inference/remote_sambanova-openai-compat.md b/docs/source/providers/inference/remote_sambanova-openai-compat.md deleted file mode 100644 index 3074a5885..000000000 --- a/docs/source/providers/inference/remote_sambanova-openai-compat.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::sambanova-openai-compat - -## Description - -SambaNova OpenAI-compatible provider for using SambaNova models with OpenAI API format. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The SambaNova API key | -| `openai_compat_api_base` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova API server | - -## Sample Configuration - -```yaml -openai_compat_api_base: https://api.sambanova.ai/v1 -api_key: ${env.SAMBANOVA_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_sambanova.md b/docs/source/providers/inference/remote_sambanova.md deleted file mode 100644 index 9d15c97d5..000000000 --- a/docs/source/providers/inference/remote_sambanova.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::sambanova - -## Description - -SambaNova inference provider for running models on SambaNova's dataflow architecture. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key | - -## Sample Configuration - -```yaml -url: https://api.sambanova.ai/v1 -api_key: ${env.SAMBANOVA_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_tgi.md b/docs/source/providers/inference/remote_tgi.md deleted file mode 100644 index 104bb4aab..000000000 --- a/docs/source/providers/inference/remote_tgi.md +++ /dev/null @@ -1,19 +0,0 @@ -# remote::tgi - -## Description - -Text Generation Inference (TGI) provider for HuggingFace model serving. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | | The URL for the TGI serving endpoint | - -## Sample Configuration - -```yaml -url: ${env.TGI_URL:=} - -``` - diff --git a/docs/source/providers/inference/remote_together.md b/docs/source/providers/inference/remote_together.md deleted file mode 100644 index be764e635..000000000 --- a/docs/source/providers/inference/remote_together.md +++ /dev/null @@ -1,22 +0,0 @@ -# remote::together - -## Description - -Together AI inference provider for open-source models and collaborative AI development. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. | -| `url` | `` | No | https://api.together.xyz/v1 | The URL for the Together AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The Together AI API Key | - -## Sample Configuration - -```yaml -url: https://api.together.xyz/v1 -api_key: ${env.TOGETHER_API_KEY:=} - -``` - diff --git a/docs/source/providers/inference/remote_vertexai.md b/docs/source/providers/inference/remote_vertexai.md deleted file mode 100644 index 962bbd76f..000000000 --- a/docs/source/providers/inference/remote_vertexai.md +++ /dev/null @@ -1,40 +0,0 @@ -# remote::vertexai - -## Description - -Google Vertex AI inference provider enables you to use Google's Gemini models through Google Cloud's Vertex AI platform, providing several advantages: - -โ€ข Enterprise-grade security: Uses Google Cloud's security controls and IAM -โ€ข Better integration: Seamless integration with other Google Cloud services -โ€ข Advanced features: Access to additional Vertex AI features like model tuning and monitoring -โ€ข Authentication: Uses Google Cloud Application Default Credentials (ADC) 
instead of API keys - -Configuration: -- Set VERTEX_AI_PROJECT environment variable (required) -- Set VERTEX_AI_LOCATION environment variable (optional, defaults to us-central1) -- Use Google Cloud Application Default Credentials or service account key - -Authentication Setup: -Option 1 (Recommended): gcloud auth application-default login -Option 2: Set GOOGLE_APPLICATION_CREDENTIALS to service account key path - -Available Models: -- vertex_ai/gemini-2.0-flash -- vertex_ai/gemini-2.5-flash -- vertex_ai/gemini-2.5-pro - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `project` | `` | No | | Google Cloud project ID for Vertex AI | -| `location` | `` | No | us-central1 | Google Cloud location for Vertex AI | - -## Sample Configuration - -```yaml -project: ${env.VERTEX_AI_PROJECT:=} -location: ${env.VERTEX_AI_LOCATION:=us-central1} - -``` - diff --git a/docs/source/providers/inference/remote_vllm.md b/docs/source/providers/inference/remote_vllm.md deleted file mode 100644 index 172d35873..000000000 --- a/docs/source/providers/inference/remote_vllm.md +++ /dev/null @@ -1,26 +0,0 @@ -# remote::vllm - -## Description - -Remote vLLM inference provider for connecting to vLLM servers. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `str \| None` | No | | The URL for the vLLM model serving endpoint | -| `max_tokens` | `` | No | 4096 | Maximum number of tokens to generate. | -| `api_token` | `str \| None` | No | fake | The API token | -| `tls_verify` | `bool \| str` | No | True | Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file. 
| -| `refresh_models` | `` | No | False | Whether to refresh models periodically | - -## Sample Configuration - -```yaml -url: ${env.VLLM_URL:=} -max_tokens: ${env.VLLM_MAX_TOKENS:=4096} -api_token: ${env.VLLM_API_TOKEN:=fake} -tls_verify: ${env.VLLM_TLS_VERIFY:=true} - -``` - diff --git a/docs/source/providers/inference/remote_watsonx.md b/docs/source/providers/inference/remote_watsonx.md deleted file mode 100644 index e885a07fc..000000000 --- a/docs/source/providers/inference/remote_watsonx.md +++ /dev/null @@ -1,24 +0,0 @@ -# remote::watsonx - -## Description - -IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key | -| `project_id` | `str \| None` | No | | The Project ID key | -| `timeout` | `` | No | 60 | Timeout for the HTTP requests | - -## Sample Configuration - -```yaml -url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} -api_key: ${env.WATSONX_API_KEY:=} -project_id: ${env.WATSONX_PROJECT_ID:=} - -``` - diff --git a/docs/source/providers/openai.md b/docs/source/providers/openai.md deleted file mode 100644 index 44a615456..000000000 --- a/docs/source/providers/openai.md +++ /dev/null @@ -1,193 +0,0 @@ -## OpenAI API Compatibility - -### Server path - -Llama Stack exposes an OpenAI-compatible API endpoint at `/v1/openai/v1`. So, for a Llama Stack server running locally on port `8321`, the full url to the OpenAI-compatible API endpoint is `http://localhost:8321/v1/openai/v1`. - -### Clients - -You should be able to use any client that speaks OpenAI APIs with Llama Stack. We regularly test with the official Llama Stack clients as well as OpenAI's official Python client. 
- -#### Llama Stack Client - -When using the Llama Stack client, set the `base_url` to the root of your Llama Stack server. It will automatically route OpenAI-compatible requests to the right server endpoint for you. - -```python -from llama_stack_client import LlamaStackClient - -client = LlamaStackClient(base_url="http://localhost:8321") -``` - -#### OpenAI Client - -When using an OpenAI client, set the `base_url` to the `/v1/openai/v1` path on your Llama Stack server. - -```python -from openai import OpenAI - -client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none") -``` - -Regardless of the client you choose, the following code examples should all work the same. - -### APIs implemented - -#### Models - -Many of the APIs require you to pass in a model parameter. To see the list of models available in your Llama Stack server: - -```python -models = client.models.list() -``` - -#### Responses - -:::{note} -The Responses API implementation is still in active development. While it is quite usable, there are still unimplemented parts of the API. We'd love feedback on any use-cases you try that do not work to help prioritize the pieces left to implement. Please open issues in the [meta-llama/llama-stack](https://github.com/meta-llama/llama-stack) GitHub repository with details of anything that does not work. -::: - -##### Simple inference - -Request: - -``` -response = client.responses.create( - model="meta-llama/Llama-3.2-3B-Instruct", - input="Write a haiku about coding." 
-) - -print(response.output_text) -``` -Example output: - -```text -Pixels dancing slow -Syntax whispers secrets sweet -Code's gentle silence -``` - -##### Structured Output - -Request: - -```python -response = client.responses.create( - model="meta-llama/Llama-3.2-3B-Instruct", - input=[ - { - "role": "system", - "content": "Extract the participants from the event information.", - }, - { - "role": "user", - "content": "Alice and Bob are going to a science fair on Friday.", - }, - ], - text={ - "format": { - "type": "json_schema", - "name": "participants", - "schema": { - "type": "object", - "properties": { - "participants": {"type": "array", "items": {"type": "string"}} - }, - "required": ["participants"], - }, - } - }, -) -print(response.output_text) -``` - -Example output: - -```text -{ "participants": ["Alice", "Bob"] } -``` - -#### Chat Completions - -##### Simple inference - -Request: - -```python -chat_completion = client.chat.completions.create( - model="meta-llama/Llama-3.2-3B-Instruct", - messages=[{"role": "user", "content": "Write a haiku about coding."}], -) - -print(chat_completion.choices[0].message.content) -``` - -Example output: - -```text -Lines of code unfold -Logic flows like a river -Code's gentle beauty -``` - -##### Structured Output - -Request: - -```python -chat_completion = client.chat.completions.create( - model="meta-llama/Llama-3.2-3B-Instruct", - messages=[ - { - "role": "system", - "content": "Extract the participants from the event information.", - }, - { - "role": "user", - "content": "Alice and Bob are going to a science fair on Friday.", - }, - ], - response_format={ - "type": "json_schema", - "json_schema": { - "name": "participants", - "schema": { - "type": "object", - "properties": { - "participants": {"type": "array", "items": {"type": "string"}} - }, - "required": ["participants"], - }, - }, - }, -) - -print(chat_completion.choices[0].message.content) -``` - -Example output: - -```text -{ "participants": ["Alice", "Bob"] } 
-``` - -#### Completions - -##### Simple inference - -Request: - -```python -completion = client.completions.create( - model="meta-llama/Llama-3.2-3B-Instruct", prompt="Write a haiku about coding." -) - -print(completion.choices[0].text) -``` - -Example output: - -```text -Lines of code unfurl -Logic whispers in the dark -Art in hidden form -``` diff --git a/docs/source/providers/post_training/index.md b/docs/source/providers/post_training/index.md deleted file mode 100644 index e69f2a45a..000000000 --- a/docs/source/providers/post_training/index.md +++ /dev/null @@ -1,16 +0,0 @@ -# Post_Training - -## Overview - -This section contains documentation for all available providers for the **post_training** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_huggingface-gpu -inline_torchtune-cpu -inline_torchtune-gpu -remote_nvidia -``` diff --git a/docs/source/providers/post_training/inline_huggingface-cpu.md b/docs/source/providers/post_training/inline_huggingface-cpu.md deleted file mode 100644 index e663fe8f8..000000000 --- a/docs/source/providers/post_training/inline_huggingface-cpu.md +++ /dev/null @@ -1,41 +0,0 @@ -# inline::huggingface-cpu - -## Description - -HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `device` | `` | No | cuda | | -| `distributed_backend` | `Literal['fsdp', 'deepspeed'` | No | | | -| `checkpoint_format` | `Literal['full_state', 'huggingface'` | No | huggingface | | -| `chat_template` | `` | No | <|user|> -{input} -<|assistant|> -{output} | | -| `model_specific_config` | `` | No | {'trust_remote_code': True, 'attn_implementation': 'sdpa'} | | -| `max_seq_length` | `` | No | 2048 | | -| `gradient_checkpointing` | `` | No | False | | -| `save_total_limit` | `` | No | 3 | | -| `logging_steps` | `` | No | 10 | | -| `warmup_ratio` | `` | No | 0.1 | | -| `weight_decay` | `` | No | 0.01 | | -| `dataloader_num_workers` | `` | No | 4 | | -| `dataloader_pin_memory` | `` | No | True | | -| `dpo_beta` | `` | No | 0.1 | | -| `use_reference_model` | `` | No | True | | -| `dpo_loss_type` | `Literal['sigmoid', 'hinge', 'ipo', 'kto_pair'` | No | sigmoid | | -| `dpo_output_dir` | `` | No | | | - -## Sample Configuration - -```yaml -checkpoint_format: huggingface -distributed_backend: null -device: cpu -dpo_output_dir: ~/.llama/dummy/dpo_output - -``` - diff --git a/docs/source/providers/post_training/inline_huggingface-gpu.md b/docs/source/providers/post_training/inline_huggingface-gpu.md deleted file mode 100644 index 21bf965fe..000000000 --- a/docs/source/providers/post_training/inline_huggingface-gpu.md +++ /dev/null @@ -1,41 +0,0 @@ -# inline::huggingface-gpu - -## Description - -HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `device` | `` | No | cuda | | -| `distributed_backend` | `Literal['fsdp', 'deepspeed'` | No | | | -| `checkpoint_format` | `Literal['full_state', 'huggingface'` | No | huggingface | | -| `chat_template` | `` | No | <|user|> -{input} -<|assistant|> -{output} | | -| `model_specific_config` | `` | No | {'trust_remote_code': True, 'attn_implementation': 'sdpa'} | | -| `max_seq_length` | `` | No | 2048 | | -| `gradient_checkpointing` | `` | No | False | | -| `save_total_limit` | `` | No | 3 | | -| `logging_steps` | `` | No | 10 | | -| `warmup_ratio` | `` | No | 0.1 | | -| `weight_decay` | `` | No | 0.01 | | -| `dataloader_num_workers` | `` | No | 4 | | -| `dataloader_pin_memory` | `` | No | True | | -| `dpo_beta` | `` | No | 0.1 | | -| `use_reference_model` | `` | No | True | | -| `dpo_loss_type` | `Literal['sigmoid', 'hinge', 'ipo', 'kto_pair'` | No | sigmoid | | -| `dpo_output_dir` | `` | No | | | - -## Sample Configuration - -```yaml -checkpoint_format: huggingface -distributed_backend: null -device: cpu -dpo_output_dir: ~/.llama/dummy/dpo_output - -``` - diff --git a/docs/source/providers/post_training/inline_huggingface.md b/docs/source/providers/post_training/inline_huggingface.md deleted file mode 100644 index 8b10fe79c..000000000 --- a/docs/source/providers/post_training/inline_huggingface.md +++ /dev/null @@ -1,41 +0,0 @@ -# inline::huggingface - -## Description - -HuggingFace-based post-training provider for fine-tuning models using the HuggingFace ecosystem. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `device` | `` | No | cuda | | -| `distributed_backend` | `Literal['fsdp', 'deepspeed'` | No | | | -| `checkpoint_format` | `Literal['full_state', 'huggingface'` | No | huggingface | | -| `chat_template` | `` | No | <|user|> -{input} -<|assistant|> -{output} | | -| `model_specific_config` | `` | No | {'trust_remote_code': True, 'attn_implementation': 'sdpa'} | | -| `max_seq_length` | `` | No | 2048 | | -| `gradient_checkpointing` | `` | No | False | | -| `save_total_limit` | `` | No | 3 | | -| `logging_steps` | `` | No | 10 | | -| `warmup_ratio` | `` | No | 0.1 | | -| `weight_decay` | `` | No | 0.01 | | -| `dataloader_num_workers` | `` | No | 4 | | -| `dataloader_pin_memory` | `` | No | True | | -| `dpo_beta` | `` | No | 0.1 | | -| `use_reference_model` | `` | No | True | | -| `dpo_loss_type` | `Literal['sigmoid', 'hinge', 'ipo', 'kto_pair'` | No | sigmoid | | -| `dpo_output_dir` | `` | No | | | - -## Sample Configuration - -```yaml -checkpoint_format: huggingface -distributed_backend: null -device: cpu -dpo_output_dir: ~/.llama/dummy/dpo_output - -``` - diff --git a/docs/source/providers/post_training/inline_torchtune-cpu.md b/docs/source/providers/post_training/inline_torchtune-cpu.md deleted file mode 100644 index 7204e56e8..000000000 --- a/docs/source/providers/post_training/inline_torchtune-cpu.md +++ /dev/null @@ -1,20 +0,0 @@ -# inline::torchtune-cpu - -## Description - -TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `torch_seed` | `int \| None` | No | | | -| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | | - -## Sample Configuration - -```yaml -checkpoint_format: meta - -``` - diff --git a/docs/source/providers/post_training/inline_torchtune-gpu.md b/docs/source/providers/post_training/inline_torchtune-gpu.md deleted file mode 100644 index 98b94f6f6..000000000 --- a/docs/source/providers/post_training/inline_torchtune-gpu.md +++ /dev/null @@ -1,20 +0,0 @@ -# inline::torchtune-gpu - -## Description - -TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `torch_seed` | `int \| None` | No | | | -| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | | - -## Sample Configuration - -```yaml -checkpoint_format: meta - -``` - diff --git a/docs/source/providers/post_training/inline_torchtune.md b/docs/source/providers/post_training/inline_torchtune.md deleted file mode 100644 index 82730e54b..000000000 --- a/docs/source/providers/post_training/inline_torchtune.md +++ /dev/null @@ -1,20 +0,0 @@ -# inline::torchtune - -## Description - -TorchTune-based post-training provider for fine-tuning and optimizing models using Meta's TorchTune framework. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `torch_seed` | `int \| None` | No | | | -| `checkpoint_format` | `Literal['meta', 'huggingface'` | No | meta | | - -## Sample Configuration - -```yaml -checkpoint_format: meta - -``` - diff --git a/docs/source/providers/post_training/remote_nvidia.md b/docs/source/providers/post_training/remote_nvidia.md deleted file mode 100644 index 9a381d872..000000000 --- a/docs/source/providers/post_training/remote_nvidia.md +++ /dev/null @@ -1,28 +0,0 @@ -# remote::nvidia - -## Description - -NVIDIA's post-training provider for fine-tuning models on NVIDIA's platform. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The NVIDIA API key. | -| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. | -| `project_id` | `str \| None` | No | test-example-model@v1 | The NVIDIA project ID. | -| `customizer_url` | `str \| None` | No | | Base URL for the NeMo Customizer API | -| `timeout` | `` | No | 300 | Timeout for the NVIDIA Post Training API | -| `max_retries` | `` | No | 3 | Maximum number of retries for the NVIDIA Post Training API | -| `output_model_dir` | `` | No | test-example-model@v1 | Directory to save the output model | - -## Sample Configuration - -```yaml -api_key: ${env.NVIDIA_API_KEY:=} -dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:=default} -project_id: ${env.NVIDIA_PROJECT_ID:=test-project} -customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:=http://nemo.test} - -``` - diff --git a/docs/source/providers/safety/index.md b/docs/source/providers/safety/index.md deleted file mode 100644 index 5ddda2242..000000000 --- a/docs/source/providers/safety/index.md +++ /dev/null @@ -1,18 +0,0 @@ -# Safety - -## Overview - -This section contains documentation for all available providers for the **safety** API. 
- -## Providers - -```{toctree} -:maxdepth: 1 - -inline_code-scanner -inline_llama-guard -inline_prompt-guard -remote_bedrock -remote_nvidia -remote_sambanova -``` diff --git a/docs/source/providers/safety/inline_code-scanner.md b/docs/source/providers/safety/inline_code-scanner.md deleted file mode 100644 index 3a3e90b3d..000000000 --- a/docs/source/providers/safety/inline_code-scanner.md +++ /dev/null @@ -1,13 +0,0 @@ -# inline::code-scanner - -## Description - -Code Scanner safety provider for detecting security vulnerabilities and unsafe code patterns. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/safety/inline_llama-guard.md b/docs/source/providers/safety/inline_llama-guard.md deleted file mode 100644 index 4f57898ec..000000000 --- a/docs/source/providers/safety/inline_llama-guard.md +++ /dev/null @@ -1,19 +0,0 @@ -# inline::llama-guard - -## Description - -Llama Guard safety provider for content moderation and safety filtering using Meta's Llama Guard model. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `excluded_categories` | `list[str` | No | [] | | - -## Sample Configuration - -```yaml -excluded_categories: [] - -``` - diff --git a/docs/source/providers/safety/inline_prompt-guard.md b/docs/source/providers/safety/inline_prompt-guard.md deleted file mode 100644 index 10a6b8d3f..000000000 --- a/docs/source/providers/safety/inline_prompt-guard.md +++ /dev/null @@ -1,19 +0,0 @@ -# inline::prompt-guard - -## Description - -Prompt Guard safety provider for detecting and filtering unsafe prompts and content. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `guard_type` | `` | No | injection | | - -## Sample Configuration - -```yaml -guard_type: injection - -``` - diff --git a/docs/source/providers/safety/remote_bedrock.md b/docs/source/providers/safety/remote_bedrock.md deleted file mode 100644 index 99d77dd72..000000000 --- a/docs/source/providers/safety/remote_bedrock.md +++ /dev/null @@ -1,28 +0,0 @@ -# remote::bedrock - -## Description - -AWS Bedrock safety provider for content moderation using AWS's safety services. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID | -| `aws_secret_access_key` | `str \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY | -| `aws_session_token` | `str \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN | -| `region_name` | `str \| None` | No | | The default AWS Region to use, for example, us-west-1 or us-west-2.Default use environment variable: AWS_DEFAULT_REGION | -| `profile_name` | `str \| None` | No | | The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE | -| `total_max_attempts` | `int \| None` | No | | An integer representing the maximum number of attempts that will be made for a single request, including the initial attempt. Default use environment variable: AWS_MAX_ATTEMPTS | -| `retry_mode` | `str \| None` | No | | A string representing the type of retries Boto3 will perform.Default use environment variable: AWS_RETRY_MODE | -| `connect_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds. 
| -| `read_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to read from a connection.The default is 60 seconds. | -| `session_ttl` | `int \| None` | No | 3600 | The time in seconds till a session expires. The default is 3600 seconds (1 hour). | - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/safety/remote_nvidia.md b/docs/source/providers/safety/remote_nvidia.md deleted file mode 100644 index 40ae744a4..000000000 --- a/docs/source/providers/safety/remote_nvidia.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::nvidia - -## Description - -NVIDIA's safety provider for content moderation and safety filtering. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `guardrails_service_url` | `` | No | http://0.0.0.0:7331 | The url for accessing the Guardrails service | -| `config_id` | `str \| None` | No | self-check | Guardrails configuration ID to use from the Guardrails configuration store | - -## Sample Configuration - -```yaml -guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:=http://localhost:7331} -config_id: ${env.NVIDIA_GUARDRAILS_CONFIG_ID:=self-check} - -``` - diff --git a/docs/source/providers/safety/remote_sambanova.md b/docs/source/providers/safety/remote_sambanova.md deleted file mode 100644 index 7e608f1b7..000000000 --- a/docs/source/providers/safety/remote_sambanova.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::sambanova - -## Description - -SambaNova's safety provider for content moderation and safety filtering. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `` | No | https://api.sambanova.ai/v1 | The URL for the SambaNova AI server | -| `api_key` | `pydantic.types.SecretStr \| None` | No | | The SambaNova cloud API Key | - -## Sample Configuration - -```yaml -url: https://api.sambanova.ai/v1 -api_key: ${env.SAMBANOVA_API_KEY:=} - -``` - diff --git a/docs/source/providers/scoring/index.md b/docs/source/providers/scoring/index.md deleted file mode 100644 index f3bd48eb0..000000000 --- a/docs/source/providers/scoring/index.md +++ /dev/null @@ -1,15 +0,0 @@ -# Scoring - -## Overview - -This section contains documentation for all available providers for the **scoring** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_basic -inline_braintrust -inline_llm-as-judge -``` diff --git a/docs/source/providers/scoring/inline_basic.md b/docs/source/providers/scoring/inline_basic.md deleted file mode 100644 index e9e50cff4..000000000 --- a/docs/source/providers/scoring/inline_basic.md +++ /dev/null @@ -1,13 +0,0 @@ -# inline::basic - -## Description - -Basic scoring provider for simple evaluation metrics and scoring functions. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/scoring/inline_braintrust.md b/docs/source/providers/scoring/inline_braintrust.md deleted file mode 100644 index 70a6a1e26..000000000 --- a/docs/source/providers/scoring/inline_braintrust.md +++ /dev/null @@ -1,19 +0,0 @@ -# inline::braintrust - -## Description - -Braintrust scoring provider for evaluation and scoring using the Braintrust platform. 
- -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `openai_api_key` | `str \| None` | No | | The OpenAI API Key | - -## Sample Configuration - -```yaml -openai_api_key: ${env.OPENAI_API_KEY:=} - -``` - diff --git a/docs/source/providers/scoring/inline_llm-as-judge.md b/docs/source/providers/scoring/inline_llm-as-judge.md deleted file mode 100644 index 971e02897..000000000 --- a/docs/source/providers/scoring/inline_llm-as-judge.md +++ /dev/null @@ -1,13 +0,0 @@ -# inline::llm-as-judge - -## Description - -LLM-as-judge scoring provider that uses language models to evaluate and score responses. - -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/telemetry/index.md b/docs/source/providers/telemetry/index.md deleted file mode 100644 index c7fbfed73..000000000 --- a/docs/source/providers/telemetry/index.md +++ /dev/null @@ -1,13 +0,0 @@ -# Telemetry - -## Overview - -This section contains documentation for all available providers for the **telemetry** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_meta-reference -``` diff --git a/docs/source/providers/telemetry/inline_meta-reference.md b/docs/source/providers/telemetry/inline_meta-reference.md deleted file mode 100644 index 3e5f4b842..000000000 --- a/docs/source/providers/telemetry/inline_meta-reference.md +++ /dev/null @@ -1,25 +0,0 @@ -# inline::meta-reference - -## Description - -Meta's reference implementation of telemetry and observability using OpenTelemetry. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `otel_exporter_otlp_endpoint` | `str \| None` | No | | The OpenTelemetry collector endpoint URL (base URL for traces, metrics, and logs). If not set, the SDK will use OTEL_EXPORTER_OTLP_ENDPOINT environment variable. 
| -| `service_name` | `` | No | โ€‹ | The service name to use for telemetry | -| `sinks` | `list[inline.telemetry.meta_reference.config.TelemetrySink` | No | [, ] | List of telemetry sinks to enable (possible values: otel_trace, otel_metric, sqlite, console) | -| `sqlite_db_path` | `` | No | ~/.llama/runtime/trace_store.db | The path to the SQLite database to use for storing traces | - -## Sample Configuration - -```yaml -service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" -sinks: ${env.TELEMETRY_SINKS:=console,sqlite} -sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/trace_store.db -otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - -``` - diff --git a/docs/source/providers/tool_runtime/index.md b/docs/source/providers/tool_runtime/index.md deleted file mode 100644 index 8d29aed43..000000000 --- a/docs/source/providers/tool_runtime/index.md +++ /dev/null @@ -1,18 +0,0 @@ -# Tool_Runtime - -## Overview - -This section contains documentation for all available providers for the **tool_runtime** API. - -## Providers - -```{toctree} -:maxdepth: 1 - -inline_rag-runtime -remote_bing-search -remote_brave-search -remote_model-context-protocol -remote_tavily-search -remote_wolfram-alpha -``` diff --git a/docs/source/providers/tool_runtime/inline_rag-runtime.md b/docs/source/providers/tool_runtime/inline_rag-runtime.md deleted file mode 100644 index 784b4fdad..000000000 --- a/docs/source/providers/tool_runtime/inline_rag-runtime.md +++ /dev/null @@ -1,13 +0,0 @@ -# inline::rag-runtime - -## Description - -RAG (Retrieval-Augmented Generation) tool runtime for document ingestion, chunking, and semantic search. 
- -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/tool_runtime/remote_bing-search.md b/docs/source/providers/tool_runtime/remote_bing-search.md deleted file mode 100644 index 0d5df7679..000000000 --- a/docs/source/providers/tool_runtime/remote_bing-search.md +++ /dev/null @@ -1,20 +0,0 @@ -# remote::bing-search - -## Description - -Bing Search tool for web search capabilities using Microsoft's search engine. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | | -| `top_k` | `` | No | 3 | | - -## Sample Configuration - -```yaml -api_key: ${env.BING_API_KEY:} - -``` - diff --git a/docs/source/providers/tool_runtime/remote_brave-search.md b/docs/source/providers/tool_runtime/remote_brave-search.md deleted file mode 100644 index 26bc4010d..000000000 --- a/docs/source/providers/tool_runtime/remote_brave-search.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::brave-search - -## Description - -Brave Search tool for web search capabilities with privacy-focused results. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The Brave Search API Key | -| `max_results` | `` | No | 3 | The maximum number of results to return | - -## Sample Configuration - -```yaml -api_key: ${env.BRAVE_SEARCH_API_KEY:=} -max_results: 3 - -``` - diff --git a/docs/source/providers/tool_runtime/remote_model-context-protocol.md b/docs/source/providers/tool_runtime/remote_model-context-protocol.md deleted file mode 100644 index cf9401c2c..000000000 --- a/docs/source/providers/tool_runtime/remote_model-context-protocol.md +++ /dev/null @@ -1,13 +0,0 @@ -# remote::model-context-protocol - -## Description - -Model Context Protocol (MCP) tool for standardized tool calling and context management. 
- -## Sample Configuration - -```yaml -{} - -``` - diff --git a/docs/source/providers/tool_runtime/remote_tavily-search.md b/docs/source/providers/tool_runtime/remote_tavily-search.md deleted file mode 100644 index 3dc31534d..000000000 --- a/docs/source/providers/tool_runtime/remote_tavily-search.md +++ /dev/null @@ -1,21 +0,0 @@ -# remote::tavily-search - -## Description - -Tavily Search tool for AI-optimized web search with structured results. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | The Tavily Search API Key | -| `max_results` | `` | No | 3 | The maximum number of results to return | - -## Sample Configuration - -```yaml -api_key: ${env.TAVILY_SEARCH_API_KEY:=} -max_results: 3 - -``` - diff --git a/docs/source/providers/tool_runtime/remote_wolfram-alpha.md b/docs/source/providers/tool_runtime/remote_wolfram-alpha.md deleted file mode 100644 index 325c189fd..000000000 --- a/docs/source/providers/tool_runtime/remote_wolfram-alpha.md +++ /dev/null @@ -1,19 +0,0 @@ -# remote::wolfram-alpha - -## Description - -Wolfram Alpha tool for computational knowledge and mathematical calculations. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `api_key` | `str \| None` | No | | | - -## Sample Configuration - -```yaml -api_key: ${env.WOLFRAM_ALPHA_API_KEY:=} - -``` - diff --git a/docs/source/providers/vector_io/index.md b/docs/source/providers/vector_io/index.md deleted file mode 100644 index 28ae523d7..000000000 --- a/docs/source/providers/vector_io/index.md +++ /dev/null @@ -1,24 +0,0 @@ -# Vector_Io - -## Overview - -This section contains documentation for all available providers for the **vector_io** API. 
- -## Providers - -```{toctree} -:maxdepth: 1 - -inline_chromadb -inline_faiss -inline_meta-reference -inline_milvus -inline_qdrant -inline_sqlite-vec -inline_sqlite_vec -remote_chromadb -remote_milvus -remote_pgvector -remote_qdrant -remote_weaviate -``` diff --git a/docs/source/providers/vector_io/inline_chromadb.md b/docs/source/providers/vector_io/inline_chromadb.md deleted file mode 100644 index 518e3f689..000000000 --- a/docs/source/providers/vector_io/inline_chromadb.md +++ /dev/null @@ -1,56 +0,0 @@ -# inline::chromadb - -## Description - - -[Chroma](https://www.trychroma.com/) is an inline and remote vector -database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database. -That means you're not limited to storing vectors in memory or in a separate service. - -## Features -Chroma supports: -- Store embeddings and their metadata -- Vector search -- Full-text search -- Document storage -- Metadata filtering -- Multi-modal retrieval - -## Usage - -To use Chrome in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use chroma. -3. Start storing and querying vectors. - -## Installation - -You can install chroma using pip: - -```bash -pip install chromadb -``` - -## Documentation -See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introduction) for more details about Chroma in general. 
- - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `db_path` | `` | No | | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend | - -## Sample Configuration - -```yaml -db_path: ${env.CHROMADB_PATH} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/chroma_inline_registry.db - -``` - diff --git a/docs/source/providers/vector_io/inline_faiss.md b/docs/source/providers/vector_io/inline_faiss.md deleted file mode 100644 index cfa18a839..000000000 --- a/docs/source/providers/vector_io/inline_faiss.md +++ /dev/null @@ -1,62 +0,0 @@ -# inline::faiss - -## Description - - -[Faiss](https://github.com/facebookresearch/faiss) is an inline vector database provider for Llama Stack. It -allows you to store and query vectors directly in memory. -That means you'll get fast and efficient vector retrieval. - -## Features - -- Lightweight and easy to use -- Fully integrated with Llama Stack -- GPU support -- **Vector search** - FAISS supports pure vector similarity search using embeddings - -## Search Modes - -**Supported:** -- **Vector Search** (`mode="vector"`): Performs vector similarity search using embeddings - -**Not Supported:** -- **Keyword Search** (`mode="keyword"`): Not supported by FAISS -- **Hybrid Search** (`mode="hybrid"`): Not supported by FAISS - -> **Note**: FAISS is designed as a pure vector similarity search library. See the [FAISS GitHub repository](https://github.com/facebookresearch/faiss) for more details about FAISS's core functionality. - -## Usage - -To use Faiss in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use Faiss. -3. Start storing and querying vectors. 
- -## Installation - -You can install Faiss using pip: - -```bash -pip install faiss-cpu -``` -## Documentation -See [Faiss' documentation](https://faiss.ai/) or the [Faiss Wiki](https://github.com/facebookresearch/faiss/wiki) for -more details about Faiss in general. - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/faiss_store.db - -``` - diff --git a/docs/source/providers/vector_io/inline_meta-reference.md b/docs/source/providers/vector_io/inline_meta-reference.md deleted file mode 100644 index 6f269c441..000000000 --- a/docs/source/providers/vector_io/inline_meta-reference.md +++ /dev/null @@ -1,27 +0,0 @@ -# inline::meta-reference - -## Description - -Meta's reference implementation of a vector database. - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/faiss_store.db - -``` - -## Deprecation Notice - -```{warning} -Please use the `inline::faiss` provider instead. 
-``` - diff --git a/docs/source/providers/vector_io/inline_milvus.md b/docs/source/providers/vector_io/inline_milvus.md deleted file mode 100644 index 33ea4d179..000000000 --- a/docs/source/providers/vector_io/inline_milvus.md +++ /dev/null @@ -1,26 +0,0 @@ -# inline::milvus - -## Description - - -Please refer to the remote provider documentation. - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `db_path` | `` | No | | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) | -| `consistency_level` | `` | No | Strong | The consistency level of the Milvus server | - -## Sample Configuration - -```yaml -db_path: ${env.MILVUS_DB_PATH:=~/.llama/dummy}/milvus.db -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/milvus_registry.db - -``` - diff --git a/docs/source/providers/vector_io/inline_qdrant.md b/docs/source/providers/vector_io/inline_qdrant.md deleted file mode 100644 index b5072d220..000000000 --- a/docs/source/providers/vector_io/inline_qdrant.md +++ /dev/null @@ -1,65 +0,0 @@ -# inline::qdrant - -## Description - - -[Qdrant](https://qdrant.tech/documentation/) is an inline and remote vector database provider for Llama Stack. It -allows you to store and query vectors directly in memory. -That means you'll get fast and efficient vector retrieval. - -> By default, Qdrant stores vectors in RAM, delivering incredibly fast access for datasets that fit comfortably in -> memory. But when your dataset exceeds RAM capacity, Qdrant offers Memmap as an alternative. 
-> -> \[[An Introduction to Vector Databases](https://qdrant.tech/articles/what-is-a-vector-database/)\] - - - -## Features - -- Lightweight and easy to use -- Fully integrated with Llama Stack -- Apache 2.0 license terms -- Store embeddings and their metadata -- Supports search by - [Keyword](https://qdrant.tech/articles/qdrant-introduces-full-text-filters-and-indexes/) - and [Hybrid](https://qdrant.tech/articles/hybrid-search/#building-a-hybrid-search-system-in-qdrant) search -- [Multilingual and Multimodal retrieval](https://qdrant.tech/documentation/multimodal-search/) -- [Medatata filtering](https://qdrant.tech/articles/vector-search-filtering/) -- [GPU support](https://qdrant.tech/documentation/guides/running-with-gpu/) - -## Usage - -To use Qdrant in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use Qdrant. -3. Start storing and querying vectors. - -## Installation - -You can install Qdrant using docker: - -```bash -docker pull qdrant/qdrant -``` -## Documentation -See the [Qdrant documentation](https://qdrant.tech/documentation/) for more details about Qdrant in general. 
- - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `path` | `` | No | | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -path: ${env.QDRANT_PATH:=~/.llama/~/.llama/dummy}/qdrant.db -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db - -``` - diff --git a/docs/source/providers/vector_io/inline_sqlite-vec.md b/docs/source/providers/vector_io/inline_sqlite-vec.md deleted file mode 100644 index 854bb9d08..000000000 --- a/docs/source/providers/vector_io/inline_sqlite-vec.md +++ /dev/null @@ -1,220 +0,0 @@ -# inline::sqlite-vec - -## Description - - -[SQLite-Vec](https://github.com/asg017/sqlite-vec) is an inline vector database provider for Llama Stack. It -allows you to store and query vectors directly within an SQLite database. -That means you're not limited to storing vectors in memory or in a separate service. - -## Features - -- Lightweight and easy to use -- Fully integrated with Llama Stacks -- Uses disk-based storage for persistence, allowing for larger vector storage - -### Comparison to Faiss - -The choice between Faiss and sqlite-vec should be made based on the needs of your application, -as they have different strengths. 
- -#### Choosing the Right Provider - -Scenario | Recommended Tool | Reason --- |-----------------| -- -Online Analytical Processing (OLAP) | Faiss | Fast, in-memory searches -Online Transaction Processing (OLTP) | sqlite-vec | Frequent writes and reads -Frequent writes | sqlite-vec | Efficient disk-based storage and incremental indexing -Large datasets | sqlite-vec | Disk-based storage for larger vector storage -Datasets that can fit in memory, frequent reads | Faiss | Optimized for speed, indexing, and GPU acceleration - -#### Empirical Example - -Consider the histogram below in which 10,000 randomly generated strings were inserted -in batches of 100 into both Faiss and sqlite-vec using `client.tool_runtime.rag_tool.insert()`. - -```{image} ../../../../_static/providers/vector_io/write_time_comparison_sqlite-vec-faiss.png -:alt: Comparison of SQLite-Vec and Faiss write times -:width: 400px -``` - -You will notice that the average write time for `sqlite-vec` was 788ms, compared to -47,640ms for Faiss. While the number is jarring, if you look at the distribution, you can see that it is rather -uniformly spread across the [1500, 100000] interval. - -Looking at each individual write in the order that the documents are inserted you'll see the increase in -write speed as Faiss reindexes the vectors after each write. -```{image} ../../../../_static/providers/vector_io/write_time_sequence_sqlite-vec-faiss.png -:alt: Comparison of SQLite-Vec and Faiss write times -:width: 400px -``` - -In comparison, the read times for Faiss was on average 10% faster than sqlite-vec. -The modes of the two distributions highlight the differences much further where Faiss -will likely yield faster read performance. - -```{image} ../../../../_static/providers/vector_io/read_time_comparison_sqlite-vec-faiss.png -:alt: Comparison of SQLite-Vec and Faiss read times -:width: 400px -``` - -## Usage - -To use sqlite-vec in your Llama Stack project, follow these steps: - -1. 
Install the necessary dependencies. -2. Configure your Llama Stack project to use SQLite-Vec. -3. Start storing and querying vectors. - -The SQLite-vec provider supports three search modes: - -1. **Vector Search** (`mode="vector"`): Performs pure vector similarity search using the embeddings. -2. **Keyword Search** (`mode="keyword"`): Performs full-text search using SQLite's FTS5. -3. **Hybrid Search** (`mode="hybrid"`): Combines both vector and keyword search for better results. First performs keyword search to get candidate matches, then applies vector similarity search on those candidates. - -Example with hybrid search: -```python -response = await vector_io.query_chunks( - vector_db_id="my_db", - query="your query here", - params={"mode": "hybrid", "max_chunks": 3, "score_threshold": 0.7}, -) - -# Using RRF ranker -response = await vector_io.query_chunks( - vector_db_id="my_db", - query="your query here", - params={ - "mode": "hybrid", - "max_chunks": 3, - "score_threshold": 0.7, - "ranker": {"type": "rrf", "impact_factor": 60.0}, - }, -) - -# Using weighted ranker -response = await vector_io.query_chunks( - vector_db_id="my_db", - query="your query here", - params={ - "mode": "hybrid", - "max_chunks": 3, - "score_threshold": 0.7, - "ranker": {"type": "weighted", "alpha": 0.7}, # 70% vector, 30% keyword - }, -) -``` - -Example with explicit vector search: -```python -response = await vector_io.query_chunks( - vector_db_id="my_db", - query="your query here", - params={"mode": "vector", "max_chunks": 3, "score_threshold": 0.7}, -) -``` - -Example with keyword search: -```python -response = await vector_io.query_chunks( - vector_db_id="my_db", - query="your query here", - params={"mode": "keyword", "max_chunks": 3, "score_threshold": 0.7}, -) -``` - -## Supported Search Modes - -The SQLite vector store supports three search modes: - -1. **Vector Search** (`mode="vector"`): Uses vector similarity to find relevant chunks -2. 
**Keyword Search** (`mode="keyword"`): Uses keyword matching to find relevant chunks -3. **Hybrid Search** (`mode="hybrid"`): Combines both vector and keyword scores using a ranker - -### Hybrid Search - -Hybrid search combines the strengths of both vector and keyword search by: -- Computing vector similarity scores -- Computing keyword match scores -- Using a ranker to combine these scores - -Two ranker types are supported: - -1. **RRF (Reciprocal Rank Fusion)**: - - Combines ranks from both vector and keyword results - - Uses an impact factor (default: 60.0) to control the weight of higher-ranked results - - Good for balancing between vector and keyword results - - The default impact factor of 60.0 comes from the original RRF paper by Cormack et al. (2009) [^1], which found this value to provide optimal performance across various retrieval tasks - -2. **Weighted**: - - Linearly combines normalized vector and keyword scores - - Uses an alpha parameter (0-1) to control the blend: - - alpha=0: Only use keyword scores - - alpha=1: Only use vector scores - - alpha=0.5: Equal weight to both (default) - -Example using RAGQueryConfig with different search modes: - -```python -from llama_stack.apis.tools import RAGQueryConfig, RRFRanker, WeightedRanker - -# Vector search -config = RAGQueryConfig(mode="vector", max_chunks=5) - -# Keyword search -config = RAGQueryConfig(mode="keyword", max_chunks=5) - -# Hybrid search with custom RRF ranker -config = RAGQueryConfig( - mode="hybrid", - max_chunks=5, - ranker=RRFRanker(impact_factor=50.0), # Custom impact factor -) - -# Hybrid search with weighted ranker -config = RAGQueryConfig( - mode="hybrid", - max_chunks=5, - ranker=WeightedRanker(alpha=0.7), # 70% vector, 30% keyword -) - -# Hybrid search with default RRF ranker -config = RAGQueryConfig( - mode="hybrid", max_chunks=5 -) # Will use RRF with impact_factor=60.0 -``` - -Note: The ranker configuration is only used in hybrid mode. 
For vector or keyword modes, the ranker parameter is ignored. - -## Installation - -You can install SQLite-Vec using pip: - -```bash -pip install sqlite-vec -``` - -## Documentation - -See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) for more details about sqlite-vec in general. - -[^1]: Cormack, G. V., Clarke, C. L., & Buettcher, S. (2009). [Reciprocal rank fusion outperforms condorcet and individual rank learning methods](https://dl.acm.org/doi/10.1145/1571941.1572114). In Proceedings of the 32nd international ACM SIGIR conference on Research and development in information retrieval (pp. 758-759). - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `db_path` | `` | No | | Path to the SQLite database file | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) | - -## Sample Configuration - -```yaml -db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec.db -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec_registry.db - -``` - diff --git a/docs/source/providers/vector_io/inline_sqlite_vec.md b/docs/source/providers/vector_io/inline_sqlite_vec.md deleted file mode 100644 index 9e5654a50..000000000 --- a/docs/source/providers/vector_io/inline_sqlite_vec.md +++ /dev/null @@ -1,31 +0,0 @@ -# inline::sqlite_vec - -## Description - - -Please refer to the sqlite-vec provider documentation. 
- - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `db_path` | `` | No | | Path to the SQLite database file | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) | - -## Sample Configuration - -```yaml -db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec.db -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec_registry.db - -``` - -## Deprecation Notice - -```{warning} -Please use the `inline::sqlite-vec` provider (notice the hyphen instead of underscore) instead. -``` - diff --git a/docs/source/providers/vector_io/remote_chromadb.md b/docs/source/providers/vector_io/remote_chromadb.md deleted file mode 100644 index badfebe90..000000000 --- a/docs/source/providers/vector_io/remote_chromadb.md +++ /dev/null @@ -1,55 +0,0 @@ -# remote::chromadb - -## Description - - -[Chroma](https://www.trychroma.com/) is an inline and remote vector -database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database. -That means you're not limited to storing vectors in memory or in a separate service. - -## Features -Chroma supports: -- Store embeddings and their metadata -- Vector search -- Full-text search -- Document storage -- Metadata filtering -- Multi-modal retrieval - -## Usage - -To use Chrome in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use chroma. -3. Start storing and querying vectors. 
- -## Installation - -You can install chroma using pip: - -```bash -pip install chromadb -``` - -## Documentation -See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introduction) for more details about Chroma in general. - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `url` | `str \| None` | No | | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend | - -## Sample Configuration - -```yaml -url: ${env.CHROMADB_URL} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/chroma_remote_registry.db - -``` - diff --git a/docs/source/providers/vector_io/remote_milvus.md b/docs/source/providers/vector_io/remote_milvus.md deleted file mode 100644 index 8974ada10..000000000 --- a/docs/source/providers/vector_io/remote_milvus.md +++ /dev/null @@ -1,228 +0,0 @@ -# remote::milvus - -## Description - - -[Milvus](https://milvus.io/) is an inline and remote vector database provider for Llama Stack. It -allows you to store and query vectors directly within a Milvus database. -That means you're not limited to storing vectors in memory or in a separate service. - -## Features - -- Easy to use -- Fully integrated with Llama Stack -- Supports all search modes: vector, keyword, and hybrid search (both inline and remote configurations) - -## Usage - -To use Milvus in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use Milvus. -3. Start storing and querying vectors. 
- -## Installation - -If you want to use inline Milvus, you can install: - -```bash -pip install pymilvus[milvus-lite] -``` - -If you want to use remote Milvus, you can install: - -```bash -pip install pymilvus -``` - -## Configuration - -In Llama Stack, Milvus can be configured in two ways: -- **Inline (Local) Configuration** - Uses Milvus-Lite for local storage -- **Remote Configuration** - Connects to a remote Milvus server - -### Inline (Local) Configuration - -The simplest method is local configuration, which requires setting `db_path`, a path for locally storing Milvus-Lite files: - -```yaml -vector_io: - - provider_id: milvus - provider_type: inline::milvus - config: - db_path: ~/.llama/distributions/together/milvus_store.db -``` - -### Remote Configuration - -Remote configuration is suitable for larger data storage requirements: - -#### Standard Remote Connection - -```yaml -vector_io: - - provider_id: milvus - provider_type: remote::milvus - config: - uri: "http://:" - token: ":" -``` - -#### TLS-Enabled Remote Connection (One-way TLS) - -For connections to Milvus instances with one-way TLS enabled: - -```yaml -vector_io: - - provider_id: milvus - provider_type: remote::milvus - config: - uri: "https://:" - token: ":" - secure: True - server_pem_path: "/path/to/server.pem" -``` - -#### Mutual TLS (mTLS) Remote Connection - -For connections to Milvus instances with mutual TLS (mTLS) enabled: - -```yaml -vector_io: - - provider_id: milvus - provider_type: remote::milvus - config: - uri: "https://:" - token: ":" - secure: True - ca_pem_path: "/path/to/ca.pem" - client_pem_path: "/path/to/client.pem" - client_key_path: "/path/to/client.key" -``` - -#### Key Parameters for TLS Configuration - -- **`secure`**: Enables TLS encryption when set to `true`. Defaults to `false`. -- **`server_pem_path`**: Path to the **server certificate** for verifying the server's identity (used in one-way TLS). 
-- **`ca_pem_path`**: Path to the **Certificate Authority (CA) certificate** for validating the server certificate (required in mTLS). -- **`client_pem_path`**: Path to the **client certificate** file (required for mTLS). -- **`client_key_path`**: Path to the **client private key** file (required for mTLS). - -## Search Modes - -Milvus supports three different search modes for both inline and remote configurations: - -### Vector Search -Vector search uses semantic similarity to find the most relevant chunks based on embedding vectors. This is the default search mode and works well for finding conceptually similar content. - -```python -# Vector search example -search_response = client.vector_stores.search( - vector_store_id=vector_store.id, - query="What is machine learning?", - search_mode="vector", - max_num_results=5, -) -``` - -### Keyword Search -Keyword search uses traditional text-based matching to find chunks containing specific terms or phrases. This is useful when you need exact term matches. - -```python -# Keyword search example -search_response = client.vector_stores.search( - vector_store_id=vector_store.id, - query="Python programming language", - search_mode="keyword", - max_num_results=5, -) -``` - -### Hybrid Search -Hybrid search combines both vector and keyword search methods to provide more comprehensive results. It leverages the strengths of both semantic similarity and exact term matching. 
- -#### Basic Hybrid Search -```python -# Basic hybrid search example (uses RRF ranker with default impact_factor=60.0) -search_response = client.vector_stores.search( - vector_store_id=vector_store.id, - query="neural networks in Python", - search_mode="hybrid", - max_num_results=5, -) -``` - -**Note**: The default `impact_factor` value of 60.0 was empirically determined to be optimal in the original RRF research paper: ["Reciprocal Rank Fusion outperforms Condorcet and individual Rank Learning Methods"](https://plg.uwaterloo.ca/~gvcormac/cormacksigir09-rrf.pdf) (Cormack et al., 2009). - -#### Hybrid Search with RRF (Reciprocal Rank Fusion) Ranker -RRF combines rankings from vector and keyword search by using reciprocal ranks. The impact factor controls how much weight is given to higher-ranked results. - -```python -# Hybrid search with custom RRF parameters -search_response = client.vector_stores.search( - vector_store_id=vector_store.id, - query="neural networks in Python", - search_mode="hybrid", - max_num_results=5, - ranking_options={ - "ranker": { - "type": "rrf", - "impact_factor": 100.0, # Higher values give more weight to top-ranked results - } - }, -) -``` - -#### Hybrid Search with Weighted Ranker -Weighted ranker linearly combines normalized scores from vector and keyword search. The alpha parameter controls the balance between the two search methods. - -```python -# Hybrid search with weighted ranker -search_response = client.vector_stores.search( - vector_store_id=vector_store.id, - query="neural networks in Python", - search_mode="hybrid", - max_num_results=5, - ranking_options={ - "ranker": { - "type": "weighted", - "alpha": 0.7, # 70% vector search, 30% keyword search - } - }, -) -``` - -For detailed documentation on RRF and Weighted rankers, please refer to the [Milvus Reranking Guide](https://milvus.io/docs/reranking.md). 
- -## Documentation -See the [Milvus documentation](https://milvus.io/docs/install-overview.md) for more details about Milvus in general. - -For more details on TLS configuration, refer to the [TLS setup guide](https://milvus.io/docs/tls.md). - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `uri` | `` | No | | The URI of the Milvus server | -| `token` | `str \| None` | No | | The token of the Milvus server | -| `consistency_level` | `` | No | Strong | The consistency level of the Milvus server | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend | -| `config` | `dict` | No | {} | This configuration allows additional fields to be passed through to the underlying Milvus client. See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general. | - -```{note} - This configuration class accepts additional fields beyond those listed above. You can pass any additional configuration options that will be forwarded to the underlying provider. - ``` - - -## Sample Configuration - -```yaml -uri: ${env.MILVUS_ENDPOINT} -token: ${env.MILVUS_TOKEN} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/milvus_remote_registry.db - -``` - diff --git a/docs/source/providers/vector_io/remote_pgvector.md b/docs/source/providers/vector_io/remote_pgvector.md deleted file mode 100644 index 6312edabc..000000000 --- a/docs/source/providers/vector_io/remote_pgvector.md +++ /dev/null @@ -1,131 +0,0 @@ -# remote::pgvector - -## Description - - -[PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It -allows you to store and query vectors directly in memory. 
-That means you'll get fast and efficient vector retrieval. - -## Features - -- Easy to use -- Fully integrated with Llama Stack - -There are three implementations of search for PGVectoIndex available: - -1. Vector Search: -- How it works: - - Uses PostgreSQL's vector extension (pgvector) to perform similarity search - - Compares query embeddings against stored embeddings using Cosine distance or other distance metrics - - Eg. SQL query: SELECT document, embedding <=> %s::vector AS distance FROM table ORDER BY distance - --Characteristics: - - Semantic understanding - finds documents similar in meaning even if they don't share keywords - - Works with high-dimensional vector embeddings (typically 768, 1024, or higher dimensions) - - Best for: Finding conceptually related content, handling synonyms, cross-language search - -2. Keyword Search -- How it works: - - Uses PostgreSQL's full-text search capabilities with tsvector and ts_rank - - Converts text to searchable tokens using to_tsvector('english', text). Default language is English. - - Eg. SQL query: SELECT document, ts_rank(tokenized_content, plainto_tsquery('english', %s)) AS score - -- Characteristics: - - Lexical matching - finds exact keyword matches and variations - - Uses GIN (Generalized Inverted Index) for fast text search performance - - Scoring: Uses PostgreSQL's ts_rank function for relevance scoring - - Best for: Exact term matching, proper names, technical terms, Boolean-style queries - -3. 
Hybrid Search -- How it works: - - Combines both vector and keyword search results - - Runs both searches independently, then merges results using configurable reranking - -- Two reranking strategies available: - - Reciprocal Rank Fusion (RRF) - (default: 60.0) - - Weighted Average - (default: 0.5) - -- Characteristics: - - Best of both worlds: semantic understanding + exact matching - - Documents appearing in both searches get boosted scores - - Configurable balance between semantic and lexical matching - - Best for: General-purpose search where you want both precision and recall - -4. Database Schema -The PGVector implementation stores data optimized for all three search types: -CREATE TABLE vector_store_xxx ( - id TEXT PRIMARY KEY, - document JSONB, -- Original document - embedding vector(dimension), -- For vector search - content_text TEXT, -- Raw text content - tokenized_content TSVECTOR -- For keyword search -); - --- Indexes for performance -CREATE INDEX content_gin_idx ON table USING GIN(tokenized_content); -- Keyword search --- Vector index created automatically by pgvector - -## Usage - -To use PGVector in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use pgvector. (e.g. remote::pgvector). -3. Start storing and querying vectors. - -## This is an example how you can set up your environment for using PGVector - -1. Export env vars: -```bash -export ENABLE_PGVECTOR=true -export PGVECTOR_HOST=localhost -export PGVECTOR_PORT=5432 -export PGVECTOR_DB=llamastack -export PGVECTOR_USER=llamastack -export PGVECTOR_PASSWORD=llamastack -``` - -2. 
Create DB: -```bash -psql -h localhost -U postgres -c "CREATE ROLE llamastack LOGIN PASSWORD 'llamastack';" -psql -h localhost -U postgres -c "CREATE DATABASE llamastack OWNER llamastack;" -psql -h localhost -U llamastack -d llamastack -c "CREATE EXTENSION IF NOT EXISTS vector;" -``` - -## Installation - -You can install PGVector using docker: - -```bash -docker pull pgvector/pgvector:pg17 -``` -## Documentation -See [PGVector's documentation](https://github.com/pgvector/pgvector) for more details about PGVector in general. - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `host` | `str \| None` | No | localhost | | -| `port` | `int \| None` | No | 5432 | | -| `db` | `str \| None` | No | postgres | | -| `user` | `str \| None` | No | postgres | | -| `password` | `str \| None` | No | mysecretpassword | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) | - -## Sample Configuration - -```yaml -host: ${env.PGVECTOR_HOST:=localhost} -port: ${env.PGVECTOR_PORT:=5432} -db: ${env.PGVECTOR_DB} -user: ${env.PGVECTOR_USER} -password: ${env.PGVECTOR_PASSWORD} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/pgvector_registry.db - -``` - diff --git a/docs/source/providers/vector_io/remote_qdrant.md b/docs/source/providers/vector_io/remote_qdrant.md deleted file mode 100644 index 043141007..000000000 --- a/docs/source/providers/vector_io/remote_qdrant.md +++ /dev/null @@ -1,34 +0,0 @@ -# remote::qdrant - -## Description - - -Please refer to the inline provider documentation. 
- - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `location` | `str \| None` | No | | | -| `url` | `str \| None` | No | | | -| `port` | `int \| None` | No | 6333 | | -| `grpc_port` | `` | No | 6334 | | -| `prefer_grpc` | `` | No | False | | -| `https` | `bool \| None` | No | | | -| `api_key` | `str \| None` | No | | | -| `prefix` | `str \| None` | No | | | -| `timeout` | `int \| None` | No | | | -| `host` | `str \| None` | No | | | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | | - -## Sample Configuration - -```yaml -api_key: ${env.QDRANT_API_KEY:=} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db - -``` - diff --git a/docs/source/providers/vector_io/remote_weaviate.md b/docs/source/providers/vector_io/remote_weaviate.md deleted file mode 100644 index 8fb0f7c11..000000000 --- a/docs/source/providers/vector_io/remote_weaviate.md +++ /dev/null @@ -1,55 +0,0 @@ -# remote::weaviate - -## Description - - -[Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack. -It allows you to store and query vectors directly within a Weaviate database. -That means you're not limited to storing vectors in memory or in a separate service. - -## Features -Weaviate supports: -- Store embeddings and their metadata -- Vector search -- Full-text search -- Hybrid search -- Document storage -- Metadata filtering -- Multi-modal retrieval - - -## Usage - -To use Weaviate in your Llama Stack project, follow these steps: - -1. Install the necessary dependencies. -2. Configure your Llama Stack project to use chroma. -3. Start storing and querying vectors. 
- -## Installation - -To install Weaviate see the [Weaviate quickstart documentation](https://weaviate.io/developers/weaviate/quickstart). - -## Documentation -See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more details about Weaviate in general. - - -## Configuration - -| Field | Type | Required | Default | Description | -|-------|------|----------|---------|-------------| -| `weaviate_api_key` | `str \| None` | No | | The API key for the Weaviate instance | -| `weaviate_cluster_url` | `str \| None` | No | localhost:8080 | The URL of the Weaviate cluster | -| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) | - -## Sample Configuration - -```yaml -weaviate_api_key: null -weaviate_cluster_url: ${env.WEAVIATE_CLUSTER_URL:=localhost:8080} -kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/weaviate_registry.db - -``` - diff --git a/docs/source/references/api_reference/index.md b/docs/source/references/api_reference/index.md deleted file mode 100644 index f93c73ea3..000000000 --- a/docs/source/references/api_reference/index.md +++ /dev/null @@ -1,6 +0,0 @@ -{.hide-title} -# API Reference - -```{raw} html - :file: ../../../_static/llama-stack-spec.html -``` diff --git a/docs/source/references/evals_reference/index.md b/docs/source/references/evals_reference/index.md deleted file mode 100644 index 9a5ed2f1b..000000000 --- a/docs/source/references/evals_reference/index.md +++ /dev/null @@ -1,390 +0,0 @@ -# Evaluations - -The Llama Stack Evaluation flow allows you to run evaluations on your GenAI application datasets or pre-registered benchmarks. - -We introduce a set of APIs in Llama Stack for supporting running evaluations of LLM applications. 
-- `/datasetio` + `/datasets` API -- `/scoring` + `/scoring_functions` API -- `/eval` + `/benchmarks` API - -This guide goes over the sets of APIs and developer experience flow of using Llama Stack to run evaluations for different use cases. Checkout our Colab notebook on working examples with evaluations [here](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing). - - -## Evaluation Concepts - -The Evaluation APIs are associated with a set of Resources as shown in the following diagram. Please visit the Resources section in our [Core Concepts](../../concepts/index.md) guide for better high-level understanding. - -![Eval Concepts](./resources/eval-concept.png) - -- **DatasetIO**: defines interface with datasets and data loaders. - - Associated with `Dataset` resource. -- **Scoring**: evaluate outputs of the system. - - Associated with `ScoringFunction` resource. We provide a suite of out-of-the box scoring functions and also the ability for you to add custom evaluators. These scoring functions are the core part of defining an evaluation task to output evaluation metrics. -- **Eval**: generate outputs (via Inference or Agents) and perform scoring. - - Associated with `Benchmark` resource. - - -## Evaluation Examples Walkthrough - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb) - -It is best to open this notebook in Colab to follow along with the examples. - -### 1. Open Benchmark Model Evaluation - -This first example walks you through how to evaluate a model candidate served by Llama Stack on open benchmarks. We will use the following benchmark: -- [MMMU](https://arxiv.org/abs/2311.16502) (A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI)]: Benchmark designed to evaluate multimodal models. 
-- [SimpleQA](https://openai.com/index/introducing-simpleqa/): Benchmark designed to assess models to answer short, fact-seeking questions.
-
-#### 1.1 Running MMMU
-- We will use a pre-processed MMMU dataset from [llamastack/mmmu](https://huggingface.co/datasets/llamastack/mmmu). The preprocessing code is shown in this [GitHub Gist](https://gist.github.com/yanxi0830/118e9c560227d27132a7fd10e2c92840). The dataset is obtained by transforming the original [MMMU/MMMU](https://huggingface.co/datasets/MMMU/MMMU) dataset into correct format by `inference/chat-completion` API.
-
-```python
-import datasets
-
-ds = datasets.load_dataset(path="llamastack/mmmu", name="Agriculture", split="dev")
-ds = ds.select_columns(["chat_completion_input", "input_query", "expected_answer"])
-eval_rows = ds.to_pandas().to_dict(orient="records")
-```
-
-- Next, we will run evaluation on a model candidate, we will need to:
-  - Define a system prompt
-  - Define an EvalCandidate
-  - Run evaluation on the dataset
-
-```python
-from rich.pretty import pprint
-from tqdm import tqdm
-
-SYSTEM_PROMPT_TEMPLATE = """
-You are an expert in {subject} whose job is to answer questions from the user using images.
-
-First, reason about the correct answer.
-
-Then write the answer in the following format where X is exactly one of A,B,C,D:
-
-Answer: X
-
-Make sure X is one of A,B,C,D.
-
-If you are uncertain of the correct answer, guess the most likely one.
-""" - -system_message = { - "role": "system", - "content": SYSTEM_PROMPT_TEMPLATE.format(subject=subset), -} - -# register the evaluation benchmark task with the dataset and scoring function -client.benchmarks.register( - benchmark_id="meta-reference::mmmu", - dataset_id=f"mmmu-{subset}-{split}", - scoring_functions=["basic::regex_parser_multiple_choice_answer"], -) - -response = client.eval.evaluate_rows( - benchmark_id="meta-reference::mmmu", - input_rows=eval_rows, - scoring_functions=["basic::regex_parser_multiple_choice_answer"], - benchmark_config={ - "eval_candidate": { - "type": "model", - "model": "meta-llama/Llama-3.2-90B-Vision-Instruct", - "sampling_params": { - "strategy": { - "type": "top_p", - "temperature": 1.0, - "top_p": 0.95, - }, - "max_tokens": 4096, - "repeat_penalty": 1.0, - }, - "system_message": system_message, - }, - }, -) -pprint(response) -``` - -#### 1.2. Running SimpleQA -- We will use a pre-processed SimpleQA dataset from [llamastack/evals](https://huggingface.co/datasets/llamastack/evals/viewer/evals__simpleqa) which is obtained by transforming the input query into correct format accepted by `inference/chat-completion` API. -- Since we will be using this same dataset in our next example for Agentic evaluation, we will register it using the `/datasets` API, and interact with it through `/datasetio` API. 
- -```python -simpleqa_dataset_id = "huggingface::simpleqa" - -_ = client.datasets.register( - purpose="eval/messages-answer", - source={ - "type": "uri", - "uri": "huggingface://datasets/llamastack/simpleqa?split=train", - }, - dataset_id=simpleqa_dataset_id, -) - -eval_rows = client.datasets.iterrows( - dataset_id=simpleqa_dataset_id, - limit=5, -) -``` - -```python -client.benchmarks.register( - benchmark_id="meta-reference::simpleqa", - dataset_id=simpleqa_dataset_id, - scoring_functions=["llm-as-judge::405b-simpleqa"], -) - -response = client.eval.evaluate_rows( - benchmark_id="meta-reference::simpleqa", - input_rows=eval_rows.data, - scoring_functions=["llm-as-judge::405b-simpleqa"], - benchmark_config={ - "eval_candidate": { - "type": "model", - "model": "meta-llama/Llama-3.2-90B-Vision-Instruct", - "sampling_params": { - "strategy": { - "type": "greedy", - }, - "max_tokens": 4096, - "repeat_penalty": 1.0, - }, - }, - }, -) -pprint(response) -``` - - -### 2. Agentic Evaluation -- In this example, we will demonstrate how to evaluate a agent candidate served by Llama Stack via `/agent` API. -- We will continue to use the SimpleQA dataset we used in previous example. -- Instead of running evaluation on model, we will run the evaluation on a Search Agent with access to search tool. We will define our agent evaluation candidate through `AgentConfig`. - -```python -agent_config = { - "model": "meta-llama/Llama-3.3-70B-Instruct", - "instructions": "You are a helpful assistant that have access to tool to search the web. 
", - "sampling_params": { - "strategy": { - "type": "top_p", - "temperature": 0.5, - "top_p": 0.9, - } - }, - "toolgroups": [ - "builtin::websearch", - ], - "tool_choice": "auto", - "tool_prompt_format": "json", - "input_shields": [], - "output_shields": [], - "enable_session_persistence": False, -} - -response = client.eval.evaluate_rows( - benchmark_id="meta-reference::simpleqa", - input_rows=eval_rows.data, - scoring_functions=["llm-as-judge::405b-simpleqa"], - benchmark_config={ - "eval_candidate": { - "type": "agent", - "config": agent_config, - }, - }, -) -pprint(response) -``` - -### 3. Agentic Application Dataset Scoring -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) - -Llama Stack offers a library of scoring functions and the `/scoring` API, allowing you to run evaluations on your pre-annotated AI application datasets. - -In this example, we will work with an example RAG dataset you have built previously, label with an annotation, and use LLM-As-Judge with custom judge prompt for scoring. Please checkout our [Llama Stack Playground](../../building_applications/playground/index.md) for an interactive interface to upload datasets and run scorings. - -```python -judge_model_id = "meta-llama/Llama-3.1-405B-Instruct-FP8" - -JUDGE_PROMPT = """ -Given a QUESTION and GENERATED_RESPONSE and EXPECTED_RESPONSE. - -Compare the factual content of the GENERATED_RESPONSE with the EXPECTED_RESPONSE. Ignore any differences in style, grammar, or punctuation. - The GENERATED_RESPONSE may either be a subset or superset of the EXPECTED_RESPONSE, or it may conflict with it. Determine which case applies. Answer the question by selecting one of the following options: - (A) The GENERATED_RESPONSE is a subset of the EXPECTED_RESPONSE and is fully consistent with it. 
- (B) The GENERATED_RESPONSE is a superset of the EXPECTED_RESPONSE and is fully consistent with it. - (C) The GENERATED_RESPONSE contains all the same details as the EXPECTED_RESPONSE. - (D) There is a disagreement between the GENERATED_RESPONSE and the EXPECTED_RESPONSE. - (E) The answers differ, but these differences don't matter from the perspective of factuality. - -Give your answer in the format "Answer: One of ABCDE, Explanation: ". - -Your actual task: - -QUESTION: {input_query} -GENERATED_RESPONSE: {generated_answer} -EXPECTED_RESPONSE: {expected_answer} -""" - -input_query = ( - "What are the top 5 topics that were explained? Only list succinct bullet points." -) -generated_answer = """ -Here are the top 5 topics that were explained in the documentation for Torchtune: - -* What is LoRA and how does it work? -* Fine-tuning with LoRA: memory savings and parameter-efficient finetuning -* Running a LoRA finetune with Torchtune: overview and recipe -* Experimenting with different LoRA configurations: rank, alpha, and attention modules -* LoRA finetuning -""" -expected_answer = """LoRA""" - -dataset_rows = [ - { - "input_query": input_query, - "generated_answer": generated_answer, - "expected_answer": expected_answer, - }, -] - -scoring_params = { - "llm-as-judge::base": { - "judge_model": judge_model_id, - "prompt_template": JUDGE_PROMPT, - "type": "llm_as_judge", - "judge_score_regexes": ["Answer: (A|B|C|D|E)"], - }, - "basic::subset_of": None, - "braintrust::factuality": None, -} - -response = client.scoring.score( - input_rows=dataset_rows, scoring_functions=scoring_params -) -``` - -## Running Evaluations via CLI -The following examples give the quick steps to start running evaluations using the llama-stack-client CLI. 
-
-#### Benchmark Evaluation CLI
-There are 3 necessary inputs for running a benchmark eval
-- `list of benchmark_ids`: The list of benchmark ids to run evaluation on
-- `model-id`: The model id to evaluate on
-- `output_dir`: Path to store the evaluation results
-```
-llama-stack-client eval run-benchmark ... \
---model_id \
---output_dir \
-```
-
-You can run
-```
-llama-stack-client eval run-benchmark help
-```
-to see the description of all the flags to run benchmark eval
-
-
-In the output log, you can find the path to the file that has your evaluation results. Open that file and you can see your aggregate
-evaluation results over there.
-
-
-#### Application Evaluation CLI
-Usage: For running application evals, you will already have available datasets in hand from your application. You will need to specify:
-- `scoring-fn-id`: List of ScoringFunction identifiers you wish to use to run on your application.
-- `Dataset` used for evaluation:
-  - (1) `--dataset-path`: path to local file system containing datasets to run evaluation on
-  - (2) `--dataset-id`: pre-registered dataset in Llama Stack
-- (Optional) `--scoring-params-config`: optionally parameterize scoring functions with custom params (e.g. `judge_prompt`, `judge_model`, `parsing_regexes`).
-
-
-```
-llama-stack-client eval run_scoring ...
---dataset-path \
---output-dir ./
-```
-
-#### Defining BenchmarkConfig
-The `BenchmarkConfig` is a user-specified config to define:
-1. `EvalCandidate` to run generation on:
-   - `ModelCandidate`: The model will be used for generation through LlamaStack /inference API.
-   - `AgentCandidate`: The agentic system specified by AgentConfig will be used for generation through LlamaStack /agents API.
-2. Optionally scoring function params to allow customization of scoring function behaviour. This is useful to parameterize generic scoring functions such as LLMAsJudge with custom `judge_model` / `judge_prompt`.
-
-
-**Example BenchmarkConfig**
-```json
-{
-    "eval_candidate": {
-        "type": "model",
-        "model": "Llama3.1-405B-Instruct",
-        "sampling_params": {
-            "strategy": {
-                "type": "greedy",
-            },
-            "max_tokens": 0,
-            "repetition_penalty": 1.0
-        }
-    },
-    "scoring_params": {
-        "llm-as-judge::llm_as_judge_base": {
-            "type": "llm_as_judge",
-            "judge_model": "meta-llama/Llama-3.1-8B-Instruct",
-            "prompt_template": "Your job is to look at a question, a gold target ........",
-            "judge_score_regexes": [
-                "(A|B|C)"
-            ]
-        }
-    }
-}
-```
-
-
-## Open-benchmark Contributing Guide
-
-### Create the new dataset for your new benchmark
-An eval open-benchmark essentially contains 2 parts:
-- `raw data`: The raw dataset associated with the benchmark. You typically need to search the original paper that introduces the benchmark and find the canonical dataset (usually hosted on huggingface)
-- `prompt template`: How to ask the candidate model to generate the answer (prompt template plays a critical role to the evaluation results). Typically, you can find the reference prompt template associated with the benchmark in the benchmark author's repo ([example](https://github.com/idavidrein/gpqa/blob/main/prompts/chain_of_thought.txt)) or some other popular open source repos ([example](https://github.com/openai/simple-evals/blob/0a6e8f62e52bc5ae915f752466be3af596caf392/common.py#L14))
-
-To create a new open-benchmark in llama stack, you need to combine the prompt template and the raw data into the `chat_completion_input` column in the evaluation dataset.
-
-Llama stack enforces the evaluation dataset schema to contain at least 3 columns:
-- `chat_completion_input`: The actual input to the model to run the generation for eval
-- `input_query`: The raw input from the raw dataset without the prompt template
-- `expected_answer`: The ground truth for scoring functions to calculate the score from.
-
-
-You need to write a script [example convert script](https://gist.github.com/yanxi0830/118e9c560227d27132a7fd10e2c92840) to convert the benchmark raw dataset to llama stack format eval dataset and upload the dataset to huggingface [example benchmark dataset](https://huggingface.co/datasets/llamastack/mmmu)
-
-
-### Find scoring function for your new benchmark
-The purpose of scoring function is to calculate the score for each example based on candidate model generation result and expected_answer. It also aggregates the scores from all the examples and generates the final evaluation results.
-
-
-Firstly, you can see if the existing [llama stack scoring functions](https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/inline/scoring) can fulfill your need. If not, you need to write a new scoring function based on what benchmark author / other open source repo describes.
-
-### Add new benchmark into template
-Firstly, you need to add the evaluation dataset associated with your benchmark under `datasets` resource in the [open-benchmark](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/distributions/open-benchmark/run.yaml)
-
-Secondly, you need to add the new benchmark you just created under the `benchmarks` resource in the same template.
To add the new benchmark, you need to have -- `benchmark_id`: identifier of the benchmark -- `dataset_id`: identifier of the dataset associated with your benchmark -- `scoring_functions`: scoring function to calculate the score based on generation results and expected_answer - - -### Test the new benchmark - -Spin up llama stack server with 'open-benchmark' templates -``` -llama stack run llama_stack/distributions/open-benchmark/run.yaml - -``` - -Run eval benchmark CLI with your new benchmark id -``` -llama-stack-client eval run-benchmark \ ---model_id \ ---output_dir \ -``` diff --git a/docs/source/references/evals_reference/resources/eval-concept.png b/docs/source/references/evals_reference/resources/eval-concept.png deleted file mode 100644 index 0cba25dfb4d1f1d0aa9976595e51a8319643d678..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 69484 zcmeEtWn5J2+V?P|l8T5lh=NE;4c!7tDvC5nBQf;QAqq+>EiDo%ox+f!G($-)dZ{csm-uV>bt22GOYR^-vX#BPpM?bl&Y*5H zEP4+`BxkwxlsIKX7K^hVSR~!^2C+V(%HQAj_NHP1gGv_P-X;W5S=FD!gUiTTFIV0c zkb6Vh<(vn8*5zCW-j196@w1f8i7=?JYw4jNo;IA!TbYfY`bEK^{N}BB()*wSA=abL z?$jS)V&vuww(OyLd41|QdCq<4hc(3yMo!CAx;^(zI`F>xx$waza4FF^<2~2h5Sc== zr@ilA{peUG+xO(Qd7Mo4Rbh(zVa%IXdZhD|3XN0D=J6kHlfQj7mJ?dmd>lDQYmmhDydwTo zyjThKY**k~#sy-HeQ+o56%JAA{>YJ>(A!GMsIYrg&g!1l0Fx|6d zf7jMAUlSyz%zF9g*{a2By7yT(a{cX0&S(9~di51FUtMLn(-BVkXpz`{$z^G`wbm?& z@TTr*Ry3XxNA)XU!-lj z+dhAZ&{_WO!7iDL{FAhsn)P>58R&E05StQx$73J>5ygK0K*160!0NDFwEY?up26Bo zcs2GCL9=`(S+r_A`pvA!^=rYhY*&L$vt7>jOz`f!Ivr)xb@2ev?M*A3ykXqAHZMg* zu-9kqy#dsw@vhM@+dtOO$?gu#mC22X>`l(1#QB@CYyF8 zZCYPtQqX`9B72|SD-ect^AQ{;YKKW`u-w3RGs9n5Ae#!J-N6rLy{7b1ES8L;d9`9z zA}NT=j3}A(p&Xqak#5ML8THs@&zI7(paNo*mIuy6Mg&PM9?rDI8GJk7gJ9UK&`Z`^ zuR{B;`2Qd%Q`)>@^Xly}Yp|-=)sS24cB&jV6+U&4Su#HkEtDH!H;jE@DLeQkO$ouy z)Zw_uHW*T`ER%iXOZbZ%uhOITLp&?fETqN0p>~l#G&9X! 
zrf6AkVV0rGe9hlBx*)kLQBTtpF8c$!10|qui8a&Ox#djq;FbFKYYLaL(@N8wlbz{N zxqROxZ@w!~tK_I_ZAez{;n3vd<8Wd{-Ogb3q++4QKDhN>rZZzvBm9=j1Hw;N2PFob z9t*8q{v2@-T&N(*CK(sf!DV@+HrYsX`o4^2qe6i4x%xns-eW1Qch_XB=&iV{7;@jb zz8$=OqCWh|K&Roe%P)s@qjiH9yobVv%!g6o;o&LaVGk?AwXLGN3=;hBt~_D>;`e0u zag~nM*Ss$epC%Qcw579s3=heplq)`-3_P|ath`qucVUa?S z=2rNGnn!qDu7sNZOP_Z>wHWzBwe<%2YvkPI)kM!M5|v*o=d@`=1!&YQ-|1EDmFoTZ z0WREU?`i+8vak|db!@O-RsAA;&_}&}b=h4sT_saxMp;&4dywOWp}kvWMOAkt+P20X zy=J@iZS~fw`tav|@w}nW5o-o(uI_FgN$br04cS<`5fdG-!d{Y z87i->_-li`43lo=(G2B~*H4xWRNJyMix zz0tZBYDhjwhK-7hdKvXYc_UJj?vQ?i9!qD#P$DSfG-aUx*}Zq=L!qFZs5hejdFhSX z$f`(P<;>67c5goaa*wcsuh|IMS9n#JS2S3odp|F$@00C6UFKc(zs_;pg4bH_m)=y_ zCp|&l3Z7BDj4D5~CNV0nN9M;KP1*@Q)fR=9GkiRi^PeJ(Oiys=qPl*#1vlXLS6;+Jeo!iaZecF@Q zd6n}bjb!#VJD{4ynzfpfX{hKydK!r@UQdP!MiHI}sg=ts_BnPrAj}*3|A0lf%p+jg%sl zqVzgnTAEE*sOtXe8+mx6CE3w}**DeOt)a#Xh7s@^^b^IpV?JP9{lEILoTOlP&#JM~ z7USjR`fz>zv-H#3=V50v71HBE5?>NO8#ZRfrD9kJoGMs0L|N_NrnFVD-KZ?rPMo_wwLJ+E`T**JB>O@l`n;vJ9GTm;mKeA)8b9VgoSQ-p%x!h7g zwjD`BhfJAH;TG~O_p9it@OkYp=QHyBeB14&o980gBF8eSmwhF7NhG{dN&Mr; zgSNOl&2_5D2(=&SanhV68B?5>;vD)^_S#n<*KAu^PDDRP9nh||Dr9~S|4ylMHIFgv zi}+LFD#s7V%KJkZG=wx~qD+~H)M>$1_qHiIN}FN}aC;g62$Bo@yu56~M zrKbv^(dPN{G(Vv$w-NRqpu&NO)Al9;++&@Ox;MFMYn$y`@?IY+%zZz)pF>+@cu*8? 
z>XY5`xv+k-+kASSv?&_nu~mYvCYV^s?_pTyLg$iVJ07*v*bOW0H+ob#zuQ&Dy`g(g zH=)=E?lZr>zr?qcI-)(QI_h3bZaP_tFh`bqn2y*KHJQwrmemZFx;|RMlAn@46U8_d zA}uRFI}GLbe($ZRCV+naTK?tk%)=>zZ-;BuuN-XxEzWftkY>K)M%CEyzkQDaL`wqVwYm$dS;h~1?AHyLx>L4b9#Pe zy)oSvJDmPqGIJYItMcuckLg0d34SZ_)4;LwvYMfy{jt`dFvl2k7NmcV@9F%5^EiIv z{mj9PL|GwkUF6o3eL}$ORD@l}}@@xtH%+U0)=_q&S*Wl5J znboH8-H|0nwZJF15SaJ2OcVcE7Vi0q%5r-}=2cl~tke?eTK7arwP|YOYkxQ-79zm9 z{P5eJDIS@wFgt$Gs3a)sHojcgiqN{)&l4>Zs-G!OqniW2hYNkI|Awbcikr#totx>^ z03A7owDT8~WL%cJMjj+a85GSd{e-yh#hzPkA=R9?ncKnWLB0D9Gf_j8613^=HeY05XbjE4>HS8XBOxz&Qzs0FVB6x`7A$lEGv6_qj6O9S}Zn4G#nc zfx_%S1i#NgAWh(S@reb#7j6DH;=d0C5d*)j0$;Bz@V}lW1ZCm>>m2+9xCfHgQGDbrqJ*Kc2Z@g6+BwFC4&YWGCXT~9+@!UF2Z_uLX{ zZpG*2=zP%+NXknBICZpgf6nUV=-}ig;U#_Z_Y)Gp`Nd`ao2*i@}JFtU(z>i-QArf`1xTl7#~cC59(^ee@|RooL@kY zUr>-2c!Jl>+sXa87q62W`yZYBs~;sRHw#xgXLmcO6YE94&&{D8?$S4JUJUf_&mZ%& z^0NESNKS5lk_8aRe{qNZ9-jdJzxxK7N?lx)(6;lka?n?@a|Cz>j3FZ;E-Ll=`Tx1| zpArAkQtv-4?+Jf4%u=s-oD#TG)DELG>Gv2&s(0c5iJ>dIgcK(zPa6YmEr!i`xf%lk9b%=j71@R z4{0=nALYCy&wcUaiMG6wJh>hfPtaDn4WErT47GXjw;4YZAX5&n5X$%KGcr0yr=`#Y zxHEsp$0MMU1O3mx$6TdgJhQv)37Tv{ATSB*fBl=iOa*ei`QKN6KNM?@2X4Mb$!Dzi zzr9k)1%YDS{^>d>_!=n{D9_7zjsAZ}1NtQhO2hx3SzL_wNiPT_@H0eh^q;7pVg)TS z{sVPXa;!grF1eVE>O%j4x*!l9>&ib~2XkF#1*z}O4vPFE*GNF(#Q!)C)iMyCSvYZ0 z@jr4?4%8w3Ukv`^r(8H82}nokn>+dc2n23ciEPm#LVdsbvGWQitTDlC}ccxWa{ zytT$&{&wDSR^SveP%qAlR+EukZ5eUb&^ZS$j+7a0_FYw9crExPHkopgN%YHV((4lo z;#4H5OwDHhg9&t9)2xiXoni9XlYVc?BMLu+l6QdZq|A*WjZCogM~Bq&`r-8{-jTl2 z$?K!{`r}@@)ttmA>z+>=yREQ)(m?hvgGLe`BVx>*^iO;tn@zIc-PP14zstZP&Scqo z?)9^jd^?wAQ(IBtvY`KY@@_k=e;dUs;qGzHTdLL)NU0a-xPSGxTMvFuC`b!R+>1#w z|M0W%PW&F>*D=|Ghg{qpw9Ahhsc5+QH>(o_l^?;Eu=C5X;#b}Q*BJ1NWNjzs8L5cZ zY`9ib-nX-EW`50D3CmFX_E}*eGCyn1Bi5}Zz6rK)3NuWbyuMV+aMWkZwRB&aDEG91 z!DI3mUbMT-jqNLFXGH|oB)FoKchM^S`yw4Uoy~hIB7ekYc>+Ni5XQfDL_moJV+h&r zpJ}A=B}gwX@gVYkrEijKWjEQ%!{1XrKUH0LAVhAF18C$r#GLXUZ(ijF!s}Re&fH(e`gZ`oM#~D~ zar(jhk2i1WvU=cU)do4G)@8qCx<()UH<&R7#%!l$!J?{T7XD=Q-Pk(NPRKpv)VmAq 
zdPNl*$N1Nf|DG>vZsxQs1nE>M>2OWnY>+tq=z=dI0P-oslX!ab-?O^#?(&42pxCz* z^21}47o9X;SA7cl_J=Xx)dZf#3@}^&$<;ra$-O7+d6=KbB=G=ig66E{u4KAsmPG{q zOb&w33)B*&J$d`L`M=^9*lw9Fw?y&On4K3b=U_nHpzL25TFzz0 zkH0JtG%iKFdO@!CTe-XVa)~%y*N4tLe?@^mGysg}#IE3bAvnuRR3HxfdRyMVK4Gcd zOaV&SiEW_{;=7=q=JF>F&|=^lnv6?~e`9eO&FqY1852GI1@6100Q7sMRf+w|Uoax; z2H0p{YQqL}rUC%fE?QM9P{)0jZdo?5Kdkak_Rn+4fo}AYS+5LqVRBv*E{EK9_yYl?-W?x=n zw;K3FJcV))Hgjc!-%tG=gFE37Mhni<0&a%^rx?)ugO^dP5t~GfS&e8*&2UU+gN=(U**gOvN3*8 zWfJZx$oeQgd*OoMM*znEsI>Wqn*2)zlsL`$*6qi$<0`j3nha}ALAKRv6g;>nr^td! z0Ao}?02p{sIr8YQN-4Kac=}ShqW9Iyo^!P`J2_%tGQs@-I~8+2vHp!vRz|!gt7d|r zxv|HW0s3O^15@CMmhk^8g~58H$GrHWLgzhI=jyBlOwUiCg<)}R7ob}Nu*$*{s(#Sl zIt46InzWz5kDjc0eN0FFMuAF-T&edZ2l?M@%X-8rf|o_7y)Ub@pJDU2hOBSFo*gzb zL3UN+LOHLp0xl;={^Q%fiAMv^_j=YP(3W=WQq#B; z{~uQL1z_`M^UauliRCx0Q@Ufuk9I`*2lcQ>juoYk2VS87KfU0~C4evL$DqOAUh!Y* zt6CEzy`WWcQgdO+tpq`9%rKuYpublaN}tDJbkiubsDEy6ze9h~MOG{@ z1vVy`zh{8vtY(RLsd)m}KGmJyDhjynr+8WC*6J)p5=Uf%u%o~8>?#}J9!7|$K^*2| zwzq)ss2GBO5dhZ+tuZ0OU$e%Ae{qSyOTbMkSxe$IhkZoVTZBG1Gf&qWQ zv~HKB5=-0u=u1HA-s`B>FO{2O-vyOnEbaq-Mq3h?_Qbf35SZl2BeU~S+3UP#LnYzc zI)$-Xji2iyJ)Rk7>xl%*n%W$Gx2iHM+F1$aHdpy$gf6ONf#)xMf2|JWq%ukeoE~#* zht#hPR;k4zr~Io?)gH6{q+4UThC33Z2|Ak2m1aSxvxvMN{ASdkC@G#9pLf?fye-Cg*ZQdL(FA_yoQ?V8D z#l6?|nFJ*0@3uDAyY(Pia>@Lk4v3-&BK4*0R{?TZwe{o_WvP!}GbLUtPSLue+89Li zGK82?GeBh(4@~mQCe2DxuP=*eelqEhn;wroAdExh|NbY0>5~J6K@>i!N;I(NvWV*_(?GS3$sGqwmb+l2CG3jHH>h`R!FKZ<;g--RxDrYxf z-Xu2xyHR@UPxYoO=#tn0WSyE*lt}W?xu<``R$aRuXMl$9%CK!~&GDsQjrW&H-ECH; zkmjipYlFq*J7q`G9|g)cR}hw5Dld)^I-Fq%#?REnba>e~zDiJE}w%_9X#S2uew;ggCsIk)E?t@?%gVh{aB-Sqopn^KbL2~ew7e%nwOJXVR97Xi%#TC^8z&|JuM_b!yv6>#+So2zGy<%pkiViZR zD#*ro)QnJrLlxDz0RFfFA*$(BAQy{!-81UoEA7S;n|W;H*i8VgOM-5leytW=VlOqCt;;@JOn=6?EGj@MpwdDL_@-u>(x#kM zB5RzBIV)8|^8we8BjT?s45Z5B|0*lg;_0J}>8ID$tmapZ@YoGlnk@uoqQ&MK*P`U?fB>88T?PRUUoTC<+c(agn23*&a|KG4agv^uQa`~* zwDuRJU}0mmS3&+NW{D%W%sUJG|asjf-xAm=-Xso6Ze6MegeuOs0i5L{|a2IMj zLu8a#ui?3+T;+{iYRZm%Oai6>AiwU9_7)OLWq?p2MGcj>TV`M&Q{b@sWovIhxTzlm 
zM3(n0Aq-Hlgpu>Xl4Fu&khY<4z_00Y^9*O`DwEFAI1UlVRBd4BB<3A<>>iZ(Nf+p< zN>Q!->RmJ9pwn`}>=4z8D{th0t6ICigC}r+`zH}@m~_OC=oR5mbbVJ_5>Zr zsQGkF*jL7DUFUOw8CCs!Siby)8(spF#nS25FZ!ZAwi`qf*@&S7^#awbR3C#*G6r+) zY3TsnXG>In1ca|*5CqKiEP8>eBO1^E&$bU49IDNJW!6MyFNvb^@(h}J@Hs&0hXU<+ zZl>bTr_;0=->u~LCZ)3Yk2)st;h!JDSw2+mhEvzXAp^>brOsz!mSWKAZnCDm`b4J( zR&dC8420wruUTWhGk$ET6&Gdj9w3wMngvyYav%^TC%ntce0x>Ev{0*Q-ej%b5v^2} z-gYxq8IKzDV&>a7PkI$E@60^JxdOBQng#5-F)63}kbq`QmAeUS?OWbz54@CRCIFXX zxOkVlh}BINtBf&{$krdT8U6g+to=|oslwNhOVVMa?QGRPKRwRIkB@1@H8rP{!+VLxW(~N*>}%u0{HBU zM-)7#K1R7C>!UU?ah|pQQ%vcy%VI4VDELhDMyG^*r9dijrIn9~KcewmH{*1{X^%g@ zXCE5a(7A%KPI4SgFm)I;@ti6wa_FyqTG4kV%Hv^87I6CW>bO_>Q)CRS1myC`zJ<_I zlhE}J@i6F8{O}EC3={S|f2QMAjZ8xCLJx4i^*p;pxZ-GJy#gvi|nFGq;rncWU&jF9sJbgctA(&`pv@!+f^gJ-+G zNjv&t!8rP>C2Zgju?t2!mGs>E)aJ0^ZO_mlPrH!IccqQ%&BNIW}VDl3bmTEt;?1qllb z+%96kJ7Zd7uPwfs_iQO7rfIMSWl@qol5|Do>`9wuGB>g0QeoP*)2(Gth7SLQb`sD2 zq7-3LzsMF|5=G?Q)Mf`TD}9R&FZeY<&xP+$c<2`@R|&>PJCZq)fO0%y>Z*01rzPqs zNBYGRnb1*9gycsxr;!XIM|kF8B?H;IrcZB!1$yJwM$xbWM?-}ibLLJqHigI`C!ndp z-B8pQF;%b-#LUq3+CQ-`nI5vGL+84{?c@(3pL$TeQR$~U9fRnat!u$~%EC7aQRP-) zB2%LQfsCkEz7i2TWpnUl)b-PRTq<863*;Hacu|hu?o!R(K{w9cwE*EdWuJOf>sa^7 zYlplnd%>!y8&eCZC=i{ z^x%6}SV57Ua{Xik4tV=NF+iZ2NcD~aa@T7Dqj$S*+J`%#c6)uhEOjDwJBR(= z@I|l)vY`6~OnToAy)U9`dY@UUMsuRD(eJ_bO`DW0F39OiY@qTc-%LQ$Y4TkA z>64?$=l*D~vz%?3ILnc#`zS2N%~v)5=`x6rs&RJB7}Zy0^;?Bg!H2#`FtlCxQCTchuI<5gL}WB@*z-6-gNF`Qd&3 z#h1M~=_%oyTksTH67h{d(@tJ3#Csc@q*nFh$D*ml9;d~3j(GdhuWvVx%TTWR*8})u zL4>)IC|K+Q>$p6z)mbL|tq47tmq9;N0pF|~^5|#Ys>ohX`J`WV>ZK(<2~2)DF1ou_ zYKVut2NRt<+vq4(5(Y1X^;KlL%*?FyEKa@{S8wDrJ+YZVN(X#~$)p1Q4 z=c;@SD;g&$ZVlfJwPrez=&q&)c1o$UM(hM!U0<=;9}%c|U7~^{2H6yswn-2oRes@# zsX*3PsVwyFRY1a{Uj6h0|E{;B&;p+L5=bcUE{KX9aKWxFTQ|rb(tL33b7Sd9E`!1= zmQCt?N~}f3gg{%Rz0ECD<3Nr3t;!hk@rOm_k!GpBDvF3ZB{K#3mQ^+aKnyu?>)Dl_jQd% zG{yC%r@BwA@`M??jq^w~xS^_IdSX{hQu5(lc41CzDqD?ZSf3fvj3h$Rq%IDILbkIb zV7FDkrt!(*{_Vjtn~g4Yn@gpQGHtbqvs_q_bYs};jtk1Bo#k6f+D^BolfJdJDb8Os 
zVTjVUUUW$xq4A*7#P@Bn{`Er0y@A{odqWrtYK_NYBsEmZ`J4}B{1PYRkwFIc_+r?| z=Q!P3J2KOy=`23itCcz>wkW>B*D;0v!>a3mW!d*Ko>`t2z66UgyWT_g2?ga zentZGTR>iz(~`wSa^r#-p#C>Ik#buAf9z$69#zh^?7AUSjctwV{9r?#8YP|ilTa+$ z*L8utyJX9(FLkbM+A0`OahrFm>92_yr}Q(tLL0d;-SHlq(#$aP_`Cd@n7rH->2Mo&Q>B9{|%Vy*$|pbM7syRdkn>%$Ij z{m&ZP&%fEn>AQJ0)JU+IHMsRkHSlG4ud2h5Fg>kkH&inGb;aTc_E?tFsQ!E)Wzal< z#%hPnutR1N!nfw%Udz^gPqV)!yaLJ7DboHz8lwL^XFTATnW0lZ%)xHD>({tx$8JuZ zaBW#THq(!{?$b$+DRhnRtaod(-s|k_^&b2yXLszh;e%~siDj8O^a2cF&I17Aae#Q51#^$P@^n6Vc_Rf~Eyo^vLXl@R!1 zhU}T^=fbrfWrwo8@LI>$CaZPBTPe10&CP}`12Pp93sF@l8_4utA| zf1aGE6Ix3Ophs~Z#t=DCK+`JjAr1te|ADc-6GboQ82}MwxK(Y1F&Nv{`_3>8PR%l9 zp-6Z96;a$d90+sb+si~RfovvU`&YMJ!h7}yMX$7jziQS#Heo=2AiE*0?Kxr3sr>@T zN18A4WaUhuI_07)1E5q<0SGxwC9K*?_qXJGHo4^5EiS6;t@7CqvM+CXBP#X|=eMT> zV*pg=sux4y+VeDSsk@{xfiwp5nXz;TlRcvHyp!e+A|*qgSD%(IsB~057+O45(~LC^ zY_LzhucLbjFss{UW!zvo8emyrwRvdEi}W{uuA<9l2|Rv2Uge!7=m;!jQc6j0`~n{)eKTMT z)h#>w^b^p>J69b#@LOOFLw&JyV72mJgRSA;)p87lSff zrGG*g99VzwF0vlStG_vg9Frnqtj=Yuom5`t?W3dpGdXX63Q%y|Sz3r~rC{PM&q^<%Xc`<#2}jKGu`Xg}%i?hQ9ZihhrRs(hsd^0Ywvcs-zFT1Gwj$UwM3grp{jm zodRXxP4#D8UTYA`Pwq4M&k|mI)+nPi@(qskJzy5!+Jjp~Z2A7myK<>00VI%(uTs%{ zT@Qn#9Q>|--i?oE7zFAcnXS5zp)gm-VCr1wivAXy;=E-+NK+s?U$q z3a;Y;mI};q^j)@b%5?#}*^BaAfC_m><$Yd>z{6$}L0=5_AgrHctt@a``@_z6SyWsn zE45|9oG4lc7{iGtMQrmeaoX8)S$b~o8uKi8*=SeX3yQONQe1pz`_;_Xn%TI#k1_9B;iJDhJ zsDXx6Ov_g|QQRzj8Eowo4D6DdE@pS4YORj9t=1#7x}b<}M(an?Qt^V_T#GGDMm&9I zTNL7$w1M)W4B`YGCF>Xf6L0npxe!p50=@D^-e@>IXw_Prgf=A{LhR_9{c%Y2-f?xRQOr_Kfm>&YCXMcb)wWZ`hMzRhj?P zRb#J(06VH4Q-juT@B&p&rZ0eV){(Mw=Oz6;^CeO&I-YP}_I(_GgK^zvrTv|#dw8=Y z0ls;s<=dUv@%ox1t5} zYNdq$8~sA3DbSTq?TZ`oe0gR3r8h_1KB?7Yc0@j#d8Za#rv&MQA}~>Ib}47b(d8JH zwDCYiiW~FOKXN8zkc*YvS}@rUGA9~z*rc$nvZbBdP3}g62%c zZnC5aT&O0Uc7QZFC7A~=dOCytifTA8_+#a8PwR&lg|AlWgIGtaPKarzca0R+7G$FE zV;fI0u8il@Awz0sg>>nx#>B1LAg5QtsoC~2s@@SMeirWAJE@(KB(Y_hJN%;j`(#Ndr10u&~RcLllVYR>#`^7>pu!7#4 z@5!Dgv>&<&`O$DREh;;s9I#E`_O-a|gAruW!24t^X=!JOGATq;OmDHB#p?K7>k@Kw 
zCILx-G!B5A!Zhg5zUT0FXsk@HIth*Lr0$G))t+x|ARUWz?Qc(RH#QV*@Q??rH|9@a zHdFn3Quk%WH-ajr{KZ*5^edl-f&^S$?<%pn0Aha2AV{A^2y6p%!7qQE*$XSS8miUr zKNt)==fL@&-xJ^bB-=5u0XvYsZLJkJOx`3q{lajjkJM3EzV9|#BX-0dV!Zx%RczZP zW=2Gr8IGmjUgp@Y@;7uERiwZeTP5)w`%Qda&T#r!A@eKkiuRSADgFZPpuTg+W2!PF%Wa>|cnOMFp2Y4Ths30XmM=4>t zJPvn<()4{t$?Dt4*EHdK!fVxi2-FJynN4^kop4)&d|0U8a@pbr6wo!Bx(Xs+%5`31 zW;A!oVH@0a0LteI8?gG(B)$(pm75r^`(l{$;Z_?*7U#}|@IGQ_1Am~{JSE~qsIKe6 ztJEpS(?<+I#jk!YgCSSvtA_m$)rWx*J)r7Y5g*}!RJgfz<<8BS2ICNCXF}gw0d^}68Iwd7X7P~c9>dH zhcALD;1DuzUszRLSmR=&|NJLSZ~MoJNrcp~4GPPRJNLN{+iZBzoL#FQnD^2{bWHU~ z=2U557OKc;Ih5l5n%R{9)My%}PJMd zo20i5&^Hb-O-*F5^SPZuY@-I8-0~Q?Fg02tg-)LGJsd|xszM1lP5dIe|r&{+`CF39ZQ|1x`ba=Ge6r>luZmgbGvM4T^~* zv6?c*{j4*-YE#B_iix%?EFqmek~4x5ScQzijFL9JkMqq)t6@t$a}W~8A@t%iSFPMF^#mD zyt;Pj{Xng5Q~vX>ScBHajdtVhL>%hL3VjHIOwl;N4=EIgnB-vO_DKYl zVitb|{b%)s6s%7UI;`^@rPx}T4u00I-&(FEi)m)sJBy)TG^#QA7l+YPP->VA9jkYo7bG%b?um4qT{E>)$2yYH-`lk1b(b6?6+3#ZS8)$%2H z1u^y+$>WwYk7s(^6>>PYVFCEqxOB^rRQ_7L$v(X8_6U z{Ni@SWSSy84kvVA2jpYIJQe66e2(a8Nm$z1Jbz4Iyd-lVe+}1az{&biViHsq$`CUS z)e*O9>UG;HHDwG$b_;s6?ac~(5q-f>M~Q(Ib4|KCao40R%VK{`ZWm1Gsi~A`W#2o% zEK2gfMCE%-uH4VA+d6wl*IGK6TvGoS^Vl&{A=(qU?ls?-?uR=sLl5oMIW<;q2l$|- z4}KI*^?7P%z8=DHj6x0`i!xs!?Mc6N)6vvG z?Z{5cplj9B)XMcg5wfKzpgL!v%v`wQz=x>%%H0qCfqt_w2n~Yq8j$H z(M2Hj2eE7%%*6f zYsOOv(dhu%FO$rz3TH>r&l^R3`YQZZUe`pO9C2GYysREDgvn%PVeJ?~Mwr~2T$z&m zFo}_ksQigrfF_J}yL)g2?_uvZhNvj{S1bE6YQi~vmt*5V1Q6~{wry)u^w~{&qwjqzQ-TdL* zS~C6V8j!uYQS~w!*%&MqiHeeQ?=0j???so|AlDpP6tyHqjYA97;`%m*kmR5({Z~He zKN$LO>Gq$j)?HUjnS_+;&i1tRy%UyAziWVOM{yC;oSls-8OufsHX^MatX}n_yL(V#}9F{ynT@IITVq9!& zT0B>?T;NMxjGnt+34pBu9~}npgNw9xzF=Va7pdhvP`!^Ce@fBl^1k;f&$s6gX+FnE zXfpQKCY1ke`f&;I()3q)Xr9Y}aUUF;I;&yxjl2Q1UCUtZCxvCAbD0mQ-9t2bQYwND##t<7 za5HR=AP zd!+0X43&5Wj7toYGs7m}U&*#%?|hTyWNZ%J;YUmNGq$F>U<0 zru+u1HKr2U(>}Gk%CtVQh?hf>yDvyS&wZ5nZUjCZ9E#~kPa=0s8l#}wk2P#Oclr65 z7+!%cKAwC6Ys;u@ocoEcPfq=Oyz2|?uChV*@jf3}2-xm%nyQ;bVFb~~=WytK#s0u4 z7Y`@X^ebb1`qxi&Z0O2lOqkH$raU`8qsUbJn-)_`rl_`$NuhUqRA$!rrMDA{Dz5b^ 
zZvUvqK93qD6`UMARtF`wJ<8PrmYSoR3m)ZI9;^*{sBH85GB&Ej(FM@&@H!{%R&RgHaATUG`M_i&y2{gfpY(cX9YeLfYyF8) z_q|cN)1DMNv9qO`PufPI9-x}&yqo4C7`SN$;{l2T zeh&vdX*$e7wx~^COowmU%#%?X=_@kW86&!Y11D5KT_rkjyU)F~u*h?hixan2Qk$wX zXfuMCVpj#AwHQ^U?Rab z1Js;0T%Z49uZfP&PlIoM2n&5u7hiKYvDL#d;JleQ*VNYl;TL`*xe^-KMTywccN?N4 z?>wNWqh+j(fKTcN2K+d=dPeQ0nq`x2e0)Bs1Hg#tZeGX;{+2yy_*^EMlI&gk zT@=%AH-#-G5(UC7%T5oq@@q%Mg(2#-fjuXCK-G%IjdmeNfGW21;3(QBGhF*Y#!D^zl&4j!DG5XL=7!fVT%NS=Wew?T#1wr0=m2vTD-+ z<&iI?kMDp#1Iupe!_I>#l_Y@MKAqX5b4{ptHQJ**H0~|CRvqT%Uyn|+@6+gV9F5(e z_1AO_Gf1jmCu_1pL<%<~A|KGs+}&`j_pTV41*)7+nHx_Ar#u_vJDxYmnod9~Q{Y>> z@REdf6g@Be40B`6ql?gWI7kuMj2cCd)iy+nwbU*EFsfC5DEGcvgC-(oFUk$s_|r6S zjmW}>I+_r%LMn6C;e9R!BbOm(?`ztcK~#B+xNjVzCUae%<1%VdRcWV05#~fJSiES-Y8l1Ca zks#7(ayf8OH#d8F$->m7i-{X;>|f33h7c)V$Y^qv9onFhLDxtk1)gr02F4)}BD9&# zvO+Lx&l$16)o+D$Z3cV2Nv6V=$itAw$%RdJ3!C>VpdT_Wfrf-X91sH946G#-D#ch_ zDaZ(70=X8=uZ!ylf*)MGEV;KjaP;dk!1iA)ziyYoBq0UKhiz)TZMyh8>0GepfsiTI zp3R+B^h{?yOW!+DM7sYo7kr8R2SYZ-*Rdz{l*p`J-x@=tj!kz_ zJ^1c6n1oGvob?1ek_udU<-Br)TGq`Cp3F1cX5ABr`%dB~<6Ci*^`qj{$HXv6^?T?lV! 
zE6;EvZ=wMzec8~VYK1;E+Q6;ow|q3`p1Tm#`-SQ)NGwGv?uRkLXmw23PZ^XP`sfoS zuvs}2$fn`$TQ^&sBq5PExd0cY0K8;EhWG9!mFmR?>Mr7B)NLKs4Lq|cWsU9c8@FuB zrst$&&{_ zW$$6>wFqD*3_(!BstXTzj?`V>=Kgq%eZEnPks;^reSjUw&95#|f6~g7`A*gxNiKzq zY5*p%8TN+grncMvbv!30^Be;2@K?e4U^lWsU}OpfyyWfu)Zf(IO(6Dwea<6A(j6D$8z!vW6{~TwG(^F zz-BkYz5Xu0&a3xhtj71k$ zm%V1X_6#kKTdLH?uJi&pt?|YS^vGr&RLGt0mlBErEOg|Gqd&k4MSxF{oNQf#RZw2> z7|#|CZ1biQb$W4$Nv8VFuM7*&Im3aaV+f;i?2W)uYi)J9W&z;p%2dgFdmx9Y;FsQm z{o&|q;E~x*B?7P!plKZo)qSf;xJjDF7$*x({6dCNok_oktc@mJb4xg+O)-AQK|H7` z_f2-VfRX?A;3=fr`Sc;-;Osf=GMpSLaU(|w5J#8sW!E~e#93Lnaxy6V1J&RDTc$`Z zCzzN9bNE!hy}*?2Kj73O<*|NJdpioXa?rI5JidIOzUW%ZRU;tE6jF}Ub$j+@Lg0{i zzyQdlTGu4+<(h}t-#ugikOBoZ1Y^AkMuu^i23)iF$d*CCXLO@julM7qQ-c5m!p@50 z0S%NiIH|k@FkPq!G3K*wyP4q)^`f1Qj3mha^yzv% zBo>UZ_Ji}t35s5}q6AYktDqwF_S-_A16&`VIFj0|vkgFi_yH40g@j=#w}%B#KON0@e zEEx10Sm4Xo1}w0f_?nR6OjYvyq9TDB`5QoK{(mJA?_gqleJMjB%x-J?$2)EN%*9JE?h^61UT2_LsUaz6lgc}bGAda=#O;pq=5?t0*LPw4sS-(3dh;Z|b)NXm7t z-eg()Oc^x@epSN6L=V0~L7cRC&2*lzXoAlAmth11W&ssB(W#<>_&@Xokeo`;BKq9q z0+iIM!I%n6tfRriIv4{*Muc!x;{+7*Y^TfG?Y;mu60(y=BgoBQOuL%r}BiL2&lnjKgZylQ3&Xv>_d7N~)4~YcO zcY`*ZF{f3q&WtoBg$B{Djr|Kr8hwr@#`c05C%42606lnbz=L+NG40lNZYpB#vLNj+ zC!|B;8zDlO(<xq>opyK$(|mrqRtKnhNNoN zR?ragJx-m`T9?bypaZc#ou8;CDk;yu2emq(fI~%N8DRg9 zE^}L=9nWNU6Qk^o;pH3k2vN31b$;jdaHie0Mw8e>M$qk;a zjBHK+U_IFLh7SSGh&=L8bYh4+62bdI;II#dxPXb2vcZ5i1D#v`G!gBEIL0-fq6zEF ztlKSVX@!4iLhMMAvazEiidefij?Zzq*oR6d^A2ZkX~acE)y&OP{n~NQ$K4YtluJ8> zd$uL6Ydv2i-%;Q#A6}yDq8zTeQ4OO%TyA!+Oh0KVl}B9MCk0SgbUrxV?58F2U<4BY zA6y9(tVjvNJ{qt9Wapj-01hPYMQs()5i#IE16bJP2xb9IDp>PqbAurCq3nuX(<71& z)s3CqpIV91;zz~bB+P3bUfYgdTW897w7=W7xn2JAc}RM~wE5(!(Z};~rx%?99B1Cc z0Gp}+JW!Z=2{V`p-HOEcd%fr!>-TWQCdH8dQj5eKNC*3TI(p^$4{w*JiFH@8vghy= zg^f^5-?)XFC}%}R07FUL)*pgyg&hTba{G$yq$hS8lbtiYMiyd6oMaLtlJd^KBT4%G zh_Gd?(fC&L;4}_|i!8)U4r?Y71SBF{_~^Z<{f9N4lB~Bl4p0m?%#yn`6?K@DNMFvX z?pIu_477N6<{eqk;81D#REM}G62NBuo6-npc_Dx(Zy(gvWtP?uEQpHauJBE&Egk%@ zQ9g99^R?-oNXL%BiB)G$#$rrRkHbB!&noHu6yOlI;G%Yy{uGBLqE1|#Ogc0B{oC?0 
zyML&Szu7$ndH_kZ^r4}UlHpBMzMckDFH%fVv zuIF}Fei9|$S@Rq%Fg)}M03}IaukPWJ&OWDJTKH9HS)Gbd+nKA_3q+O)8c#lOYwTDw zPB;(;knCkE_U)9c5&<(;y!i&qG0TH?7h;d(6i53>T}%0v7~DJB-BsPPv1su={ zSo@u!#9nY%z-RsU7mz*9Ei*}v7ytsk{^)LPv&yY*lZ9 zh~aB48!;e`s1JDWqfx}1OS?~YBTodHF-YMvV#@hH2>ZNxbc0FIktvO6-F51Ie7@!) z(xbKkVKeWg6$VL4`0=e$90yKbDhQg<9Q{*1e5p6G*YejZ7zmufjY=YtR=JX0xCp*@ zSq`&IaK5|W8>6_|-7?+O6tpv001^KW+6m%hQRVC6?E;o;0KhIarK(j)Yx>db79{V@ z-1RYDt^Id?c`O-nZT0&Z0fNDQzZA~;;v+;2 zV4|-3?;cxMHOE_z+D`8LgTvyInB9f`=)VKs?=8QCq)+S?J6^mkJ#&C2cdact54aD3 z(BZ#J;m=@sD_1A6wUv+oHC^4n8-M3N+?4#NF{9~i)ukItWsVyoWAzX5{oJQWt94E2 zVJpo6d|xxphWKU>}?woOoCxpc!!SHGI^2qhgyu;a%0vuFG|;H)@$lq%jN&k z7oZ;kd+=#i9Sry(@j~<+PTs!rhu`xNYnC@?{4!ST$GmGYmJuyjdTRU#LMBNW+NXCv2st`-Tlr1*pDwFYbqi{^hrTD@?49gYrz{C8(TA0wT1G&E zWxu!@W^Qq<46^=b&F9^EcI1-<$+W-s6AIE20xuf@kd{PaWWBVN1gKJu_?}c8?D^=# z4MfS7@waVtmA8;_G0@YG@B1pDCx*r<6|FO-&U57cJpm-Rrcn1QcRG75T|F9R*RcFC z5ZzTCaT49tyE2R4L)!!D&-nWZ&tgC}19oN)yyBZ94^aSaGsLEuFRq71-lK<%O8o9V zaL7r0bm81krKH{$`PlU!5|d+o^c8K8HWK$g`1xoHti@L|b+lvDgNWENJgleVn&0iG zZ|)saXUS2)2}Xv-$R_{7{bH(JZFdqm_UCWOF-{=@Qp zg@A?Bpu)1vQwNG<+fPULtG<&Rt*z$-op_b~3K~3r*WeQDGQd@<^&bxc>NAW!e!dnO zy+0Ced0e9#otr)ur`U2&aH;-_%C(RbbDj4?K9J?qK&N1pVk5Pee*wx%xVU4qG2-_z zMNcXkB%ZGTI5eeCZn$iw?&LDbz(-fBE?9rf+PfFNW6q=c?bzP(kpDJY%42mg#%>95 zi1J;TBkds`d5i7}NHNbjb7w6JZm6XGczw$8XhryBjK%8OOpeq)j)9N|!1p)rWr>2o zFCX2Iq4{o=w0-Se^l+0EU#&yc)EMW9NbU0qGXC|-e{g$%S40dqCGAUD9@;B@Q9L<( zJ2D^8UMXLOca!Qz1RcbAOq2PvtwO0=|72;%{hyp0asoMh%d83vbO(MY|B=`Pz?JOZ zzfq?1Ic=Szak5+3Ql1FvB4*nE3THW+hSPxt6H>ma&ti0QWI(hQH1vWCi3(cczl)!J7dF^1f8(q zc!88mW7T!1e%)vO`dFc*Mr)PjPVdNHmLVhPazl0^9Xw2r zSJ$H$4gC{g?*ezEsD%Yv{tyn2Le<2R(%5Uud5Hb z%2WT7y5Ia(QI-m!VgbwU4XCa$#~YH@sf&RZ*}qniIw?*a;Jz|=RyUIwSg@acyT$xZ zXKeYw{9n2mbTzoB8Gn61!Pk#QCX70#1jyVq-1yBtvU~r)`Bk5T`D=OO)Y}Z0gtky2fl!6-oH; zy#Y9c8_)pplqwGeJpO=v%ap|P(X*ZXTI1TC2+a4dO10X5LnxfFIf5xT{yZAUfLeTt zl|7U5tQvu4SV9~LF+ou4$xyr?tHezSJRvKgwZBY$x;R=8p5OYwHW?bBEc0&PHz}Cl zsg22su=W`X+aF1ja=Q9z(7n&?Kf|X6iV@wnMKJ9~m5klfx{Z7DL)@q+ 
zeU42dqsYlN@GUdcu)#)38Cln4o8aC){uR`^&9gjP+JGk#pvR3{J z;$B>Zp3wW`4BR8W&`ke*eBXgPd7lDCPxSX84?x7x?2$zcLd8&w$%j?GWZkywl$|G! zSy0w4k`3|ybk-8;ewZcEmN&n5{QLUti3Gov5M*)9Bra$ep#;UmLEk%A%~9{{ z@i|rs#^?9g&O*_O*d#KVU%1i=GS3*xAT6|7WYhbUxVRzW_{O7Xw*XYzaa5De)axckIcM-;-Y3X#FYxUJy6jy#n-GLi^wZLzKxg;^BQj8yZ3m1h>V^AHf%p2*a{AyoTA1_eYB}5IgpvOU zhlH)=5(45noPi+i$YTs za^mhV1|;}cbsSVCUU&}uP9mrIg-fLVukLMk6SMj=+cxV=@vb7)JBF_wmnQYHT1DC2 zd&l8;@Z4oKdp;(c`Jax12z6Cs8G$O+L``)FCB$(aP;Zk!-~N0%zMVCX`$#f;ZFTlu zW3RYI=Z2+e(M4j;&dUazD0+bYM@%z!HvwFq|Ahee$D$$~zplP1*~XFq3lRpvGg}>U zoLoy1FTI+IplWG-^`*@7mxkG4E~CiMg@)~vCteH5Uu&QX9KO06WQ(4yoCen)p8 z^LA%V$QPAmS|aY?j^-Y9H!2U5f$pu*2lB9@5ytT7INVNfA!-US5aJTdqe|Mca~@7D zEocx6YEjiWGBNg_AwiNr6|u&b@ScOu|4-|H3Fbl5S~XYI&>+|OE60_B#HGAwfO0xbJMjP|0h|xTE8h$;Xn*)=f~RoUb=g^l+d8L;=+E!y zvhbk9h$H|pBQ(n3N^WP|6sV0$#%s*1YQDH^D239;pB@7T1*h1Z% znx9{_kufH^mwb@Y>p;cLWw+(T2_y?RRgqQqpW`N9Ob5r05Dt(9Q5-1+ZlTNIoU`9~ zz+rDGw`zL`S+sJrLq7T>Ad5fNQ3Gp=VAsY5NEod{+9esHJU~IhggCWYB$)trB-<3= zcAd_1TfH|AQ>@C_M_`i=B29vSGIdo~P6njiyCF67TiUdo1ZQp2Fpqw^x09u2Jv@N& z&U&}HG9)M(ZC=d+Mh1=g1zm5Nqmji6&5C&7_Cr1r4>z_BJw3D!^4cK(;q4_s86#fxx5$b?3j+1C(q4-|bHeLp zp7A#q6cu$TPc7R$hSx2=uZ%$o%FQZi;2z?PSh|GG_R1}u8zLM^W4r}f!ZZBM*;zr^ zGz@5Y^no-bCBM&NLl|;Sn+N?YXl#>_su6~!J{ir`Nq2~NU?I+CjraHP+i)F%k(Y1C zE8lA6bIh20;o<;S;t7=awaA@v>X45J-^5eTorsTu22O8b9Rz>d(KuVE&t^q<=k7(( zAL|Od+kDTF#hwQ0U}1K+UYKo)69?=zs5)NfEh``viMYc(Evr)@**QS0C-s+m5cEFx z_b5!x875zhuw+cS)&v%vG+$47h1*&ZuCnHBxS;wa^4=u zyH-sM1153t!R3r=xLyt77M85o*x5w-<2oQo>pgqQsDLomjP3%(dM)tvd-Phzp`hG* zL&kdFkb+k(_A@4s>75GY|3nFgh~b$xl~3vJXU9G7OaUqG%>yjJ0%-_f*q#y(11sUA z$L|BS0|&7;r|*`NG^od(y6x&&S~BlG*jGaNz{3Bc+QNZ^&%s5@P5&#sRXw-jP;Gja zL^M2P(w`$_Am)Il>;<+YigU(1AA!&aYFUpfuJT%RUYPrCN~$xBY?iD^QPB_IjX zH!^aZ{Ffsj-(wyPrr*)Zpqzo|-Zp`8!skpe_S+tSON<>ynr5ZgagP)B5^?zJxNj@Y zik&iu=ZP+s!P8{`K0!eS`Ah894y2ITnm89s#NaH!|DinDx&;@nPzP ze;GsJqql<;xuB&7n9OcK?0&kZs6_r3q;-Ref9qQeS{+^|l2P66u1=8P7SB6KgcpZBkVf8optJGi`;f(Asefy-Nyh>+q;5R@?m1H*Y3N5O~l zKmSZHxSAy(<(XwUoNz_jZjQLX;9&B+o!-C|Q!)NK_SeB&!?fo)Bu(<*z 
zLk7Cw#z4e>*j#af!?H>TT$EeggZX)D zO>t#Z=D#_^nFaBD22<&9bPjmAh}gKdI4LYhLfYz%0li9soS^|$06w>vll(g#M7#GW zyWi@NaK*l)VXRRH>{|C$OnFZfAy*o$kE}GS?q(vOUetA9AK`z!r2r@Jr9MYrc8tGv zDkd|4;L_$nW*pXgEgIt+925_DKdEp;?cBUpQF=98BE7@`5!?BU;xmkqPV@G|7lZx1 z2U1=(4`>7Y{qBHkHpsZb&9@L77}E9DXKB`Ip$cmaqZJCl|5B_3ofkFUeFCm8aT7{MGU!qkcZqx#|RkR=M$l=T*GwzR!6mdu~ zyzYHn=a2bCVP5mLza)yST-cb>$HE}&0#cI)GLm9_g;>=LO{hCI<9qR3m8K?{QU)-o z@|nCug)q^ZIGO)zCU8nPfXhOXR}&som3~Z@Fw_mfOcluU#ag|G8Is-1PzSc}c4#(; z5&y3P5p4*FGOeaFuN%3;-L9l(+bBC*M7+nH}dIC|I_ zx9rPa`v1N2f3d{u#JlTbMnB@1r?QsA5r5}e3h?V#GQ=z2zgJjUS+SXL!?oB)Xo}r= z-&2Bt5;^9jKyTP_SuXho<<(RvF_1!81B>-l?(tJFl@Rpk@_-@{8Bb0&1kOskMFJ)I z!^TX&qYMeb06SM?AEo^@+Y|P?gaOdSJ{HU|D};w0T*U_jU>T9X42gZqUD+i$jKJqT z(z>YbYu*L+VwVz@6Wp=P0=q?7oOqh-Q}ktr0)%IIe;bU3CRnm*qNz_YTA+G>R0C>= zq`JYPn+%aS=Kl4po#2fQ4|$IC>pTU1+-!EmylR}@l9 zMu0=L6HU02zzbqvVLD)5(Qkl}`UKY~8KG*GSk-aF9~c^PBnQNGFDSr60u`|_m*ggt zQ4WBKt;P5cLs`?$zN>g#>B{~cb6dx@FiMY%GupkoN0 zNEo~GD7d}Z}uKD)xpH~ilg!JS5HaU zwacJiJF2yTNb7CNF+(V$uoE_H=-6W7!V6j!@xPuKfqKtxwXIbLRG7TX8{ALZH=Z`Pd6Q z2;}0T(72oy0zpG?(?V;hL@Cb)7+RQ5jFr^rSi;qx2`cq6nes)j_cjP>KK*U!WMJxW z=}kEgQABxf;TpTXUS&9q9iPdLd>7qfs@LXtz~bbM&GYlgu9oow*-ewiu7J3JpiQsN zYo5ke=fZ3^e~gsdJLlZL1z^?juSVMlKPys=qpi+QkK$8PwRRSMwHu}0dd;cl@9*F6 z0FHqmFlfn;f&$)=#JsJbvtvC6hm8v}o=fQ{2n>bPMqfN{KTsdYQyP%gB8!W3iK7@6 z3Wl#+93~VzJX$a^_`VBea)Ev%f#E(5N?>`H#3tiMgkctmij6!H`OOtaLxMQqg#2jK7JPWseJJ$mXw|*cCYXf{0kLJu z63psX%*_TTDtbL%*y+Sntp~r1B1s&j6xggX3IY{`QNu&f?Avj&4}D+6UTgFP46g7F z6mo+ULD7BbV=ErKP6ze4O`rIx$cQz1Hp=i4pJ_lAqD zS+p31?GUNny|-AbgK9An6;r%V_mdRRv=E^~i7@Uz5U4vv_X(s>>KN)iLSBInt)kj` z4q5W)&PzXnSFSyA$6IYYe7R+$ZYxH3Rndte@ifCC_{8Gh6bL zt^9~?Z>ZVP=&>>)udwQsi1Q}%{k|sFt39@zW#B=>Yn-92b;Nr=P%|Gc{6rliTQ)1z z+uKVVL&GPq%Yy$w2~L988hvmrpKoT*5o%^f1hYV86p{bj`%Ve5D5hUQz$U>BbTjz0 z8EG)t?Mxez`ld4FsW8aQfAUR%njM_dI(D4)tcYoMF-N}$M-*5vndoY8zrOO)h)={Q z?};Auc3D0g)6vmktmm4Dq_soh1@O~QG22yo+v z&mhX^;1R0NP-qnr16iWc_@+lrb5>eb#q{jXImUz zr?R@v(X$wG>0XqF)(`I@&k&*EyfR92I8{-Hz^u5* 
zjVw5LPV+--#x?@&16ER#Z#=lmSLgRz>JHa*s(nIkl6hFU@93uTReKSxw%AM5g~qa4j2B>)#+ex`TLH!VV8?9bFd}~Vm$ReUi<1w5MkoURrVn{+xMli=VfbNxa?n77 z_f@{@X&@m`BT8@(>Sc4cc^*f#*4^-Pl(`ZoitKt8B7#JSL(P$P0;Xq(Qe>+|@a$x| zS;khBoJz3G12UK&JYMk1E#xK7saL89#~HY9$VENFqCPMJGgS{raW;U0=zd!7+kID4 zu1)K*_ULFN0vrt&6oK&w^<*{W-6qHLtAGizifE z^g}^%*4uITy5?EB>+3t;;EER?oTjPZuh_7PR)+^Jy;--y%_CmVF7Fwx?N!-3FL4?U z5A?0x(JDMY_SW|G=@4T3M!Z5ihBO$QqPw{2>5e!HR1}j zPU;wee5y_g`hv1NmwCM+8KOf@W@#kC9T+okzdpW^|L{S*A88TsJ-V;2bKfK;htwo1 zTPbOauyG+U1jH#gIfEdEm9a*!49mgW`L{pak=tp$&czIZE6g>x?~6j6u)PyVYW>Bi z=x5tv+oIo-Y@AnyB!N^5CZeZ6$}GGaz)aOUjImrY2%^K!IU_g?LH;s=q0;bWNN`pg z0@FqUQ<0}rwDqU_soG>P@*?HXIimwbfk-c`3Oe(*6i=5ptG;;2+k8U)J6+&cjLMf7 z^q~epAyW=sZHF4Ck?Muk$I{5@Of~p{!2!Yd9pv~TgpUvk1dWRb4e@>z6kgg~e}Ou` zi3vv`i!slcPZ+!Y`@Ls8o*%Jp2b+p#Syf+d;l^u0hg2+qFF41_v zt6ohALt$|aXi2KO{$e_HmbE#9Cxj(~y!&k7eP*K-wmkwX)9aM?3=!UFw`ImW%i3R< zN34VQLxs6eFycDTdmWONT}z{+ZxV01)A_)EfcM;_2Z=0>0kYEG&6%)$dbE?Tk;t#Y zda0X~8r;$tA{;z=?tNc{i&3(Kdzu~5ru-PwHEKAF1-_ej>2b48pr)FV2NbB0BHAy) z69va2MXLov(1)dXTQcHM9spfP$Eh=A^Vb_K9Fb@-m=2m5fO8)d%EWAYvTVgkLL}8t zryO{75Jpjgrvy+2q9qt<&c!G~4t;aE>xv8%&(2r$GMo~4GeHHbJ2#^sPM1yQNMKJW zy7AGCmpUJk+urAmAKt+XqpZY38eCWCO^&nUgy_SmzyU|Y`$e^UFK&JdL%&8Wv7*w% zUT3f}Ah+m%%JU25{pp-pvGPyThzWIuzrt+1pG|D~avlmj(?Bu1h>qUG?eOmBz15-G zJfA#u1-NnRNChfNeb7n$avvczHRAy;YQT!a)Sd3&E5Z%gw}@b<3t6j271PUH}r!%FlNVaJwK!&mLX?&r1@$#kRWyy z2&C;>Se89l{pGu21vi$3@!m_Lrcux-1g7J~jT>@x;v7oAFU#sABZJXc zJ(!3^*Za61aq$nAhKiW&9UP=h)4$Rb!qEj6M#ity$YBe}ZcE|A_i;i|Pj7e;F{^p; zee&t}5Z`}iqd?FZAE%+An3dY;&oM1fI2z40eI2Uno|M?h0`dQ8n&*NQY)Qr?g(Tx5Qg=6<~yM7xDK`2QKK*Oun^{4c6cFh8@k0 z2PHw|iPcO)ouGGtzlQ>Q*gZjxuEyza&6q_V<6mEibC-|7OMbNc>@{}wXM&?OgxI{m zI1B`i4S&EQ41OI7KkaW}ANC$nHL7*`6F&t^qVZZ`8 z>WXM7Az(AlyLo&z2aVQ>r1XU#FT;5`r*q|g*ci4l!DVit*O|agy)nSYMPCGgN+0XkSn17HXOic5L`3neJJg zfk(e65^kLF!)+l%P>h}kC@2aNIbD2V17hp`YzrJOZ}CRm_t6wt(Fy&ZqHnsIk)CW1Fx{lqs_-nJ~~8GBajaMl%6= z8-a`nsA~8mP2Nv+=H4{hdDFeD!dA(F-qp%dZQ1_cn?;d7=jdpHBdq% zbe1*iI#tx2HM*)rl^~afCCGm5U4qK_~9PGWC 
zue)s&cq{-%yN~d5R}7@5cfQOU&vUCi)hh@aKY7?D^e}4Vz`*E;BNzK71-6WSH zd^e3$&laf`53XMM$=j=7&p**G84*d!KsIloUXlV?P=={P?)QAa*GiETvbhRRRz=g= zNg725FO=NWJ|fCneq($gAkPq9jSKD`jo?*4p`U=ODNlX?JyL@3LXd7@7$x{AH>7fP zGI7)*aR#YJ4RVwWDr3=y)X#(E-k~5Xh8^;YbgZ>Bb5|yf@4~*x??|z=$iQkcLLK?y zNIJ(g(7_HYrnN)bd31;*1+X$YYLb^xl#tc*1eJPKO>SjvHDVabiY8=WiZ4U&!29*c zk#!)y82%MlhKUa14>aT}6$WDL8;L)kLGCQYPk#S;mc6e->Qd-v)X4^WhWw)>{bg;2 z_jitnmMnUa(48Fx1mZ9P=vWkAKEVPK!YB@ZN(g-pen3i~4HSSEh<-O^fl5vdqB-PB zH@(vX8zHE6aaPKSCx9 z1%w3wM`v`y2Lnab-C&W|un{VBLU@6wXZMmQC|J5#Vd{^6csWv7*|}VU1_d4ECCOoX zJzq$HxJm?Et#tDQ_hnc{2#gCgO%HSS2r7J$4oCaa*Dg?8H@r#4ET?&~HqN=DcFiF6 zX(I3KywexMbTFzYpTt@(B}jQ>BmK9WtjcfkmM%!Pp(}9T^`F4T3E523{AVGCb-*#7 zbttKaL#~9fZICcQF7{3cq>qP_4YSl6BEBhsFk2=b+2RI^x;Tkj_?4u&850=4 ze|8e5_qdEiS~szQ+~A4+HI*yc28I1lgPWF+XmU>|uo^lFe# zDlEGe=D2M5^M28MK@9%@QiLn`mqHmiwtjwFTskvKi!qMsq$BS$NnFr2i@O31DP+~n z)})}9oi_D_)U@&BUHTvM22zc8xLi(vubKoZ_(fpYdj`656rY%P_j~5@%UdL`+u`|W z6fv|yQhw?O^)&YjxjLhx*zMXCQY%fL9_Rl-WZGF+lLjX@Yp1iL{Q z*fDKUZnJIlhiA1pJ>RLs=BGWo{`zQT1gDG`rqe0#T6V=3p5@0W>eU-j?oKN^VbFxXP$Jzj~?tndjmGtpqEpA7OI_D>)e$j~&wY{Tb zJ0~!n>Adg865Jbn<17LS((9(};Tw!QEN_YozYmZyj%Q1q)z1pl_zfFNbDY79hHe+7 zP(s}4g;limCd&c{8D&s10^d8o^X_hQYQHWe>>H2cY_ofwoT^lHDd4@X#f1Nnn`{&$ zDaGD*>kFt1y{tBTE^YX^&u*5U>tFmoNHGiw8xFAzL()H5>EH!mgWb+w@#n~)#Sk=w zoBQ3f%&o~UcPE8xNzvwz$KHQfc?=p@&&phCI~ucwZg1~N~6bh4+7 zRtL0*^Kj70{(67!7N(ojiJZd%hu>Jn-PRhQFQu zJC^{e0K0}`Cjt-4j+QX@j_k~jSJ}E!51YFPbxJBdrG??xZ+x}3=ZMkzEj~6Dax^f) zy6Vo?*)IL>*1PMASYW0tbW&lMH$WOqZ%M?~u|R%6ukp4eZ_?;@%6_ReA9VJWo)Gg? 
zH;y^vKiHg5x>0$m@G0R}?$6AP#3#O1zbwR$dlsJ%8E*EA%I4`VIuNl1gsXkAT}|KJ zt1V)f6uJC~>H6qyfZCagrkP51TRaI-pTPdJdoMWj+o+xCE7H=LJGsc#`5!ILRbuf3 zgPxU2Htt7#-`y$o+ueC3rN1`w{Mzuz^}!~K`Y5usT=jw2>*a*}MXM5<&9}aV`quaL z+L!POuAi>zeDu7}_tChbM(+(D!%)x0x>^3hliX?_K5TOodHeBskxTO0JKr<;pj$6H zn!a9Tb|5kfxR|_I`^YU*{lnBXewpLL>El?+vz_GmXLAp;7RmusfbzBeV+c^2PMS{!`y#^FUs#gqVLU%_H;PQ`V5le5yv57Jy>C0TcYQd({b_y%$7&5*>wSOj?q!Qxzo{Sn3eX-{oZ4Xz=`A_>b(JeW$nPTCJj#)46kH zWPg$KT8!ak7u)5QHur{{MFMj`@N#2uuiSaiMbl`HiIVlo{i)M;cQx++`V+C+SLKyu z)#_S*k*#wr`)vyX|6YQ4HdHWx@1F?-h&dk?uba(*dB z&xWNpryV%-J#j3LA39FI{LG=J>$jgdpK@~PWga5y1>04>_LU2zz^fw=O=f)ZM_1x#>V~x}RufoDUw)4J+cVGKTLJKSdwv zUs`(onf3W&1uQ)b3Hdxj|ELhS5S8!0zupnb`1Dwl^cwQ9{Dp|Wbt|7R3wflD_6>O( z&XkRz+fxR&O4=R2^9yVl zY%=q__l&11qgG3Q!RxMP+$Pp@2M^_Yc4?VK2!?qVQpaWpq*qJbO8GY%PuT;7m%lvT zt0@?o@U-gqYF|CXJamvbq}tm-l(~~V3lY%di!t^ffw4e13HH8QSM_+ZbZ<3%)oAw_ z(j_fn6ASoQV&t5)*_(2}btSE8CVTnDkJ91Y!6m|&)8^Sh*YTJ_X3?iShJ`%tXU999 z{vUM$mZ8H-9fP|O$g;!f&2E+%=+N>$OJrWzLHYy>gZ7q~yWlrh-{}E?5WPV+GBZGw-@ZK-D064Omn=k?j2djBpwtyLv-XlrL7-b zfAOYkzIF{Zh3*w7Uh)_ua6@5I&$k* z=ViRFMaa{<#u0ys80sZw@82aFW&ZxYhOeRydjx zdc8TcZ^CHK|723aC-RWVN4Ra_r0yBtro$()G`Z8S-DcUCoUezq-zV*OZR8#|^RZ4~Gw`_7wD`odpp!8U$!-}$XhmOOLgQNfZ?`iJ*LT{H$wac}f?&tK#5 zuRe_PmGTlkJ6fer7CXDWyxcM~vEiT6J9_tfZkEh96O?R~l7Rnm(+#`o@PCgnGjdsq=2zbdQiSj(70O}5jczy9A)BRO$H(zVNNO3LdSxQ! 
zeBA4&@BaFHTrV1Bg3G_Zu>H=oImX!e4X2U8tnB9M?hGVdOmGu;m9eQJ->;d7-haFl zXl^{Muzl^TNTe)bBjxJcc7~WpfO>Ql!m-of$8p7HuJ)t!BmxEvgf00Sp@S)I!j2RS ze#oq|M27i|%C_aWx*J@Zz4@`q5fuAZu8z)LH5pRTGBbz^(a|8!PGsKTlg;42-1kzT zfzun^&LqWa_#i;Fj!N3sXm7e}JMMt?gZ+kKYoD2=;?U$?(yD}H&#L?4mnk^bw8EM0 zumA}$LHlnvjgh`jmYwEb_oEe)*}0#2_u*F;t4RT$Z3n$~-y1bcI+IQ1H)^FsB8J8v zJ9GIcI~GaNEN#}5zt?PoX5xX;+pEcffmtV%kl6HRU<=%{reur(G zDGhR@?|GcVVpZ;^lS{ADs(9mFj{@s=4f&S>u2HpYt7_r9`6v*o)7sD$-D_oCD*BYr zZV%&cw8o@JfA*c&-)jGMW94I*Y}&}q+c=h42Rf!6opHYiYCxmqOwUVFY|tXZ?wWP* z_9OXBHC|evrRKb>#X<{HyJe$9nyE$az*gaFiEqq7{Jze9&ZocZ`%t^4Tj64^G8n;}ZJeT{x_LY$q z*&XrXBV*ZCT@98qFH{!3>zJ-l@RlO*%rd|^Tp#39#H`Z{P~BjIwd{+>ED)6-<;BNa zoLZen>2*KxcH7;rqH=S$y|IMhz8SGI?tJ1oL(6?%e2WVl!a`s=g4GOkPHodKjwTDetjEqc$DroKxiRF}Do(fC_LCpzb9?N5Ixa13_# z(Id327h&teq@y94r_gDwhYs~S$y;{<29d9P`RaToMDpGv%brnRV-*=_%*7|#NI?fm zWPWb>!8#E(lzx5Z4(ki`=GAGB8nviD)Uy@K8&~ACRfrCl<~F6C#kL?eCxL5vFCSjq zk`@B8lEf+3{j}Mz+_p(n!@HZ`JI7X%#22h>y2jP|oNlKmHYP>RaMv8gC|Mx zCpEQ2k~b$APJeS5eqJE@b64xx?I+&Q2M-b#A|JFLyg8V^VPh(ylfIF%EtvXnSxld| zy)G-HL-C;6z)$7G(BPE-Md*WfZjb1DhTUZ96D+&8wr8q&zsP{;2J`+sak`v3LV@ZP zhnn9DcXed_luj~Nie&0&AIpHu{LKO7i<=8ETvPFrsaF$S>CZpRC0^E8u1{3GV#Dtk z>@jp>DTQoELr3+qWS=$*Bsr3jE(9|R@87j7M8J_Pze0$IHPN)t?bESK%-=%*8#YM2 zpualU)gf~cYXNadRt|${ozG<1d@V}Fh)3(2V9rA3p%96)@ zLe{@#92G7wtg#&8(j`pLV>BfdV`|=GZm+OS%yvv~u41zuUjK2}jOQ_-&VGa6TBU}a z=6vcm+O_xPD)*Cgt315I@T0ar9BkGfhYJP9es2?>@Hf~Yi9q_=ap`;>Mgtxd&B?X3 z@5e`D0`!XFi{BKdE6!8_^n57$p*;WlwT|)pj04#VirYnjHvM0{$+1p1N_l3YMf_Ik znde0cl;3~)_GtV5dTkzUR&aXyxmEV#ri{9Kx9cY3;&PAwKdQb0Dypb!duAA50D%Fd zrA6ry1j(V5RzQ&!lrE9Z0VEVDkxl_Ak&u$^5Tv9*T0%g&^FO1$@B960&6?#xxcA(1 z_St9eXFvP7Im`79@jL4Qd&7Ir+eKYxyte7<4s+O zT+4b~KGE#s&eMq7tl#tEu-D7aBI92d#FET@S(V2<#66!Fe)RRj=u(@aoajOO&tIi~ zFs7H4#T)EXReiGU9$5qHiQ5`yU3a)IGU)?_nyb}!3q4Ky(0NY?rhoe;doZ<{LXipK1 zdXthPYvxSl^Q zJm2xnn}ZHssX;b=Rdt55AoZ|?5UVM!gH_Ez3SXVADxK(cv+69x8|76M!rm7n1dgWR z5916tvri=0c9$Jzd>7g8kgMW^NUc97wHzJve;Fk;QbcN~V7q9B{k!^*j28VZgQ2ZK 
zz$C7_otr14B7=xE5KZl~rPkEqQ5M^@KHkiHlVP)di^}`pfFSGEJK&xv9*$`S;nut+ zB#y0l<>ynTs;xD_=oL=iuj*#Fr1pESWl#KL4y4qr$D~SfI5zDgZ%)05&1_@eqJ)gd zau3JR>8J3}qAE?hTFok5LQ%%sL&i;Bo2d=IbeOJWBj(QE)Aq)h2zl-r_|Dv25GywA z-rW0@wY*q%zxQJjl4_7=<9g|{2-E@Jf@KM%OO-Y``Oeq1!}B6*yBUF%z=#hbRqJci zdpj*vhE{Wfzhlf3c9=)7J?-?AOIX|=mr(a-mrDh$Pjn|)ZxvPsN-TMcyt)y7?K;)6 zJC{P&iZ6T9;{aG}cciqw_YBBI<7XlpmMdO)|?}ZAM?6pALJ3EQjWG zKYGpn&5Rxi>B)Lki)y?B0}BmDEw7{B{w?v$vjA*OQZ2U4A3{a_i4-!P6z*F8tnW&> zH81f{b%>=U@2m0;N3&Db?v(@IlpQ5rx$%uvpYOL~)mWM`L_K#auC|^u`=#o*R28Qg zak^8BxYMjjP4~0$Xmy(S!N=~R7|S8!k{0fOwh3+F{y0s;*yHEN`n(j>?m333OWnAk zgyLauBBV{tyDS4U&R&H*BdXQpEFySVD|gSmM=YM_%tEuxG$4LTCeq#BJ1G1rUO}c=S=DT(($92 zf{ae~CN2oarqDu=OS5%no}{d;`1_HFh0P<4l5Bkp=8eB-*?st>Z%(#+GUTdDMyUCd zN)yA4@j6-j{Bqi4vjX?rTC)8emS-770r?xHUtD+l`327dJ3bh9@oFEnK|;%v-e`LV zE457f)v^c>*eJFBBz&D(btr23y7Ki$-=N9_VZJ%%6P@eNppn0K3RiC4F`E98m9Qng zBwMboUgT-%c+F1i(17bAKl6Ea=t|J#_TJjOz+HEx{5V+-df#53O~R^7!ApjL-y1*b zbQ%n2 zu)i?idg~j-Kf8Ep)pILVzfnvrdy|y9AT8?hex+)7IM< zp|*TMt{t52b)pR^v&w@~OoF$?koO)_KPd^$Io)XztJt~K6U@_99ABnkQ(vs*=EKgb z72X2V*k4$?-!Zu%EWSK8X+0uV)G2f%Ld4Lu2AI21dfMx99~q)wY~Ix>#H2g!97oRgW_y_Flw?l)lIDRoX+>0NAfcAoW{I;X85$tu9^K=cel_{ z(7t%w(Fd8VXIGxiDPC;h++?UvWp&-GQ?a?0bsKB^^DoL@Z5@X3R#>dlgzl3|Ggmh6 zNidWv7p`I*(;rIhnyXQq{|aV0n60K~8M#(FyQ&vRx$UlSyQ_2@A>!Dq>0LFW)l9ef zt@qrBn~8aC?RFuFvUG6{1A*B|6Mf((c`3caA#wW$^?khHU3~8(=wohvQR=Y1PSj9N zCnQ>0KG*gsoBE?j?YPP>M@76|u|=mR~&Pfa9yz|(>jZ5q) zmZ0GOG9^F2^hMd9I!mnzs3^6J1VE_9A>u3DcRtHzylyh&8ht9M5R<=XSKNg4XgW~S z9lxZLZ!zHslbGmdr0cJJ*L(J&sfFE|9*;tP8E_S*cs!(yDNk0*9>EiYwcR@sL>azV zE$_HR*yN8J%B7VHtS(KD?&eKmfY9pAqi9x}8Y1zNI zv2=`C-WJEDX!5-Bt?TBq_RJ)|X5D5G zBKKD1G8a3aa<+WyR5c3SS~Zo4ywl78Bm z0j5M?YO6+Zc@#37?Juw-uH*Eke6lJPcT^(LARhPDQ!zGaE%lGR=C!G^3}FoHyzUZr zH%gNX96m5z3Je`3Q_kQ$S*jGrmXzZphXzd6?3-_C25lP3(*VXh?puB!D1O+!F)o91bZD z_}DV+l;w7yw4lm$aIsst2Uq&!`k>yUI9EH%q9hVpa&30whl1iG^&a8IpI6)(JFu+J zXPXu|i4U_kmwj&pxG(pX@~+W;&Q5sUht+A@JegC5Tms1IV{F`DG9F43fG4Bg)v5TR<^P^&k(k1J~#NjvL&OQTS+c!T{ 
zwHEVyKO?`NZ``~ht=T}C^#XzZs+T08dikR`sN(WC>5ND#S6|y>{b9a~{!W4i*M|vy z9o2?Q#XZFU(PcZH)|tMEu-^2{h2X5;weZMwmWA41x_ag2P~U1kh8V=2BoRMC|9 zq^%cGF6rp(uDg3t=h^1rGt8psAo6ZB^=CQR<1C5dLnC(l-R^7}XLW&C6~5Bf(pMqw zU7N9a@9C?Mm*2edBPst^@@&Nv!)`NlZ^}T*u05B*E=ILu?KQiN$#L?LJb5WEcW^Yx z^|G!-#-ZRw_Q-Zwt4^xYdRVD;Ymt*m7!0>y9lDn^CgB!}w0=#&&M{IUnwWphvsS0F z*hfV@)xvgH@5@HX&wW+YQeT?7LA6uVjn7v2#WU+h@dqnnlphUV-I1D(yz5omxZh*Hwd3>^Fi8LvS_yj&1RJ38*cc3dH!58F)Z0Q| z@N4v+669?_zMzG)1)$&nC1gQKYe>SlR_nI+Q28DNfa1YCqqrkDpn>@_ zLL6J%cZm!3DYrq3A7(1xK^Wqx@_Vb48@SoOE!YrWUV+IE21MNcCs^rj$PiSc`rvLc z-d#9W(zVtk7p@a0n!59E8loF7gPfMPo}1wQ{C*?OfBj5>kqwqUvmSJSso?_#mm(m- zy|4;Ff903s0Bt}Z2nb}=>D&Y50X*ba=|&jLha58Lys2zk)Lo3G`ur4Di^b=%(N+r7 zv=Hd+w=&{L2L`BHMRnhe+$doL3ceSVqM>|=A<-4vmVW-sCMKpg0<#8%=_^->VTyn< z(3%N&lEy;duOE^rTre6FLWN>Tk|uOoD63A=(Dk<*0N?z=o=+D6<6*|IFHHZ(z5sLr zD84_iIu+JgXK18EIw&P`CTOWJ7<149wuaq}^i~3uCr+g?8p6V48kfH7Gi6lP(CTrG z+$7#VmIE-k|Icz5!D4CRxjlybY?_SCOTZy^bD&~#n12C!Y>0_bX|%LGSQzFd!=J~w z&jg+V0-oYN!p+^GQ25>q9BvI+eG0g|y`)=K44K(d93DvNt9V{Ge&~y{g>hp4wgF(P zAhx2!#8#7J|KlS)!d;&08Nfyr+@ev#9c7%o2IW2AKbilnLIewW(MPQ05`?~>Loa{4 zbsLrUx32t^Y%2zmJMuGkfLj5@s`8CK(%m81ZX{?RU8yFwc*;b=J*-i3l+!+eqO3=E5;154Bgk zJdR=q{2clMIaELV)780y9m=XLNXpro>s^rXlcFsm($X!`E3bj5kRuki#dlfXv>l zS=1cs-L)&Q)tdHCZHL@cw-q^mgVd^Da^$}9gMb$*eW9-r!l*_Un9pOZ$foxZCFk4H zevBH7V9l|aW#C8$a;OkDKWjPbNTXBCGY#qnlKwhKl`Rr_ZuX?C;~p4KN|tYy&6Qp- zEG;O!8Nmj10F>!>sj2iXqk3^GrnS#_hi+5BwHOux4bTBkNXkh6dR}fgXnbfR%?+Nx zES8@ZA5m~&2L%l9q$`gbeCk{}l5+AREJy2jF?8yrFw`gMMsti8`j&1% z^yl$=qgn7Lo>UJjwZRf0Kl) zEOs7*agBJe6J?y65|Hs06VN!49~tY%9;POaVEcPEFy8QUAVG>bbhHL0{bxcxOZt;S z_(dvHg_IzEZs1vqfkB$RXpd_u)YH`2kqSRp%lHj??yiR(nw&qrVZyCTv|G{EdoguN*=@kvuE;41ZF zHk~%roy2g`o9QQ1YL>22Wc7ulqtLy1p5T0rh2 zpZ@7HU^K?$eiifK`tgz?!LDjg6%g3a8U$P4HHw7}amP4=HeKED=?(|!cpMvcyQf4j ze!MYm$m*aE|DtfVr2`p!heTtB;bOzhHf-A~O#08cf=10^JnmPPboGG%*YH22N2FFmq2Pw3RunupqGcL29UHzMc=_;7tHCkmtVAXZVeh$$XoxJ)?Ba|sylbA&_tWpD5 zD`@7#W0UsV%}XfBEgol*02DiU_|OAN4$lxYhIY&-1vSD$pJj(fy#9r?Q#r)K^k>8o zR9A++L%5*lo}^HH16U?Y43%d~cKc 
z_C`sPWY;b}uri*qclAsNoMj$CWd2W1SSGGhrgk?)=k1ae$)%_oui^TwcF<)oB(Zl3 z^T>@}u?rgfh$;ay-3Zgs!gv|P$<&=2BRapx%Uw0CUQ*t!>!+wWoi$p znA2U?gAykc!Tfs{ka4gBk1;AjH;u+4k$Z|{i${GqI_#fS$=A)k+k_`U&+-=0sp>yk z(m@!yS`&^om}d}wr*H0|)%m9gV21-oMbMHB6JG<-oZqcGG(b}xumKxCi&-6MNnGju zT>v`3oPQQH0mh>H|n`VU_i$gEsY^nz2P`weS)RM(Sd97$EMkFPa z^OVY4DI@<)+^G0NTVQKnVju(??0KJl=t>L`0C$1|_4b9RV24BTD`A)k!&M2F_Q;LG zCmqj!7cLRFyYjhY^W%5_pM=>H1(KC=+ZQKmX)A_L^ezeOmT^j=a2LE*rhW3m)}vbH zjXXT6@$uKq3nYWyv+);CTUrACRvF6SL7y5UjkPpOz(KOkR0n9^qxz<^)z6ISn*yI1 zlG#fs%?Eli-wsor4W$1&?R$Q?p?eiwTwI3Ux-alx@4Zy)q8Y~6X;;J{opy1mEmp|oL- zKiKzjQn^noYS+R=?K-h=JGYAymgwH7x?MqHrt|m9H05mOKzFmM+O6f=6T9XW=KR?L ztfsbLTmKy@=DQzwv>{8k*;+<*I$}YnHXI7#*`rWmu|s|W3a)hb-KDlTNQuf#jqHIK zt@m>=Ws4s>m|%%&qt#ccBJL?GXMX#pGDz@*IOsfyLNl=JiW`5t8YT7r7_}%qaE|X4 zr^IxqWi?e(r&=K);%5%<^wn+gh4sLeV7*$FIx~ucC9QMF;jLHn9JU$1{OKD!g5y~I zy_>NdYLL+9V~UKaKJrC`CsZjoy?rv^KJNG|suYIbTz4i8+X1hJP9tY+$XBB0bW5T)!hk?xuK0`=O+yS;(R{ z=7RRJW#EMS`)QPgJN02{{{|InHrDc(>Q;~&qr-q-z5WtH+my+MbcjS>)Gb@h)@-Kp zYn0i7(`+J$2HWrNISf+A6m;UwN0G)|b?t1q zTu=aiKV?Bag!gCAjK%OF1=-9E8=0Q9NBCU04MO@x)*+KP+kN&F3ZAtpC~v8`sI_OD z85@j|;=s3lEtOTWCjt3QCr|BI*Q6dXqt)Lg=txE5x%}Tl%L%Lk5L&{;=i4U%hN#)Bkz{u!$ID6)|S?!^LH?%i8#M_JSv@fE$0-yaq29Dm6;Fc3Cy~5cKMdNu%j7 z$=dk+t6dQ2Byb;Pba(LgM4{!9j}|;)_)v${{P%tT<6!(yWQ`}jez&D)h%tlp1KBU1 z#*%_*55{1xyauh*|J`26H`77M@%Y6wMJ_mzFxFNMhN|05S#=r-$%dTv=UR|retz{W zB>M_EDdXD!bO3zo+!-kede$)aE5`IfOlo8@N|DS4o+OPH{mi-Xp8M|>x`%AM@$?=_ z_D@;hkDmsA@t?timH+#rEi9<_p@toCHu9t?-{MbqFlHbCw220n_MIUA^mjjDZWf8I zf5AqHwcRH()=Bg4DfCgO^BJRZQwkS+7e5J&z*bDEmrVWO??r$T2;9xxujJO~{b&!x z0kA`LAK(aZUxTLZ)L2UsfwR816EUBb+Q{Mp`KJYdX`}#43_i2Uu#>k8KTsj)dco37 zT5M?l;SB|>zuvG`24ySIx^EEa?e{33F5V+Z{g_)jlSun+H<}V&AfcZ3x=1lxp=OI3oV^?mZ{{V!21ETkjzzRQTwX;ri6|UM&zk zfal*PF(xT?KUdn8SK!jaD@Me>ogmfpvHcL#!G*t~2aO|CxnDRb*W1tXsTPoHg)w9-zVENxvqpW7b zrN!uR0IK#B@1;Gy|HbvkZ||C8nv`9>D?I+Muk})yoh%nysYtfZmD|n`JpHc!zuy zC#&yoF5m#me*p)*0YdR{&QS6&<_$us%49R|nuJ~pjfD0eK0z`( z@59XP9X15~>>g^LeG9Ue<9$BDfk69|vQ1!NrVn}(gz609L$5jFIOX+Re@AO6#c?St 
zDpSTSFJC!(d$1vPy-zI7SA4%A!>92pcFpk_g%oSKb}>`>K7f;8C-di9BY5bh0lB{$ zB>?OOENr!c8))aMf*Wtnq(OV&1&PUY`=`tAqns7}1e--Md~P&begT((X!rTp!QB0O z+7|7VzWpus1(Ao{A(o9s;x1};#r5IAXb;am?jS`ChxPDSoM_0%B|-Mo0EiMWdy*M} zcEh(}Nw0M*=83cIypKeTDHtE0r0iV?``+cPekAYoc7G zT=xqQwi5@u&hAb=k|pZA(Wj|Hrd{6~)oy;LzNz)D@*u>6hqT6HG*>@yl}_(r#75_x zO-QX<&uuP#V;1iz+K|vdhy{z+RczR8vi>$7mC;ga$=823fI~962#&fSF#;Wf;P7pH zUYv{a0dek6F`YP0pjd@Q$Kg47Ix>A4|B@6mGp2KU!SA^gm5Dxo31X(;W-klH>QQ{v zo_gZ``9s|bGqD(6&9{j|MR!%#9%hxQ=UcT#k+h|S$&4+0u}!)rs~E?b=f7y)p}0J- zA%VLR9jrIl!Q6LhbT0W*(x^=RWV=HoCI>2LErm@v9LiCendqfF8G#3l6U5&MI8@lVE{vfZH9v=8F_ z(UTy1c8i=}01G;bY3^0IbWU;CVEVflp?F6MkRjjB1>D*;b=9}yYcR%8#oxHD2S?S0oeR z7oiSBylj_v+v~LErYL_K^U9<$>t}1F2G0jXKl+aQLiqk=FJZ3t?!wAy$I>8!i4{M? zy1a-iB7E_|&io4zf|&Dmo=igDvzE9Wk;`wZf@jsymaAu~PJx{{Ze>$lOP zIxMu;3l6`Nf1O*uJizSRrRub)ANN|tVBR>%=AH8U9jx5pnM?*W)W>~>+m!phK6$nD zlZPszpVU$xO+VqkB_eU_(Ou8;R6)@ag*z+;M_;vNewgw$M+BwVu2Y7EqVX9ynoaKgcw02?Rui^@ z1FEkaWj&KsekcBX<^GzEz|0`HFD@<`r!sQTqmTCJL#>ah+m4XP{MW;;pQV2J7XAEm zNO=!p<376l;GO5{$Y7p!W~MgDUYL$+@x}ASJegnf0_!`t%l%8Xp6sUww{qnJx&*j> zBt?yqe5;-lOsrpV_1Ftz_;t0!a8B8~_uccfJ?9C3mwyGdQ+K?s%?bJTEpEmhf?n3oPzeGLe&dYbEZ#9$gC#Xt4H96xCgLqzMfu>Yl-)s_j zg}=ux_$~oup*Mia2eJ()$Tn`rQEq(ocdmM?7pE+oln4`7Qdw>hMT=4=<0drfnGC+U z*|tROB{#8{Wd7aIVTNm>^pJ6wJjqF%w_a*7!fmc3cq8}l;ou`>;qJXUv7wx`-aCe= zmF~0F6D8JC!=e?}oW+W6)NKvuPE>m{d`Ni^I1<9%=ryfb!0M(rsj*qD9NZIoXji=l zuRhKY72q9aaB28*d%x9qyeHsI9%`)QPdHw!bg zWPi9Q`828vgZ5ifC>Pg*BhdA{u(_~1hS5b1*RPQzUcf&3$GSgl@1U#oBSM4dh!&%Z zbxMzmIw3C(V#BqFHo=RW@WN&?-rbO!^uemBHBqUFXVKN%Qjf2Al^bc&FpsR7ZWjjW zT-xXB;A%(>?cmmyI<%ATuXJx#b~&Gm^wMGt=1rG8d$yM|qbDETTG(cl$ctM+E1FdC zdrVQ-$OSg#^rX;@jU`md>e0NUOx8M&oRT3+@g?UBwIP}#^meDf_L27TNS5bBS}xBkVWnzz>*t;A^bK}Xvf>|(H#pVbY5InB&i_0$ zavca$=gqiCq19N9&1?UJXJnuL!P>Q-p_!#bUx`1TUGA%E{RG?Q;cq8qXR(@W63UzZ zHCXVECKd<@pWAI$y-+4dbr186QQS30k|zc{(F`0ll5jf=$8Eb*AbH`yH z^Vr&#jeIwAcA9BRo^EF-=oFA0oR|&>j6Bz9y`yX+*WB^ZRX9h%1iW2h4R1?S3_y1U zya?8R84^gg03+FnBvP2QrSRuHl-%1aObSJlB1~#xz{6b7y%=UrUJi$HBDPNa7XuYj 
z>+b}UFTPx^az2!--n$qVa0?&18W`B?MpUjTFhUg;{Lt_k#f1xZ=X>v@5fUGj<>yV* ze{xGqdU=WfQbdn0%;pk@zv=v}$InoxX9X1b{z?jtr!B=ik2W@vqxeh=3h!zmK@MV! z<5$IbnemNZnWa#jiu*F~V})j#d2HvT#N-GCZPHIWEmCgD<`;M?_TTSh0hk4ojm-WZ zq-~URc7_+oUh%(ZDyP^nZv(Ze!9sDaq|D<;*W#!`LV{NPFO$<~Av}%eD@}5)|WIxR#WkfpEZ6=Q8p? zTa=L$5vQcCjP8l7j zYhv1hr{x$(Qb;!bt{@zw293sJsLD(FWP#V+X{!*oyEEZ!dHCvOn+5%Z^`#=OeE~ZW2)DRjZnMHs;uL=k zSsEjzw4#n`oJjjttv?q61UAT;gb?UP+?g{qmK&FHo^2cLtDP%u%m$Oy+10H%_u%h@ ziOoASeL!Qc@%gprF^u>OEfnOUGN(SJJb%+8#z;0WVXsJKz50-R)&_-JIHvdxA|(0% z-RAR?kU8_5Pf7dL;@&WTt@?2=b4neDDjIKP+IMS-pPJ`07AU3n8&Lt2^S$4|b>tR1 z^r9{r=<4Nsk}XZfXf513VVisaGyBv{p7*x);iKJ?80IM%Dq!i2cfmMbC~+6~+PzO{id3NeX0S@iV|&{tH_G zy)I;b&SkUd82JrpR^X%OF_oFxp?l?O(F=D2*%*-wf2u6c#(~nEwh9B%XpN-%5Ds#x zP1QFz{|Bp;gq^6(#a|>56T`q&K(S=A0aRnhH0TQw0a9SV9{jNYH%*SaEvdRsrvUfeeJ!CB?t{c=&ma(nIHH!qY`IOuA7O?$iMRc}Vm#4N8!Z z%V9KxPCmX z5;KRi&%T2u1!;@VSO7?ebby_7Ur~UP@XZpx#Onc~MliXjBwR3YRr~Ek+xQGhuF}kM zM&mos4ZRcb(w80;pXk0^$WvZY3Hr?HUVM2E`I{AiZ(<~PI4FyI?rS7O;whKbFdh6| zoLYa*uc{UrjDTM#BoqxW!<|Q&CS^gsLmQy91SC&xnMj}A`XdzdpJTOEA*a-`dq^b! 
ze*3n7#_V-z@PK6SpcTeQkd)v-F@ju}d)`e&9Ys9u#nbeSUVioNzyAG#HQh|v*p%gR zVk|sX8d4S@(6vPlU4&A#T6)wAT&Dpzz$_JXpXH%8M9PdD&+C87j`4*;7^tmP z+9MU>d+=G&^1mYk|7ctDFd4Lw&-#=IWv>Rb#PxH}OGrG|ovi=h zU1Z?+5hS_s8Q^}1Yf$<#yM#pc$*_HSEOQ@0DmY9Pa&DB~lcuo%8hT$6L`gHoLDYw> z4I4-uFzyg-3AdxEdl_3atp*N>gFQiGm4;itqfnSA5)aw$N)?Q244ValM5xAH07OU9 zf>$P{3gi^?rBEe+20Fw70Uh&W&-YG1<)@f*b%g`K z9eXE&5|5ExuBD{vDpVXSR{0Ke?-Ct(tgp!7AOB>&^Y_ZX z;S_}vYN4YxFV1-yPcAEg2G&6b9NQn58~#o|9xq#hbOY&?=3Rl)lUK1-HMx`go!p(u zS;e-Yqk8f&ni}3eL=oEi9XVon4wsQBDcpflt|NRdpYTr%Th)FS)!ft?Q?qu}2JVW~ zqUQi>(sFS=1}}?`mqRM}M~=CfSz??e(+huy&Rz6fqCe0y1ZIZRpuG}SU5Y1Ew7J1x zRXYtRBFZ*Q9jvGv8zPg|h6)g`$`NfwW5GVoXjUUqIB zA*r};g|Sd~aGnX-OhnQMSzlipD}F4SSxA_)6hsZZm#W3_TM-y)}7!- zT(MFrZ5=tDh;@-N7PDs+3?W{=qrOosl?-Flx1>t#3iWIKAM$jOU46u*%TyZ9D!|c) z5=Que-VzVeNaDY9X^yxmh=>@9l*1j~CK{@X`j0~qs*+$WTcbf z+!ZHH3Y`mRxc-^%tsJc}4M|4wRV~~rcQetl#n0eF>5q9EUv1_XO7==O+QIVeM^0Uc zWYcUaSxI@-_n!=$a8sO#{uq!pvx)7DbK@pRW7gNX(#N0JPere8ph(I%EK%g8KsJ#v zER_hMq@UrD=Bd@cSv2Erm&oNa5!JU&Oj?cM5v*tBE`N)fu6pF&nhZjyVcIO>hx-na5m(b%_|u=^H%VxvEyc1s zii^0eY8ax_VvIDZs$CyQKTr8mYdD*6a;w+geR=%& zv17G;)`82HI;?WaON@`ZuD}>eC1UygdWVl9XukUv9zqD)`v>>@47Bv%qeSRPqZp1H zFAT}23XC=5#k&QoAKVB&dPMcQH|trH1|Aui`w zRF>?6KgtI7gK||fAcYhP1Ys)3HvCbNz&oywS5&OIu6=vto`%h)UUs*qd9U`a7AY~O zMPoO!p5Oq16g}B;*wtx3?5QheqgGXMYN_U(&Rk2Sas53ZlvY_v!}fxEV5X|$VeTtW zyxfZi3~*2_?KK42zxeFQr)8&x^jArPz-uj_y9fsck+YyVg&?&(P;w(Yo4zv#y`x^W zuwT3$@6oYZ785LK+tuaxt;_RQuT`15k-O-JGX^L9U%ubEMq<6@AItXSzaHBzP{^-I zv}!)&o4QW=zVk`cOQ|a^ETuAR2s!3Pdk+>o|9o{B->a_@#af!{o@yB(2KrJCWIp{+ zXRP&g>LP1D&no!%#H}HSy5Cyf?2O-yetZiKhu z%_4{L{8Zq1*|ghdc)&&o62$(EnSgpSi1J_I>4RZSa?6Q(Zdg?dOR{OR#KlS^4-?egxwrXLP=v-iK*J4nXtoQEq{xnOqzP|E{ zHwrryue6B79hmQbC2TPk7NrG@&1TYm+!_wv8sDQl$L+^_&^oEs&`5uGCLF|dS=1)w z0K*MgzHLE|nfAPG6PD*eKo%;HmG3M_G3=S?`W|dA45Fih^@ZQtZMk6dKAul)js6$5 zjEJ<2+L;gq#=BAi;g&sSFG z&0!~#Ms_!WMxTz}VR+N*7~O4YZhz^3&;aBFyMfgt6*LBL%4{gKhw4x5Pd#|*!nS^QFS4kCv?f9wl@tYqY%jC@1u zF5Hli?LK)N8Er3F#_-3Q{BC+)-Zgm=IH`(8bA+V{PU12)Wsq~A9Ud*3x7reN5O%sV;e9pI(6s-rH? 
z%@r{>@R`)1(Yze^Ds?tFGYy?j`_LM72N9`5>=5WSDE08YJlfj(*a z>fAa>C9H?Apfs&>cjEQA+LRg z(B(=9on<;!pbW$bBKjH*USB;nJ|hiYCdc?tVW85agVvn$9*aE&Ep)CG-?v_Rmr%lT zx>C3rz_-|=s35N?uFBX+|5c4fZ9h;SxAw~O@1($d9Px7Nk+5`kb)X;nhw#G}jiw2n z7yuU*9CW<*^Mm?j8*%S-2s0iK!$h6;|6Gs#WS~GEsx8QF?rNTS!dQ!Ts7%qQnML+X zy#0N|M0M?RiC#V&ReR-C51y-ZBM)rV^>M$MnaMcN5341 zg2PpAq>IiNdV37?*90df*NByBO2G$ks|F==wfR8aRW`G+Lh#!>oK%{T0+gB@uN4H- zgld@_Y8e5G=eGlq#zRsKQw=f0vu3P*C1r}p%0#>70lp(12nA60B!o}9+tH|!UHb1; zJU&OZW*puYH)eyu=K&1d3K@*rxL;`~Ru^);FGwt0>S4p#C_4l2RrgEg4*byRt zU1m&*vz39H&I6g}Rv}eTBt`Nipgkt9K>ME-cM<`%Ars81-?}4d_9e&CLMPSL)3GlW ziRO5@()?igR6^>38|=h`e`8KR5f$?}Y{Kg2`p6m;v5GFaFe2TGgu}TQ)8VLIQMN%< zEvc8nO%&o9ouu%$*NvBno34Lcsd<{u+e+|wCV_%*hYvbTDFYGg3&}5P*_7Ok2P6v6 z*D^dR4N*a7QA$9xao_y9fk#DiU%F8S23p`!l5<5e}375%mLZ zbI&~LuNtj@MD6?N_K=s1#-e`db?=1ej9*>>+9zm#0RP0JX?#J*+K;4lO#FKFUlM#X ze&S*U@Kz&KPsjfV^`e{`UAUTQJ`uSD0)T5&Ty&MqF1# z;h|02Bi_51S1OA#T>pmYa*x37MKyB6*RT1WnBnu$+V8&WEwdjKwd?>|!#_;=LiR!D z5D5WHy74*8#idbAnBqKgkOFngh)if?Q1;F7lfZdPXYbE4%Bb@m2?=;+Up68JoNb~# zC)ks>sAGgFrjUOiyVRXG-rlYc#?$)~h^6)M(|2KvYj?UGdi=&J-~b8qNYM3+C>8hBEu z*pvkVt)>TA3_v{vv&HhFghPuk?*!QQ&dy4Xn@Ny17{KAlB{sQ=I84?HB%qo*BS8xWM!A>bn*x&+>KmZF5OM+8cSXDc{aTsYcXT{!jWgYo zJYRYSPL4h}49`3{vrLgUa^E69+4;>I(pI0uP@iBmr2HYH?e%xDKDEp#F_%Gu%+qe` zH7j&-okC;pcFv|aq|U@<_}I~V0P5Lhm1~X!*7)U7nxWv{#`}6Z0XWJii85@L&rkP* z)j$b!AP0q+QDi534s${Q(^q&UG~n;raV*?JLIR+_&LG}f5)Q1$t{ixyy# z(1DYT#w^fbDDIO(RRsFBqz)TCoD`4)`Jg&Cckh~%@98ax@4HVD1lAU~^7YSGt~*M@qB2*a}7!_lh%eDdgZ%H=~G1))BWDh`l`6~6oJhnv1{HtZV>JhjKP9nX{H zSh~GtEM5)WBtg*{Z)@R4!-viR5R0%^z_z!|*^Gp2<7CxsojIO`S99iBR==1|lQqSa zP#2=TrcT@t!wCq*guQTFu}>TC9x4tu*K2V{@TA-PhragAsOlze2~N8E z(!+DQ_O$mZmuLIL9!sg_uTCM7pQXNLD|KkaF26cEwwmDbuIqr3NK;LRP)Tt5?vb;tRpVgG;JOdAea%V6V1@gy=CE z6M+@l9Xeb0EbaL^^r316gWXjR-OYW-h_`}egg}o-!A|UfMEl6D%L_a}hZ3!?cDhE+ z{(2@=ZCrU17swEScUJ+^rS{9CaxFaf`9$@CG{r$_3PECmOLwu!*K=%4RLAGWRbE@h zIwdN85oYbSYxsIwk=1%STtX*KP9NsG?cK*#i{Z&Nz@zv*xngyO2M%wtU81HM z%KB$Nb%7_?5oay`@H-=Y)vF4&f+mdqDR#V6+V(CwA)u1&xRVhCkrL{^nM#QgAtGjH 
zR2<$`j%z78&j@~6(?M@hR96(bbUtP{0;QP?_kQ1K0KBJGCMg3}-ax5>W>sd_;KuKo zr!mSgJoi3y#fwF{{J-|zGA^p{YZM)Z5e85?L_tbIy1ON%1?lcaqu-~W5hz31HD?|!}?&U~G{_p_e$tbSJT;QZV#xC#Nxqw3Fl2u@UV zqTP2M>?r?g_8%;tlUD1(MWh!Zv-R)FFvedOO9^JpACAKx z8-h|U7e&tJy49>imxZnuRktVJk?Uvx5;msB-v6|-lWgZ~AZs#Bt<7%H!PAvz@7|Ki zCtzoV=EkzKk5}bDaQI>+*C%c3FJCnaWq=`x&qxq1rJ#aH{ZZav-&UDM!&O$0-PS&j z-Bi^(_jg&vMVE@TF%5g41O$CVww+Q>ZXT1!*cf?|Uihx~GRMCigsvxgFOVcKb&w6V z;e%8Hp|iIxLw`Sdy?X_up?)j*qZJOsY(rR?kKi>89jPAzf#D5LPqem&&;We1B2^h8 zM-Ovn#5=X3{VnlLNdQ^{RBa^ZsYfbI564Jk30ESy9M2z+!!CCB0zlc^zE5){ao!LO=up!l>(~dxuUM-|^@5 zGS0w*#9D90i@DQp3n$j{yY-T@WDk3W=}8}qeOF}HJfO%z!&YvmTZ=SN8>tWDEFqgh z4=LO?PQG3T&F;7B^_tZK2QE~(HXc+M`%)slb?2mh~ZMTdqAA7cXjCSJP5^? zgyKDmL_ea64sg)MKQabC5)pS->#*&q2+L-lmyN8a`Kgz;5^l!FLrT61$Xt+n2S$=V zquMeG!j@HS___V`uo$_Rc19mguyPTap~Ge*OC;AKCGfaWgO$~QM^4GLm=p5PBCOyf zj-MWZ4D0XAk2szCF8xPLl>%alMPa3~jh~6e7%V!w`TK+2Dwk%Ze!h3&W>o$Cn3H=@ z^6gnffUnF8{28I5*yHHNH@KHmt?k{s4OF4F*OLMgfAG%QlS7 zwpDnU1ReBI*X zEm`0szdTLQiQes*%*>ERM5m~-@}jEusZxak3p7Zx9#e?GR0%ycLrfe+Cmb3o#n-;o z8!M`eiX@5$oDtpl-pstU9ljBCnah)zr^o4_dc6W@x|6Ps7r}s=%J&BAYNPl|lE%tk zQ*Y3-Yh?*Dvi$}9W%~w>I0ODNbr-Fv8eEjwR!?Y#M4$yj>&f-=iK&|;d6$^Mv><#c zVec<*J~x`ntH7Sq09nMd#cCYSe6Pm8_ysnP&ww@_-e2<1Z`U&jPropx+N}7_pQ8qU zU&3wnD6MwfL$|JhHy$Q_3_wFW3)LIRlE`Xu-!)$h2H9paoe#hP(}IH~Fuq-$87H{x zP0TN@vpcUViSw?HR6R8dRa!dU1E17hO%er(>>H$>|CPD9Plj7@Mv+rGyQn=$%lJyh zHd^F2w9T5!=n}-tu@uj9c`bBtFj*LTe1t{z6e+M*vt2N8zWsAssSho;or2A}Tr1og zFZcwgbp%-Wge!`X6wv4%!`gTZuV*jI5rJjxcZ*3XNQ zrRAaqWquC_u_Q;U#&Nd%otxo;K-5luGjGfXl13FM+bBe&-58>bDFIr^ebe^Xj%Vzz& zRo7hQ>c_+hNvwkBV5q3()zmdU*V(zZ6Fp2IU&VuTP>Z7fI(z3td9~!QN1P=OJ(z9D zxG|ocS<%py161qd)`p5|LJ)G~59majVd6LXEvB#pwa950gM>_F#CF%6-GJynds1e)O`Fq`*y(-!__ytSjQA65m3awbDEy zLN0gAm~b`4HmB0!H%7NKZ{Z&%Q#422qMQ#T#8N%88eHLM7XEclFFxw#j%6}k-ry&a z$*L@;Js3XtZKwNE^Y8H7z0!Y$k+fS2#U&y;^j`S6Vpa;z@)hC&^>odhK zng?0$Yn^}dJghCg9_0$UA2PW&_+6VPkE*cs6e+LJ~v}(UiP18?SP!t zk@g)M${=&J*>01M{B1Xp7m)_3syrDcL1w|^uH(?FH;TH4NU*7n`$0*D${eNr@GzGg 
z1oNblx&S^DXhANZDSm}lYJGYu=I|mAuI*#+q+6&ekkAQgyQY zrb-%*o>_;m32^Q^UEbrWrcPm9+!&O(d{!g3F351J^|4fK&cB6CVt%K*J=!44y%)}_ zRrdRIS^VI0MRzul$dNMDR-O91q+Vi`88>)=aja%qmd8j7(jf@x>XQOJ9%EKkXs4nL znPn@<2QEBL?!*L^*5{k5odt`#F{g9zB>2hj8XGLBSx;ofMS?Do*18BYXPx2r`dD|R z85u*0v*OA!CchVzKj$7A(1*9DgW%(>pBpz_f10>`cA%n{oXz*6M_4WVMoYtcU*CHl zX!wrCWw0n2)2Uc+MdEZZm>yO2W@1C06%2({R(L#9?8}D2C^eQu?v33JcN~Tb7M$!= zGe!SCYB<}Q+uFjaZ*U-wi*2#8G$rsKT<*#sISmbJ;H-F0s8w6vdY0fpLk(V#5J!g19Y_dNw>?}|-WN?NVSDFa9A4QhxSR{z9itlxyC?XZ$HmQy zZ%b_rjkWWt&7Cj%Zn9Me*N!e+j2n$GwDMns80>d>3K*QWRYCN6zh?Q@^iw=kuPY^8 z*w1D*U-8Pvp*&QQ4K?7!dFqc)Ki1VC9SkUF&B>IG3`mRc&<38e=bKh)GunbGxeOyH zOZad=L8C;*TSO15`egmXLpdNWgFMKz_nzGeUR0dq@MfQQhT6o;#(g6Q2H9IUcSJHqFh#$cwq^?zq4sphw;|)!Eb+=UoeO@{U zP9|2bi33I^ZC?w-Q?hGY0yvwdTG}&Kx3jLni?LgT$UyPM6E9 zGz^j2i)&}RZHk*Ons{7k50*H|%7*eYf#fpRX}-N$+kz$tgy)WP!2^)cHRotqOj(b+ zESu$a1cX3Lx1$rVgF)Yk6)oJs(r<|!5>Sj+UfRrAL3Ohf50mRlraGm{FQauO<7#8K zr_A+B&@t1wI{fd`hMKUxU6lm2C#^FIMv>W=S@blz;fnXMg(UJHeq~YPg6p3cYy~a| zG&v7@B;?s4%So%(?BSj>8p%hE3YS!enCTw5BrF1UUbE$6o2-HP9HK`#9gIm(^eVTl zxKf@#r?~rLLwQCZ8kC{;{i!g9Ea*UCztV3jErLDqe1E z$!~zos}Rb+)UU&J0?10j(v5cReuj+kp4vWNr_Cpvs<&PH zRY|+8grM^f}z+tK3x_WHnXB|h&`GivQHyaK35Qk=b z`M2j_)5O3>n;9xTDv*xYZ>2o-r!sY~$t1uUalRBH@*^xqgdO^-z?|4OrBw=y-Wf5| zu7R{|js9L_=SzO&&DOKT!g!w5%Qa_;W*_nlZOG2K^aOxki#*q>qp z)bwWrpJ=%E{M1i9`9IGw+F6Wj8C5gyZD*)bmhcvqTF(C}Z55mdG_D9++|9d{>v185 zsj*z4t<@xV8{VgCYKK8TpB;R>+@>(b4X$8X{&y-JXAP6A0; zzcu1=9aLnx2cix@x?TfX#gvueix(?KBYBC8DIci#pC!9F0i0foe2J`SQTvS7aqZ4Q zK4Jc%9TCBgL!u{K*G;)eERc(^Wqkf zLCeT~R&Uo}y*YN4q!GE%1HXc_jxQN8R_47D3>l(-RM$qm5HAr7s7HNNZc?19t6>-A z`47JmJI&xjaV8rMfV?RWXb0qtEQnW*bxGWNAAN>1V%^g9n3bpfLptX!q`K$QfVnKe zjg4`#jqa^P0j&cTO`Al92^J?Eay;l1gcpZ*~+4N4#pH}imf`ufy4 z9aLM3!d8bA<}5kA9_p$!n{J*)J@mOo5}|hZ@_GH;s5Jqk0~mXpGCi%WQw~_65-vvE zIjwmo$JZf#J;$B~!| z@OrXODs`j?caET6kdYr?+?x=KpzFhGe(c)pjhn&EtIL1&H$*KJt51dV_Q6`B`s&YZ z-J`oblap3v^Yv&&o{v{wf0sW-6<-)B5w6I~f&;TkpvBzG(hAoF@MRj1=O>op`p>iN z4<4oPKSUf(3mva}Rox0kxI_->Uz-}0aP>4H&+kXrmE{uDKYY8sNcd33T}0<}%nyv) 
zIZn>|or)y0zGjWs`51hyC!C<1mq0BbiWJp>Ed&nE{B)g7PC76Lo8Co4L6e zOlLo81)c8lR*WO3Y5kzmm1U~3)=A}d)26dh?!e3IRHdG~O?Pvyb&j!d2vz2Ng%Pc2 zez#7PCoO+{vyMHccGtdMhHRgQyV&iZ$Z+ZOo@^}c^9)^rv%mMMZW-!xzg0^>LOBFL z%)7TZb9^%Z7>eZ0+he|Y%oemu8`aa#j)^eOtVZ4ZtG z=hRk}NAQ0WfUKgXzvU0(DC_#yUdg7@G!N_6)rb5z^R5KgFHrYg^wOsEN)%)%LBjMaR|EJU zkscCe*gtC0mSb3#(3`rrID!{aWFM5(1aR9K1hzbE`FRf4=_3yl;8phP*!yyun~Wyx zjT{nrD#ZK0?_&=qryr^Viu(>3sKwf5x=K!3`Gh|uKd)jzcv!eVwZp$+&hOb6q_i8$&W%8L?@IyZh+v|@AETn%YZlz{vr4Ax&DCZ1lh7k)pGh{iGI zNZjo=1*52vO5QISLMfgR^PhR5Eo`t@!1Iqh`{)`BMM=r{PB z42V^=l@VFsvOxcwhvU|0VC9|g`>+hp!ZhCRJWh3u`J1GS8*%Bb#+VPVK)pLe<;3Ww zd4}lTaeP_E2lTaKHLpKYUP^2yDy5_z47#*(l}YiN{(FfvhUvsqd{x7GvDJr`5eF*+ z1Tlcc!)tu;i5-y$Icug;u;&hcdYRN{N#h#KLe@(H9b^U-R16c8;+6wKhuR$fU%nLSE8k_qZ zt*idKrBz$Z)}@GYD*-kA{G0~T0!#CytHH_g?{S9>9zM+ z9COx8je53Gc+ooC>`>O|IBVtkxoL!;{Zr6M1ewa;{7;1Cwqfu&)&w9jqr<@yaN~Zq zXH%Z?D2j}@bbde3M30%~Gn2zSZrTeMy>_-GMBg|F^n688MoOz*tx*gg?4h%~y(Qs- znM~Gb%Kd&3@%?`I#=y~(a?Yzh_A6Q180w1W47tX!c*=ib<~woZq@>9A)QQ@{{xjMJI2R zIbCTH0w-;Xl_C{^K2e@ApzlU8$^RH~z1nM<(W1<2$c&Gq5+uITaG&ugO8G`r5Jh`7 z+%@)HiU=d6)N8)?QKIyV6=X-E@;)Qi&7X2J@`gEiivT$mF9il6ozh zwUx*rqh0(~fbK~5{7c~5^n5=;_?2Ga?f??Dj`#ks!Z7MKCDBvlm2M-=Rd>0zz;4&P zR6Tor;wAD_zF-^|J3Czd&y6L)NjLUUy%*qd;S*wki}2NBz&IUR$6It7OTmgtno8q; zI7hr+xOLms-!1@A3H#)DN{W`Xk|7s?_G)cj-)nKGR=V3lcG!FFQoDYkXp3zDWB;yn zSJO255uwSk+Q7@U{RDg}ju5XG6s(xy2Q_G(b_=RUDR)GkSlJTqacTX?HdRY5{-k+Y z@0_F05Aq#G6o+K+^7g8`h*Xf_UHV9MB#uXe*=mF2WRE=aXEom^MEs)eVaEJ{ZSJ4!;67PR+gvYPnE?L zd}$X4AfK-1CzjgzsFtj}shZuL+TjQP+%83-ra;S4I&I~iTEgWP$APlpX0R>1>D3}IHq0FhwrSkY+Q4nUBu&ojSmQEkRkPghgDcrhRj{dv^*njO{z;x}ARzqy`1|0`2-7{VDT|!R^(rQb>$I z#16uGqqmOxCc}^V40o=FhKq*pjJY=mul&~TiI%um3RMe?Hp)?0>$AwR{R3|JiGYy& zUv$StZ9^wno~1~~Jv{ZKsR%p{q26JNb^b>YRgmLMgCmbLkYJV(O}cK-yYqx4G5L|F zR@3vQPITH4KM&j@4UT?hSe|GK+V)8~ZKm-}3q^igl3xGMQL*dWxOINQD#*|8DeOOH zR)Sds?3stMd|Y-)bml>{T&ni)%0O>7!_i1Ee-fH_K9p|A%&RGv*pmh|&E62+>0|yG z!(+rP*-)N=;3!F_*SqoAw;rvF1fc3to_4j*pzmycDUz*wB`l@z_ID#1ba5Yw 
zNsWDOtlXA=Yzvtb+AMi{);*%FHzXJjbRp2TvP66FKe16$+J$NA?rFsE89w(_n(Q6u z{1I8H=!ucQxUj$W>Km_G5*#T#X|kDtnmnrB%IZ%<628e9+(Nn^7z68ZU!bBo;)bfH z|J+~a_C0Up=rs6C4@2RmW(ZX#o$hF`ukGHiPS0c^(zYgkBra~D>ll~>D+O)_F^;~K zR%vIldGKSEwjn~RU^4OaL?BN!Inv_8GHl+n8&kOf-9N8oV-EkRwA_hejuwd`mLp?a zD%XY-&m&%*ha;qQoXL^dP3CNQ+vD)1e5{@92I&8K&NZcg`!!lN)O`#Ht{p&dogSIY zZrXE*BNLG4h^*9qK}zK4=l%Y}gx8)11`{{$PVOtZUAZZzE2}Npr$wn+?t_tXS5}9( z8AIQ`$_Mtptq%%9=I>r!SSfGN!#o>JC>W;^t*A0Pa{tP+D7rwqx`o?5>*NqG{^Ih< z+&urcFDkqBMxF1CccIN+E8Z?zw{7FV^msdBtRq0PJI{9Jk@d^S@Ql0fR-Us#nANv1 zvBja`6#j)qg&nc?9BwPw=*^cn{1xX=*j(q;KN_E_nu!@=-t=8&+kSUIq>Vk)y7e+U z#2gsDxkrY^NgkUA4$i}}HAwEccEj5D+IdI_M4Q-8hXDsWE=vj$`T5O1ydpLucZHZ@@U5T;6SN&U7GjYw!Ev5n&16z19Hc*r4H~Ra8&~ zMU!!O1!OCRs~55h(~ za?AJ}9!#Bh90xh)zcjF>F=gUL5`YqmfXK6@U;;KIfg#%o2)0aEx_!+AaAFQ5e-wUT zdSiw~3=GG;$x=%RHDP6Tygys}gC&am$v9G*TjrWZYj=<w- zbMo&vJYdSyzYahxTJ`xv3K1_;gJJ(!4{#Izzt^J%(Sk@X*UQ&?BxY7GXX2k}gaYf-!GqyI1YtNytWheP^uI7TokNUD;Q&ochi=O$K=V{2Zv&t( zyF_Qxn?M`Q$Y9WH#~`%>)L%Ax2fTtFh3c*c18jQDa`W`qUG++oa ziBui_F>vU#Am1$XWPIDF!K@-5pZE<>Se1z6D8=K0+_boe^N#@uoA?0viznt}bn2)` z0CvT)oI?P-XrScLd-kZ~N&Vcor=VIfk8I9s5cqxmzbwE7WmYc$-(M4k(zNcVHeZd!`CQB9#R+=4dD|o7NB8eK+4yNK#5m3?H4O~PHnU_)&vgF zT0;H+bMnmY1jH66v0R)4WVfGSqZElBu0%alhl|GP%r-?13}IG<$%IM1f&7IllQwha zpJV?>v>Jc@j0BD5%d3BwOKH+RR>UQcB#sv2~yJHHlKyE*Z;QH1mscv_x6NQ z!=0cqfMBXCLfNr4xdCn@>d6w*PwlJF8jjUNdLSyul}_#`jHJgJ91@>@5x zIvLu0XVdY!=n^N&^|46r2CU#=EX%`>fHFN}b{+zFRcd8`6r2HwjROdN91SS=FBa#H z3Z-k!!2DISM1i;o_wAOiCXRBqhy^^NJMKb{zR!|279YA5O_OE~xZ5Wp1 zODq7IY}*W#L#X*hLT8^$0)Dt+w(X(1vT}snWF0<`tzYsIBe)4VPC6Z0Zf^Ncez2^q zGC;T)-`{SeSQp%@;X*owPi&HZ01PrW94jIumCXYfvyS%iUpxz|IYPGa`(q{JS>vm7 z^+g!xJLmnqv4iFCne?+*aeC*c6^)fq)>7%B)aq$4Y}kMENjarp@K0S31Bgo%=yB&- zNPr36C1TRI(G>Xzh3y6zY=D9*AX=c|PIC8IZazQ3`A+L{v*mIY!hc_l5}v42JVdHkwfl5aKj%R_w+YPFgkh2|!FmFn@z}}IrcPlykOENhOB(zT$>}yz zbK=TZxwZ!37Iy_D#>T&*{_{oqz5f<{dXD!^rrK?$&(2dD zho-5BtGVgCrd|hQzX4JMk&#)PW*_bB?63%}DV}AofWaw5nKzOV-~(Ma)Ap@rXH^mR}65U2J%eyI}XnJlXkzZ~$B+Gs}uJ 
zuu>uD10XcGsaNV<^)8Y4AfCcSdp*wMGP%#U__h?mIashj`7c&3F}Fwkq<@r$t;aGcXw+3DZ!L6RCnpv?)W=Hk(a($0|0V~|0ozmkF#I6_9gVYD_~=U_ z{F?jt2p0Anzds$;erv6P0dE8KSJe+o2yuUs#0r-4z)-^0SfL7RkG`zvpaPdKzXzi% z6_S#;o&=plW3OD@^D41seS_FQ^VG8AohZPjHx~wcm~bo>%8_5+y+Ogl(CZji4TxlL z)RdBS)>Cq+Y-Y6fd$SG$63t(DcT!xWTtztyuF$_PSJXZe7fEOnz3IfOMKF>km6n#W zU%1Z1@)W!Lp<7J{YDw{#82=$d|3Og*A2j4Z*9CXy<@uzdjSrRTpn};fJq=?rwwL<5 z7mDU$AFlWmoI!Xj5(0%~ti54_)rBRXB~}-iF_=NMfPi6-El#oduiM~fIpH9tuyELi zfV~>cV@zEEBSM&@2RO!g9UoLYqr#x(eksdsLH?ehe*gl0hqVK0u|-+m4M3A%TKA&B z?*2lrGo%Lc6o~MPq+(Jk;4ipe5fahyAtGfHve}}sxGi}p!y?D$CngBHb2y&< zqs9or<6FO&)D^&=t$CBn*sJX3Zru%&)CV}&Q7a^U+AmZwxsK^EeWxU~4>-_9{Vj+2 zZKCMZ&0DRZr;4G$?*mvPo&+;NdMUW(TaIUO*s-0=h(DZk-Z*8n6YIXsJ&fx6s*7JJ z*MUIrRRsWnG40pnOEv{UbmNxj!K%=We2t^L??qw-Y;T^hVZF{~K+Ev{A}L!fUWQfh z8}B}t3w;$s2V;)xBMgq0NY;D9k!F9jAWT5Nq3$4A`SHljk*a!?|GAas(+@!NPD1>l zt(4@E#|#3`mSzAO;kSIj*34VtyOE7-X(nkJtZT}#9O3j!k>JPPI7pigB^%`q7q6H5 zIdx*>7I+zyPUf~+kWZ3E5dR?w#qhcc_VjqVTPLI1{&Sq-_*HQTGZFqOA{5E-Jtffw z1MQg|@@FW}3X<@N=xq!;%KDiufu)fx*giBd>~mv0$*P*d_?&~K$``L{A||S1($>QP zhk)B7g1or8KX?M%w=X6c%X{(-@ps-rnDu%U3}&ls#@H`jjHv4diR$kM`txEgOO< zHD;TIO13{07FvVjxZU5~ZFfPUD0|6F%^9V;VX(h$#&{{|LpMwhfh++oK7=SB?L0fd z_PEi19GD)qf=zSViGPf_{wLg2fYuj9tr|3(R)6w*ItoKG**gF?d22SXn5ho@k_)rG(2M zb!t-iZNB9$Ih9a4`vf9$p7{F4%csU#4&-4D7qBrXNjB21^j&xEmKy~?7LyTcjd#G}d2;&)jVot&T7uN0gR_sq9)t(9rC zr!{2z;}n|zjHUlw$ybIZ?{x@spbf08N?))*Y1HX-9-0+_Iu?0vwT@5Zs zl87X*-q$TS8*nwsKI}2Y2HjcpwMRw9KYy=@3L4*e?LW>pq~YvJcGaqVP-nhaNV$fD z|GHz_&^Tt{;02os#bZ2NCZ|w?`5U-=I7^|za&9#PS{lHAp@+8J?a&MCI#+q z7#+|c574OD-_iMAIbxw#4ixZTKL`-3+bMrVkaoZL5`)S$X3RCdoyY&Yn)I5yR15Ta z#6r#{U;~w*QCD$4pWIP~op%j-ut`;kI?lqUJ`KNF)1T!n4rL+R9_)IT){yWX>a|(3 z_W)F(^f2*odV#eGvwl7w@sF*`HrPucg zJ48RDglRXAlHYM#mrXlQRHIb;WvojhZTmWf`N>hqNi8I6 zCRniw5?YU0uO1P-A^^Y6#sd6r-FY&MRu$dfui6sA!@tFHel)Y`wD``;ygAZ(2dF;= ziW%JmSPDGPqu|Bi?faaml)tS8H zeogA6-7Bs&ld6k?X^y~O-!SqXO9x0BdXa!@F$TChrnet|Z+;W=c2pno3oE{MZKs;H 
z?&RjG{P9}4oyeLx9WJmPG^h;F$Li@fUGy#;B5}WI1fbyG8OU}}+b-VHoK#)koW16BpBS-=x&wvE|8ZMIiMHt#rSI70K} zK&u>lH7_3n|AYSTM?qjAAXDV~88)`2XzkaNQq5m_P1k0fc`8q#8=;am)6%H=QnQ{3 zHQW-|>D#EU=>N|*|E>2-3WQcc@YgN#Cw63QSoC|p(<3swG*4p{{-~^0wnk)7T};OCf0mVqmFJ!xlOKkB*VIoT`NNlRKomk z@FJ-Jc;kGtiT__;`OlLI|6EvRT}9&m9n}9q4G8>y-v2-N`~NrnbUdI_5}u(9v0fd3 PfS;nQs!W-*>Bs*8*Cnc} diff --git a/docs/source/references/evals_reference/resources/eval-flow.png b/docs/source/references/evals_reference/resources/eval-flow.png deleted file mode 100644 index bd3cebdf8f70bec9d6184157fd3abca42c5f1ebd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 255305 zcmeGEby!qw_dX7jqDUzq3Q`6jEnP#1fJk?jbPUbVEubKwDBVbR=Kz9)G|~*+J@f!W z{5GifbARvqJ)Ym{QkSEkDZa4r zd-oYd@r4=cS7(J=l0Ncpa4?AZYqMu3TG6;1RbG}1GSa;z z{OoL7lrDWFVsHC~u+f$ZR_urn^3rl(@%El83Y7vu*8aY$D*+`MO3~u`r$I&!gGa)?w-!@dWBTNGF1=<$m9N8b6{lt)w8%ab*?cm8 zTMQ+eo$BaESN!)N9^5x%W{(5aGy9}3MQHbJ;?_jtw5^v3RJ+A=J5ZZF?3kS=E@Q(k zb@uEt`~~y%ReCNf56!LKHQuBV+8Y53f3>HKgsnG+_0kxWWY$IXPo^ml8?3fLc~iy|EEcrk>ZD+ zB%0CG24$bmcne={t1WN%a?mDa40$I$ouxZ`UXhkx{qkK7fff-q;Ug+l|FP#nv|Q5W z?3r{S6>e{AcRukzs$r@}r`TQv-L@G0efhNrpi#jHn3kH!nxw|6P&I)ZO2EMi$M z*)8q1)aXZJK2kkP38NBv=3&e1AtIdeW?Q{Vn20iO=!A-SCtTq}YtH;6T?WXSrUN%> z*My*Mk)(~jef}1vtqA?WE{>gua>66odb)Tr(u{XldN-O;$uLlEL^Q|Y(fC?XS-}h7 z0nWimR85$7BW__diTuC`d-WOdZkF>Nk*^T-U7xd5yNf+tRJylkqtvQ)PACudreB=A zd$>b1FF1X;+7iQPmyq(LlLPr zRL_0xzrcAkF@K{Pe>)(kC66+{mxmB>KcU^(y;FwnX_py<2Y`1f+-MZar+A0Hq^cQG4s{1FG zxR)`pDoy&|Sp0mx-PwI8!J3empo0?OsYg^)#67SFzmWzzN{mNfp-lInqoYeSIHdNV zY&r+Q8h80kTizz)5u@~S?tT6A0yC(K8RIxgK8Rn2@;XrE{LH)3wT7Ji_9L)HIKpKuHt;s=^V$;OiSCTV*^8zcIglPyVM67vq) zfv@u{yC2n)w}Jh4JilXn@@s# zM`*{_+eAK#jrVQAjzaK3asW%)=mP&PUp;YSu+VqpjtvIsw+MZ?ohP<8CErRn6TP^V znoyi*8)Hklox$A9|L9}3R5?v;3p7T$n?{zFnZ}xG`)Lvtgn*I|Df#4+;E$w5ncydO zl9*rb{^a{WTu-J`xEU zlNvuWCd+tl|Nf`giS+PSO(kfK-H_F~_PVA8(;>$p#bIc0aBysJ(ChMGdE>B7%`cwx zE6NnP9?HXt6-vf=nYpi3qO-T}3Z9U13a-uIlk)U)`{-74Dsm{b4kabRWx%bxp=KB*9w46HCKKu;iz9bx@eEY@NXr1RiN 
z*QPotIZ3!k*`{dP2`YDrb1r=T@OtXM=Fs~I@|pr~*zBbl%%hTj*tKll6vN1DZV87vj7DA82W&=ye%op#&y zH|=ii(T|zLGe2kx?rnD1$QH`h$ifn~!+w$y^X0lI4`z?{LV5*OuvaY8&C~tG`zWSZ zPrrmfLz;}!yO`8!H`GTl3z-V_rY5JJSXWqsw(o75Z-3e5j-BYHTYj_5i|-fI*lrb` zPC*Hp61XjZ?{cxUH2Yvby@E9(kAt4jScUR^^Sa9JxFH&}T6Y=hGb+^s$V>%`9`Ll(nwyDsn z_t9-tPqJyh(kRo`%i+Az{x}xP!Depqv(?Do$iQ9{``J@!WB1pW)lGJJ?M@lBnNeS- z@7sX9){plIacCB0dAJljA34u|<9MO@W}>j1W%89Zag|VFq;BkVyW#%Gj_%I+ao#Zq z&E^~Sw=x{~2jT})vD2{(?Czx=Wu0Yi0akNvb9tK)n=<2Yher;OMVv*N>X9HwQQ4 zE8Wd}n2^h>!chT^n<^I@Od`f4KIbA&?v0;jYjJFwe1LCSYGNH5(EU1jm<{G}bT+Qz z;J31wAe9);jzGN0nkR0xS$W!P8E3=c-Fw#Fh;gZ?l&1RVSxrro<+sd$;}>(yM`GzD z1zHCMpY_~QyL0mEH@n_U&);qgJ9XMBLYHETDra%hskI@eS1_2t5?URkf2;ef5o_bm84zVCK~rDsYogsnZ)a!YXYxY+ew zPFubpoF|0Rvdalt$Bx+}lz-+}77P?@WwppA^AkEQ4{3~yFFQ=Hge4&aW?Xz-Q!mnB zt+iaEl`V~R7hcs<1<>tJ$c(@)is*R0HphpJ^X^NF!#;VYg?5EH^%Qo{1@W^`Ep8KN;RzBT=p9rnlgAg5H9q;yrdzQZ-nxKi1+C1P*^gIpx{yel{<8 z5y_&npZqf^N{HQ6b!uzUvz#`2WpCfEuTJ-!p2s%RiPC;wXEYZJ{`nF@O-oNvZz$2ULFOnJzps%6-miWlfY;SMzuwV51)^X9f87OME-7e#uEt_bLH~1%VFa8*5m6GA zlmyD3EWQt`{cHnaXJ-WVv4>-R1nB@`G^(Bs$phxO5@>HTW z_Qq6P%&g3;j|8!)sHg<&jZF9yUyA>}9rz3M$js5vmXC$S+1Z)dnVs3j-jwAzFE1|( zD;o?t{Yqyvi^5zby0DAgDAm|4VWN zuRfrln!K^bB>taLMw4zp|6k&WxQ~j~{e2Rf-tT`c10%)*>wk%Z>Ny%l%=;Q*WsLu| zjGL?{c>hZr!f!BcvJyHlWZ(R+8Ujq5K!1$>ao_L2Wi-pG@^}7+ZV~_r$Nzr{ z_m`3UpThmyH2zQF{vVnApNIR)TK;lF|9|JHSM}fanZ^QyohD+CyJ^ zlIpoYGVP)+KiNh9TJgGk?9HR|m0=qpcdu_E`B z)$Y`}4u1VHmwODNHj(~D_RpsfWK&%&)Nh&N&q5Q?z_3age(n6`bO+6R~_@( zVs@Mq#lCoJs1Mh&7U+j4^=s-oHX4 z(Nkd`jn4!dcYbMNN+#NI8(X+i`%*G?G z5a8^1@;#re%yyddYiP#9(Nm8aQ}J#O)7Z`08uQZ zc0T{T9I8Cb%1QgK>Wg((Uay_mV|E*Y>|itlmTo{DH5S@J^B$p(e)!i4h@Sv55$R93 z@p~GjqEr=#CBJ9*Mq;g$UKf}!$o^^N1{cgo%rnOba_cRWHJ-!#4I!;;cj4`n%70;~ z2o+#b8cucDjK3%RIZN2WC00hXecN_kGHkix)&e+@bCgjola2SNbg2t`5=>m_))QxJ zFMy|bfp*Z$=xx~hZ-kKtz?~DCzfjTT&966N#(yFehY_gD{-e5v{5DXhH24}z_ zqEn^k<)GYAGuRLuIZ;8Vqeox$+DA9WgYaJ%EhP;Ym*I8X{_l~6=P@TL!Itc%y^e?7 znbXm^#rUi?CvrmkM#-M^Lz*Is&3!^uf&B1<0arbjbEgeK#~2uwmfHstM*|i8$Cn_( 
z`{VSsWi>DVsp9ksz-Hgw<>mY>lqXX9T})LU7&xpu^exM~n0Hs6KnHg6ih7f6I4y>X zJ)I|hxcfLlrloy)uZf7vYsRy8yH^Aq+&JaTP5CHkAot6m^-SRl9nYfr zZ6>V=O!5g5*X^Totg!^=nF@#mxw*eAe?iBVRN1%AFAU)JNLfe_52$oa$)`up`x3mB z<_3k@rotd?1?k{HTyDmm90&Uu)}{mw^G}@ClRUIVks&Ys88>`8iK&|MwY_`)xVl?+ zZlZSs&g{(s7C!T6oF@rxo!i{b+!3PG7$Vb`r0VRC%O*JUEc&3T1`b`RY^<@-W!J9R*qg{MsH$PLirzgEL~aqC=xXvY$jZ)34#`Stf8-ZNUVyySPEzA( z&b_muWf{u3dFW2_6*T$9c?Rr!f%xb$Mfljd9Lxn{yy%WsL7NkZ0sH~OzqPC$(6TP6 zsD($r6kAyJ)>~8)v2JhIt+(!0oBsHGj|050FWH_oo?X;!O{J#8%B>Umjy9Rn^V=-) z#y_qknTFhBsSJ~-D!$OEGqKNRgSBDB1c{5RchJc*^869{2e$)N< zghabSw;G%eR;+_0(f7vikM`b&xYlnD8HU?Y7Y8n9ED{5fOH$aG&QmL@C)>I8@8Cm+ z4)|m?;YET!KKUGtm|P$4K4^kQ==_5}*+hC!qKDc32Lib3n>gH>Ev_ms&&-k}$)Q6_ za%Fg+F}ZqgOKBjnPQ#mkEf4z*mjMMN+%%a{_YEbB#{6>Yr2do>I2v44QWC8_zq~#a zXsYX)A(ISh&y;8>AIbAE(6mWBP0{i00~Ll<;Xl;J+ANXKp6P46m$-5|9UlGV9sW2aVKE@BQGLs{@*8;ge!pG6*U2!FDMO~#7rg4L@d-s~ zpO*r1#9N3+3l#NSJAz3U(I>J%hjd-tlPW;-wqzcsYt|aB=f`s^$y-048(LiiQP$KF zIZ+JT^&Z<*Y96jz56U&3{S-Xjbg*oI3z|EhuLn_XhGN2DmIDH@A@{uJ0_{OXU@a|V zvZYL&67}um(B_|IY5mGeXRAD>i5{Qah2ZJo`AB2Yss1|6lzPtxo44s`@G@$Fl*hJ$~?W95gqp}w5rkInai_5Ywu$NceA#A zbJ2l#=K6j-o5}2cL!$D%I!f>I^IC{Q9&x6i*MhD4T2IyHCx!HRogoJ&;VNs)z-Xz4 z^O1WVr#W?DJzUe(%G3*+bTNtSmx z*S33}C1aIi<%Q(+2ti1*NY6Z<+6Wn?K9~8=?=%9~Wl0ooS)Vp6A#c~x;w9~PE$U_ zh4R(vH43QNZF7!}DHU~>ijr}bfAmDwI(0>{RDDFu*f@jsvcY7BerIygN?m%p54i~DeQgF!vvcX zGD;8qbMZ+cT!T8EnOa-l^$C9LSj+rM&H8Gm#>=tzfs1vqoaxU73UY%WScP*)^U3R$ zCDWv_PI0+dQh27Tm)?x31 zxl($PpA6Ziu^~&%_nr)wZ$LDg->~`K{&OdXy=~HBBLioJ@4RAkv%9P4T@}oAqo6>9 z10&x}PuAtkZWOuGaUM4o>UsJd_SUAiahosxXipku(8aXXs37-aK5A(C;~!sYhp)7F zl%8hbZi5pj0AgM*aFdCf&Yd|wh};^@uuNKbW+l*|!Jem(9@rD+W@0PJVprVfUlrV zrnyNcc_;{t#w%d$gN}#zLW`g8+TOl7_43sjeO*%zye9Sex$1%CBCUmARBey}3 ztV*5U+*X{Ol^^(iSUZz`{rz~Ak_3`(0@37s1oj+DwRV}c6%ta*iE?`HrgXG1$*W7Q zX*qYGC9(3LmV3JR(+m)+_Hbtp{-s-}fNs&-M*f8==`>T=(4>WivMKM5^m(pX<|Reo z>$-P2O5qIH5T2FPm`r`9*CkX<#y}!7NFcjJr*effc^ZWQSXVRWlRcOcU0)D1XBV7qy;mIn$X9jyB^z znOBP~cr=(SXS{uSToF1|nsearNZEsUn)P9^77eucSoH@(8LM 
z!Jsa9eb=_v!GJ&)=cf5(p-zF@;W}h|t4gBfo8a7tT%Gw_{wz1g?coN0Er?yY&Pux0 zcVu4BTuwAz7&>Rcn0NTpRv4AfJ{fX4Id^6QA-xkKBQoncs%sh2pl-!ysz99yIXkJE znRJ@uFt_)32vvv}%hTb9dF}V?GqP+46}OvP;J@Eii7TgeaAnS7xSqrasxcnQ8mjh?=^_n|pW6!Uk z@Vt|xE4$@|#|F`1)L{V1pwv$AANTbaax%Wk0HWd^-u{yes?KI)C6_`jan3aqL_1Dh z83HQ7=ch*o90_@Wp;Mm7t<924=%O13#ho6ItKJa0($ZNacq>0r&K|DheL()anZoNL zz@=o_xZZsy!_JK7w}-YqUmA$WrWF@WW7e!63ll&HR9Q8dXK4h{?I;iIO^?nH;EUwh z%AzK8elSxXl;NK2ha!YHKCHjnwQSYzQ1%U>Q;E>io# zu31q!y#95s6{!-awvCcxzDP2Aq+B8oNBFgezOt3D6Eb@ zNr`=$3e^VS;B5)B(S9r1Ut&}d2GTw3Nh9VvzvUQw?{ArkTb82rT#zR&emb>ba7Om-wSy-D)T9S{ab@ey!Xq_jLF5mJ2Je?cy5_KcOJFxl? z%0JA%n_yX3Vrl|iUl%kzTuB28XsK?)Bx6j^DxF4%oShu1}Mu_R_)gWHb=(O$+ zIR_!WTCWQ-R4cKF9db<;S#qmUZsi$Rwan=!ySQc^)^GDw$t_GlDroAqtotNDf}SaH z%m+Af$C^a-l_6;A#JE|oEKw{N3uz%_i|(C@LOQrqFqV?Ra z=lB)`CW>D(wJhE;b?Weyf|fH|N(U}PNpg4ty4lUHQ}nt~+9Ak7P-@0o zcXL!p6=b4P7~w$up0+e zoN9U?L9cZ(%%Zpzb|>${Cn}OU(_6<21vXh>IYB>G_lFnSU|aY?R&jak+GFF{OMeMym~gu{6~b3tOF+Z4u!+|xXLe3x_QazYiBw9>^@Lh5F;TsGD^VW774 zmmnS$j;~f(BGI)z&N~lG)nTjZ^epukwN8|L-B53f#hyyzsoHJNpI)1`^OyLB(mt`} zb)ugCO1S=ih8JlS4eHqCmy-Y@+t=}JZUr(`zJ=76LZ|b$NwSA?HP~lL&d?51+?@y7 zM^0AnxuPi?o}RhckPZaQ% z_tmZb9gv>hrD|flL&i^it-BF5KKNUK_>|j_I9`WLcks`zlFL{*i%g!03^k+~e4eXQ z*#o&J)b3*Y_@^&?)}FeErCf37&7SHZ{|a>(B*4f}YKdRm09ApHpHx6p4Z%eQw24tK zTi9X#`}=Kr)w8om==~nbar0R!L`~XqQ@8z#Ts<*NK0H#RqTYCTVG=KE-fFZ@m-s)( zs+0skc3lr*S{`3bTp0uq@0-;tAonAV8FY5erXga+m_1>L_RWfyDSp6}4EGZTe=YHk zjRN#8CU7EeR%YeXHNV>_HycQ1Z3|@%;q#~LRi=)6U0Xv2fVK%Es(N@0^Le!av;Mf6S&+7kKoh4DX{WS1wFUM8{Zv@XgpYz(ysauA3zmrL0?$S0C^8 zq=0Mb6o+|f-3x%vXc)K!E%&2nlQeV60ciZsA5j`5qVGHj`m3&A`U>!AA=dX)#I6MY zD%0m8W+GE2-s1~EPAk?N-tTs1yZ8Au@L*++P3=|;?Axq|jX_VhhRNCPRRuX$+kBdd z4v_g&6kdBR^~FkIgU1<*M_)`KPO)J;k>dl{)15O|e&>Fn>erlq1R20;*Vs|kU|+#a z-`7O1wmfrgEPa1Z$WS7lQ2PX}Z^kp)iIj(K+6hSQ<`MNGD882@bc{^^AJ25@Ejs<5 z&HY^GGm$JPumx0YYLdeVe_;lB1z>#Qr#-IMVzXDI;;c8~Y2AeOV8o2O!#mTKz0c39 z05?*`i1%f`9T->oeCB&X`z+dT>L11T?X7H?+!ryi#blyR*MkHz0Z`704f0FBXD>0E zr^Rs|ATOE6kHb*d$>5sfLnpQF+ZE51vL-yE@yl-TZ_0CF^~|Mr#|a5-7bGy@gOv|m 
z2_MvQ1zn1m*qYbyJ#n}Zo%ALCSXWs?!5qkd6#_uHGe5cj&U+ zv3SNSS5OFtfEVL>_lJpYx|0sDc3;4oDm+=r<7s5VSl&__83reKJ52Pa zUgf%DyFGCe!z+o=`=eSDx4YX=%F0K(FaG!@VMWYlhitn1mDl>)9_;!QnG!wu0KDj-ouD_F4o()i)u1>!+m^AiuuRM z2GbViUHvH4i1WWhSxO=wk;CS3yi<;4`aPwbk@4lVjuivNG(Q_Wm22Sfs!;g&GlN_? z4MLDO%f!HE+;&vQ>TLE~>v$r#Og+W(5|wLZ%DLcM82^N4OQ6Z_7iH<;TdWLiv1PwD z95C~zcW%ORbC85@GJ|)ntAA8K040!Y;2*-h3gJ{GXmgX%K+6bJrZx9Is!2(8aK-K( zF@x1I%m!8$B3Owpc>X==VlQN?u{~)dr&U1}2-R=M^8~o8UB3k!^`?o9*hVLUJ%$LQ z6{~@%&k5#V+sc(be4>hA?ihNQ3siBWUmRwm{{mTJ*aZ6bB){MICF@0E{o}EEikiJ= zRO}D{A;d{is5m?s|Ix z?K3lv=->VEHaxz?p$n2OD5Jkqj|fM=Ie(p@UJtsuA_6#lr-Ly6eI4j&p}dRUEmo=S zRmhpinqr;Px-!}2*g^n>S#y7v)l4FU-Z4H{ef+DG8uB75dGqUX&{%OvYEa zD1+4)q5i!YIrp;@#0&%}iVq8Y)}qJ>tMdid&IKNgAStVpVqjohA?(}u6$|m4Oylo6 zU5cO+&c!nq$1Szw-t~uDLnR-=liY(IH+4hZJBfe00AcIfZ^7z1&+t>1j?S0YuiS|? z(4f`*h5v`swQeZK2#+17(~E_ppd>;8Gi8?g`$=^Y*&n?y{9;1BJ8R?44JkeG0@ zEFx!-nvW-!75Jf&r&b4nEWa=Kq~7{+_&o}{LY05#Uz#R{{Z-b#`PB=tPbH|=>qP_j z^1l)nVW6moN+Sw48ig|CR~q=ZTE~w-WH&|vz`eD?`UCT=Bc7|Q+~HsE*=RL3gswY( z1XrwOepCJ7QTL`58@9Hnf^eG!Qa2=>Z3YoD;B%_+E&np5C)jVd)Q7`^>>^#Fvk9-C zO(*R0z+#8u8m0Q;U^E9(qMF>$*C>@dR%SyrS?1mfv1uqVbUU&aEw*66C2<#pPgfq1 zS{ivppjB$`ZBc8e8r*K)nK zAR}LtHn@BVMFP>>Bc0=B$<8A{xr45s|a445sr}E_hu4$dldCyg4vOKkF zW;+UheR^tw-1TLB4~){wjP?-P;hE&PL|kb))D`(}?@CWl>1x(TbI7c~a_xgygs-va zRTL4X5(Ru7PHL%DS@H8ef>N=WdRIzMBU;_DQLu|ydLUORJQp#SG;@BOuES^Y!Siqm zkx6aneYOPAIs@qEr3(Xl%kA6bv^KR4Mvgq!hT+ao|d5a9(oJp?N^hFImva62VOdms|Y9Zw%@pW_V_Yg|Gv_z=26COyeE zi8t`y8t)T4dBR@=hJCQu+o1Fw+l3;>O!YjMDw83D zH(@}133=Wp1nz@uO_eSWJd93f)*NPFn-bANAj6>#aJlnpm`tZ#gUp$GJ^7s=P>~IB zs4!*iwr1XWk#}od<`IUc+hWf^LX6YYDT%K4R4qRF^0W}&*^MhUZQNGEa(lEQ9(I{9LJO+_PkaT90EO=Ym7)FpM6nQQ zWTRK#TLCCtmbmE8?Yp>=8zJkii@YmsqEi;G@T0A*52{R4G<`j>hM>agpU~vHIxU4T ztA*+PbbVkFe&Amy^eQ)aqCsV~4>W6@$SQDOE7vWc-wt%OM+CwtF@R0AVsw4#X=xKc zH8J5X(BNR)h#uIc4^sgINFOI z*ONKZWLR2j$bLS)&>13d102Lu`Hm#fUeY@rIqkmffJ_f7zlZfupRBMdDaUy^9`3Dg zNa?|vB)Wf9F*zJKi_KFhvR)q_ry*4xA=yF#1j0BW3v`oY9Mlpx@tCDvpD@{@lc^5A 
zXE354M7}wE0#|SMMV~7D9d4+mgSgi)Aw0WJs{Dl9G7tvOuB=1_mu9#+h!cW1T22sJ}X$QulV`QA5_UNOgyDIXAlll zef(Sf?sN1l<+M{+`ddSX!KDry-Fa?OfyN_eCnM&wDYgm|h;~lP>t2Q=YRfvj2X9!Q zC#n-$L5MX&2{GRlt=l9MKY%(t>|w>F7gxQrjTR79A^i_bYmE)U`^&N3r2eO^d}q?3 zPa~RH$EwK(6*6I-CoV6H37Gp1h`vyL+^TUYN~;$Aj($`qt;BU^MkfSl>t5MX$F+s= z(gRdoJU15(t5d-3Oo51a5=}g{j5$5Za5w(mI7FrA!3jyF$q<=gI<5swe51&QOiZ+s zYaHyX)YrYlpgP7}pw zB$y@&GM+Dykg-AXK}U_z^G?{3NXRx(qIZ?c6(LooMG-R}_|em3DA&N$v#;;Ecx9-H znZJuy|Kl-1VE5So13oV0l`o@z%IbDtRyQBakS}|1ndp!{H}>r2=a)Wk1<9e;jrgc0 zYq-4Ee6MipRdc}ycGK&XcTfW>e^?v)Edyvw>h^yy`*{Fn+!YP80~}ou(3C_f2!U3N zGVgRusD4`wraQ>jNDm}?UFpW5ZKbf(Znmm86oPa}jcN~FzIk)!za?C9#HyGdUX7>IABoTJ)h zuZxdoB}tG!Xp|8Fckwfps;`vXoJ~ge$M)ig%_rd5u3p&e;hu4H8g@S~%+{xh#Ih-!M;lb$Ya_EM z-zaLFF3=zdO)3`WP<&DNxpm-ds@9tx=)rpOow1!Pz8wB>BF2Jn^EVJF{x@(-3w7$2zkA9#-)R6z;deXoG5$&?KLK+8w0*OAdHz53}^CmUwMg? zq*?-Tw@SO1%gCK4pF63Hcy15pMU&x*fn+!rO}K~;AF7bePgoXJpyTmS=?y!DTi0!I zY4OM9Aq%Rea}RWbPw3T~(5A2gtk-pN;PYfQ^#%^h-OuzlR6?r8O~~Q}J>61apI17+ zdWL)WXbd4V9m2MCy6fXUz70l_s2mUnc%ocjG56Mz|qf4r<>Xl(Wx)u?sM z*(zzLcwTlMdQ;=g1>Q@D_#=hp>1Z5oRk214Z7#H%MvSu(w+G<)q+tg^lwQUB+tn@NAMW3w6vdx^ z_An1Ht9r)x=>)ISwWGyIxd|`imMLP&S#Lzy^JH&{xqP2k6<_L5{Xh(~!Qqh|h^7h` z79z>iOI{ouLQxtWC^3}a-|Kh`K0h4oQ#*XXZZZbgi}mT{VJE#LlEP{2XKRxCHg9|C z(K%wq8&2_N1R(;l2l}&D0%b=mXBs^B%f$fN?=gn4=q<_#BiVhcYQv$c5@B{N5cE|L z(+*1jYQXUo0vH6ym_g7ZgnUd6Eo4}fRp)U@2l^8u|F3;t>}P$kYUL|RlY(iQq&r9| z5_0cM>v;$NZkjlvbQMc^ee11*-g@%`!OGWDUd;DUFv9lh~v^;_4^hx|@6c;tN5(;Rlbw&b4fy!vWr3?3fzQkK#9a)Y8R6Kgdzuut3McD+e6iJ@;Of z3U^|w?TS&|${#fGMjqRrtkuV79)t4pQr~h)WH*qlEf>o`xej?$!&Qkxu@9Qi@zs0c zIJfHed-Dj&hE6{(vZITs5EYt{s@N9vZ^6%ygjAg7#XYLe{L^gNvc2!l}73`B3MxU}*Z#NK?1 zd%EL;^OVeDry=*|S3Ghl7Y&tdfl+`a$hfluV&%ka?fCe3pWI?^@dsosfTVT_`#Gg2 z&=UYfQF1;$-6+}uoS;4E8{qFPmCUkza)2Iz-AwKok1`#H9?tE3;M)K?awRUtJInDl z4R+AIp?GNrC*YRaZ<=K%0}*qin#u;ffZN;19C$@id^+tFr@0W!P}t#sb2-;1bm^F$ zNk=qcmL6&a*r|g^xSrnF#!9mH_IfGMguh%j(tN~Vk%-ns0do5u|6?{n6z{!I5JKGH z)kvWZzwXKm-J((vCuW)~Rs9c=_K$U(9n_xVcKv#f;qqba~vbnsz*;v}atmlvj&Cw<;l 
zm*~}$tS>nq4$c|DaD^#R&!F4%>(m7b-Uo0Qk*dJC;o|;Qrc=bh zv^%`Ud*yIrltrOG$6MkQ>A&31wAx^#SyT)Kx~rsz$MCLmnZIqe>aBVpy6i?)t!w14 zvRBYayKk1XPZB6dMKQitLjWVmq4Vl~OyO=Ah$(z6naE%age-_$dy-w-1ITVGNmia) z4fEqBMz!ge54HEvGZ+rOOJ_BgYTwge9n)^8(>V98`tb6t5WmK}!rD$ugE{*E&(eXjV{R3L@Y{={vMkcKIfduftEZA|Lk*;d&5$5X7`Ue z6`_wRWZS!qUM7?1x_raz=BxZbOk=8<AM8X>>vC6T>Go@qaN6c4^Fq^Q?Wck*7JfnfM7 znKYBD!sB2-V%z3+JB7Q|nI|bfBp(+5Sz!9Ejlw!+r>Zxec<9m;vJCgg$$pi{uIP14 ze+NDcf(}6dMFO<-V3r)q&ZD87M#Qu`7)U^Nxd6VJ@h~!V_gm+jjD=4uu0zIgU`3L) z&`N?sr%1+*=w1DL+mW{|Tf{xr4+*Y#bzTz8p?yWs_69y1FAvu_%G!m1>nKUkK^w8= zv9(^4on%Bms^<;8jcE2amj&Tsu7~*bj_SHI;fWI6T}LQ>u|4h&VXo~6clU9+FC&m=TdT8qdI@@X4F7qk$wlC*K} z@&XBmEQE4T0PNg6w-hqhoSUQEn#VD3M&4jeNe;9mwCt*~`Z&_Bxle#B>m-FD_3{GI zxSy{u<%CObKjw*;n)3gI-G6$p7HO(AUS%;%uba6eGn#KJFD9bS)0aa@p|bIFN{;xc@UG>B{ z*)v%XM|58IaJ38#plGrtAn!-#pRVk88>0~Ft2pqi6^)410c1x+kh&6Eo6iJ2+pe)uy=$sL*m+2%YVSavOai^}?Jgb8M$Gdw^;p!d6ouP~VrMmPw!# z;QCz2_&DJQ3S)ART-E4#T1xQisMWvANa=+IB!-6r0xwMP(4-eRBC~)ZorgO5A)|Ury zqls6bnIFvuGecnkC}lkMR@Y^H`2heu5i|9k0PHFQrbo+bZ+-a|dAYdU-P{|+r0(%n zrSI#Q15Dif2Dt?)ii@ce6rXb>d~8;@eB!izI@CAgg)DfckDDAQJC&KO!4b>x=YIk8 zWh63)t&CDuEu{v=HpBdalUUWzRYlS+{Ut?U_sX?b1|nTFXifbH8b^jy>ZbD=MG6^3`qinfgAI zO^I@O-#7NfV@x6g1)EYpLC2UT48uW_5t-{c_b!-=u+E9n=#`Ox&(e+;hwdmh&vB^W z&s_AwFD%frkCP1Dig<}Qt=1ba4p}@ubTtRumO=dtUn0SCBE|F1tGV_6G4>TuQFdLp zfS`m3(vl*gbV)ZNCEX?6-64&XiiANoNOw0Ph@f;M9a2Mg!#y*D-|zo__pWuka2+-?PE5C#hr(4NGt>9zWA{Xx>w@l` z_a%NJ#-ypF$x*tW$3qqBF{W~)cA!v`b>UUA5s|wUP^8y}MM0|j5mIEJ>|K+;an4fe zQJnmS#Ja?Px^Bx!aVBG^mcP=M$UKOsjX7y+*@+lLcGjnC( zQ8dta zrx}_)l^iKe>Qv!(N+rT3-s}7)_l?{uDpccW0iY?UNvWAx?96mvZFB`f`!T? 
z!iWVuIvNS06It{IN%#u1C8X@;krOJEAH>~v_JS<~34_F%yZ~xsV%beOp!5B8Hm6%D z9=i+9yMwkl0mN_0(m@K^{yL`r8I_~(%%_d zA6lNEyauV1TfHv;c(#(-nvhc$ieY8-xw3Ihy4;(488bIH^s<;I}Af``3Gr z<(r2~3q35YpY7kK};Vv{RgIf-jh-sH351B;CrhiiP4sL?h6)zWh=#zI}s#Nk%TCUAzyD=UaTC%?p^m>ZmLOQNUt z@_2!1b@h4omstu~??lGW6)dDvs=#1&Eu%1w`O~O25~Qv&g{~r_LlOe7>@X}KjnTg+>Py2uR4)Cvrzks%@txnNG>{L=Ey7G)hvNXTG+ky)a2l4ke}=85Kdy8>h)_ zL}}&h1F+OCzqqN6PyLY)UJ322QhzBv(MI)*&t^G4{;L!6xLnFakNK0kjwT^o&nD^)xqH-l%!6X3R$3tlueQtv`hGrzqP6{l*27XCfzzmr=WUI$fv<- zjrP-EK*My$5&E621SA8LtR@<6yU`+@$&VVHsuBk>s6HPpW?3sxP80(ElD-19_IMC39N;k~Y$yM^;9 z;2_5CE0JcsfMCOMkP=Ll$Xh0!J{Gw+wZC&ufX8DoO8r!IQ#EWFnpZT+W3mkIDK%0Z zuvahPu8tP#C3_!;w3d^*3@M&O6IKfQveRz7pq;ZRB>q zcSFm+BqWW(RJmPcixJZ)cUmr9jgr9ti=DzR&pWLa2S3_E*A2xG{ls;`rW(mp_rx_I zys-bdv*6Y2=T}&mA|!|7y8<2iEU6y1ES>Q$e^-Pc8o z{!;X+1wV)b-5hV1rb3bEa|jElju>%qWrSy>hv0Gi>;hGzMZvX@r-CnYI*b6wcHMe% zG8eL_LX+jq;bm*S7?I|hk~M}Z&Ak{5Db`mNxEiWguptTRikD}rG`Ga2aHjpHTVbx) zWYjr{HJDeId6*|FU}}(ZAzF(D2ptR!aUA1J z>kaE|41v_1$47(x_F_v2P_Mws7e}49lIunV_YXwNgO}01NI>r9H_Nph{l=Ua$om*x zoZhD}XJ7%DyR+XL!BTKYS*T{={D3Dnu(d5%z|uSC5){h6C0nNCCnak++YK505?YzP zy0G9A3MraiYg!?;8qUj%C^F~8*M!*J64W?aWAmnN?oxTX0rNa`+Mu^qwuP`P0Sk!CFybJ^=j<*9ps)5Ym`A1EmD`>lcg^{7wS_ysULv3Yh5($ z-A?Kx)6j)g4k0vW62|O51?{9U(Dhla8g7kjqOryZc*=NWbL&f(Z{tEZpEam>Zf1B; z&`F|ETq?7CzN_F|1kJOClw|}S9bAdk=^g|t7mMHzuYvAH_mlHX?f1K)qebN2KB(%2 zYJOS5;?(O=D+Hg?4<-nf!KQNcAs@>liynED zuu7}Zs?&qDdg1<%594}`RKu zsa>##57g?lD7tfN`e9MaZa3JFJ}rMFtk7>`Y*4wP#$XOQRsCqar3b-6^6>vLrAD<#pNy+`S5id3p$3nhVwSxXh$t&~CX zFHbjGb>E$e`Vq$(prza#W_gajJmlB^u@l${78m2Lw`O+~&K;_?6hT!gIbScu3_VO? 
zlyDEmZc^$wY`F-*Q*(YVRrfRxBu)8?EW!7A;x)vv&=N^&cjyJ6W{erHL#Yw4Z2fnE z9|dD(35JS&rh)o!fUH4=$A=()MMx7QDmSv1FvyUYcNf45u8Sfv!Gh6JqjvXL4Vz+@ zLhWi-n;VsP*^zU^KS-$$MfdL$pnvjGjNn2MB_2^-Mh}I2o;Eh$MgEvAgnQ1!qMI$Xq!sgRW44U&6#qh(11}ZF+ ztK*)E1{E~X91kiadZ=RY@nqA_H6&>jbA}=D;G@I{P7W=zs3;>8Y=vxjuX!fX@vCxY zS`9}QLoBq$jzroS^NwffC*@|i1U~28;;EjnEnF+@1P1}m} zz&lMY)Ot*T(s)XKx)-#;Ex?|QP?@^*-h-7sM|ry_?UhTx_8jvJz1oc3L60h}tiUAV z*tBg$i1_=^k#n|1hf-w7h$;$QOOykELqC9-TC5cP+X>4;d}OICc}AofNkp5Ff&IV zlaxS0+VBQhJ4hA0Zxw8m;Ij=|VLttGgfYKvJJm3S&INoP)3f%KllMxMROUDf-@!t%7jg zw6A3b9sxSR(F@`SYKSRMX@YFg$v8qXJ>>FkrEE#5PcE32HMV@kmE-F+s~cki!OHr7Mo-u&UDyvsCY(O_X5UK-7rEN3tx2d__hntSeqT znJQ+$zPexDBk!lF>E^!&+k^4fQAGZd_irx^EtHnr?1ENd4R@Y(H6%VVDmd=jMx1b4h2qvmYa*cjoO?9;5;aXXP4L<6HPv{Y=U%A=a z@eZDW{TqbGAO=ifi^QotbE?e2bJImfu_1n21p;4O@=u#=;l5jGZNC&nX`>Ea(1OON zt4R8i4lU$VnoZ)(5}#soeH(%Hn8+HxtzUhwe2 zs!=?sG#x{I%Ao1^-urx~-L_`{v>4+qt%h>mCv;}BXHz#bgpr^9^^i!t+1UUr*DqhJ z0^><+YbdX7GJw4(Y`Ca5WMqvpXl-cVVe+9YkcNd@;3-US5257eo$A-<|GANIoG@ru z%Zj}HIa%veD3`>m>58I4=)Vr7swaDj$-;;o3?Olh+7cXv9mu2b08nvoN=yHtBZHWX zg5I#*f-wU6=G8vO+jApjCY?ytCV;c_Ne;}?*=HT-6-XtaM`PkIbZNo_;3#?Ddfj{X z$73F6lNadMVI4ZmD5?_cUu=Wm1S3GR2_qL659fvC{LDo)6H7%$4Ybh;jx0ptN|c7~ z)+kc}6Oy(g9fs}8%Hj7fDV6Zke0k*m z5J&&f`jP?}3^W6(;*+e|>%+I=&;r8*0Nto-3?15{1kbsCyMGVKAR!vz$-SdMEjS7R zo6EZFT5#ZBK*vWV@kEXdQYwYfl>S_YepjQy{Cy*!7rBDjY6IF5`mZuaH9#)VgOVtI z^6;23a`~6YO8aTrc99anx}u+eq%x6>-C%>udyLI_2#C1T7t3$|L^Ejpj~;4}zx)ty z6v(Ftx=z1bjv6-T{LTFloQ;OT+|bACwrMRIe4F;8kz=GWRB%XL#*?^~pyLAQGa)XK@AL?Dn;)Is8qybZZqD?awlC+ddChY?DXDeFTYr%!LG7E zW9hsbFA7`eKU=_HL`%@7Yxz!&XsUXC)4m&S2GC(h5onYe24UO)#X$85!%#-LiaZPg zhLTn+p-IS4t`T$7ae86n;doH8ULzB}e%`}eo1ML0p@egvh=qvRe)|%ebVY zNG<4NRYibu(@mNb!M1;=%7Dk}0+*|hMfl_EUux5#+OF-~U7sGJN;oE8$yuPj$ zdw;XETlB3_fHhZ9Wi#-;qosU zA_cXlKO^%u#4wkW>SoMMPmToGq72sV-UFRDTLF*#*Q1T6kbJcaam=Gy2A@F0r7*5( z^T&zyPt!sgL7Q^}aEI92t_Gyxn-SEdcK zGSj2R&xc?ggqc9eeQ9@UaU3>;pJFhji^gER_0NVSkOzK40)Vj$&>sY+eb0|~MgW*T zdO4J%=sK*b-nRC$&dh*F|IR(OZ$P_AQ|PA>SQNZBDo3qbF}Y-_HG`N*K&=`|rQ3P= 
zQKh;@se!f>CK(1+Dn6Lg`&14B8qWJQqnZaZR$ z*W`lwUb7VMDuL6LjGel&J9a~%N}@w~=9k?7^z;)jy)cJrG6}hwfC#>2GU$)mCV&+A z8)ddffNj~2N476bu4?uNq-8~CPt#9}tHv!asfsn3fDU^eu3I#E9Q@p+Pd|mJ>i^%$ zp`C+5ZPi{6-|{aB(G*M6A)rxHC~xO}4)L(Hy2Lf97fYk~q025?wAVaSt26g zar`_Xm$nFKXY12e(?39b!LRidvre5e;&@l^_SG1no)_zeMGT-fLfKe6PD?dcdFsWs zAY-Zmv^|=QLk1bRmdSh?ODw?(ctrPITj6f@0xQ3?p3`&CS54(Chz@t0{CyeoHJf~pBkT>)p(sajt=gD z=8Ju6pKax^TGeLVnrW3&eBlt49WZm-ZqQ=147i2WQyv8=G%y_wV6QxG=62yp`A7wG zDIcBma?oHQSr>%4268X>@pP$@a1(k+NCIvekOP9pqxuTz5skB+1B3R)_%)HKuWZUuUDc>dk*N<>wq@kwAD%h(R>Mr za=+rOitaJ>>~tzr@e>Iv7@W1ts8du*J~=;!PY4byiF@vq*%o!JVx~fMJ&@ zaJF84`1KY=&1^Xc22AF|*wuin89TvV83O_6!v?e+<_zYK%2vlK^ZD!=BDvn+qE1wR z?S}ONDyUFrFiU|54#`G``$;@ycp$)y_$O_G#(~HF&`S68duTx3s2G&v^f`B*0QNBX zi~V+9bT+}k1M6R9CJ;kUV2K5)`D#16H?a8oFh%mylvLS!GqIlg9?#vX2eoq>P$PC( zwwZ3IomgtYV6JfL;c0JVUTLr2YO;d1i?s!8#@VaC2sQvJ$Vj!F-e`WA5jf-~Tb?2x z+}4XoC*MQz4iU?JJ^Ooh`@IQ(q`nTKI z!tP6BLT|ZZhIm!KI0=*)+oHTJm7y`rig5(FK!M&D9}7>3X|UTj>wwOuzT`Qy2cP>m9qSNJ?v#fN%Do(YQd(l5#BnS8 zaAUf${$OiHh~K&Ch{E^0l*4MIi_v!_EyTLG@zAk-^kqxB)sD-^Zg#TAPcorN)7fVK zD&SlbAjP^-84?lU>iID%sUiXMeu}NDmPJ3)p99Zmy*XLCW11_vPuHQ!Ja{mo%{(2< z^@9+$ra!*u4Pk@z%E<%hLG_@=X$m$6Y66&(qc$~`_{YZ~BL7sGT`%vglA!P&B2#W{ zgC42IUTh3VBMW;pk%N6D=%>*Pn)AUC!z^V6iVX`;7CIIT4x5dIVnWNX09onk|YwbU~|9&r}ia~}a!(n6)JA1uFBQgdrL zTFb!McWw+q@{7QE8bHYE0NQht6Jp<#dYtq-IB-~@8cH;tav9Ze1vItx5GA`IOs+|m z%L0R!!%oA5m?WP4VuZEA6(w_y7l#dMO)v*vaDFC&35zQOAj)L9!FFbVNQiU%5p3vN`#SQT}2FMBMSs`mIfZYKy_| zMNOxc%Gls+QtfIP(Md}pO+D~&k0l`nM4P!WnXjG-eJT)2KI8-Qzi9U=Mn}MR zwIuPqWAk~=kRv5?DYW5m+-k}Pf|A?(gaMtI!fRV(G=ptDEf-tSApIxQfn}%o)Oq@D zH6GQGXKsrAj9|IHF;bXd5vw^YHCmjWX1q31NV1hV=Ysz!IJ0CrYy1jOBufBkOJG_| zB&$W^z^f40aYG{453r#cI@?<2Az0+OUyLBYvgdJqx~2aEco=&B!WzEV@D;B`mr3U5 zk!NlhQec|;(MtY~_)aBg!`qfa8Sr@fxm^idy$26fm^)7PmL~|btqp)*jVJ`%7+Y@< zp`ja!t=)MNE1#u4&UZVLqeMa|^J*We5-@7v^%k#kk%B%Wlz8duERq9B*HZjPHC$89%A~12pw{Kkk&~Q!5h9^mMdyWMu5Er?F6>1 zwa%Hfhm)->H4Oa()hxhQBQuQ`Rd&)lO)qf){ktjvduzoYwW|R-2gfR@`D-f0BPBE) 
zDqC_WMRXygj;}L&Scz?W&wk$Bw$Vz`p{%Fysdcn$)r2QNl3=f8+;q_O7C$_iS*VVq z;FKc$g>TVux%c}-H$dWNzYMZT0I%J|mq!}jKQZmHp8)9l%+_2JlQ2Iygm_$a5ejHU zD=o(~^_q_7F-CEfD5p6mtc~Ke*%%;gE6ZRDZ(6{b|8WOZ#2C2Ig;j5t4_-95m(ofe21CiJVVvVM2kc%& z&k$#_RPg~jdPm&e5^Bx>bfL0fEE-V5^XC&3FQW*M*kN8Lb})O-F-M#URisR6sgns6 zSFT-251m|!Z|^_i^xCXle$o5Pm-OKCep*hj{lVA75i1Q%p{~3cpTqTEt^-n!PaD6{f7vPDlb2r}(tP17RheDgZr9)_(NECgk`61}B$}yO(z1GrSH!{-jV8TE`Cd($#;3 z9cddW3NGYveo_lBIqwp}qSoS8_r3qR5a1R$kW@l)z*EdR-^2d)Qw^ly)&e}lu+Sck z0Sow~Uw*wHha1H;kc%`zx@w<(=_jG;Wpj=?pi(FKK1&i;kvENY;PadNjDEDe??=<<>-4D0#I?NsSM=}ArN&;-!|7UOUng+j zqFWEoX?%3ZO~k?8M?|fNh}Z~-LJtrS2O}Qgio!l&zxG8sHN-+PJ>L_@xQ~GJKd))3 z)MATTHPqN?qtJL^1Z41PkVZhta(kK13GM~G-Q!f|!~6QDC&otZ|8w#ESu^B)4n{0Y zq^uNKmf#!EoBKQiS5wG73PyrG3i}Fqzl|KZ>v%bg0Dk+w-y0-^^#&aau~{112{`-_ zQ2h{4-D41tWwv&o*}$LWBPXo4F4DZe_v8Ea-&aC^Ztg{0vC<+ZMwSsu(7$~PI`;F| z;A)|GncFDPPr~;v(N;nQP-Ay`Nf^Ps|L5f%;n$S4s70yo5y5DQ1f47PHWMNc#p0WB z__NLv{hG!C{8pDghRwl;_n%w%rQGcst>CX%SuqzPp@dKV{wy;x5LKs)At`+LXis6o zm%;vRjtmEiJ~?0RaJI zu6law2kXB+k}fXuwzjs?7J1zv`6dfrqyKu}#zFL!?Noi*ypN-y0$Y>&vo1(L#^HGQ zuV6wre&Emla<{{_-9$B)`EVZRk!sQlR)U}=5DR<%wZwYRK}OuRQoV}Tdv_Bi`i)Mu=v3cI1E0u$>m3sgklc%*{?pO6 zy!T|+a%p%D6i3}5oee%2J?d19Sg`vd&Cd@ZLA0OHGdxgg>U2O+Fd^*z-G7M^0eRlv zujyBGeu&>atVl&_xG>dGfiTgPyN2v+Fz}v4Qy@ja00-l#o-c8q?|(@SssQpx z8pWr}4TdUELCi)#mvf=OPzEj*CfvRtDJJV)bmd+%`R1WQ9`Y(HiyOf|nm$U!h}cAr9SIfje2^W-lWGV5{on7GQq3OS=!9jLJiSEt z&9F1pj9b5fGo; zoy)L*|0xDY*%^(H?fOo9h*Ri;s))*ZsBDk5k+Q$+wI2UlJYCYldetpRgAIFu$#BEM z7HR&b{6|f|Xqqc3W1WosbD5EApQFYSYb3%|_wVUFSRs3M>-rh9XkcFM zc-NnVsZudMC0g4A<<3+S(98*{-FTz?ulXPj-u3gdsG}7LfnT+b4L;tw{Xqn_oFAB= z^U(zJL1yEza(J>j@T~*>m%nG#1?;>Y%!i@;l@EM~V^A+lfd3gjyN4D?;RMcy3@it1 zw}CJI;1kH={=WlZp+@xHKt~+>q#P&*zlxs{0TDL>>3YdD?)Hg2Z_3m*fE+o$$JsP0 zyg!fgFGFZ{1{xc0`@D}5etSOfaU3zg81~8kvutxRiKaGERuh7;WnD>J1+C%!-zo?H z8BN4*G!GFG(Sb9wSg+JI4404%KBtEdfNbhoIUz-hwVdj_;i#gQj!A@hZ8riywmw2_MEBvw{TRf}B*(T40>_52!8U1$ zP$BtKzp{TX*8SPneoct#g*rBiqoXy4BPAtG6=HWRu2)hPc%7%X5}xpnf?~pY$aM5F 
z)cU(FDk`5?$vs1bsBXypd#J+qlR;=5?&y$AD6FB=Z#f-!B>oU?fWY=qW6==qf#x1` zpvRnu-u~^OT$MBxBqRU$gum|R2fZIq&{C}k>E^(*F^!JvZ$(N1b$^88 z&W-^OpW4VncT}vZOD*wAnp7TcqyKwoM99vF%0a@Vn-VXOvb4?|c?z5gSLcXfYkQv( z1Y;S=adQm#Qk#LLRw%rMxF&7&pyuYklw9&QQk}4cwBOK{c75dVXrspRqwni!*AY4i zgv_LyJ=dP284=O@ab7Ye2H$^v_gC=13H77$|8VwurHM&jD>UH^2YCg2LXpnU5!E?G zyoW1Ijij(%*Q%J!hUM_q0x4{if87SGE6yw77q-S^K;H(71dhtl`h6=9uZ8dyj7SWr z_*$&9$cUAx%9;k=xstQ^|GH80ZDi;710Jn~jh?5$It5PMETJp#AOvp0f8)6@j>172D;jp?9hb6frZ9$^FM?AZrV0>sfN5(v0$IjQ#Qpc=*5m76h`# zsUbrBg)BBeK4G0n-+8(Ev+t0>9hdu41f)mgDF3?XuVvq+LaI~91^&1tYnR5DzB@ue zpXOSPp*|u)j3tA3ZkPKBLe0xh3UwQ)eflmwzR6bf z;3^<{CJespjxPljZ{00o=0~^?R8&;SoUFb(*S-mElhAkJ2RMC<^icrpgfX(XAS!Db zKQfHzXTSa)5ik>C8haQndLu3TCS>Gknt($mZwkcw#e=j)$2@*rk!D<^!(?^0wRc!w zW6$O-g(%PK_yTDD$n|>3BQ=n%Yu~FPJt}_lfQm{=hw9M8up4I_?r=*afD{DSU|!xI z`F=AWnWlYUPAo!6KQgNO8wBQkO^ro$)CApEZ!Q1F1vSVuN0Vq$Yvj4ey1Um{6C!1e z?qocLuLTwqA!FZv3$*}0K4ka1Uqb_r@t828=zcqoy3ZMINv8077ifp?w*Oo=eyW81 z>QhwVM@drCz3=6?F_lHFcU~tj za`xjpTEozU$sp$+Y~1To-|-H&%p`5El3SlU3@)R-@O1A^?^T>InlHFKThR-3c5&G7 zH}Q!gPbrpgBdg5uX)C}d7(GP^{L5whF_kXi=JBus9EO7?1ICQOW**pz`e_0T#@;ED zN&(izM_$-%Djh4@O`r-g@HTx*?Bl&>QjK<|{MC=+^P0X>C+B&cLnI}K_cT+^UUgV9 z-qDKq+J7K)l!S|Z<@uX?M(D@}<9+z}VUQ<%%InywZ!!$$-Uc`O|3^%`$m4Mvr<-?;pvzu(Dz+ zO)vX8_L6ski15F(_xKT__7{m2D*rQ$f1G0%FvcuRQ=|(hLbpOPFfd?S$|QcMty9+V zeyMv1n;9Rk;gw$mj((_&m6govSV7*n70&;@D<$IE%)@<3!;SQRgcb)C+MsnRGgN3& z%_C3!-MGkWDkQE`=(G_lWkrAfdoffZQP zycE=R_`5%fd@N5MO5pBhbRmaHb81^5#>)g0+$*8Ee@cSpv+QeD)T)Z`ckkGX9N?MB zUq0cD7*z8$^d5~^r|Q5J(+rBQgK>6Y9w7m_ppf}OtYNarlyA|U`qrYiKyiXGKeMaz z=$7-^cN@ALTd7`pQOx_Z)JP-Iu_Z14Ndo_Q%IsZ4ZM9gCojiTp0w4X342Z(~H^dLX zN2vtAO23!0zY)iM7yNofEe=d0Nm~7m9>8A+{bDhExXyMnyYcnxhb+iY4g1w2*3mr3<}}l z+y4m=?oob~_>U&rJ@A?7-~e))4ImBOn{Odd=OFwzwLW4oDXeYcQ=i@0dMyr%1T&dz zErw=Yhis0B>?&K_9cw;ek$_h@mim=}mINCQvHf!@CoB&Aez|IsTn^iN9E5qfZarSY zv{ElCX_4vrB|HJ2`#m)lR4NzjUZ}((Oz#~G@Wi4uyG+~}ze5*6X}`0N z0#UC{@u%{fUjLM@&-DEL1C)!pQ}wv%bYG0T{&@kah+lHf^fKox`c=O~eE-WE-e1L; 
zouPwvN#4GDCsT*b;s{rZ6lkP)oQ0MIwKOrL5u~N`fKYD-qW$f!qP$gSsMPX14EPL+ zkt=a#l4!9a0a|El?Rjt5n5gxm^VHtf$*z*yd2@T7Gho_sOV%+bHMrtrdN4V>So%JJ zAgi&HUK(mX_#v#!jI7=5vtwl^NuzOfnZ&m}jVf`?=PA?y?`^Aar_gb2jk`JdRE=&SwTf!3m`aatBh?Ezhj)Wk*ut{y)ZufGP2J7R$Fqnow^t?am76+x3>j&KWi6bAcb-qR)+_&@%gnJ_ zWOBZMxa8rhgd&}>JLMH!XdfwRjIzXWmAKs>wp5+Sbv$J2rr?dJ@~)PxA?TJhiViD{ z>0WRK*=^O${TfFV>`(-4jXM_n&$33XX(w+Sh3OqNm-5cGAp_Yuur#6)sKPVB|Hu>! z6ZSg(D_;;cyxWJ;_05%{p4|(rP_sll-CUpnQx%+pLd)Q0JDqiBk+-A48k6hghVk=5 zYe!;*Vxc4OsT@9%s)8 zSpsgNpvBYZ&pZE-EQi*WKQ%VYthR*lh5`8~fA7{q4rzbyY zk25lRUC8t6fPvyA`f{eCv^bCQkZH1sa?AikUcJ5E@8#|`8u2f$&+DzyJbLMgCr$$= zLcP+6TnA3Mv|wqtnr z)}S$Nz~=RG2qWPrAep$kcRgZ6wLt@UF?_L(@ey^k0hvWDoR*t=toVEISMdFBk3i9c z6ZbRpQonYPb(AaQJ_TO%e#F}TXQXQ6uJ(vryQC)P)pcsaG$unQalst z_E1$Vl!CM{PsUb@rkZ>{a$-;(bWxazF?2srSq*qD7gO1jW9U?t!|kx~aBYHIr+j#k zFio#-#`i*2o%uyrc@f)IR2ma*{r9-uclXEz+$FQalhcI`FDP|Tgg&m{^T_@>p;FPJ z{bpcFfW%GtnSG$O=Vmz1CrB;(n*l64j=Xt&P)sCH9D)}U5s;Q1H(MdTj#^52ypIh- z=gmT>CGAv#_svh+y$td0r-F1o_d3kFzwu+zf8=^DO+MgOu0F6E*SWZ2t*Kv@KQ`2> zjV{)HM3K!Zl3@}TtRwi@gDhE#NN$t=&7Dif$Z6Z{JC$U=)RuO}DJT;+e^rEpuk$`1 zWxAqbWau2Gli=BHwC?{_CuT+K==iYIYNCX~$hBPCczgC~#F<5Yy!woL6gnQWaB;)V z+jYI3QAed+yk25WF2<>6%W}yXKU)Sk%=}-7*YVamEf8GDqc&S4ON#|{;!zS{9xVK< z?R={8E^9w-P_Ze!xUH^udAL{8eTrth$zu-OEiT@K)~cw$gJ9wOcN%Vm;RrfUg!ueN zTyOx2_x|>KV~52S6V~MYbRlNN_=;X_F4{BFO3t^Jwa#Harx1ykzNgWvAVSt@y!7_^ zc%b*^EC5;&wAjgIP(#s!@SgsYs?I(@q<+C`W$=T9Y*?|z+2EyC&i){~OYiV##=Ix{ z9h|i{R8r&t9z`GcQ4#Dz{kQ}pKI9)y!Xk zhTOm%FWuMYMBH1aFf+H$_=*$ma=VM-%ejBk?TZhPszT{7Pj*%Wh+2X&mrUkuW)t?! 
zKM@umqLzBNz)Q56oP6$fAf(0Zh`2G;z@?^o?GxYaAB2wfkA7?-KvLI=Z!!sfzZr(h zA-GmIX2@=CA1PX$McDh|vrmg-6qcvgqly&as|g2is4;68maUN>)+k;t11ce%7@Tr+cPfb=a?uwUE~XEX?0U*dfz9G=3F*_kP6_Peg*7x{4IEZ;B5@cCnbOV0@wk6#rc>-~%iQu?x;*d|a#Ov=b0NLFi|bkND*!KCx< zVETJU*aN}k2bv+|M$V)1d=?gCN*>#@h~8r-qIoJ;HlePGwKWbKyf()kH%xq~`4;J7 z6K?vj2oqQh72a*Uk`f3}J!cC1p+Kpy+fQ48i-NkQcroj6bJU$UTf|o|>kze?cG~g8 z%Wf${@W~m`g1QUa(XSh3{XG<}KOZKyKQIaH3wPmDAt3%jr=s=Nu_ewZ+d^`I^1DEl zb>Yn14LX%F>`Y%)f&M4aBdOxzX7Yh38_fB>nZ4~yobyTJ^y3a*5_IbI4Go3u+Lci$ zLBn2lO^>FfQF09%-?r?Ia3Ra;>IYSzy%*IgGe$!}Mjj(3ANU75c}4m?A&9|d^tLzb zH~9mZnn{9B6cU1MT!;zcs&ejQB722^bZ#b&ejg76q7YP2P`~%0gRM@@?xxf$(((^R z>bs?Gd*t!sMtS_pLeEiEHZ^$)+lRAjp5xLI*z#Q9u^$SuRjyOawag?lVQmvg;zE{DmOhbi zT`B?|-;V@6y|OP!k6k&(esn#$$mr!kmiZ`$Q-^Y0Mq|Q3y?y(kgacD|nSXzq z0VUrb6v~gN8&IC0sjU`=&Fbyr!Rq%=5~PwiiO#q0!P~D$kG7h~YxEV)xrik&fZ|I% zzLR`^5l5}9Nu_P9&Ti;~Vui&IaoU@X9)q)wRO(%HqT1ZU@Jb(#z9IU=U-i3J(CVAh z@4Eb(YMHBzW<=@ZAQKJN9=HbDn!OE4d$&tv4#I*e6y`QnBC0LeEnUj&I(5;v$eEPa z+|K>fdM@RX&3Y4(;ty=Z>4SvbcDJSC2zIC3f9^cLlTV!ZMFr_meTEHJUXqwd)j1$) z#j%Rhent=GkLKZ9B7Vu!v=|vI`uTO&n~~li%Z0+{c4!9W)r#Tr+{wmQ9v9Tg)a*{D zyZ2_$s~cGtmaGIdN*X;b-*$D5RzVi_pS1U&c6Mj7Mbql-ecd!8aW<~5#}BK0Q@e1k zHx*>tmwJDPU!`0#wrx}npYMmK9hZB^c(^rvFHf@B8cyLNkG5^ok)mwqZUnhtx^dTp z#2wUFK{myeN7usVmTG?Qj+QU|nDPP1AJC0}SoO1lfUPr%UV_2+L4!1s3_?5tA~UoR zg7P{C35wPV&{mi-3xep@8qiz>GCPfF)k>R3txTCA+V#a^gT+ldbeNj_pH7E185b1F z5V%|xR93@>9%R%UTeigPw-O3zJS7xZm~AYouMJ*-qO8fh5~o$+VE&#pm*qzY2iGnq|n{)g0%ic|+|`*;w7vK1+*plir2FMQ?D z%7B!`l){MK-K@c@XTpAU3aAts+u9z|3b#DFtXLQXkv*)V=D^&9n_MlcAw zw_thLTy?>o*@>rFRfvr>LjdIZg2ye+V|bmq*F`-?lo*TkTQ$K0a>L53;d#8Cs-nr~ zDR&Z>*XD&rMiT0R^n&W}b1rJ`i0i7$e^M|Ph=GLKXIaNRYv?7i>SLU-O$(wT?07`% zX7|ixyn(sk;mgq(<(R^CkLuonY$P^}vShNiw99XBA8?8*oHE># zbjY9B^tPOAWGy`#rfSjodBdPBw%A|rpBxTM;bq^(PE5t zP_U2+eX+)Gt;p-yXAN?o*A2sNa(W&aFBGW~a4I@SY30eD@)EB$xg#MC5}ILpUpHj@ z$B_58jv8oJ!BH*&?)OM=m97y1=#1{W)K2rfj^)Kn4DjQDmL`@PQq`fQ2|3{_|9}uf zG7MJM;G=`j#pW0D2L!Say1en1f=AT`tl75D*E%uhf)7%ks~7(%14#Ffl(9YgmeW;W 
z+sPfD^6BK6t`ljiTw2m1PrH-K>UBNH{LpGHT8+#a@mQ_RaDUf|heor^_(9eJ zS)KO+=M>HrNOd?DnWIzGy3R-JNjeu#yC95)j!l_)k;MxWL6Yy^opF1fkybD0%dO#4 zGVr=dzjr$|z3a9YXZ9Lp;A_}T=}yHdHf2J&T3r6RWLZpM z>Q%yCnH}`bS{iCa@%29?)d^y6klg*{21x#VD*Q>E6x zr0{0?wym=HOz&AH-((~{CBc!`&Qr}#pf64c!0?}r*#iN7A=ezNU0VdK}UZJK_Q z`RzG^M3TK4@^-YtVk*lyLuvT=TJ_>dS+2CjL3X~;voDNK-ks%HFOY}j+D;sq@V37{ zx13iGp3tN!55vW0R-}-xG#$=eY>qBWJPTP{O8Jo;$N z(n<3h*Y4yn3)YAeU9fOx?EG`>$_8K_NrLNIR@FZcsN-+|{8oI(+@+ zVPiB0SP7+HSmDyYJWVM=>%zB$c|cEs)n1$4|wwa*@7rKtSat=tRjQK>TZ!;Ut>C`k0@nW=SYPrvKZscj>@B3oxb@xuq_be?kIVf|U7RcSW!Ba8B* zIdKl}JszX}Qxe`prhwuFrZ8ABC#5@W@HGvLTzEh~opGud$l%nt(6={&U;^-bWFe`! z-X6tj@-uHTme}{P_!moZ+X#jQv)gp+^v?4zQIA- z>##8!ewVJHE3M5wXR|+0PC|&p_`R4gicBU4i*cB6u~8;cA$C+o=g}lpbD^cJjjG4$Hy_9OHEPfJrQ# zYsckXgeVye%+{ZyzLxyM;C#R^XB#1p%?X7Jg2C`uLB0q7Uh)^vzCOkA@nd6Sx&Usa zD*&LpQXC(4&;j`E7z3Np6oxjbW)wSE4Ms9kki!<-R?j~&rqeru=v-bZ*He^`RASH{ zEvkM$r=H*~J&8xWp{5S50MA6y3J=`t@%zi=9LTJPa4qz0wm#+H?`P}?TnIIMDAJkJ zXEJ_@%l~)^9o?u)ApT@=rm9q)=VG}1yc3O@Fp|tezvj05UEh?Pu_jK(J>grl=TeE0 zdC|akU5M_n(ZuHEzxF{=s?ALyV~<9&mkQnGFP{oQN6X|bcDAIJ_IxmNNWQ9|GGGB5 z$<|Mli&wx>m4Uy&TbWn`qXH7h1XcFHc2MwAN*8vqY^`r7y_uHOMWg~t(Jn&*kKRyM zyi4?Sg3Vzi1Q9xzOke+d)5MZ|XrVAl45DrXzpnn%r65C9E33Tp{r7n&kk3m%x_}ht z2!NQn$n3wcpEnE9pdWFSJQ24{sluWh_N?6^yj18Ax7*o@CtV5}!lFh&_lWcP zRw`Gb$q6tS7fWB$w*Nha8!>*cvStJKb zFN5m8&Wi}Trih+cn;g}H{io~2Z{jh)*Bgd=EF44mmx%ig1K) z=fKzb#{@-{J>H#rQ<#3PM5`EUI9$%{S;Ad7Z1Zt@+#w#Q8_s|&@T85T}@q2fGhk-PCM?5)o2-gklcLe?yre*m!1CZ39OyIlFW-M zxERWWFplff(JToizB=dB6sd%cY^J`~0N>2>HRzhK%OVt%$kyaqW}F8peXtU5vm6p8 z+$+r#W{fNY>J9tR5#iw(98rn-2IeO>|m|rl0mV1F)$O$5?Mf2qM z*#G9B#>Yp8o7^hL?vb;n3)+c*?i}yu6MZay!e!vr2St4&{s(OHLYZHe1H9*TG&Rys zr_6b|>^tkk#!7-KD}xPTbsj4;)`K(RseRd%&iN({kA;C^7VlY)f_JK6zOP{GwQd1O zPBt)(sX>bX7KoigIC=H8#qoaJ^OQ^X=*55EyhL?`FhSKNrur7OB=!1mN^y zdqUK-IzMSI0NG6&gzLRX$p1DI?#1p-Mw?on1VJJMLCB7?px(n>`o%tNF82_Cyc5nY zh;fQ1WcLj6QGcyRyuocBnDS4cWeHugrqkhBQurIAbIupzJRj?m^$U zSV$ZeGzy@1AJUXxI3gY%*tx!C;D_=WAy~4HMvS96i0!j{LhI2%sr9T4+d@gEzy(~8 
zXR;5k7}Gr||8#XWL$R#8opi{QQ}(8zRarI}(+6fnKyBW83t?MBApA3l%!{AE)&?kW zE7^xZ)6T>_zhBW|iw1PX+2e_U+q_ktGuQ8*(>$y<><3`Sr;t>37E`x(!t8{iK$QH1 z#pjR_blRT+zoSublz7A{qeA*5~J< z00gQ}D*4#+f`IjSrejXOZ(9)N%SEy@xA&|V?Oko!JKE**^b`s&mAF9qbXKJ4zVK+5 zTA0iDy%r$Xu6|{#cA}N_)f7Nrx)sVZu5*1z##{32T4U$wYL^tRj41yJQH)iNpxyzB zo8M88Vzg0}P%>c=BR0ZA>V3eZwLMCl3(PPM?=YGE^$R{9;9*|9k_X7PF)U};UxNVs zV-6}wKP;@BNY-!;y-PYB)-&u8OM>Ch){ijbHEc=yRQ&m&yTCS||?9uNIcZqRgST$3DgOT?u zg!BtDXE&LR>!h^oamYEQ=Q2qIZq!+E@(S;m4wzW^v9OE=vI$p~cKQ0k2K*w`B4X6JxD1gbetY zf7);3%q&6G;yxlM-KG7IBb0s!@vyTi#BD_3Jw^V_nP zz<3R-v`D+Ijqo^!{~L>p#(?qAe{jq0NO4`oXtE(04)$2gi?td%Gy8KYdzqpDc0U&MbwDE)lOh3^lpk?RF1@AlT9`ZD5Qt zW8E+?2H!dIJu<&GcWzBFdp+C>sd?oHg3iDm<>3ch`bs=S<+VIQyp%g@_B`PXBXFWQ z|Nqi41FXsiC>a<@tHzcil-S?N76OH93$oMTp4QS(H9ugp-(oik<9 zImF&;$?B(}p~3Lnct-E&e!bfhm(wnl=d|BYQ^6&vfnMW3jd;D~@$1eA#2a_y8gv^q zAO#;UW|YGytwRbl!!uuAKLQQn2?F$fO>t&8A+O_RA+W{SMurd0m-k>$C#6@Eas`t7wiVv(;s2( zR>C{aS`uhIlL*R^Luo#1H9nwV(mv;e?pjeKs{D99ZnJ&2#3YPOP-zhw+!((=ec5A7 zM&sa(b_e@NyOyQhY{NR7Gti_G*dTd8b&kK_9@^u{qnr}-h)Uj_!a*(S^ZI}AX8l=@djhSSPTvVMWYEdX*}1w*th!Aca@dNfym0%77N?1|(r6zipCgWQoNCYW z;tSS;b{Qi`fD8az(NEM&na-qo{oG*ngDoZr66{!Mt2ir0IEmu*|z+h14`6##2? ziH)S_xZ>jZ5KrQpD?a9PUcMQ{9b^@0@@i}lN7&-j(PPE7vD5Q1OeLFam)g2XVD9`& zZc^!H*2agFm|GbFbWkF#J`Edh`5M4tqa|~@hL>p8N$v*V&KK2dpNTRga+|z?sFvnL z+(%lMp!n;{4Yb}4?eR-!53O*@sHWz8D-I?9v-lm)QK>RUxBq{t1(=#kZ zj67PHo`A+G0u$P?L2{S!;vOMdaHaNnuQ1 zh76OXw4d*C@ zRDUC62hs)xRf_Xee#fuUV7A-K<5s${7<-kC`=DZB^V$+YDZj+|19}Rs1>;6cp8E9O z`^~K2FtlhsiNLyVYKt>Vy=({b-}kDY+tyk#k*vcy<#0n>0jG!lxX1#LZYogK?k#No zSkxBD%M5zon9Z|nA<=lF5!UH`UvNyyls&xm9OJ6oZ-FP;PSA;n9OnEeKDU{I#*p|7aMsbpm3~{jY1_i z`kP36_ha3J)^MN~A=T%k12;_Dlyfcv(;Ioc4^0%YWOy2etOxU=4dvA;WBgd9<4?u+ zyQLW0S-~oK4tj3Yebg-GziH+#*Xh)XAPBwN9LZHdSz-$S*`eNG4c?bn5XfS34eD$< zZ(ahrf~vcTsVcK0s!Z%soIZfZ|h*uY8fzeahZI$Jwa&z#I{nP8Dr*j8z7#>w7wWqR? 
zsKpUS)gw_dhl{V(N}Zsq_J~A4_pvyQZlOyn&lWK_@SRW+?-={XDuQYC;MHR;rO9I3 zF1hpM#M({ceqf^?du+T82-f;;R#Qawi~`OyX<7{Q!J$```vC6(^x`?a@N!U)`L&Du zcRvbYxw+uCUYIUuALrt;ILGA=_s22MJAK2XRU#@>*mc}G}}=6|(WUuoOTxtl%`jPw7P-*{i86kqH%D`cpy zr)RWZb}h)bT|8Go&T1g%^C-aw9L$#EK6P|v_B;3G4kj^qmO4;8> z>5ZnFT;E`tK3yLHiI4Cv&0uA7M~5riKpI=GlW0dcbz18mH|n2)y4yY>!{K=10Tk+v z0bN4()M=&5+3tyq(HyKhmb@MD?*_Xvqv^SI5^{mDSp1@;p{#N2&^`#SIGRROFf3g z)S}CSf&{q5XoN34Y?DjlZ)uB@9+Q8udCz#@AYFVzm9$=|lT!QKG`k3EH;;*^wMwYM zZ?Tj*PnvXPwQRm0TZMN(3L~UZ4pXlK%)_DGq15_l-v+b2p7L*8RaN2by!iMI9*BYQ zQZuDeRC%kpLZGWb`k2it?whiUL{6hRKh~Ntr9zD)1c2LFeT0)A-HMOyrpC(6X@)jx zPBNA(WUN>L#I{|5^JYw1kE=vCcG%>nI`p=){}jkdw00* zV~DT7$7S;4@|_2_xtYy6zPHDbMfH{a4mBj6AB;g@WK@72>03cI45aX`dwy{DeY%}5 zvS$9!w0nEAIK^k5kodfes%bpYuJh(ByIW37^?>>l>pyAcS_?DQeeF?ZS&1GK^|Ow= z0{oCI{!u7t3D<3H!iS}t@0WqtUc>4ameM*>cRve)w;zo3JX5|0sW)B&CR|@XKa2{~ zDp&wmgdkGfy59kK1~8nV%a-Koe%<{bHZph~K6wR&Z=u|U)DA)VDnbGzrWe&dB%^Y8 z->XAM{}^?^Mp{>OkAMIfs<1IE*ux~-yuG8 z#A(eBa*N9mbHfggiKc@)_pU3rYAXd7u?{#1obF%lcsA@4mH~~1{#S8)yon;MU8l1j z1{sG76(a(lWp7E?5S;x;6nzWfg4lM{cxO>{0h-A&m}25yGD_K*Mz>tG9S#W>UK`jad=UUc* zzLrXtPB@LG8iM6O2feU~#8>QSR;2r9`8=9NSp)ehmj^q_=6flZ)EeEFg%c{|D9?6v zZ?zq%6=Pb(M#vR&hbH*;ekrAFJ^oaPWD?rh3e%{1-hTYy*FeE;e`T%rEBWZ1>Sp=n zfWY(xEwjbaH)g4bOjCOMH>(1yWhx4gS(+4_`w;f+AZf?yo2!Q->`Y_r(YQ<`KMwS3 zq=n8_>@UQNd-`Z9L#}67gE9$~Mp>!;K0~K6bIA5HVoRa1YNELFUE>wu0fk!v78M}T zrTpjC!e_bK0j)5O>}f~ITRlWFnUthEB6X}>-P~8@o8gB;EQOM#)!8B5O|!|{36<3 zxB1q^&xW~>k6CHyU@my+Sl!xA*Ov8vq(h2M`e1F4q0T@|fVL+oVY563iWZM&h%mT? 
zlFhi!2LG?W{~@1N8SOd`yLDW^(2*9eymwx^z=Hym7cwe}2Trwax*+`vY$>f6#H!|IBF_dY_IO7u> z&AR=*EGBxtoE=-7g3#g=L}muGvf$VIHcJKygMR8lMZrp$Gk)Aez9$Z4LO4xCF^{xA zs9o2u|5jb|B#-+6I2`ZVYzetMyuRuk`ubk%_BjPHh&G|(Gpob|n+yi7-~{f>aEd?D z>6cVgc|c)Qtm-lmfTtyT9o_2aK0SlR+Zh16s-3`N^!RoliL~dCTH{z6002;v9FWv& zMsa6#(`Y&cBxm}yUazKN#B{TzR^H1wVGO}F?i{K;?z#ihXl4rmjs%f?t=D6v;Z-=HrfbWgyq`L5WAK+vK@WB)k6LBJy1sh zftS+hhg(f=;oJFX#*#}_uA1%mCRWipKU&p_@0yjBgI1e%vs?#=y|0t!taId&GwX`> zDuaZo$_P8dn5HSbpN`*$S#vMDyeVc!*tuQWJ>N1)++kk*5r?w%j;A;m^;|m+^xL9( zNksLw@Y<)U2P^hl34w_7qcpMpYmeE=?=$-j#&~Y7`}Ru#v?~VG8^i0|gCifjN=meD zXoYK#%Vs}4}nC-%Uv@*TA7|8pzdx+AJO>XmQacvDU(yfR zPpEUsC`%ilDx9P}Q^d6(W23TrYc%jP#CHXJOK`B$>!dV%cX`MEEC-CI>hJBB=(F70 zQHgGIDqs|GF8Mr}YIv1AS~D8R{7e3@RIW>a3{GFKJ?gYj)%n5iISJoB%qfXi4mzh= zLY!^{WRI0ekDc+g0{w?2Z#j))T;wu8nQle4R-+u_bk!bTr5K=mDu4QY{P0KLGIMESWjL8~8U)-AeN^!CQHR}ayW|5M6>9RihgFnuFq;}`m`}rHyaF8QttHH28ejjI=$2Fr((w|`r6_N`~$>Vdj#z#jSp#zpl{#7_4BF=;{ z&S%XD5wCs`qJr5qPR_0Af9Y+}S8Cq1P6|iSk?9>Cq0GDYa{7% zr;+>5JJZDuLEV!0#soxbEqA3Q+c^#M6C2uN{yZ+Y?xXvd*w5rd5kRC!Z*q57YhHLF0kWMA(8}W+J zz{sGVqRy=;)Yml8(V#UVVx`@+^IxDmdc;@dP13SBUN^b39fGWwNDa%M&hNk+K0+5% zq4rutp2wNjd9JM>U_ksM=wOn$ky!5=%rO zBt#RXmxs6II9^X~Yma#^kA81?X~8+qbMETwhfqVLPDC(S-3fEMa)5`;!LUdK0gjP6 zJbM$XdK1O*DIthba3Sa&C?M&JB|@$y;=V8CwnES{h06X6tfW$E@eE|NJb;d6|5zs5 z2O0pR_>pQcqdn8<9+Yu|X95qa70Lb*YUXjZdr$u$+s(~c>mK=G==w=obxw26;`r(x z0<9?77OoYz(?buT=i3G7bxKfQCP|xugz!CNoQ($QYUeI%n%6g0F<2^YMO_zSbuDPM z=k3D^_f$abB|sC=TSNC$UewXh+RzQ039ECcR=4L`G;(>(zOp_NBTlGTF-%^%gcCIM zE#-a1;7U#Q{5!Si%jQTT>5CPuI-NaxnGuZCZL`Dh6Z%sz97jJNbD&vh__yV^nF6IZ zSvZCWXG+>y+aa(h_}11RcZ>c+#EBriQ!Z$eM81AeK3Sh#nlL0Dq|q`*vN%EWqd#1e zdzb$;%U_03M@dL!SSLqEr46z5NsDL{r9t$6qN&gJW6l)ik66de~PqcY&dongb_m8|2ULFNa7A94^K}(S|Kjl zoSBI)DtWXcf(C3g>Y7YH6`277!Sv&NjUDw6A)5~Q{u-&ZS>m#L;9KO|#+3Oc@&M%3 zn#f{n#RoQ}T)X@H5^wFNN(a)xP0Q{~#aoN!qkC7v0A~zkzO~=SSs&zcGj>RGcP1lE zaInJHZ`HgblrAZWeM{$xj|zp(#gNEbwpt`_yr8p3etf0xk2qkI* zr9dzzwSG}?N?lD8B~RH!;lfB?C&l#9S3oI7z8#X93C~$0M~c`$PawER~`Y 
z%tycYbNGjSC&Hb(x^s5HXLS&~qw`n=`f1Rw`rbcy9_wqdC+Er6e)nQDh-XA#J2FZNc z$w*&c)(LMVa$t&1#>@{|qXy;K%s-gdO``>+G^tg*UWk8AT%9*UGl+{WxtVhDqNnSd zk;DWr08bUGmZ}VEfd~@a`W=>#87OSSs_gQw?p=(mJZC99a|^`< zZ0^#cQFD~;NDQU@ycnvo-q6(UG-s%OPTP7motCRQ1JbhOXk2(#Tq?Q{fS8J zay4k=(rnRa#PP%>9NxKl*WcW*Iw5dk$`~V+eMd1B5bBXW>#WwVv^c?evaU+3Ki8+A z@#ed1q9-X4TMt7n4h)RWU0mMaIAQS53}jV!5cO8C^c_zS~Fyrk*?`AR32zv&e(fh zl0x;y$#Ls)CSx@rH?n5cF4GCqfAJpM>Igeu{eK&-KeMbeybFo0o%@4lSU?~Cz7zJ) zxpNKTCDpfMtNfi~KuI&`tp!Tlv4W?r3GM1=H@DuT*ss&2aVazZmCS#@vbS2dBL~Rz zbFvc+=Vgf*Y}B+quN~JX3vlBa-E2hPM0&dDhQe<6Pho`C{RAZ(Xzr}#75dU$x5^af zGA>+@M>FJ%^Wd~rpszI)@y6xd;WzIA;%_6vnJ`j+)>w1~S2|Vywe@gSeh11GxfJR_ zG8ydtZ(W3Blsmqp8d}I!EUFD(rf8Xqm$>#~Z3|kCw~K#}AnF7KpKbu?&=Lq*q8E{| zT%I$T_Ij4p6e5x&TwUD+0Ntvm^Ggp2f`P*i66N>2j9M*- zy>F+pZ*E_a+g2?z9_zl#o5%@AtTL6< zCNbbI&GZ9WY}?3Mn0r;(^?R*W`1y_co7=nR-$#ttj1u~NW7m3)gpuN0wUpLRE-P+| z4z;LzmP)NcEDBK@KRs=P*bG1$R`M~Hlc88F$S5>OMhdw+5Te}14-@c9v_95N+<26g zL=+>=3Dp#OPSY}|y?FXq7bKm)MRvL!WfwD;ST5qhnndoDMA_NL@Vu*$BL+3vKp0+9 zoN7u2(ulK-M+B-H#0p6X!dB=jM1l2Rw>}S!9SBFOiV<}yP+L}eJ4$zRZ+txO8A=$3>trcftv*#zW@kuG3b9=7Wab8JDzSm-SK&y(zttN=vKJ?e~suoQuFrpVNS9+JAgePUZYPP4mDGWYk=s&sYJAsS&)ZoLrD9 zzH<_dRy#Ot{p%ab)m29cyK~8N7*HtRPeiK@VYPZZBMsr=VeCiIZVDq7R{bHECV?(_ z|H_KpwV#odvxc1TKg*H5i1$DFsxU|kh8?{cgKUcrU0KDC5AS`dJ5J_t$W&e|>+nQ% zYB8Fvd>qHG-c$OyFk^9ID;K}k%pM1G&QJmZR6HjBl>@hUt-Nq6PW2qMMvLm3{8F#EwI~%1Ki>Qki3{)0*Nqvo)#2=X>*xy8#DlBuk zC(WD({>s!*wv4)Uc}+|$=%4NU1#EW|E*SA)0CSHn+qETjfmsqMBQ?&kY`<`%@bj0?wn67yQZ1`4dkhyq3BT{y2rJO`~M08So8(oyHZZ zRiOU|g+i@G%U&aFu{YkD(EMnRU>IS(B2Unveq2+<;zqaDNIq|<*704&z&!700VnmY z2HN%y6d+@&z6B||^i~&1+6kPgY15sxR*D0^15P88h?V{L(Fdm?;7`a6;D>$zKTG@@ z8A?HEh~MY<-jyEzVH5SyajnP(%0>7Id)RhyR%r(pN+?fuH+T9o`*SmK0L_wHl(!4Y z!PF#)_9~#HiR6FUJ3YPO)@MSKs4xkrRXhikww64_Y`ytO>h+@5q2D;Ne>mXsTIT6Z zvW}|Pne9WHt>EH@d9#eiP!Jtz#f$d_&WLe2(?N~vGqk+a>_w6moI3;QnuS!K;s>AX zPpRt#in5~WmV^e3+PYQ{$AfT z66tkvmoxPnumxeU{fJ>LUcC6A+0FqfVkMpFq+BWDWBib6t#-eA?UN8zc! 
zOQV!#I0E4t+V!mOEnx0r@xI9o#8sv1#8t#~wk8RnQG#o1+_*^_xTMQHch8lZ5tf%# zwvutgsvGcCRpYII=W$(no4Bv&ygzayPu^+%{ix*UeI%6)#Fz%55Vg2b7uAzp1EgkH zpV=Mh^i~fJrSf{uUzzDA+`qk;p!)5bD@wUGJ@N8FgHYC9keksbz?a7kTSyX^UC@xL zy|l~R>MX#IP>qc;(_3zW8!{pdI+$Xt7U2Ng#&Hfv%8nxmD$5(AwbDA7v`_r`D~nH{ zW9pLLWLwo%Ld*IFqw5`S#Op?tu0Y!_$!a|NKng;-W5;|V3V4Lp_E0jpA`Q+PSPL+c zC?7K@;awn^KJ+hz93k658;sMz>?f(j9vUVs$7&egWr{X%Tm-oK2j)2mxA`!QqCgjk zKDF5X(m=TS%zCQFKOiPjt9y3CKlY`DJm4qT>?pDCmbQ~?1IuU=?eV`-1wx4cxaVe! zqOeGTHqC=!g@C8-nyu*0>gN_%(7;cpT&#J`br8sc1@dHH`42 zN=0_;a-5+Y^P`@}FOsz8hVo%%U3x#y$OJd9LwHp{ zYoVBDNIY3v6g8{M91-qnrBT-LQ34H!CxaN>BK1X}>!P^~k9~68?u3SU1ajkoY#mwO$ zQwe(!umi3*zG6!{Q#}s36@TooufToavC=A6YRKE1Ef0U=ytjQt!0RD8k|VjAX9jj( zq*y|~ZEwDR`+>%&cqR)J&;_fZIrF5dYH7ZPyREn10)!F0h6)RiSoV?4>~?3{R*Q%- zUkYsaf;B(n#?Cz5*<`h4hqg*HY9?>N+6IhI<3`e{ zqP)y9l_^YI*V~+T5?!1(U|_>K~LP%{tP@ZlsK)B z*|)^(aCu&2U4;PH?~{PqHTDXg1CnlLpY$CGa;2J(v`;{boT3&n*Kw+e_$|5P`e-o| zPT!d5n5*RqP@^30c>`0HC0glv%dB3)Po}1L?FK_y)eN82vl#9MyJHVRX;3QjFwd0I=d-ns=XuES*-yFw8@li3A)VBlDl^1L!U`j<;_iR&{yqD!it;$ znu74T_xp{GoaJV^tZH7{DBNK@enK7*lxRU;CdQ?h zEO)jAyTkV%E|v~rNa{Q#UrS;l1u&mL|6I?TG}n>YVx~f;*!D+C0D$E01*t-Sw}*Vx zem7;Na+L=P;FQQ|7%)(Ie*R4cd;!Id@J}8~6DEslCGeRM6-e+q2&{6CIkNi<*c9m4 z%kIWb7~Z0y9+}aP-Z-rzW33`x(`6ZskVmjHK@?xHwa-y>KOC>?0Q@34vEtID>vS3c z7gMmU3JO+?NEaHocqswk8F+#{JQxAZ9oqe>O_7824wqP6WwD$vOewz@03CmKos4tj zB@n9pu#~4*J$}ZAlnn0iARk#Nx*Fr9=ZvNlL2#Gu*G} z7lAbI-dPiI^iTwi*T2_as9-M`TL}fVwi;qGsu9ZNBI0K|7ku$dlpBhzUMohqq6+je zjd(r&iO3evCQE8QnE>(5Z8DH&;bcYe^!~hL4)7~Z0OE35E`;ERiT&@nMUp5{rF{};}yW-r**G`H0qo;RpY-l{~X^p0%Om$CO;t; z|7;ZhmWweXT~T&Xv{=SeO|UGy#apvY8D`EqOqnZvGssUkfL|<*l0qQay-Zg^clxsp zM!gxwunHb(;^Wu6G!Oh zZzsG);%MP|CV<*ui&tK{cyf^m^`_kDGX7rjaeo{+F`k9#5{vy;dQvdX{s{|Pz&%>>2! 
z>I<>nDmxHn6u18oFsFgraEN=J%8xl79na_+oOY>rH;-&~6VBR3%>OVdtkO0}wM-cMY*V_LiyxOFGOyO}g)Kk@M7(pfb)E0;EAGIU6 zGgnCg3kP?9W;5#ipAiWiSb^jrraeTT+KixqjHYyZ@03jT0Zhkw8bruAL~-H`7e3Lv z;JkZTA%x!TShR=pH?38d4*1X^R>B=1FCMr*quvjqlI+-C{%?V_|5TFEy&X)_eH^+kYg#($Lq(%b9QU<$DrPa6SStrzwD-ngUn?TS-_K zh`@S9fo}^dB9tTnj&Zj_FLNRU-^5w@0_F0r97R+Y@_^tTfTrrgV&Ls^Au1~wGBhyU ztl|fK7#vii1nh@7*vu)L{)vHrT}k%LILf^n{rI0b-E6uRXUam2`U~pXEmzGoHV|M) z4@6yYt-cD3m4t*x?&iYdOJ+ds&sa!r8N{~~2n6%Zr765DpnqBgzQPFr7l013fdpqx zSyi38XAcAazpu`dAn@Ovx9t9x?}+zOd^LGq4$P0C%Cf~gU*!;m_s!F? z0=6p+o`8`YpTYUXCe8Q3%$^xKO(}_vSowKLQ`5UxoQe`m=GN?(%GS&{yOz&~Jstvp z>FOd6`Rx^CyL&XIV?=+3Gyttz7@5F*XTSj>!=oBsLHxA-fS;BJwJ(YaMTzbvT+ZIX zp~^P=zq<1R${tc0l5Z0hUD1J|H?7E`Jq0$Jr@L}FAG`X7_bVV1p14)m-sIp8aLNFX zeRRiPF0mMIo`E`GVhE0`Xcu03H+G*od7fuWrfWwA_{(A)J^xduy9u`7%5ZA;>uIjP z-vt6x5*)S(NZHvT&pCzg;tlYedl6KT;(73ZyiDLEc|Q2APcNq9Y+rO({(I02j6ige zBEL&wU1QFnI-qaZ0iqo%yosp=C`DT0q!v^hetAOwKVgSQDj6|6fR8+o4aY3&;;#>H zyiPh4gJ%#y#B;%U!Q^SwwW1x8B=e!FQY6g#uf<7on$dfkM?8`XGx_dO<{bgYVqP47 zqOef-=Ojr#(Q)3d|DSJ;0z~H;PT3IY|1n2Cli1w=GV&Jk@4Swep&9r{pJ*MfVG{*z-H0g9nUQ3R9ff(HEA$VWO?MG{1AwER=JP8K#eTGl? 
z6G&i=zZN?uEJlsWaap$z(nmC!SPoXBVw=J*Gm6NZHCd9k@HpNJ0r(WAhbHCidVuVqx z?2is+kr~SVpUJ&wyNB6k>(kulA@XywLwVBVKKH@{r&Btr{oD&`%`5Fu2Yj zb%^t6mnl2*NN*{?C!V{VTo-8ds!V%er$9%%?xEvAN%uexV!xW%ord9 zG%jc4#v5R@9LWmeKu%Esw!BnE^*>i5Mnpps+R;hd%A%2}M;sM#C!<|_ybh#>w`sz- zL{{o=A#yd`=Lz-ddYXQHiS^&c#ticy)b`-ErUKyuez*&0V5wOj{H$?(mpz!YY@7n+ zFv$tbZa6HI!92F@{nK&ntiIU{og@2zpXGzn&&di>fWFRwC+;aU_*KD(K;E~m_T+sv z{F3KeM%a4kTT?`NJ}6j$n6!&<=onMC(Uvpx#+I>q|?GslKO!FX1IZOV5zozOeq8oDKYmh#^>0^-pe zk+$J4kBMwfCt=?$I+Wr4Dpb?Xrg5zpy-Y2cxRjU)(P|@?N{KFbrI>xa;ElTOqavv$ zy}wlyb*ui`;u9vWGSfg_Gn^OdloWA{afIVZWmutcl#J)L98>|9=%tqp0xR7JQg zL7ZRzlECgxxIuKOA>ST1d)F6Ci>3SKGSzJcTOh5pIAQqv(XsKs(8NbRdfkB%cGyfB!6S14KL5_6kabhOrpjtu$#DoA+gj5Xd#5 zKo35=?u%s@^mTJsDP%GlkOdO#DAqou{$~}~M%Y>4JFyq+d+;I{+-UWF&fJWOn+Sx} z8D!j`iU;#O>VKE$={z!+yCjDL)$~|lZ~t%_nuOPQ+d{;{o#Vn8lhA;9+)@TaOlG#G zt$5uI*)-#5Ws?=gWrZ66e5cT)y@tAu9eGX5Ys?b9sHXVI49Tv*$10mXfzIZggO`<> z75wBSMqpFX(J1bjZbw<(S(8p2I;|KEwG4HHJ(i<-4l@>#H%_I?Qs;ch)R}xZ%drD3 zD#|iWbBy39EE(-I!tm#hSTc{F>2sx|jy>fmzTF>sKi|f^vEmcWy-Loi!kLft& zMET_BeCtksiAzekkE@p7#vPzoC79|yeLxHnSSfxk5xi9*uRg|0t(>CJ@aAIXL35uu z)NU40mm-r%@KsIf(;??4le)@aP7IO9LW1MRgh6gp=5PSmhb@KXy=hj6!yF{~*mSRffjg7~4ZAbUIoSx+c zI%cV+W^g2#<(5*UMKTMg3;I&4y^c`7jFTFdA&@^v5-8Nek@7(4-yF~hQmeC~(W+!o zvQ|#E1N9ln?T;PmkM#WRD?%U-AX$Ez>dvc8f2>Kn9Us$5UjX{ zP7iBKZ0^U@+VB@5dd%M_Gs}F6len>OM0N-d=yNwT}TFW0QSLe&s_JB<9#B8dq z#xvqe?-TBg$B@>t{bv2^G`*X)x1)~~EWA?dERQ6D%JK7L>pR{!8;RUFi35akOp*Ez z^bjmGE1gy$1$Rl-uCesNKzTNA{>IDG>(jm~K?h+AxGs7kMGMu)iq?cqD=%vapxd!z z{&wnu1Z-o=Oi|cy!0a&y_WbbZ z?}Y&pZo)2xb5fZtk>tiv%C|K_JRr=Ym@qA;STreT#|vT;K6RJ9pE=`W|b7Q~;B z-xd9QfBzY0M;T^?eeV0*d(S=RT&4h+svzTK%hqr3&SEIaRN<9(ITs^&iXC?hpI_ixe6Rb;6scMtaKTBc_j z#%TP?#XLCm6?;l;v0%x%Gbng4TE$c?zd;yptkr$vWqcc$EQoc$xSMBy>JxUz9M&{@ zliasx@{WKO^49IfIN-?XJTQMGVQR*Br|}lHHFmZ6r}UyO#)=O4>N*T))HI+k>cF%9i*p8FK&mEEIe2vX&J^>| z0be_3JUzzIs)sCrn0W3k(j>TUJZaaQ57L9dnI`dM9Hk(W3R6>P5Q|BI5JRPgaBBWY zb7egU$Ow~iTm#p}VCT%)9uFmd*%vv0sT4Q3KMnbmyb1qpF>>$npAgiHv5aBj%yD+L 
znfyqiOfE>uoP`A{9EJx`a{t6pDW;vf+eh0`s0H)Ah@XtL4~mr(xG_V}u0I0zkTsXL z$DpwdjJGz+-A5Gth~Blq9!y>349nJ>^v(cg<=;>Y=?(z%&O2P#tZ7q(yHjPLGWBxW zV6pyN{XEv&|6cup?qdQuPl6=KCxv*7*mgb(PBFp-jN3lUusCH*Revzne(knvcJ*_e zwMV1H=!d0f?@M>*#4dVfnR)JhvQV)gW6XB*(al8q6>cU?W}K( ziK6{pb(IGFPM5u&OaW664!%Vcttwa~bC`PYxSu^`FL3+$s=sYDhacs}pTxx-|z zm$quq{bW{WYs`e>CwOu($*Pqk>WiTBKPK&7stZqPOdeVE0~d0!xg$jV zk)2p|YU^t-GU4E);UHe)?F z6}jCrzTe|(6>eCw=)I;uum&GKT`&A)$?=e+vo_x*JcO3MmaEWhky7l1s!i$q!=UZE zCcKmO6o#lW@%@-O%B`m!7TA%$xX^GsY$a#2)a9-)xiUc|7zb6!Y$P91*-C*yTza>@ z`tt|%TyVv}L^GS^@N||afjZG4iM!{f9oQr4qv;9ia)PRZPd^hy3B{KBvVl5$3JxkC zr>7jzfC_uo%a?Bqabi@81--}e#VXM?W~+S7hS! zQU$Tvoxd(Ro3Y}0rn1jtd0>mp-!*ka?^sx5GdTH$41E~dl|4dfnl%IU06(AV0n<%a z|6;C1bdfmTE)D_LCx)wu0d>Qm4a=|7YBf0~lJkD{#G_RkVWq~gRX zQrhP8Kh}44apXuAXR$Ip=*Ld9j+5!?EtMHI6Vn~iSxPp!UK-3)d#CpCT3*?b9+^gW z@ijAmtD?Froq8eo9{QU-$rXA>p7C=wZv~I}kE>%jq&&5X1*=d@mOoUBd%fQa@0}%pnoK8PnU?Nv8v)o*AQXhD-wx^$wj6B{Dt&M0X1`&D~#ULI3ZDt$=b0aeXcX0zNJ z!RB@E*j_58?DMvr5Py4$UNYHygFyz38-_hOH}FQZTfqbMWF%h-Vj|_#yomk!7`A=5 z1f!sJJJ)q;i5g%~3kD0OTn_v3A}4b)K72$FrY|{ZD7nngeZKcht#-e{8PYpZMy?so ztaWDpP*BJm?fV;)Men)wska=#VE-Wcbc^wGZLxnqiv`2nj?;0e^>mLlbF`+WbM|zj zyGwlIxM|_`{PiKenA2N7^7BS?uYC=bMK^DjS3vvoYoWXgMb=ZhA?B2$eMAkX1m-QaJFZw${zO=L{5COh7TMa6_9QsGYysE(>gy`86i zg{rn!?{HUT7m7ox`fe`Y9asHQETk%P_V+31spixifwJ*Jxd~c75tl@Yc(qM&Pq9zu z`E{gI5Dt9UArGVO^#?!lu2YwEoj`XbwLKs6yT+lz2(QhHOizl%DCPMYcYpWEu!@-I zmn-2|S?ymw@U-&OxR*3MFA>tHSN@4zRS92K2~Rh9x__|BUK$yP{4xQ8j3sPq?6w!` zb9N(EZ9PxT#ic-f(OC-Y`dpHP_8B9V{{jk8N=Ub&&hyINuL@gn9g|A-=g`oHHh%}1 zpZ~2I1Y&N0G))kBN5y*}cb>oU*7o)}J~3|X=JYQH%mC(ZI?keB@jO|@)!2YWPQFu@ z-$ksBEShTYo?1Ul&S|pjho%UFXi)KHaklY~-m`R&N<>}dYp@)>21V5jxy}9_M}_^4 zlzL#<(MSL#AfdQ#F&C-$5Xx=Pntjr_p@@U(gqjzYD7cuWQ%NS~pVgR%BwzJKMxjXA ziB3UT{6UG)=oITW`9(s7BX9nuCJ0|vckn?&JXD7>f%VGx4V(;rYN26O&Gp>|$rqt^ zfD^m0miC!aV50b`z-EcdyiNl71>YL88UpF`VjpApIEnFF4WLURtc(d+lC^JB4Az<;bJA1I!`ggJ)gA1ei72UCrF4$P?6VI*qlAQyZB&!L&c0w%E>eri zns`4y)|+d1p8h;lQ9}4i!DA}(J6(lhwmV>cfGZ&StNaIvDHX=+@;qViF 
zsNjx`Cf9bf$E(3{gS9c?+z0TrBHs3k>E+@rQ3h$6=AustexvnNi@+!~{(&&bcxzCj zItB?;jQQFZ!7#N4Pw}KIbL=v7H6vsd+ITK)JRLcwnv4LJ8__u0-}JyLj%#N-5%Ex7 zG_DHdrhOLB?p6UgB}tq|hS#DIiMyl$eRBs81Yiy8K#}f-rjhk*KDP@AMCtpEI~pTz zL2~o+5akM~+ww}PH0$NwHpTF5{!n`sq8u*)C+Qxlwv+1jzq`E{qK#jHlfe?+jM{r; zPWmHjl9~CM+SC`77SkFnYZf#&iQJzFmaU#Suak|9+3b!A7@hma$a+v}$Ax z2mK90*xYKBY$D|IlgV#tX%^>w$-B_LWMbCI7|~gpSGI;(8G^qsdvaN1W1K+SOZ9O< znXul9-(d~qj(n$I`4vuSM~>5ym&0r`=Ie}ek2og*fyI&|i}rITOGrz|?zw3`3D5$H zHn5k-sU;Rwp|h?xWv&Pm7R3AK12#ohQX3gf4&?Lo#PfDhlrCULj((@-axb#l*1@>_ zd)FUh9m(@G_4B>txiNiIJ-*(_PM1ZuSEd#X2TS&%NYBR|7?*tud>DSv{RLhl7KqGE4r*K3Ke(d3 z1+hd@O4A15gA}>p&JfZPK2@%jV&JM_DKWAe`;DiLvo&D|FMh=9H4xp8sr>0NC4ojP z4Wr-Q_To!T`$SSI<5iT7Y@)^npqK)YOfhVlzJE>tn}x;yYr%&QaMZ=A^r&y-n<&J3 ziY%Vh``OfIio&W?s^eFazmNRoFIz)v%i68v5ieGvX=l;kRK@0aT6j=Z&hCT04^7F| zgpO7tqu+6D<&8lYn6ZRc*LhcSDH(Q5F^GZos4@@E0Yu}2ec*7oSj!ZtUL$zXZ)fh+ z`BE`}`MwOj6G)v^Am-^dyTcj&itW>Pr4jQns9JnN2QI&po^jsE)J3h|DGB0%XwcD+ z2|w0gZS3Yd_K!fpr8+9Lhs$}lD_Fmk0f$9>M+usXT~HkTo2JEkuVuz7F`BUaTTj40 zs>Ymn*1N#c{}5TxMK*$oo?6;CY_-uu*Nub0g=MD2KyP;?VxsES%j(K}%{#6NDd4M@ z8)q;MHJ94qtOZ@Ola^MIV38q!CPcB!<|~PnD5(q+7Ju%;R*WH-v=frE^D!rO^N~MI z0|#GAzvg3lyJ8H-uf=nR`2r)Rl(4gagnXv|b-;SjOiE``X9&02VkAH=r@ol=lFM@2 z1!zDEJmt8t+0e1(He1mBqlYM0C^QOXVH6La15W4fnP~qpasVNt84!YcMWCMaeKkJJ z@s+ys^s1)A#E*U*0ie{XR}U*!Yb!j5%H8_uQ{jx-8)rY6{PJ0^voX4+{PK10yzRrK z=8?h!D$EgM3{EAr5;bQ&B~AAYH|;L-Zk>7oB0iqz*GS3; zNx%#+63@d_t)^3W{7D={RT<=I*%-^}M9WjLohBb% z$uWzQ(UevwUEN()hix1cyf0diS|1Z5r=wp(sXXoK)3?mPXiV*~I*p-7H2=LtQAP{m;YDyMonyjI;k!@t=JEQcbll4 z-5uAHyuewy#*O+`+`MJCF=UQEZW2Y`J28i+^N`A?;xnaCp%^f5!kgZNlho;96K2jk z)3%}*JuGO|*pim*(Oz0cxLMi)J4#@JWq1UFk`Ih2Sy2C=9iLnQx@_UxRZAY^DZl{9 z#>V~+;q~MmIK(E^QLStFrLN0H%im_39lc=3x&&J@e1%?Fm z8}h;*0B3VRcyGjUz;}8CNp`$7Rm(;1VMF0k>6o4p<=)H|tMu!gwznq^jraYU@7O?q3~@3k@0cLZfb)UQYbP$6~oQK zODzpFbMiUEB6PobXAx>cyQ(sPqgIt;WxFcNklK(mF{n*7&l7d1_&XG>uDl%72x~>3G zU%Btld^SVvbD5i5r7svzs@)Pmv*8w1^gxo9dzp2B5fe37Ab)kcc`69zS8Lw}8oS)v z=j*xRrwIz^tL)A+yk-6G%%>*}^pNwWO1=+j_j+0#>HHYca5;Y`6~hW&dgLO6Fu=Xo 
zL)yI9Ck5|qr`K>faCgyb!)_;?6HK{a5m9Vq!ICOg6WlgZDiuj9|BH>W5Aw9Cr>bgi zb@xW3bZ`~i#!-RPa}p1sTaSHtnZj}%Deri&#T_kHr+A-=zHP13NW&l&3TM}1;FF7= zX+C}yUE#igAJ49T3MZAy?Vs|V_`&^(=iIG09LOm#8Ac(bWL1+Ss@XcUB}ED98V(}? zMorbgQ8DSzqBD`71$*Ls+FiPNy(ov=px9j}Sa2Vs{SqCBcEFrH z@O95$y(*ODwE1!<6~0lj#R!=j#KKK~&3H08Jgxt7^?S+3K27#wu?XJgg{;xQ!68L` zim2sEl(R4IO4yH0yP4n*MJ6N95ORLhZDt^T4Y$cjXd;BcXJhTNI^CjG8)Gy-@-morgD=445Wm zyyH>wO`P-b-;mCgEQM-Nvwnf37mf?7nSm{03sE69T(5LrEybdbEmY>wPjYj+mL0E% zcHg3dW0{o%Og(4n?x&(5oCc}WPQvJ614xPR4+n~Mnsd?z^G$RIg1;F)>q=bzKHb>5 z4aZ1-h!-*aIlK2(TF~LAd-wpLSj)&pONJOg1Hsjvh5a!Qrd~bsF{(|WvANvgQiad&bK6jYQcu$nic)+R zZjVg*T5g$7L3EkblIC|zrn_E#Rz+K((MQt0#wg0@5|sC(aIvB%m+KWw!)WQU#xQws zjmN?*MlRKR#2mIl$c;S3Tq>|x_WAgeZ?$i>@d1OYR}MHm?*W@RutGn@@`A&MmTcA( zNDg6%t?wKYn^Y$*nc4>%&AQ~iaW4%BHJLd|hjxr*r^U3fzueLnYeet|XSSQ&b%K$E z1cuP^HMisi>XT>2y}O$wTrP5I__Bxm?~xqmjGGQoaAG#Xf;|QZDUsu;oKw>*aEgC7 z+C1Drbfq94dCVCSMH-?a>h-Lv&n$%6VeHWs0G{MX7byv^Lw8mIGHe!2HheNHjGymU zz)$ZH#Y};IuyZte_Aa-#w?@TF!WYwPOaDCCfp5Wx^S!6vdJnr1O4qD#$kRyCxJ)Mz z)NxM_n1)-A(^W^Tf4&)f)0S!O*)}Msl^F`=gbqA@mTXg9^p;m`g(x(aT=SIO1yy|P z(arLeQSqYHtS0ng-HW6a_{?vsPrU;=;H_2oh4WUjD;d10p?D{`+1 z%1#~2Xr-mN?7`0HxPz*>W4$~`C4AzhvM%4o#5|Tk9NHAuk2;I;8X&9KpaCJ{^@T-1 z4(ea2Xo`CGxPil#2gYl-XCBr>Na%@}sAxBfPLt>KwxL+S|LsL%fZX|34x9M_ z-}K#PMl&(^j#W|f%;H^ntK)J#GGGejF;*fi0%liDbseeP`}oGh+D2{ZgLy~c^^}eN z1GROTasrcQnZ-tZ^+Q^<6b|#;o(?9Xr^rq~_i{e}x5i|6I2oH%{g6RLfRubT7BABL zNQOv~1#lfd-)+Z+oX6?^pD2)dFXRI{t6 zHmtY;)1*7io zGer{7ptX2{Rr4vMUL9K~{%U%sd-_j*!WHXWF|H zFBX{+0fF&4T)XN5)pP_{35QPWG@>)_N4(`jsDUmVfSgF|J{~(Id74&ron}iCmmOKa zmoHL0B`xgJV!%3(e-m?83K%&Mvy+PtA9sVUdbBTXr+~jy3ovbg@j_Qxv^sTh-6e(Y z$s6|m$|zj-&V@HU?X&mO?FQKpxJE7KRjC7U@sb%NX(qJUN@XBrRP0t#%RU zx?;^os(xp^r0J|65?LhSqUgGD-mlJGPm595+6cn)$^!GFqkWr`b{H1QX0xjhmaC5; zB8n|uGt584!Dw8fYp>)T=pYASf}?52KB^}o1>c;omkMs(@j^NT$^&w4Du`*P^92D8 z+xeM+Eiu&Q@VOju9qJ?N*b~+Si@CkMJ=bx2WPpVJpJ(A812$#^*D~MV{6suh2id9jyS}pG1UZ^QjY)a`d=$4mgc$q#rra30H@Tg z#y3th>%BBv;9=c&`V^|IT4&oh{vmut0!I}V)8n2)DW>RpyAb_P?LPyW3W;4(C$xj& 
z0<0+9vQXZ2BrMzRn@FYheL&|wnJL2fFIIzNQm{NnoFj~yvw<2ZsaboAFba8{_ z%fo}n*7t%L;A}<=uo-5xsQWuRm(-+jfx~!wfK;KP3Z1LC+gg|^6BWBp@mROF6`QMX zvDTkQ73h5D3EMZ~oX19l#HL*2Pl;SI=WM#D8Q^i-8& zdbZgLMc9tke*BHz0SW+Nd)wm3s5$oR&w2jA-At$eKn)mvQfU=VuzAE~5;dBzXG)!Z@3$`sYjE9x?LXj5EYhuGOnegwpEe-8-xUXFx$yxqNXi;^ zSHtA%aG<`-8#ynDf7Ssi$u}*Exhp?V1;DE{-Ie#dX4Od^J(YVPF0(O1ggwpXa1fg2 z>q3^RfbTDLL(|TR`4@dA44+|ig$cKWkaJ-@{R9Wy2>b~q!~Jn?JNO4A)NEl_)a5gM zHvEsv1{_ZiFU;LlZzQ~om~Y`OV``&JbuNV>PzRXLy7S1wWdh>;`-N2l%Y0Ghzj>#w^j$+_KyYy z?}g2Ey#h`^{nC<=jd8E6Iw}?9YGYP;1fot~z*_!(NUk9{a@mVk9lh>MOAW<<&60c1 z`E2(s^azHGe&j!1Ac)d`F4Opu0{MU20fD=>XS~iKZmCGSl;5R**;1E{=5pj1+L@IzT23JVQ>GLv;a1Am^c6r|IsKN*&Y7z z8GZ(Pov!Ta$`#G1J@cY5$Wq}DT;HKo{Z^>`f2nQB0M^^vyG#rrj`3Oe1~j98Zi(#j z8TGG63(}q|lOFmdi9z*_HwO!j4=Re5OMQE`s0mFQ6Y(82j}MT+dm)?`^9*{jNn8=a zA|mU!Kt=9OEB~*A0KREO6xe5wRPy5kA;Rq`1e3?(jDDRiB^=;mVFcpyH6t8axt(q< zZrPBQ84t#`J9<>O-l z{icMQhxr6u|GT2__jw2Y)kmvWrzz6`|G4|)&xhS44A@Hjg=-TK*%1C^Bw)v!L{Z>ooQ*kP!21Q?>E-+47ePsd*mA)ZUF%1PAdjCA_ z8XTP_H#MYJDo~K$!I)v_^fc){I=xEG_3`&Uv#*Oazx`qtMQ@D6=14aqz<0k(dY&^L zd9YY(!$}fORigVUI!9&1n)r213UC^eY!cvCO9%i9C+veT@#Axo`UySn-N=6M5M2}J6RnR=8_bD=sn3(>=MYepeDN6z`a`Ul@)0V6xDPQ zz966Zk{s6^ViIPG_vKNW+15L1v}a!XYVW6FASfeyIxhYqP^?`R-OZ-q7#i$+zMy=&B^oshoTnqTJJdguJ{C;JVKVv5p0_e37 zC~B|p8yuN4eHi=|O13@7Y=pfg>rYr?>Le8c11k6Ag-~JCQpXR-Lq0#k)&9N!u#)2 z%G$P)ymh|Q4Z03_gdaMr>$_Zt0Dof4&@-OG-4E?Wp&<_Cgtr%`?{{dl`tpW+hM%}+ zbQA1grCLMHHbakw*%3qmVT%d7?Kil`Yfcl6Qa0(`j~@{>g7qGYl9jHVY)_D77VA)1 zG#(!b;1%-n!=Rom>24D@vtlnmUHwt}bQ4!Q?{7WA6+YPdIc!z(tA`jJDD%c=4#!|y z+5Bzifzqh4tX!kX8Pv0i3NX7I zcRZ&VF~HL6-Lf9U4syW`z|8{fxT)FDAkSqB^!^JsC;P_3$*`Sw0B|blU%n?a z6-5C{J8C^x)H~uG5w>C?5$A5Np6w&dzV$&uZg3$`O5+Q#s)_Krj`D6i$;YA`AmW$s zL1GzcHWJ7?wtJt!`EY;ARG3uf7j}3ZbD-7C5FkZ9JkVOQ-W~*DU0eDfxHq=Cvjy;h zD)g71WlX#5G>D|4SnR%uO%a#aBRpyf*i+`{=*`_*b0zjVnBtzPH({cXOo&^sxoMix zW!*m&#UX+=&jAgKs#OzKQYE(WJN&Sen%dqh*T~oVv5S@m(H}!=jLyH0ML;63J~bOY z=Op>5cq6MY6JnmLANwV1TKA+2bJKjm^mKbBK-WlDyN2{pD z-?F|0il(Fw#R5vZdc=Oosjt{i>73Zvhzr 
ztRo?<^Uu%fY=#6-^O4=Q=a9H39D%osnNv5oP5Icd zu5Vd{wp(_61(o!*-ZdLI1~fQd;2)o~Nde1iJ!Va1Nsfsigg8Con~j&i?G=etef-Z0 z07s7T%f>Kp!@2)mM+RJH1FzGV)M{q~3Ythv<5`WV>FN98MZ~oKSLQr{eFGHp{=i8; zVU-^XJKC=>yG*C%?*T}*=Px}lDJwB(+Jyl;P5bL_cC!c&0sQ~A8fM8_IN?Aduf4Ots& zv{l!KkA^I;r6bgL&|*hxIe@w|*Q>l-0WNI9&GsC%(;DVPVQXZ)d|4C?J<`41sfz}v zE+p)L3qKR3e%oqjTV(r-5|g9j&C3KrK6UZ;v;3hkAv(gqaHQDuho3RBLiqL|grVY{ zr}IjW8?fwT!e3MI%6(8wJY}#~IO=ZuOwRFi$L8!Nl=s)9;dpqS!ocldd~rh4_a;Ht z1lYRgqAyLS$vAQ>zY$L7RAw;1@!T(D$|Qf6QemJGTLV@|4UZ8kZm;BmSs(sT1)6oS zL>~}$-`!gm##d5GB^)HEyA&3f8o@NUJ!}j&d#2%(*b&C=`sXiuJV#zJUZkVzObJfm zbC*F5WXNi?fOMc0u0O1CUr}hc5=nEdsG~sn*#V>t$)TKnbLZAun zM#p3|U*3vsbSo@j@<8}5oK7=}-v=y(H?79;Pfxow>5CmDYoSb&QJ*(dc&#ak0DmQM z$jy%hRXNRG2>UumRZ)XhbV%w2-N!e!V-<&b0Gy zI(rSxLUk+<6-kAUZj=o=9z%f%q-aqh6-}2=tfqo-(p$-cR&m#gTmx~IP`+8j>-wYn z8b_OhMwvT}et=slEz+}(#k~?f_Y`x93m{=vl?+q?+jTxbz<&55zFbIF>e@h|?)B&1 z{LLb@GwI}#CsQV7Kfm>pnbx!J8!2RVM$?q|r1ek=%8_u1Wz2>}bZLF*wbui0GDg>T zuYPm$qUqA)-YO6Of@>|${Yjg+{8e$FPwK764`4c8-?+Fv4C?YSfyBhb?RmS+#?DeZ zg!Pp%uG2!=2S`RdQpYN(4R=4oE2D0AK!y;GYXoY-fDL-xUYu9gOLuSFbhWBJjJN-^ zicD8aT|l?a^Z?WuMynPAuJ)7EOscqewoA^?l1Tu5>j{D=)Jzr$L-Y~fxugZ;b`73a zWtuEoVAHvmE|9K5<{_^CCC{25z}n*(%&6Fq;dI}X&EROshKMl@$i{tOPZF=(INj&o zTX;UtDbhe|JZn5hebEHOv3mf+{T5NLfs+VKXX*6&6T?rQ&|OhL`4_vXFZ|2!hdhqG zTAYO@kHV7`8TuJV{7DuSMpCDjF$@nn?(4?WDUE_v*R5`}den zr>tbI5G{auHAN<1(C(cu?UE%%SYOp}x=XaePdBZ++wk`ECl{~x=q2urseHsZV*MUK zmy(>h?Evx6Tu_a*-|R2Mw*8pr zg(X?xK0U^--@ad-{(NU<6~6_&ab$Myw>S5`Psyo?nDlN+1boKc?lo8JT?0Z0SOysI z3_E8%O~su>L(qnZA+83G9JB97l?_-|bfthO~C!hp*>S>3vfkdfN~AdKK@ zO4+_#pnt1y#76$t7X-JCb2Ii2w6o(+2o z0Rb1tJowaqJMyL7znzt^0kgke4y0=y^rOk!O7Hf+%+vH1NMBc*Zlb-*&>7qGuy-4_ zB`_2nk+R_+B}lq8>71)!GWP-KnTlEVlx4CD2$2wo!c{SV_WoRf}rU z!pJyXNd+!T^=9eLNnl`l?!!LCWm;WNB<}RhOV46V@YjO(?q)%TS6g00`!9xcpFue7 zj1PopYM!%oM#%eWRqZ*S;76;-$>8;!IeHGHN$$?yWkHgL+e^IHf`GZ!aD)Vkcyl$FXOA47cR2a;dCqJ-Ww`uny{*oWDHISH zH@IOUCaEatxd*Jt#$G(l!z}z(zrt0wZi&82c2(PQpSU=1$gOL1^l0A&f`fxd7F7-dlEx{#To+O5Aa+Kux0@fsa(|pw{=c9BawG 
z^mRZwV-&j+e5XAvR!Up|@k=gs`7+Nv3l&n@oBeqe{)Y4#l?Xl5K}wkpbM~t&ST` zZ<@Jo`#2A?umGp55G)_-dryBAIbAC?`aIXF?gGHKbh%Ek<22hEp3}3#7A!#L@)M}i z-^`87>~__pm4XNXv&Ew-g|T6V5m#N|swhL%`a*E^n>quF!zKq{Y8wt({6RaxH%Pm4L4t`B^6gu9q?z=E6bnT z>Hmm(XS*hZ!`$V`9|J5NqXr+}$I}n({Fv{)ICI|b<`}$NQD$kp5FWCx{%O|D|!y{`C7T7!p$iivuFOJL& z1tr#zVn?Cnua%THS^4$>|Nc4oPo1DCf)gCTh;zQl;aUMeS9%=oGk(>oq*i9R>`DYP z_E1n}Pa0 zJ&U#8i5qC~HbKP=*#_GQ##@&~t5`E`>3yB2({`4DxD3eQmb{iVCl#9lm*I)cqkZ0N z+kHRS%B-~sl|jz*M`c3<+C%WQwSk)oBSuU;d%zX0-C{|05l}#S5g77*C;)$fAR6+{ z*v=&c9qx*^jZm#`Elo24V0J1YbPPi9m91TsXQjTGu-K@4E@AXuh9|w3@HXW}1MfC) z7V=M4=6*# z)z87X6>0-T-Dgk3hacO0KJqk8o&@~|X)l8koGjU=UH|3sSKn-bfT1MRG#%?}o7rG` z)0^jr6TvdxZ!KIMV1wB_Fo`dD-HOdo502=8?`}6R%<~5qg;Xs4M)-E&>`5PBM zC5JQl)bO7KQ#gvRGZs6Vf`HOP0vJgf{j4S&>uF2Q#%Qcb1^AY1C(}Go$#7vxtvKah zwzVX2y*;rbOyvA}8N8h(EZhBB{JVkm1=?g?Bume}mDLjLb~Bk-mbS7>%Ep*or&=3| z>}`+k6YtWu8N9X?FBy~XzdnOU`Mzm55gyT2&H&ythz?e8CY7a2x&M`bd4t>VT-XU{uOtf`axh?OgpzUrJ0?Pjw0{OZvE76q}Ea0RQ4Dg2# z(Sw8mv=Qrd(Bq)shy%7RIwQV-X`F8h*_<%28U`B=uvPij8TdK?{LKIWXw<42$k_*k zz{0Pt#&Ytc&2Y?@v&jIb$HC3wkpM-U?Hx4L^(No#fGrN$nmZ+x;3QS`n^N9!Yy-i* z(6;vl#2X~Za!{J3e6voItk(j#q6Gi)N=T$&uGBKnG5&WvpuM7I7*~sud@ag_N?+D@ z=NM+I>n4-pk}?z@tV%-~eubt9=Uh0VoKnuEsHir4_Zlv(T@$sJYnT6g?_6ZxR6SRo z0*P=)IA$CW+mzQS6FOW=A=rJi5$139?puXQtBJ8!S?WiVhxB1fjT6W}&6+V3hyb+O z!ieNeH?6=@5LtGFnG>^fyP7+Pw0K!{b*{2A(iYw;waKy|6QSc13$la1Oy@CF7Qy4`zT;mRVBC&_I)u6ADIHEY1{@sB zM&HsD+5fyxUUOJbjhQu+uY&9?5K@R}6^DCNA1W}F9#Uyq<==eCQ}m(*ht+~9}Q;s55@(5JcKsz|fFf~k#@ar9L{2@W#VE*KSmRZEO>1&13Nt{ux zfe_*R9Xf%(MiM}xXi)?XUkjM!af9iy=(C+1Ch(FChbaIamg58D=4>=Km7X4=HYXX) zlPTt{Nd~9M$%tF_A(+ha1ukoGYKM-qf1xO*sEQk~c7qa~#A0cqaz-9)8yFrp0V~D`k@5lnO$L zGm14SSfoX;ZOy{WbsoEDq_&yWW?}Q)fUY+o%nQ@p*gPU9xnx}pXm(NZWut$^d}%N% z`Xviuq+`z!8W+f83=oKx^P1TWl&XgOUdN!4#WE(5(!6qN&Djhgb`GG`G{$5N6C2JJ z{;UA2>1b67RsyY0jj-3+D(1~K%+R%y9UdsxXCjXRo_f>0CcjP>@_huog_*KLvC2a~ zUqge*H?anatFwwu5F8nAAN4%L?uKJUAVQP%2>#7f9T8De!)Njj$tclSO;kqd! 
zDY`H+|4pG(Ko546`gbvt6bMcvRe3z|>~6aWg-XP5XgvI8^DpqTs$UNIA8mKUVe1IY z5aO(p5ZGV9ssLB8223l><0pEEUIK#azyFrUl(St92Q^nR{BOGRhZ5jsakerXm0OYt zuQg{OE@_IX2d=&&Kpj$EW{oLKnmS>L)TQiE225M;vZIMHvFtzPP zH{LPp?|16P|6uCz3`T%z;tH&yz(aGT2F905TrfHa%!bt3*YWjuY|f}&Qp~V*7R=*8 z*+ITl*9Yc?AMQG+{^c8Bj1%EIqtnt{BN)(kXxhn`F8Au29$XduvvEZWO{PwAwfYwRa>q8 z&okbK)39V+8`2)`Z{KFAcnaq#WEyDmy|KEk)9YPK1SEfDO=0}|FdRHDBbi<`QI1vq z>S_`=EWKCw@xOE^hxtK==aM_`uHaRw$){EiCKzWlciI(??KjFUp}ZM`jyxsDPof_g zFKdlYWLR1I{3R)2=Z~N}alG8GSzIH5>dgEEQ5e!E(xSl}B8y%!kRRTr!L?kmB`T(Q zb$)G&0*fg5>di{dP9Vm>SS@*KSOw<1%YY8St@hd>x=+`hS8Q8*JHN^jShBlf!=PWi zAG@n7pxEh->;KG!ZC<6p$q@I>uvu(Ds9YNL)o`BzNRO*rlYrCB5MZ()HCZpBijt{X zZuEM=lyxil!d`CRuWA9`*mlViHtzW*n^@>8bR5{J2Uf{z`z&C^MDhqtGF*bZ&lsdv*9o?8B|Mk{KXb6wJ{V_)G!-m>PPCwANB#Uw2+Ua zc_-aI>)4 zpu6oK3-SFg8mGuw29-hyIAbJl`LI}o!${KB~Joo};4 z*q6A^Ra-mqi=3V=dy2uo{YFPGo>vMg{QQrf=j*!)=g3*cJGh_T8_vY5gUkK48>~!*j;KJ~UE5RC7j6NQDm5Cj$FPtn$^;E8vMh^%H)BZi@;0%^Eh` zq+dkH1mC+8H0b~Gll${n&TvCkh0ELU~w7$9=9tKjGC_d z;hcfd0yNYieH7vt(y&#aO;Vjs46-tyn{wrV*}ZvPz})Cw$HMjBZA}N;VB@jc8BxT3 z#cj6MCO?Hd8vIiBZyD8siP&;If@%7~>p~2A1=vlnm`JwFTw|y^`tcu9q%{w9keS@5zg^%}3qkq&33+~o{?pzz|YJ|i}?#0e7^hu>xyWz6`ADX9VzjDCq;m$);y&Ah`y2j7o@Hr z8106&6hp7}@(r}cbAIS=Y%}2H{i~$u>`EFs@q{ywY;hk(wKg(DM4$ez)Z|O?JwHVk zl?hK(K>3SMJV)FrGAvDwL(_SJTh;}Ag6tH(pmxRjR{A2k1RYZZp8)5%Alu^JiVB-m zJ;>Gh{2wt;5A1txBN-G|MNRq-@aU7&Gs=JIc+sss%^eiGkwgyopcViSNCl;zLot=! 
zKGMqveYd+PLto%MWnZN&^4cvr!hTSK`S+Gf5pV^%dn<2+y3e|EMYI1OTW#bPk>Wxp& z)qjUk7zTENAUKD)kmGp1(4zNW{e6fxQQE#(J8^ZGL7??-ZyvRVueasZCDo`tS)7Gi zh6i;9>TaRpMa@17%ok7O$#;OKPpcqIb6Q&JFQh{MWhZb@YJCwgr@mvY)E1AfW^0xoO_kKLW1@<3OX;7d`vWm!5>F}7+iJQa_gtd z*8XQYdnD+yn7pGnu#-Dwn*$SGZ0P<;30UC}?`Ad;Dh7r-Z`D)oWGCO_#X}+FKyZEm ze{k=;Exz7)RVz!aEjUUV1Qu|1>gid8_xmn(+?oH5u!TDT0i-3LVTPGNBDPWQm)Al6 z`Bpe%z-n+0*iNm|qaL2JV-Y>XG)Dw%YDSjcp$2R^#Olvzd@17ilHew|o_Wp;wtvC_ zu6%eY>GnSn#S*61xd^AFrFD4OJ<4Nk&VBnAzWVL1ro{7YsCeK4tdBu@&C_ZT1|eq7 z-u!O0Rm|ayt9kTAdstVs*E)8hJ_S*A^F%xRECA69)~~%6Ppmc?Rgb&8)RR(Y??z38 zi5;w$L3gdShY`Ppy8K`1>`4`nS}3>?liYS&2*;bS#p=hk<3CC~q$9NNXT#^g^H?UM zv=alw7J^9_^WCA8yH9aIfHo0`p@sp+ivkPoCg61QpJC74f}Q<5A~xXn3ZUx;hX8aA z;^>t?Pyv744*?}Z4kloGM+X*6z(Qkw$To(6udRU#rwQUhzq zxlztiK>-GiE;ig(sboT12H2LQE3*ANSs1hYaj>705!f*DRyiI0knn_EuXJ!53+*Me8-2uV4Wd1 z*HR?SN%=?XMw6>-k3j?MpErPCOq3B3ttKL9Pa?e6?vjw#RpTU@EUE8_FSBI6?`Jv& zUW5cYIW!C(z*lx)BXXa3$I?xQ^I};wPj&@1 zvos)oirqhv<^TD~B#oK-Foy25WR!tTWd+kbkunD||EA%RhJ+qCF&u=fbQQ#mOtqSE zbQx9uuaUaMSoguiev*HL`Tx%^Ocj_fO{U)UhCw_ihO3+{CUt!wnb*KOm*c!MX&3)| z?NZDgjIE%^@K)vCo(YE4JaHjA_SLSeZ><}~Cqr@lKi@j7h8ho>dp2C8g=+sBHW5G; z6+zgSwzv5?+so?gEzilGReg!Tnz+^2khZHq|h3Q_@~<={&e76 z;irDTowgIAuv)+$g_A)}{c<<6DuUHcGrO;R`%1$x&G|>ob^-jD=H9@P^x9i=`)8yh zn{T%0G=vnIhKk(A9RzosUOZ!)8Ivk(N_`!2kDF`G=JF`N72#-j@6@+nSpWzV3u1V5 z*FE;3xSg}2sndu0_kk?`gV8YNkJyLpuh`aJrk3K?eVackbu6Ax@I^`DpK&FF^)T3N zd_hHmF30L`(IS3xZ2sx&jcn`@3mx@EjSEf;e7cuP_yL)BNTIV*%MtCj)f%W?xXmK; zf1Whw^(A;5WgrxYvTg2?P5HA*KF@$vX1#Qor9~e=CHblf!tNR8j_jF{e61`di3SJR zGzvpb+2Hl2p`oF)G|zF~)nv_g)CF9^{r7&DU*`BBg^J+XSWB9ns~wiPgYdr({*SR~ zO2l6!+_nptIW~LH|0XIeQ_^uC9KR3jQx;*WIk;v4Nen9OZmrXLAN0RUNie@o(WtZ9 zn3Ozvi{ydo!trR*8mWf87;3_t#rB52H+-@d{sz<7?Y z#?*Ee^MsDHl!{1@#|I0FRI`sv66MRTR>z8hA|!`|o(b z`T`vOMo#H2JF+KGg^`#i+>KWBV$+s7^lO zwY#%j`pygV1OH^-ZDWkzJ=yeiTs)+lqk$PQ9ZxTzARxQ92BPXx5KddsI0_FhJn*ll zVnKl9KN{TTg}tyW52}ewtegMY5-k`X?Pa>BB9POL0O|X`H3$BlmS|6g_QQuA-I+>< z|1Zrh80js0BOkZ~FrI 
z62L$JBOLC8|KE30##?97Ot2Pt`eAJKC^Ye%~6L^bh(OQU+f!0NLZNG_o26+2gUZG80Q5a zpE%%xjOlKsjHNEPfsAiT(~l3*OWUc=mUR{yZ=@Mt z2?p73m~M|W#bkZvQ%Z&Xq?~sCgIBvtJ<56WS^j77EDISDPjc_SRZC(~B#4rXWspkP zYj`{>eO-DiUt4`Fkw;$;+?lew{Y^gY)+5Ryqw0iWqiRyNjA6TxToN`0l0P#e00P}j(5v-`MNq8CK?+)IAjrQSVEV_I>)L%K4gQZ|Wdt5cVJ6>A(QG zbP5+c7^z$;>Ss2j!Z}ql>bG8R1?1{IXO_mJ5I{odQ_il4GAl5{ox~tK$<=gX|LzF` zU!sbj>e<$?%8Y96@9$r|4Aa04QKm;L<}@@kth)}syKTGJ8Bck7el+W!_w?Y0+u3g~ zn)5D>tN|1RdvRy*jdGsf>9Nw8%Tl*0Vb3+?XPs5xFSFS1X4>aGS)Yg>D=~eX{&8Es z($V<4xUkpmqLL)h9}AsV0uW%jO7S{{p=>`*)Cn#zL-bJWgo5ZB;t#gOUUi8GqmWUs zkZrW#*BoTyVs4FjQfZ!a&u5Z8PRa%4iV|w8#K)9~h{(f`ae)7SBGXS&5Hgn29sY>Z z#3Uk26xHX#82z&IwF|l>U6*@b*^TDwFPhf-h#jm?4u?-Q_{-Ft9k01(t6QmzD{`(vf8;Dote zy}9}GIHWN-A+nPI>d4yo812$%?$dl5N|^E^lUj>g&BrQ0fqsfqvdFX-{hZ@nMVzA_ z^arg4i$YuPFOI9K;uTD(!C1o0{+W#8=$V)Ae0k zG8`Ns*JmS=fF-lwgg#(_PNymgHkCpcZ6#c< z(vjZ%$B1f0ox_xHcOpmZAUMXQW&rr>qS%&g*CRaJp&VvP;VAO!=8Fs1UmFsJZfjbk@fs0sqYyUMRmdpKkN4r>Ao%Qh&bv@F_@*@%~* zpv}tN@s0Rla!{#u9)RlF#+=swWaBI(X$iaZkX-25!y=e}O)70Tdnn%jhSKYY) z7U*Y1u8D!`$?5u~GsQAi|0Qu$1m=#VR>3`uq_dY)-pDj-6Z7FjM*rXOT)M?PQD}+r z7edxU^}Z*UD@)TG$5O!5z9qkYzthJ)Z?eYyk(bYpk?S=6yNjyFXD3_N4J+&))OtFr zy%2n>0o2nQQX2~1b zpi6Fxog|Ya9ymR(Uo+6&vQx0=cy{Zx@Z+x<4pSA+SB}q8lrBjC*Otuo*!V&+u3MnP ztCqkT5uZ3S)R!TV^6vIatJX;u*+;DN7ugfRL;%*ZKW>$nGm?~YnF8M@5cLE!SLU|q zV`plQfBn)Y$C4qW19fc)OEzEbP9r*ZS$jC@?+IF9l&L^AT^nM}OQzBJ@y6;}kY_}{ z@rsc7UTY8spZ=cjt(im-+BfX&*;Pw?&g7Tb<=Yj=@tEVyCf!k zU8mNMqK&>nd|kj)vJ(Q$RB1dudA3z!@zLhR^kL`ZfVgI;^ zcg#;yK$>tq{#q;C!%TWHQ;pJ)EGqs3MX&bY5U&#|!uyEEDR484e{B`g-(#oci}ckT z6p9bt$nuP@SFj!LZ-tbpiagq!*6Tj6L;38lk{oQ!X6Y2_G2`C!Sl0tc>0VNk zvvfr;5T)spt8-Ic2RQ{Xny(#ozi*P{hsgC{LF>dAZ{OoB(_$Q*RA+13cM0pX$;Pdv zhI$^nFGZcrh1`jK{}c;o3@x6Og+m(-cEUM9p(eKxrb!(=gop19Dj(o-oTSG=p72x4Y+g45vMPLQMOrEzxo4@31@Vz=okLm zO}a;BGmstNwl`6zFVvkSBXh7ZC8GA;x-VVz)}yhcoyy0v?_MiL87$&ozMKQ{uKKH8 z0P}A;e7RYDm%(~oYHztOgL|gOfZSy$;w%~mGOVhZp&4^lDl|rrjy3XG8_jk&lBZ?K zSK%6Pvdg$(PnW*EkTR~?T&kXzYxw{#$8 zl+FQRVK4}nI9KzPT$#s)fv#|D#a{@h3Lv0OZUB5fBmj!bG$#bxXJkinl}c^YI0c;K 
zI4FTi>@i37EH^lS;l$p;=Wp3ECkC5HODN(xKtJpvLH*rr+mmZ`(GO0-Q8KCoO`MEy4V~ z3-i?s?U~{Cck}LKh?ml($oL+YDN6K-501axHtf-Vkf9ZO_PghndEMOkdItV9#qfDY zgM$7?%P5snrJvf7_f_&ZDW2PqSaPc~CrQ0649QXpS79T)7eVQ#!sT7c{%EXW{ozUf z%A!Pl|MXJzkrz0Euy?xOt1i>pst-WgzLQ=lAy?_W)68k7&rfck--W)t)f`PDlmkM~ zuN%;QWhg5G%Bo2OB?q^Jxc&UebpKgFcvu3W6<@PaV-}^;xTyR7yS_6Ye(MX=;q%;U zxD)f!LxorI5JbWe7{I4B5AiDZ5|>AvvVTkb0Uw8>@ad+>au#B{dJ%tWIq z?aC!R+pkV7SL)rZX6t<|=Nbc=9*RxAL%kr2rbv!(U`%JtST!~aLAEckAVXHiP5q7Y zb&3p%e|z#@JPF~4PhO6)#a;RZPnac)wZ0a>Pco1~>em@ptG$jbgVVbhO|1@XLm*Dn zMxH~h1jAR71L;nZWky_NoyDCb(tDBev;v~ zIXzj#=)L}-6c}9P*N@hBZKo=V9u(+&*0*l9Twfi0<6BvlYC%S6^qfn3cklBOK~{;2 zs?d?wc84F=*EVMl`XFugBdGwehHmlxjNsH66SzlL0!BkLy`GGFWxFUBU!7_NH zoF-5oTaDSn!?!U|tEW2C>QHBPq|EQvD4LaIs~Paum=6;ztp4oyk?ohVyx;r7ckev3 z2Qg5t$@}038@j%1Ecx)1{Tmk9;467t5$ms)uG%BbT|ad{8JylN44+;XLNE6VD6loL z{avL8JOQsM`8VXb8Kt=n)*2wuw*HGR;USS?xS+ zYok+T;)1RB;F;2r8G-5YQyOee&ZX{POx?h9y`?tQ zNyM<)#d1YW_ys&|e~wP!%L;dW)H0S0j#a?Xe0(NVWe6cvvMAxZ*R~VrSNmJD4GNMX ze`gm3>%NPEvg7pbnkx1Nz*z2}_+q|ajRVUe4VL3#Cdm1t{?|+}1w^tot`tP(7z%hz z2@_^QzRui4Ogsd01bR(MF|=(Z_X|nzgHy3^Gze0e>m~JXl6De9PN2;tCS)(P8Z{m^ zrz$`njTJhZ7IAEvX&B*6)F(i#j0~P1{F!O+lam#4;?tHFLoi7?nKe~me5kyKN6Hi+ z3LhY*K!(I1)YnDJxYpppz)`OwLpRbMdZ{pN!PcX2ae5Vg12+R? zGH@Wr zgR;V}L*2OP^Onuct|78>aWqb{Rn!CVjEZ?%S{Z9n0%i}nhnAc& zkn##{|2rXFvPG8&T@P)tC>o~hrFG`KUCX9kz9q(m5Tt&EP2>3>E_dL(D7i2Bd%NX6 zNM)12GWXooGC5jd>)DbZ09+~G>y!9I@9g>Wc%yRmA*jQ4PqrF*Jf<8t)`}`kcGZ-b zDBVG|^X%i(Lcq2(sTD$(UxhJn9ZkF5ZR&0!)8&Co>9)V(%hfaJ&G_%FQ7|^rQlkVA^AMi++2?%^1|BvreD!$P)qG(`8s}VXRIGe;`w5tUf$#lkqvGX*{0CjL zjNs@=i)3Cy_Lughs_FUcvp#z<`)lKO4a)7<0CC$l$-3F z_ra*{(yx`Q6dXdz&K|x7qt}5`j~nVfQb*5Ih5Mf#q?FkXD<$)p-pEycXL_8B7563v zWR|WdzIx7swegfNhNqm$)yu*!&rcf86EXh#`mEgEG4EjtIQo9wQ{dL$pa7!xLpj83 zd0aB%xV2og?M2k_fg$f@^@$7FE!cj$)=Nsk4Q8L5|Mby$;m5uNWk%nT*><%egYq1! 
z&uNa;9Sq(o8RG7+9!^7^S0=}S{I1Iyx*V--Y_;cSCoVs~g?;*gl=|WDJl11y4YKkY z^&-7Wkl+NH3g#1@^wSW)c%|20pg=eEw!R?$1fQVxUwd6siR*`+} zPWuoDs@5}=0BXEfqx|9ND_aE&OC1mE)eX1M5bv$Jqa2XpZn)F&Y6!K)X$#gdIKfS+ z6W?elnNjdE>6!Q*jNU2JFu{(mJN(k)8~34wNTRc1%5lx86AYkk^GMKbb>Kl*h_GLC zFuu$8w<$Hd6kniwaq>j*vC_vXh18ct>KJ{t1J*zGOP2!5iYon$Ny~pB*R}_{$5^jo zK9#p(g2myB`Io%Q_g2t{IBg>gh1~zGE6^cLSo74!w!gWpPlyTQD#9HCxtgtu<+9L8 zl0RcMpL+%`rZjuC+lWovc`a6QewtJ+r6fP5HVbxqsZ?6T@hJwD?Qv_E%KfCqY=w>d1xwMY*$1OUB4ok3dP-u(yG(MCg zKdy5h)9*N)v}mb61NcBaijp^7{Xr51%QsMWn4rziS7gqeHw!+weP;o+O_9&xr1jRR770JE4wrx?ah*V&hNU+uvHWl5xfrP7XI_PT|e`JO`E2E&$bTQ*|)L8<_ z=k3i$W4-i3{%1ZbXCeicBl$njC#?`#>=GhqLJpYMCM;qPiueTlwW>XL7?^t}e9bdG1Wf8k%nAal@&b z7LnvOqRF=e6fk~9ABs8UM`Q|pWj{Ez`u+OvEP#qDYq$RnoE3f>Dho+;MJj!y*SEV^ zer2W?;az9a^a7jL0{j82%R&9x8byvBA8qjg?l%z=JPyW9A0HMPhrA`b)IU0(XISY- z%0?A!$2;==E@Sm#f;#CBs-^If`eU^p<0V`9{JzI-Vsj|$*G3t&Ni#&wO(r-!4SOQQc!K4V2|8$(aqmvzjs&9uI>8o#m_AtM_Bq+86vQw)rw8Ne;dOa^ zvA;6nD*!2e1Ao(&So&x7%Z5D=4BkmN#zt;%14GZTGi^`2K;IY)j-bY$c*2_$YoTRk z&z2m4iG$sDnI>khDIfk?TIr^n2MKDqIi&dcq*+q!gxS4c4d4dDJ z%N%FKKd|Vx`W($XsZYh(DFO-MdRrG*I@5P|GPvNB5^k$=JSRw;<8JPf&gGYnZ-9JE zR#ng!A~6LdFzN+5MMk#)hqt(FZ(MHoJc%9Yx7)>0YCkEMr&E;nvAge~v>IL4siWy(Dow3S zG)ltWlxD0x8|CSF;W~f7S!Wtz`^EEqJ^gEo5KMG|(?FY~vvgA9ZN(|eP%uv9{YH(a zzhw6VZ33zr`1!NACZ0W6?tO9p{hf>ls`3=EO=l}v{wA-Nj}JkPj>ClR4I_J<#@f}_ zQH#++eJb4y-vvno5*X;-sBrhaG5~XZbzG2Ojj)`8Kd4!1c`L*dcTF7JHlF2w16?8U zJtr}p-}(`NLBu_PnDgy%nW-GUBT;<@E&doJm*WSAq>KP&ma5A2N~n>~+)fLzjHpQU zkD9B#)ROp(&)4xe(wWAMTyJle?Skc!7%xqIuZR63Fji!MI8eS6?e_h?bsb*Q1MFZ` zy&~M1`)^E}>!8e6bqnAs$}zM{3U{T#ZVWztb*@IqbSMSxku8ovT1H5dum~BAZK7L< z&X^b4V-gEW>c7m6QQ)PB`t|Tuj^q}mpxwfHzTz@?BAA$_u%uJ>J^~s-_)F274F><; z&X`Jztc}FBCqjH0)QI>UA#8zAD?Nk2rGMxc$Q32t1&W6p_;TN<)hEXIIsnWP^`S1B zjXVz@ZBO50MnDpwRzr`pdHDt4Q2|FtlshSp!)Db2Q6(XE^Ihp*51Sk78E|*z0Eyu0 zNNKhc_^Sgsm}J{IBEFPj>gVkGq+M$`Ct1Ww z>5R||w@z@E)Cw{ahUa4_<#1)#@e{mfXu7g*<>Mx<(3deE*X(TbopbrR15kcyZ5sCe z$KT)UCBZ2_oB|(G-}+i-EEB!)&}_an0zWwwuJf&i?kE`9INgxj4*=}EhuPO2FgsJm 
zxVVfy=ruxTDWor;Jeyj`DwwHTT-;0Q4!J%MJ4hiLvHXcvNhLmC`ah1<~GV@cJJ3;DOl6gN&nKYepYtE$QPv(XjgZk#pln0O4McfWSWegE@ zxYCvKkb7weT!0s$?+3-MNXLfL!@dMqvY?aHPmr}GbiG_x`VMg8m`$23-}HU55o&b~ zx^%@wy3RpzOaSfI#g>v3ek0bMK!HH7F4r@gM&71T(u(u_r6NWcZY}?!cw-Go(6rO- zRL4G%%bONhCqcy0ae&Fg>yCt&>nX|Zqam-C{LfEJMN*04v2;wEY65Rn<)b13c3k_! zIxK^{R{2vMi$ajTIL>0mG!8WLRgP_9BeBFp5k>Z<`ES5($+5Xv>dX7h_v!rMGb03B z;n$wU0c<{g7egS59gJvYs1JkY<8Z{8lSB+^_Im3rii$uMWV za4Rk=9v6~*?e0$eCFwB);)U60cr10?IQXv%^GB1`j7Ra^AYW}&FdsKvDITL>;XC#S zPXoX*S3SrdP5XdQp4-zvU8^_o@;#D3!Q}^Iu5+Hf*M9*Bh#Y->#g(MRf#;bnv&ZVd ztG*wjY`bUQ>RmxxUVE3l-pP1$9q;+-6!Mgyr}H7?2v<+$^^8@2W(JRHglN@|HeVde zS@&_{8EGf^21jh_1rz8i7tsAE&eExeb2;)dg^u$ z{UWVQ?@h54IT9~cLh8j(&0N{FCyVjQHL?#|3nb+A8b*qZr=H_U>K)BcWCX;Df`4as_>D3#jj(-c7DET3H^iO2HT|Kbe406a6D}?wV0a$`_;f7i zz{~G&#i>X>Iyf~vz?dSzp0=<69`3Fu1}Ij^rZzzy8fJ2qxyqROD-HL4>+E=s#YKsU z-s0og5r7p+l4vsGMZ9#Z$(ssI4L!rPWDqb{h>*sH78=;Uj&1!Njknq{fGanrPzBJ} z@9JpgiM(7&w-2WPTXaWoWUf97un`4Fb`k{Oj8D$#j7>G99n*2tn#!n8hVEODvFu;U zY!~L8w>$xc%>gO_$E(Tj;6luPCVsz`&U_N!Da?Y`=BUGyK>e9J)h?sg-u(Oq)H9EN zgXQekldoPY)qZea_GQl-yxIQ2oCG43-Lo}4Z99PJt3xc7L&n}ZO-3!ikr5HG>hQT` zs&QN>^$%P&`OwSGmO)PG-FpENxsB&T_^BSI0a0DUiHJcmytTTT)vKXc7lkW6i3J3JORkjH_0jC! 
zO!#AmjbB?;4od%N!P*w-^_F9#C5{GO@{fFjw%h|Zp8Kc=dRses7Q zDH!fw(HVFt?2z`jE-3c)T;%wra+q>@O+tBcrV|^6c>g3nXIBeYV@LWMhNCQ_f(qx4 zLR3;jyJSvRVV?@0b5s;DMLM{bM7dvO8ZqxvE89`$t9$qaKUhmI>x&=QO|2*NtwtI3 z^V3SEca6JVb~EWMZS;6j=3T`H-qFjCCmR6JW9v2_rgD**QKvk{hxtsrfgEXG0vy;P zY#%#z(ObQJ9y|R3#Me0=KIZL_zblxDLA#Ohh=C}alTGtl0zGT+$L80NCw_+~ZDYrU zk1PIyv(JsAoEj4(NVY)8_pOHWuUFCv+QOyySAP&=k5i{M&52$b{Z>zY!v;WfW$TRa z00UUn&d2%p1i=$2dG3+rf`L(p-dpb>$)eN~0VumRMEQR307QUvOdc+GQ7`Yz2%9su zc7h~2Tb*;3u8RQZDBSr$1+l+M2}WS2Rag2_ojX` zRAtV3;CQJB72!t9WYtXS3KH8e=Z$k_Bp|y5ODi1P}wzH#R} z?=HlQz_`#R_n{9uyppkdF6b&=F?zB5)PMUcb)ovJ3J22{#b;gJxJ@&m59i-e$ixfl z)4s$VSH*&rl0f67GpNH)g&)4Glx~@n+$}F=B!S_4w)Zdi#l_bze^KJ1lTq}oN|#R@ zOlU5^xIeUF%esN_{Lt!3kMaeno1+UiBKaX{-W2d4Jb^kq9e}dNx34etuHdLM!`z?b zpP*Zr#El5@Zy^N>vh9nsnDT!h#e#9ei^YH1_tP7NxnB#GzkJioMA)4pSB@iClfra^ zE}jc%j(iFPc=abTi;QI+4GiDWk+xktwTZAja=j(v>XE4n5w#GS$D?WL9M zwlD}BVwUj?OC-xl-8k<)TV?QxRq&_H8uc`h$D3n4d&Fa?TwLi?70T_GO0(}@!z)bv zPu+DHa!dgo;hIsgng@N|msFzfHh5OoAy@a0qBNS7lcea5wXm*c(mWe*) zO0u9o;@QqOsumPBo&XG(F})MPiG9-Uxa7K28=Ld*-Ir#Cel;A ztFp(bdSKbFwExj7Y3X+eImBV|O^xH$q(PY6nMwKO3nCT^pOpiN`yoEi*TdybG#ltF z1c;KL``6cx7Dh@7)fF$;J)!UJAzn-Zn$f80nUQ(+)QP^&Rx2CL=}2_=Jwm3cm~zMs z>~91DqBa>t?%hDAx4t~8@b&cgz}2;Saxcd~c}7$DDpD*WU>`u6@oKw}9vxVDX>$!h zs@`3y#>+Qo&D?{*nbVuDMR>n{F2qU#zJ`fdcOFxVT_>YTB~qXR_G&bJoC=0o5kgf@{-d9 zKP02ZfNaKWr7t5fi?u~hk7C|}D1(ohQDUwISIENXk=l!T)( z(mi~bA$qCFpxds2NL{t;)-u7B$i^{x(dBp3K6{^yr42aj=L~|{cqK14ijy5F=&k1W zm{!r^T2r7LOoLqWB@9wN91$lMqHPp2oNtHnPC$g~ABGgU>oJgWsy2QdM~C%`u>Eq1 z!bJLJxkhQzR~kO;1nNC#I*jvcI>XjQ2~JKU7kbkdWdAyUsk<&gN+6t73r8=d(Sa$j zZ|bdUSl&_>DDu8vsutiUJ_-f?^k`_kwEWI5&=EWp`@;WI(yBK#$-F>{d6`a2q#`^F z)Q)B&PlpZ2^<`N&k#a_@>5tKBL#U=7KwE^*lQySADO;o`a8A2yCZ8(i`VKVfJBlob z#2y{w=_tU}kRkX-fn)C1E=}WJpyZy9|3J`55`;sQV}mQv!bE>;4Mpz@1PUzyPxBj?jv7mWVgztCrhIBfHZ`TuQg#6 z-1bQitT*623bG}rcEFuz2Hb1{asO?o*)`SzbcOd^>G})it!y1hol>(pM$wbrR%e^hMoccdk-^Awlo*edlfpxsXR?=i9 z4ML;cCPAYokvwq1^?u}HMOT56SM%N7 ztj+#!p#grEyA2-=yN!vV7ZpxP;$*rnL*$JWSr~#&L>~)^fOC88n@pYn8pm+9{z2ID 
zSoi+RABg1cReVdlD@}rDx#~RRd+c z?AahhbS!6~XzmjF2(lRF`i3!(vtTcng#p4?o4wV7EA%C-c;=%{Rmi9)Of!jigRn;_ zNg4ZCz$Xq8rOO+Y5i7K+M0o_=k1?S7jH}-ICetHxprqmKOSy=`Vs>1VH|umE?t-&S zKsnbBqc=)dya_6woj|9d3nCQz2owyUmY0v@tN26bXe;0$^-T7?7!SS);Wl@1${8qX z+QTrG2o}tO*R2dafc3~#cLCZkX`J2r7uUJ0KMmw?__(8HO5N$#xkhGfts_kbRt==3l$4}Nz zl^}AYyp>ChRT-VyPb9l=Gsc)pzKT1TKN3}e$~7-aXNrDDJ6!>;xcfo6KY3IK-v^a>NlHQ`PQ%M`I?8ZS!%JY2GK~C*n0up<;@3GFScP z`;_AvUeMjk1@1l%jsthMS3aW+?JuCI175r-ofc~_ER>UeviN?_{jXj*asMB4c zT*x6UwdLN_QqhYB!}nqzJVOuZGJAg_z%|Bv6dzpR$C@gCG*0@O<!#J)ivCNLK_ZxaSMd9aVV=3h+k#AXDFtxN1sQA!<>A3h_$gqMf3EBl zte1)M9CSJQxCnI8JB40QM_4p5xFVayg>3?0YusohDR_cDWi`Fx;ax^r%?*A+w-7tZ zll!-&DW(YERA$#xHGI_Ft$GLX;|Kj%&5P=SP7(P&aAE;sIeRXu_urqzbW8i}?}FZi z`o@=Xb^a)|UxfO14sk+q`dt$dllTLQ+LMzzDj3=_*9Lm{ivYAk^#@yD$Y5&K#aPr+ zdHlnT!(WmapbUtiz13|R;Kfdg$BgMzXqTys-wQ^6=E_mw>a~0+HhsBSPRPbN$cKrG z=&Gak)|8TjFKQmg%(b)4PFE%k+w0yP7eO0xmTt zcYp6ra^F6{?kTJBvUx_#2ii_*Ec+Eyn0tgP|B9M)Rpf5ob z{IZ;rd({kAYx&4U?v2$`1AE#xkeY}fUkE{o%WDMX;ym$BB_(A5}EZ_o2B9t!6<$grrn` zZREHJQorkmSe9g@4A&&{7Wn{#P(2MAbDM9Hyf>@gmn}}4K7>tBk?RM2togvwIQzIT zE%7-s(0ot_xKZ8&LnvQANjBJY>g6)O(l?J$2Qjv*aJ1D1?IDr>)v*PYjjef>K84r z04Gv(|6GNCIq`p(ZnQbt{`Iz>BB^#UB=O{w^ zW2qnjVay!aTle#hO=e0)2dsHcrk|_*9!dzBUdOTs#V zVgy$TiCF$EgZ3iHX+U`J1-8yqUS2-}E-tCeKrp5>BGxfuQMbqXM3^ z0_8$mqj)0X11Xv>20)t<=n0mhbc9bc-AB7oE-%mqd<*tF=~K9ZmPzT+SMMI-Df`4x zXAQc?&~?K3nfgNG-(X;@^sH|`m55|EN7gnX&*p)r7 z^Q6i`dqATzoP<>~7#3;JScdKVJO344iJ#63*cmhO1bRvu>2$`1WGw4kF}-1Q4UkNY z^`#RA=?5{_AoQH?Q8XQ6mKm55MFVkSnlxCiHR-wj5z5|?hX}qzIsty_Oh`an;?fz5O13Aylcu!q6zHb+hk2`J>j4!Ns{eM|(2qjv zX}pO5m$PRJV>N}u07s?bPpoj#*D85!e9Gl*xNWgeedmH30loi2F*?T5+;TQM3mFr$ zbH>kymU-OI5cddE4_#}>qEwFp%pTuPj>Q1C(rd_$F{b-0Jio2&Bwmu8_^Su4 zu`zX9B)W52*SeGKb-oWFPM+b9!$~10L!GfSq=tRc55Grwm~*v*GYy2m1|v@|CyfgSrx{j%Im!~yi@%BuD;8ya^H2HfizqQ!tJx& z#?{CBxu)#}>XcrqG1^uAqa*klGj`-`(Y6LfS>vLsPO4$7C3q-PKq(oT@dIm5$f>^| zS2VW=RFN0?h04l59bJPp7H%%{0U(ZUsXGV8Hr3ayWONhq88L2NtIK6}^a3Y`4eJ{o)YTa2u zK-<(2j0_XRIrWzcQ4tS(a@9UZdaE!!g9Tzctn)2QbQfv~z|jqsL~&H|Q6HaS#=w@n 
zmEG0d@ucYC1L}()xWfdeGaWYAp*O@xbJ#9~zMm^5Rg-G@s}zA46LpHP;&l-FhDY?& zsy!x8h3N(?Fvxx#dw!_2Q27Js3F_PP_8o)l8~hx#5$izneWQJ7PW0fd~$K74xs#?x>Tux@h~c7zwc@FqvZ_>T!%0I=$s=^8yw{(-Ff|+irkecAeRKh z%606aN)yf2Ic%M$CK#zN6a4qgql^htF8mv+Oa{RJl?v+g48SsalBpb~w>>HJtQ326 zF)vKWtga8Aj~;0vF${R5Nn;uPr3Se>e)0Utuz4Wg8~s^?f~R!ov$3>^J@dvE4nxXXQ_wo6RZ<39ENcLoq>*r>Gya1uoV-UKYIzL- z`Dj1wdC}f^zzQ+=_JGxR`F~_jmcVc8#y;}x+GzO2&@HK_S#ksn;lup*$mi6oDK%tC zgj9m3SmD0V!=M`@m*y?OHiwMtyz)2NsK1F)8ha;$Bh8`nK8QW7u~=V=;M?np>gj(m z>!0?trU!V+xtTTY#`b_H*qBkOwGVywD)JDR|l94b1Ri??u zVm4k8yB|t;9&+zYv&LNo|J}7Ez8Zr^{(KOM@Wzj{*!T+ZsHYBG4=3H}-o#lBxb6k9 zvfVpl!_jEbobj+B$NiCnu|{HO*QM{*tDcuQY#?$!M;P(OlosiZM5$9VaP=s$etP4w zGe^6Ed&kIhOxs?(1{pEymhP2PAafLtNXMHv5uSCQ2h=FxzegwGt)57K?Wf{3SiCz) z@>U#O2*&b)WMJpIXO&!|aEGp-Mc4y!{wv%kawUUw!MZ>Ji*UowxtjZ4qIJ6Lk)X4l ztCCElX1s`v@6U19=`>$>p)2^#O*VSiq+mYxx z#G$dw){Pu8h&)kc5{Ipy5aIN$mR(g>`zK)Yg??{;_u5nfBizg8f$=ZDwoo`dIpR~N z{2&twwMt;WBMfKcU>2GRIAaa(Cf6Q=j6dWk=oD|&cT>4Vof-WhrpIz>(udo{xFp_< zQi-mr$AOS_O(HRad3cS#uc7Us5ePihOmUk>&%Zw=@EVvGq$+BXRAKUgZTpM>C(z}W z#tkX))q6e9oV~?WUFiJo=$BFE$2hO&2iIEUlEpMu#+ruspI;xhR6R0Ek8)LXPG_Y$ zuws}bLHNg>i5YVDgdRNExjT|;!fQJ+H~at5^_EdlhHcxZbcvJ*NFyaJAl)G#WzpTz z-7O#`-Hk{{cQ=SMNOyP4fW#0(>aI;rNz&sO#qv zUES~Ma9BEt5(h~!?}D1lW$JS7c3uU z&}rg3oUoM0vORIP{^d`3Acpv!#_ui}(KKH3n@I!{f;X)5G|-#kW&Te1&pPr!MZ`J$ zj%x1Z1u8!B$)7PL)pss)rk!W6X#L<~O*s1jD!LP|G z`cN_1L_Qa+pZwx*=)p<@WFXp1dw~j`KcC#q@s*sS&^=t!cOr=WK9+MgBX&Yd)QRQT zRs7F!Aa&kOk9vQb#8FOwU~jD40ZU^Fvn1;oICMWj#}DB|VAekg;u~P?uL_Rv!%Uoe ztQuVUgyo@Jf}*D{lozK@)E6_D+Q^G@xvIa2`5~T~@ z^LuAbHwtsrPV{-P;25uJ3dIpcfQVMHn)6&VR;34cYt*rJoq?N01=@@m1dPo@F`fA z{c#&hXP`ZV9`B-;{h6d1)y!FzDj4}qL<++m3s<;GAK-|;hCk+g5m@!ur$9i52Pq>} zMgvvoT>Xg`Q|KZq7oo<7j7plo7;W7jg#+Z%Za4YIq-(Vl)6bZe9TMux`@U+k!4rT| zN{YhkSlrb^h&$nR{LZpUqGJ^fBi3sQz`xl zbonwB?8t5vNV|jpjdo6rd%O13zAlV)5Yve@1{rlJuw=rF4sR+_`+&@~1-J zO(08t61&EO@f0MLUkVJS?iAKfL~_0D5yK2hA1?_-R% z2Xg2rX~{mau$jEnr{@-a@zCM?`|}0)P+p@_h!%we6jb1&9Hovhj>Ewp%c(5BZ9u(E 
z<*C$!Si{`_s`}_}Vgd&w%fWGY;kw#Ip2!vwe>puM{rGWW>KM6ASx*D#fQ_X>H-SZM z8C{<$*fDntn^*Y(V|kAV>-omMfM+fB4E&DlT~KziT+91Cs-o4;@;!Tpne_Gxzzh5p zFmj)}ZF{QZBX_5ah^V2+%l_qjxPhQ(rHkIzJpsIRBJaUFN?`ez9PaK?;YRSD?Lcmb zV^A*Jop|N@kl#aqw%V^(gsW<}@5=*{!w+Hw3hI;8XIDKP`$=y*MBr4fb~u;`NeDpB z%cb`7Hw#=9N@<}i@PBrqQ@Gul>qc_&)#$G*RTuK=hdRq6!MBrC&T zGrjZ&y8E_Y0eJ8sh5lFM58o^>*pJX`T}17z+s|s+_cZ#f7L2-Y3b)i%@k8hl!jb`y zA(;5{^{TMsLjg@2%?{$XHH}Vl#zq!f8D$qh=F*axdt^IAoa{K-h|3>Mr+^Lm2=wP% zWqK|q0|9#v$DJzdp~|hCQs4H%t2p|ae#)sg=}rNTUUo?TMai)q0nCU9yef|Uq23ll zyrPL7!sLp)Mg_a7^Ik}q5n~qNitnyN5+frB+|CeMgay4EM6b$7W3H+{;NMF>0BSLM zk4k*rBC57X(P)n6O7)OGtc#fbWU@P_#i4eRqc-bjrbk-hRgah1-=QyaVU9l)dWjM3 zCqwl)h@`g?>`u#Mc&ZO;VR0~+gFx(_FL0>bRje@r$9euXMhIuh46 z)?b{2U;J)$R&T(elVUzj2ahtHU`2+Oy)oVe3m}>dz4+Ep!*U>wI7a)3w~C!@;Zu{- z-ntr<&2uyqg?97C58sfT#4>S$duFdo9y)&>}3Ka&on8PxgJVTF~lI6ipz|7a63@4cIf=W=(CZ4 zI-EI``xKb_LIe;X?6nkRm#Y6YOF5wf&__wDGWUvF-z1*^HksQ2)Z@7t|&=7g6U zE3HN|TDCeGW*N={PF}<~VMZBUef&ixRjSEnZUk+ro5C-=th?}I9$|!ZL|6^aV|zcf zZ}$CzIAN<<_7os54;%KabW{LD9b-wV&lv#NCG}7MjT(TMsPxg6XQP6DD5{5b;s3c1 zXGLK(kC(DZv~9l#s)0IHAGX_Kr?|miR;{5Pn5Dl89POT8rm~#Ap}vR|)_^TqH-C%5 zw(R1V#4u?v8$Ufc-Y6f?Ix2~c;4Na&vZ<`BvCx$n;k$Y_=T(i}9ajE%K;^)%v#gA) z08=k+VTUn3lCm|hJVKoYpYaW%-3w2NEWulMVBCAQq0%AHo9Ne8ani|Ar#KV_zT0L9 z2rp#*#yk(l?41~*%PMd|B9MM5c!CZ1vWbd2HcoTxqd)PiPf$pB*L)p@6{~er@)w

MiYB-&;l@au#Ao3}$ZhN-H#=MYeG6ea;$@2JyHd+$# z`WRg4;CCs6-~h3J3n}s`8fm4{%2YKBod0cby??j(YqXZ2&#oXn$)!_Nz2tl=bbMld zvid__Xa0LqO;TAePv0&dt;cap9_MkGIh*&wi-UomYdV}FJS7-dIt87Q`GdLU;p7zR zy3lGh=efq;swd-?l>MH}Z4Q5*8e{sN|B0|#TcA?GI23$!L{iB33TVHwagl+EU8>1N zus1`%X2aQuS}TM(Z09jHg7+l|q^a}<;voJ=ycL7Xv+6EVgw*K%6OI7*L%s9sty{WQ zs`Fr=->op!ZLXk)e78rlu><3Tv23>W%>6)By@+EWyYM~Zp7VMq3B^lJ!$|^87}QMr z`dKiP&d7E@@!ul%33$VsoyKW z5mMrrhc6Uh53niz3X_aQeD9T z&I2`V_HEuXRvR2CP4;ncAsf|B=VP-2@Q^-d_#-@qg#aSKn2pj~hh@qjRI8U;OfTU53C028lImYz7Y4We zH6dK_zhH<|FgVA%HH7k-e~}NXu^;X_9CFbXJ)L0#udbyGz8;6oCB+C0^^%t;MTMvt6+U@i4U$zoM90Q&JE&=PFSXCB(Ae|R)a=LO= zlEzGK+-bG@i^>0YQts;G>vLTw1ApOxxWin>k6Q?1+0!N^ z>!w8}+t%v0`;hiH1#(}_r+Cj)Qe;5c{p0tDv-2Gg#)KC^4a!Qq8S?!|wi6BKWV0_p z;A3gyF^dQf1p8@6)h7EqeF~5wj9d?Eh6GFdSABtq`2+x(W9H0>HHH|-C<;x8L&8T4 z+ifq+(nZHcFTZ;#B_ZukAJ5AN)Y3)Io!R65`c;Lv5fy!2+MsYN(H^c8qpCgqBly&$T zM4a)x7F^lEyqqwO*VfTjn~0kIS@J zVcU}Fol8@~sK~uiYE9d;GUt9O+jyOdj;~-Cb5PmGQy6bQ8l?}bS-RIxZ9CO)OzF#| zi}nG5gOE>4P63$0%IRDHId7Y;y};Il1cQbf1WOl0)sA#dlQ7`>WaN?MLo>BXjU{}E z^`B7x%tHvs2C|dUKl>Is=ld1@Zwdp?no(yc#|)%_A-_`?^|v!GCimpGx|jXkN|Y2TH8ZfY~hJz(d_Ma;_otDO2Mm& zEUe>rn9=QRS?;BnRElU9Y(A=3OwW!JYzvu`J*G|*VD+?2SQK@R@q+cZe(=4|*!ub6 z;(Vl4g=B<@^Y*XWW^a~H+_rgdD&-FL^5HY@pU`}U7~5|><;RGXYi*w0}LFN-;! zYJniKW@+!a0={<}%pg=Yd-kEahcWw|N3s4}Dv||y(;jd~9mG1?O3YpU17f{Q*;z?Q(z{ppIgzRp@KjBD zgKfe8y;yNaOZ)m{WDb4I=8p=_U>sH=L%Qz}HpAP&Zi1byz+G2(pxIJ-3@rP(pcOTP zi^XoJUxW#8`PVkzoS#hYMTy?U%70Z#LKFJSOYGrl%MyTEgYl+d>FzFA&*VN|@@=la z+d_d1!JKE*ew8Q0+9ZJKAYs;)x9xJ@v(W_vZL_TdcY6!EV$8 z0k^%IPR`AyUgPT_HEVqHX*X9AU&~Q^`$eET!%ViX&V8vdv1>I4M_r1PDOghffY100 ztX+6sp2ld-i)=)CwDs_^Jq-g;C$XTV4$(vLcS?Xl? 
z+-+TOy7f7w+#fC!#x`HlM_3Fhb_;k;hma=8s$~j@w;)IL?zBKpq?RK{q5E7?n{hZ7 zNjjFQ%=RNrq%pV?&RN6jUbK5 zF_Zbr{yVY$s782>Z&vGH`^qK^6pFg^&xIzxZ$$rf)h13+TzTjxs>+qGJBJ4nG~si z@u7NxFk(n}G5knNlI>i(AV+s*=_XLmPtKwTo;8W2d$sWPc!=NASBw*)zc8`H4Va3x% zQRnm{Qh-|fwsQ2M1Ga_a`)|Vss0I@><|XFTjQS5u#zSEu@X+QJl&X)1(dlPdhq+T- z@90g-!fooWa>z@_Ju{BWJtyUZMQ8{py~bYuZirdRbFUV@{mOsP)!$Z2rQIG0Tdaht zdpr71tsL0;F!jR7HVUwtELv9vPx0ApkTMrU@y%f{%C^@vClfRNk7n-{*gq+PW~0X>5H)_A`^+?eC{E4;E^>tUy+}`eJ6L z%uViMj%F6~dGvPkW~;3s*hAFgK1Hd4SviBMj>lZMYl=i@dz4SP$ht9<*LCZ8<4vlJ zSLONNmuBu?_FU@tNXqqId3{w#KKUZ=d-}Rperq5BdHuYrhuOG)ka?+*lShHPhRz0EnYtGNs9=$)IBTF15>Np|}f%I|0!eBI6sM+=5ExERw{KGvlXQo)s zXt51!uvf3ae8B-qPUFCxw`7zMmQ3ECT_bU$-$sS*eJ<=(7c=kIy+8&?8P!7{B@_U$ zNpgmk1s7y_bO1BFW~D$;_}{wBkC21EK-FnJf^JwC0d7T5k5FRuc}dVD#*rY*=*u)= z@}b~JpQhX%%^H1cSsyM9brgEt+HTc+-o7K7 z5I_;&KW}ELOGRo^;WM7gdvhUobwrM}*oG37!7k|TXp~r>#JdcOUlg$M33cqOJt!J& zKMh*!PCcY{T^|1-m1@Of)9Br{$bJ3NwJ$6wn4Sz1Yy54(^vJ>0V$MY_d~ZW1m6^?o zSvb-*KAHj#S9dMv8N!z%yHQQp#marW@ROFIo;6D3_vk+!&*6ofiaEK^cEt&NyTJD1 zIDvWU^^Lr)b9lC}_C29Jwgda>S(+`2^TAL&55Q96~gmS>7-n8&4Qt|I=zb z6v2>)i?KHH)JD--z3v#3VPD)UP(S#QW!n!e$N_`a^}fvbUT)WLw&ne!B>Z=Y!{#PKsurSnFXL}8M-Uu!E90~WVN`jx$~^vCWGAM+S_dQ@IzTK zmWH~O5~yGYoGRXQX0st~qb<4jvJR*)5Q-XZ;~E z%-3LBP|IxmRAkfSFDxB;&VRZbKQWrNyACDwtRnMTEu(`S{H`zdP4oyZC6rVa`K#af z%W*9Hkv%DmE4K2i_dJ0cNfpr8aCw>%dIz@J26mqF{Tg&(Sl+%4*KsM^-cA(GhANG` z_56X&ti~PrZd3H^Yl_2;_h@6)ZZGY3M)hx)_NTM%ciO*g+wAhJ-qCqE8W=8bC+Wh~ zUFPlO=RArP-{!3puLzF>d20v@#d~}~qyu#>)iFTtK5>Kknou$M>Veer_w&}6xPI61 z>Hm*T5XyImEIh6Dn_E3L80o6oGVl~cAed~w*bZtMcWfH=9bkgnW|w)HylVnUMG+o98;-u9+c7)62%b;;GXLbXGv+!kwZ0sgN3 zKD@jwN%xlia-DqJKOH`#WwS!UK*>auKGjufc3mF9YnzMdpA4_^OnQ^o^h$-Os-nZ| z`tKlt2O*N<8?b#vZbk)LMRAEL2UlA&qJ1>Bk!z%3>w}HO5X3;$YCLHV#`CtrC7R`cm;^2#oNxCbqC6fmFxo`@BCQw zU=akUF*rWhWYB;3yzfK36(sS$*DHu2yUWr#`JRC1ld&>pt@|uQcxVU?6%2OFr zA6Z^a^R6=CHFhsO`mbc*NQbzzz3!*9npg_z*csmYYa>OD$5RvDuC;bq0=I2kc{?c^ zCWQ=P5>h%swsxwCl&*^h8f422)9bif{*3NFcfLB;>(twyYTj*P-`!tMl%G7LZ~NX! 
zSDM4VSB@yt^yvEb7Ne56zazE!m35mgL@(dkT$B*2JPmSQ>%PdO*7CSYtXHzQN{MLJ zQtM;6GteD360&3sTusmIw4yRRT5&41R6h)Ow74qz+C*}+=-`xHb?}SX%HPt=3g6j0 z$kt^fcE$b|&Gz5sBiyB0ZHZ4gPNfA-PiNt8h>?f%eBqKX8z=WdLcip>gTR1RpR!CV zXsDq*#-OnL`HvPH74D9C(p!f~L=n`K+sU+LG{M2GIih8GMiK4c?jX1CU0X9l(VmoU zof&QK*o;N5tZH|vZiou2cQn~i#GhR#c!-y^xIofp^4y6#DsNGhVpEjc)-pXg&;Q^G znGx56p;nbD>h$=G3<&V!PX9}XuFW-2|5eiK!{tjE{}*Ill!S;k_B_CAjc($8uY2et2iK@IhtpX)EnNc;Ij;!oBT@rg4reyq zh8CM;C-%_~o|dEI4&)kFW4xKKZ-O~Jp;gEsFM^#DK~F0m5@q;nN;k_rX~xx}PpVPhhoL;L#q-lZ&qPDikaW2<{Q7U9S#%f(s@g2bg`HQIJ7@u`sV z9!}1(0t_L zE4}v~!G;_U3cTHUa?UNVL5KRy%sug$ok{b4_|eVAJJ6QTy#dIuR^#g7iwym+jEXa2 zNT{phc6$nyF~yXF9)EweaZ&w>W>TY%>GE6medNmXUmuU24zU&?NI;b`Uylt$-H#Hm zi;jJ+e>@_nX=s)jfmNPN|B+IIsc^AtUh-7|@#4txLG3AXxv!NgKQlwarf?m^*EoEb z7YtS)omo1`{nlpbPkQY0+p$NhaJ&h_oH!;E6nE~FN~27`{^0%N590QTtH?zOj;+0l zXqk?1BWxfNgShPq#=wvmL2~aD+Gd+Qvy!n~Y%X!lDBWT4H#_mg+oxAcbdnM)Gc`v% z0^dbv-ij#%SRQ2v72iYZOPP*ZE`k)ij-_AL_!Ulv1>wUFg$nc&hG?cRley^6<;@EV zOUj8wBne=oliD+m=M6rqQPqxfCPL`D*a~0f&_Vq?#t*{~}+ELI}+I z`PE%RpE_#xW><*XyfwZo5&%#Fv~*wCkZY7xy_tP)x4c^_HlDpi^QV5JX(=$&2}|EO zfu`B|Iz98pKA4GBDPW~czPyhcGkrtx>Rj}({_R)pK#X;UWLyy7?1# z73QEG>+TuyB^AJl-nteP?_t$G#!PmGm9QPF+vV~S8F5~;DT`i2ek!c;5ntFpLm*f} z6SgZB`3w7a68XeXSV;P3FB(VMt3umQ2lb@C6|&358`$ZOzC|V4Sp&#Ta%Z26?Fg@| z)jd*dmK{?6ah(=#7BqDJZw&F|#4uLBBkE-v9p}IQ@#c9(MGX#R9`?{~_B&u|VMq_e z1EH7}7YVVheR97-^nVJdHGrh!<;iWGOy$eZL=~T#G__#rS%Qo^0La({07(hTP^5Sw zQhKr(nhp6(5xCrsrkx&`nXRFBRxaNOdaBY*$rHYJ)T7x}M+emgmbmaOKRCqS8SWHM zq&g%}yc8V7s;Qx8OkMhHxFF|Hin`1aSv zW`u_o7Nb=TN$rW*k`MDhR7@B+z}Pm)Jo^)V0&C71>_mg^Z#xDB`w1R-j-(}BH6z<% z^pAL*uNrwB;nqi{u|G5{r}U3zVD=b595EhSBJ&>ikAI4jUKUcIH;AYD7CwCROJX=M zt_RJR7#YUw-G_tTyLbOZse>4B4+}n?kEICB%yaK$#$f%=3f`GA>L+Hrhyt+jPhzk< z%wH|t9cnKkB(Rcy%9J9ZHt%;Vac>V>Ph6Rr<+*&A3E1Qwl|HP~Qgeo{**K51@CsuM zk=T&lN4HrQH$4EgXu|aqzIRvkPJ8O2I-I^M15xevHXY|TcadZz9hl`Ny)h$nY=QIF zXp>LXUnbJ)OB{Re)#8{4k7m4%oU;o!=VBTr9TbflRV_`_Y_?*pZEZjK+Nj)42hful zlaKS-5y#rca7te=fkcIA&HEAbH}$$<+olFidd9d>mt**HocJ=?WCTtLYd4#v-gZv$ 
z)aRKDP_x~P-X*k&RRZm=Mp{z;kMFs1OZ_xzs=|s{$CghKmcKbFTL^W}M&L+3Hg3)n}Y& z0FcMNpIUJ5+B%mMxXSsX_+w|_l)a6CC}Tmuz(>z=uh5bBK#l|F#8ibhep=>mvDy}X!en2<06Hut0H-k?XLtGCac~biSnN2P-O~po)YrvRX0%u z9f!K7db$Yn6)rd!-_7l;V6do~jp}R@VVB8yI$N`pYEAafTYTV3xfs$Kho{#fZs!!R z!L;LS*Ihd2?e>_NTd!M=e)X@m^zvOKlQ#2FlwPR3gYPJ@69pEvMdB=QvekUsZBb)grsopfAr`Pmv|7f0Prsp>{0J5@k9X+quqL z4`F*3G0Yl@OviK!$Tv^J^Aw{pa2KOK`^Ft{^2{yf+kMo%ulVlsJZ)(9T%v=^ zWSM2Ls&(P`PYyvFt%rC$RAiOS5j~Mh4Bj3E-i#E#)@dx^B#n6fszVUZ z2p@3T9y?a$GGJj=Ks0q!c54E~migFPe$7;Xo$DNmj3<=$egn%cxwpY4t~SvsIs9*t zIOOdn0&^R*Aj;K#Mt5wi5&8=?u>1|{*Jih&g2tm|LD^tv{XgQ;{<3}tx-dBO^7aOD zQl^l6^F{lWVJYFBJ!HSXR?Q`zS*Um%`*=TLt}L&K7;_b7omt}SrJLYkh9rxtDJJ#fu)FmC7lOAx7+lt* z6NEJ6SvAQK*0(Mam}MQagdafa&rt{F_&PiFZc=Fp6)zAOTlo7)M^zf8sg7_EG6g(1~fH{8Gw=; zC)pAV@@8PG@oW6;$lkQ$pTv(G{CZ9j+L9NnaK5eD@+Zw`>}!*=i<2WNwC#Xrq^oQ{ zpLBMq>Lz1!>nB@%L&Jt3x2)Z33lv6XeF?XP;tC0g$EA_=f18%LSRSp%$;y+);=NA7tHpO-f<()xdWV6Iy4s8$wSY>W)rJs&urY;K9} zEh*gTi=5(hUAW}^FT=d(f;75h1?bw?(e|6=;rgGUB^CMhDJ;&=9w+m$%!;B?K)Y3q zy*cn?vubzzvSHqu+UOu*d?0x2lN6LO^ZxX)rrt=3OPOO9=1dy{tZ{-!bj_tc#WfSs zdMfrfu=XVjewOFDL}7~$-SCL3ikJPp>UcpfwT~A)^T&MFnDAhs!&ptw?(3Tkc__M+ zx0aHFu@Z{20Hz+#Y6fmm`+q>6MGH3$;NlaC<95MiNWj&6 z`yN<$9wIVOb|z`2eDY?7z3>LZq`6=Icgx+WaDi;kou)EJv4XN5{%&#gPUw?=0hNCX zUd4!E_}Qi&bB9rfm565!;3>WKUM;ejT0}wB+4psfhl8 z;P=_v3eHbfs*jt^Kb!IA54W?47=+IL?0M~@t4dt8vtqJianc#A*UUmq3p*U|BPxY9 zLTLFnvzN}COf-F(ug&;Fm~GEnH(d4E=A`aZEVG26KbWS-Zu z$zp%eJLGI0Wzkr^$hq!T|KzVNKujE*-{|Bk@-2-zP7$rh368QBYnqODt+LEH*4J(w zD{Br6=-Vm{$W2UZ63ypk2+NDczlgiyr>-NPbZB5kizM}Nmq|+yx|tnxSkA*~;&X;$ zcH$ydX!c2bJ9L&XS{dHYlcbCZ#cA7(B;l=U=C-XX(CTe;v9&AIYG3cF8{fD*{Oz{v zbA)BSv}fwt5*B5bg+592EO^bca~`-E5Ag@aTTU6 z_W%BO+y~Ij(tFp!#`qj7C~+{eZ`|1HA>E5z(j9lt81=svTdO|@V#FB0a>&$rLmXAV zqbM{W3jf?f4?f98tuKC%7i zs@>K%erEaR6SU<*Lr-5C@vwc7U>=w4jO8%jBhdxjw%skE#I^)8#&$+zqs5&;)B&F5 zeuHSXbQ!}t^J1}==JE4CnjoyMPoJ+$jJCdLyx3=3TZ5?0TWwpq82VRML4rST5SykDaR2o9{%XJHqJ#J49 zj>c<-o!dByC0-~!#v^4j#*5YPY$7+m(~vh9R_l=^Gi5b9I@dA*ilaE&)2aBipkCv#_- 
z`nR5g59!SPzEG}3=RSVmjoyriYadX+HhlQ4*sIwBkqT7^#PDN0 zvW`v+h`e>x#^5JL3ILr0!3)j1`CtmoPYZe$+C@I^Pol&Z(Q5${MGV{L!)%`CYWRz`nk44Pp8c$DjG}RG!_`H$@SxbIx z3pv?@(uh@%ys~h?s4oy-!$ea@a9sX@lxrGMdrGO9l%D!GLd`W6CIdO*Nou=}aS|{u zfjSJ5#9hl}2+xc#6h8B|);xSsue(qy)nvu9I(^#i%F?%s+wi<~f8C|cZK|@Dctxe8 zG|iO9`uCv2orxtYA!UkNep>ITFV;#zrr9ziHW#0=XCPIFrK8#d{jRVWr2CH(rLGX+5sZ0Op_!s&9LEgjyIRJU zyJ?24Izgfar)y>DuBnwSK|T?%t`%+xkKXYv$)r5SXW3$2cQLMT30U z9Ymfzww_d{hqe}gB zKjitc4#j6WGlVtRWIP{G37FrrQJ%aOYl5|PTkDBbl%5^V%dS?{_L6L5;G$yj6Q&)u zaxy+zTElP;_WY8GngPenS+^Y3Rh8hF;Lc+$&C{)_!A%#r+r0N0?9});nkXz3{sxEd znWeZx%;y3%(H^_>FVq$tlu!YQn5)LdFvInPvgKxYxzF)kTN22YDuBaxE&R|J`)R@A z<6P4`eP-Ua%Y(e+=A-w0cX^fX1hoyRY5;C1wSLwmcWGSZgp}V-**}kCKf+n3B5JT%3qhbi+{!7I#P2=%u2iM zW=_Ztg@-mMi)gVbMm0StcF)uZh0UuU_KPZbRhn`D74e6G#i~imAC!cgu+2QRQAT^2> zGe)2F!3|QHT0xnhwl+sJEo!l2LHg%O66?XO7_IO?g{NCyEVy}_{(dD6CMtopY+VWj z`2hh&A_j^F`;u4#Aln|Rd1iC}=NvFbg?&)>9dwM*oNJJkPXB6RTN)cTLUx4Z&GgAo zv;?+K<`hJq@PP1Fv$yQjQSS@gbV}ze>eLd+0YOEWE=;KrS(h!38}IBrLj>8kV@ukWwmB`Kq;0aFcPLox#lj*eeQt(_ z%To+p3&+?;TGbzT<}7Ibn4|YhflqEdq5&aWQ0d?&LcEwVK;WU+1RHpVy9?uW7$0BB zTtOY}$rXr~pg#CE24(%Nv=6KL?t*x9S*Bm17Kx*P`DbO!Nt_Q&UW)& zt$A0S4S4%QCQ+S3VxADtS3a{4qUS_DRNL*OEcd=dhgP%K07u#jBtMjYvSn^$E770* zUv8<_tV*jX#i8YwXULz-X&?zx+j21+imi&fU00r>*oazRp4W>C`8)p< zTuC6Rjf3jkIq#WI&}p^bX5RN7$5bz|h5RIVla;O?Q_#C~zZ&k)|3_vqpU@QV`rei>6W## zGFlJlH2ktdo2+UKO!hX0c?B*~>b0xpa57n&kD5xkZJbL5(e{OAsy__z&^pRIww%)w zV*coi6)r?=q4a#FK#CmF_^G0OwInezXerp)StZ;#B)eExem$7dcTOi@^=k1DE5lR$ zaE4#1(q>1orw&zcW$)tznrvxCHNo2JEHAc~`rC0lz{VZ^Y*7$5X4fo5Jy*o`Fj@7N1-DQdy)GX!Om{ex)*N4_ zWew~h(R-B1x++y9dkqj=+MI8v-ng7AkC$H<*igvTuEP5qA?}}c(lT&!bDoW$)Mp)d zIKD(>ov9(SAa{o=E^VTZcCp7DUnQ*!cFIKB$x;7a^44Pa)g3nO)jDX4Yoee{!WNpR zgM)W}=DD=WFA8jPSS?#AH(nM6XG9lXb>zL=_b@(feLc`<(32j`cL6l1$Tn8N%N!H+ zxzePE^2{XdS>4STWH@-Ft5EVCjoTfl`LlXv;^X2RApkeHJMi*mc z*80?*@IL6BP=4o`@`xTA5}SKR-jsI*i*TzN*IdayD%-x~DBTf8ro4f=>9RmhagBLn zl)9z#kXj>PR}R)l*qxO=k#U^G!p6^MQK9NXTm0+WbK|)NkPj)ku7h!$*JUW2u-`W0 
z>lMEaFdXv?M<$I8{-d+Iw>BeWs9$m6O5uWNGpI8NJ}HYgh_TusP)uZF0aHr4$MIRv zFyj6Fo(v$PZ9)862$>*~p%Lueh3@G8Tq2pkPhVRh@UwKGe;gUD!FnZh;|u)te+0>X z$V`c1RU%q&=LNMFHfvo$oDQ3^?P8rfCcbEDvQ(zXhqnHRrbEx`$)@}TJCEA#dm6(Q zY|?5S4H8}mQoKbpp$CZC&@R)VnCD7|+!r=^=EGm+oOibuq9(Ub%Z%d4fpWvQdDmHJ z?Nypz+uz}OQAem;HUAUGH>OO#saNV(;9VMob>@{(9a}qznbFB?UZ)V{@v;Noj8zxV z+aearVO#jS|KRmC@U*3nL{?|biCSrz&#w|sIOi^G|AeK0t~EMLvAf=RXk`23O-K?h z&maMZ>z>8Yjh^>yz|BY3tYb7LFqLiuLW~_t=N)Oadfj*26EhAxPdzB6jiZd^IK#TN zlPezSx-R`XxR9}>7#m?&fsW3FJ&v+eQ&xhyR2()gxL{oQHd)kKRsBWv5hlQQ!^3ql z^EeH2fxc^Bw9aC=*X{+H@z?LSWf0wI_1h(8I!V))pFq{WkLT|^@}eLOD*$XcBRawY zkp;pRq*<_YaG6_mWcOJSmDA1tLY=QXUK@*Lkmidga%jh4-ysevGLQeTxx42H)$ag3CY2xskUHH}UN9EZr z@3F8vCjH2!{O}rU72dcKsim1NIyUVMPO0!mzqWzEf^o(3;^NJ+P!?`R_)9=5lx z=E0?V$eyiZo)aRo=-nG#?^^=*tJCV|!Be_=@4 zFpRe0l^K9&Dq?QtQno6{jxsdVK**GUnG7()_mP76x^H@jMgO{!INWZuVa7F1IFNaSyiU1~L2J zYj`40rO{5|L=qmQSOQn~@rqsHS)ap{?`Wg^`o1HOZs|w0ZcO1slE93cxF&9at}paN z-|1(auhwZWi!a;}SS}K)dbUXkORR82sy^9}@al=cw(aE!^{pGbUD5_&%X+n}you#) zS7RFM#zu{COh;Zl>_7xv1Mw&pT*|0FW1Lc?Tzuklyf#xcsykKfAf8fwjXa9<>OSM5 zJvxG?h(^$T`n^5QL;c?6(t?}!12kzoRdg21cl0y8I!-ps{IxsnWhU0CtJjAGiTx== zi$^&tzIzt^(^EDaqBe12B0v^8#9*Bnj?Cl-X28BT82`QJ7eSs4PCE)Way3Xz=yV7R zl=F!5K7U|Qs$jTypSiEa>Ds?p5Yb|H80!A0W-==eQ^xgba{W^7dDcj}B|Gj!x@h&= zF2~J9q0jhA)l3OcnOE_O(q3 z-xHz7U`I93Q(F!Rx=2;GZbb)J3?ewYz7-&PzRkousI7;=F!(W$ki+&rl3Be0E??jm zaR49-%IW-T+opnVt%X{z?I)YHAU6FZ+9;fX)aj4cytX}d5>l!dsKU%lJ4?#pA_nPr{ z71WPHAVa>z%s@JR|M`7l3=nH3As>omZWmK}>1)B?l~{^~ov((c3!}nG9xf0NcS?4M z99Urd@$HVVs?rzLl&j0Kr^RJJgf$lYL8K3?0zLpoUt%llub|U3VV8;#cPn*IIQa+$ClLE%G*u}rl(z*&Ml<@_ZI%IkKlrWw-Lo0p$kk7VXoS5I7`RHh~tuva&ohI$v zdN5My)qUo<=2xcRJrQye5?> zWoH4AiL=&S3gKle=hSce35fe_Fq{7!6i?;5mxLHEI<9|VT3)t4F5-I`vE}q$#pjD1 zF&GNp+u{?X-ycO-;6JQ|zAt*8aV~M|<+|Dk!G+v;6yN+KqMu_ixKzmK2;Njw?e|}K zY3$2!${%mxA-<6iQ%2rqd{Z`TLYs!}Yus%kZ*p)xoe5%ksS>{AO~*QL3j!S9>amfbs6Z|u^u zI%FIsBT@9eQS2WW)w2HPqBGPHlbGlsltm3!k))6>9r0=T*lr})VFZYQelMu0pfM-u zJvgQ*9Z&x)&yQBxe!h3<|JKJ-TI^t%ZB*JTo~Gb^IxdF*;)$oCm8}G&hzoYAbGj{} 
z`>m$ZpFD5=zH3Q~xU&$YKlKJ3brW{}cv|4arGD#5bJ?o3U2CZ$k_2@>C!0O*`MGM7 zo>rMEStq7Tb(PpR^DnZ`LL*xF`ooa7w#j#h3(uZ8?8xFo?B9}^+y{Pc!{N;+HD^<5ARHRjxm!V}3Al4EF~7uDjh1x~}5}hY9Bz%x(=O zvW;7>HW(UOPo1h{y^!Qk{M=?3+v$Ki3*U0{b3H1B_J*pC7l<1$A z7PewSJ)6?s*h_TmqW2wfc+3Vax8CEQ?H-k1T0FgMR_#Ok%v3G=ol+`x&WP#-UaQyi zSMwjajxNbujnc%3JDoq0IHp|%4`&^z6*30CF&<3Sax%1RKB-63@f-0@Jk2HWMD04Z zPcx8QHE%@cB?#X(?8f?e-RuFun=ICdPjm9VxYHvRGJH{jeHow@-ghzEipH)#vn*fX zc!VOG0AB4_m`qvJ{Or%+2YL=y>s^$&YA?DVN)aEF9rmRlMPO9;r zg;q`mbLoIMQ`FAyKh(Zw$<%UemXw^#HMSLCTs2v#pr`2>{!}e|#1M1ec0twzI}hO6 zIXccqx>=<5&8)cNi*ZZiG~-oBYnFK|&OeafV4y$9!n2uH>yZlDEZhLT#PjEKzi^Fij;eldufor0(-5ng| z%vS&Oxrwg%o(yMe8Z}`x_ZO-E4^!tD)@i(b{c5spbE?U^ZSvUH;|08;KwJCrjA01hkOPIuL-g~-JJ4?+YBA7pA_wmi5%jL1es^TQ%vj^^ z*kK>_xVisQ3C9U@lT7Hl<|DvzTrV4zIpLBQp_Py}fAB9TjOADQLYzXIb0st%Px~eL z?Ny`q4fQ2qCpEl1^-}L+Aqqm?ue&~J=6IE=wr@L!gjw2p*OkPC63TFs3H&}+NUTqj zT360t1E}d7YPMkQ3uyyhECzT-2}-3#iP4&Smxv5M0mX4cl&%Tu~zJP)z=HQri`A&^-^FgtB5MF*g9J0Z)((CmRhWlO{^2MJa z!azS-#!A=|>{Uv;9o;RmyMYL${-oR{xxz+}x46 zKNfLiQC41;tKH18m)qN<(z6xzqRdDc?`oAr^-c_d55o9KUj=RsERE%}3is$E$L8RP zzj#T27x!xqu`2lqy5ey*fHElNlL2QkCk@zlX@b|;ZyD=D9>$WA(D?kGntr2Z43c95 zUxlH8Bi$Did8Vy-x8dmT5=exMN+>FrJ2~T^XskAm>mJQFZF}r{B4&!(bsp6%(L&jc z5paU0*$NqBGv!U3>eoBu^k1pN8A)D5|JEH**4^pQ{$A}8i=QRZWPj1=^oA08q3_Kc zgi3tCGCR`VR2rx^O)Sh<(r8sF0GW0nvEjiC{c`i2cgMNr!Hc8?9ch85QPksssCU`$E>`>%mOL9|Gw?ivq_ zMy>qD&>F&)ay*Sz zo$jaQLN3-kyoS7z&u1XA1EtDzy&n$`C3QOw&BN`HM11kNOR-!NF_o=0ZFCr+mV@Rb z4f!`8n7FqMPy${~!`vf{!6&S@wtgRtH)-nNpJW5&-pS9mqb%f$uroKLkmgP>M;QYb z`EAI5PTqDQ!BaB+=f?q?c{D{1^gKW3dog)Z}F!7iXs!=Yil2f zy0TM#@D{MWDl=onjDs_ke{Zh2^Nqw&zT{P%Ld0vwiFFkf)*AOIb{vfJe3yhrBGSxQ zVtIgiD@rb7Pc|ijKC38Y42eecrFfTcZR47+aQ)eI(xIsIdyL#l_0OMm?8W0RA+{jh z&clLoV_*Yj7a4R73H#&IyF?dBEf{ss#ctd(K;uKHgr-BTP-j_?9o zqc6KIGsRlerHTg<75ByqLy@oR#9;HK8g!Gi+TacBcE7=_cLL$?jDe2TH-I?TIYty! 
z1v%9M`)H#~Jn+BA#S9VHNz9|9S6Me(KFSIrs0Rrn1SzvnfcbwLjZq=?gX^yeSvb+OJ?n!m@3B<)zQ1Rvz`vn4u4j_g*Q#1olB+K$~AgQ0~ntRufQ zN()IQF^5TcIZ|abWkbe*R=75OmkKXd55oERjnT(K#<&1OYB9W#GMzO7uP)=Y?Yfax z#TuUqgfFdiO+Hd@h*AGpK9%2=A*N!~EE~I*$d4A)#V{2|n~QjbfI$aeEQ@@%(NmsB zP`3=5a%#Egaf9`+0BwA9X+u=JbV+}J8dM`Ma21LiXn}!8Y4UE1-a%pg<$xgvJuqMnY za>u&JjMt6<(6Vh41gM~Qkcj?*8Jvi2)H8*^IWPDHw92DUoGDxMLEdphksCM%kinis z1m6XN{%t=0Ai|A1N1lQ6b2~XBu(@AoH1IwcW|V6Id92~7F1-hDZPXv9K4LRttD&NO z>a#Q({s%Wx_TZpnv~t$8ewMB7c3X3#st-OL4FL^V_C$dKAROq!EgxARKmJ9x7OHy~RGYdkk-;^juqKJ@Z86{tHb8 zb`s1)lq_9%*Husa@|Uiv3;ul{h3k~;!A3MRlo33}A+`K@1C%t}j_3$KLv()9 zfTmLo5og49+6_RVQY_rlQYDZ32c zK8KWWW^Qv*tJ8f4u5ID-whVQ#`LrA&zDXVmb-KcX7~FdnK%r`&3{3=jwJvzoJC;P- z&tPTRDyp}-;y-Iw72^9^fbpY94*yhNh7K67ec#A(-~CK%#GNi!L*R>mAqs>fR{;F- z3r1e?#mc<5r{6J>6mDE68Sw{ZQ{y8+i+?5t;hp@D8`$# zKC0qNp3rw19@lUCgV%?nBNLNs!B%M^FziYiUh7S#9R|gVCAxItN9Y<@eo!$w!Zm&s zIIZqchogI?ipwN%*hd$H#nqX_XbxB+p2XW6SFPrpRw99jbmM{_*^2|zug<$ns?|JU zIdFr!^QN|2re#y>qjW80>yKBv*Y^iZY8z)_FG=jt=Y&08h6et!dg5Y(C=NT^tmZ@~ zG7D?C z_C3qQ@hX`?z`c`>Mi|-JmsDth@f?eB8Kq>gM01&qj^4UO63=i8mh;EFnK;9xJjnX=SbDj-qdvXI=bcs|@Z#;1bjl-1=+a4n z8JRq?D*nkN;1nkn?oq~|{L4co9Dn_vmj6?702Bu3(oK5~UJSlA6z-Cv`V_=*(5`4c zV+W(T39@WfeFoN(;RUf|8Y$Drm>Bs&?5nUq5UJ<$EubdzrshQJn^@v8-ZWx^!wf2P zy$aRAzA27@%7xIUt%ys-!%Ub@1I*G23R(p?#hC z*g-sKM(tPrKn8}6uwhGeYVfWCxv+Yc!;fI@Nz=>MTwv_KiVO!jRnu$MtKmjrenE%t z6L)ubO~o~vBE7Gh8I^F!7jvc%R&dsVjqMRw6$r z3yj&&sucuc%42z`;!i-DFL-wkyj;#&iG!gs)SZIB+*qz}N~^2pG{J#Ro+*Bre*gLs z*I>hy*+7xN?0RkK6l4B?<)e%&^+UD81cE-b`jD7C-#+rzLxzZUzZ9G@UiVR7bSt|4 z_s1m$TK8ZJ^HGl7Y6~UhgZ+={)UzeG2p^97Sux0nwqa8!e%&jr@afSKh0UIEjgL#8 zo!0C3eqdoFFVn(HBeduEKcXhs?2S{s$zn9(myT$wO-`te2WumfnII&S*`c(6jo4eC z&Cq#Go_w+3zHa#4Y5O}U_U{30jt^WcSRY6pl)-Pm#Va}rioy;dQBtFuu32^a$%wl; z*mitu`WXo2Vu8ob03Y-&hSfQwuuGLp`0zh+;D6-8To>?LQN4|h8}wCXRF%$FGsJ%2 zb6+gFiO#L7a*13X+qN5!9T+M@2y+5iqn6Wm{xzumLH}QswOVNVyF??=t2qfx070_d!5#sK`gHu>T<_h zgD_{Qm*Bn0LCCCVj_mVJu)`0NXN^)XHXhTkWdR!RpbOb=Sc{EpOCj8EtCLiUW%8wE 
zLT>abhi86LEH0qcqxWoloFpVmNn!eq<;txcggc)l1&}BS)xibd);QS(yu+wPV{gB= z@M04RxFJw0lj{@F@%m+XFa$p-ivInWxSGrjm*ID=lbhQ2==jr#3FW5_QO)H1v1l2y zb-nmv_de|re)mNNGVGyU@4y3hGJTa4nu3@AX4c5l=t~z?i^ntdgSWHS(=M@dHr@1o zjLs?>1Hu;%4UV<9e{U(dUNMRhXeWmqE7FL`B-wc4dz>SIl>t8|SbwU3D+b}9qV`P= zCx6u^>Oz(^6*T?N12BLOc7nj~tRSjaj*{mHs1N@sMZU;0JYXje8Os}i;)B~|jxMR! zR|<~-Pja5Odl(L9<8jEFkKbDWee-xH2G?kMIWmJ~UP$oi0k?#g)d7FnLl<8IStXjjCz*#_lLSTGq_w4cU+tCouu4e+ms+`3^P79- zn_AnRPA`bkadfWI^K-5RKbOJK;!YQZBxUp=s>X)x5aHSSBJcQ?c| zWeBRWZ3E4V5v=XIv_0rY~GvlS*`9V77a z4FqZ2N_0GK>d3Nbook{5w&p(`^CZrT=h5eIa&SuH|8~56)7UZOX-R(h@-mcep*{g- zI-Csx!gM1gDx?aLQ$gTCzgvRx#E>LcX&CgR^5fbqtmz0^Z|}1PIR*Wv1Nv-h1AVo? z1C@VPfOsKP2Ei76_DXcff?$2PBu)H!5mS#rNaG_DL;}kZHlfnpv*+{i&yPv=M#5J+ zgLE{6I_GX{XwXT(+{qZp@a@oHiK)jp34-+CYG7F7!ubi5+){3RU|Q3bF2}k* zE*UDn`(&meyb%IU4@pNpFVg7m2i=inXgC>&z-fBwQ7b@-!laT#QcKL{X9SO@kTX-p zeAA?S)6K_b;|aOxxIFxMkit|UwTY6Y$*k}#fKpqj7gs5EF;}z6s%(ATmdZxY5`sMK zBZWL~CCZ9bD8j^&i8wDSFby}_=C{$*!E;X=5>fXKjfSIf!(80nfaN+WeB;R)_arb> zG!4ytf8(O@Tj>z8fA1YqfMBt&UtrFDyqd5%h-hVIagL(wGb9^1Xk#G4o8pgk1EJ%g zV`b_joNb|muR$_brZS$69*Ct;LzP4pAnZOg;PXu0dBYNbtW!f)N1btcT+y?QXdgU` zwd@>)RSF|20c@19O$b6JHR~)Bmc)**ZVq4Wi6sZC;K{Wq;fHtHC*&z2LW_H@K!{HP z1|e<5F_R3EwTf_SDQWmv;p=}@3Mmb)|4b+UDHy3@g69mH=;U`rg8tFKU-YcO!CN`p zu8lStbi!CHr!!XTeZVhQLP19rNV)FXv!ey|PO#omaaKmc{1|P`897LB2Rtpf-CNwV z(xf`BLBg3Eje?y<2?!9|GrQwB^3NTFmU9XGdPLdWo^*$AD1`Sn@{g@a-)ceB#dX`L zQ(-KBJkn&g>Jin${|v+_digPnMk=aTI zmR`r__RNki;QU3=JSN;eW-94%dH)^4FU)*NK03BOk<&X71u9LAdc=qxb-EvNA>P4H zO96Ti7OheBi89FA&L4K=Qv%PnJCFU1z|i4&hxq!3Fi438!*) zTn?V79`{_*w@EcCcqu7Nv8q--^bL(kOayTnQ9N)|Y;=mxKCfpC+N_K*tfzr_WC5RK zs|R|^gE}q=(GwKg_2kh*;w!($J-MvfK<HE6~Fzu@q^=WfOA5T1W>rd^xO&Gm$ z$v;9=I)zjU6hf+&q!{kq06sv=@rvAdd+I`eTix_%o2(a?Um4>hL_^d6Su7eQ2NTJN zsG_}4M^S4Ek9TqW=Swl>|2BL;0r4O>By#5Go=Ymy_VPc_^@gg}D@FGK=+HyRWY+XX zR#HYXoeyjV6fI^YIZ}(XK;@LMD=F;UFP6yV1_7kSTY?|o;$ZC2j6!9vuxTrt`Q}oN znIJjXWT7<)KU+@*+K$v;w~|LQZU^TxI-&MhaXg&~A>x&C zm^e4lXn*H)J6V!$`QzePV_MwGLPM|Yp;gn+Gg+Hq`;JUkCiFZ>Qs>9*m4fThDt!#J 
z?3%vydGW}0|s#C=WZ$G&i%$&E&HI%&lJb=KNb@0nIt zOR2(X0kKk&x7l&8xPE-Gz)=z02!qnPhxxz1Tn%_+h^3?+9+(oCfU<84Mt<>D?ab*?)+&%9ot-8AUyzxD-8$*~10fKqD=g0#rl5R5g@b&|tWeX~ezD<1b z!QX~NC?_XdEe#9WR!Rx`Ch=f5rfP2 zd~yL>wfOSZY`xm67 ztiRb71X~KFau?gD-W%N&DcX?w1@F!Gl4&GeJC|bxU$7MWYeb6v?2}}G|A7sYoD!!` z>U30#?tibl!%vSTZ%GkP#tYp1WE#d{g|*S7xcqHcV{ao-qwfb~tQe1x2o$@)cQp`` zc;0W9by6>_$PPQfrC<@J4M52{P@b9;GXndx5Ghmja^{EGg0v&SlSE>?-;sa6!Ed!3 zC|FaQYN4krfzxhrKRDNb2^`2zC;cwvP8wYX92DUyiy*f(GaGhWds{+@MeCWah~MUG zINtOhFe{5hbH(ywNfjAd@3&7LWaLA&@9^xKEYxgbyQpU-R^^Ff4*V02^DeY5eZFxnib;*aUmw5EiJ|N%!NIy1Mx$zeykj{9}_O=6^bbU~Ia0ejF?~B%7=kbPG`ZR;&u~ zrrx(ZKR^qTq)8M&tNeKe33SL1TCH03(j?gIxlIP;tiEwS`7|@I7mBSRq>jFm5KW{n z%l@NTZ+R5>^Is1D*JcIbhrCdYP=y=LfWdtsCb6BMN}E|UWT)!+>tr66DqT*t^*UBRfANB7pX2BRdw%%faGop*wWLAjE?|iF zm*Usx_<&b&GpHVrL5~7xj8L3IzngC9FLaBa0|jaYs53r~?%5@jcEWuxQ?)7y>Y1+r@39#zkSH5;wlGx2lgkzCbvyZsz9Zl;Cwa$#x;n1 zQ>MAI1yZObe~zK}jrRUd5HJ${GX{r&vx7QH6C+RW30_xaON%smFd3A|NDpIC=v?;{ zq%RcWdQRMW%)>KcWxl$_1czYkDFkxS@Bf>;E}kAvLP!uf-6(oSNqwFF!2DmQW+|PN zxjE6PWI+Qxn52nCTH4?XXV+o;+(w(LWurKFF(%D@-2K5hRBH2{Ug~BjP9rB z;Uya=E(I;>>r1Qw90MR<@l1HY{KDRmP8%~vmBf4k0orNj9YY3d8i%@Yi4pC0wus1R zJ|Ac&2^`B)80cuB`1t`1C4bP0?Fu?4Y8>#&Z5I|FsoqzF?Jf3Jd10tjIj2wWd1FqN z@rmJ`6aY6iUmxc3SXU*hDziXAjQsHODZtBC=h)`DQmgJ78lK>05x<;Q*J|(Yi%)M% z+OI55Ln>PlR_#R!fg2&M-U0NA5tZh!;{#9iaPIDyO)$7*qMecu7uO&r6xy*g9{J2# zxZ=sb{q@E^NhjAwUmDd?#CY9yE)a6%qjI)}&Z!h@)l1uy5+zXW6Ld93V(Gb5_M&$S zOH3bVX2MW@lM*+zWAL z2Kbv$V9U71Z@r0TL~Q1?6UG-v*x*-V3h=iQ+Vh+g%6hnF`c+&Qj(DsNM8ZEq3Mo+Z znVEr?sgHDv$C5)~fs`d|06qe%2t?C?NIcVS7yrw%x(d{7V=+c`HoJ3Tv~hJd0-HW* zN_W1J{8ULxEi&>_{GZB(Y6c~FudK2fhNso=F*Y%yV!`z2+A!aA;+|S$dz=hfVXspJ zP)AY9&cmTs>9PxjN1iO%ks;oHf6hQTRy>e&s!DkyEfA;DQU7D%}?R+;TQo&3kK)bjNheM+1^v4f|Ij4yLut z@xG@pt({W{yQRE6zhi*VxKB1x?~atijwUlLN8Oecf&OO~RH;?wxNN|8T-fCM?Lg9U zJ}(Il<{t<_m~yG%sQtwlKl21PBBix5O^dh_WJ9GKy#;hTy^-LOAwr%&N*tbmK>Bt9 zkAyqW;}ax`VjeFRp_1{9)9zJehImW+l<`MXk+^YMDmy}uwPv=qSP5gk*;thE*8a;5 z@DXmek#S||qdE?oZ_X7uU7?g4CojF6*6MTsO=WKilj5?ksUHaUm;L?1a0D_XR(IV5 
zuwY0bF+tyRGP$~NRP9Gi=b&g<$5rwAmtf>}|E;YLlW%^d0+Y!^LP-_ZQlp*f&(!*M zCS3Y0>~{m{)eX4o&NX*Pal= zUuL1Pq(F0gB$A646=h8cE$qAXsSru<5|R$BE1&4EPIEGCOEqe`Vw4$oj_RS;AL`b# ze8ug>_b{OCB7zYkR{st_$slhV+FO^$eSy*JT{gspF~qoy-%Mx`Rh^`xxA0Lx&-)O=#c4W;yngERpp0Qkno) zEU2h{Rp|B6-c7eN&F#i zQYOEH^-i`aYX<{{@0l#YUyQo|1$AYrrjB+_Y-{*`xD^h~f=eIkpDzz301 z5?R6A6kviS?5bd=a18Mb_BQF`|myC@`@WF_$1~W(Nkx2g;=*A z&YtizyBk8FW00xB_>MQCAzv!0j0N4sF;sB*^tj5s>+|u?U8}i+ki0MF8HQizjePk z%{lAFY6ikVvI=;zoWHIiHEhe@ti$gcqy0G}B;#BF%<(bmj#gOaU>-$kgpOw_ocvOE z28A6W+kbyT^&4a!vRGfET7aW7d@4OGhkO<8L&B-BQ-Sol6TkMFZgf2XHGY}6k5&sA26|2!cA z+bH0;;-wQiO}p`NB>JOCY-Y$X=<_w(MUPTHAk|#+w!PJ`3W_of;iKaP`bh?H|EZ2a zm3+A3^}teDVaqUnM1H}w?B@Jw9pG6>84FyE%YAM`fj$sjawZNr6bF1FF1103h{g4^ zR*jVYu7!rRcv)Dml%?^ttZZ2HL5hvk$;KLd;bOS3t%1F7A$t~u7!_DXQrV?};)mn4 zK3Kd0%-Y)Lesp6QUG40*{%2aUZ%Sg_8Wz3D&|BheE;UJtMF!qxD=mR^?Y~C420o}W zJ7#4;S5N)RVPw1A&Jhj?W!1OE8ssHkjVEILX)l$FCBfKmj=xT{g}2pRNXIofspBcW zEF{W#hCA$p1|AM!{CmYAr4=1Utb)43VFIRp$jcutkytU2D?fzv3ttAud=mC6vJUWDYC*^$(T}3n%%J%(^w`DVS^mE1ii3 zsoI>6ixy@GU$xx1m_be7eyat#=0A1QAR`gd%OPtV=1n>4bb6N&$2;vdcLhBB+ ze9!Z3#o$|cxTk8qd}>!hjt)Qjdf*<0@m_)ARJq*U{}8kUz@wo%7OBrCAjf?IY*G_+ zdO^B+&1G364y>cuG6VGQg*1S+h>Qb%Df5jr3fbYcO^R(blf$n+mg@V1?kX|Ip7MNQ zzAuQ4r(tO51o@&ndQTAz3TZAS_|XPFiIFN`V2FYU$fm6}m&Hp+%R<0bh%G$OJ`)gm znGF`>(6DniJqK}owOxl*le+cD$25TR>rMqLb@(gQ;X!5#GK|@E;yw65ElUNa5+#3@ z2{dqJ`?jbsv?&g5xn_CaVk6S%#7OA@HXf4>j0PF6Ib&R|gVa3a9cgx;ME&vRzE_b(^3=Q=GG?R3-QFV(>tC` zRsRs{sMVd{4jJA-FO)Kf0lb8Q)+al1rrT7_##oS?6eT1uQ_(qptZuz#S1vI@Qyn?A zkG#6{b8`PPm@iz8+Gu2d13=Q%B+DZoO== zinalG8On?fOt!P!AiMsTlRF8;N;CDJB)*0YRUBzC#}h)flF!|W5OLLz=DEE(M3)w6bfz&ySBP?6X(^Cxed1_7da z-(41UyWF7wE@2Aqe5DoD=~UBiSSiZy;2+}*)FwV;#5+2GY%X%+rFA#^KXo;iM$^H< zB*EVY@vjU}w$nZT8sHD@S=cPEcp$IN!q4j#nhSM`06|TA38?8|GXcYVQ_R;|zF;og z6V+xavAQI?;gITmhUWEn$IPS&eAmrls-~uDt=f#>|DO0T9M-cioWAUdenP~>3Bt@~ zcw?N42m>kyzL(MPRj(nVF*T)tpz&X41J^$@4NxF14PDC7vuKSH_Q~FnMfl3<82sEk z_YrG?p-}(?WD{h?&3>=q7ok&KnKWv-(KsPN^{Dxa=MiG8rf)W5$xa0QEJmB^emqlt 
z?I}bGn>(8b*9T3q+o(-Y!_p`NNx%+@t;K?H8_JmXC7M6!Bf}#6a)#bo1J1sDvm4?Q zyVg=j!1eP-O(Frm4eDu)aLH4jKlW7gNb{MRp!vvr#7%kq&ii zuAwSQtQ1I_gan3Ixde2ogkHL-N{a(`AeJB;x;O0`iFJIaoJpE(oC7lG(}r00d4Y?Z zwrMfwPm2)LA80*oJp(o3Q%LR zJA!wT#IH6+O3Wt$Z1$p#QY9En+!cKqBw!~}d(*yCfam*zq=zGL*&_f}g_S{PkL@|r zzb|NS(b-&_;5&>0S>g!Sj{Rmt3c@4dD=EvPD@cH-+odqi_dNtoRD>BE%)%$tNcs==xHNdNv)zC?{m)jU zMLhX^g#WOx;|JSU#udVPfM&|;?XamQ8OMtfGoN#bf(%}48RW@mX;6O&x5oGY%-1Bk zFt!`J07%tSq{_M^8>jOOj{NMjwCf>$TIS`u0B)5x3ZsMp?m2`;H?3Y~T-n9Es2;AVwK*jK7OLy- z92L{u2Bc$*CR#bEOE+B$!A=|z!`NM2dzH?9P3*Y^+QrNWqiqEGJ0w7o%>zH!H|!_z zer=gO9RaoQ`7Z#ZnPv{;mFxnjwQbkkC*BN-0nB?O=fMNWg%wSzk>?0}KyhV3Ue!hs zDkI_kaaJCZLOCTPzObIj4!*mJl8(4;XW-+=_a|N?0XIMdP2o=1;=W-3`1ioa$EySb z7z4o?3!tsXiL)LdLrD$RRv)LN@JS0}J0da0u~7~GjJ*wDRpk3H$9#?bol}s!DRW(B zB5*1}wkQDhot*M7?OnHxJ6>K#n8;qyhI+`5AuIR-jcg{7wV8x%5^xy_y&cZ~ODWd7 z&0+eu9GqVB5as}GVq`|!ml>u=P$@5E%v*D0ju(g)lN3X z&(qo6LA_)geJgN2{pZ~|)W-*;p@(H}0VF!SYp8w9e#a}Fus}UkcMR1;Jd`4F2cl>= ze6I%iO&tnC9{o&yjE#UcMBsOqp&)$GYdOJ=Tvs7bD3`Th)rGc+Mlq6bRtfkI!9f|0M)|{FLPyzAm+(Q)Xn@ zn}|bnFjg4$^wa{v*}!2$thZ*~Vs}hPE}aMx&SM<#eGxXJ)WjI9UL!?%Ya`E}d8qlr z69gFi^y4;3{FDLqPaFprV(dQ|LEY z%ohq0;GA44%;HT3&{i7LXS4B)gxND`)i+W}UJkZF#Iyo^r;ua2)d%w8>Lg9!(5LJc zh@z3*a1b>U#h^veK<`XA*-aa;{+bDkE>02?Z&n=hTSJmd1NKg&@An|7i{{xcnq4t* z5vfP|I4!&QEgnx-;`0AZChpn)ha_4=8Ofm{1Ala0JOft?YroeCfA8iZlvi*P)B%~smU;H(1ed`DpTrWVs7F}k%} z9``;-^;h5pSHs42Ou(Qm;)e{rWX>bEe%}9=3v% z2~IP)ILWnNllstSMSY=Vu_=JOYm`YD;9Rh1l)VN(M$BlWnfT@#2@8fmk8oHs@fQ5;80?7rl_-GU z+v<$h4Ca7IW7A+h=Lrq^lQ?$+epqNWxP?Ah{2?IrVi5?&c(ogIM zW^E}lyPY)9IFuy3JTH$ZTJE$FO{X@MOs{T!;<*dme=FA z-XD~VN~3f6N-)@b=6FDUPNXcl(DH&lDX+zmU2Z+eA3Y2i6!5tNM-BjEv4LxEfrJFv z82mFAn)2R(WGr*&KQK|}O9oCRzyXvHM~HDe7R|lYQ4|wtWo7`}I6?t!giT;~y={>$ zHjL+pXAEHT;;SbzgRTl|HEl1!!j!Xc7mjERFF_wgJUf! 
zLBQiY`6PqX7aEoqF5wmye>8;SX z&dqSU1j*cziK0y5V>Onvb)QoAFW(!n-67XVzh@XEb51@=5MFbRU_Igu1_XKf8Jz|V zhv%^D6KP%>-4QPpT0pZ$FGoC>@qU{jquwKA(UVT3tJVLPotn37!3-=j#Jy>e)yprx zAF3LrESIZ{@n+f`ZmzPGHwJ@@7+FH?%s4f)KW@#7an4oxM4s5enElLIQ2=EV*RmQY zRCh&?l8K_wN5CXA6eUuGK$yjVjhFr0kFtus^pQRb9pwi??H*Yh-IN~WSyPN8XBEVK zCj#v$mMf8judARrVTOlJnGn&7wi?<**a9$#`UBha8@aN2?B)U~DDE6R04MvIF`CmC z$Z`H{AyRr>XObk$Xw{?P&!D%4b2Ph|lOjg3*R!#hAe#i6}}7qdP-$ zt2te4dx-`@ZjSM9tE4)h9Dx?_S}i-kx+8p_+?D)3mAwE~XN;&unGdrtr#5`eo5uUU zU>>}6&xEcjp^ff*K!S@!PIBqY*;*AWYw5009Bko5Y@_z#WB)^$9>8mwxWmhdZzcft zg>S_Y;Z@)}Uxjc+aS;ZpJ)=r%J}ySTwh7{&B5guK2{CiPH^Klzg*gB?)EnN{fb!Mz z_UQmTGV;$X=Daaj0+r`efYHIl7fX?Cu(Um+a0xWv4Jm-pgI6s%6o>bfmir9MEF#A~ z^hVvk07WlqTy!I~7uesbUBF54$)6#j#Ogi{ULK8$`31g5pCCz?sc8FHijpzYgB2bz z{aYOxnAm$TYSS17?^~cGLFNa$ot1ig)J&Jbn@uLguy3_jap{|Y@Mxu81aukcAOKu; zNGPrgS75IVA2_(Rmm?-*>8BAc(HYkt&W3wMPYQ%;zE_iNg#|-fdkJ`&O7x;H$MDm{ zx-r&`WHt|Z%Ft*NDNmFVUHWabkAlrjsp=ON_KDkK9-yK1TZp%08+fzXSEgatrqW7q zhmOlmY_!~=Z}>cpET-3OPDD&BoIjTF=uP(uxOu$>@UyqKw+gBE@(fzSCU-E(O?@;Cg?HYr~7Q_>q)@i+zAE`{3RBc=lvrGAlsn(m03j0(g z=CELM4B&;XI8pnu|NeC2%@C`Y!<&5r6^u3E z5P{#(UsT#j1}VAM!sovjv!>hnjVz+_n^BSL`S4;jzb~o`csjCF=cWcAxM2Z zM1{F6$Be5O#9!}1D;#>0yWw>$Nq9VWy4RR z4!Ed%cBG6|@YNgXmg_A{pK(x}9Vahf9J~nk*Y&%tNtZJp%WnAI`W4?7w-tpPen&9$Fn;*d2f*?qv>lS{ z+MeL0hGg>#8|(e~LjUzZr8e@SR9rFPDU0{lH6;MTLL~@3&WU0aVX^2&kn~IwTpvC~ z4?jTYLf-O(d5w7$z$|sj22m&YcL<}u6LUqpa~YGO*#nCm#sUWhi%TXp5X^!x`*S2h zg!~!2X=Cuas~?Mbk(aT$KIkbpHSk9MVaH{F`;7!eTQoSdt>5ZOb0#h;6o0OGLXF&N?d<<5xND&;RyK#7{uc&>d)>%ss zRTWXvC_1Yy0IfY$CvPG~yYp*G++^QB1L^TIEoH~~+ANQntpn${Z@i5@j2xzGz(_$1 z8j%OYP!#95c&t|4zcbZDj7|7?SqOd-oXp13dw~=zYar!Z5v`G`p#~EJZOh`pPSl#u zJg#pCUpm}w2;+ahySr3P{B}p6_&I#}3z>%@rhF!*O#4ey&bIj@)M8O+*`JO?Vf0#P zpv=?xj}`~*9R_bQ>weAtXq0;?nF6RvUu0+`;Gg{>!-8(t{m><10!Hu+Evy(JVuI^E z5Fib`WU({TN^*Mf&&mRpB4qa8z?QR*ZOuu=^jbnBwaHCW-DX*)Go_Bmpv+IYosQe4Qn$>BF9ur zzEVD?841W+;uOQwnP5W3OHu&iEp>~PvgQWM(5BQ>Yt3q*?#XA7f(DgAMg}7!^Ny5< zl|6=4w9TJ~O94j|g8}6c6ZF0srk)Azc*j(>BHSKvz@SG2w8SIZSXLkyC4?M1i8}W- 
zCxrR8&wMO{T{DQEy%w31MjUzo1k3_#zQ6yA`~4Zt7e1VJsVhWjaFg3VPt;^RV@yFr z>3Y`MOE}_)W&L>nw>98OiVRoy{3OWjb-Ds6cY2F092N!bKdSuSimxWJ0-mqn=j{yJ z;70#jIPlwg$?a&)1!AOd$LrwK!RHdARGWmMP&zHJnc@92G{avO#OaFXoAoqQuws8<=3V*#3$@kX0qA!~SMql!AYSy=7YOvn#)`G~C(tGmt9gH^ z@&i&}k<>Le)+lU5>h#z^80uHKGa#D}^jgNs8&PEfvja8&_UPK0;gT&pe}B`$tb*K_ z>bGZ1tda})k9R!rH;1d%6_i?aEB35bw~5kcUgq&(b9zhJlvHDAO!~(+A7FP$Dw7Yx zXF{sta3UM?SLGeD+$<9;^azo`JdYbXNMs((POIPaU&3>^9v>p20Y!y{Z~uY3lgWtN z6cxYp}P3%a36vs&k{MW^K3TkZ>RA?Z^;~uLeD8QVEbHIGJ`*{xoSy0C%kWtssWI9P>(-sigA% zQT3Nmbp^}TFpRsq1}C@^Ah-q(ZVB!Z+}+&?uyG3-+}+(>g1fu>yEymU?>^tZJs5+r z_gdXuRdZI&*|DG1Ve-39g>g47XBK`YhV6;(`tEFB+K~AUb9G3fx-@u z#j>P4R<}AnW=fnb{D})V7TR_n*Trn_RVxz&e+>?LI_{;Ug!=J?4nFGWt@NSyH$7RC z>;4-17ad=Q6s1d^kw^0AYn(p?k_j;fF~6IdRW6dQS)zNddcIZQS<}vGz7P!^c9iq` zf0%t>Sf@sPH6up{J^gg8vWc9~?#NHry#A~GL_`x6{*g}*z%^S9dt-D$JAX5oUP8ho z9MH?cm@r_1ZmOhQB^5uB z=@CNhpvqT%ySo15w$gX*10cf~;<->fa7sZ`lII9ox$`dF1rpZlrgvNADtu5ix?E!t zU1q@Z1IGeJ2?c7%&j6yO8KJe6^%Q!ELl|)(5Jc}Gz8(8Non$#Q5VyDInYVlMs7|5C zHZ^dFA<0O6ZVY^cF)dI)+gE=3is1mYPi>5$HOio zptHdSVn{*?v?DU-NLlvG>&}&pg5>iV8%l6ovwDe3v+v{qU8rV4IZzr5V7{hl)V$yb`qJ zblCKu*0)wnA&zbi#8?1GQfQWx#i2_8)HE5xWnC6Br5NyYwt+4}x3#qubU{T=!Fn&@ zH82bLpVJG27VE?Qba#sQ`gF^#Fm0a@*7?%rd_~r*yfC~mTNb}tP8apzCsd0T%jGcI zDab)gOk6iSDR)CkN+5SJEFEh^ZI+bEf)24|V~`X5dix4r%pw=a$SL|U?=LDu&0JMb*X}D1wRgTR z0#IV7BNtwpMI2<=&cFhxoFjw@zS#xtD9g1Si&4}tLK$^fi0Mgqp8_*%2&UudehHty zcbENWApI6iG}+VeR1AQ-ygU4ppw-ll5DN_?CV)~w95!f+&l~jSBAIkE<9gH!zqWL{OqsRI90)vDX z_rY`+5GCTiZo7pTkMLVmuL;>?c?Jx{e)XvTeCf1 zYeRbkskgb`q8(buqdjvL@6NSKbA0(B>I*kISkbt+T$#UY`dO@9*uwAQocKwSlV)j8 zj?l(Iu_Kg>Fn0itfdE!O4d|LXX`qa*Fz7X|&!N#l9VGBiF%gRE$aG?G8VnFuJ`lY> z>~VmTOwibRkKt!7jwqvCWd>E6*vX((OVjH47@rUWIM~5gNjU6}L#wN+XRb9nS>g$L zw<^KRC1|%e@7^Yg;Rs{zMicXrTF#eBUq#?DXM4XsxrfOx{3aJiSZs3m{6{K5qB!p^ z)Kftes^Fh`M8B!b2OH_GI5dzh{>n}VR0PcN#A6!d=awU{gEiX!B4b>AP)(C-aJ=ktELF*lpc z_ET!NTB`o3?{+jxo2k}dtqtO|Op0Fzc>TrtUupI+y`~voof;1p0dYS4VG>r{y(QcI zumAlryN)mSsYB0%8PFg?ucH`XO&kGkqqW_^39(mJL$U#G^)DFsV}ocBi=Bp%v>J4T 
zs3<9v6&QF*Yww}fGY{9t(pZqQ{MuNSQ?51t*!b&LBR)5ne#Py#Ip|AF5c962z-x6X zQp!irodsZ%=v;|v9gZsyVOVT8gs4srrt)IRL}6U-&xpCde(eV0xZ7GdHhs285CY#? z-Z#|6b}!z=rKPU7*XP7$xwJ3PE9+tx7Z;P3)oMFGx>eUIfO)$>>^#I9or2_I|JWbacK(rWMOwN*^6u4 z@F+#j*@wC+2@Q_h3pUk7D_3?2XZ8sQG&B+wB723Hv?dLOrE{NRIWH*G|9sqMLZ~mL{M#d@|+suk9~)90kqi8D_|6c zsKG*zJ4Px5RbZZ^B@jrBU0}|VuLk^V(igZvDC%P7o|vpA%RwI!!*vnx+v8`IOVk^<1KlBalb-1~T?cMgS8>{K}xQRg8J9HIM&$q4Sb@ z)~n6f?zcyM$sb2Mmfcj|nCJQC5HgL+X7B!B=4!FZFqXqyNywl^GLBrDCo=n8dbGvs zxjz1EdmA%%C7D^bb7ls!Kt7`cEl(pyB8p(L#l>bYnK?^o9fO?RWE6~xi%UQSh}Zgk zW|8&&Kwc=&G=H>Fvbwxmf-f)!X?-MdkxoedT1% z^7i(g%n^lc@2r+ua5|pv$`%gz9pqwXr#MU&7Z!qnr(o>1GlaM` z5<&|^q`=Db%gRyhtfMad-u_O`I#(;pfg{qHi{7~o-vI3m)F;twsBRsK7-bwHP}Dy( z9?>%3Wa+)8&9%P7_{!@28z!W)&~rxK6(ZyeIt_Inshna-Etd)?}buzkET9!%$9$D&t5!l0HNyzY5& zeY~P-Y-#B^S*ocE&n>9}4y%^AMQa>9Fl5s(%PmHLNv4+o^w{bci4Da13`TsC4W1pD z(6DKg(>}6%gfEj@7_-97=`j>5CeAdY^gW%p(EUARBIqIQJLZXKpiUF_= z<@0h0~nb}hP#g!CMgfiiI{hq{%6Go#RpqBgP%zRIL9S*bNw zKI4KoS1;87$EH(3(_HxpL~mp4T!)QLzx2sST;@!rz0IDGpLJm+vb?;!op-U6;?-D? 
z1<1r-v4(%s_F!j%?rx9O%lp&0?a0?eTAWYyMnSB6<@2bI3c*+)3vDWbul%kOXu&Xz3Ml=z-W47!39% z07&6sO_**F6=m0yMU1rUAkaZpcDwIIfaO@xgO6mg81CnbWaGmtUpYF>TtZ*+r1{+y*#Lp6VvYQ?p{8P zjSbVubI^M{T<8lWQeh+&_5OxTE$Yioi?$d8-ol8uDsD2W5s+3^uO@wS;rupibjh4o z1eqDc6uIK8li)@rMT_QqU+!s5(87wLQCB~DC-7*o6Vzf3jSDw~Hzd8G32Ky6b0X-m z@76fVo$t!}LVuaWj1W`~q=EF!1`&*voEyohIe_87b|(!*Eh_zHM+Pyo_$_1@9U>{n z_azJJ$J)(Yt;H2MIy%Tg3!fmOj6!b=s-U$VsE`zL#o*;da_y(*=2XYkwoE57gU(i4 zdbS5*5;;~Sb3}slfT#9VM**bWq8U zPXr2Bq!&+16rjcYuRC-~2w$U9QCSUoL;ZDRXDETV*rjMKzIM%$At{Sb{Nb@GB!WlgE1sqSe^*x&1WdUmNl#i$Q$6m2}Qwta9J7|F9Lj(QyZC} z4%A%$7}PxU#pyc`Ypfqbg5DeqlqUq`=7Z5b)-?i97Cp@eN{GRc{-==iVR(Lg6b?Zl ze#qVSfwL_ShKSGbxI5Y0(?4JDK(*WI^#j0^NF2F1nAG5JTIHj%aIw9v0NBmD(`DQH zGwt#V*(?G64MA8~*uf0mhBbE!ncuJ7=zhiZb2*n{&=A6Y`+Cg@{a81l`5t7d{Z@)o zQBwb~pb9w87pTGM(xV2SKdI$A_}fBA_I(&%33sB|g+Yh5s_0cW!)3Y<@j4@|tgPIO zBl|R5zC(5h=t2!_Ep>M3wE0*bWca3FKzA7yoG~;Lx(72DKK9x`M9-s{hq)QV2<_IF z!IHBqVFD+m%m>-*t=<>NK1V-{fdJG%4RA?d=#|(Im~uV2VHzBW5e3GEec|G$zTi+h z0iNV6$i8AjYC)YDBZXaMK9~Z9Z|rSkGMU{a86@Z+#RiTm9`L&@08{`byeTLtc_{W4 zfNLKeR4-Xw4x~jb*uF+HO0R5D)rP)mLgA1ZS@yF>ZX)gK#(893TgGpQ^c5_4HeqZX z15~dge9Y*q_FuBB`H?yMKWK%}(}cjXR*Mjt*TV+y0+^9Nbc5?sO>&p7z2_M(8Ekvc z{=HOmbpR~erT+C*9#~%Q#}LYD8#V7&9Hh&wew0fEd}&n0SPVEnIC7y0je|~LT=}cO zRNP0X%9i)SfY7G^kf+918XB4tX&kk!wKflhdMizVBb_$)j~vNh1+z!Ygbv#4OLLF# zhAPkEM{o$K--A5l845^?k9t!)B$w(D+Uf?zsfG2xk6Gv=dSN-rJ2fS0lm8CLjX%2Wzj#{s5No% z@pf*^Mm@6SeV~FbVZI7WfqNaYU=%7Vf10sE7w@oreoXc<1pE4eQCTkoZBBUbWg)O3 zX(U&HD8JbxBA5UYF+DajS#iD0Hd_*cdhO1xwc{C*lj=Vkmq8<_T{|i5A12w{O*V~P zec4DOLK}eWkwyO>3f_FhS1jguRqTKQQ-%{`08v{-j%tetAeBrF!}eS^5;d zc#_KP5yjG)zL^n+5b(aEzYrVrhnKa&{pn}bx(1bIbiTYivM(+#_W%`mjzW$|rSS-k zrH*{UC}T7H=sxOscbD8*Y{!mZ?0`;QdgA9h$^#hL6QzvLb@)dxci5IAOCM6IsLm!) z-Yzc@W*wWsm($oartgi*Eb7qYEMq~ncTIlQ75<~Wj>?a0n7yIay7l%pgA8e9qgi{! 
zpP+hO6hRwUdtk`43{Kz5(UM1jqKV0AD#t?`>LOi?3ocigi$C{X5>!G4a@Vl{CFDnv zRa5I4_DfmD7k^zeg$~W=r~Fl1hAymh6df@rtu5Bw4wM=CMsyI@vrQh8e_lnRoA1M| z75?Z^f5P3tSX&X`^(&kT*(I;+f2QP)x%acrx9_@Y`lZT>?%v<60|WE|`Uh92tTH;W z1pfZy$_R0Ve`n)*DLG|+c;>=eXvf&7!)0GiX8K`07d@v1TOgz7?&hXBN9cFFP!Zw> z3467h#$k@ZVY9ZPl{{^99c~SQnP^)SDw9M<9I5F<+1q*%goe5R&e!JlKD41xrWnfh z8%Vyee%tMYdSf?at>Gi#0T&&$_&v(2rN&{M9P}%#H5VYTUPuu`DU|DpKz9yKh&X<< zFSo9<)0vFkuu$e{;G() zw#wD6?3SAXLJk;s6 zBohKs{+bF@!G&mOM3afsXEf^Xlb#H8J1jp1c~A@nqKCF`^zr;Gxw1r}`fD1ORw0*R zL~;Y|ka52_j5o^_>K`md#WKoBUtAxW?ycRrn8HtJ_)QybY!}*8x;dfJ#Bv3^g^B6Y z0a*dg8Y+z%LRnPL`BM$t6|aRSjX0Y0-x8e4WkS9}^zKZymH<-^^lqV)>lh>x8~- zS1Lg5m|FBlTdtHgX%&*g$r9-*D8y=on6qzpuj4IIW?B_Ybaj#ab zY%he-Rm;7uyWN=wM>*jy+JYWf7}W9-N3P2JLev%m>BrTkrx1&K0?=Djl2GLWxUw7p zMdrb9qeWeF|9y(u3Ny97yHrXzku_aH^@!AwIljFPA~^smNF$&Px21X4m+MTA{;|N%L7xLQq|E zf!QfkLGxsNm2BC78lEEJAmOK&;rcWSohKMb%=}Y^ObohHt!ds30hMd!I&`Emyd!esef{gYwg?Gtl`cc$!*Zfwq0a4}fB_ zykk`e`3I*#;P<#WCZoaAB=`dNhVmh-7CBwmFtk1JRrs3TwIcEWKzG?z&=a82`ERTyY z9*diEU$NR3tYP%${&{KSwB_RPfuTnt_4DiZBaFcVwUc%RAx5&dj|Lb)Sd z@b6q#&;#o8omC)EafsMna6Qluz2jXbLrc$FA=qsnH#A>9Eg3G>+VZ6fR?FZk)qCD4 z%G-G^WsBSeeC~>@EJ*O3E65qq@mn^ZpD+utbFg?1ytT&>Fh|~XG9B3Z)_t>f&Smk zFe$N-;)40`bb<0+_Gd0Dr2}PG!WR+8Z*VWdK!+nZs2ENbwF1mG<^jg>xFS>YnZ)$DlA@8h+$*W(V2m^c2GEss!#xiORPxlguhHs|qe9PG^D-vPgnGMxlDo8=*uZCW1dr7WP7^5xK1 zP3aWFOkA?K`J#VZU?ctI*T2z2L2m^sS!P{9FW;_PPC9#)tHx69PA_x0g9lVT4_nyb zxnGFL9+7MXw11f~UryCcBU`AK;cXl|+-n@-8;$-7v<0r)N(PMNiy zP(TKe!V?`z7p_Yos|`uXuMLYF*7FcB0`*-b+%ckxrn6D;h^RDqQV8;VX?P_qnl4Qn zR4TX8a+{Yi zt>#J*3@2z+Xb^z%5(L6ytt|`iG_-hUJ*B%!Ho3V~uXm<5oAknZ{x$7Ab-E$pvN$OD z-3xqw)#d4{q)qZS-FijykOtSz;HJF9>c)0SwsyBSP$B0rFS_)+S@(9FX7YsqH1P|^9#CIh4%>`ItyCW_9 z^k!9zhrr{nK1;$NHV>}Pi%d@~G)~KoE+G^HHX>tmkNVM;4pgg2KgO2+BlEA>!ZHBndtDCZ3+D3`-uGs~igr8sfF;aNG^sEKN)#kCiA)R2_w!vwK?e-AE(Ic{Rtl@<_uBHcGt#_4T#gI=H^tfpIkAc#*?!nt*NqRE@}D3?CG{YXsD zUiao0ntZ|R>d1>m?NZSD{@=unBRP*R&gWm?} zyUmIv!|&pq3lYA0bM&eln;`- zppQv=W7kUkk02kpwRT 
z4C;mj7JOr~QNbczp7=SgaSXl-R+>s8XGG?t(P47AAMo>zqzR_sTp&b+_RZgi?gcrY z*xn3TikhV@$;rfUxMYXo@~HhE$-HaU&TzLTW?EOr93su*W|ZgK74er}yd#6_uot(- zE`wQIkQ}AjSn;1{zM$fyn4T;}+*cZm*}=8Gfi_biEL3R0#1V|xQazDT-r%{c1%pQM zm2IzdMyG!Hk&J3JeqRZ0+#=X=$DxhIw>SEZ%yX(F8sB><`kP%9;3Eq}rX5cg0c(`! zjmA6d&E4(imu#1pf7{`19ZjNIUAGPc*&OVB3x6iKXTUA9srX92$wilEeSz%DqtX<} z?gtMvX`fnw?5*>>ykNSAbUEIFe|UI9-eEWxoYf&7<^N4{9s@T@6SBDVHB=tLp<+Q~ zv3YckW|Z1B15AZKob>PA3H@wvf7^hF|9U&!#K}4gJvGl|E;gZKpu6@v=t_sDibP#5 z*W%Nw-tZ=m@_39Xf;*Dgbp3iH^3SGF4+adIgJrz1HpP+q>#A%7pV#y$+v)FyPioXM zyUsn~Sy$KK061*b_{YGLoiHLI|DfNUw*OsYJ>N|SrXZAP6iyS}2miZ?Dxa@#}Dw9q6gF*b?ohXz6x00p$ z6CaP31)B!ATjbzlFTzB~>V3r;fw9DKnMPF809@f3{0Ex1W0}9ZFE$qxDz(0JC^tcr zS+>}-^wRDSE|}@eESw{xe|5o)m6ZmOe)?myqnQsVqc!wjj|B2B70A!md^V*eD4#?S z3n$hC+F;A=cMVHHtfWpQy|Gy-*QMHRdVynZAB+}1jH^AHg6?`zeOlja0``a*id3cS zz#9E+l8ws1r+%cr$naYOQwu7gwJ}-kUHxUIJY@Gzr=KM=ImrGoqg4#=^%ING8Alx7**YSan_`X)6+Tp(I$zo%HSjJ&!^YO=n zWsI@CVeqjs_a>J)rHbI&$f4Wa#*+qB`3BR8i~;{>G(jCJDq!%aBJzL_fojoH1`C)z zpCT!mQDAm`e4^N?l!0EOZx-F^*t%?`qL1nHVRxpx6^9BNxjoZvL{R^evi;Sv&%xGV zhNwP{N6dlrlQT&(l*whV*G6BVuD*I?0Y;13m>P=JEiKDsEJ5NI`&evh&gGi`3q}vx zB}55F-@l(x{SE8-CtKessrdQO`6MN>tW8nRd~MbRMB6bEvlR~MVp*i@%S=A4v_iOQ zyr@^dcZKGAqfwa1o(Z0wbp1*Xl}YCL%pSIkRK3esqKb;m9pb6hnb+bs+M%U(rprXH zETsQh1a~m;%QIwHdS-iZ1Y7DYS-L5i!p!>aZEPf+K{B2X`_E81u|eOLGZ%8c>u$(5 zV)}v}o;@ySIyc>0-GwIH+- zdQm%Hy1@Cwuncy5%bW?OF>xuhL7n}lYsXiE*Y(+xx22R1iwS$Lj{S|rMuTvz@IeTB zZ($+}iw!&M0A1#uE|5J5@w{1V4`2PneLWzJDvIL4? 
z$*&*(!4S(TP$oY6P8&T~AYw!rChGAaR~J{bv7rZyV7FP?`O3Nb$-EC1d}xgHzz8>=Jd62@l~gb zt#;|n;8HyIJf};jR0XnncILN=3xXhUAfSrpc_)>dFsRDmgc zqTch>yEP%1nCf&0pUNa6TZOkcFJ{RaM?C+LU8b>q6ByJiqVzN=^3wWDz6jj)8 zGs(3>P_GzvD{`f~{O(mWXYD)h*YFc&doyLB4Ds_KS)2FF`J!$?`eP36brgov*z^2pM0;&*2DSfi7}Y)###i#sSr)MK8BuFp@*W;J2!ms*`tz!d zgHgUHXwv*n)*!vO(FX~(W+>rMc$t=}>uhztI;lj&^L1F%d{X$^Ch^#)AC1V)+Y(;5 zt)pDmWTM*#l|v(zNC88XKs1SEzMNyNwH}1hh$G8L5UWl!RT^WSHSv2@RC(+iJ+Q$yrShkmY)|m=I`4_#&_J3vy z{##s;Is!8*NW>(=R8U61CnGaZc<0H<4!Cy+1o#mBJxWA3xg3YIYUw9H zteRuIpO2vbn@LWIMwbrvk4zd?Ih#Xf%L@}^$?1z%h|td=6^qk72Oe}s+hrli!QYX6 z^sj}rJcc5eLgXb*4f$xcPv!AvZMlS4v>>UbZ!>?i@y$cpdBls<9glxsn4zl-2UMoJ!{L-r z!a6r!ErV5w?pRNFt2oiX0WU>8xe;6OYLqY@FK@|blxu&+77kdQP_EQE#?#o0v<_^_ zlg3S_9OG?_R`Zlj$I6M4Aa**L!YE53{|D`cLk=Y37DXhvbJZdqW^FGhC#_M{VN&qb z!XL)v7^eby*ek8Ce1Tc2q7xcp1rCiEP7?**k$CdOwX_pt%E3F8o;n_bNj%V=hDO$h z77338$EgpAQgg(|?xGXIWOF%1C#VK{G7E`Mn%G=`If|9gkkzbw zbkI8J$y%Y!-_wB2FDyJ=KkDRhla)?FAua?wtf}i3@v^ZUa?*I)z1yweNYpr%<$Fj8 z4&NewTyjdSRwrH^*1e>f4CJ1QAF%arm_(_ z3)0>awdJ>!eWE9i2>jz15q`06IGNcpdRvZ?bB3D{1VfwOKKs!QGNp^`Pa%@|4EfZW zP~0iKoQ>@1s5hbPnEbeGd0uZ$K;yOMGs(SyC0cAm`ocU_w8r`!&8K;<)p!fbi&UV^ z1@jBz^vhUAUN+=SdaaTr30k0fV!`TC1Op^^;u_ZkY-PuKJ2>5fvT5}Ufs3m(geRBp zKG1X+2H5+=SLC^bGfekuoh+2BZ5xNIv%C(3oXolOrh9d zRN$hVR$(S|y?x^XEQC{4q+FY2-+&7PNhFD4@HCWZKkovmv0GSTOnRb82(h=qO@?#a8DGGQS~+HWQn&{+P{QE3kkVr)Q|f3TTT0FuVBUwP*+K;gnTPbyJr|_g3MX?qzccKroq3(@U{W=R4J42)|2me|LrM&vv0z4*XCQus5(#5@e)8_$0aQ*IG){ z-FzKf7i4J~t%o(q!C2y}0PPBTWLUmfHchemhb1>Cf2f(3j7uVkGk<2Mxgz=NV zOIBt2vnJx(Thy~lkLa1b)3QjW$7OL$H%;`X?Rx#~dXwQ8t%Fdg5_z>Oye@WGtthT~JQnUGZ| z7zWAVC)7#IO6zT9ZXTHNNg0$K8=kohc{q)WpiGXa8b)>f++M8JhY1o-;J(DT@bof~XNA0H zIQgbFYxva5G`Ar&DHrglL7lsKHs`>SXCs>L4$v{1JuT{^o)2046XvP7&x9`Q4>k*> zF$%vC5W%I5afC&#vih~j+>UNW`R?TNiB-bLH6w&y)-Kc5Ngmi_{Nh!?UcIa}ziN{C zOjVf7cOZ_4i$;xDhSS}p2XR^~DA2!-BTf9L*bUdxHdq0%rnW@3s@+tFHBA-1{U2xq zq*a2znGGgkFVS!LPq7R1+f3TjuAA@`DXUG`-=py+f78;zvqqXy{zR1M*T2w%rDRH| zzu+#x%I7@^vJh5gNk%qY{#ouZPb*M+eKH>v6L4D>Kldn_0Kx znsu-pPjOWxPMj;;-by 
z;=6Wl1s^%_+MG+tzy;o+)C%@)cje|lev4GDz<{)w|70H&mVO)@AKfc&*66Pf;? zK4cf&B?g)pX&hE+7vN>O@&;@g54BZ9bN}5!Q+zvL^B8)OHrWMk$g?ruwBLR}o0L6C z?aRPcTApk~;y2XLp<&5rR)ZrKXkwpPTMy`I6+$=x8ClGkhf?P3@6!7 zhMxU>lN{w&l~oq4C85=2=x~Zg1PlupLqv9Ar^ET&GpS1BZ#ZDEEh{0+hS{n z*3DULFNoTEYPN#N_Gco23+q9eGLvPo9VZ&7QBV)9JtBsfxBq-7gf*nAl;lc>m>6pB zX(?}Guzt>S{K=E??B^j&i`vAVN0~yWp+4o(Ppw47J1l&T6(5)?el%T4?v1ucTKdly zCpeHlyK;_5aux0pXpiposB0Z?85yk;|H@)*eVTJVkd=I2PG`UoQN3?F#@$@2yT0&$ zjaJcT&OC_EmNs-Jzlxx5UV5+>-?Zn{SjJtnQS!=U^yE6b)q?wx++=T1r<6Ay4x!tC zOz9h!j$?@N2k#9=o3B0j{qAtS>-kALJx8Pm<*6E}V70>zzTocZ+TLJ5@j8dJMuHUL zjhe6BBO`8Xe{(8BsIzHtmzlb{Ytboel$|uq7j=xJhONjfGPJIn448qBLKN2izYAJ` z>0(M?R4k=?l`QEQux43L8L89a5R){=M%aZJ>SyGgaHv*zQ19I`lXg@%r&Fks zwQj4UUr6f;`>tJ6VdmcO%f&_FVad(_cdqnN#$0kM4$@NC<1qHq>yKP!shlsp(Xoy{ zcslscmDO?ylu(XPP+pUgO22Ayr6hD6SYjonqay=%iNdHv7#vJhK_H#vmn}{&l;@Zt zjpQ0&Bqmhr&TAsx`z=EV3dlcqYi>?X9czt|r-ZeuT<^&KO>FOcMQa?tQk{+Lp<0O~vQho!c;UM)PQExg#D5MObt;bb*K*k3dh5H*RyH%p)jpC9 z+FXhJkR$y1l%Rp%e#0`oT>y{od9a+dMR~;R9zOUL^q8Vp zjUR8f2QjB*ODpPj;%e<7nL{@)z47=CcFPVF$fn_E3bv07i8txA=+|UR25H$V71^mA zZbuLmqFSjAZ`#PO%8S;?wpi;5WSnX`B;s1!m1a!5z!8!pCD?(8)^M(fed?PY!EjKB|opzb{cL_cNVmAO6@?xejhN4Q||vgT4REf1|FiF_p=kf_Yq3% zL2DqcV5=bj*Y6AER=l1rs`v-&v79ZxN`){B64oqCe9bh&XE!8(18v?P0Z z5YUC}&|xezGe-oPYFC~D%{JVZj~SEX$eVH#w9lg3U(~+>bF3n%Pya6+)aeMvcWn)! 
zOS0zYieD{!6nreZz)}Zj&d7igwpyGM+m9BGOSp9N+MiM_6C>{-Z{|Nux9U$Z4d-X#c_h7ah%o83xaRQ;LbBwPRhrWxqRhwzHfpPm@%e zNP|bHe+mv`=MbZcu9J@v8Fkqw=61OhU=A@C`0F8*SC0sW?I)?0aY0y4BjiR#ArQ+q z(*0z)T1&~rLtg?%?m(cNija`s^E~GZIP!#&ztD|#=MGI_diOkZ%`Lm=0$12+5`E4Y zeR*QUdvkusI}mkfIv7y;QV}5_F;gI4@^D8q+d}7fu8=wQ2+m=~uKQP)GI46OjFb(xN z12D>0jj{*Bz0?yguYT07`@9YN*J2oI55?@et(j1(zN?wni|#gx&HYZ_baU_>c2uVs zcrfOmOk5>pfcbiP9Sl2sukXKG)xY8T;m@Dkmuh$3t*Wk=+G-e&#l`H9(otmzHbQ0z zvS2am~62R&Vdx7)!1qwkwIz|>AVIknXD)g^!W zT1R49xYzR$es4?`oDy3u&yTg-6J7LBLctuVr>c1gKuk8DnC}J{`25f4X`pW%#l>DD zJ*5MY)#=QiM|6ldiH~++4v>`g?F1@K^7^f2FZo5uoQ8Wn{h&5OskvS~IZP(xGg@Hf z{xQ$C_)40&Go8V~WA+A(%YWK%p@~giuFFB)8O{t&!xJ|^?@?SQlVt^gT_qBYoli0E z!AW?AlcuR_pUe}`B~L&~LL?QlYAU*fSG$U1nAeyxm#<+3Vin{K*PvgE<7fJ|3o5RL z{6aZVMYd2In|Lv@P?0Qp;B(j%P}XGHk{ezO75Mo-bB!b#MG$iFlw?-d{1Z~D3>0bg z;t4Hg?&`WiLHlMV^H_3;NkuOtB$)BJ1J0jlYSjpo=hc`4Zn5u zFYe1&s5;MJFEhqnzO+#YOZ-8tZ7rj zaQQu#$G&Y3ThDtKlT7ypwx1lk5!5KGVvahJp=ZEQEG>h_MUP{?9gf@Z;Bm%Z)2^ie z0yb{80;gT)IkXr$h#9;sjKm}{t&&aJ2X+{@3N?Da$J3F|4|lsYUAWwDqy2XV-Lr0H z9vuQIP=3Y7xGK7u(h&kcOCVC3HJ&yj*nn+tbWfZ8aD>S>DY6-eUT*N z-gS(mkQYrap&dFfb?|!@1_{_y#0Li&zGtdtvr^~;>56v*_)Rudz%o;e4sMMx2Cq7= zBUb*phi-IxZkmRXuwTqK2(+fNJyJ`Fvp2_YDr6;#pp&}+r~N0fg}R*8H?OtvFYb}g z$)x>z46#ja`z(2_z06F9nEPhvAc6gr<9bYIjx`}vyPYEpHne` zkkvJ>ME!#&O>reQn={9Y@VG);Cc(+0@1^`DRR14Ye;HL(->(0|QUW3^Al=<19g-r5 zba!|6qC-l$OFE>xJEc3MI~Of1VG+;d+Sk2*_y5^rFowQxsB_M5oN*kVQ=Q$pg&O}d zl~S9bkeM&AbOnH}^qxiew1n&PodbT5G514>ibhG_OA1ZHyLPuNwY!LWL*4N1wHOq0vC$z%HlA`8hZ=R;&Xw=_-@yl{#_%Nv8YSS=^5DN*G;H@~fIzjBzhpatO@# zsK$R#kKq|@ph&yDHC#|@A{H#L=dmfmy(5_RytBU!cCC}XVo^BN%2y?gRV@j-IX)79 zw>>w-e;loN3Mryui=t+BMDG^;EPEqFe5F^R1~|A%ews@h#DD{V#>f>6K1WTNeLXh+ z6WnBK;5#21HKTny`jQvo-FyoR7sLNq*=(plsKY_@Mbcn9{x8Q2vP}N*^X`rWc9G8= z!P|^9{ZXLkCHm+WX_59zV*9jbXR&+2lw5XjSIbjF9bV-N%@=^MhF1!l+A);1eeHQ^ zs()O65&~pRMEiALSgPtjd&ZtrlWboxTw875w+^va{&s-fXB8+(jhI)*MQ)LlBi1S~ zD8l$%i!MFJq|Sg>Z+nIWC_Bb5j@3lV&*6na-#tTR6}yj}r}> zmKGijWjMiGa8{x~75y#F^yi@#>p^^FM%~r&6&HeU(erd-?5M+AAIAC}X72~yWvAwq 
z(^mn4X{;}^4b-F&+HV3py*A8Vw6}=4fV6g%HANI&h694!(M;Mn64fSp8Xa`wt1o$H zJ6goywlbTK+&SAsAQF$ z{+4U4u*p}HXOlnwI=MojOXy+6_m#RyL~CG)?Ycb{ubTZ>LL8ZJ^)#{k=`}L@SFUl5 zN;g6}jUtBrXl!+^d>u-TDs4RZ?(&c0NgpKOlXBmi^b(x6eb;>pNaPIX@`C7*#;U_) zw`Q>b->lMF%qU>~dxs4MjG&;3KT&5SD@~U?aD9F!@&u<<>VLhZM7nNT`A@9VXH;JN zbqpHtfM@%~nev|~;U!p*z<*hC7IgrA{}uRuSR67f#h-RDMJb%0bFBe+EN2mx4_eQ6 zQMn9$1i61!yx)=u25Es=a|EWH$c%?+TqNlFky?vchQA->&_ zU%TemMd9+Bo%i1MhZE{ob&3DE{u_TMEIaVvj*mU%xA#0_HYeL0e*cL^sNEA`WZu3Q zBkx>K2r*{(ar3^#I8P&tkpYv?jSxYSVQ%{-u*)aJxNSjvU*`^HJ~}t#)duS3aAHU< z0`yG-UqA+z{%_qfbH>s|4D9Vn}J7+i7YJ03%4`$YL^6TM+y~jisFzTJh*7KdOfO_JAyIkz;h_V6^Ic5r^C_5 z|5YFUZ!zEcC4y^g1=x(&l=9A$@TrmQyp{(0@v;$t@a)KM-cVTN;ls#Mv*qJ|s5pz0 z>%gie{UL%DxVd>A3-+@lGoE;e%}R6SPL`OrQfR#cqRJflMQL}~t@RxV5R0_rifX}6cm!=FjIvCumnvyxW4|Bk5Wx6qdsKj)uV7h2i6a_1ay^D|LW@= z*0tvLMs~~TfFbNS%rsv@{-56R-zOFWSPL{k!AN5mlL!ExPb~yk!pA!D|2aCl-veRA z3|1T$w`2IDmNOf-UQu@hlALnR`wrV*H1)b_B!SO`!x;Pi3VXu!hM0O2HCo2|*PcqL zt|k6=m{V(2M+idKRWLU%17!buKLCW67vI)fZGi5029uwR48#2beIQUH-+d3L6~d7V zF<`AkMIoX@LsX;@qn&;8YwP)3bYlRYl|8pCV!OOjPQ{E~8QR9=2 zVos`y4rrD1u!X%*QT@W$5Ly^gg7a!ZT>#paOw!TB3&hyyf3XXGhWGeDv}m;?@ESP z%|fJ@0r1binZ*D8VxadDb$N^;I-(8K_5kgXv9>pJgfAxuJ#d0Z(YIsTWg?9siA&=k z2haf=0#PD?1oVNpsW(VS{>DSm_(alx|J)Nl2%$F)Q%hva;##zU z+N0KbS-v6f9QFS>qcC7;{!u&9Vy4pg6qT@xiMj&Kte>saTW}Px%2_93`4lhDjfp89 zx?Riv6_1aKo&BYT$un9GX?OuRb;B?2r%4$B=aYp7+E{gFU0MluuHzb{AU~ijL0@^W|0&U8}Q9k&W*`#^{IFi3MGZv z=G)Y`I95K23S;2ap+$DT+q;Me#``w)YwS?X^a^GsRPp7-0z(U`dKl*a|fXMnI zh==GNN-)SVk^mM3H(;j==W6)!U)#pt67a6ia`7rLHU_=?^A!Uy{48zUcij)SkBI$I z*rRSg%+l$}4PF5ciux))(4^)Wkfa9=)cDf%YgL0-PghD@S z`mKLhS!%fY#We+72%+w**_S89==Po>^t0vxbNF>1YZEQWrN&YfXWU(Q82)*+5G#fM z^(4Nm_24Mr5e$tbsF7gsf&VW)!S*ZSfErxbS=R+SjZI&S1=jO7w;>#(*7A;w;+p`w z7a)`1l_*`>GiIb66#=S=qiQZ!S)*qh`2FjzUER;?=*sTmr4pYyBu-&jOZ*O#Qf_G|K_+0KNCS!45TVmvPhu2Qn zYOGQJGHSkY(Cj4LrupENb%d;jC;SMF0-8ECKqLJsY^ z_T1#Uez_O))-0BK?BEN+>ff#1_zeI@(6CIukLN>cIw$K4d}+h%DQ?s;{7oCL zc*r+m6%K|QG;v^Q=6`1jpkMAOVCo!br{c8R5Zl-OB+#^^@@|z 
z3XSsC#t~X_`2V~WqvWX9FF1;S-^z!V9rU67FV_u#8v-t%FGM+%feTUw$*fr@s|WQ_ zQ+i0LAUH3UT=yl=&5e?0rD42CtZR5ZW~#H_3b8$@yP8Sg0~m8tFC+j1jPU=?U(;@Kmg1s05fF}XBqK7=WsEvsI*B%NVm6{tXaLf4QI1a54sZ_qopc=)ws4_O!n&Qp%L#~U1xLxA^LIQCvE+vt; z?yMpJvcf_MMk4aqN9U&WweQy5cJ>#Uw?j;eKpsG5R=!lE!LM3R{&FIe8p&8-{P*n_ z909C|++OjtU{R!(D=;<;&P8HK3u$b5{NP2jI#ytzW~No|92U|vA6)Sr$=N9Q{kH_@ zH}Qzw*=+EA3ME6bU3-?bM+$lfPRlW$kSWDW}C&flw(#RdJ5~qbL zb<529Jiy4i>8~24a;&VZ4leb=)^J8oR_(z08iPO9;H~!zsTJgO(=n&gVA>Pr zh|B4|9t$;wTbGD%Nh9-V6ukJe{>5X^mvWokFSS`e7svm&!b`d+!=en-4WhTv`a^my zqAd&x%Le>EV-LPBUam=27*6_?F-zD7-S+n3p|B^(Vu&rgzKGwh?SXsw+pHlb4o{Kk z-#P92*a7%ij30geKaFmpmkYRi5twmncokqtWTrCG_)oLDi&u0=x(E0oSI_bxP;`Cq z`3O(Lo86&8lY)$dS97Mgb3(0>pebsq3Xd2-0B4W>#fHXAzed$>-HOo zurV__QAC}B%~er#vSZ0_{*!_4-CvS#Ty*9B+!klJMMT5JtI{-qp{3Yu3!CssqgRg@ zDa1%if~Ikm6@yda10uaz1T4hEX0-(|1Kg@Vr0w31nCWwMp)xG#O|7a=OzEZ` zb+~7609Z@MGyIL5HB#W_XaXZK_~O_URe2`8FLhYkaA0`{1K+QbeY+qaO_n7fBSR`6 zgjiz8uzjKb@Eau%t9<+{`1}Xu;rvknfuCoRH1v1SbNed}lP5R8BRE85obctf8-5%7 z6`u;1Oq-RZ#0Pxd-%zH0^EVlXZwdFCySFOJ{$c>cd4N~6wRI|588J-- zwZdM-;3l&ks}`7#S`@0b{>~FPMJ!lbA#%B6{cv7N1`(?4lSxQ&{;HL5TLl|5XdW&6 zwmF7hQ$7Wp1D;-5G*s?*z?J*!N7Z|i2~{K|^O4s_sY)9vG-K1GyA!H)@2(H8-Yao- z{g@c*GPi*7>w0La__oMO5=2aBtSD7kWD?*EC$qLK#^wM?S zz2r-9s=#&C`SNY(gkB5#-oMUTNenQ2w36t_H%|K!ZFW4GBo|`&#Yz$wv8JgMnEh2` zR3}wIDk_*0Ych`90;t;|Y+6iUL8}Om<#H+BVMG~(w*Fk&s-zE;Gib!;AOY?OOb>*RkBpcNou|A-4J# zIf)<8Ak0uKNqiGe`Zm=Jd2jaNL%^puH}}^==*^p5gm$|8!()mfp&sRW@y~@utOF{r zDn!TM9to2JCyk=`)Yk;PHS@eX+!V@m{nnU#W2`<^rBQA+pWkELB@wsrw4ET)N+BzK`Po18neSlBZ)T zFj5cDPT_%EMhwHg#Iu&QeFZa}lWg4uPyv-`@+8K5mdK zgL@v<%nnALp83`HjvAIx{Y%F|hqmH8m_7$UIXI~+EH@(*(#pCfXcTchyk<==Ia`}T zQ`&S0_ZtQ11y;PzP!f$G^9-jt`yI66d8|LIBzo3cxb-JM1R8%GFC@ooHFRB5b^X*C z%WSXs_u+|{DSIijSm4VwY}>;?L2^TW96n{E%c?tExuG?k*iCXru2e?n_5wrI)ds`1 zDO*`4-IxDC3;&y=GXiqjAj%7GW09~Fa>4>Ti%z_x3V7l4Z>TSyPus=OKl+4J6@I@C z?|ch<2L%R@$$!ESkxvx167hQqB)X;aAO&Wq6HdUSm&R`hmwYqK%H>YKi`js1oXjNp)LJa4C3>sb^T z8~Y;wZEXGh@U&Q^U(P(9o6`v#)yLwJ#FjuXMHz4;WX~jEpqKlK+jz*eh=6_k$NP64 
zBt-(;o3{BP4S#-LbdR*oLS*SvXs(qoKYy{DT-kS_uOBLY~*a+Yz z7ggQLIEcA>z+y|N9Z$tNIN#nI=cu4IZmYv1a}%%ZrOrJtiK73_JsWC8AKn_JZ`ODj z%zeZOZR&}Tk^fxtdQD2~?*23pS`rN``FSL5HE5%#!FsC$tQqQ?qaa21!r11%6it^t zH;835=v0sze>|{wJP|IQ{iX%d&Mw&DSXHA?bW2*d``Hi|U#E(UR^F64KeLL^?~*Po zgLg;>2aaL*nMDmPzWDQ`oUwk$wY1wMo}Gdti{9AOqh5ZDt_o<|mq=}?&P~|HIeR2> z4+uLf_{d0B7EOCGztrP0=`u?wJEcwJH zzuHa`vL0buiT)JupZF-dGKqH((-Lm@5XU-NeDh{V?k4p=rr^L-c2sN&Cy|~Wk*&eg z;#fZ8H~HKp%3|h^|f2A|k-YB3`z%ofK-g{ax2v=)oOzVMO_4 ze6M?Gs(gpyul|)pwY{@nbQUC;6X<18dtBdQ11;p&=^yA`137e%oPpl|kbZv3o%HU7 zR$B~^FV!4i4LO}G>&0>_zoFKb2zacqK8v zH;7=owU>aH`3t42hGMKrqiw6MmjSoZTV;B+%Q1~H+)kel6#$6A50ysAiLGW`T?af8 zru1aseckNKNHL5qlgYjW3b&tnU-_x)vt36vr7a&V3}9cx{iJrrbqRihDiHj;C>Qo$ zwYw;w{LJmRwkcdXjied1^CYZvG*h(|K`km%eL@6f5w1{Qa|mkuR{;T z5R%^VSr+HGDb!0hOWa3BoZcocj3u0`Gn>ZhM|uq#JU|=j_i;3dR2u12-OheJRWPYx zHwZ|J^74qj@JHs94Gwio_PM^Alu|XOO$mu*5Lfek-oL{J%d)Fpe82?>U*>$RPbv33a29c3^uzEAV*-U9#lj^Y zf7;A857IKLke8`C4I`eO%kw8+bJUc_11v2VT7Xsj@uyXSCov+lyxL60yzmPZHIJRp zs7&4B_f47quY*{5#1MW+wYIc~7%+_GdLQX%-5IXQrF9?zjje7UG%x1Uw>MhjK>{CH z>q}8jwTWBVu$<2W%Gk_@VdUK!4N9ff6x!}|nu-1*6-C>^qgvr9)}oWP!2mEAb4y8b z%lN++dyE2t>DBn{O+U6wY1+8L*aFI2@@OgKPNLr{B*c}b!%+(jyrrqo!h4lZV}~4P zug}Zr!E}gpx%22b3jL1-8C>@UkpZ;s2zY{Ms~rA(-FktDXbxo^KDIg=QJ9 zCfaV+?X;^U{9WC?X2ecil&^-9EPCS$c|wuP>L66BK%Xk*1iU0gylzvU`v`$hUol86HslERaBY<8lq%zFE0Xr4QGY+hz zGOg#$g#Nmt?Y!^wdwjbipf>GwR4||OQ{f@|UXXNnG8Z-SN(eXMm~h71 zJ^=S&jJRSo#$^L|5_7dPNePTpG;l~fE*zI%U!)+IQff?NMA*S(ltDv8{Tigw65lDuH?8xcThyxsD19*V%9zlQFRuT%>{+`s3M-1FRSbbjk7amI{LuSDc~ z6SDCp%3@Th5@;KZe4gh0Z#&mNSp*ryi!DR}8}Se+rv{#e0U^UpqZ|YDNeYmlBD6vD zw@&;`W{|rKAF9a@dl*YJKm=ch&~JAaL+Y$X~ z(9GKl^*oW|!55xgw%4pDW|e6cK)WcNu5}qa$8%Uqm*n?AyI}ahGZKb{|-yCkjZp7zR+YG z3t98nPx#^KeBXECWS*1LeBxf%d9ovG(!0w%^iN0ak3^AuB+CWEcwC1* z z>F_A}52BR>vGyG|aaikQusl{Z2NmuGCwUTcgh{DZp?*(yUt1v=G{D~+PE73d^tV7h zntFeSQ*Aib@Gh_P)ClJWKxZnDeUqv^9H?_W9yYBn+qCb5s%kp1B)XE?BnNa5e%`rH za>;VWYiC?s=aNM!z&^L8ouA`8K)V#q$EX(2<5R^8YK;2vr&f4KIAyc-W>KWj_4-)L 
zy9r?oT^%iH@m&hxd1{%Y!7Q4CjZ#;wig2NvhO9=Dzhys1LiP|ClkK_+Kfwkmwd?6` zM|+o5c&*u;$~l~y*j_p;J0{ep1G;=oFrnH&H?FZTb;fqSl~L)aSGvT*E%RWV z*%=mc{k=M#FlyA*klkbWP}O6KfLn6PHNDD{7bTsoAXBMUsc*=h{;7ML4|n=RcU_$2 z80)vvnw#tU@nhV{F=pHA?9iS0Yo~FcJ7=Ng+dHWBZthBz3q1U+-XCLsES`um^FHEE z^<))e#Cqi6+0yehrLyV0vjY5+!cNBQ=W80ym4_LJ~9gmxz-fm0~2?K(fMkaHAY(Pu%q1D3%T2B*e+FSZk?DyJNW*Q|?ee86ewqapu z;738{eu|sXOK5dPw^J;dUx;*O-CFk9E&V)yyd`#t!(}D)wrT0K9lX9#B`olnl$OK& z>1-yM=H(}SKtLNVnD0OiQaqVzy3^LtQGKnyqM^OJjI#u5cG|v^HAy_PIS>GycXkpB ziFZqeJ$T24LUt(^oaR?YZO{o_7W2I73?N4t^M@jG*_w3QXmV_MharR zwHrErv@t?)jIxJ@2LqaLea|)0VlQuFt>1ssYF*m6#0BB>sh7l`TdS1TZ*g`wbXP4} z>A$a)CIfc!Htl%|!pJ>g&(qjSa&&X(B&Uud<*E>rneQUjsMc!Q=8-tC`3W+q4!b|h zJHK!iv+5c3WY?2vRz7lR1Ygd!=3Iq|yv8c=-to;s*3u+akB0TK*s=b|_=!z}{q=E5 z_OvCKAH{j)ep-8U_ijik6e-Q)V_|4*N&izSr5E>f9yL=hD;TIh)pZ!2GCcO8F9-kZQB@#`~g{M|V7+H;UmE zL^Q@3C=39>)$eb100ro!*~TY-`2Y7qwuq$AtwL#sJH`Ij$19W7$7><#o~YdG1jTZV zd*h2z#WBvVBDLAarIm+qHCw*W4ovdk3SGBIW7Nj2NM@z3-xj)OKJN9gvgw()N<*Rz z2fIuS5UO7B9sXaQ4PRBDR8M56=`quebm$a?r)Ue{gD`mO#1jQDZJ0rm{gbl#DE+g0 zVsy;Sq%&(fZ3p5P_jq@kBwF#rfjj{N)?e)?-;NH|Qq)q~>{_U(WQxm3UCWW$8bxFy zm1zPTnNip=KuYParmsex?vj}>LaN#rTqDsOg1j78ybTxULMfM$Rg!-+X6Q6ugve@8 zF7+(ph$m7h@QLrJhatN@jW!x zFng3A&pmNpeLZh>$&Q(oI9)X@%s;C^lw_lB3~LJ|^f-sg+=0 z*!}MKN@RE2zGG-^uZVrH}2Cds!tFir?xaD&9aH8xlz= z2e4CTib>kp6{o#76awcGv>^t2zTp&-<`N&|7)@6Lp!0gal zcvJl&>L$}(+f2UoXWb4O(K;-H+D~e0y|e>w9?|L`Yt5ZToZgcpcahRE(9Ydat!E$N zGQs18lIbapMV!M^ALJ0)Y`EIHzSa@2ZjYTuw;E+!wdoO*7F4#_A+e0wu5P~ZXBgyk z&VBdnUfsorZJEDDva%Au{IF~p=pqlp`6++6GxinofB!Ti0~=Rj43>YhpTdBk>E~;j z;G+D*UncMj{pg*IaW1y{D^$}%$FAoy$zFEW5-WkI3w{aGd)iJqo{tGvy`!Xu?RK)C zwGYK$K`}DTBq1JQ7!%9N8(~*NxT4cdi~8^BB+==W$c7d-?2_sTH}ZGZ&Xi9%V!J!+ z^9>F0tXA^~*m*J=va^KOomVK{d9p90=xeDO*oM6-QZMzvkIJ!1ZL7eD!|gpGwbED8 z(4!E&09l?jkS_!^nij|xJaq?N;eCT<*zks(kkk@60TKm)LoF4`DkSN@6|Q z71xy7)MeYT$|5sB$Ud(k+!0aZfKR6zmLn6TY-KgaP==ew92r?FL8Yg%b~lW80y1K#(|rDnK>z*N;V$L?;?yxbt(uR&E|w>jAY^mo~&9 zLL1e;VlH~?9cwQxANYzC+k|JTeL4}24@Pdb-|q!t_B5u>V75Ikl~1bs*e$!}_z3I( 
z;RR>(>MYJ{-R}C3dIz}a{Z>q+I$!4=9Vd6lx8?aV=I>wL&rR2ixf~3QzAv7)EU4IL z52fsme~jn>?VI$ElZZ%k@WwrBqirA~l6aT2KQ22@{Ag}GwCvIeY+*r}nLWR|;a4t8L_a1R!k4H2uW0j{y7D z(8PTpanf*{C2Fbq>O7SG?#%X*LnamuVT>$>1^WLN5{tqp3&EMe5XXoc!qVt`d)lzu zLNJ=UkLhxiV7lhL0lP$FX&eu)gU(I!{!F`3LT_g>3YKyak#5V{o5wwD_g-UyLyDmm z?BE!Y=j6}k``Q<)-agnz^`I)|ckq{pl`>J1WZP58Lo{x^KieOUrDrPaJ8yo~jg+E- zhz7A27*S?un6KABjn<0Fc7&^3Hf|c33zqS$2GPw8X*xwrD40`j;2Voz(SBWD8CCob z1^Du&2NSwZ-t!rMTJAW(xYQ(c?3jj~PT9-SEs7ywX4*CB_2DG8FxYl}RYyO%4sBA* zBctJVW>In;0GzqyIDQR;Ciax5Te@>9UGoq?|R%8)5 zKXeK(9uI7GdzAis<8>D~snOq=dx!6}g%i01H{baU5m)H#u!}>RZb)1Ac3}*AQ&v7E zoApAM+_DbOYWNWH@!t(SkicZq=?F$L_rZsAaDg<){?sA5Zu7ek_lk~4Z%YiehJ-MW ztI02t%<2W3xyGhBo2mBhCkxW9$5y%bbL0hYH3-lCUZWS2?1qjmJ1k@_+0SS^FM;I( z4zpLd!HxR9i34B1C~RNm)1(Ri@YCcmJ(_Fj4)pkxM~CNEhH&-N#w^Hs?i0GcGET*V zo7%V;2AWuE*6_`_{p_=uY{xH9p4p0)RxsK0d|$T?r=kyjf)hq8Meqb$oA)*t|44Qk z*~=3E|J`x#(F6H0q%^uFg_NvtjARL~Exu!${j#LLhp5{fjPnPZ5|Q^zef&_gJjj%6 zwKd)Uk%Bz-qYELsdsg(5@roL?=sJTvbC!MlQ3o6AnV0yl^XUxuDB%aL z9qVW_uZW{SB&OIwY@CG}q_b*YMwG$iBZ_rcLiCnN9wdo>@|9I3W6Xio%RI%49Hj@(?U)s>{@5~}SGpjm*|hC-{}&Bb`S+`e(N!vam=r@d z_LL=fdHA>>+pYD}5nR*nnGjzXMXXDQ3@BK>M%%XB&OlZ2riB_6L3*x9o@8Yh0kz@&rjfCBSHkYA{_nIgca2+wEF1IvC0m zV3Hcwv)dW#JTP}sEDrn zt0-Ci2zJY<1zT^jKRxZQyn1&tb)%|m8+hs*Q zQ~8=j-|udVFbgkFzxkE*05GWYTUK^<>-lTV7K`KrRF>XJyXBsmD>pl^a8D%JQXk%_ z&~}Njigw@gYq2&k-j{WdPYqM%2dQ)GBbM1tyZHl_pYyzat6TgJzY#-mJbL=&W3Hu6 zp(A(9tv{b|kXu4hxX7k_EA)!kVOUo(i*-sup(HW|QAIjLmqzH#;Ns4{KoTWtgKg)t z;@FgwXTrX(wr1Kg|DC$}!*A09*DyOL%3-&9s=sBV{q0jqO`iRIcqZx7|cOi+eNm zia4v$eBDl13js)tn>dyJ|0#G8h!s32A(szUFu?0I^Ke7iE7hPby(4pt7r{`qFxw^U znc(NA6?$4=y7H(^A(ktJAp@^qyVpc!^ggk>x7M+in#ww z`KhWNH<(=Qx$}|rPB*-!_=l2|7Izs-K20aMIZ9HBvV0|^{`4ufT;tI*OtnnATZe6j zAdU0cR=QgX$VQP40k-4_MEU}2iT(e$f>j^*EePQIAB+#t&v8v zfF(pmIc*n-*E=8HEeI{HUKg;y`HdMBoaRkwz%Pq2;7*%=Z63$G|EFm31|{i$l#rOh zBw1a|6nzg12W8Efc7wWV%=6P`^hImU5?=gb7;{7Nv zPIqZnJ8o(jX|cZ7EmM@WoOhobWs(nEqusE5P{9@9`>4r1?risw7wmeV658TSIfBfK zYUA4~ztvb@j}Qbp@S}r*o4Vv7!vglaptS%`>;}5$3u1M?g+z5!kYIo98#{unN=(DQ 
zneIu@LAc$ruN&&1VV@F7w6Ja{4xhqC%%6i(^&bh+p*PcGE4m^|(SkJ3Aw{2#X7Rtm zaAvtYsy7O$=ihw9Ve4Sya4>v4P02nK;yA^#R$BWI2Fh-46YG2ei{IJJzP>Yt+AGy- zIQBp$6sLdPn1Zt2MDw#r3rajysD}#@~5+6`m@m4rO9GT%An8_0p4;X z&f+S{K}KgzirdlWN+BOY8`+~fe!Q;oD;T_Uw`518n1~qQ`GqcGm#`dlRT0Y1Fgi{? zV#WO9)u>nkDaFD=@hy{J+h|W`Tt%xO1_cfI_!MC}LsJR2=~%-Yfh@Q9($nTNoYOr*&pcduvSHveNPpP;2S~cI&9hNizW|%^0rfqHba<8hkxAU($~qnn z$W@KbJ$R=S4?ha0+!2JrIB_>ym24a%-22mayRT|QXa_gamObr|Gh#9blTEQlplY3H zdS(tcP{ix{!|$(}n!b1QUjxni!K0Q0o(Z*s;C9gYM{M)xcWkK0=R~KHmvShnve~|$ zet44U3-$>c6fcz0It)XW+lPomm!7K+x2TE9M>WoUTAps~3}*aB z=yR4uZk;T60+zKr*Ei!3_J*p~y4|5HN1#K$f7}c%90Q6a4Wl9}9L_UevJhYagU~>P zs?~71`aYkXonadCV(p+UWAGy5`Gotx{owU%QtWMj8|$IlmpA0j|CCDT2%&sN`a|Y4 zYvi)op74rJ4YhKDA2sO)3Gcan;cb@wr)JUwNv~t(obCh;gF+^zsyP&ho9;%Lj|iSH z`@$9qCTIMKmOwUN)4&`jVMBbf&pB6nW$ zO5K0J3N?m&pUBevEuTqw5T~n0sWz=)&+xFk*YH35_@~lJ7Tmp%L}{{;QMVgQz+9lQ z?jH+kCxgMF`{WN>WMuSwP|1cON7la{lhT;6sX;)2MzNbKi5`nS|0C5-aW6^|E~(I@ zQ{Zbs1M>!%KU!BqOXK%GA%O?zxqL&+HV}ITr({}$sV(Zc2GlD+zYr+StzXYH$8jhB zsCMvv4EakVj^vc0%dWZZbI#aOZmEjTUY`>wQvC6FcW0lNVCz3D0Mw1qF!soLSCZQv zaOvtqUKWxA{Ynw^ovo`^sFUQfR11~l-#^!+yW>rrb86wB(;E?D^JjKD6OfLgbT!_d z{mN7PfUIyV)&TnSup1quq04@Q+yMo8y>NP!k7e7XxZtht2aWVDgV$K$N5{93oj7lL zpn*jD4P!ony|y?StcU6fb$<7qS};?Imvh2EHGg<-D_+&r-fKg8_G9nII&(c{y_d&S zm1J8Gggd>F6S#N7gjfx4T0G?SdovH)rD#vRJy=e^ISN!kmP;t?>j6Y3(R zSC89*%D}Y<`3_Q~5pGvmd|nPbK>_1iGw?v;-PEAn>jk$#+%Vfhs|V2_gYaujm&j2X z=4fM5FMX*Pzt7V6HDjHV*JIe#cmw09V~>X$-?{p6XL?na{;iU9;O(ug5n7E3tWY@h z=0r!E7727Y;ux_XCNblfMk!Z`J3)t(yePd>^v}xE z3hRiLOQmtH*9zCoU~cWLgvVnGt?7QV_b4=>G@CMN`39}#D!3XNTY(xsm&{_!cN1>f%P*YVo@U-Rq9s~Ld6_w;0}*4_Q|9c&z?2HT&p|y6{P9|NJrW47M>Xpspd+d8E^(XnKY*syQ zfUFuA8JA_Qtvys{j{46Ywnmt+@HYR@XY{++O1;q^6HjJKB2KIRAGFFj#Y#Iiigi;? 
zofe|IKnnzX)rW@L>)P1c`e{4z$){aUOoU0i*5{WR(B8(G?erC}kxCcsufU&>Z~bW~ zO=J-rd_%YPwsr#2^_V@CD2r?MAO+3Hg#k;sp2qt2n@dwCjo}%7QVH$mgMnsrF9XGZ z+I26}PQedM8-;jGMk_i3aw3QbSfxmHO~oyUveQ?z*}1G;e=lsMS0&(A-?U{T)*(fK z;07n1`*oG|Xkvhhsmu!0^|v#(7D~$EEz!{rZ6Dz|zF2l7N>Bx~pi&*iAAe9hKUf!CLrMT_3zMZ#B|e z@xU&fY9dJUv3uQpj^%Z75Jm$T7?)8?B0L6)RL(a_NA6Ut^t*b9s{C~fi*ssw`D)Q1 z!_05j=1Id#WE8FDh!ASpa^e`FqGp5V$yr8H>=e-xmW;(CY6Fzihpz@+nnPNx9dti{ z?7QCaB>8+!*uWkMe!MS+iE=1QtHa-wc&1m(4*H?NybLP^{AyHGV}aa0F6fH6{}jsm z3^^zM{C9&MdgkX3OAOHIOMZiM65&^3$3{ZkFiugS@*qvia6Jr~(TNF#T zRi_jpSL^8T+J5(b^n4=erdju~`6sjVh$*qsrfZ!|KPf%Xqrzj1>=BQlv&{SBI7HfI z9k5VYw~sKsCupkap>l^UKC=yLXR&(x0Esh+|vx)uaQ#!A? zs*Vrzv3gM#5}yBm?7ekRRP7%(%&vU-16lKe1;9cwyccdatm#yfsAkj`AYH-fqL}O&<#i=0tOZ~YjAv~O?=#+hWW-2m zBhotTxIkFJ;UMKk@tS&D)F9m5kC= z+T7;uSi<1zGq;UAACg=!kR9!)?jGk|G>(1XdbfI_kZJB(RQ5%n%B|r0@}tMsVh`h8 zpY)7ndeeWio+S6Rx=?sVz{p}3>6zfb?K!_@*5}i}OuFfDG1;u3AC7!d-g15#c!R+q z<_2rF?#RPUX5ZblN$2A98R5{xUxa-lH=KWG6hcES4=EXoR6lkVi}A18zBOR3&VJ8>q9hA=eP7^3j5kR#(w;Vp~&W<%|awlbx@f4(kz%bwA`n9pKYvXFxZO zK|vkBYCn`!yTRtop=@P3g!{@b)lp*T`U(n6t0yQdT>o$=C)fG9Aj$?MdW`5S=(=H@ z;#3AQ(1Iyoxqhkh`Oi9I{m{Vu{h3#_r~_KvLJVYQ2`#K|k2L!UVnw%bz<>fqcPy0{ z^t&EBTazKL1RF6;;e;O%`u z3X$`2#)e44P4F!WIPJInl`Am#p{)t9L$orj0$}|fW=!LFcrs}IGL*DObV_K}NwUxs zYtR&D&ov}=AXtHs1#2H5*NnsvX)Ma+svTzu*tEXh8}y7cj*uZoLmwwdxxwbR14E9Q zoV?>e9bogjB?Z@@1cS)UYA^%t_?_Lcdb6Yp9_JYiL6T-!!Pd^Wb*+kdp$QiBFUWKJ z)*{j3i^=^T;R8ZjWc4QBhjF@xF!>`!fM%KAD>!bDhsKuoji%8eV*=j~&s(1uz5PzGsh8K7U?n_i zRBYrZ)9MLG`i8xn&;(F+o5BRp<|br0SX@=axPEca>(dU$(8P$|Ljt4^c#c9Yc!P%bLdjBQM+V}`Ic1si67tu{cW#v` zQ(Fd=nwl2k;V*Hb1P~2BY)Yl(=Q$<5Q0q4{pjW2>@^%l}E#23SzM2cFnF1X$^YLa^ zLsWJ`@YVz9Erub{8>YjcXK;l*XG2mHIljl;`P>idZi5&c7g<}(_A5>Uq5PZ{@?6k` zya5?DJ_h}(>#3$@GUO1N%%I$udqfu}-!Jv^YyBl^9Pw6NoRxWXpAyLZ!KTeJ0Cs-118n-l+1nK(stGnEkNG7B zqxu3Lbj=wO2IS!kG(ehi`K~-n5z_vK z;dl#wD~th>z$_ChV5Eq71`R}*$H=$krHtMj4<;gzAv*HdGe#J_!# zR_f@yIAL3ge@o<5q*uL5q75yod};LKjZM#$bl1fms_2A~T)FOya0bT!U~$rJnYzUN zf#I|Qc~@;KBQ&W+%R6w|NkG7%xsR@nW_U8_d)Ru7XH7$UaP1aW8Rpl!Px;;ruZP87 
z_*#@Bu_$a*Z0>u!ZSyZ_eT~pE!%mE6Db1vVpLl1^h4WBN*MC(={$>I>Y#Ug%!mf^U zTp8G?CpLqYB)pIu414p2s#b{5(Fl!!#Y6=co=gZ>p7AOwXQ_~0rgCncw$Oa9zDq(A zB`gz{4Ne=3m3d7&VOe-@fYZZPg4+G`M|ZAb`n`uLdyiT0=%19NjaoO?&c2!jX6qlg zf~Y8DY_3>dT7uR1A)6&Zm{#ke?|$PCzHvuZRC(Ly=vrcMa{cW>X~GIGS}a(l{sY)3 z&Ri^_Lax{Wf%WH z>RBxyB~83ehtqOk=L_!%mRO)Aw5x9kqitoVwmRX@aibe2 z`w2Z!Z5`NB?@&GI2*+Rgc%LjWmg@e4&Bxgf2420k=#NH9Zno>(~MNB`MZA~=-1@2*k zaX!h`#7H3u$<%kgI6KOGRG{3}dkwQ+0$b=)-AtWh{-ZaKWNU1{m7tnGXS{+Ap;9jC z3E2NL0;+pB1x74g+j;VSLKCkC58_NCv;LQ&$GRCFWtK@HfuDir&WQjr=M<|@qFot^ z$?Org&@)XP^8jx8Fb!^cxA3@yNvkzy%Jw3f&U4lm?oGU?wfmY5xB>QzZ(e(mKKwmZ zhrd=blT{V@JGJ{luH9Zd-5VYoi>Fgc0{`4QaubIGnUy z>aI$~FZm1S0A^Z$EZ{u~{eS@MsCKW*kYhqV8s>qerqcQxh8G_r8iJl8WzQ03cB`@b zE$1Hh3t8CcLSW}IYDQ~_^IK!>3m!!gP#dCfcXuD+R%(<*2{0+uZA$AB+U=oIdAF6Z zX=yr?#&C9WG}K5~B}f1*T3pzS>MWTR54M@Sa|%`qFv z;3)~Gj)CHV-U79h$X9V%qGId7D&m@fWIXeG^YvAmmf)V@EN zHMz!mYJHC`!%f^`tYkB%_(^Go*YR5Z5cjV0_KePh1bPZGpP#Q=ZvvxYdEgzE+~YeU zOUJ+Fj>iN)yvU^2ziMeUaT|D~S^P580SSw$7(@3f0pH9IV-RsrWgX_k+9SxF`}R|j zrm$?)fFcNpXdv6hMeI3gINYL*_3eGI8FdUZyhckLBV~dZb^D7}i8g<}y%W`U$R-;n z_z5Xx8xDOY>ddq=DJw`5@~o+e5?n9HUOYScHfA#C)^&<|UtZClRL z>W2ij1%CmpI&cO?r~3Ou)8S-)U37eB1fkqJo3BpVxepQ->sB%rno}r(0>F=#aq=1U zW&jJYND36bj(2&(eitzOZpb10UW`hs{CS%pI`u3!ytqLjR?V{c!{GeOH#5X{D#?Eb z8}Hr`cYlcyjlwM>?Baj2JU-j~K?k_!E7Y_U{#<`OtSaV;1zNC~5->h2beUciHe}je z;${lflIg-Jic(}?5$F4~T~a+Q8j{VzWE^{ZmLDBLkgf~4OWep9jK;k(z5i#0jC zkN1n-g87{&6tZfWzM?T<>l;bbv>tM-;wN#x9_ zN9e?Gyv%^ZeqQpQZlKRiIz0(_QhOpb!O7;sSn)bxRLZy$YcM*mciT$m-ivpINW!QHGmx) zx*9QLmg)WYy2z=WD|_R0B&^a!YFHdbq#@*(?DIA?063c|N#o0vmi44lvT-M>ci z&A#3+Fl7gU_yjcwJhc;smt}%Vr$F?}t>h;CoW@@sr|d{=AiATRy4#wfyqnSBXx;VnsiTV-XvLAd7Az z-^O{8R*hBc)oa8(KuB&>YpA_>6bpXT(Q&)KAmP+^)C*g+b^k)W0u3yt?mh@!)NF>T1QJjz$R^R?%-L}=g5C)#sd zx7rtX=H8=rQ%HnJeywx>QsMw2ivBFA2n=K@p|0C&r>$v?H;;?g69(gFfl(%!E;ktl zNpXPEpQN}xo90Di9HogRTFdAL1v<4XL}Nu<%6Px~$VYr<()l#n8}`a> z>m8@}Ng~0XGKh(JXzwG(lY~IFY5XC6HBZPa`&P48975id#{U*_+>tBJF(V50;(pu) 
z@H{NdKMg0U%;N`M@+w7%fU{XzSKIcsTP~7--IIfiIOGLKOe#<=aM-@JRkiL9r8U=x-%Zol3Nw|)s@EW)&jP8xc^f-{DDAToePTSpI6q(X@hzje@rAGUTo<_aG0l6y zE5rT(xz?JExd_s>J1dV42AU4web%2!qPeWT38<-+kpM0Jir^{Iq5%hj(C6CR?;%20 zSkF)QlzxFFEzSmFA9`s`h;HMvUD#&%t(}z{V*X8H;y^k+YWUO#`GUSamYdp*8g>CW zyXf-D;#n|vG`kUFpv3RFiN>6cI?m?F*0aM>ZPuAY2+_w+@Ut({@*RIL=x566W5X(K zq+pJaP5TpDB9eh?x3t-8Ar1U=M9eSt zr&;E~=1?wsP6dYYjZ77~7{M6TGwS|wob1=cr}&oiBFczIlXR<7q$+UFl}9?z`D(16 zkjTsPkIqFDZB*!sEU-OaTHuOKu*y~-Mu96v4!ERxX@O}&tYS8u7&lqCFonLBo`b>6 zN#JsZuX*Rf8M_hYnNq7R`HJA$#g73e?Doyza31B3fqj{>=2y7LciS{8tFIzCP9)&r zxNI!b4EnTP(RF{%R-8%!{Yv@4VOy1|Rn^XQNqwBMV%Coq&E;Ew1uJ?iekVo);|~B& z+}6I8@2T`tg{M5F(v$o#B2&)nL#f|zDBZxIdu+Zx{uob&6)5T2Qvb+b<~IH_zMU3~ z0=<~+y`SL?bhi|gVpbtRzK1WCb&)Ict~b;hrQotSc3MYv_JZgs19!KQm4tN~`d*Lx zlhwa-9K;9WwTkG%eoA#{7PjZ;q}wP5uLFcPEs5ki`IBhZK{bw`AG{(MgVve`my&#h z{^5A!#OOwG@ZqT(Mey_uN>4Fg$bbf?#x+y%g zi^vz1!1!tWpC%f3kA}8EJL7)8ARz{`7}W5n|f$odDU2123*_d@CvZu+hr! z*X=KmC6#Ff!SrZeo8zBs*#8xX_lZn`zLsFcCX^UwqbxAv?V!M>oS6uhh1kf^dD>j= zhfzLliYJ#brGgQ{jwtz$&|F~iCMt;OmUMPkTQ^&iFe##yy&N1)M-=qh6%!}>uIEH? 
z$;pJRGZk_`zv(}5a@{|4?)%RPr*)}PfUO69p3l6c8IuNe$d&r*TyViGYO zQQnJF?O;L>+s2C~A*;4{q#hUlYA;N;EfJvia>W~p3q3J7wz)N-AB}6r8s`|bKaPT$ z!#}rd+o)V+*!e7mRHX;2tu2Ct8fVCaNtiL^C4=tr?3Ff_c+uxT$h(lwdLgN74MfzG zkWfI$kJjU?33{jygr?O?8|(KDxGpAR!FJ|$JCOnLS3h{rjr~zN`#!G zU%amT*$U*8X#DkoRrmCllvv_EcOg8YF0@2ZzBxSaB{>?gJNAbW2o*UXDi-dbk^qve z9%_HGjAm^YuXI~C5EaHRY9wcZ1o64Semiq3nCez5I|xd{{InV+tDPcJD1Mq3(N1Gh z7eqyTH4hrfx+1H8>c>ODBDkh25c7Vc7d+nU+MUAJDO1skG_MW*_x#Kx5X?(2c3nX;NiAZQ| z3FlpT!LyUAx#(^L&vL_3QCEvN*bwc^Nu%v_Z12CnjM6716Y_9^5mtP>hr*8k+Tdj7 zW84o?bhiw@)XuLM{en%+7wej29WMY=>m`)5x`xITN%I$<|E{g3rp0^7vK+xA9@8QPO@X zL`rOiw8fkYue+)yC{fz*=7Pw;Rn;YrLR}dn3Qh!Q=w#79OGZaIkWI2+?^zVDQ3zV8lO&I`>+LMo_P{tDdou7H`9sr0`a1(+ zNIif#fFOzV8*?~fYb6y~J-&Mjm%ZlG`qK5}lg9#?xa6CBCxS27%=0UU*^ek&chduM z6+$56-wPGwv4oasE)^3h8lSQ2W1d3M5S|f%IeWUeDV}m_BBC89%+$HJ^A}qyoaCnY zOm&0R^fiZVu9q0Yfe*-E*!2eQy^8X%J262h?6W0i<>`fY-t9WB_RaemjtrtLeG*lp zAI|fzYQL|i><%@~zjfyr(JkDc7FN#rqrYPrfiXeQav5?-tHceahYSt*^c~>f%Cwqq$ln9LNV15j+vCXU2P(O3Pd2EkcxW1HY`gvFOOVY#! zPKx0*#wXnw1STb3-}lrbq8N0zU`%;%m6!t4>cuYJQj6deE&mk;&7H|g(+nCxx?3me zT@mdjRk%Z6JyRUZdq~V-_lSuWGKOa`BTt@6QzgCd?0Uw=ox7AgztXg9MLM28xY11@ zTf{XzkR@3B@ft3vEcppydcL4R>W9~pI@Cw58oG5+)Fky&X3I>jjVCYKvtA-gA)id1 z3A9}p9c+tw))s8#OvuN|+XV=|Jk^mp;pfPnn~FtuU{hpv(>F|r!H1W$Fva0Gn;K^M3Y+9j1eOW2zWFs+sct_3N zKlWyAfCT|pq=y>4E?(7kJJ)Nk>6V?&I?6;Q`}Kkix!PHpl**`R@`$;SV9o!b9KvB>@~{9H!)< ziRmDB{+9#)d5y#XgNhN!9{!~p`D3sVBbrX9KN~}VtN(m;@(Bfty5T=lQOBr)1ulQG ztE~LHlxL7cUH$!U3>88;l9(p@esof^_sIkBBvAOMSzf`d7JlYh-{t1l9OHl?Z8!2zj56EY|B3fYW5IQ zVE*0d4iOke_%<8QAIjoEjfsa21NZ;1?Q%yG(A|bp4S~O`TLvQy2s(bbY)66hX+VB} zaa`lFCH6Pq|J_Q{xLmbyI)IiY%f(hySI%7L0j*JSkPYZbKih;nJGlI*(GfbWMQU%8 za6=2Gk-)J&oE4E^Uk8pU8(M^wU#{&fTjW@rmZL|Xr-*&z1S?RO;e85r{@%&4>9=q0iI&FU*;#UG6p@c%o)N9*+ z-rf4>mrGI*yKuZ&PAfL=dfxGVB@ zd}qh$f5}Nb%MV)A?ieWyOBunD2Q3#gP+p2+ z_mujN)Ij&45!y(M4xa(ek3&@S^qOeche8I@5~jr8`xd-^NsP6+IfKBzlz?84Q(n?@ zAS}i0(f|m8Xc&OgLayzx0nH&LCE?yW1?e2Q&~T z7>r17=k{mb07icqNfIV)E*8^3OQq6{l|IsI?&c7iQO^)sj@Cdtn)dqpVb&F29vT 
zG!D#;jgcbTZtU`(nL%1Y{?Bf(0?gF@ep>^AKv=G7u>an*agtxtDo9!?H#BblO9(&_ zYKeasL=-phCqJg1B?JpU<501p`~6Ys-|VH9H-!?wzt{i2?f+M9S6O_1@VH3*H6sAl zXdD*(Q5HzG{19=F(;)Krhw2k_J7ddByJO3|5TJU4H-woSmI>5!E#BXd^N;vx0ItN- z^!%@#h`9p^+57ZrNK8cEAicP2>RBuQzmC5;YzH5Lr>d?3X2pXJMtD?|3gf^UN8 zzXCx5M&S&-`cem2a3l%PAOG7bF7y)Mqozo%x0mog;OwmM+2E$dn$^a~`)d<6ihb*> zpx3VYIOg!s>0T$O;ruILX^@#%A%nrey`4Wm6rs1!csE5De+#4k04_4i4>W)^14kRc2nbm)cQ5W(*0f&*G2JY{*Km&lIFd$kRkdHML zy4-~a_^#@30?qPOK)X8jp$&kmx%`+6AUantFo}#$N)nQowXT-vd1?Qa*UMe}PpN8$ z5Pbcdn6>81zo4sscrC*S-iiHj4*GT#-@pTV6lZ@3_}I2||EO^Pm!608T zEu=dF;QzV%WfGy+ch}#RKBzPuL|{tzVEWkl3O0HkZZJTtFc8(s=<4bUYNUcD)IVhL zLwJEIWL{}#bmU))D&q%Mi+IJ3`J1>uq4LX4wke|3aaiaHsrUZJop3>t??;E-fii zE1(#b2(V1Msi1rwRr2+@?OsO_Er7M5O6dE;0%SltOWna z4yecgco#~f2H2*Gl0N9KR(Kp(6h6!jxxuO{cyQCWPI7B1X$iJte?aEFAA5sY^U;H3 zmUJgpdi{|5<2kbU?x2TKEvHgTd!V%gI5cOC7yx-fZ;ovI|EH<%&kl=RB!Je;>e+zu z8@;1fjL(rr){M`U*SAacT6DL~YE1`;f34+bE{0J>#l$NKYut0)VgkSc1@B;Udi!6y zOCvSbS~|Nru3wkZ=rnHvJ}eFipIAYC6t1TqL`mId{r@uK5Hu_kP%#N)67Q-R&osKT zn|-Z!e(Uw4lTf8lg=WI_o43(WchG;xb`y}Ti386H?JwE#Ai2F^{sovKWxrO|mQ%{{ z;f-|H8T&2Jg_N_Yox8enkDhM&9djiW)IVJt%~$Gp08l#`{F_yylru+tk)Y!w2H?1J zTxI~mqq*oxIWPFykBD@f$Sm!()D}?9zA#*>cDh}=qHj}V*puH&TXeyJI2yWir?}ZY zT)CgiFKu&ln>;GKPh7mccG}QPN}CR!(J+qwqvw}eUC)Zyh6wqsDFU=AMEs1?sKV~gh3N0oMqSTiV8ATz>A%9sddq zBYW%j3!p{%O_FtJf1bVshzD%`MI6W8KdCR^j2p0W^26A;xBuj$)G>xY!c?&m0F-wO zDW85%ZJRyZ1%PrqRjQtTEaJswF|vNhusToUM^|sF^#zTw>#CcHZ(X{8>$=dEzJlwg zhxfy+=WZK45lpq}jiZ_TT47G2ZPp%a+SrEFskZnW2js8&<=6N%_Aa}#XE8D@I|B)< zdb%rsbyK;(5*|;p`kv05yYatF)|*w+pQtclHw*oIar$d-sd`K|UM2TIuOIBnB0vV| z4g5-t`IjXXP-mv*zTd%LcV_$o@#w zdKOCN^PxoPVK+Km8ct1y8x_Tu?vZo1{z0c>b$+3p)+-JGAZfaY8cgPU`Y`&$zWWa0 z&2BIr4U|ET>G|q3h=z7LrQdTl28J&BMf7jjJmeL~A!xwD>&{C9J1YeDwbo^6DJEt- zNi3uVIlfz|zZEXJ+n*{tz&k?icVOlk(;WI33YmnrDtK(B3kY3e%fWGX4~lzR;$A5f z5J^T9ST|XZ<|9}!g5-h#wnSy<-gl-JeWt2(pq)*?T)dY(YC|LI0e>6X-45cgN^+W) zjE9e_j+CU<%`)7jX_Yca=6!DbC`0KW2_fV-oG>GkyonS!+mD~Ndw;FM?Z-U)h`fXQ z>4#UMf|SkY$CmB@C4_oX2f1^H4&3|5t`cBc9cKN}LKYc{&M3m;8~tvI1dUNjJ(FZ& 
z0FN{ipPxfKS#_PZ4>b)qOIO9!PpmU|f7N`Zv5v%J^)*}!uVl<@IHNeCWhKLNakrlp zTkG@s=eGwlg{y&g%91&o9VwhR|20&icY&djN0mwf{u-bZsCGV&uh;LM^(Q394Vsyj zQWT{kydX?!^wkIj8h=S-N~i8(@#Eqb1cJinKaG9?cujRKQoHZ^ZJwgmEN@EZ&N#Hk zM)9-?BeG+~TKR+#M!hRXT;Us2%gMr?=Dz&3VO*ZKeD*)ycW8y7{X{jA-|U!4c1yy^ z(!i9y`y6g6#*yiJUYF)N<9^&#q)|x{d&@KWIx&~bY%{MWALzZKdKQZtS6x{imt$>u zG+gb{=uK*Kz2m%iyqjCwk1yJ@=KHA3CU&xj%Cwek6HR`G$D0jc3*iwK>U>$&IC|wP zci}|sQ$L_QC7~khN$3W2GzXP zV1svRGPTWLoyT;5lby1Ar$UT2lb`8R?=qp@;^XaK`+||;BM{s6ZpM>-1KS=_A&;k4 z5YUWZ?W~?dyx zx0NhARzU#3iT(QN&1lv7_S=x^AQSr-E4gvnksaSzXn6)X!4l=y0RZE}s7U&svIZz3fY3`K+8qjg$s3>yDOtqw2x_#S-w~)0l!e>%yPp4A z=?9L@7?|YAJLHF@FND48b)CAzMYfC#Mz2pyf4`d~G=Bbb0ox4stIa#_>x{VS!>#jI z&ji7}&8Z>&L+U`eA)thcT8VyW+N7i|;|7W^+!U>j%GLe4GJMKItQ?}NBR`JhT7KgZ z>{eF?sL}dQ;3U2#zM$)NKWob;g1ACAf0V~Y!@?&;t?9HuG>F<{C#}w0%yxn8#%!rr za9oCN`~+_;IsKbUrFGG+GFxA0?y;`t=FI>sxG{w7BMD+1NW6s|reSwG z(yxx-go>EA(#_no)ei=Lh-dR4GRBmNBo@s^^GCpa5Chs5cHdrzvIC$-^dJv>%Ia^^ ztQ%p&3IMVRAcA*yciv-1FdxqmNo0~Q8v zhGPZ%D7w0z6D=b-@7Es%h9s^5k6!AFHp}aeo}vY^ghJLI-X=k+lCA59%_m! 
z^NUsOVR8zM+J1i9asy}otA5Mx1p6K=y4v2IY~}bTq4+m31m|b47MWm7T^V3K^%vMe z`!TKeu59g2!w8g2VO%<%IyY-dZxMtVeGMF}ad0|9-8v|XPS%id{r0Yu0?`(Ct^K*c zE9Psrd7SPOdPG*}CkFY|Q~?kBK~N^<`j(w11|tcQK>u;2sX~*hA(NG%MHOSL{Su}a zd4~5C`LwvUAKSJ5BGASUPH1#mQ&}4?&nBp(OvJ>@ReSq*t=TY>7dfV$z^dEmCZn+x zA-PxA@2@4h!8isAc4r%H;6Z}AO(fsBwYVHsIu8IDOy=5yD?paUzg4@i@2CpW&6pgV zvD;AX_9qZG+@c`c}#2pF}Ont)W^4M z6vZ;y0?{lV_9qxc;k>7;!{#<8_YuR9XvMsGlkqXvI}D*0pti^CHV)UNJmbiu;8hw> zOR3yR?d)fcoyL8Cj7{(bxomPPp74ac9bU>7OFE3mFc%Cg^XS;KX`wg7^e#a2QTa$Yjb-Ei46vOq@)&e`OT*MLwMx zuu=zmrIg@jUPhxroL;qn!GS2bQHY<1}_>HOc z{gq5#7ba%~A~!`g@Fa_{cg!9N_n_wa?*xYLHOs{eDW8umE$nXv4#EoXd@U6D1u&WXMWjSqG z#S{2;hZ_@OeVI^zqZN}9tgc?6Z$HjtW%Qayx1aSXWDw(+d&u$EO^5O_Ri+(fv7PWt)U;Z zDvV_v<;W-o0xPw`cuKATVDg(iQSB-T%3tjuka>?S!Z%F9bWTpLug5pVoj;W|P<6#~ z;;YB^VuRwhvm+1`Z&DjW$nnp!04C9s9+tJXk4s_T7?P`q7KD(a$M#%z%V?XhHVNS> zdll&C1QMPpjeT8D99ar-DGX@@`w_*VsFT0iZMd);jx0H4DvH4!aqArY4!h~-v9n5z zedI{M{R;RpaM?L4S9UsTMQ87`u^6+Tsr(Ql-7*VQ@-ue)5)}~#4oj978PgFfP9>> zJ+R8ALoYzRa}}>g1Ku}poP4GfF42(CX#yjCfSAseB%mwS%yAi^XJounO5l*rmquT% z*vpKrbKmJ&bc9_td72fp%0aC*!Qj3NUx}!eTehqS) z{iYT7?suee^`&RvlWj_dyDE~>L~R}e7@(6wwf<%Yqd%i2uE91D?bGNAO^laaQSw0z z=_1p06{gO|`nCFkb~Ep6zv(l;PJE)Xv9QOeHQ1jFUPKUVRH5eywAZ<7jfQ_9(P@ zYAqq9L9Xs%I+y95YpU6mOh?zU6L`jIX2twuE>nH_Zo+}N3=fUCz)?aUjnE7QF57`u#?+;pUHst)0>ZnxOhMjXEt z!Yu3no$K=yw6aM<(i~m6y`3gtqYdgjWC0W!m4N6!Fj;6f(oUSCB z$wJu5ryR8s%T2A|jFDp>qfZ2VpT4Z?I(l~2WUQMe=8F?&?%9~8{0-C>zE@qdcJeiiy}A(W`7>=|k73!X z!)QakOpnyBwQ$<`rPeX>;bd8>V9+Z;5X#Q5(!J7_y%!M^8-&92;AN+rV~1fCa~C;jM6C;3M7#QU zNUizqe3+D4CGxpt1Cv&x9}U)4HpoEU=p;MDuOzhCFG!z!VbOkBpLn|?#dM>0zkGc~IcBJbI`vk=FdrufAh?OScfpZCA& z`gM;>$X8~fAs^8e2;FcNuIj$7p78D@u)?6+0-j-}O1A5R+f^}MgTX@(6TVy-{9x+Y zwSyNfDO4F|VefH{pGze;ynFhhS*|Vzs~3NW@?(uSSCT$GijnC$C~!Pe&Esb$a9Gyd z+*{9a^$oF(#WQJdA4QyS5-N(auWe8jMJY@T;AS#EkFE=`(^4->?P45H{S(173bsxX ze4`M2=?9_|HBO$`sGl44D>{Oq^o%N`0;H_@axWJGn3kycKjP`mgu_7b#6wky18d6DX~*WS669(p=XQ-|Cj zc#q120M8t5=U`EhN$||s-DrMoGsQ8qSRTyD6qnC2P9P8hlYB&VuRXUgocFPC!8c^e 
zElh!erkm;ev}8rb3Ty;2wSjay)^J@FohQ+6gLJPNfufm?AA}$8+@n2JH4| z+&%>&K9|)GVp^>PkCUv~^A6V;ncREM$T3n@U(Kb2`Guc$abO43-`PoRH)EUDM#4U- zi%q(3IEK9~%i|JnGN%*v#1Z~Ml1j+$^J^(t$D+kBpH&9)`~B*cES*Ibs*I`3DNsY` zy)_5-mehHg?Vd!@W#9vHx@gQrabm%Kp0~!6Q5f@c#vQT^O{m0$l+O#Exqn5(6B6XP z_|fo8w@iL{ap=7u{Ut+n;3%I+RglU;N&9U0ta04PB)R`D3>hFsq1K|?Q+8&kvh4-{ zoAS1iXj%(oP+Y0;<@!o}pR0t``Yrp^ive|0Lw2qi zp>cUVtGOY9B)f0OyI0Vzf9fqqS`!#W#yktw+Q3BjAFOTUGNE&*VxF-Q>(P0fHLbv3 zmKTV$(3%#*Vmo{9dk$4D(n;>ua&iZdj?^AucHU|mEM#s@tvBSKJn4At^~ofnpawO& z$@b-@^w=kBO!Z|VvDBKSY-S6ITbx@aAF+{?HGVE9o9_g4WVpyM*eI__D&tSLGc(a` z>QDCsph+qtv~G!};tk|=MnN5pT`?3?X}NP zIf3_Xq#Ptue7)YmMkef48`ag1TK1tG6JcsAB;cLnIAxM=T}RB&bVmuGy31Ec2-;FS z2OHn53}tYt8@Io?ZvKP+8LudbM6(QKDg*W~p76rB7Q2dH>r<^5P<)gzp&;FSmamlZ zV(*ZMWC z{@XN?Ndl}hM&}TLSZ55Kh}9UmME&p%w53neQl z-4t2sOB_v3*7Yb<&6w;~_|*O;s%0TcAjkXUFl)M|59)F(C}&p4#Cji2bhF%DbEIhp zr??c-8~}`+Dp}hh+|gPQZz<~ZK@fiRSQ^U%be^oDPy2Ov{gN80YmR#>pbhlljAv}Q zfGIjhCYJJ81sy%Bs2Hk~91|~OF=fb)q(rTYybNf5@u~yw;e2-8r|vJsS`m0UORoa2 z!Y8sMMZF`}*Vl zg>QRUZPc>p)IXJ`OS21j-`8wAlvv9OWw6O31rMnhbn2_L9K%G1R)Y-!)l~lV^ zj}yo#)^ibbcN7yu8E)ZM-Q0YXWRZ3FE}<`x5wnY6!QZ~knbB#*7aG_9>a{N}MsGc}V@{b~8N3%nH*)?<4Tc zrzGaLE9(}Y-^l)$@8tY^18Qge#wgjr506DZ_za_Yt&R&pLigeSiD8Su0L4TsP`2tq z36~9!)Bo^FY9D7iE7BK8P7}G07RiF~246NvjdpF}DC;W5v zN+GCFM%??^SWx%qb#OM@eVQ*hAb&uWW+~t zsAUK0dz#Z&$=5f(f$m;2>O(aRO^Y8lyaQ9Rv^Qiy3V+~j@|CvlSUFST>7$ZEaRU9s ze$}r}kV@*c!TLC*J%vnq)hQ7VltmEy$onob2?CR7Z9Ug5eD8S|GMysCv$P~DN9Re} zDG}262~w}Xsjvy{b@qoRe)q=8(`M^Zxh<0v-yEvOgM(!R;3Qxv=;q!dg%f7bR1syp z#hp~34NJnY=ucpv0*A4)w8_8!;H6|7F(6^j2u3xrzr_&}z8q+WV81au1&TCI)i}l! 
zGZggwO8m#G*#~vMvYC3To<{w6{5UeJNa}#ON>*ca-6yqbE~;-t0~7U7$crH1r9yt% z(_uPuuTrWfq_BGMByJzyL`t{@TDwR+*qrEih`>*&RH;VNCz@ z?j=B<=MSW{Q4Bf|mkS!~Gf%O=!xMxdz&~1G2W?)oAM-7SqgNCGSvue2XykH$T~)k+ zKsPGCIyXXa%K#l6SGKM;zH;cET-3dFdViFwudl;VgA|ZaHA{pTA$)KH%yv|S6u$Cx zNdk51$%EN*(2FYw<{l-8@Aj@jy>ldlv@av!gdMc043%I8e0m*9dbh9mwHGLo(iRGL zF}@;?cUYryUjJlYv_{@qD0D|ck}F3pNT6e4R8ipCgfogI+q)}WXCUsyUvhbv&>+s7 z(XP1#x+tG|$$5w)1SD&xUCxfnNngbTOYwAswd7Q5fpKqK zz~Nk^7XEo=6Q_TJh33W2_?%iw2~*^I$Zw zCk>e!cTN;vdzZs;FbC`6Y-4P)xbj=m@J_SY_(|IKvDf+rINfI{^u6Jb|GUojcbV1C z-@Oqwnv`}?2ETZBUCnsx-W%Q+;q1FJmdb}d_Ky}A9Wx8FH__j?Gl(3}su}Rn-=a`% ziU%^aK=aXT2db4@j~NwbVyGQg_~S07FPR9jnmT3+X9EzAL#OmjY1$Hr6K!#e{A}&X$P3twbvU_|~rnCHoxq*B|9P!qt>gym_~p z>87I^3b&Y$MYT}c5{nMD6@F>vaTB-h2(9j(-QlQXu+E@X@RmFrZc1Avg9$^5qQG(d zP!QxO{;N2mt2mSM}ZKTkw%zHw4pDcfIY~9Jx1N0K6pkAt(vk5t)l$?3N78n3eFRVffcO6|peZpC5nE9~jLPmFl?0 zJN@Bi;Mv$fokwTk(|p~=KEAK7NK=L~?9fTQ4UX0w>wwFvPj)uf)*;|dj~Vj5tN8Xq z3Io~T)VD(ep442Izz%cA=x3w1t;+mA6{>bi(wsd~`e4$ZR5zeTeI4WY%;|6_9-P%j z({Wj-B0csl01bbA;WfDolg!XxeVXtecU#ug~2%SHFJU? 
za{s2hs1uT0Upk+;4Dp8zr+^Iu*k%tH@Zfh9JDf`yZ%V7>jGmEv_EeI}dnDE*_t{gi z>HVXQ(Iyn<=oZj{xVlC7;;zVr^H;4$;CGUcdsYKzuAX{gU;E`z=MAsbB){3Z`NmTu z5C5*G-aU7-IO!v=N9?*nNr6f8gVd$sxoS$52Uw3kK1vpvF^*R-2K8Rtubj5gLWc$9 z%j>S9$x+<8?G{Ji;VRQjha820LThb!6{#^)%>V8I1%UyesahFAY?xC73D^JZ%m?gj zIg~RONEi&^bNb`HYn`U2#w2Lq8*elb#yEF9H1!qNng6WY&b#p3(C$5G%`n%u6kgTY zQcq@6pnG)K?6j-M8|fjoK2k{Rgi=ac-bwnY^6?Qzb#g}2>27}$jWuUsi_n_CUn)BP zi1;5l2GhSPLu|Ax;r?HjLxK2oMIjmSnMRp=H6vh+g@!oJWaOz1i@KD6j?bgc2l!6I zatC3f-cruP9}SV~)%k`+TY8fqCMbOX^#B^s@;v66_l5+v;__#tR$f&cO}HB`s9TSk z#5-d@uB84B;xpDzQjda{O&){CM{$GFbr1WE00<$gpYo-$#t)V4F(F9>dTCgEILT z-hK&}g$JIzLj*iuT8d!d!5qXV_bj*fmbH^y4+QQwyg%OGnl>aBUtw4UP#Lr&hz!=b z+tprPoPO4lFWefDvIhqt=&_`4;*s>?lJs!=UaQq{)fc&Ku$Yy z^Z6(Njj@B|USdNnQs@HW&*B< z?FS{$1NnrO)=+{)(D0FjK`i?AS#mTHa}mAg+M9eIrDAaMfq5548z2TgwJsxg#iW1i zHB;Y*X+n0ahK{wCS6w)I4bKzkl;5RD^3}VoGJ%q|J53E}KPi?qP2*N8V*JMrU%?Dd z3cPeWd2-^@^msDzg&bI1DW0!nLB5ZH>m+u$LT49(C7SU{5GhbdMzgS2CJ?>ho2t?MqW*F&M$qt- z7;yeM81uLN7j%?FhDo7a%D>k8G=|yZy|<(U$`Xy$PpI`tuyFp1kqUQPER#YwK!4A6L`cXMi~DjW8)7mv!T67Ztf;{2IC(+ zC%vtPe$l)cvEy_yXs>Tegoc|4EPjvHGt0FU0v&)A>xxNruR z>n~>l`c|y9J*EF!9MZuc&NaMMKvl#4`5#UP_BB<;VvVATtO*__o>xbp7TN9@g@i$X zB!91V3){bB36wR$JG$)SUP{uPn@}7BbdhC<1fyQ3o@0#70ROm18~w!s@>zlSFy{Nw ziM6iF*b@dI=^5bCDa(G9Y;+i=@gMsYG{^YslI9v(h1W!K8lXixU{}S_WXg@DS712D zFY%nH$r+Y0weVoJviad27kKpb{E2VE>wq!wxYPMxB19U-1(;uhn-b^1K?oTn(g(lh zAF53srA-RQa*L}ItKPeX$q@ShNJJbC%7|#nW9s~Z-wE;N$6gc+xHx;eQaP6<;zXaK z9x6l;*YQ>*|13=>E%0*N0P-&jNdJPx_-DGoy!<3#)Z)X^@`IvH1FAM` z{7~5`$Zi+4mqHwsjl{b?pa_$wx>pv)=8=i-9r@av$uDzb;8(yFxi(8yK52(*>h};q z+=Ylh)39pOqaSu*wa2!2-9eBP2i+zE&{xaOjNl*M9Rt^uO46MO3Fg}nyidRN&OZImSMDzwNnJ;59Ijmx9`M^Nw;fLLfQup=oG zN4^63Hs3OQ91%+RrMp9U2ma%RuQ|n}KN<&DW)~YQJ9j3Rkoo7yJQ)+wyScM{nR@f8 z!nKOE0K~U$#wI4Tz3D=)oHhQ7ZJ!ZW)t+}_0WupfqJ3(T(<}xHR_SXQkfjWi-GMfo zRsE6mA1;2p59CHl{bMkaz?7J7E@Jlik25uYcAehVD9z*KVfjY7c*0VVZU@~l_du4- zMF?}#DI%7-ld2u%vL7gdA*kt!kuyN1ArdkNWxDwk5YHqjG2 z3^2 ztrpJc#OkP(^kbfVM*qRPSc`U>80idLfCUm{d)x(Z24v{21mRKuse3+eb_2TjMDd5@ 
zed?{~7ZK(+q3c10z%czAo2uNKPqYI=_R+5f5%7787l)mCPEVo*kH$lZWG2WG!Qe?EAmwU zp0``kV*xEF9s+ITy{atGycXGk0jrO(e~eqJMkB}76hb$ybBhC3sxB4uzusk4&v;x# zSWAN%UXuZ=2@74L4Ek-kJkpwE*g8(iR zk^2L2 z&~`z$$cF@uV`%r9`^kzz4vAC{@R8q+uO|uD6$8&!{+y}~vLA33cnl`I1Adu{_U0Dyw*T>|qz+)A1CBQfkugEgFxaTJ zAQKj}%1;zWjx&7u+QPTjte)J&WV#3D7ElXfL@ornkah^oL5|1}9N47|77YmP%oZ3( zeMr^j4_!cC3a|iQ#?C*8)QLcVyh)K?$Ywx<&aW!Hiu_eDxWazOh|)YT_Y(*>^%SX0 zL$p{31GI>4iAM$f3n784HFx$vrhMTep}=1Rvk(t{!m}DLf}}T?;o&W?j&{ATtk62h z*fdK_q()o8!!h5%8LG^V+)F6I%?A>~b-)&Qf;6KtK-`XM7=@5bg4hlTV(Jc)5OPpa zKpJmCXg#tF>X1AP8;qgkK@MEK{6va_Bmkg3KQ!vn&m2c+|1Lns1@D`6p+xO}t?({{ zZemK=|JuhZK@Wf*i`m?q2_YCNrR@dTBg6a`Ovu;ELW{n-X~ihK>V-8lA6B;|wE3TATE|){}a^fBXi!a0en(-JSzJj1RfSph} zvJ|HedY0-91)E~0C>PSiqv2wL-w+1gMUkN`JWz^Q``e?!$7GzR{1Ag>{}rf55G1zz z%WE7+`mzmE^)uG6xr^4s}oHW87O{W2uqE zqezi`3&lkE55kbL9TWy6G|~&hoE}N@j{vX{f#0kc){FG%|DU0p%9ft(fr z#T#gq@{ktaO2_)Z1Y}5Xjwr+%BG7xt0@`v1OChNh8xDTMan}gagDXLz5YO&$nlnQe zW?<=Gjlx1@1;9UDE6<6LriBCiq%bf9DX64j|Gld~Spz`Rp;mb&Xq9o0#pts7`su%k z6Z8pELW2l1h>vKvADO_bVE6OPrJNnDQRde=V2rviH1rH#e zn^N3=90&v?35y=kL2Ub9>tzvog`!FS>jPAvc11Ns$ zgBumAZTJY0`w(D|4M;}giiflnY>R=^2T-TX=`IJDP?n(?oUpI0VglZH=TG$Z4m1-W z_AAVkKfR>ayO8)F$lFRk(_eW*ejN?*0+(wX8u7Ls7_EJxe3$^bafjTjwZ>~QISpFn zZI*xWv#$EGf)-tU68Ix$LZW(xL1c2Ms@jg)hK8RQ{UK!3bAGl^K}*15eT!j4M@n*? 
zk6}2z8#je8MpU3q#rLEJOEKoy(n5GNYYS2APl} zO8%Q&w8KTW`aOw3Fee9SMZX!61o3_MZVUa&tH$&6H-TO0&E7V`_>hy)EXORLc4B`Z z_@*bssY_Z<2LjcyGk9Q*{D&TOd}u=;hLAE}-M{gd{7|GDP{^L|1!RCq!%&L?qJ%E1 zSww<)7MsTnZocurF?vZhl}DD}x*%ZzIu{=}&1P9Yi|-SccS}y&21Pn{G@q%&!b_$5 zEq8e5K7DP7Oc=6ac7?v92DYmQ_u3oAUl6vJgJ^jr2opUYyB%RsHj|>7Qi;>wHOO+k z1C{_ZyS;;@Wks7vHq=g-TH4l&%leh@n+_2O~B^5t>%vw z{dqFJBy<#QUDR5(V1uhQt*oJ&{+wKD#p`!3kQ%F5hiUIDP_8(r~mG06n+U ze~uaC2Byqa33o&CHY`xe3!$+;F-ocQqXjU2Z^;QyN>m4e*|I^nW1vrSaI`6R>G)JA z>cgxSK1y$H-wJX&`D}rnd(K4swDfNEEu`iC|3wms;5hUv>Oy20$}NO-CasV2QBH6n z3_|5T_1j9;cgLvsHho+zIFRND@E09#3_T}+Hdkb9bn~KntMg8#pISd2ijZDl6QNER zTwynX_8(PIQ$bApP7qZ3wJd;1^x1QJ);RAy5neR~WZ1#*i?sBVdlEi4>oh(}C_3bX zrAC#0=b!8=#4dOx*X3h)lcQ}w6BY4qs$r&Nt4V0C}9Xn5!^0w>zOm`t2<DuJ z3Ibc9HNc~RTmog7?2jw@YItc3KfQeZ^rSa~%mH%X<6CBjh8;r!sev}8A7;C)RUT3y zhYuku?bJ!@pt`l`-`6(jyl-i)^X6X%7!4$Me#~}^5DdYffw}RuA{F=N`yn#E-av7U zTrEP>0%h&x2EB^LoWs9w^>6q`E1A+WAPq?vdXU9=GPhpZPO_Bn2i}Og1WzzfcU*yb z#RF5h;};+d`xY0XqOAc(=eN>_Gp5IG7(C; z{3wkGPYj%fjnDH*m~@&WB1FSm>C>;W-zY%22Ea8uV5+#@b>tR61n0QdBJ<#WY0uZ z!AM9pF?f-~8-;J|3p+?w>JZhFsExCnwkyf9c> z>BrQ+MMewADE-ABzzU`)nN&T%>9}z9dEVP1CcgQK1tOPnQs6Z2YJ?Cd1ng_BgltdM zx;BcMH0=C%j*=xZCbQ-T1y)!Grnv!tubSV?@&DngJ``#QC0bxYeD%T`qxgzQZ}RV_ z0(3bg)D--eF4%Y}UWIM>LS;yEv`SvuX)cbi1y2VbqDN<~6Nufo{yJ$(@NO?8(}0%k zP`xn45LO4t1b9n_57QX^?NgiqFH+j~6>b0LPfQj1Zj+0yytFS}37=9%$oeh!ZtPIDB|z@~_P~|6A8&{X;$=`Nl0)bJ<#YTBZ&p$mQvvl!67H?_ z{_C%G@wi2hb`>h{H@wSz%LKNrFCV3(3r)6=COg)Ws`Se!fh_<@zE6ZfrT!{=Pg-jP z4z?7(8=e%!62IK>fWECM=(!8MX z^uB~SR0{}tK*on*J#!MJQ(=ZOp&_Jv=gB5ktsY@R*9 z{h!$^UI*%ruyEBJ?=AblKqW5=5ucy7{XLGk(dQheE zl)Vd^Gbz~ARa5(B4n-~Jnkhck%eYwXLNT4Ppi9>W0q;Ecy~BA`R;p9ZtgXFwhnDd* z(ln`Ib!nbsf}1sBnZ8M7{?~$chfgf6ValE)LvR>sqO(x8&VQ79o(GYsms%v+@BK6Y1e?6tiGABtO z0nh1DR_tniX+!ESb%p44XO`H4=3R~Nx!V12l|H*|RSfCm8lCz_Gip9SH}_Mj6Skc2 zO!ZAjQjxK`y@MAd4^kIh3e5?eUwMpoI4OR^HI+K8)Y5HcPEHe}R}84*_>Jk4x+ILj z3z~w9%ZtIAX8b?WxpawB#~6*LdbBJ3m4Mq%fLtp~ebFI8uLWrm-(JJy+UtLoS{-+R 
z*q7gH$-4FPjg^$Oi>QQs7fU_n*?H2EdUld=#{zcH7cxG-6z;^qnD(NpWg;|~0#|)k4Q#LUlB&^3GEm?pZXpSoRe4Bo3nhDcqVEni*wyl9zLBcQ}) z(fuvksT@{ZNjG>ve&;ytOzXTTsd%M;@Q#zTQs9Mi$;($d7;y#3-_C(lHsGz?!Vk7; znB?pGnf^vzuuCEP>he#*8sh-#xf@=rA7)*0Z)&RyWqyhAFDfMk3CvbZ+N(Fu-xVs| z3rO@a`C0j7V(F&`evk;u8$xo4#)T11$LU|juEg=vb_BTyDcbulWygehJH#tO;gWVT zIN^}>d zzXpIF4O11_;}OUhTl~xh`^$x#!w<8c3l0H#maDpZoz0o1Jkcu7t2#r)wZBr=CC~ex z6vN?G7pwkG<@#3Q-s7{{r#`T?`gcyUc4p+Fblo5Gxq{qIIVA%2X!4q`=^oFF&k{*o zcGFJI7O8G)92oc9kr}+l0K@D#_HPCEZ5!@KgZBz-E^e1feor+qFs8|UWIyp>0uiiuaC8;jS^GX%&sIkeOH-ti_hg#WJp z*x&5!e;}s9YJY=20p=p$t^9yS2%W>=qH2v>&M4df4zh71ZXoJFO9INBD`)vKxO0+JL&wiLbk3; z49?Y8vUo`YI0ytz><1KgEul)$CeYMU8;R zC%U~fbX|F~k#6rw&==qjBJJSjBADf}3?mkNt(H^_30RoQDS_&kINnwQ$A+yW3M>Wc zrn-2~ra%GAB2@OVOm6BB%o2+~)gmacIa?>@H%N?kXUp`pObQVAcA=w(f)EC0;E=LF z=x=Mlf7$@BSYz#LABVuvMTpz)z5R~Mcf?bU-TTyiqzHPI(xu)HmAyt}=x z$Ro?piK)}D0nq1N_GN^)eBb7@IAe6eKHgdNiD!Q8Ox^0NtTOIG?3MXXw5=?5NKS`g z0oVQb4u#@fa9&2d8m{24ULHFaP}2HI{)eQ+`=u8*O*98N?)qo1u3RS0Ct%7m(m#h! zb9GJx?*;e!lH;ziQNwkMu%wSSBy4=nddmYU=ob5avnhe6B?P*kAEe1xG~O5ytb5%y z&hqCNCiA^sMJ53wZCmG#1KIH5k8O*?T1FAnN}xpKN;Ofk90MCT@$AnyBKfTNN+uD@HovZ2A?P{JH2f8i%TuYi%%c%o}?Lt2Y}-nqry7qvHK;IPlJI zOW*rhqRYJLqm)92ezW*Uk=Q|Is(0h#e0tXij6OXxk~v;wH0y)qg&i;%*YA1X77dgw z#PvP?gI7LNeF~Lj%F1b;G6IweB6B8 z-kjR<{-Yo?75&dy0G8C^b2cL9A5Z%dbE7CGZ*N8pJ~--&?_XGV@^@<_95wdLyt8#X z>XwG|Y4v{n^I${$tMPf#WA!T>Gr)@Dx!-M5ZTX4^;Q6n5?{5p8kMh2F{H>U}EI4$? 
z;!>so!1c~BaF-FZYsje(+6;ujylt<9gIFIV_Md>n{(=+w1)hGCm6#+w3DDPW%rkoa zu#|_6MRTRSGQ|-IUisY5EtU-}-&C|&7yNiTv*e;W*|?b4*K0YXb`i(ntykQX=E*+pCP;tBqTp<~EYxw=j@ORYSVCCs@Mq+pl%Q+bzF0tmPyZh~ai!A_ zh_023C+br^zr4H=q3w=W569eTQl)UC;< zz)vze(w-n$9^A2)M&}rwTT`_r{5N@&jk*WwTOe-+@Xq5r#JjQA zGh3e{Q^Q~=rY7PYDX8jQ4Rf(;pA-%-rSA0Kg_%Br8%D^)jeHJ(eRlG_D}D5bbKEXw z^W)Ns_3qd3=6EOYeEhq(i?XjTT#m8stK=cn$md+0cUw0O7a}Wex>fY)Tz4Ej5can2 z%G+vu*)OA9mt7gaqnhZ-n|5m`+gosXaiQ&q*6QTo(m&qnHQ|Bs7xM?N8zcoyg1jaF zlw1LC7Y&z8R(}XYP8Xy=^+M?nlmAI28-w$B&mW%6S?0bhj}KB*04;e9WS^Q=yYNow zcB!0ae%vo$xC|-0ajjEn{DEBib-+d^h>Zq(ybB9N0ZLabN^~>{&p(o#E6)0cS4uyB zRKwkTc5**ea2uo6>5Mar;nOu9$;0QX?h`?}j@vSqXDZ1ThXmthFVo7uRj1!tu;!IJ zY5Q1VS(90#{Dt0QcfCNU{C)&y1N@PRTjaXfnu5jZufy5P#10(eP42G|I7B=3NW$ysgViu=yKRXuxYwMhvz=vt1D)*JtOH=bOggF6<>sT z3(eJzF;M@}$&#(QVc7k7dpH@K;Ng?{t9I_)?Kc>w3N+g0C%5kfgO)x99?Tw#atB@$ zeqo~M3b~fZ&2USa*xaHLS1BH+hBdF%AH~jC^1OZe9Mh*!J9AF-6Z#WX({PEBp05YY z*~#^t4I4A!i$NxX7Ta_U_PMHtx38IqjYtLT?Ce?kWVfuc?9(+Ie!b=*pZCyY;{fIv zOA!rk>RhSl=S`kjPU5}!Ids}bMZ{*rtfIwvf{7w_G4jk(q+`dE18liqI6MU%Ma*VitCsfRCYw!iG$ z5<82XZrrn{1#h0s9=uqPAaBrOo1IlbOj;LRNsn@wOX(~+?1}$f)Bjwpe{r|*p~VM7 z7oE>>S$WSXH#=Ez6QK&LmYwrjIuY3H_}d5cq+z$gtAYy?zKYE1Hf4mTbvMDt9Qb;I zW$`F*CrBvzljZ^#f=w#l#c*7B%jZH2BF%kOUCEdxB7J=xJ7Ok|-%*U6HzK6r5t zv@dr>pPDQsXWM#@!ApEpJJeFQbBoqy@h-jH-VxTlpMq-%4qMt(`4T;4>xD6n!U$4S zt{HK!7uQGAWUn<_Tk#}IFP9ud`g*B4*))tI@*UwTb+e9*$6a04@Zw7@haKfZRb34e zB8~^mXTuEnx~|5D4U`u{BlbV@r*kO_l<62XDS^aByr<5W&jNsXiCVN>j=>ajL&@Cu z$ur=Krrz(^cReD|BnGJhyr8!^7ODHaA!d?8Y1bSg5B}k&5uV#7c=EU2aJ{8`#o3|j z*@Q9IT+ZOgIjyIo_!!o7^C6>)ddbhAA0vBz+OiJ2gB9S@>$+F@gc7cDUb4-z^(SmM znJ#i2)MYQXdv*Hpb~ihD|1|dBFA}>qwL5t1bTZ`BU3*PqH@?q9zS5z3VoaiBEhyX# z=ceC>n!}u~?L{`;Sce*(LwX_S_Wfgc$!E*P8|$oB<4YP>m1;ewXlo`$i5z*2#qV&l zV%m&g@uKRv|MW_Pr9j1G{g>uEn1d{4tf3CzS&ZI}Kwe{-2gnLP1-d`4YVr&`#N=#n zf_LKj`sxQlJdlb4crB&*d8*qc|Ab(6Q_sDxa#XmK34t~~W{CB1Wo(30CHlKdo=bz` z+8w-sMm`}ovEqCqPw!Lw)mA3y^D`f6nMK{zDRY@|#;9|yOFuYa?R=Ad`9U%bJR=V$ z)Y6b~W=2Ag20nE{AbQ&RX1A?k(WIu6t|~@yw{6`q9#jSJ+c-|Zqu5U6o*y=IwybDQ 
zDu^Uak5@g*y}I(sQxlrXTxZ4CbqG{pN(*a30b$jiy$6GbkqzrXu!UFY*>N7Pw^>9? zXT?gtEZz)YE{dVvG8q!~F+cH+{-rO)=fHoz{F)Vi4UapgFcAIrpYMm+ZbHGWT&e&= ztTnn^LMv!U=Js~P4F`ZufGO~rmExBt*<-Vh2JW{vDdmI2#UBy0=4-5+_((@Sq& z(!h!enc`yfU21jV#uk@WL^Aw=8X!rFI{aJs2&O7FMp?zF9Xa7#C4&^}a2+cY7f#J{a~Yd--2^FvI@ z_0@X4_n}`}t^E@fV^LbMu@#?_?QKOnf!2!xecd_wcGG~znjm8s<!x{E|Ay zR*Pk`ZrC)T5kC@r^odyYF?!sHh*T}6t^r}iX}C zr|-soERD!zZ3L&6Uej?O8!)$%)9Z4VM9sMgm7Wcv=kG=Xodur$tiO16=zFu1V(-FG zLxnU3g92OLW8Jm6C+jfC2QWSwJScnv=c55lj)Uvbj)+gjHobE>?TbH-b`)oRl`Fk5 zIrI6Rb=DJG6|~#%8!Rh+HZ8xP_x<4oJ)=P7n%o2JEH@PyoQ zXL1IkCLj8RlDQCOe{!%84%GTfP1-^ri1C?vGz37aX3BK}g|+7vCPhuHFub@OZ>IfuVR(2JU6`I(pbLlCk)@2lLW56**I)mhy-utbhNKHo%^Ow`sZdspXQ2%EDU=4-d?G?Z%^$%fkd( z=d;@bjkmQ!Q)7q|u_f%2IrO{NXD>^Yqb1D~eG%=4juULG2MN<2GX`1yTXd^FancdGO7o-9jP zg2%4kbafD0D^+xX6B$oEy$ndj|8Q{{u|vz-^Tb|vO~)Q`B1H=M8q!e9!%G26S%w(U zblqxDx%E=)cNV>OWiW^+Aq*7w!u;oKek}~`^tJcx@THc^_e3)=g`r9v(2#Qk>VT{< zcdPLzCwI%bmjkQ6Nd`GSx}P!mWN%_OikZrPE6mWpOf$4fg7&wAjP%lO= zM8$3vj%o5c<}8l{+ZhdU^tX606Dc)TRbx$ajoxCZt+-Nh1oxavk1`@>PzMiYgE;`GGQ+8~0v#P~S`;QMFU#1IQVu z)-3Mvc@=da^jml<;BB~B(mG3hnd?-q0#kjKj(NUP@p*T#&hE2=MD2GRv`c|<@LFrTqq)4(vIdSY-uF zvAzFgOw+*L*8aAQm|L5+OaIG?X(_I}sabe^|JYNXi+r}c7vZ+gW*Jh{G-y5!C@Tdw{yu-wa_#?ibjSsdPbJBgL z&+^{Sy^Gc?q;=Eixkfi|C&BEJ>IT~0d(OP>1sj5C`xhQ|?AJ&{TcxTGD;o}86}v2$ z`{>=a9dT-wm|K2ue3alcZTG=RTERf#^aHtkPkc{grq>N*8t-ii)B~bzr*#ci6f+o8 z_0o*bShKn4LdyKP)fPd7$mns1tS;W4NZll}t*!q1lPdz{Y>0+Y3ce~~PQAkK#yQ`` zN-&pt)7dg5N-`~amQ;kz`>h}QnsBO{E2srpbJg`4ey`?F=u&=-x40tB{IE*(6b{}~ ztqcy!$lR1QfSZ1v2`|V`s)loo7f?5xcax5rogbj@-PjC^8Wns0;g!FXVOLAj>$*7OvE_!ymL#Kf9c~y!|7VjoGVglL_MQdo<}Li~1PT&*45f z6m3m;g%y#kvOKiy9CM#&lj(kq!vg(!0Kx3h1My4`m}fE0n%I|>_OD3@Twm|4Qy1G= z9P(XNkHPswjD=R$N#%pJ^{_^^Pu!Ymr;iHHvMX&p6MSBbd*AO~WxL7Hd{$?Gm})k9 ztLxNpzcYK$+bZo8n<$mz(!0>;xv%F%QjjYqYtiN2P=7Nl`97iRhmyLZ>yPa3EZ@j~ z|7pwg_(;&HtG3Uj;>KBUCC%|+fM<-ie(_EHE&a*1osKH3XA;Kj^pk$XPxG{V(LW)} zymO#OPd{T*xkx!h796Tb&U((dKEB&!J*zZ1@05I=zPZAatzBdw1S`-lUK|X%|D8Io zbV?3S##ZdJc>m+RA9`Z*rL&*%Y?X~^DSj&Aozu{n+9uJ-o7?Ng* 
z%rklBn%nfE!JVSM@uNv&X>j-M)un8KrIJN!a0g8>MbV88V_}T zR#*fPZXP`{oxRb7$Ysm4^qCOHI6R5_ zMB4xORvP1vFu>84Hr+@a-X3h9;^}&roY&PL#T5^qyL!C^8ysjC9HCO#XSqj8?WY<3 zoHLs+r}c0C*LE#mbDP&e&hb(xy|hLbH9 zM8Exzt%yi~kIZ);#Vn@Rv^q_g`DF4AF>0rt@(Nv!#@!Z+Y_na!bMM*frt?T#J{$jR zS$vh&;XNgKaW{P%SkcAG?Y%0qrM-)DRr*(8LfS`R4Czw3+Z!a7IH5u^@U#)RbsyW&vJ~jUSNNuxmwo;WfaZjO1w7bkywP+)S$M%+ejLHG) z)RR5nR9hE|Qy-ZOt54h<88d#NE5*}%F{`mVOVXT}XU&61=GE1J+w_s=N&K9hYFs|O zsrM#axNhS!o!iZBC~KJQx6??wUo+-8xYN5}Yt5@f1k%7A(9F}8b5Z;NPO7= zu%&nWsY{TLy(AG}x`vJKkCCe(!Xj~dvt5~K@Wq$W9-GzMqm4}N-otLemo<0wGv;u8 z<;+(DnxE?_-&i!!D(SCR%-CR&*evAV9pqFo-tKqLWU;hvs%)#bx=A~3r9thyiKq2J z@gT(Vxr4?7iAJ$3FDDI~Y9~L}G{o|l(jEn->D938bYWRm@uveIb=V5e+or~em>{Lx z$a!{fGG)!12$J8FiIo=fNR{i7#zu8R4nQhb$C`6DN3~^p-grTGz&< z;7Fu?ZM_jNd%<;+>$E@%;|Tw1^h5cv?YqSVW+bipFL)m%?q!!qF{Z1<#Fs7$5;wCS zJB1Fj3_9glS!lKWrv5NXUUr!!rjCd5QnH_cwseBhkZ z`@+rF)TZ*Z8t?FPnD;z)|MxxK=4BqX0{Hu_hWQD8*cR;5DJ#AEW9Hh$S3EO~n;K2` zW_+!4>I-6vc|Lwc^yma_Ltt&bQ!R@ZMq<3~oFB)LYa;mzS8RYxQBBiO=lXdEHD`~P zMIc*c+PZo6>&lyqa6R8m6XJ>b!KVHTpZf~GQlm+q-K~*6gl~A?{O){zlUc-QU|^cU zb!x#o>!_gh9gmpLQa;WLAGZnK7d^giZ^3yNwzVtW`B{0nOGPtpm$Us+HmdjZaz8~% zyYtR@7&{#_JiY0&!Zq#+TyjbCMf|(IncRvcE_X!Zl;I0cneP>F(?D+Gip;2N_2c7S zPibC|)RKPmZHzsItPEL{B9#HL=#HHr*DLTo&1M`cUDj8<{^|xP=N%=D_V7WhN3V(b z?nZHaCD``+6G}T^lUTp8#kH(vM7N>HqUvw)&d6;v!~Q50T7ygTa#M+%J*Yi-i{#LbE0B?2lr?=j(** zE}lzDHutTx>z+r=Oq6Xdz00JF5}nf`yw1x zJ%3}urYI8n5Hr=Z0&aDY?lW`4np_w*H3DL1+nFrcwN4qfWQQ|@y?!r+UbkB&9{hy@ zH}a*_US{$8*&R+FO2b}EI<0NN5+1U+#*V2fuFddW%kuFeRt~dDHg6`VE&quyGAGUA z>^|-toz<|cxQXW@{nB51*KqwPNwGOeKOIZz>Aqnqe~<1O@Qy}F*Ah#P)8$Hj2T{w1 zePKZN2pIQm#uoUURdpJSlM|yo6@MrLUMGL9J}h{4dea|p#CY?-)WtqCN+h;d zcv@4~S-NKG3G?{K&WT~OUX-XV|5L+&kva*LrnUIuRZPq3F>0Fgq48_300(&2xNVDc zkm13?8;y!R`4?Jd=3NomFFA1Q7;6@VCT1oD$4&CRg*bvk8O;<+AM!zuwqC(3fLhnO zaY4uR0HYa++ekrySG8(twI+{Sa`b>%^BeCOOYMa?Hj`_{{#JcMFQ?YeU>Si@WcIuY ziD$)UMt&!?o*%cdNNPj;ulbUyH zd|^L6HFCqnNV|4x7a_*Yshs-yoM)k15wpiAj@zi|!k;4@-D+%a^-_)9Xmeg$;9Sqj zVKJa%rAA;L1|No)z%`Uz(?~0ho|{d)Y>v56Bp0sMLiky5N0-ekq$zq6krZG$!e2x< 
z>wl;%w$wH`RoO2ithwgYtZ#6*$T<+6beW*H>F5%%=aaDnfAuBtWgs;yxm6uE40^)X z)?t*K5`=v>Kt7Ah;u_G;5lnyOKB%%`Ga_ATD-LS#**h?94-go}BfX8P%m zssP`)-N8kF9c`~vuO6QHO{4os-#{M+T{!Tjc+0+-+jTdsiqeD@ z68kKnB>1sF7w0mBuPN$a+uplVl|h50CckID5WVQ&acKx*lQ8+px?;k6g6+r{yd|** zFtLxqP2hI)F9t;VnoFjG4y>X*SLo{<);I1CsexBNG~EAe{fvsZ)&E*~tot)6MJ_A6 zaZUNMIo{nzz!xU|LhgY+Mt_e;i3PP;EzohNiWUYZ4WkIMS0|ixo{iYuHq3Iz zs;*fg+~EVI9%TLxf+w}^k00?iB@+9ecvm&-Anuo&4d8pPXKG9>s-9*zKMy|2o8f#! z1KqGCV^@~fru>jNNMWT#*c1+bCH0hmiW~O&=BZ46 znWOuvrxoXT`Icf(^%B!AsN=_d9hKs6;=YiV_LxCezp9hsf1dMIYkC~cIx5%rOl0O6 z#`x3GB3I+Y5TLo`;4w3!!QOxw4;kGgzqFTJBtFvL{I!F3Z~qiMDVnOb9}`q;zS`3` zcFG-4M8->jPFJJ^=cA}DxGC3~;4zQcOgWsPz3{uka+de32Lmm#6d zIeqU!D#MnI+2&@#>zX|Af!Lri4ifa0O049z*8mUuPT;FB#>PWB3RMg47pCZ%m%tHh zv%j|##;WdV*Lpl(-8eslib@|enixq`Q`UdZ&R)-RcVL8(h)+apEqDp>xHZsxYlU(r zAoQa+x7T!Xg@9L%VlqtLcz>xAS^oXDXsQI0>uj;=bG|=_%Dp`!Eo5u0z#bsjJdQp0HEnMb85$cSPW1s`DklKB8@16HE8e1P> z$I6!*mGe}){jUtaD^}Ts=4vdr!b~r3KV|JnU{R5A&5G+?2osLVIIGbhjd!AvQgN(v zc$GfAk%!#{+&*ne6%Ua+$TiTi3N6e$%+s5whJv`e6_jrRQ(0okx>(cC6PuMmCPmj@ z`^>U!HN)So>W9;SQz&5cUcEijfNzP4S*X?DuA6y1GMdk7I9$Z- zBd%gwZCC#&2R5uj(xTwx2bxc+{01dm9wMATg}-T8Y9TzxL#nq7-|_?Pc9$NvC;vm2 zj&Ty~T$zg$R+q*0O{1V5m5ha7pveOcdRUCe)+e_E;?#s^WUGP8UG?f|D&PF+j=p_% zekfx(Jlzj3I&9GVUDrHqkLKFCLO~X=E%?M1XYLzjTt8Qg{`Ey84-MV(ob@P?((QMHCDxxqPKx#SUK@FAeh^q3 zsPefwfw$GLdP}VEV9O)%)1w*k`%|_XxCu8*&YKqn<`C4yxbCM>Y+dR^YvJnXB-AG< zk(d3Si{GBKZ06~DaaIageir>S^X7)IrzGX5Yr}rirFjx3vBObJS{7{LeBLw;3g*=E zE~}g+sWl{=1IGPe4zy@oV{mmCt>5!G$?-CO<8gOb(LyOP^Ym%I#OJ7tk$JO2(UAc0 zcm4*mr^YiEl^hn+0d}6_e{Qa`7-yKsyZZOb{mq5W|A(%(42rW`qJ;y& zU4pv=f;+(p?hrf#_u#=@gS)#EJa}*hcXyWrcV}?8&%EzB=lkxDTU1dqFw_jY_wHW3 zdUbEXC+;#(dy3+jrrltI=UQ=yzsM1LA10duf@i^&KhQ^V;6c@ZqG=={ z;i1_=ZI)!qZDZaE%m56Pmg2q554#+XS7PuIUhP$XQpzFH7MR}&dYvfE(F%9l|FiYe zG30qw<+!nsh3Ap=!0|x@NA~q;gv{i%$L{-<`lv3?5xdsJ1-N3=mFCzrPc%;}5@^Vy z97Qlm8uJ;}&jQkzbFCQ2UwM04<+ zRI>AwMN5=B66-<)=4C6g4_=pT&U?yX_xE)&#vGmjx`ptnxAHyMH|K*5`Wn>-t4&J9 z&Qm)`8~T@2%~}A!$u%65y5T^CHc+IEqCi*rvA?alpteUgzlIr*`r}lccHF2{m6`Dl 
zKnjBZakJBR6llvL#WVBFrtr;od%gCAJ7X|LxH2A|cFeIL=Q~W^8nNME;E|LN6NEFh z1?80!yDw%t0MaR4J2BAuPE(P?+Rlk-FA2vE?m)pL%*~UUk|;L4Y6G+J@>V81gWFM> zz|BnP@5a^V0OKCV^+Gv#Qi@`iHT_vDmNf&KNp*xalRt5EcAo2o8$A|pv1hSegr)XZ zhTRi1V3~310>Bg}z8BQ)#|IzRR%Tn3Q|bkC@1svI8zlol5Anu3KNZJTIUD?z+GM=V zHzIe6q+Jo`Gb&HjM4Ymp21T3q%S!cHa^GG(fu2u`9Y(_%-DRBglN>$BDEXvM9oKm@ zS=E%s*3G$&2{Cl+nG)EoP@&wtoUA0P|qRTrGjfT=GjY68_TG-D{YKfkjZ0s#V?-D$(fd~MKGzD0}3Knj`HUEx!CpYRMQ$O)6mpkKif7uu|s5Z)0| zqKAdVac{vr6m!o3Qdb7qi7x_oNOCkW9%khzd@ z`rPm4e)tk9MYjHdLXqWAD5^!j05OSfg(W<_?RY{p>jBmNury)Sd&2iaSGj$0u=7s0 zJysTmwF1?Y7D0$KVH-QW^Fo&HxtLb|WLI8f9SN zlTgU2b9(iN)2NRc5o zd_S^sT3idF)|ya)Pc#L1qMg@Z@_*7QX8(?PFJ|ADCh+@V zr?UuSzLTnQb?n=-S)S#XF^o{isEQB2p}9Yi^8$~yy_%5xA)}o{)IxuJk_Vd0dH_Yw z*g8RjW(sq2S#}^{rM-9)HVKe0=m>o=(cSu+C zLT$wdfvI+!Y0N|d;PDx;qk{ZJ0ADlb@OerePPZPSmS#8T^Kb57#iF8uer%>UFzt61)_ZracwP;@G38Ije>IYxw zjYnFjvl2nW{^NDtkiNoN!65~IQUUgj%*A(1Xn!Dd0Xhpre>bd2(_qPH#to~QGU%c- zC)KkiPg|ir*vqxsj^TEA{Vh`cT{q?IQ#VyhR=Hngop;;Yj##}(DSPcK#bEDF1@o7T z|0I5=rD+J<3*AC<*r&+mEW&FOVm%pkAx+)J>ceOZ3|ejBQ89uy!3?~>u)vds2pXr~0b1KcJhP6J>a*>(h2)_yL8(^v(lL7ILz7yP(aTRrT}t_B^PwF}!T`>Kl52_tn8nws&Dq&*E-iX8b%?#EKSAxv zx>m{C{UE8Zi04+dZxkp8Ft)yM*rR`LMYS3KH~APzv2D23s~C2r*>|Pr8YIq4_2(f? 
z<@M9{V|1i5fD_Cy?4VP>CxzrgO8}BFm!FxmOET*)6(;kD^gE#Kc5i`^mFeX4Jxe=5 zn>H11dBUE=W0+|Eo=?2)5fg}$c4>$B{sA%d4B%++UF(ittH3YRnSj~`7BWEU4_xlu z7f7#zX`LY_OaP*ZYNs`zG#*$vQyb;pAu%X}%nq!4(Y_Vt~vhZZHbNEx;^c=C3yA(_iewyu4v zOEFU)=Dye5RlTLoJY=LOOZd|eyELZd<1`o~<++|nV^_yn!koZwc)h%1H}*4G>t#mo zaz?L6b87*X*KqL{#Yo@8DWQci?Bl36q5^^rLO z8&_b}F6+>`5Hl;O-uZ`0&4LzhtKlu6L?)5`yJcRX7`*PY73M!uvrG^K*uEe4+eVge z;bUg0ruD>&@fzd0+r0xsFXC=>L0fwVnlK4D;kM7-@Tl#2zzJG4&(UWDO%R>o2rgqD z)MwMkr$3~(M*dp1Y4aZAdge4LAnmw4R9D%u<8H`(xJa<+8!8hEbn#qkg8XJATpfSE+gDkmDneUA1|ZUuGdb=t`G6s7sr` zZ6c(q`{Czl|KGbp5GWLIsqQxpcYHCNNYyLcpda%Yc}=i-nV)eJFnV(0QIgDk2Rj<^ z(3r%b^sLFwsKdbO579y{6MyWkH#!I?2eC=|kLA0C_z`@hi$p}%p?_@T!)6B9$X)=$ zO8ydIrTwo$9?}0A0oWm&0U2qzYRGE6%k@h;m2A5FoJm&jDIHU;6F*o+CNY&-FVbI9 zslYDa37uUg-BSCR{@bK2C6MYYgOWs^rn%l`xB^(bx+vp9+SkJO@+8}@G~f>oUcKYh z%SG{q38aoritdF&8qbNGR;=^Hn)`eTSSKK^?koIwnuLt{unc#ci z_5S=cwY~6IZFLLGemMt=3#{T=$i*PGWHWTh#r31j%~`GX6~#Kp27RdqhxUxF9`UmkkL!nqBf18s>u3lKnyDCfp-v{@kr|s|7+l=p*`z(5B zbG)@5ba}c!Qi3?Q%Z~iKj-5POn~ccT0Syu1zf(}04xMFtMhwK`5p3BMy7*fAH?OU3=2h<5=(T+a1 zA0}ww(BZ)yEFQkDKP{4coOf-c8&FLmq7#r! z;dVsHxq+Z$Jfnt>1kBy5r88LW-fSbl8^X<4a6#qCJz>=xoff1r=j#rIBk`&i*o2$t znT~n_?m75O>Rpn zB^#D8Uq9Nt+1%QT@W@y;Osd1PoS9!}@v3Uu$CqldwbfU|@Nu4)1XMI1>UP^`k@a5( z70ktV0y9)HOZN6_XxB~)Tds-{wyWn`J@&|N4?Hz)mLcItxQQ)}yZU2l5eGGxBJ^G}CI>VdfM_X@%fALnY}9>UqBLbVjVB z-i7Qi*Ib@r3KGk9Ufm@i13CqCq3v;5oiidCDt%Y)wLOv|n?as`!aoH6I2jVkoycsWBX^8|9&`b!ZW<6gfE?>&qobG1c#UyLen`FtW{Zw_gS!Lq z-q%@d-O*JI|2ZN};)XxlwJnGnYm4Aw;jo>4IFJi~$bP?+%Yw);te`i>h-k`%-4(bk zeWGWVnK@|vzi1L~QGABXd`p5Eu)zqW^tVPzZID_BwNi>Tj#*u)s6Bx-XNfEQ@#~7@ zue<~$_N?c5fFs&%+rst9GU=YcK2j{Lt0l9hu0y22^D6)JZ9vF&Ne|%=(?=(JyXV!V zqsv0xGr0_n)**ntXI*N}X)#g2GbdOAJ}C2ouHGrs*QHu)c13%mKviFM68ZE(`!TK? 
zb}t7NVaG$C<&g~ZhqVOJTqK67&5w^>pSMFB#3YW+ zk9(d$o6h?3H`kmZSF5*Yf*R^gYM65{IP~h(v$?kwsuk(PPMzbs4&B9=Kh9jhSpu^J zJS%^Tb2RxqL(mjTBAVBM80p@*RHvR&H^y1=y#l%ckv{mWePsSpxERf@y^cLk;EIIZ zBFQR+v*O6^8nK>_jiRSbdXX3Vi|&;JK(RiAzxO$ z{QHgbwFvy8ohVddUiAOd6_=UBwb^#GrCFvadkWv?;=xmCb)iJ6D0MIB=~KiA-UrKj zUfu+D>?v@AzkJzR*SbvmedJN@dB3jRB2%8+n)(y=Rn5NThOI}Je^i#UnbmSM(A{CD z`oMO+rFIOU&3E!s^M_|5Gk&W;nst7w0c8chPco2Em0YGvkq2>m(G5iQXV)7V6uy3)pjB+j?fixJ% z>QUH6QH82kwJQc)C(*Tv?owbrG#m%Mw$Zz;-Wmmv?s zjxH-+axbdd9`rx9NX(Ehu-+Cc?dkL>KNnAY=HeQ~ix)J0IDP`Jq$4#jI|vv&qNe}x zfPbvLQk*Nel{M$x1Z9~z%%BdB3lzV2(g@dnW^O54rBbUZtzI6}GHSZAe{}aWdN`BW zTLo(HazLF=&tV-CXWec7Y#K=J>~rus?#L1~P1`v{I*>)&&8ZYq@=uqIe`nx8n{kIq zQpI@O0It>C5mBo`_n&lI;;aYl^)NzynK>@G-j^P7U^hBXe>5wk2F!sd`%*3k`d6$@EY-Wx2XT>ibR+IBTJXqm$$8qydSildL@vi&Vq@~80qK{kcl#;YjU8Scu1 zjKUK}&Dv{R3+(cjh+(zR-DOM} zcHvo%%oEUImwxeP&=sUVI;(Epr~bSJa*=L9(VUMA{Uix=OMVL^PK}j=;(*`n7m|CZ z*ypBr)^J*($w*E^G#$f+;>a+G5U%1uP6Z@3f5Tl`x`*Az>Dh<6vDOIx25PTTo5{tQ z92EE{=<5&&o3?M++y^t-_sa0^k5m?CHFY?hEdK6BPIKYY?ri%H)8*ZqJMd6DeQGME zLv(7dw<@@utLtbK!(^bCGpPh#7 zvX5fqkfD|ZbEIE@AxmmeKvmFE5fJy5d<|YDK?MAK+&8#LK%a0ao@+HR#r|4vYHNW_ z+N1`yb2iJCElO>|cH0SRHQla2m(fZHcCbC0uPKdQ$`?ibk9O<%XG2HpwIgqtV>6^w z$AJxYQ}QqtX8{Yi{u=eNkO^>pz}lpvO&r1uIZd0FWvc3^SK)JN?HLnm>|6wj)y*v3 zWwgcZi(kskP5{BpNI8OQL|gQ~d ztxAv;v!I5eK)Y6%$$L){s5+C3>~P4x@xO-0qFF#8ko6zlQ4q(=ZazOC zy24Z-{~}UFW(J?j!L$TKN^fQh{ym}~votn`;70q~j&cG0AeQ|KZ;vqvphTMv%!Ik5 z8-%$f-`XVH;lUSN z3Edv{GeY<{7TxuB>pR0FWk}MdN+xulTpTo$Ms5gy2-&G#_&e8dxtgA}=gA&)!FKnA z4AXeqX@!IanTOq5D_57@sAuzpp>f}JPuU8xmwObEE?Pt7KbWV}Z=b!;4z;z1KH_xM z!j7PDy@%e>armJcXZ`tuujF9*dz|g@xc-u-qmN{NyLaF*XcdX22Xq=uCgQPxW?@sc zbxaPU_a9(tuyO2r^QKY}phW#J$YG2ypv4XPbBBuZ_=JE*d@sZ^2#Efvfp0KVqj^_= zAMCbGmjJK*L;@uUonSGQWKUGWEL|Gt2L-J_sG&BBzJl!Uel_m+y<9jf#^m5v!{>S4 z#;%g?b=x&}D6c2y^dfU~v;gX`tq@Gnj`*$&MaQxJ4t-z0cSBhADrJUio2z@?snCSj zr>nkQ-Xp~dS-s!F>^5G4NGMYNY^U*`U*Bufkddhr%c3NW>;++s{w_7VdwJ%U&uJv1 z8DBI_g+2M2=-4U^|$}6{gVGe$KF8Dl@L;-2ax^-e_Q^1X@lE`4y>#{r60h 
z%wTBv^I(!7>h|+{K{XrJH1nIoLpWW2wRc7|RhTDdzndS`5H+X1G)5o`nyDQ{s?zrjJbl)@r2Oht z@N#pF3WK6wB?~=rl8?LhQ(0m3FhyrQdsEzCdB$29qKre=ztb^V8H|Z(ERD@z@3Rp6iCFbXY1;l?`rg6LOXwNG#6xJ1HZlpEMg&K zrY0KV2f|F%?|4BjERX)%j=a*yhztd?v`inqmqYIw3|nbBu9%^^89wIy4$F7_rR*op zU`CvkV3ATvWP*20T2*hlEv(@(54r@LH*NASfpDwUlZ{m(lp~&|dURTu@+KvfP>I=h z0;KGpltwV?IkerzG&zwts<{~whu#vThG|nTH-zYHQ!oA{c{x{Y^xd3QbEF` zz@v{jqK;MW0$SikI_y*)GS^c<&F|udj4?iJTm4Z?Ws#FOY?XaaVMeS$2_xqbb7hP($qLXZRYGQFHZk<#obhFHslngd1qj6LL)x(X4`OFze^5d;IC(#?>}HV} z4usWIRDc1fCYYn5Fhbpd)jUXbRD>xq9%qOO2M)t5u>Dm@@MKZRpggHqr$#OJ)<^C; zxf##8pWc}v`NqnNf(d|+xL_w3WQkgQ&KGMS9$zcEBp{oGV;QsOgUj}JI~ywYQdmKF z){9Zom^g*s(SZE-K?}loW=B=xN_!DMOyn1`{tXjyj*H{t#2+IR2m9*|!J?rzx8-Oy z8LzB@eBZa>zO?W1q-2eZmbb8vjh{oOCkuLP;&QOG#^?t<;HFNx zYt+FZa7%hnqmn-^C5OF*Znji!(L}d0GPd3OmN!M@wY&hovc~x6v2C^fmfh`Er{A=? zgGke9%4B`sq=!3G2BrvA3C)?QpX&Ugi$XfKStoM9{|!0JZ*pL1aTBG3F{-g4d)rjC zv$>O>|A5yy0pe%;s}Tla%nqgt4R>U-2uiEt6OEK0+gYhj!uvg7B{_)eQGqeGbqAI{ z2wm!$D>OakYtNbQaX=a}*uJiA4;@`2-1z_$V8v_Hz1)|T`{ggA?0O$VOkQ+|B0oCz z0J;?n>fyVsdv^bWxiaEW(-({#KN@lLp!Gj8ytRcI(p7oShwP3e;{x6mlWh0`W>hio z*T0}n1bwa9U#~8Z0mUjmEuQilZ?$NmWxo;2`w$rN_=fJ`P_{N0|FCwl0ko+qvakUj zdn3ASZSltlAE4f()!VvOY6H8!0xC4UBO13x+uR^W#GsSjj3>4xkCLyU} zoXXmUIa^hcp|4D9L~iBI{f#REd#*&+-Opl)@QQ%X@PS#8(pd0u(UCsm0>_zuI0->s zDoutkq=4d_-if$Pust1}LGBhinp=Pnox;Ew;uQFPs!>p2cmr!l;Jv6i68MI~z#H;& zp`wP*RG6s^tjM$QK+FuKhcSV!3u$DyCaxwgOD}RcCI)6SvMRS$e{VL_NkFgP=tr5S zU6ohp8${GLe5vix5&ve8FKO7tF6O@6xn7@Z6TvQvvp-W?cni&A{}3G?{Bt`#&E_5? 
zZ$J-Ck2$-W$yEcJSS}Qu5XbJnH)Y67%08cLbGva(Q45D8&e-I$WKY5QFg@3;H$|?6wh!G)!%e&v;FwJ0pJJ;e&tdsKJTWyjMg7h1c~}Ta zR&d&8r#TjM!ii-~39~X3#eL4N)Z4fD9af|2(s?Wa@77L7RwT(#eGzF~Y|D_|3wY@} z6XjbYM(Eq(WVL?4B|yVUAV3vBHt0LTVRD#bbRb5a*o~JAK-QTUcoJo7Ji!b6-PN)3 z2H@R0b%3?!#AA!cLy#w)Q=FO>yKI#z9;O`Gt*mXxJZ4drlQI=OxU9*;cr9-Fesqdl zvRTPN#@E|{_SX0wf~^ckg1qN1^(sD!21c71jbw%&%8f~=o>>QN3c_ZF zcdnoT!MxvBKakhzn`c&|oNYhC)Wug9p$Hr%#aX@4Otxl5IE9t59qNiYRTqTJy~6FV zje-4yt8;L>fFR|wM8YqM$11;+s_}sj>1tVg4FubRmg#E9k{T(B`OT&VykARvpG+-6=1zI`NZs78s9*z;(sA_ z3$*FY3}RRyy^6xWCJQ9u$y!kl+#bUE`nh#SqEC)zUYy2+X<$af5_m5Svw#QSN{?@A zwYc$r7mSaNl9v%eJJrO3rhk(gD8Up_ScJkD0#}VkCdwhcdOe}Aq_GT`%NLx!6m)1k zy`3#liG7~`)#>Z0m<58TnLzp*i-2`GvsV>6aUa5~^<)z+{wH8Z>fQb%i|NDfpggzfOW0+TiQT!o3Y z7#aPGbB=tcg_*kpnZZYH(!Poa?LJqSmfUH>j}%sBl0Q32YQlHF%QZ(&AE8I%3`8!r zzn){RgI-b2HhcrbQ5yAh*YFP>&}219M);S@N|Em#H9~yE0t~UK;3YbR3SP%$cYOYS zO`}{}EoW6f(yIf`!%`rgudQ2wy@wJL8B9?$fu*>iaauaPXf+|yFXEhz>)Q#J7+ z6o&P+5>@jiHlf(Z*CZ)hsZ%Cj>$YY(TQiJ;?S$e}vY zIsK$OL%NkZvUDcufu>Z9Uyo~00smuH9iCktBL{K>R2a|fZvn@@P@J|L8O3A78_;FV zyL z(CY#25HlcnXXg>jG!oz&bLi1dEb;PZD#PHQ?6c2GzA~M3Iw$4~xd8HQnw>OCIi5R^ zhy4;DoqE~)J9(8tdT8+T%Ul56gs?B-+}X${2i*3wk*pX3FBU%#8N zI*wj}M$$iYAwJ|0w!3*?Lha7c!R+aZt zfp4-cacR7(k?$PBSmyR$w>vS{5ci`PT130N2-H3A&Mz2vh&`}}*=!WVR29l#jogen z&u=jBe>vjE5WNYt;@+*>op8MU(_djuOd`ik{0oW&Gj;yc6!;K$2k>s7$2&IQfQRGT zVltVc0|$H&1yTOezcd)Du#|!vJGDe%=QvSJVbp9o_S0eC6CILc{$FPl#DO0Er@w2h zF@EAW=>bU!0Y1+UETdmTe=gb<(lgE^SU_=C?){AQsU=XG{Ue{lO{Zoj5cHb8_HO?z z?4{YC9c5;bfF?-Fsge_lAe>oCx1j6E!tokgemy9C#=QgOZAv3oMm2US1)aAz6~ojD>9H2i?*lf2>O|KFC6e01d>Ri12P@z3PKxoAo+z9%mZCNr%G+ZJf z1ye*Qn^+Xw*_bloc3Gp^Xp`NR_!vVYMJC{ZSf%&cBi+w4>LDv|xABU_x{+*;PjDBM z7VrI5DFO+iTRlfSRWrhAB$bw+7s2yJ4uI7WkM~vlncrT2n>CS$)cM7o49(I_uMJ z9eG{Vmd|vBgx94@%RNCEX6jglq4f_DcQu-wWmf-YceYv0Bt z4Zr5M8HWluR^Bmk8B$lWJ_GZ^0ItD>X(Z5H5#>li^qC`LX0gUtBwbA=GPjQzlgB5FnFn)Rye^+W3DyK+Dan>Fq- zOy_|%*4MY?n1)ebC|N6nn7EZGFjKkNP~)Z%zczMC~ 
zB`fMwo~LN13&bC}>;`xkkwNeV*_Akm8io@RfoM{XCBKbspRk%=h^*@`4TPpQN*yfW3_zftY(4W})gA-+ZvZ$3Hd%%er6ZRDzs`eN9! zqa5ku9WF4NxnPSMq@$+kz5_lca}k|r)=%dV9_!z>1Osr6#HMR1M1^g6^1!whO*BJ+ zfL02z0|3D02K%+G1LRIH1`y90`~mYeu%XW)KSI#eV;aLD9R&t6VEtg^I9I{|Ml8gFa~SymrNs@gAos4nhoLaUg2Ijxg70CvCHxRZzf;sQ_;V2{ zifmAH$>xAo65(G-=Mp_VLx}}ENWQvi{dB6Q<=8p|Kx{K!oJaw8ckSFjDp@8YM%&K> zX$=q>Nvq6+3FEw0s|Q7ijw@x#M{3roS46DrTe6pmSp4rv0!^blF9DU7Lb z*P!JsUQ+2@@UhB!6_0Qsx6d<{r0Ug=x{_J!84)#DkgRKvGftOTThAStaV}y>kxGM4xh0r$f zy82^XD|6mhw-U?Ex)S>{V@%WiQPjHuJ0VbGvcral`(~L=$uqN?mvV0Z5DCLcmPUzY z=D@9(ot%4~Gvj|IkC-d8J-&9jD`WDtM?L56{T zW-!2vfgtSe+nBwc#4~;#{CR5uSZpayxZAH1f5)Hks=wW1@&JgAS}7SJoP*3?7k>)x zqwO)PDJu5omCcpLv&r8gh&_yRp4JS80dBF(3MzA6a@h6>L?3%6KW;Whpm(b+^cQfV z9BT8+w@BaZZQ*@syyv76BI_(x($6(`k)Nzqw3m((h{I3@QBru_^y|lR%81(T&duKa z3hOX5k7aG~IYKlU%N+hmpwZ(n->zSM%o>?mtzL?oQ@k!@7*?AwDz%w*b^GV@z5>tp zZ6Gg8gLA38Q6IL5T5Z3D);T)hWYy+!_NRD0EWw2^PfpjmWvLkw6cr}vmObvGzcAb`Zau#(|scEWB*eu-I*QSEj`Hv<0N*d=eWQ10YHFDC{HD#~w0a_XOgZvZWF)sr=9^cZhvWi3G$Xe9{=gm4nl*`C%PIjoBh^C3m+n z|I`UXxXac9CFbJY1+^+@mmj8+YOJK$7!-0v?mbJ4z?4=xZus`_@UPe1BI^0^@7UrfqUkq; zbB>!A#SbzduQ#0 zFYSfLl!{ZPK8~K`^9*Y=5Zz}QbRe{$SUI`Y%8p>e)0|(x|IB5f@yce-!jE60&5Q&U z`P=qPOOXZDZX06mo=)RNL`(|SddOM7*{aEPK7z;XH`?`yF%ks8iWtX}ljB9hfU_Jb z?7yWbCLmUq!oH8E0GMI_P5_EiTnCObn#L$5QMkm@V=(A>#3^Y0QxOEr-pLH%j+u+2 z_!JOIsB)7!#V;)v+DByHODB${9U~UX?W#?)1`vkTr@wT~%swIIgZ~1w@-{#eho#5f zXTo1y(-mZh4!(?3+duO9u)Oj&v z(~*45c*Z6oP)kgT$cU`c^F?|Jfn#9jT0`odWgB~kB|j8P$)eTjZw1^c67;BmW%Wh; zdg3!SF2MM-!`XP_8ow)wv8oL7QqLGLty>XEQ)zY+YH~ntbT@_oyc^=fcymAU&~`)z(1Mic{k3)7$ub>1aYY z03u`BM?QvPq<-!AXt9>>95K(d@|+!t2!z`Qaf7lvQH!q_pP)H7Ip1Zb_2D1^vGx-} zY6JjAb}1!+8zg{d7|;!&5b(*vFiR^+9)N(4m0F^Ff2QrNK>LeujAs}om~B&J{xXBJ z7Lntlw)g>orG0Z#l!kOMZ^BnmoT7_R5pJdkYT!bx&quXfR1Xj6`U5+-6W`u;B)_B$ zR9v9fA5Kn?#%odLN~ZbV9!7>fN+|tM-rlNLo!F^%JbRxV=yU(zQdMSBP_#44>)Dxv z>CTkFb9D_A8b2MB8RHO$Ibd?Md)>p$?4JA({-T9a61KEa1s7x0M;7!JU-=!r7c>f@ z$tp2O{BBW!;j;YX|FD!?7-dZ3>mPPY@0SzZlbD`QtF^Ey` 
z;(O0ef>y)v%^wcz`23>)9n#+3{%3EmSTox87~B6}lkc2Ce{KA>hIEE)G;~|?^lPdG zhjhBdOYXcO1vtlug3yl2ZxTyql@%hV5&T&Z^|1T;BV|F~;b2rp1H znJ4g{qwEREKs?jv4rwWJp`a>Yb^#=5aQO>EkIGV)IcG+%9vrnY9&qNTE-*1UP^bfs zH2PYQw5fdw0I$!@m5LEcYLO;O{hgJuLN+QNG`Cz8rEg5lEcw|wBIZ#_)^KWHks`C( zbSV^TUZCAM98g)77-@<|UXo(JBWE7rRQA(g6~fA*cWwHaS(GcT%M#ODMN1~iW+g33@b~X={VCR zXh>`EN&lcVJO0F!@z4Zf@}KXi8UH=bD!%fNtfMdfO!;f$EE`vnCE_}`*0wY%2S}>b zJ}4kVv-=aQpJ_AQ#GwEu`AhJdcB&J8C)7z(G}7L2pr&AoBk5p4$x20FP?r6#&VXDN zNX0m04T>ZY{7XcD(A=D8v4?6#_yzy-W0}j1XcWvxMs>3DqvsZOJ=XnuRSO7R-v1w{VH^OMd0d7E*aBlLo3jeE9 zCVGish~e%X?&mQs;g|^Dl02=-{x6Ehc|~h4V<3s--J`C81%lnyzrbztf_VJ#R^?(Ys5M$O$eIVhklofSz| zkmASWFD(upkLwS3@1a$Zji|*C#F?RknNh!=y}8A( z<^s~|at^WQe`@Z27_9IB5E~PTFhPVOk^-zvXH}kd>G;7459L%lhZzXsFFlXsSu1a`>gVXad$2f`^b~l-XHCq>MVYoW-^S#sVIysk!+1Crq z3bmhQ@L|OiPH>KMp?%;22`myt9K7axabsh=eFO1AJ5kUn9!Fyk{7Y{<6Xuz2%mV-_ zL1$ad5n^4W*7YH`V7!lbcAi>|v;7JHMMR%i1PfK8m(Pfq`V}LBFiqQ4pY@hV|8L6b zL4J`4YQ-P^vzmm)MZ>AgRbNmrCAAR^G+vcJFdykdNYjPjQ*ZX0o!FW*b;%aD%*1{s z2?>v_*?(g^B*}gcsZ3+Vs-aRCAR~Nx&$^>PJ{w?pTsiZTo%n~f1KQQ3_(9$zkq(OQ zFFUO{5&_Vjqk`~tPm^N>%Y-(eQ>2BP?e+!V98ymr8y|EOZME+nvk=T}GRK|GU~qOU zRe3vsDg|Dg6~L0lMx{yISbrtNdOf2pL=rJkOl^>33*z;=|9RZ$rJBzob4^DFja65O zkD0{fD0_E#Lj=$ti0hz7gdAbtAGYgB%>=9cHs(F09Jg+m)M8dO_C<4j&BGwjtWDLSy$8c^gUk6v>yjAk6CQc~u+#d!vR=WxeWk~T}Ve&$X ztYSj0_oy!*%;47^I?+IVHvMbWan_=~4=UvHfkAR zuGwP*fDppM5!0HnKxg%6)IFbt*EyiSCoIhw9$2eyzVLkv7TH~Ofo)W$=KNqW&^x)m zdLj`4a}0ZD3}o5&_*5A3xhm+CgU3n~x#TNv$5fF?l-2B)HPCv@qZSVAWa9M1d$>#O z`^Rk-v*9NBda`yS7OQ8ynDU#amEW#4 z=ZH|FXjTaKMUXAed?V)iiHzuI($;ie$NoIz5HG5an97Fpf0n5F1no| zhPV|`dRVd2yml8i>5@^_Sq=oWsj5&Oh!AsCvV zV7cjfRJ@smLtu#{n@1p=rGu+Yjwxk7<%;P1U}md2&%+7tEAUslPj^pAjRq%(L%Pf z7!&FyH_Q`pP*ug%&$0h(?A|AkDL@RSh-n<@ChT9}@IP$@K*iw$sIbUdR?*rzopAot zJH>Fh&3n}A*z^40Ga^8U7sYDdrsnj0RVZ?BFn&xwjipT)Peili?2pD7*|+F*&QRjW zakDb`k2+0JTn_|>iKh;mXubT~gI9EahpE?b53k2wS3i;65d%GuG#Z#yDkg5%(5jp-V0Yz)X999@>k6r*@>}Fs_8|r?c|ANUZoS0 zd^?=nm+Ko|>mFZVRx4huXM?~=ri3sb*DfyXbm3;to0#YaE-g!aYLKt#n{5_Yfm~Z0 
zfQf}k?1iRCy`VUoG|=x(EC8C9bwi0)0ng_M!)H9@;%DL|E+pKVoJbx%Q}av;J&`p! z4T>v3u7>5d7Yw0fUlp$D)$E<8&k6s2FQHq@h8Y^Q@Zj>f!!Iu_vwt~}=h28!V2HkgrRGOAM{}%;qOE}Dk}?Pp&6l9aZ?5#)mzdXi;82|Db)CTfT#N^go?%gzTeAOsL`%fi6)mI+2mL8O{(j z$aw0IKE?)Vr})yBEI?OjyK)fpguh(*y1j(^=7@oi)Ncpzc7Z6Y?Q}?j-{w zY3@!obk9lU&=hX@=O;g5mEcM9Vu3c}(S4eb)L;qx`Z&a;R=>P|LeURG&iRAM##Rs{ zS$jUaAUXb~?^^dmXfwDhI)wV1evznJ5rm8?H*{j;y#%gDUy5acBTahF1{t9HJ7bh$qOZ@ zaEJco-1v#)5P>;T(z#xdF_&bfNfF@}fiqObk8Zl04!7k_Wik){yZ!w0H>?ConeOj< zYgT0ODO@hML|^eSN1yG?0{k~PJ>JBP!FN4lM@BEf{`2Cx`Di|r%xdrB_)OLOED`X8 zE7l6))sWIZ$uF|OJ9TpRRm3hb0uwSX39-)dbETww7a0=M>Pl0pKk#W*&$K-70-M@;Z6T=e-^sKSg%X}MlY`5(TfkyFw`0fq&4Im8;!A&~=t6+vc; z=Rg#BhsF}tT~FEXde#D+?a7ETM>(%2J22c0AlJf|CUDDjwCKO=tW;)h9_Np6X1Wy+&x1S=ce@lNOFlWYQphDFs$CT8x=}JAqX` z`hf)=1HqH57+@KRF6uzerG$zct6xvSfV5`>Kg>hf!VX*Gp{1>`kkb-E8g=yZkrmtg zE{|Jx-fFYj$ertpUY**yUEg0*6A!$jv%m&t8V6*|kN+R7GXg@U|uTI+({yxnU#_>2n(kOAz zV~*!aC#Fx$jpzOelJs-(3%&4*us)x|0ljp6dqA7F70A9bB)-zlSm?I5tapvUkF9|n z&IHr!*VG(qY3w0F01-;u41uJWt7LO@pVL`)TyP_Hp z2rK&@zAzpvibU+bv0I)FSQuj9mAUW&W1rWJhx-w0jD*%oEj5H5V&lvZdh_siRz=`0 ztc9%wF*ZFCYxyO-#KiJVN9m)OCzq*7f2AY9c#MP_R^XBtaon@6whO3J?jjs)76d!qg z6T3{z{KLBX%$#-m8pT^a-QG{ zqj610h!{I?aEo4>#ocHRtlKSm7!*nLB{rDrC4wR3T-p|Lp@Hcn(#{s$A`Z}5wWPWu zFV_}9XU)O5M7)$u2jUWa+VDD{Yl=vsQZp*+?uj!E4AaScHWj2k%-j z1AK@PCY6!wPx~g6Zk2?VsL)wMkJZ$ zYUr9pO1jjzD_zJGvZxC3>e0Uqvo z$c`R4w|AyCE7sp8=K;oofU3dCA){#*H7cWCj+R;SFR$fXJL2_`RSTZlbEWDJc0CX5 z{kIu-y5uKuyGg1YV5-*7p-2rI@C^*z>3o;+N1YZ0#JbNs2LSMG4{qWoxgNrsuO+Zf1v&DFjUh_Qn$RbeE;wq~e&CrK=K2R* zA(h?xSczg1g(_z<1V!sw%eI!$HnFoTOA_-&4zR+@6a>8mfEb=BI{j`uF%T~zqhZQo z1Cnl!VXUSRR6+zz>5O7|Su%Ddrw^Jl!Ijz_EV4u|ef?+scx}8&p}w>-SX23^Meh2I zs&*19K9HMCY9x}2s_m1e6)`<|GX%+xf25vLq-Yiu2Ll9BQ!5cebe89YHuPsj7hn=w zc+iajqe)WakHPq)D}=8D9O_e7-u>l3Z3t!|$hxevi7Phixqgs6>qEad6b=^2glC8jzX4F~JQz#$vCiZy3;Nk!Dl(0)x@ zDt!`NJfY$oyL#or%g7>>_9IJI!l${Ppij)fqWRqJtX-38JRV`bFsU#hy<4oA?w`=p z%2lY;e_-!vXtS_k%BBCXe4M!N;04beX|MK9C26#MDn-&5vlg{3jA<$daWi0aKjOlR 
z4jn#{(BZFCGr6|OxgN&5mPv>0Y7Ab}CVdLMsYU=M1Cc+jt9o$?)F-4V zU~A+MEl$m5%Sy8TCc^P>o~m_7^tHtAZ_RJKGo6gW8q;JDVn&$QlqXsjQ>egtfBUf+ zlF*Yu%>-e0Vpw+5EadvcLs%InH~s&eIOG5gj~R#!w<_#o?KF#>+p+z->D%%))RA!H zU~!V;-j4IhyzSou76sbaqTA`$o92@01dEX2K+`b8{$P-yRqvF{yZ?MH6l6}jE{@a+49-|D znegtU!}r9@*?Mi)Q`mL(0C0TGzDO2389V!N@=+s)&*`BU>Dl#IRW%5i-$VwR3eBoi^nI-+TTcG;md;|AZ_Wu zyxDH}61Cpxm#;P1?Lh{;9Mjy9)#gN3?Aup7FeaqbQG3v7rArMag9Kvs^J!N=!U6?p z8*29=V#==WLZ3VM`!bj81w>l#Ke1t7ECakNwfhXk;noZRW>yolOOUt6ECKeCH?z0D zQGn>`RMh3A#SH|t!M5`*eZPxu9V`RxUR&G@29MXp$7kj~J^XDAX%ayr($ zx6w~TpC+Ujnv6l!v&8deAGz$<*|ndLVQ$NdEjA=DslDa@dZ0n99?yaBxn(K`>Z@tY zPdsBk*J0tQ>}C~4*AMOCh zt#nu-?T8w>5)Ae%_QYF-^8%+3&uK410v`%2vBSvLCWnG;foc5g&}Ph^x~=;sBC(u4 z3AMXBRL(=HiNb!3dDjuF9g`1QK*h(0m&-ho?RxDieR`2tz(pTEt6=Fs^GWbwo55qd z4?mOj)Fm|e8$-|GWkZ+2Gcljxd49#0)BADfJJ%42n}{%vPApO+G|eZ+fi>hPlgT{P z*{>gceiYa*04C8tT8%sF_*XFZtV;}_f<;b*4nERLOjw9jN#Gq_%_Tp|w*`ZRwi6$2 zfRhUgKDmL+64;$jTa-F2k2p!pPj%wVZmj71_|8S-Ld@Io$PP)|K+&5ddiD)zw)M|9 zm7~8)r987R7PgRA>{l{rP4gthT~?jcfKL4(qakjg+NtF`wZV&swZM1fgqEnBulYfM($jT0dLo+*QN>8DoQ+uO{e$qM6Vs)aO{8x| z)R}AsyKLK-M3y=~0qkEztMEpypM?`V;{X5%v*i|-yMuZ$nKwQzZ=)Q}Q7u@X_6g?Z z8>Hv++Zvo}O*e|;Q4+ssH(u-Iq2{FpObr0Or54%$NLnI6KX;Z~eP839T+VO3=57GW z=_$sCp1%G`J#6lN{v_w}%=t`%>zybP`EUf;|uMio`w>CfQ z0>4&6&~FZ(%NNU}h9=YdSx{=X(Ef-4Iywd+U{w*i3kz7$ft57gt)I&y#QKMx-46cy z^4mj_(5@&3L4W)&-Jche3X59?OkycyrN0zHoOpH}3@n1tsycq6otZ z(bBGYqibSo#i53f9-GF8G68l7djUfQVU@ra@I=p3FI+lkXLk1!!6jDVo}cOB#ktF( z3Nm>=Ut@8xT>T)YvQd(12M^AZBObPZc}br80NPjRV2Sk!zNzi`!#16<8nFu-PhaL1 zlBc^sOO=Ag0$4p+0+8U`7YvAFuwi=hOl$u`OKoXr+>1u{n8v;nnzS4Tl;`WLs649a zL>6RLgjo={x&@$C{xxMlUG%m86@#iZA;sw@QDyk+9|-&vQs&uC{8XN!BU0F+&ecvMKAp=Y2taUpSsP)t+N9A+AR>($ zCE~(2ldn4RR4-22wX&9;nSwmLXNRd2tIg1e{)O`9ZS}|HqP|YHm zu-Hc;A?CcUwMQl(e}7Jxf@C&qb)~q%;mv)B?)Mi^`)~uoDDlmqeW|082cE1j>G9cV z0F_I|o?e-5rv>$$hAI1t@9sDGjMW09R7t{TgI}IsTh4+VnaQNmCXI3lO35_Em6P-a z*uO`k2jCS?s~89SYXVoOCAJQQEG%$cU!$TzB7@!Pi|Mlbrcun@x#{T(#dWKP*#-^r z2&yuwWCqn&%aITJ!f&Ers?hEBk{BcC869j=<%a-9uFW>^JUH&=OHhSV<+=*`^)7m8 
ze-N^k0xXY!=o{Z()_u33!~pL3SS0S(1w7R{c;1kJdN66WZrLgi@k zmKu5)dRMDXk>J_3^mM4{EnJr=n2I|b8BMt6|HQb~vXAb3)R0v=^2&^K%+)UWmT{1X z29;oj@$1~Y&*0##PE7?z;Ea*a@8dm^)-)QI-Djw=rH}gq0G)s=DiMB8(tiqgi+s1R ze0UaZ*s!K!ODfYf8@5gC%y7a5KKiwCJZ8j%`a~UcL^V|YY2{(T42@asuSn)(H)R}& zL;8#*m{;J#v6?fbDd}y5|L7$qSdNfFj7Am}RW-@Mo#?=*C5wq)@t=H7=ZGt8y8=w~ z>4Nv$?uY?@hJs?55S7JZ9)%cYX?Fwl7W!4qN{yU{JkMA(Sv;@Zk$i{^=aTCA0)axF z#P9ODLebyFjpum`A_H}?O3NfxDNaSktW-FX5yZzx_{iL$yf^EU97@T zS%?E-QILD;4A!?#IFP7>1ILxD?N*+NRNzENJi+~u=SArv97F{faGkqsCF$G!r`~Io z8vUTuLj5`PASTG^@#Toi`CB3E3&ub{NmnpNz#*=JKVaOv=5>iOXZs{E)47U%Rw1)! z6&?euYv%vfby=dTt3$_ZAtvvSzcjlWy#Rc)T1<(Y(yB7FeV6G^>rGSNNa*!AUzh88 zrugost2JF7w{24Ims_MbZ||EuC_&5!d`}e#?vPIl2$4rg75Dt)!d%AQPV%s7CSpY- zs$Q-RP}PfnNM02jlG#v*y8;ELHh%Z3PTE*DORq?dmzuQT|87Q1kjjK?qq}r0!eYPh zq`3~wD(VlhT&cq$67o?XgwUw4BE1LTXK@(vAW8ec;tCNDtzS&nvH!~gKrqYTu$&e9 z=`o}!gpL&?>&aviy!rP++x^pHr`X5oSfC!n%xSwv1)yXUl&=qyr}Sdqcf~N80k`{~ z1+tozjk=FI&CjS{IDXDVurLj^Tk1qe!pvCcEmP_YFBI?U&Y>$N^w2$amstP`Fz7)R z>quUB^tr~KoeW6}EK`?idBlgB%Aer9LmMd1eEWh5?Mx6myR}@&0dN}1H3h*_;vIW) z$@_=D&R*jHC1j?3ggyhhN=KtKgbvy?S|#4>#41XPo60`g`5*Yun&gWMDr9^Zm_?0h zW)R&q-4&MQNyN3uP}hKCFL!cjxcvF5y!1nToE zr^lBkw?;H?VnACc_b8mm$w8IWMwrO1ZWM@wki*8$D5@*vt#{R*Vc1S({%4Zg3tHLu zOAg7tMoS5S0sA$x8&huE4K>5eXB;-i{_Pb3=)BA+<|WgbRLvhT!bORzBSbzD@q4}p zkYB~`B;!I|VIv7tyL-A|xOLb_>|U*Xl4Btzw-t&ANk-)4m!Yl`EPL&SY|vzvB&Xic zhNuQaxExyCF5PRcv^)jV`!H{V9DDC9_Xl^v8GE zPt1584??t&@#Te?!ly?e*R%B-27HnVYw+YT_wO06)t~hX-c1VnrW*aKtDz=;QNCz? 
zMtKsj-u6x7F>XqTa=9`2;0=8KPe;Tg=)H-_-ZRIkp;?OG~y zts|*X+W4!FTwTjoQ}Pj%E%u@ARZ?`^e{YSbIJR21@SMIYeSkjC1C@}JXf3iRW7bB+8DhNzOz z=9=S;k-Ew+OCTE^j<2O=RHIsfAO!chQz>XO6JL2CD*+iw()OKowQ%F581IUf#hEu8 zq`bvP@uBvwUhB0ch`qAJc2VcXV$gQ{nedJNx#h2TduRcc-(lA?gM>@~1lK1noOk+80W_Ji$Z+S#T)maWQmd0p9XeE=nvtf6A>sFVL5EhN7ZIv02CrceMUpBdS)^WD|&WrPx=~5El7^ zikxT#kKK}h0iNILYxG73Gx^mtCfy@Nw3LuYYs(Vq|60 z4*-0@6^r!fl1R?`d%6Q1DaYDod#;w#N_LzOe^jX6h<8Whu_J2Dt9Y&|(lZRnAc@R8 zpLdbgj9LN(ypsg5&Ser=E9B5* zXaOwHX8&5Kq-#Qq zgO&9TKbv>LyCO}5c>Xow-hhNv90w%^C;q+&aIh5KTO8?>#-5W*2&AM1i!GHsas#+ui@L&R?Vro=2s#B|Tv)Jr`Ib zCBu!kxt))S=JSvmphI{!J-tqOloYEk7iBfDxv^u^5H8zmA(qJ6<``+kd7h%(7MIC9>Y3O;d=BQ&CEU61jFWRnG zZm6?{MG$)t%Ceg*pER2Rjj!Eo&qWD?1<(MuR5=KSrsDdMss$RnDRfbYT9BoGox_xp zE%&1KS((}}GLrFZKkdw5aHnf-4}D9V7(3>E`-Xshn0~I--yG#s^$lg{7d~>zEw%iY zW5V9XdS=!C&^3S5pU~ibOgMHF9g-{bpZX0oU;KMR%Mt@5h9p;w{6Hmkjb&y)hb-a7 z#*)>|$`|p#ZkvCN?{>*{(dq4kdkg}@#|b{879f7MeT)esP6GkQ=_ZuW-=r@O(mK~o z%AjqNoHIcDUfNM(&os3`6ybxrGR3Iz)VZ?;r6F>f4y~Q-n zbW}bdxn=WbgG5K8a#$fKWN%Vqv!~~$*>75*IVbHP?*eBn@Zzn&1r3@&I-x&JsKvhA zvSE=$=sSb*0S8%z+$ejf|Fa$rPc;+|-yaq#s|M%ouxX`Dl}Z>)H8I#<5MWzH4}}uW ze?d0?S>U9;qy9vJ-o;tZ%Ojyl-w*Y1UJLaMFIB(def!J(3-GaU2bqAFBIL9y6)YAs zE9|t(BvI2A=XF5K&KXANuoJ|93o|6$>r0hoGjNIFFbP6lN0JSX)dUd;8M{$$FfZdE zQaM{`Lx3sW@+q46=2IIUmp!6X$pSW$j$Gx%CO=f0wbwrT*jNq9gN>S6-JGB5Fs2m1 z02EgH=3|!Dibgu>D&|37Llq-vE@hPOpBoSfuMRE+I^;5l z=JL+VWGcSgg}!!`ORY=R0QsUC3Y5{3hWBCnNeX6fKpk%XLp6@*wNTqq_ zaR%>^oyO-R7QKPz{2zDQbvZTr{y*+Ezq?m<4ohxHQa#q+Cq2R$FL}+a3Ojq|H6I`6 zH90ysB|L=Q1fsR3c1r5Svcs{YMvbHiV6wX!Nj@0En|LT`YUfTqec#C@mQx5?GSOD zG$WTUf73hK(So){k;dYK6a29mF;;V3M0-m0vvwM9i}!eC7ANqU*l%4gKYhC#Cu|1F z2Qbj#&8JcG3ukzmKO4p{h_J$XD=s&>xswrBu#Yf>?<{|zjY)k_xsd(qMRzf2%H6*yO3N(PUjDu`ZUVD=_P18iUBY z)+|nrrD8!FDBxb7Agb@RcK{BQM6aDkXI|@Gd;uSThgT3)wA^8b&s`QNSZdaNMS~0% z9*M)Yc00Gs?bc`IGf>t?Oljr8CXWE2O6U+0*8;M}8N8$S{}?~O7b7X~sA$2GU;$Vc z9Qf2tPx?fPb5%8Odjg(+9-mi%%{7KNZt`Xp_AJ2q;-4Y+x;0U_+_`|zq1F~?mf{+$ zvx6V)p1`Zrdmoo!8>7yYtD(TB$Q5b!HNGVy1!bAa=x@o~8pZKG^X9vesmk$PmAX($ 
z%_qS_wb&9Bg2q{Hb~*%uzGI9;Dgbah*I4SKM8;C5}i8KI|g7#9$vTc8Akfz_EJ~U(mIIo-9F^>*aAz?&lOX zc$3BP)FLkDr$hhG__vOeQM^`67J;9Zqf+}Lo9Xl&anz}+;U=*cif(eI>P{J_VDf`- z1>jVql#8*@rLSzk9z7+9B;Nx%gL(Gfx~ZBf7N<%u5m=K(mADGA5fA88<_?SkHRk>! zuVNf?2`&gAoG9F`_do=kYR1r}nC<6L5u_$as&FJwmzoKC?-z20*|I7O#8dqb=v3dS zsgDOh;rgnTz_~w?BVM-<6Y{OD;|h2<@7grPxGH^J9lkMK%ogI)%;fBqG71)o0(V5X zyr&!%|FlRSv4Oga(ACP)fED~oE}_B7&gV1(7FX}bYb50&Dd>^3Ddx4P=&|wGof0d@ zMp}YQTNq=Z-m}-BE>a+DBQDfZmaOOgwGW31C-wmoe}w z(7rv81&d%Eyr zNv6%|?LF`%z7JBp^=vAovWP(Q9Sk(}5Qw?kPjz$(o;X zJqeXtdgKf^oSNKC%Mw<5Fj%Y0*9aoFB$!}CXWwzh_`VrakLx*;$O2H;CrrY8wq_YV0h11q>GF5=`@RLSSBaqz_ zJy}>lYIQBMaBN;2WUgIO1VS2!ZodgWr*n0QCl-oxuL)o3JP6V$s0Sa8KYw z-Vbj*`~uu}Ghsk=F%$N=K^E3Rcx!D*T2Ett!*@Uw^W)<}k@)kZ&*M4=I1XL?2P) z+)M)a6xceC@0n$c?R)gE8PoQ%A#5HbqK_v2nNwvTf9OqcIOCjO$>eSfBNDu6W=f{} zYGOmGud3o(3rUe&PqRBi{*aJ_=^J;oS5v^!nOZ} zt~uJ%&HlI&2@$$7_GNP*M+-MbyM z9{j8w=kYc2mM=`pJ)OA?RaiUW_}5tub&*jKB?GNEU%a0?D+bq z&pSTEAgzd2GEn{4_Ks2jFe^|G^J)Y5%z7|K8qP3Ah8Zs6BR}LN0Ki-9%qx5gbIO1PL2g zXrXnth5B6GoJitCVfrkgwWojBk`B;pur zZzGv@VU0JY6eeuEe6rpzDeTi%-0LU=6pZEym%$%3RUCNNqL{ROKE@KDhq@8B2k#N? 
z(TdT-lWwmjT5|1&ql+gjVnZ`Ws9p^Oa44#yYSU&%AR3GPF9ZdkbT_(pMUnb6B^0Dd9nt4vw14 zey@PWidD|sm$vUJI)dtcbYXg8st4qQ$v95G^rk$HY3vj9v>2d1k=0^GE8wM@PW2~% z{L`sH)>XE(kJ7kFH^%j==pyYO#a~L^xPWlxcy!7%w{o1syGiK-1Da_EUzN$yO<*U> zPly}LXbFzhr9wZ{T)sXR30WGVlyV?Qq$?W;<>cn3znri7U(zVU^O;xzI%l(ZS@&Y& z6Uy6O6LtE?Y|=>{5)8#z>$P6KTiJ&$x92tubE+9XxWbD`gc`q;DlDxf5vM(zr3fI9 zz^MP6xIL2*KRIw4;zlgBrN!=|4HAX;RPVerjTYFRE3H?M_xB$zJJ+9`pb?Gs>;}$1 z6!$R%I@S2Npe)>|r?5v!ISRPvXqt)|BW6MeCb@Oa6%T!U|qkG0Ff-`7WsxU_5F=0D)5Nq1dYPxw|j7 z=)!*0NxUPn1soBYpd3xwBD0x>027^sI_(c!rT9W*XaLiR`tx@18}Qz7!cKoM{%FR= zD)UX z9VD{yi6U4LQU&QO*$NmAGCP&L4mxYjW=(Fen(Gl`xM*3XrY|P@9Vi7!6 z^N4sAbKU*@9hsiOTzeQQ!9W6p2no3=_2jSOxY{%gz?U}nqY@g(1w$ zkN!5#gjK`{U(m9GJo_uTmtkj${Al&90B`O~*DHFcP@OY~#hh{eQ`ax^mW?gS?0k#-@wCd|&9`60 z_iWfDCWXeG)6{fC@V(O@W7872wy>cKgGKueYe;mx-s%g$Q!$)Pjx@FO$Xsxo^1N0K zy0P%=0!R8_s(w88OZ|3w#utE9pruHLC$jYccM)6A~@c0Fy1+Z2k;l08k?7(sc4Ld*Ou!<*LYH(n%Th}l~^#q*Hs>n+W^ed zs098fUWR;MeWYX<%?hD=EX0ep%V}HSkm>1rSs`Tv?uif-In6%P-w{=cEcz09$Zz?L zLRfdwo-e0!AUbC|4dHmScp!wV2aJl1+7Mb&%{qJpvOvMqB>y4%)lv$;h!9B%Mgh|S zby!WM5|vf>=?ZJCjtTepuqD!Q{xftlDjc}ZtFRCBnj&NSi`nL=z23nYZNe<}horci z4g}Tb$_I*7Xhw(LblG#Lyg))Z`+1Z0vLd!P0Nv)=yHpL(&-Psb)p-u-Pw0I{&5zuM@bg8NeOCWn1xcBkUf~&x9=ISMNDPq zKQ>Z@#Xan~>gz?`u&t=RSv6@D)qzeFt`Q*E*cR(^=$naqL-BU z%+pE*!EJt(G>?|nkw&X_efk|@M2I40nx774EeXM}v!4anFY)$kMmjbt`4ULzX}B9K z`evf(*c6@DMnXICX?bewPdJ{JTpZ?Rx-6Ah=kJRn%=aDEq{hw*$e*dAZ6I&bsyUX! z7J!%>abt8&lIbOiM)Ibx;9mkIA(rEANRMt>^*i5dTgC{?-ulNLE0*4bE+WX?Yo`&x zv$m{HKB@L)DKt593+KgUAF68?XBU3=VRpq{Hk&&Aa0F_DWqw_tgHWvf%2jn^IO3gQ zHT9Lq>Rdht4-ybkW-)rqjo8?)(twVu!?iiVLD>$x7&pRgmMDL@!n8zXu1}CorlPRNaaU7xKG~z7lZA*UYH@+OP^5ffD zdCf~+EQ0*|7w?3JPcd&QI%k;<0(??zp_aTxab0Z-vSyfinm<;bf>x zz$pV{c#KaVUq~j-GwEQ1(UbD~gp^ed)83D($Yv$o@xKzo$JOO@4SxFBR_WRuYLQmM zZG%EayE(-kk�=2^44(q+UHa7|K7k#><@+xLN{37Sx5SknF@IxAvhH;l#$n*@H@v zVf^R4dy&Cb#30_|6q^;C;~w)m``9CJVvrtVl^{4KJ}h%u&_NebCE3E=*j9A6jTT0`DW$~*Z^LefP% z+O~dXR%;m5a&(ri2HllAva}bWC;9906hV-S_UIFhE@e;*-H! 
zk0h1zc3M{3&8(3Th`vH)G3qF={Qg?9$b8((VS??bZ`oY#Zlv&zx@6i0KOqb^PPI2x zXI>qRpuKDc7x+p|@UkE$BhIqwdYUj&p0p3O-sVoXBJL-7b7XZ3GyDwyz6oOr(xwys zsj)3ViYqR*tCn83l<8ez$2#`8Ga(fl&L5cbbMD?FM`>Lr-;E?tj)fD0H0z3XDHu0? z{|bJSX$Y zl5lRB9>{kA!yZUY@ryAbE-vE@!B@pq)TUE7t7kEO_5G~uJhVdOodYn2KMg%_Xp#gm zV#hkW+##RB#M<=DH99=7J#Kvgzt4ubV%OWhTP(Y6mn15uh@39oxm>#t_70DB+{t*w z^ETbCUsk11|I4!;5X(;zHP|^V;A;Lsi>fw=5qDD8d)SB7SjWQ5_wgC)lJ{-eU6OtK zK;NV1lALA`%2%*hAyf9EgVy?_@NLLu+^@)dZZ_`TPcWQJXAV7l9B$5)cGCsw*-FUA z%oiKw+TihT2VW^pwg;31eH1BIhC5JT)|K9Z51vV}uvUG`7d9se;V_rEzIHF`NG zPWWB+rU+~)je+Mvery_;n(2|T9ODz=h41joePY7>RoH9bQaI2A>mr!n&Bf;vDvxaQF5%2shgRTohH6-`pbnITp z7z)_K!VmIJ=N}K&u_hYw>y64!AQ3N#zVXjcL+D*bK*{Uem(5WD&xnH3QInq{OhA5j zR*EU=*OZufN57e7fo=CZgeFLEmlDWtR*=!|W@s<~fT(%7GUP8gALgcUTc zRj)3SvA7*J;mYtKe8}b1A*HUmmt`;@=#^3*C7`RN71X?f-goN#JE+*;=&Kacr~5u! zwDdcnvDF_J_%1=u=dO+?|I7j&Vjh7SF(c@jeC79EJ)zwQ+r${(IU``{@(e=0M_?i} z;s(JWVC%t=DWMt{mWhel(2zxNkVYVB(?DQS;!i_w3PHa4+?u9%+^x5*uD5lpcD$VK zLp3#-y58~upT3v`4sEqDg91bDxn&8{$RDvkd`~ABFKUryZOghs$6fFHK&)?*KZ$r) zON~LSER@}}iO#|uuBVRP9?h5g4ffR)S}rO)kl7TKDsklpv%1Ps-3P~edGo^+N#tiSa&`XoC|a+~<$3Wuy+i6sR-TxLo(;UYb056Iq?td{(cq@HTPksh1! 
zym3|a^RSG+B4nw4dALvB!M9SU!RY3Y$$qeb+`CD@PFpUz)HZ(wB`2k{e`e@{j_x4# zbL?!rSTs-O-+c^2>xkm~!|L_GiE$dHwYI*LIm| zlLrDaEu@Zu655Cqoa(_xymsJ;6IPGObC(ci<=jE5t!EL9=W6=)+q&L=2CLpoW+c^| zL0#k7(=qy`*fcN-sl}<|)2*;+6sYbgszDp^fTS>;VEsq04%L-%KELJjfa^e`9`$Se|)A+4T?Pyv>qO` zTs*618t^#_qg1x@k;4NwxQQ{OnqCN})&!a(QdKrni=DL0C`<<1Vi$ufl8@ z#y3BOv2N!~FM@1w>$;Fn9q{~sr*Bubux|J}^yY?)y4$d^JAtGhsp)y-X!cz4h^<0< z1g34trIC3E4>IkK+7Gi3n=Gib2VVuj*Ap3+g=hs{&`S|!vFYx@Vjsms=9wAJk9wU0 z_AN9{>ofDw&rEeTB*wfM#%n8lwh#8>>-+QW--KJc3D|v0TFtNNC(Rt?T6&L|bhUPl z9l}bdo~3RGugf~3&m_21{z_NjoLPt1$>p3kh_IpVY8_;$#^Cud&7K|+cAc6%eKvbP zaP1*=yyP@#Q|Rlmp+JZsUHj`#aWzsxuB8eLFA7eJfKjsTw{ zDEVwPZ2hJEDpN=RBU^!AMAtAnmvQxg_QW2mqpQ8s)J3Bj>d&5Idg`X;5+Z+(W>%~| zZB@Qm2pfcKDQ@}*sevAIOZ8Yl@{pC&J%V&e9E5CL7~x@n#2^n-sq`lOSjXW!Tx#-M zIpwX6^Ax8eBs2?#c9aSKd*FwP;&7Ic@OxMT?4-ngRGJQ8XReu&V0B!*wW=~cJuUs{ zya|4bSD%({U3F5JJ&lb@U1&c%<7^#>f6?&0P-w7BRGz>;PpIKjEvJ|lmSaD?6VNe@ z4JxNsEk^&@w$QQuULAFz5|!gFzFt7TV-eo&bSFyOv?|rZQmw^u&Wu`!hEZowfNkuI zZK?zG(76yVw@|kIIXeYY|K&Fy=PxUtBclD8HOb3|Ow!Ua6?qFy>+y*ZKdrK=4v&bs z7Uwaqss&ENySdo%?E{-FCy6ym`?K27CVn3xfnziD@Avr&2}9P8L)>q*bfs?JM#b&x z?h_Zjv?e9lMcr+>DVRs8bm(>Yc6w&7hg?)inAYfNoK;_8Gc^n%f{55tu+#o1tw>kI zP2734K33Q-Xj$`XTh>q+8+yt)GRC=l8ZK*!{~$71t0i!Bk#5=&mF;C%eS4JmayOf~ z;GTSul**)E=(_4s8im3wRWW+AY42P`i9VhFciy`~`yyRF?}tQz_Qvq$%Lh6yJ`}56 z;F@z{ydF($k!|Yc&<5GpzY0{>R^_&xYc)yWwCwqMcBcJlIErjg_dRldm7jiy|4@+A z)EX@~*6M5a=#$rk^myd5KFi2L`SZt6X40kzePviCg?lrN=t5N|2<;u)`~FDz3ZhP}zF$GE zKedfEr3~oi0%cS{-L1~CPlQCiR++V5)O5tEzLSxW@#wuxE4*{iLiISaU`$$+aoS^4 zdTRWoOX%Ibk0Nf})zqk=P; zbbm3vNna@N9;xM2=ibgd>3Z_g?5QM2wyun^TAshsN}g&cm|hb3tlr=(>^X*1DlVSP zx^9%BF(|uRvlAfLnQvF^t#r8Y@luNJx+<4Zw@~2WTE?aR=3DY|pXdR!@y|ldeA##G zFz*xM7}AzxM%eCLW-zmes`ZqY){AhCq|Dwq9RR76v3N zNt_r&-RkEYGI=97&;9fdw(YuKGrIh%WKUg>W{xH^+x`GivC6n__cktLx>L)HjKv6= zCB3ky-3(F2_$sW`scTE(S8xaOrorrDq4kfx_nlYpu6Z?!&38?9cz1f9?eXe&xnFh1 z{Vx7|dI~7=bnz>)>!^uLaL7#L(RF?xw;HUBJ*^z$$=$4cX$r zJ8#QVKj)R^Un<%K_L=gR2=5YIAikcV!EDqZGv6+gUF{`;)RKDp8Eu$O=p8#f3O$-s 
z4zP)Klks>Z-a*&{kK_iEw7!-4@MXX6tPH!}1F`3j>djEN)9L8eMIpQf9AR}`6caad z=Vd_a^%3<6)bnX|SH*(3x3eVEc~ZF|)A?oeut@E*8 zJ->;(R6vb^Nwef~Rflfs_7LXU9Wa=V2}}n#9dB-4HrsasUyDs}BV?E!)2(JIJrU1- z4kZ}!nb@2iRx_*Xe-+g^*j5BS%(GVXn$vM1rFG8ys>A4Qo}Q5Nv2l6t!)@BSG}4|K zJI&&!x_x}!y~{sikuDZ7LGK_*p^LEgLe&sdI~t)>vj+)9in^I#izI=C_8iS%mkamz zDEzyx8X+yRZ8GzQ4TgP{jJMv%B8E5Wr8xr(f7lSnfkMe%fT< zU8Yax&9hcl7sO|U=ZRR%cvZZ;RB6j%-iva1`DY0?U=Q3=FY0jf4c^{yrX-Rj5Fi_N z&9t5wJyTy3O*2+(CVam>*lN6Pq40jX(^iUA{sn)hM_)6FkNg6K3l91V_B`H?_0fxG zE-{|b9K43eJ$SSJ+ReL(GwakQc;kI0eJY<^!(MZz9wKG7JY}~hQr(7I@S@N6&lk}z zSyyuV3g=d@%iU6M5`CLB(bk!fYT0*`gm(KOz5BRO`!cI8nju`Y;HB`^+za>k`n4kD zte?ko8XPj;W;rQsXrig#@aKrIWFjvg!@k{UQOXp4LLc#cy8Dp<2y_X^x3 zWReY4jg5`n!JS7cDVMsuBZ!K71%6(oX5Ln`3ET(!Fvrr6qibY;iy(64`2CqGA zqia%(hj2LH4%8kCPg^o9hbPtL2_Pz7?MCL_o2`$#eodF-RlNx-fa)BwJ3{lRi$m{T zw~bJZR^?bu0M(;OIn3<40O9(Up8JX)Jglb8-8Ru;BNY|reU>Ue*(|LbX-w)B5?%TB zK%36pk;x7HGDxIw*ur4N%ul>qzOVa7iLpni{w4eB>08V5p_>DBPMt&5g*>`Vsz(OZ zMwqhAaGSo9+Zxrqa{02}qs66ZwAca8G{-uZ!qNdRrs>-7`i!7m&!7|2`QNYuG#jHW zsSl#lbaiZf(`Fb_z)p0tL0dZmn*kmRE@vS4dTR+QdC zE!d#{T_(k=%r7jkX#xcgIRCa-bzq(ScX^|vQm#OF#T?3UAZ6#L9Wh-=wbu-KH-cRP zQhrBQ?J7Jm&1zyN^`KQC&#E|J$NB>DwDxY&t;bp(hkkE%TUv@o6(sefd*AFOHW<|L zY@@w=7Fv&Vn;Wc#qx(kAu62Z)t%=Lg&HkWZ#eC92{>ajnsL<2BrMWtynF|@!tbUrb z)nBZ!n(;>b=#or>c+3d*~k{oLzt$zym- zZnh%cziy7Y5>u5JSU$hewU&{5BP-__#8)KumKMx2V%pvLDYExuH3Kef4=j@!yVu@-cQzjex~(pG zmP?D6`s=`|A7Zb*0XwDJfSZp*?&bIfA!Hj0WI--v`Q)HBUvuHbP5s@>Y1Xf}2wH4o z9_`KM@v!S1v}Be(BzAgD_Z}B{sdPBJt15i6Hx{ToMYw@gIxJdxbN^`RHF|9}Mc1r+ zJ^xl-2(iR6RLH_2J~~1*jArd!<6e=17{7HxP{sp3>*s9M@%+95Lm~~x`hkB9UOrbiuY%X;AUO-H1;qbE(^XlM6bWs9`cL^X+3Q3qe2wea=s?3m)uAWtB`j?uFz^RD?`Xwsnk zq~ci`$0VCdRoW-h%p#Z&uYr!OuNpt=-_BhGaEN!s|W(M*?X|^Ym-wPjZAwvtVjQZ+Z zRMs`M*rV9nlCEznRPGgtnyZ99G_<5p?#PkJSozZNXq*?P?&r8mI(bhamdbJ(7yoC zo9VEblLq(@b&qgIQ5W)UQd#&zDcGtc`x?!L>xmRGEmgLo_mz`%rirvmH=BQu@y?w! 
zaSr3CcbZU7#x1j==T2a?&bSvRxxZ3fIXay0!xRxTa1=!+KFd2F=TrI}})VQ8+E$W0M(gkwZEig=HFy;?a- z6_IZ(AYM@&OMO&sR8yyw{(W7m(yTm)_ zoTD)nI=3em8giqqi>hF1D^HCe?8T+s#N2GX28e!cB{Jkrau{nU7V;QvGjrc2-d@f0 zJM<^cdt6W|(t}FSn4c`x5Zc2Xa|Qm>Rj-@?!&p*8Hv$e3&sZqRr}OMB*L%)v;- z;Og`WYu82pGQDXFH)58^=(t(CyYIDQ5KRk()y8PAfz+ZoW4N_c5BpKXyA3W2`j;&b@yHN3fR z+WUaiM3}N0O7U}6FoJ08jk0op3-4tIkE4l?WD30%Pouq}OlFA=7@YKs(M!_Bm;x^k z2ivCM=`sb!4P3_w8&|j8)?}4o&v@|yk8#uKN~`sEvl0MLci@j_kZ`@M2& zO-RpTE9OT(S1wZLBG$df*X*@;-l8Y$wD$dCIKvQ>`uGl=vR+lS; zV<&N9Yr4y3vz6B@t?#yFiB%eL;ScHfTmj)~wU!|F%JQ0KvXe$uZfEila3FdA-(elbpBx@Ag4<=2jBxpAp3MV|dc zpkMI(Ex)UfK_oTZbg-tajgh;er4KG?Mfjp2Jc3+#F1u@#*}$q#3lDQ z;T{^n8-vWvbLX%s#pl}XEW&U?J2RX7C+`bn#qKOsG%D?}!Z+oB_3qRLZV?y9Z58(r zQcQ5oa4YNQiRN6UNIpnwlafxr^Y)_4$e3x*_G{3q2oR&#;bWAlN9W_O6DE8KQPR7O zE>g(ST!8G|RLyAX(Quj3n`hn~7(6lTGC1SXYU#7{Ls7F!nkJW+!Bcwr*1U|ji1qVv zTqaY2$yYVj{%c*SrBz}J_1AZ5V$SO%OC0R#8K$G@W$JXK>b(n0QkB;IN)+5sHz!g@ zxdo2Ozciues)nT`be$gNNMo+EFnIY?HKgo&OwU~?bb9IG-a1piyRk6La*^J)IR1xl z(#`Q68>xx=n5_i{;k~bt%hMS~TE<74vpXBB#A%tk$&*ravxREDRpw!I!M>kp1ibph z&_a8-#OsLhh66#vVcneSGLOB5XSFt8zvu4MkMp~H7<9-bFO>XN+5fCnMfjm4}G2TV75MohkJWQ9|2xw5VFoz437lC#oJde>2Z$ zT6kyo^MaAOhi-suvAo{>!<`BBVXWpA&7}L)6MJ1k-HGmn)6+IxX{X%=>hq3B(TfY@ z>fYa~qElijXb)BqoCR8GzrIJ;bbdqf2?$8wwG^PZT}M%2c1` zowqaGIwY$c5Drr05e9d$Z+6|B&2w*?HC^ZRtbOl6ynQ&YiN?jxR>vPy96-=t_Ik@# zmYTYX6CV+WSRC%BVl>w2F+<|Y?ryDT52k3mH!I55T?h*|jcR*^h{uE2%Lxj31tv>Q z2Go#g6eefoY4vJtbH4a^Gi32pz!=E5LzN#yo`aHc&e>02%@yd?F%6Rkw*1|>ewBPh zv}^6fFeh5Io5kv)<`;s_Gm?|Qz@4kbBk@r@L z@$kzIkc0kEtX>>2x&}mAp>~h09tX+xXa?FxFT7FScrM8W1%~0>rFJOc^NWtv_Mhd_ zlf=23IYHe00KR6npu_QbvBNjz9hK#c-pp47gr6?-Q+2G3#EXWi*YmTrF?|lR^UJCO zH9_~$Vu`%+h6(>!vEjGG7))Y%1wlc1i@S|B=TFP~`Axf8g_W%xF>xW_@|IC%Gt*9C zI`g5)eNX@&($(r}ix9`%;Vo-htS`~_242ZqsH^)jcyj@2O?RmywP!q7gFC8Gv9lUc;$BMmL+q^Pe}_0x1le}-?Glb8+<>WLVqE32`#kg zJBuFeSliX~=|(k>YmYCOpi3oL4%Q6y^H23!7n{$BDSK~AYg&hq+j-Sgzs6S2jLB@5 z448KEd=JE6H719|c=luqWC#MrNbjw7-Bdq0iMtBFjF@MXzD@~)Uv9a>s7OMofV7)_ 
zH<%e45;sQ$fzGo@u6EsyQ*ia<=fu^D%d9AM0v6H+JVqbh(F}i+&cVrPOB=anX6yL< zS~`3@tRS>SN64}+@6qrb%?kUXi7Fd=*q5qS`c*+wd{@#2^lJD@;7r! zL(JIYp1`TTJ$5q};GCQ6cVX~7J23q1S3$AJ5fl`;R;$YTvXzqxwUYeH^{V{d9#*-A zxz(>@iXO5=ICt+n#)b{uTM2gLMiIRd!z#r{J5 z#giU0PqTJV&gJ@RS-m*MNF|7#@wM`)jgCsh72Q^wUb$%BbCT&Agu+iyer(bk(%8~0 zj@S+1BKzmL`i7|Tg~t1e-(N`zL#O;t)U;-1ObK;uoaZ}{;B2XUvR>1OwS63>Y65~s zqLVB_(D`d7?o75Pujev{>&w5E!Nmj+5HMNE-<8r}fDUk*h|6=Guh5e3TWL1@qQVP7 zv3vJY1~^mnn?=?h#i3jlys}xqJLr-5RN0vwzJ`ru-y$TPq20vdETaZyQR?oCln`!7 z0(CGQxC9vSFnNqD;3HW=p*}O5J=*?dN00r(kl0WrRV_kMc>}Fam(k`^tvv zj*)_Njx8I0*62-Bb&|u}Ww87)h zTjT=}jdK`xw)~7Ys;#ja=Ro3zyIV6g)oWPuvG&W7rJ$CKj3vdrEI5w&dTc}PKFJvC z`V>)VNdMNw`4~oCY^= z@Jg2G&TV;3>)c1#E=H!>$cFEwx4ONUvp{t{r-emEfa<8v)|g-z$ifD7 z(8(?wL{n5P7w8+MwkuNS2QN^4(tD~6AdQ1xu8%PrP|MN**XV}t>9H85k)ucDD<5l4 z6M2EhP4t2!nj3J-ddG2I+=C;2`f5FI1^n_y*Q3-ZvvVK0DsH0~RSh84ZtSB;zB{)1 z3rGWF8^`Y_1MZRm7Mqv8Mx6%-T#VvDvgu=lmNFp~t~8ELZ8TC!xokzi9Gv+!S~{k? z@C20Z_#qzER#Txl-%dm>xlT(djmHGpmBCBNK)!M{oiloc+aRuVILrLm4D^*6E$zqi z?bIKc;T>|&fJHJpb?ikn@#MN@zPIyQe3X9nA}s`;66Q9G64n9|EZ=%~3fifVu0^r= zPFiy*0d=!gJ^fux!K>tGDVP@Fz!|kZaWmV+mq|ZpXzNR%-L|nOU)x42iJE7~JOPIB zA<0){mc})2F_SO7CmKj=WS%f&!9{|FJieUYbG$(R?ZT3&{1piFDn&o{zT3bupJenk zWi=`)Dle*Hur9{{Dkl9&s}kvq?Vg4ai`G=rY;6b z4au+Ob*LE=@4#LYuJQC{#=e5`)?QXIcalxDE{0``vyo3cA~iZE2~&XNDy1H+VNgrgjwGNGxcSMbjW(j zXxMs+ahz3p?OZUv_Vf|S907bM;71+*7@bMv5>;W6z@d)8sVE;t3KxjA6Uu&xTQ_^y zqWcujwQ(2Y{HgJInkDKlVuU1%oIo-zZdA7PLAGyln|*SjAE5k89urg5an4QJd3P3S zaw+>qFAI5EPxn2;C#oR+Oaub4tTH`AXu&(Dd7Ue^d0@LA6a#Ein$9|p=rt39#0$w1 z$?nLW^x*UAhxx}^klaYjXYceCw~na?5er;hU&W9yrs4(geLBT~p(@QyW(z?r&tVI0 z@+mtr&1*^AoGunI_6N?SrHx~RekT&wh_zYLmMb-8R`y`|hxYP%%amK|n%S=1H@1Be zDp-P7t;ja^?reM$G!$|_KX^9EN=UA0=~Bh%Y3rbI;{moU^VDJ=YS)W>7~$mXljQUU zUt3<&|5}L!JBXyYLSm!uz!KovFm-#QhNlTpWl(<1s2?3h`%oIBF|I7i*~GBB!E4(; zlb=yVc8+y+ddDL2H|?raFRx?k-P^bMZ2Pyag<=*~-+ZHbwB4Cj+wCwRwaMjw>L8St!{2twtT73<`39S` z!Y1}1vtbcqYLugHtE;+dkS6F0Pap9cjNNK^^3`X_Ybl`D;7$QPFQbr5*Mk#WOM^lw 
zN{E<)+0`v9vJTK#iSnnnXIWhj=%X?3E<~=`ZM>;^967q?wNk(N_QI>LZXoLo%-L@$ zc3!){F`b-C?S7(sLt8JeJtlu69%*6P1?$O0NoHSyl3IEtK0nj`BrA4RWUi9D3k>H- zSlih5d^a4unz)#Jy@O2KFH?gSLf5aM?-$*?Hlwfm?FCD_ zxb?VJon*-BP}Sp-&Zjl0a-TQ0jo0C*<*?eocev#T{%s#L4m-nYHMUeck{sAen)FRJ zTZcP~o}78`a^-#q7GIR-fzZ_~dc`Te+LotIuqB#y#dd5i_mr#1G{p;Uh_#QPZ?}t? z_pirf7H-CL>G=8Y9u%Qn!mnaro^jq8FvC}^ERP>^H+4BHpLgAMz9nHMd2@^rTw+`(h$tVonhoE9L|`YrWz}=i|T;I-jP-7 z2I;{EB3G7TD5u4|tk@SKg`EqGb2-lg_wG8o$*jzXZ+SCJn+M$&84M>vSBa``)W%(` zL%3LDnl(hhKE)ebogY&|=YOgFuSLV=K!GCLn7s-Z+ovQ6u`*H@xbBu{5E}*3Pfj&b z;P?j3=pnoiI@p$;70>1*UGePJ`;Rx>5`XE?u_FEP5uk?OX?-r-hOeHU4)A$eC(GYx zMT#8B-ncTA-(a=F0zFZ_S3-@~zdxG&{q_5K5N%|A;Q#YiHqL;&*m8~G&hb3IPmyK9 zy!TK3eLB92D5etkHOKAgeOArsTUO2vQOOUPa#%n=x>ui^4FR^A*-+BYvE#0c2ts55uA^F^|~;n;O6D;aRPlHtQi6^kM86%mSd!Zpxv+L z#OlIT-Do`8ONsM;HDpVHdrn6gIAS;qbsf$BnD+oEPYCp6_08u{-9Ju)(d_~&D25DvgYFV5cHfEfrqDH zdkx1)D_R9G7#xRBpwk*sd+I>-X;3~}yak*EolTvbzw?x?~)z?FpIgJm49)T0ipB|E=f zNFR&<#X-4`%58F^>8IO45%+lFAMYVgn~Xem$(|^&32-@>ha_L3qSXC`*e}?K z2rZ{+$ThOlkwTy9L66Iy+IsQ!#0%O@W+G%us1m*Y&7UfD4ee=Ra#kyJH4uAZN%E$adhy0CY6D4shC$o&8oDkLatu$8MNXmP#d zxX+I#Ri-Sm-VbHMGFm)DDZvWz?Z1=>Y%)UY*Hj+HO7RCuhJtCezUONv1HV#eAJ;ZU z0hN=rxS)IsdR~jC@S-b*d#0Qxnx3xpu03m)t^G>iPsIWu8&Ibg{B3L(0eDMlQORJp z5=QYBj$2TuZj5QE<%GL^a(t%CX#GoCKBp4M=V}-#-cn^d$lm(3gYip<0feLpZgxW% zR3x$Hom#-Uj8k*7zQBu67Fn<#OF1o3)XiX2VZt6Nl>3W1vdGhFXky33FG<2-B12vz zLAE-k-SR|W&j0X&|I}BnTi6qEiCB{}gebuC#F?^{ZoJTq33r2H?Fu;kd+2&PP5pcl z`MdF|M-rolJcv{Vx{OBPRD?$bicGV37{eoe<9B=nuO@&fIa~y_5b)LR93Q8D17U)t zU>TGTH};DjGDsf+xeaD9l7!J4o(@JW=2!GgUlHOj55YJ2=OgPPkmW8vj2O=hJED3H zf0lB!FPw!sXzkOvnz+@qhqtO&<$h^hryjEY1&Y|Qn;`qF)|*1UZFh?^Egir1C4P4r zNdN>v%Enw4491C3Ko)!*j;-_Ke5M#{6^ zlecQaUy}T9w15P4_58!b!8$%_A??B*YHhrU{jdzOJ-yZPTZ*6X8YyZUj*q;VWnub64%u#N>8yp?=hdgoDM3eU zoDl4G`{}!ubuUQa-V#M-7TcwB=Kliy{54>ZO^IaP0Y8CWB~aB{c2Nmc)$-`CQeo>J zE+(1%qD>@5*y1F6Ly`J&~jJ>;=VA3KI56Oq)hgr*Zi<8KgUs2x)&jjkiSIt1%*y)AOH>YW`*VbUl>eV_z#N1ZiSjSN{u%h~GCmK?G$s1q zivd5T0;emcs}ubHFbE}hftq4rBLA4~m^o6vS5yFKkUlWjk1xa>qqRr@%3f}h$+^IW 
zb^H^D{7<0)w}PCgZ5j61OsjyrS5aJFfeT=HB?$(bsPvE535;smrOCGsT!zyp&F%i+ z;~)LN0^#GnT#)Y5_qb${plWDs!gp-p=2U=h?xc71{L&Xzc-(ZJ5jI*1JR3?1M*m5Q z=a25#ydmg;&cm^0JcEer7eH1Bphh|FM-v_K0x*g2h#eQ${-XqtXytB|MrPlGM#O1SJx zLartY3cE`zz~MwjD*h*uo2zz7GOO89lYki6dJ;|Xs7KXaCuu1>1&xD@$6lz<{w z7D(Vm5x%nin;O6C%KxHq4VY-1Y z@iBycRr2LOP9}Lm;0N7?e2MgDLOv&U$U_?J;rQq~`_^9h36Fj>vOmk*|J!n=S5mP=!DRnrmfX zY3gs>0v1pGEL0)UW1SN;JQsTPG3kCsj7mdTisXx_Fdec_G+vn1)iE*!O zwd$>e8*8T=CLR_d#I9reZmbI$j?X$w2$yg}9ybZn3uBaUH{yxqwRfVOV{$kSl%xhn z3Zr`}++efdt_DADbNwIvg3mX^M=3QYr1bTPJ3JyqyKssVemQLR150BMvaVR*YapIK zZ0T-b?&R1WhRyt#b4RxkYh$TlE0NY@Z&s90CA1)imqV`4UK3hK_m|$!rn_O-p{QZK zr!07%-l^S1hrFo?`*h#_Jb4qUb*b7YQZ&Tv61fESgw#6GyShFK^4gqD0r}Lx7_%YI zUaCt2E0l}kL;F2B*uEVaW=uME>A?ugPwuvK&z)7qG=$HpAAhW3KYxttyRzOGaIgM@ zVhkKPx~F7a2Ii3m4CRLWF)4!$Ku*@VINb1JZ*GmD-PDf+G3dx34Q{;*a_qlyqC-6z z;(#i!@Hw;arTSDfvlqz_NX!JhJPX-Y`s6N=H>E}Jr8TfQ&>m#dZ6Us5Hv{A_4%(`= zO%|879HP_sPpkVmxB6{zp@=}fVeurF%8=1KdkbraP? zQ_+1buU1s~#YM!W%sZOm3`^h6Rh(Y(o*NZ@G~x7<=VXsd41H8pnm1&eC$C9aew)OL zmtx7mxG8Nl-GXgt4U0&)8^I>wEx$Fl$(Z)DP(23BZA92=qni1vG=Y~fsB$8kfRY|* z*}cRosVWSTYY^RW;OdY~#)NCOxj&u?59^99mhv~>Yha`bf>{$6Zk<-^Sivq!JW93L z1K%6ixAjY5X@fBOCwaK10mo96@p)(y;F%-as?{p%sK{GkI(k0$*5{oAoDU$LUW zK|@wQF`wl(zF|!>-*0>ymEOo>ZSowkw#v`{Y~5)avwcfyqhRy#GWTTtkZuQMHa5@> zRdyDy(?G%PT4;Wm8Z~y9V5DDmy2QjmHfLM-)yK8X-IUCIu8mPv9_eC(RZ@d-SJa!- zHLQc=N7t3D6Rxze4NzM(p%gr!9UlD^S6q|6<+pEJwKzNvo_$3vRFjDjoQ6?xKggzI z?u&}J{p4#C-2)xgVWxW_L$EZl(z6>SFEi;D5nroM2m0(V_dN;L*`{2=vb;dQ8n{FKvl*az3F3K{yf`uDP`N=n4Tvo$cF1Bb<&w+Z`gL;pVdufspx**{+J7|8!H{8tR{`|E$S`Hwb#An>22{8o*B9sQ>% sfAHkDfBMhB{);F7`uhJRZRU<3O@~V%&zdir6M#Q?nfrH(?wCCNKho;!BLDyZ diff --git a/docs/source/references/index.md b/docs/source/references/index.md deleted file mode 100644 index 51e3dd0ba..000000000 --- a/docs/source/references/index.md +++ /dev/null @@ -1,18 +0,0 @@ -# References - -- [API Reference](api_reference/index) for the Llama Stack API specification -- [Python SDK Reference](python_sdk_reference/index) -- [Llama 
CLI](llama_cli_reference/index) for building and running your Llama Stack server -- [Llama Stack Client CLI](llama_stack_client_cli_reference) for interacting with your Llama Stack server - -```{toctree} -:maxdepth: 1 -:hidden: - -api_reference/index -python_sdk_reference/index -llama_cli_reference/index -llama_stack_client_cli_reference -llama_cli_reference/download_models -evals_reference/index -``` diff --git a/docs/source/references/llama_cli_reference/download_models.md b/docs/source/references/llama_cli_reference/download_models.md deleted file mode 100644 index a9af65349..000000000 --- a/docs/source/references/llama_cli_reference/download_models.md +++ /dev/null @@ -1,165 +0,0 @@ -# Downloading Models - -The `llama` CLI tool helps you setup and use the Llama Stack. It should be available on your path after installing the `llama-stack` package. - -## Installation - -You have two ways to install Llama Stack: - -1. **Install as a package**: - You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command: - ```bash - pip install llama-stack - ``` - -2. **Install from source**: - If you prefer to install from the source code, follow these steps: - ```bash - mkdir -p ~/local - cd ~/local - git clone git@github.com:meta-llama/llama-stack.git - - uv venv myenv --python 3.12 - source myenv/bin/activate # On Windows: myenv\Scripts\activate - - cd llama-stack - pip install -e . - -## Downloading models via CLI - -You first need to have models downloaded locally. - -To download any model you need the **Model Descriptor**. 
-This can be obtained by running the command -``` -llama model list -``` - -You should see a table like this: - -``` -+----------------------------------+------------------------------------------+----------------+ -| Model Descriptor(ID) | Hugging Face Repo | Context Length | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-8B | meta-llama/Llama-3.1-8B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-70B | meta-llama/Llama-3.1-70B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B:bf16-mp8 | meta-llama/Llama-3.1-405B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B | meta-llama/Llama-3.1-405B-FP8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B:bf16-mp16 | meta-llama/Llama-3.1-405B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-8B-Instruct | meta-llama/Llama-3.1-8B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-70B-Instruct | meta-llama/Llama-3.1-70B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct:bf16-mp8 | meta-llama/Llama-3.1-405B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct | meta-llama/Llama-3.1-405B-Instruct-FP8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct:bf16-mp16 | meta-llama/Llama-3.1-405B-Instruct | 128K | 
-+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-1B | meta-llama/Llama-3.2-1B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-3B | meta-llama/Llama-3.2-3B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-11B-Vision | meta-llama/Llama-3.2-11B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-90B-Vision | meta-llama/Llama-3.2-90B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-1B-Instruct | meta-llama/Llama-3.2-1B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-3B-Instruct | meta-llama/Llama-3.2-3B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-11B-Vision-Instruct | meta-llama/Llama-3.2-11B-Vision-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-90B-Vision-Instruct | meta-llama/Llama-3.2-90B-Vision-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-11B-Vision | meta-llama/Llama-Guard-3-11B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-1B:int4-mp1 | meta-llama/Llama-Guard-3-1B-INT4 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-1B | meta-llama/Llama-Guard-3-1B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-8B | meta-llama/Llama-Guard-3-8B | 128K | 
-+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-8B:int8-mp1 | meta-llama/Llama-Guard-3-8B-INT8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Prompt-Guard-86M | meta-llama/Prompt-Guard-86M | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-2-8B | meta-llama/Llama-Guard-2-8B | 4K | -+----------------------------------+------------------------------------------+----------------+ -``` - -To download models, you can use the llama download command. - -#### Downloading from [Meta](https://llama.meta.com/llama-downloads/) - -Here is an example download command to get the 3B-Instruct/11B-Vision-Instruct model. You will need META_URL which can be obtained from [here](https://llama.meta.com/docs/getting_the_models/meta/). Note: You need to quote the META_URL - -Download the required checkpoints using the following commands: -```bash -# download the 8B model, this can be run on a single GPU -llama download --source meta --model-id Llama3.2-3B-Instruct --meta-url 'META_URL' - -# you can also get the 70B model, this will require 8 GPUs however -llama download --source meta --model-id Llama3.2-11B-Vision-Instruct --meta-url 'META_URL' - -# llama-agents have safety enabled by default. For this, you will need -# safety models -- Llama-Guard and Prompt-Guard -llama download --source meta --model-id Prompt-Guard-86M --meta-url 'META_URL' -llama download --source meta --model-id Llama-Guard-3-1B --meta-url 'META_URL' -``` - -#### Downloading from [Hugging Face](https://huggingface.co/meta-llama) - -Essentially, the same commands above work, just replace `--source meta` with `--source huggingface`. 
- -```bash -llama download --source huggingface --model-id Llama3.1-8B-Instruct --hf-token - -llama download --source huggingface --model-id Llama3.1-70B-Instruct --hf-token - -llama download --source huggingface --model-id Llama-Guard-3-1B --ignore-patterns *original* -llama download --source huggingface --model-id Prompt-Guard-86M --ignore-patterns *original* -``` - -**Important:** Set your environment variable `HF_TOKEN` or pass in `--hf-token` to the command to validate your access. You can find your token at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens). - -```{tip} -Default for `llama download` is to run with `--ignore-patterns *.safetensors` since we use the `.pth` files in the `original` folder. For Llama Guard and Prompt Guard, however, we need safetensors. Hence, please run with `--ignore-patterns original` so that safetensors are downloaded and `.pth` files are ignored. -``` - -## List the downloaded models - -To list the downloaded models with the following command: -``` -llama model list --downloaded -``` - -You should see a table like this: -``` -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ Model โ”ƒ Size โ”ƒ Modified Time โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ Llama3.2-1B-Instruct:int4-qlora-eo8 โ”‚ 1.53 GB โ”‚ 2025-02-26 11:22:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B โ”‚ 2.31 GB โ”‚ 2025-02-18 21:48:52 โ”‚ 
-โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Prompt-Guard-86M โ”‚ 0.02 GB โ”‚ 2025-02-26 11:29:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B-Instruct:int4-spinquant-eo8 โ”‚ 3.69 GB โ”‚ 2025-02-26 11:37:41 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B โ”‚ 5.99 GB โ”‚ 2025-02-18 21:51:26 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.1-8B โ”‚ 14.97 GB โ”‚ 2025-02-16 10:36:37 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B-Instruct:int4-spinquant-eo8 โ”‚ 1.51 GB โ”‚ 2025-02-26 11:35:02 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B โ”‚ 2.80 GB โ”‚ 2025-02-26 11:20:46 โ”‚ 
-โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B:int4 โ”‚ 0.43 GB โ”‚ 2025-02-26 11:33:33 โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` diff --git a/docs/source/references/llama_cli_reference/index.md b/docs/source/references/llama_cli_reference/index.md deleted file mode 100644 index 09a8b7177..000000000 --- a/docs/source/references/llama_cli_reference/index.md +++ /dev/null @@ -1,276 +0,0 @@ -# llama (server-side) CLI Reference - -The `llama` CLI tool helps you set up and use the Llama Stack. The CLI is available on your path after installing the `llama-stack` package. - -## Installation - -You have two ways to install Llama Stack: - -1. **Install as a package**: - You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command: - ```bash - pip install llama-stack - ``` - -2. **Install from source**: - If you prefer to install from the source code, follow these steps: - ```bash - mkdir -p ~/local - cd ~/local - git clone git@github.com:meta-llama/llama-stack.git - - uv venv myenv --python 3.12 - source myenv/bin/activate # On Windows: myenv\Scripts\activate - - cd llama-stack - pip install -e . - - -## `llama` subcommands -1. `download`: Supports downloading models from Meta or Hugging Face. [Downloading models](#downloading-models) -2. `model`: Lists available models and their properties. [Understanding models](#understand-the-models) -3. `stack`: Allows you to build a stack using the `llama stack` distribution and run a Llama Stack server. 
You can read more about how to build a Llama Stack distribution in the [Build your own Distribution](../../distributions/building_distro) documentation. - -### Sample Usage - -``` -llama --help -``` - -``` -usage: llama [-h] {download,model,stack} ... - -Welcome to the Llama CLI - -options: - -h, --help show this help message and exit - -subcommands: - {download,model,stack} -``` - -## Downloading models - -You first need to have models downloaded locally. - -To download any model you need the **Model Descriptor**. -This can be obtained by running the command -``` -llama model list -``` - -You should see a table like this: - -``` -+----------------------------------+------------------------------------------+----------------+ -| Model Descriptor(ID) | Hugging Face Repo | Context Length | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-8B | meta-llama/Llama-3.1-8B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-70B | meta-llama/Llama-3.1-70B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B:bf16-mp8 | meta-llama/Llama-3.1-405B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B | meta-llama/Llama-3.1-405B-FP8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B:bf16-mp16 | meta-llama/Llama-3.1-405B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-8B-Instruct | meta-llama/Llama-3.1-8B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-70B-Instruct | meta-llama/Llama-3.1-70B-Instruct | 128K | 
-+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct:bf16-mp8 | meta-llama/Llama-3.1-405B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct | meta-llama/Llama-3.1-405B-Instruct-FP8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.1-405B-Instruct:bf16-mp16 | meta-llama/Llama-3.1-405B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-1B | meta-llama/Llama-3.2-1B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-3B | meta-llama/Llama-3.2-3B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-11B-Vision | meta-llama/Llama-3.2-11B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-90B-Vision | meta-llama/Llama-3.2-90B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-1B-Instruct | meta-llama/Llama-3.2-1B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-3B-Instruct | meta-llama/Llama-3.2-3B-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-11B-Vision-Instruct | meta-llama/Llama-3.2-11B-Vision-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama3.2-90B-Vision-Instruct | meta-llama/Llama-3.2-90B-Vision-Instruct | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-11B-Vision | 
meta-llama/Llama-Guard-3-11B-Vision | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-1B:int4-mp1 | meta-llama/Llama-Guard-3-1B-INT4 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-1B | meta-llama/Llama-Guard-3-1B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-8B | meta-llama/Llama-Guard-3-8B | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-3-8B:int8-mp1 | meta-llama/Llama-Guard-3-8B-INT8 | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Prompt-Guard-86M | meta-llama/Prompt-Guard-86M | 128K | -+----------------------------------+------------------------------------------+----------------+ -| Llama-Guard-2-8B | meta-llama/Llama-Guard-2-8B | 4K | -+----------------------------------+------------------------------------------+----------------+ -``` - -To download models, you can use the `llama download` command. - -### Downloading from [Meta](https://llama.meta.com/llama-downloads/) - -Here is an example download command to get the 3B-Instruct/11B-Vision-Instruct model. You will need META_URL which can be obtained from [here](https://llama.meta.com/docs/getting_the_models/meta/) - -Download the required checkpoints using the following commands: -```bash -# download the 8B model, this can be run on a single GPU -llama download --source meta --model-id Llama3.2-3B-Instruct --meta-url META_URL - -# you can also get the 70B model, this will require 8 GPUs however -llama download --source meta --model-id Llama3.2-11B-Vision-Instruct --meta-url META_URL - -# llama-agents have safety enabled by default. 
For this, you will need -# safety models -- Llama-Guard and Prompt-Guard -llama download --source meta --model-id Prompt-Guard-86M --meta-url META_URL -llama download --source meta --model-id Llama-Guard-3-1B --meta-url META_URL -``` - -### Downloading from [Hugging Face](https://huggingface.co/meta-llama) - -Essentially, the same commands above work, just replace `--source meta` with `--source huggingface`. - -```bash -llama download --source huggingface --model-id Llama3.1-8B-Instruct --hf-token - -llama download --source huggingface --model-id Llama3.1-70B-Instruct --hf-token - -llama download --source huggingface --model-id Llama-Guard-3-1B --ignore-patterns *original* -llama download --source huggingface --model-id Prompt-Guard-86M --ignore-patterns *original* -``` - -**Important:** Set your environment variable `HF_TOKEN` or pass in `--hf-token` to the command to validate your access. You can find your token at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens). - -```{tip} -Default for `llama download` is to run with `--ignore-patterns *.safetensors` since we use the `.pth` files in the `original` folder. For Llama Guard and Prompt Guard, however, we need safetensors. Hence, please run with `--ignore-patterns original` so that safetensors are downloaded and `.pth` files are ignored. 
-``` - -## List the downloaded models - -To list the downloaded models with the following command: -``` -llama model list --downloaded -``` - -You should see a table like this: -``` -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ Model โ”ƒ Size โ”ƒ Modified Time โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ Llama3.2-1B-Instruct:int4-qlora-eo8 โ”‚ 1.53 GB โ”‚ 2025-02-26 11:22:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B โ”‚ 2.31 GB โ”‚ 2025-02-18 21:48:52 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Prompt-Guard-86M โ”‚ 0.02 GB โ”‚ 2025-02-26 11:29:28 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B-Instruct:int4-spinquant-eo8 โ”‚ 3.69 GB โ”‚ 2025-02-26 11:37:41 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-3B โ”‚ 5.99 GB โ”‚ 2025-02-18 21:51:26 โ”‚ 
-โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.1-8B โ”‚ 14.97 GB โ”‚ 2025-02-16 10:36:37 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama3.2-1B-Instruct:int4-spinquant-eo8 โ”‚ 1.51 GB โ”‚ 2025-02-26 11:35:02 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B โ”‚ 2.80 GB โ”‚ 2025-02-26 11:20:46 โ”‚ -โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ผโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค -โ”‚ Llama-Guard-3-1B:int4 โ”‚ 0.43 GB โ”‚ 2025-02-26 11:33:33 โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - - -## Understand the models -The `llama model` command helps you explore the modelโ€™s interface. - -1. `download`: Download the model from different sources. (meta, huggingface) -2. `list`: Lists all the models available for download with hardware requirements for deploying the models. -3. `prompt-format`: Show llama model message formats. -4. `describe`: Describes all the properties of the model. - -### Sample Usage - -`llama model ` - -``` -llama model --help -``` -``` -usage: llama model [-h] {download,list,prompt-format,describe,verify-download,remove} ... 
- -Work with llama models - -options: - -h, --help show this help message and exit - -model_subcommands: - {download,list,prompt-format,describe,verify-download,remove} -``` - -### Describe - -You can use the describe command to know more about a model: -``` -llama model describe -m Llama3.2-3B-Instruct -``` -``` -+-----------------------------+----------------------------------+ -| Model | Llama3.2-3B-Instruct | -+-----------------------------+----------------------------------+ -| Hugging Face ID | meta-llama/Llama-3.2-3B-Instruct | -+-----------------------------+----------------------------------+ -| Description | Llama 3.2 3b instruct model | -+-----------------------------+----------------------------------+ -| Context Length | 128K tokens | -+-----------------------------+----------------------------------+ -| Weights format | bf16 | -+-----------------------------+----------------------------------+ -| Model params.json | { | -| | "dim": 3072, | -| | "n_layers": 28, | -| | "n_heads": 24, | -| | "n_kv_heads": 8, | -| | "vocab_size": 128256, | -| | "ffn_dim_multiplier": 1.0, | -| | "multiple_of": 256, | -| | "norm_eps": 1e-05, | -| | "rope_theta": 500000.0, | -| | "use_scaled_rope": true | -| | } | -+-----------------------------+----------------------------------+ -| Recommended sampling params | { | -| | "temperature": 1.0, | -| | "top_p": 0.9, | -| | "top_k": 0 | -| | } | -+-----------------------------+----------------------------------+ -``` - -### Prompt Format -You can even run `llama model prompt-format` see all of the templates and their tokens: - -``` -llama model prompt-format -m Llama3.2-3B-Instruct -``` -![alt text](../../../resources/prompt-format.png) - - -You will be shown a Markdown formatted description of the model interface and how prompts / messages are formatted for various scenarios. - -**NOTE**: Outputs in terminal are color printed to show special tokens. 
- -### Remove model -You can run `llama model remove` to remove an unnecessary model: - -``` -llama model remove -m Llama-Guard-3-8B-int8 -``` diff --git a/docs/source/references/llama_stack_client_cli_reference.md b/docs/source/references/llama_stack_client_cli_reference.md deleted file mode 100644 index d4d79cea1..000000000 --- a/docs/source/references/llama_stack_client_cli_reference.md +++ /dev/null @@ -1,589 +0,0 @@ -# llama (client-side) CLI Reference - -The `llama-stack-client` CLI allows you to query information about the distribution. - -## Basic Commands - -### `llama-stack-client` -```bash -llama-stack-client -Usage: llama-stack-client [OPTIONS] COMMAND [ARGS]... - - Welcome to the llama-stack-client CLI - a command-line interface for - interacting with Llama Stack - -Options: - --version Show the version and exit. - --endpoint TEXT Llama Stack distribution endpoint - --api-key TEXT Llama Stack distribution API key - --config TEXT Path to config file - --help Show this message and exit. - -Commands: - configure Configure Llama Stack Client CLI. - datasets Manage datasets. - eval Run evaluation tasks. - eval_tasks Manage evaluation tasks. - inference Inference (chat). - inspect Inspect server configuration. - models Manage GenAI models. - post_training Post-training. - providers Manage API providers. - scoring_functions Manage scoring functions. - shields Manage safety shield services. - toolgroups Manage available tool groups. - vector_dbs Manage vector databases. -``` - -### `llama-stack-client configure` -Configure Llama Stack Client CLI. -```bash -llama-stack-client configure -> Enter the host name of the Llama Stack distribution server: localhost -> Enter the port number of the Llama Stack distribution server: 8321 -Done! 
You can now use the Llama Stack Client CLI with endpoint http://localhost:8321 -``` - -Optional arguments: -- `--endpoint`: Llama Stack distribution endpoint -- `--api-key`: Llama Stack distribution API key - - - -## `llama-stack-client inspect version` -Inspect server configuration. -```bash -llama-stack-client inspect version -``` -```bash -VersionInfo(version='0.2.14') -``` - - -### `llama-stack-client providers list` -Show available providers on distribution endpoint -```bash -llama-stack-client providers list -``` -``` -+-----------+----------------+-----------------+ -| API | Provider ID | Provider Type | -+===========+================+=================+ -| scoring | meta0 | meta-reference | -+-----------+----------------+-----------------+ -| datasetio | meta0 | meta-reference | -+-----------+----------------+-----------------+ -| inference | tgi0 | remote::tgi | -+-----------+----------------+-----------------+ -| memory | meta-reference | meta-reference | -+-----------+----------------+-----------------+ -| agents | meta-reference | meta-reference | -+-----------+----------------+-----------------+ -| telemetry | meta-reference | meta-reference | -+-----------+----------------+-----------------+ -| safety | meta-reference | meta-reference | -+-----------+----------------+-----------------+ -``` - -### `llama-stack-client providers inspect` -Show specific provider configuration on distribution endpoint -```bash -llama-stack-client providers inspect -``` - - -## Inference -Inference (chat). 
- - -### `llama-stack-client inference chat-completion` -Show available inference chat completion endpoints on distribution endpoint -```bash -llama-stack-client inference chat-completion --message [--stream] [--session] [--model-id] -``` -```bash -OpenAIChatCompletion( - id='chatcmpl-aacd11f3-8899-4ec5-ac5b-e655132f6891', - choices=[ - OpenAIChatCompletionChoice( - finish_reason='stop', - index=0, - message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam( - role='assistant', - content='The captain of the whaleship Pequod in Nathaniel Hawthorne\'s novel "Moby-Dick" is Captain -Ahab. He\'s a vengeful and obsessive old sailor who\'s determined to hunt down and kill the white sperm whale -Moby-Dick, whom he\'s lost his leg to in a previous encounter.', - name=None, - tool_calls=None, - refusal=None, - annotations=None, - audio=None, - function_call=None - ), - logprobs=None - ) - ], - created=1752578797, - model='llama3.2:3b-instruct-fp16', - object='chat.completion', - service_tier=None, - system_fingerprint='fp_ollama', - usage={ - 'completion_tokens': 67, - 'prompt_tokens': 33, - 'total_tokens': 100, - 'completion_tokens_details': None, - 'prompt_tokens_details': None - } -) -``` - -Required arguments: -**Note:** At least one of these parameters is required for chat completion -- `--message`: Message -- `--session`: Start a Chat Session - -Optional arguments: -- `--stream`: Stream -- `--model-id`: Model ID - -## Model Management -Manage GenAI models. 
- - -### `llama-stack-client models list` -Show available llama models at distribution endpoint -```bash -llama-stack-client models list -``` -``` -Available Models - -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ model_type โ”ƒ identifier โ”ƒ provider_resource_id โ”ƒ metadata โ”ƒ provider_id โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ llm โ”‚ meta-llama/Llama-3.2-3B-Instruct โ”‚ llama3.2:3b-instruct-fp16 โ”‚ โ”‚ ollama โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - -Total models: 1 -``` - -### `llama-stack-client models get` -Show details of a specific model at the distribution endpoint -```bash -llama-stack-client models get Llama3.1-8B-Instruct -``` - -``` -+----------------------+----------------------+----------------------------------------------------------+---------------+ -| identifier | llama_model | metadata | provider_id | -+======================+======================+==========================================================+===============+ -| Llama3.1-8B-Instruct | Llama3.1-8B-Instruct | {'huggingface_repo': 'meta-llama/Llama-3.1-8B-Instruct'} | tgi0 | 
-+----------------------+----------------------+----------------------------------------------------------+---------------+ -``` - - -```bash -llama-stack-client models get Random-Model - -Model RandomModel is not found at distribution endpoint host:port. Please ensure endpoint is serving specified model. -``` - -### `llama-stack-client models register` -Register a new model at distribution endpoint -```bash -llama-stack-client models register [--provider-id ] [--provider-model-id ] [--metadata ] [--model-type ] -``` - -Required arguments: -- `MODEL_ID`: Model ID -- `--provider-id`: Provider ID for the model - -Optional arguments: -- `--provider-model-id`: Provider's model ID -- `--metadata`: JSON metadata for the model -- `--model-type`: Model type: `llm`, `embedding` - - -### `llama-stack-client models unregister` -Unregister a model from distribution endpoint -```bash -llama-stack-client models unregister -``` - -## Vector DB Management -Manage vector databases. - - -### `llama-stack-client vector_dbs list` -Show available vector dbs on distribution endpoint -```bash -llama-stack-client vector_dbs list -``` -``` -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ identifier โ”ƒ provider_id โ”ƒ provider_resource_id โ”ƒ vector_db_type โ”ƒ params โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ my_demo_vector_db โ”‚ faiss โ”‚ 
my_demo_vector_db โ”‚ โ”‚ embedding_dimension: 384 โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ embedding_model: all-MiniLM-L6-v2 โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ type: vector_db โ”‚ -โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### `llama-stack-client vector_dbs register` -Create a new vector db -```bash -llama-stack-client vector_dbs register [--provider-id ] [--provider-vector-db-id ] [--embedding-model ] [--embedding-dimension ] -``` - - -Required arguments: -- `VECTOR_DB_ID`: Vector DB ID - -Optional arguments: -- `--provider-id`: Provider ID for the vector db -- `--provider-vector-db-id`: Provider's vector db ID -- `--embedding-model`: Embedding model to use. Default: `all-MiniLM-L6-v2` -- `--embedding-dimension`: Dimension of embeddings. Default: 384 - -### `llama-stack-client vector_dbs unregister` -Delete a vector db -```bash -llama-stack-client vector_dbs unregister -``` - - -Required arguments: -- `VECTOR_DB_ID`: Vector DB ID - - -## Shield Management -Manage safety shield services. 
-### `llama-stack-client shields list` -Show available safety shields on distribution endpoint -```bash -llama-stack-client shields list -``` - -``` -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ identifier โ”ƒ provider_alias โ”ƒ params โ”ƒ provider_id โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ ollama โ”‚ ollama/llama-guard3:1b โ”‚ โ”‚ llama-guard โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### `llama-stack-client shields register` -Register a new safety shield -```bash -llama-stack-client shields register --shield-id [--provider-id ] [--provider-shield-id ] [--params ] 
-``` - -Required arguments: -- `--shield-id`: ID of the shield - -Optional arguments: -- `--provider-id`: Provider ID for the shield -- `--provider-shield-id`: Provider's shield ID -- `--params`: JSON configuration parameters for the shield - - -## Eval execution -Run evaluation tasks. - - -### `llama-stack-client eval run-benchmark` -Run a evaluation benchmark task -```bash -llama-stack-client eval run-benchmark [ ...] --eval-task-config --output-dir --model-id [--num-examples ] [--visualize] [--repeat-penalty ] [--top-p ] [--max-tokens ] -``` - -Required arguments: -- `--eval-task-config`: Path to the eval task config file in JSON format -- `--output-dir`: Path to the directory where evaluation results will be saved -- `--model-id`: model id to run the benchmark eval on - -Optional arguments: -- `--num-examples`: Number of examples to evaluate (useful for debugging) -- `--visualize`: If set, visualizes evaluation results after completion -- `--repeat-penalty`: repeat-penalty in the sampling params to run generation -- `--top-p`: top-p in the sampling params to run generation -- `--max-tokens`: max-tokens in the sampling params to run generation -- `--temperature`: temperature in the sampling params to run generation - -Example benchmark_config.json: -```json -{ - "type": "benchmark", - "eval_candidate": { - "type": "model", - "model": "Llama3.1-405B-Instruct", - "sampling_params": { - "strategy": "greedy", - } - } -} -``` - -### `llama-stack-client eval run-scoring` -Run scoring from application datasets -```bash -llama-stack-client eval run-scoring --output-dir [--num-examples ] [--visualize] -``` - -Required arguments: -- `--output-dir`: Path to the directory where scoring results will be saved - -Optional arguments: -- `--num-examples`: Number of examples to evaluate (useful for debugging) -- `--visualize`: If set, visualizes scoring results after completion -- `--scoring-params-config`: Path to the scoring params config file in JSON format -- `--dataset-id`: 
Pre-registered dataset_id to score (from llama-stack-client datasets list) -- `--dataset-path`: Path to the dataset file to score - - -## Eval Tasks -Manage evaluation tasks. - -### `llama-stack-client eval_tasks list` -Show available eval tasks on distribution endpoint -```bash -llama-stack-client eval_tasks list -``` - - -### `llama-stack-client eval_tasks register` -Register a new eval task -```bash -llama-stack-client eval_tasks register --eval-task-id --dataset-id --scoring-functions [--provider-id ] [--provider-eval-task-id ] [--metadata ] -``` - - -Required arguments: -- `--eval-task-id`: ID of the eval task -- `--dataset-id`: ID of the dataset to evaluate -- `--scoring-functions`: Scoring functions to use for evaluation - -Optional arguments: -- `--provider-id`: Provider ID for the eval task -- `--provider-eval-task-id`: Provider's eval task ID - - -## Tool Group Management -Manage available tool groups. - - -### `llama-stack-client toolgroups list` -Show available llama toolgroups at distribution endpoint -```bash -llama-stack-client toolgroups list -``` -``` -+---------------------------+------------------+------+---------------+ -| identifier | provider_id | args | mcp_endpoint | -+===========================+==================+======+===============+ -| builtin::rag | rag-runtime | None | None | -+---------------------------+------------------+------+---------------+ -| builtin::websearch | tavily-search | None | None | -+---------------------------+------------------+------+---------------+ -``` - -### `llama-stack-client toolgroups get` -Get available llama toolgroups by id -```bash -llama-stack-client toolgroups get -``` - -Shows detailed information about a specific toolgroup. If the toolgroup is not found, displays an error message. 
- - -Required arguments: -- `TOOLGROUP_ID`: ID of the tool group - - -### `llama-stack-client toolgroups register` -Register a new toolgroup at distribution endpoint -```bash -llama-stack-client toolgroups register [--provider-id ] [--provider-toolgroup-id ] [--mcp-config ] [--args ] -``` - - -Required arguments: -- `TOOLGROUP_ID`: ID of the tool group - -Optional arguments: -- `--provider-id`: Provider ID for the toolgroup -- `--provider-toolgroup-id`: Provider's toolgroup ID -- `--mcp-config`: JSON configuration for the MCP endpoint -- `--args`: JSON arguments for the toolgroup - -### `llama-stack-client toolgroups unregister` -Unregister a toolgroup from distribution endpoint -```bash -llama-stack-client toolgroups unregister -``` - - -Required arguments: -- `TOOLGROUP_ID`: ID of the tool group - - -## Datasets Management -Manage datasets. - - -### `llama-stack-client datasets list` -Show available datasets on distribution endpoint -```bash -llama-stack-client datasets list -``` - - -### `llama-stack-client datasets register` -```bash -llama-stack-client datasets register --dataset_id --purpose [--url ] [--dataset-id ] [--metadata ] -``` - -Required arguments: -- `--dataset_id`: Id of the dataset -- `--purpose`: Purpose of the dataset - -Optional arguments: -- `--metadata`: Metadata of the dataset -- `--url`: URL of the dataset -- `--dataset-path`: Local file path to the dataset. If specified, upload dataset via URL - - -### `llama-stack-client datasets unregister` -Remove a dataset -```bash -llama-stack-client datasets unregister -``` - - -Required arguments: -- `DATASET_ID`: Id of the dataset - - -## Scoring Functions Management -Manage scoring functions. 
- -### `llama-stack-client scoring_functions list` -Show available scoring functions on distribution endpoint -```bash -llama-stack-client scoring_functions list -``` -``` -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ identifier โ”ƒ provider_id โ”ƒ description โ”ƒ type โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ basic::docvqa โ”‚ basic โ”‚ DocVQA Visual Question & Answer scoring function โ”‚ scoring_function โ”‚ -โ”‚ basic::equality โ”‚ basic โ”‚ Returns 1.0 if the input is equal to the target, 0.0 โ”‚ scoring_function โ”‚ -โ”‚ โ”‚ โ”‚ otherwise. 
โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - - -### `llama-stack-client scoring_functions register` -Register a new scoring function -```bash -llama-stack-client scoring_functions register --scoring-fn-id --description --return-type [--provider-id ] [--provider-scoring-fn-id ] [--params ] -``` - - -Required arguments: -- `--scoring-fn-id`: Id of the scoring function -- `--description`: Description of the scoring function -- `--return-type`: Return type of the scoring function - -Optional arguments: -- `--provider-id`: Provider ID for the scoring function -- `--provider-scoring-fn-id`: Provider's scoring function ID -- `--params`: Parameters for the scoring function in JSON format - - -## Post Training Management -Post-training. 
- -### `llama-stack-client post_training list` -Show the list of available post training jobs -```bash -llama-stack-client post_training list -``` -```bash -["job-1", "job-2", "job-3"] -``` - - -### `llama-stack-client post_training artifacts` -Get the training artifacts of a specific post training job -```bash -llama-stack-client post_training artifacts --job-uuid -``` -```bash -JobArtifactsResponse(checkpoints=[], job_uuid='job-1') -``` - - -Required arguments: -- `--job-uuid`: Job UUID - - -### `llama-stack-client post_training supervised_fine_tune` -Kick off a supervised fine tune job -```bash -llama-stack-client post_training supervised_fine_tune --job-uuid --model --algorithm-config --training-config [--checkpoint-dir ] -``` - - -Required arguments: -- `--job-uuid`: Job UUID -- `--model`: Model ID -- `--algorithm-config`: Algorithm Config -- `--training-config`: Training Config - -Optional arguments: -- `--checkpoint-dir`: Checkpoint Config - - -### `llama-stack-client post_training status` -Show the status of a specific post training job -```bash -llama-stack-client post_training status --job-uuid -``` -```bash -JobStatusResponse( - checkpoints=[], - job_uuid='job-1', - status='completed', - completed_at="", - resources_allocated="", - scheduled_at="", - started_at="" -) -``` - - -Required arguments: -- `--job-uuid`: Job UUID - - -### `llama-stack-client post_training cancel` -Cancel the training job -```bash -llama-stack-client post_training cancel --job-uuid -``` -```bash -# This functionality is not yet implemented for llama-stack-client -โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ -โ”‚ Failed to post_training cancel_training_job โ”‚ -โ”‚ โ”‚ -โ”‚ Error Type: InternalServerError โ”‚ -โ”‚ Details: Error code: 501 - {'detail': 'Not implemented: '} โ”‚ 
-โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ -``` - - -Required arguments: -- `--job-uuid`: Job UUID diff --git a/docs/source/references/python_sdk_reference/index.md b/docs/source/references/python_sdk_reference/index.md deleted file mode 100644 index b1a9396fe..000000000 --- a/docs/source/references/python_sdk_reference/index.md +++ /dev/null @@ -1,462 +0,0 @@ -# Python SDK Reference - -## Shared Types - -```python -from llama_stack_client.types import ( - AgentConfig, - BatchCompletion, - CompletionMessage, - ContentDelta, - Document, - InterleavedContent, - InterleavedContentItem, - Message, - ParamType, - QueryConfig, - QueryResult, - ReturnType, - SafetyViolation, - SamplingParams, - ScoringResult, - SystemMessage, - ToolCall, - ToolParamDefinition, - ToolResponseMessage, - URL, - UserMessage, -) -``` - -## Toolgroups - -Types: - -```python -from llama_stack_client.types import ( - ListToolGroupsResponse, - ToolGroup, - ToolgroupListResponse, -) -``` - -Methods: - -- client.toolgroups.list() -> ToolgroupListResponse -- client.toolgroups.get(toolgroup_id) -> ToolGroup -- client.toolgroups.register(\*\*params) -> None -- client.toolgroups.unregister(toolgroup_id) -> None - -## Tools - -Types: - -```python -from llama_stack_client.types import ListToolsResponse, Tool, ToolListResponse -``` - -Methods: - -- client.tools.list(\*\*params) -> ToolListResponse -- client.tools.get(tool_name) -> Tool - -## ToolRuntime - -Types: - -```python -from llama_stack_client.types import ToolDef, ToolInvocationResult -``` - -Methods: - -- client.tool_runtime.invoke_tool(\*\*params) -> ToolInvocationResult -- client.tool_runtime.list_tools(\*\*params) -> JSONLDecoder[ToolDef] - -### RagTool - -Methods: - -- client.tool_runtime.rag_tool.insert(\*\*params) -> None -- client.tool_runtime.rag_tool.query(\*\*params) -> QueryResult - -## Agents 
- -Types: - -```python -from llama_stack_client.types import ( - InferenceStep, - MemoryRetrievalStep, - ShieldCallStep, - ToolExecutionStep, - ToolResponse, - AgentCreateResponse, -) -``` - -Methods: - -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.delete(agent_id) -> None - -### Session - -Types: - -```python -from llama_stack_client.types.agents import Session, SessionCreateResponse -``` - -Methods: - -- client.agents.session.create(agent_id, \*\*params) -> SessionCreateResponse -- client.agents.session.retrieve(session_id, \*, agent_id, \*\*params) -> Session -- client.agents.session.delete(session_id, \*, agent_id) -> None - -### Steps - -Types: - -```python -from llama_stack_client.types.agents import StepRetrieveResponse -``` - -Methods: - -- client.agents.steps.retrieve(step_id, \*, agent_id, session_id, turn_id) -> StepRetrieveResponse - -### Turn - -Types: - -```python -from llama_stack_client.types.agents import Turn, TurnCreateResponse -``` - -Methods: - -- client.agents.turn.create(session_id, \*, agent_id, \*\*params) -> TurnCreateResponse -- client.agents.turn.retrieve(turn_id, \*, agent_id, session_id) -> Turn - -## BatchInference - -Types: - -```python -from llama_stack_client.types import BatchInferenceChatCompletionResponse -``` - -Methods: - -- client.batch_inference.chat_completion(\*\*params) -> BatchInferenceChatCompletionResponse -- client.batch_inference.completion(\*\*params) -> BatchCompletion - -## Datasets - -Types: - -```python -from llama_stack_client.types import ( - ListDatasetsResponse, - DatasetRetrieveResponse, - DatasetListResponse, -) -``` - -Methods: - -- client.datasets.retrieve(dataset_id) -> Optional[DatasetRetrieveResponse] -- client.datasets.list() -> DatasetListResponse -- client.datasets.register(\*\*params) -> None -- client.datasets.unregister(dataset_id) -> None - -## Eval - -Types: - -```python -from llama_stack_client.types import EvaluateResponse, Job -``` - -Methods: - -- 
client.eval.evaluate_rows(benchmark_id, \*\*params) -> EvaluateResponse -- client.eval.run_eval(benchmark_id, \*\*params) -> Job - -### Jobs - -Types: - -```python -from llama_stack_client.types.eval import JobStatusResponse -``` - -Methods: - -- client.eval.jobs.retrieve(job_id, \*, benchmark_id) -> EvaluateResponse -- client.eval.jobs.cancel(job_id, \*, benchmark_id) -> None -- client.eval.jobs.status(job_id, \*, benchmark_id) -> Optional[JobStatusResponse] - -## Inspect - -Types: - -```python -from llama_stack_client.types import HealthInfo, ProviderInfo, RouteInfo, VersionInfo -``` - -Methods: - -- client.inspect.health() -> HealthInfo -- client.inspect.version() -> VersionInfo - -## Inference - -Types: - -```python -from llama_stack_client.types import ( - CompletionResponse, - EmbeddingsResponse, - TokenLogProbs, - InferenceChatCompletionResponse, - InferenceCompletionResponse, -) -``` - -Methods: - -- client.inference.chat_completion(\*\*params) -> InferenceChatCompletionResponse -- client.inference.completion(\*\*params) -> InferenceCompletionResponse -- client.inference.embeddings(\*\*params) -> EmbeddingsResponse - -## VectorIo - -Types: - -```python -from llama_stack_client.types import QueryChunksResponse -``` - -Methods: - -- client.vector_io.insert(\*\*params) -> None -- client.vector_io.query(\*\*params) -> QueryChunksResponse - -## VectorDBs - -Types: - -```python -from llama_stack_client.types import ( - ListVectorDBsResponse, - VectorDBRetrieveResponse, - VectorDBListResponse, - VectorDBRegisterResponse, -) -``` - -Methods: - -- client.vector_dbs.retrieve(vector_db_id) -> Optional[VectorDBRetrieveResponse] -- client.vector_dbs.list() -> VectorDBListResponse -- client.vector_dbs.register(\*\*params) -> VectorDBRegisterResponse -- client.vector_dbs.unregister(vector_db_id) -> None - -## Models - -Types: - -```python -from llama_stack_client.types import ListModelsResponse, Model, ModelListResponse -``` - -Methods: - -- 
client.models.retrieve(model_id) -> Optional[Model] -- client.models.list() -> ModelListResponse -- client.models.register(\*\*params) -> Model -- client.models.unregister(model_id) -> None - -## PostTraining - -Types: - -```python -from llama_stack_client.types import ListPostTrainingJobsResponse, PostTrainingJob -``` - -Methods: - -- client.post_training.preference_optimize(\*\*params) -> PostTrainingJob -- client.post_training.supervised_fine_tune(\*\*params) -> PostTrainingJob - -### Job - -Types: - -```python -from llama_stack_client.types.post_training import ( - JobListResponse, - JobArtifactsResponse, - JobStatusResponse, -) -``` - -Methods: - -- client.post_training.job.list() -> JobListResponse -- client.post_training.job.artifacts(\*\*params) -> Optional[JobArtifactsResponse] -- client.post_training.job.cancel(\*\*params) -> None -- client.post_training.job.status(\*\*params) -> Optional[JobStatusResponse] - -## Providers - -Types: - -```python -from llama_stack_client.types import ListProvidersResponse, ProviderListResponse -``` - -Methods: - -- client.providers.list() -> ProviderListResponse - -## Routes - -Types: - -```python -from llama_stack_client.types import ListRoutesResponse, RouteListResponse -``` - -Methods: - -- client.routes.list() -> RouteListResponse - -## Safety - -Types: - -```python -from llama_stack_client.types import RunShieldResponse -``` - -Methods: - -- client.safety.run_shield(\*\*params) -> RunShieldResponse - -## Shields - -Types: - -```python -from llama_stack_client.types import ListShieldsResponse, Shield, ShieldListResponse -``` - -Methods: - -- client.shields.retrieve(identifier) -> Optional[Shield] -- client.shields.list() -> ShieldListResponse -- client.shields.register(\*\*params) -> Shield - -## SyntheticDataGeneration - -Types: - -```python -from llama_stack_client.types import SyntheticDataGenerationResponse -``` - -Methods: - -- client.synthetic_data_generation.generate(\*\*params) -> 
SyntheticDataGenerationResponse - -## Telemetry - -Types: - -```python -from llama_stack_client.types import ( - QuerySpansResponse, - SpanWithStatus, - Trace, - TelemetryGetSpanResponse, - TelemetryGetSpanTreeResponse, - TelemetryQuerySpansResponse, - TelemetryQueryTracesResponse, -) -``` - -Methods: - -- client.telemetry.get_span(span_id, \*, trace_id) -> TelemetryGetSpanResponse -- client.telemetry.get_span_tree(span_id, \*\*params) -> TelemetryGetSpanTreeResponse -- client.telemetry.get_trace(trace_id) -> Trace -- client.telemetry.log_event(\*\*params) -> None -- client.telemetry.query_spans(\*\*params) -> TelemetryQuerySpansResponse -- client.telemetry.query_traces(\*\*params) -> TelemetryQueryTracesResponse -- client.telemetry.save_spans_to_dataset(\*\*params) -> None - -## Datasetio - -Types: - -```python -from llama_stack_client.types import PaginatedRowsResult -``` - -Methods: - -- client.datasetio.append_rows(\*\*params) -> None -- client.datasetio.get_rows_paginated(\*\*params) -> PaginatedRowsResult - -## Scoring - -Types: - -```python -from llama_stack_client.types import ScoringScoreResponse, ScoringScoreBatchResponse -``` - -Methods: - -- client.scoring.score(\*\*params) -> ScoringScoreResponse -- client.scoring.score_batch(\*\*params) -> ScoringScoreBatchResponse - -## ScoringFunctions - -Types: - -```python -from llama_stack_client.types import ( - ListScoringFunctionsResponse, - ScoringFn, - ScoringFunctionListResponse, -) -``` - -Methods: - -- client.scoring_functions.retrieve(scoring_fn_id) -> Optional[ScoringFn] -- client.scoring_functions.list() -> ScoringFunctionListResponse -- client.scoring_functions.register(\*\*params) -> None - -## Benchmarks - -Types: - -```python -from llama_stack_client.types import ( - Benchmark, - ListBenchmarksResponse, - BenchmarkListResponse, -) -``` - -Methods: - -- client.benchmarks.retrieve(benchmark_id) -> Optional[Benchmark] -- client.benchmarks.list() -> BenchmarkListResponse -- 
client.benchmarks.register(\*\*params) -> None