diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 54c01c80d..5884f2582 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,4 +2,4 @@ # These owners will be the default owners for everything in # the repo. Unless a later match takes precedence, -* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @dineshyv @vladimirivic @sixianyi0721 @ehhuang @terrytangyuan @SLR722 @leseb +* @ashwinb @yanxi0830 @hardikjshah @raghotham @ehhuang @terrytangyuan @leseb @bbrowning diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index af2058b9a..263828e1c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,10 +1,8 @@ # What does this PR do? -[Provide a short summary of what this PR does and why. Link to relevant issues if applicable.] + -[//]: # (If resolving an issue, uncomment and update the line below) -[//]: # (Closes #[issue-number]) + + ## Test Plan -[Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*] - -[//]: # (## Documentation) + diff --git a/.github/TRIAGERS.md b/.github/TRIAGERS.md index d4ef6d1ac..586a5a506 100644 --- a/.github/TRIAGERS.md +++ b/.github/TRIAGERS.md @@ -1,2 +1,2 @@ # This file documents Triage members in the Llama Stack community -@franciscojavierarceo @leseb + @bbrowning @booxter @franciscojavierarceo @leseb diff --git a/.github/actions/setup-ollama/action.yml b/.github/actions/setup-ollama/action.yml new file mode 100644 index 000000000..3dd6c940c --- /dev/null +++ b/.github/actions/setup-ollama/action.yml @@ -0,0 +1,26 @@ +name: Setup Ollama +description: Start Ollama and cache model +inputs: + models: + description: Comma-separated list of models to pull + default: "llama3.2:3b-instruct-fp16,all-minilm:latest" +runs: + using: "composite" + steps: + - name: Install and start Ollama + shell: bash + run: | + # the ollama installer also starts the ollama service + curl -fsSL https://ollama.com/install.sh | sh + + # Do NOT cache models - pulling the cache is actually slower than just pulling the model. + # It takes ~45 seconds to pull the models from the cache and unpack it, but only 30 seconds to + # pull them directly. + # Maybe this is because the cache is being pulled at the same time by all the matrix jobs? + - name: Pull requested models + if: inputs.models != '' + shell: bash + run: | + for model in $(echo "${{ inputs.models }}" | tr ',' ' '); do + ollama pull "$model" + done diff --git a/.github/actions/setup-runner/action.yml b/.github/actions/setup-runner/action.yml new file mode 100644 index 000000000..6cba4fdc3 --- /dev/null +++ b/.github/actions/setup-runner/action.yml @@ -0,0 +1,22 @@ +name: Setup runner +description: Prepare a runner for the tests (install uv, python, project dependencies, etc.) +runs: + using: "composite" + steps: + - name: Install uv + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1 + with: + python-version: "3.10" + activate-environment: true + version: 0.7.6 + + - name: Install dependencies + shell: bash + run: | + uv sync --all-groups + uv pip install ollama faiss-cpu + # always test against the latest version of the client + # TODO: this is not necessarily a good idea. we need to test against both published and latest + # to find out backwards compatibility issues. + uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main + uv pip install -e . 
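For context, here is a minimal sketch of how a workflow job might consume the two composite actions defined above. The workflow name, trigger, and the `models` override are illustrative assumptions; the repository-local `uses:` paths and the `models` input come from the action definitions in this diff, and the pinned checkout SHA matches the one used elsewhere in these workflows.

```yaml
# Hypothetical workflow illustrating how the setup-runner and setup-ollama
# composite actions are consumed by a job (sketch only, not part of the diff).
name: Example integration job
on: workflow_dispatch

jobs:
  example:
    runs-on: ubuntu-latest
    steps:
      # Checkout first so the repository-local action paths resolve.
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      # Installs uv, Python 3.10, and the project dependencies.
      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      # Starts Ollama and pulls the requested models; omitting `models`
      # falls back to "llama3.2:3b-instruct-fp16,all-minilm:latest".
      - name: Setup ollama
        uses: ./.github/actions/setup-ollama
        with:
          models: "llama3.2:3b-instruct-fp16"
```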
diff --git a/.github/workflows/Dockerfile b/.github/workflows/Dockerfile new file mode 100644 index 000000000..9261bd174 --- /dev/null +++ b/.github/workflows/Dockerfile @@ -0,0 +1 @@ +FROM localhost:5000/distribution-kvant:dev \ No newline at end of file diff --git a/.github/workflows/ci-playground.yaml b/.github/workflows/ci-playground.yaml new file mode 100644 index 000000000..251782855 --- /dev/null +++ b/.github/workflows/ci-playground.yaml @@ -0,0 +1,73 @@ +name: Build and Push playground container +run-name: Build and Push playground container +on: + workflow_dispatch: + #schedule: + # - cron: "0 10 * * *" + push: + branches: + - main + - kvant + tags: + - 'v*' + pull_request: + branches: + - main + - kvant +env: + IMAGE: git.kvant.cloud/${{github.repository}}-playground +jobs: + build-playground: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set current time + uses: https://github.com/gerred/actions/current-time@master + id: current_time + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to git.kvant.cloud registry + uses: docker/login-action@v3 + with: + registry: git.kvant.cloud + username: ${{ vars.ORG_PACKAGE_WRITER_USERNAME }} + password: ${{ secrets.ORG_PACKAGE_WRITER_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + ${{env.IMAGE}} + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + + - name: Build and push to gitea registry + uses: docker/build-push-action@v6 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + context: . 
+ file: llama_stack/distribution/ui/Containerfile + provenance: mode=max + sbom: true + build-args: | + BUILD_DATE=${{ steps.current_time.outputs.time }} + cache-from: | + type=registry,ref=${{ env.IMAGE }}:buildcache + type=registry,ref=${{ env.IMAGE }}:${{ github.ref_name }} + type=registry,ref=${{ env.IMAGE }}:main + cache-to: type=registry,ref=${{ env.IMAGE }}:buildcache,mode=max,image-manifest=true diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 000000000..87f196cc2 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,98 @@ +name: Build and Push container +run-name: Build and Push container +on: + workflow_dispatch: + #schedule: + # - cron: "0 10 * * *" + push: + branches: + - main + - kvant + tags: + - 'v*' + pull_request: + branches: + - main + - kvant +env: + IMAGE: git.kvant.cloud/${{github.repository}} +jobs: + build: + runs-on: ubuntu-latest + services: + registry: + image: registry:2 + ports: + - 5000:5000 + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set current time + uses: https://github.com/gerred/actions/current-time@master + id: current_time + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: network=host + + - name: Login to git.kvant.cloud registry + uses: docker/login-action@v3 + with: + registry: git.kvant.cloud + username: ${{ vars.ORG_PACKAGE_WRITER_USERNAME }} + password: ${{ secrets.ORG_PACKAGE_WRITER_TOKEN }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + ${{env.IMAGE}} + # generate Docker tags based on the following events/attributes + tags: | + type=schedule + type=ref,event=branch + type=ref,event=pr + type=ref,event=tag + type=semver,pattern={{version}} + + - name: Install uv + uses: https://github.com/astral-sh/setup-uv@v5 + with: + # Install a specific version of uv. + version: "0.7.8" + + - name: Build + env: + USE_COPY_NOT_MOUNT: true + LLAMA_STACK_DIR: . + run: | + uvx --from . 
llama stack build --template kvant --image-type container + + # docker tag distribution-kvant:dev ${{env.IMAGE}}:kvant + # docker push ${{env.IMAGE}}:kvant + + docker tag distribution-kvant:dev localhost:5000/distribution-kvant:dev + docker push localhost:5000/distribution-kvant:dev + + - name: Build and push to gitea registry + uses: docker/build-push-action@v6 + with: + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + context: .github/workflows + provenance: mode=max + sbom: true + build-args: | + BUILD_DATE=${{ steps.current_time.outputs.time }} + cache-from: | + type=registry,ref=${{ env.IMAGE }}:buildcache + type=registry,ref=${{ env.IMAGE }}:${{ github.ref_name }} + type=registry,ref=${{ env.IMAGE }}:main + cache-to: type=registry,ref=${{ env.IMAGE }}:buildcache,mode=max,image-manifest=true diff --git a/.github/workflows/changelog.yml b/.github/workflows_upstream/changelog.yml similarity index 100% rename from .github/workflows/changelog.yml rename to .github/workflows_upstream/changelog.yml diff --git a/.github/workflows/gha_workflow_llama_stack_tests.yml b/.github/workflows_upstream/gha_workflow_llama_stack_tests.yml similarity index 100% rename from .github/workflows/gha_workflow_llama_stack_tests.yml rename to .github/workflows_upstream/gha_workflow_llama_stack_tests.yml diff --git a/.github/workflows_upstream/install-script-ci.yml b/.github/workflows_upstream/install-script-ci.yml new file mode 100644 index 000000000..2eb234c77 --- /dev/null +++ b/.github/workflows_upstream/install-script-ci.yml @@ -0,0 +1,26 @@ +name: Installer CI + +on: + pull_request: + paths: + - 'install.sh' + push: + paths: + - 'install.sh' + schedule: + - cron: '0 2 * * *' # every day at 02:00 UTC + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2 + - name: Run ShellCheck on install.sh + run: shellcheck install.sh + smoke-test: + needs: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2 + - name: Run installer end-to-end + run: ./install.sh diff --git a/.github/workflows_upstream/integration-auth-tests.yml b/.github/workflows_upstream/integration-auth-tests.yml new file mode 100644 index 000000000..a3a746246 --- /dev/null +++ b/.github/workflows_upstream/integration-auth-tests.yml @@ -0,0 +1,132 @@ +name: Integration Auth Tests + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + paths: + - 'distributions/**' + - 'llama_stack/**' + - 'tests/integration/**' + - 'uv.lock' + - 'pyproject.toml' + - 'requirements.txt' + - '.github/workflows/integration-auth-tests.yml' # This workflow + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + test-matrix: + runs-on: ubuntu-latest + strategy: + matrix: + auth-provider: [oauth2_token] + fail-fast: false # we want to run all tests regardless of failure + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + + - name: Build Llama Stack + run: | + llama stack build --template ollama --image-type venv + + - name: Install minikube + if: ${{ matrix.auth-provider == 'kubernetes' }} + uses: medyagh/setup-minikube@cea33675329b799adccc9526aa5daccc26cd5052 # v0.0.19 + + - name: Start minikube + if: ${{ matrix.auth-provider == 'oauth2_token' }} + run: | + 
minikube start + kubectl get pods -A + + - name: Configure Kube Auth + if: ${{ matrix.auth-provider == 'oauth2_token' }} + run: | + kubectl create namespace llama-stack + kubectl create serviceaccount llama-stack-auth -n llama-stack + kubectl create rolebinding llama-stack-auth-rolebinding --clusterrole=admin --serviceaccount=llama-stack:llama-stack-auth -n llama-stack + kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token + cat <> $GITHUB_ENV + echo "KUBERNETES_CA_CERT_PATH=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}')" >> $GITHUB_ENV + echo "KUBERNETES_ISSUER=$(kubectl get --raw /.well-known/openid-configuration| jq -r .issuer)" >> $GITHUB_ENV + echo "KUBERNETES_AUDIENCE=$(kubectl create token llama-stack-auth -n llama-stack --duration=1h | cut -d. -f2 | base64 -d | jq -r '.aud[0]')" >> $GITHUB_ENV + + - name: Set Kube Auth Config and run server + env: + INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" + if: ${{ matrix.auth-provider == 'oauth2_token' }} + run: | + run_dir=$(mktemp -d) + cat <<'EOF' > $run_dir/run.yaml + version: '2' + image_name: kube + apis: [] + providers: {} + server: + port: 8321 + EOF + yq eval '.server.auth = {"provider_type": "${{ matrix.auth-provider }}"}' -i $run_dir/run.yaml + yq eval '.server.auth.config = {"tls_cafile": "${{ env.KUBERNETES_CA_CERT_PATH }}", "issuer": "${{ env.KUBERNETES_ISSUER }}", "audience": "${{ env.KUBERNETES_AUDIENCE }}"}' -i $run_dir/run.yaml + yq eval '.server.auth.config.jwks = {"uri": "${{ env.KUBERNETES_API_SERVER_URL }}"}' -i $run_dir/run.yaml + cat $run_dir/run.yaml + + nohup uv run llama stack run $run_dir/run.yaml --image-type venv > server.log 2>&1 & + + - name: Wait for Llama Stack server to be ready + run: | + echo "Waiting for Llama Stack server..." + for i in {1..30}; do + if curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://localhost:8321/v1/health | grep -q "OK"; then + echo "Llama Stack server is up!" 
+ if grep -q "Enabling authentication with provider: ${{ matrix.auth-provider }}" server.log; then + echo "Llama Stack server is configured to use ${{ matrix.auth-provider }} auth" + exit 0 + else + echo "Llama Stack server is not configured to use ${{ matrix.auth-provider }} auth" + cat server.log + exit 1 + fi + fi + sleep 1 + done + echo "Llama Stack server failed to start" + cat server.log + exit 1 + + - name: Test auth + run: | + curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers|jq diff --git a/.github/workflows/integration-tests.yml b/.github/workflows_upstream/integration-tests.yml similarity index 67% rename from .github/workflows/integration-tests.yml rename to .github/workflows_upstream/integration-tests.yml index f54bed839..d78e82c9d 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows_upstream/integration-tests.yml @@ -24,7 +24,7 @@ jobs: matrix: # Listing tests manually since some of them currently fail # TODO: generate matrix list from tests/integration when fixed - test-type: [agents, inference, datasets, inspect, scoring, post_training, providers] + test-type: [agents, inference, datasets, inspect, scoring, post_training, providers, tool_runtime] client-type: [library, http] fail-fast: false # we want to run all tests regardless of failure @@ -32,30 +32,14 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Install uv - uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - with: - python-version: "3.10" + - name: Install dependencies + uses: ./.github/actions/setup-runner - - name: Install and start Ollama - run: | - # the ollama installer also starts the ollama service - curl -fsSL https://ollama.com/install.sh | sh + - name: Setup ollama + uses: ./.github/actions/setup-ollama - - name: Pull Ollama image + - name: Build Llama Stack run: | - # TODO: cache the model. OLLAMA_MODELS defaults to ~ollama/.ollama/models. - ollama pull llama3.2:3b-instruct-fp16 - - - name: Set Up Environment and Install Dependencies - run: | - uv sync --extra dev --extra test - uv pip install ollama faiss-cpu - # always test against the latest version of the client - # TODO: this is not necessarily a good idea. we need to test against both published and latest - # to find out backwards compatibility issues. - uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main - uv pip install -e . 
llama stack build --template ollama --image-type venv - name: Start Llama Stack server in background @@ -63,8 +47,7 @@ jobs: env: INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" run: | - source .venv/bin/activate - nohup uv run llama stack run ./llama_stack/templates/ollama/run.yaml --image-type venv > server.log 2>&1 & + LLAMA_STACK_LOG_FILE=server.log nohup uv run llama stack run ./llama_stack/templates/ollama/run.yaml --image-type venv & - name: Wait for Llama Stack server to be ready if: matrix.client-type == 'http' @@ -92,6 +75,12 @@ jobs: exit 1 fi + - name: Check Storage and Memory Available Before Tests + if: ${{ always() }} + run: | + free -h + df -h + - name: Run Integration Tests env: INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" @@ -101,7 +90,27 @@ jobs: else stack_config="http://localhost:8321" fi - uv run pytest -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \ + uv run pytest -s -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \ -k "not(builtin_tool or safety_with_image or code_interpreter or test_rag)" \ --text-model="meta-llama/Llama-3.2-3B-Instruct" \ --embedding-model=all-MiniLM-L6-v2 + + - name: Check Storage and Memory Available After Tests + if: ${{ always() }} + run: | + free -h + df -h + + - name: Write ollama logs to file + if: ${{ always() }} + run: | + sudo journalctl -u ollama.service > ollama.log + + - name: Upload all logs to artifacts + if: ${{ always() }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.client-type }}-${{ matrix.test-type }} + path: | + *.log + retention-days: 1 diff --git a/.github/workflows/pre-commit.yml b/.github/workflows_upstream/pre-commit.yml similarity index 87% rename from .github/workflows/pre-commit.yml rename to .github/workflows_upstream/pre-commit.yml index 17a42dd26..2bbd52c53 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows_upstream/pre-commit.yml @@ -18,7 +18,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set up Python - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: '3.11' cache: pip @@ -27,6 +27,9 @@ jobs: .pre-commit-config.yaml - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 + env: + SKIP: no-commit-to-branch + RUFF_OUTPUT_FORMAT: github - name: Verify if there are any diff files after pre-commit run: | diff --git a/.github/workflows/providers-build.yml b/.github/workflows_upstream/providers-build.yml similarity index 65% rename from .github/workflows/providers-build.yml rename to .github/workflows_upstream/providers-build.yml index 23257d7dc..cf53459b9 100644 --- a/.github/workflows/providers-build.yml +++ b/.github/workflows_upstream/providers-build.yml @@ -50,21 +50,8 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up Python - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 - with: - python-version: '3.10' - - - name: Install uv - uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - with: - python-version: "3.10" - - - name: Install LlamaStack - run: | - uv venv - source .venv/bin/activate - uv pip install -e . 
+ - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Print build dependencies run: | @@ -79,7 +66,6 @@ jobs: - name: Print dependencies in the image if: matrix.image-type == 'venv' run: | - source test/bin/activate uv pip list build-single-provider: @@ -88,21 +74,8 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up Python - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 - with: - python-version: '3.10' - - - name: Install uv - uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - with: - python-version: "3.10" - - - name: Install LlamaStack - run: | - uv venv - source .venv/bin/activate - uv pip install -e . + - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Build a single provider run: | @@ -114,27 +87,14 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up Python - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 - with: - python-version: '3.10' - - - name: Install uv - uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - with: - python-version: "3.10" - - - name: Install LlamaStack - run: | - uv venv - source .venv/bin/activate - uv pip install -e . + - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Build a single provider run: | - yq -i '.image_type = "container"' llama_stack/templates/dev/build.yaml - yq -i '.image_name = "test"' llama_stack/templates/dev/build.yaml - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/templates/dev/build.yaml + yq -i '.image_type = "container"' llama_stack/templates/starter/build.yaml + yq -i '.image_name = "test"' llama_stack/templates/starter/build.yaml + USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/templates/starter/build.yaml - name: Inspect the container image entrypoint run: | @@ -145,3 +105,43 @@ jobs: echo "Entrypoint is not correct" exit 1 fi + + build-ubi9-container-distribution: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + + - name: Pin template to UBI9 base + run: | + yq -i ' + .image_type = "container" | + .image_name = "ubi9-test" | + .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest" + ' llama_stack/templates/starter/build.yaml + + - name: Build dev container (UBI9) + env: + USE_COPY_NOT_MOUNT: "true" + LLAMA_STACK_DIR: "." 
+ run: | + uv run llama stack build --config llama_stack/templates/starter/build.yaml + + - name: Inspect UBI9 image + run: | + IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1) + entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID) + echo "Entrypoint: $entrypoint" + if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then + echo "Entrypoint is not correct" + exit 1 + fi + + echo "Checking /etc/os-release in $IMAGE_ID" + docker run --rm --entrypoint sh "$IMAGE_ID" -c \ + 'source /etc/os-release && echo "$ID"' \ + | grep -qE '^(rhel|ubi)$' \ + || { echo "Base image is not UBI 9!"; exit 1; } diff --git a/.github/workflows/semantic-pr.yml b/.github/workflows_upstream/semantic-pr.yml similarity index 100% rename from .github/workflows/semantic-pr.yml rename to .github/workflows_upstream/semantic-pr.yml diff --git a/.github/workflows/stale_bot.yml b/.github/workflows_upstream/stale_bot.yml similarity index 100% rename from .github/workflows/stale_bot.yml rename to .github/workflows_upstream/stale_bot.yml diff --git a/.github/workflows/test-external-providers.yml b/.github/workflows_upstream/test-external-providers.yml similarity index 51% rename from .github/workflows/test-external-providers.yml rename to .github/workflows_upstream/test-external-providers.yml index 37f5c45ab..06ab7cf3c 100644 --- a/.github/workflows/test-external-providers.yml +++ b/.github/workflows_upstream/test-external-providers.yml @@ -23,29 +23,10 @@ jobs: # container and point 'uv pip install' to the correct path... steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Install uv - uses: astral-sh/setup-uv@v5 - with: - python-version: "3.10" - - - name: Install Ollama - run: | - curl -fsSL https://ollama.com/install.sh | sh - - - name: Pull Ollama image - run: | - ollama pull llama3.2:3b-instruct-fp16 - - - name: Start Ollama in background - run: | - nohup ollama run llama3.2:3b-instruct-fp16 --keepalive=30m > ollama.log 2>&1 & - - - name: Set Up Environment and Install Dependencies - run: | - uv sync --extra dev --extra test - uv pip install -e . + - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Apply image type to config file run: | @@ -59,57 +40,32 @@ jobs: - name: Create provider configuration run: | - mkdir -p /tmp/providers.d/remote/inference - cp tests/external-provider/llama-stack-provider-ollama/custom_ollama.yaml /tmp/providers.d/remote/inference/custom_ollama.yaml + mkdir -p /home/runner/.llama/providers.d/remote/inference + cp tests/external-provider/llama-stack-provider-ollama/custom_ollama.yaml /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml - name: Build distro from config file run: | USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml - - name: Wait for Ollama to start - run: | - echo "Waiting for Ollama..." - for i in {1..30}; do - if curl -s http://localhost:11434 | grep -q "Ollama is running"; then - echo "Ollama is running!" 
- exit 0 - fi - sleep 1 - done - echo "Ollama failed to start" - ollama ps - ollama.log - exit 1 - - name: Start Llama Stack server in background if: ${{ matrix.image-type }} == 'venv' env: INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" run: | - source ci-test/bin/activate uv run pip list nohup uv run --active llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 & - name: Wait for Llama Stack server to be ready run: | - echo "Waiting for Llama Stack server..." for i in {1..30}; do - if curl -s http://localhost:8321/v1/health | grep -q "OK"; then - echo "Llama Stack server is up!" - if grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then - echo "Llama Stack server is using custom Ollama provider" - exit 0 - else - echo "Llama Stack server is not using custom Ollama provider" - exit 1 - fi + if ! grep -q "remote::custom_ollama from /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml" server.log; then + echo "Waiting for Llama Stack server to load the provider..." + sleep 1 + else + echo "Provider loaded" + exit 0 fi - sleep 1 done - echo "Llama Stack server failed to start" + echo "Provider failed to load" cat server.log exit 1 - - - name: run inference tests - run: | - uv run pytest -v tests/integration/inference/test_text_inference.py --stack-config="http://localhost:8321" --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2 diff --git a/.github/workflows/tests.yml b/.github/workflows_upstream/tests.yml similarity index 100% rename from .github/workflows/tests.yml rename to .github/workflows_upstream/tests.yml diff --git a/.github/workflows/unit-tests.yml b/.github/workflows_upstream/unit-tests.yml similarity index 73% rename from .github/workflows/unit-tests.yml rename to .github/workflows_upstream/unit-tests.yml index 962141744..fc0459f0f 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows_upstream/unit-tests.yml @@ -30,17 +30,11 @@ jobs: - "3.12" - "3.13" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up Python ${{ matrix.python }} - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 - with: - python-version: ${{ matrix.python }} - - - uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - with: - python-version: ${{ matrix.python }} - enable-cache: false + - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Run unit tests run: | diff --git a/.github/workflows/update-readthedocs.yml b/.github/workflows_upstream/update-readthedocs.yml similarity index 78% rename from .github/workflows/update-readthedocs.yml rename to .github/workflows_upstream/update-readthedocs.yml index 794a727be..981332a77 100644 --- a/.github/workflows/update-readthedocs.yml +++ b/.github/workflows_upstream/update-readthedocs.yml @@ -14,6 +14,8 @@ on: - 'docs/**' - 'pyproject.toml' - '.github/workflows/update-readthedocs.yml' + tags: + - '*' pull_request: branches: - main @@ -35,16 +37,8 @@ jobs: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Set up Python - uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0 - with: - python-version: '3.11' - - - name: Install the latest version of uv - uses: 
astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1 - - - name: Sync with uv - run: uv sync --extra docs + - name: Install dependencies + uses: ./.github/actions/setup-runner - name: Build HTML run: | @@ -61,7 +55,10 @@ jobs: response=$(curl -X POST \ -H "Content-Type: application/json" \ - -d "{\"token\": \"$TOKEN\"}" \ + -d "{ + \"token\": \"$TOKEN\", + \"version\": \"$GITHUB_REF_NAME\" + }" \ https://readthedocs.org/api/v2/webhook/llama-stack/289768/) echo "Response: $response" diff --git a/.gitignore b/.gitignore index 0ef25cdf1..747acdc7b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ dev_requirements.txt build .DS_Store llama_stack/configs/* +.cursor/ xcuserdata/ *.hmap .DS_Store @@ -23,3 +24,4 @@ venv/ pytest-report.xml .coverage .python-version +data diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff3bc1250..aaec469e4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,18 @@ repos: args: ['--maxkb=1000'] - id: end-of-file-fixer exclude: '^(.*\.svg)$' + - id: no-commit-to-branch + - id: check-yaml + args: ["--unsafe"] + - id: detect-private-key + - id: requirements-txt-fixer + - id: mixed-line-ending + args: [--fix=lf] # Forces to replace line ending by LF (line feed) + - id: check-executables-have-shebangs + - id: check-json + - id: check-shebang-scripts-are-executable + - id: check-symlinks + - id: check-toml - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.4 @@ -41,7 +53,7 @@ repos: - black==24.3.0 - repo: https://github.com/astral-sh/uv-pre-commit - rev: 0.6.3 + rev: 0.7.8 hooks: - id: uv-lock - id: uv-export @@ -49,6 +61,7 @@ repos: "--frozen", "--no-hashes", "--no-emit-project", + "--no-default-groups", "--output-file=requirements.txt" ] @@ -76,24 +89,29 @@ repos: - id: distro-codegen name: Distribution Template Codegen additional_dependencies: - - uv==0.6.0 - entry: uv run --extra codegen ./scripts/distro_codegen.py + - uv==0.7.8 + entry: uv run --group codegen ./scripts/distro_codegen.py language: python pass_filenames: false require_serial: true files: ^llama_stack/templates/.*$|^llama_stack/providers/.*/inference/.*/models\.py$ - -- repo: local - hooks: - id: openapi-codegen name: API Spec Codegen additional_dependencies: - - uv==0.6.2 - entry: sh -c 'uv run --with ".[dev]" ./docs/openapi_generator/run_openapi_generator.sh > /dev/null' + - uv==0.7.8 + entry: sh -c 'uv run ./docs/openapi_generator/run_openapi_generator.sh > /dev/null' language: python pass_filenames: false require_serial: true files: ^llama_stack/apis/|^docs/openapi_generator/ + - id: check-workflows-use-hashes + name: Check GitHub Actions use SHA-pinned actions + entry: ./scripts/check-workflows-use-hashes.sh + language: system + pass_filenames: false + require_serial: true + always_run: true + files: ^\.github/workflows/.*\.ya?ml$ ci: autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f114dbf9b..461977a6c 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,28 +5,21 @@ # Required version: 2 +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + # Set the OS, Python version and other tools you might need build: os: ubuntu-22.04 tools: python: "3.12" - # You can also specify other tool versions: - # nodejs: "19" - # rust: "1.64" - # golang: "1.19" - -# Build documentation in the "docs/" directory with Sphinx -sphinx: - configuration: docs/source/conf.py - -# Optionally build your 
docs in additional formats such as PDF and ePub -# formats: -# - pdf -# - epub - -# Optional but recommended, declare the Python requirements required -# to build your documentation -# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -python: - install: - - requirements: docs/requirements.txt + jobs: + pre_create_environment: + - asdf plugin add uv + - asdf install uv latest + - asdf global uv latest + create_environment: + - uv venv "${READTHEDOCS_VIRTUALENV_PATH}" + install: + - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --frozen --group docs diff --git a/CHANGELOG.md b/CHANGELOG.md index 5086094ad..f7644a5af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,75 @@ # Changelog +# v0.2.7 +Published on: 2025-05-16T20:38:10Z + +## Highlights + +This is a small update. But a couple highlights: + +* feat: function tools in OpenAI Responses by @bbrowning in https://github.com/meta-llama/llama-stack/pull/2094, getting closer to ready. Streaming is the next missing piece. +* feat: Adding support for customizing chunk context in RAG insertion and querying by @franciscojavierarceo in https://github.com/meta-llama/llama-stack/pull/2134 +* feat: scaffolding for Llama Stack UI by @ehhuang in https://github.com/meta-llama/llama-stack/pull/2149, more to come in the coming releases. + + +--- + +# v0.2.6 +Published on: 2025-05-12T18:06:52Z + + + +--- + +# v0.2.5 +Published on: 2025-05-04T20:16:49Z + + + +--- + +# v0.2.4 +Published on: 2025-04-29T17:26:01Z + +## Highlights + +* One-liner to install and run Llama Stack yay! by @reluctantfuturist in https://github.com/meta-llama/llama-stack/pull/1383 +* support for NVIDIA NeMo datastore by @raspawar in https://github.com/meta-llama/llama-stack/pull/1852 +* (yuge!) Kubernetes authentication by @leseb in https://github.com/meta-llama/llama-stack/pull/1778 +* (yuge!) OpenAI Responses API by @bbrowning in https://github.com/meta-llama/llama-stack/pull/1989 +* add api.llama provider, llama-guard-4 model by @ashwinb in https://github.com/meta-llama/llama-stack/pull/2058 + + +--- + +# v0.2.3 +Published on: 2025-04-25T22:46:21Z + +## Highlights + +* OpenAI compatible inference endpoints and client-SDK support. `client.chat.completions.create()` now works. +* significant improvements and functionality added to the nVIDIA distribution +* many improvements to the test verification suite. +* new inference providers: Ramalama, IBM WatsonX +* many improvements to the Playground UI + + +--- + +# v0.2.2 +Published on: 2025-04-13T01:19:49Z + +## Main changes + +- Bring Your Own Provider (@leseb) - use out-of-tree provider code to execute the distribution server +- OpenAI compatible inference API in progress (@bbrowning) +- Provider verifications (@ehhuang) +- Many updates and fixes to playground +- Several llama4 related fixes + + +--- + # v0.2.1 Published on: 2025-04-05T23:13:00Z @@ -10,10 +80,10 @@ Published on: 2025-04-05T23:13:00Z # v0.2.0 Published on: 2025-04-05T19:04:29Z -## Llama 4 Support - -Checkout more at https://www.llama.com - +## Llama 4 Support + +Checkout more at https://www.llama.com + --- @@ -21,58 +91,58 @@ Checkout more at https://www.llama.com # v0.1.9 Published on: 2025-03-29T00:52:23Z -### Build and Test Agents -* Agents: Entire document context with attachments -* RAG: Documentation with sqlite-vec faiss comparison -* Getting started: Fixes to getting started notebook. 
- -### Agent Evals and Model Customization -* (**New**) Post-training: Add nemo customizer - -### Better Engineering -* Moved sqlite-vec to non-blocking calls -* Don't return a payload on file delete - - +### Build and Test Agents +* Agents: Entire document context with attachments +* RAG: Documentation with sqlite-vec faiss comparison +* Getting started: Fixes to getting started notebook. + +### Agent Evals and Model Customization +* (**New**) Post-training: Add nemo customizer + +### Better Engineering +* Moved sqlite-vec to non-blocking calls +* Don't return a payload on file delete + + --- # v0.1.8 Published on: 2025-03-24T01:28:50Z -# v0.1.8 Release Notes - -### Build and Test Agents -* Safety: Integrated NVIDIA as a safety provider. -* VectorDB: Added Qdrant as an inline provider. -* Agents: Added support for multiple tool groups in agents. -* Agents: Simplified imports for Agents in client package - - -### Agent Evals and Model Customization -* Introduced DocVQA and IfEval benchmarks. - -### Deploying and Monitoring Agents -* Introduced a Containerfile and image workflow for the Playground. -* Implemented support for Bearer (API Key) authentication. -* Added attribute-based access control for resources. -* Fixes on docker deployments: use --pull always and standardized the default port to 8321 -* Deprecated: /v1/inspect/providers use /v1/providers/ instead - -### Better Engineering -* Consolidated scripts under the ./scripts directory. -* Addressed mypy violations in various modules. -* Added Dependabot scans for Python dependencies. -* Implemented a scheduled workflow to update the changelog automatically. -* Enforced concurrency to reduce CI loads. - - -### New Contributors -* @cmodi-meta made their first contribution in https://github.com/meta-llama/llama-stack/pull/1650 -* @jeffmaury made their first contribution in https://github.com/meta-llama/llama-stack/pull/1671 -* @derekhiggins made their first contribution in https://github.com/meta-llama/llama-stack/pull/1698 -* @Bobbins228 made their first contribution in https://github.com/meta-llama/llama-stack/pull/1745 - +# v0.1.8 Release Notes + +### Build and Test Agents +* Safety: Integrated NVIDIA as a safety provider. +* VectorDB: Added Qdrant as an inline provider. +* Agents: Added support for multiple tool groups in agents. +* Agents: Simplified imports for Agents in client package + + +### Agent Evals and Model Customization +* Introduced DocVQA and IfEval benchmarks. + +### Deploying and Monitoring Agents +* Introduced a Containerfile and image workflow for the Playground. +* Implemented support for Bearer (API Key) authentication. +* Added attribute-based access control for resources. +* Fixes on docker deployments: use --pull always and standardized the default port to 8321 +* Deprecated: /v1/inspect/providers use /v1/providers/ instead + +### Better Engineering +* Consolidated scripts under the ./scripts directory. +* Addressed mypy violations in various modules. +* Added Dependabot scans for Python dependencies. +* Implemented a scheduled workflow to update the changelog automatically. +* Enforced concurrency to reduce CI loads. 
+ + +### New Contributors +* @cmodi-meta made their first contribution in https://github.com/meta-llama/llama-stack/pull/1650 +* @jeffmaury made their first contribution in https://github.com/meta-llama/llama-stack/pull/1671 +* @derekhiggins made their first contribution in https://github.com/meta-llama/llama-stack/pull/1698 +* @Bobbins228 made their first contribution in https://github.com/meta-llama/llama-stack/pull/1745 + **Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.7...v0.1.8 --- @@ -80,73 +150,73 @@ Published on: 2025-03-24T01:28:50Z # v0.1.7 Published on: 2025-03-14T22:30:51Z -## 0.1.7 Release Notes - -### Build and Test Agents -* Inference: ImageType is now refactored to LlamaStackImageType -* Inference: Added tests to measure TTFT -* Inference: Bring back usage metrics -* Agents: Added endpoint for get agent, list agents and list sessions -* Agents: Automated conversion of type hints in client tool for lite llm format -* Agents: Deprecated ToolResponseMessage in agent.resume API -* Added Provider API for listing and inspecting provider info - -### Agent Evals and Model Customization -* Eval: Added new eval benchmarks Math 500 and BFCL v3 -* Deploy and Monitoring of Agents -* Telemetry: Fix tracing to work across coroutines - -### Better Engineering -* Display code coverage for unit tests -* Updated call sites (inference, tool calls, agents) to move to async non blocking calls -* Unit tests also run on Python 3.11, 3.12, and 3.13 -* Added ollama inference to Integration tests CI -* Improved documentation across examples, testing, CLI, updated providers table ) - - - +## 0.1.7 Release Notes + +### Build and Test Agents +* Inference: ImageType is now refactored to LlamaStackImageType +* Inference: Added tests to measure TTFT +* Inference: Bring back usage metrics +* Agents: Added endpoint for get agent, list agents and list sessions +* Agents: Automated conversion of type hints in client tool for lite llm format +* Agents: Deprecated ToolResponseMessage in agent.resume API +* Added Provider API for listing and inspecting provider info + +### Agent Evals and Model Customization +* Eval: Added new eval benchmarks Math 500 and BFCL v3 +* Deploy and Monitoring of Agents +* Telemetry: Fix tracing to work across coroutines + +### Better Engineering +* Display code coverage for unit tests +* Updated call sites (inference, tool calls, agents) to move to async non blocking calls +* Unit tests also run on Python 3.11, 3.12, and 3.13 +* Added ollama inference to Integration tests CI +* Improved documentation across examples, testing, CLI, updated providers table ) + + + --- # v0.1.6 Published on: 2025-03-08T04:35:08Z -## 0.1.6 Release Notes - -### Build and Test Agents -* Inference: Fixed support for inline vllm provider -* (**New**) Agent: Build & Monitor Agent Workflows with Llama Stack + Anthropic's Best Practice [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb) -* (**New**) Agent: Revamped agent [documentation](https://llama-stack.readthedocs.io/en/latest/building_applications/agent.html) with more details and examples -* Agent: Unify tools and Python SDK Agents API -* Agent: AsyncAgent Python SDK wrapper supporting async client tool calls -* Agent: Support python functions without @client_tool decorator as client tools -* Agent: deprecation for allow_resume_turn flag, and remove need to specify tool_prompt_format -* VectorIO: MilvusDB support added - -### Agent Evals and Model Customization -* 
(**New**) Agent: Llama Stack RAG Lifecycle [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb) -* Eval: Documentation for eval, scoring, adding new benchmarks -* Eval: Distribution template to run benchmarks on llama & non-llama models -* Eval: Ability to register new custom LLM-as-judge scoring functions -* (**New**) Looking for contributors for open benchmarks. See [documentation](https://llama-stack.readthedocs.io/en/latest/references/evals_reference/index.html#open-benchmark-contributing-guide) for details. - -### Deploy and Monitoring of Agents -* Better support for different log levels across all components for better monitoring - -### Better Engineering -* Enhance OpenAPI spec to include Error types across all APIs -* Moved all tests to /tests and created unit tests to run on each PR -* Removed all dependencies on llama-models repo - +## 0.1.6 Release Notes + +### Build and Test Agents +* Inference: Fixed support for inline vllm provider +* (**New**) Agent: Build & Monitor Agent Workflows with Llama Stack + Anthropic's Best Practice [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb) +* (**New**) Agent: Revamped agent [documentation](https://llama-stack.readthedocs.io/en/latest/building_applications/agent.html) with more details and examples +* Agent: Unify tools and Python SDK Agents API +* Agent: AsyncAgent Python SDK wrapper supporting async client tool calls +* Agent: Support python functions without @client_tool decorator as client tools +* Agent: deprecation for allow_resume_turn flag, and remove need to specify tool_prompt_format +* VectorIO: MilvusDB support added + +### Agent Evals and Model Customization +* (**New**) Agent: Llama Stack RAG Lifecycle [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb) +* Eval: Documentation for eval, scoring, adding new benchmarks +* Eval: Distribution template to run benchmarks on llama & non-llama models +* Eval: Ability to register new custom LLM-as-judge scoring functions +* (**New**) Looking for contributors for open benchmarks. See [documentation](https://llama-stack.readthedocs.io/en/latest/references/evals_reference/index.html#open-benchmark-contributing-guide) for details. 
+ +### Deploy and Monitoring of Agents +* Better support for different log levels across all components for better monitoring + +### Better Engineering +* Enhance OpenAPI spec to include Error types across all APIs +* Moved all tests to /tests and created unit tests to run on each PR +* Removed all dependencies on llama-models repo + --- # v0.1.5.1 Published on: 2025-02-28T22:37:44Z -## 0.1.5.1 Release Notes -* Fixes for security risk in https://github.com/meta-llama/llama-stack/pull/1327 and https://github.com/meta-llama/llama-stack/pull/1328 - +## 0.1.5.1 Release Notes +* Fixes for security risk in https://github.com/meta-llama/llama-stack/pull/1327 and https://github.com/meta-llama/llama-stack/pull/1328 + **Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.5...v0.1.5.1 --- @@ -154,176 +224,176 @@ Published on: 2025-02-28T22:37:44Z # v0.1.5 Published on: 2025-02-28T18:14:01Z -## 0.1.5 Release Notes -### Build Agents -* Inference: Support more non-llama models (openai, anthropic, gemini) -* Inference: Can use the provider's model name in addition to the HF alias -* Inference: Fixed issues with calling tools that weren't specified in the prompt -* RAG: Improved system prompt for RAG and no more need for hard-coded rag-tool calling -* Embeddings: Added support for Nemo retriever embedding models -* Tools: Added support for MCP tools in Ollama Distribution -* Distributions: Added new Groq distribution - -### Customize Models -* Save post-trained checkpoint in SafeTensor format to allow Ollama inference provider to use the post-trained model - -### Monitor agents -* More comprehensive logging of agent steps including client tools -* Telemetry inputs/outputs are now structured and queryable -* Ability to retrieve agents session, turn, step by ids - -### Better Engineering -* Moved executorch Swift code out of this repo into the llama-stack-client-swift repo, similar to kotlin -* Move most logging to use logger instead of prints -* Completed text /chat-completion and /completion tests - +## 0.1.5 Release Notes +### Build Agents +* Inference: Support more non-llama models (openai, anthropic, gemini) +* Inference: Can use the provider's model name in addition to the HF alias +* Inference: Fixed issues with calling tools that weren't specified in the prompt +* RAG: Improved system prompt for RAG and no more need for hard-coded rag-tool calling +* Embeddings: Added support for Nemo retriever embedding models +* Tools: Added support for MCP tools in Ollama Distribution +* Distributions: Added new Groq distribution + +### Customize Models +* Save post-trained checkpoint in SafeTensor format to allow Ollama inference provider to use the post-trained model + +### Monitor agents +* More comprehensive logging of agent steps including client tools +* Telemetry inputs/outputs are now structured and queryable +* Ability to retrieve agents session, turn, step by ids + +### Better Engineering +* Moved executorch Swift code out of this repo into the llama-stack-client-swift repo, similar to kotlin +* Move most logging to use logger instead of prints +* Completed text /chat-completion and /completion tests + --- # v0.1.4 Published on: 2025-02-25T00:02:43Z -## v0.1.4 Release Notes -Here are the key changes coming as part of this release: - -### Build and Test Agents -* Inference: Added support for non-llama models -* Inference: Added option to list all downloaded models and remove models -* Agent: Introduce new api agents.resume_turn to include client side tool execution in the same turn 
-* Agent: AgentConfig introduces new variable “tool_config” that allows for better tool configuration and system prompt overrides -* Agent: Added logging for agent step start and completion times -* Agent: Added support for logging for tool execution metadata -* Embedding: Updated /inference/embeddings to support asymmetric models, truncation and variable sized outputs -* Embedding: Updated embedding models for Ollama, Together, and Fireworks with available defaults -* VectorIO: Improved performance of sqlite-vec using chunked writes -### Agent Evals and Model Customization -* Deprecated api /eval-tasks. Use /eval/benchmark instead -* Added CPU training support for TorchTune -### Deploy and Monitoring of Agents -* Consistent view of client and server tool calls in telemetry -### Better Engineering -* Made tests more data-driven for consistent evaluation -* Fixed documentation links and improved API reference generation -* Various small fixes for build scripts and system reliability - - +## v0.1.4 Release Notes +Here are the key changes coming as part of this release: + +### Build and Test Agents +* Inference: Added support for non-llama models +* Inference: Added option to list all downloaded models and remove models +* Agent: Introduce new api agents.resume_turn to include client side tool execution in the same turn +* Agent: AgentConfig introduces new variable “tool_config” that allows for better tool configuration and system prompt overrides +* Agent: Added logging for agent step start and completion times +* Agent: Added support for logging for tool execution metadata +* Embedding: Updated /inference/embeddings to support asymmetric models, truncation and variable sized outputs +* Embedding: Updated embedding models for Ollama, Together, and Fireworks with available defaults +* VectorIO: Improved performance of sqlite-vec using chunked writes +### Agent Evals and Model Customization +* Deprecated api /eval-tasks. Use /eval/benchmark instead +* Added CPU training support for TorchTune +### Deploy and Monitoring of Agents +* Consistent view of client and server tool calls in telemetry +### Better Engineering +* Made tests more data-driven for consistent evaluation +* Fixed documentation links and improved API reference generation +* Various small fixes for build scripts and system reliability + + --- # v0.1.3 Published on: 2025-02-14T20:24:32Z -## v0.1.3 Release - -Here are some key changes that are coming as part of this release. 
- -### Build and Test Agents -Streamlined the initial development experience -- Added support for llama stack run --image-type venv -- Enhanced vector store options with new sqlite-vec provider and improved Qdrant integration -- vLLM improvements for tool calling and logprobs -- Better handling of sporadic code_interpreter tool calls - -### Agent Evals -Better benchmarking and Agent performance assessment -- Renamed eval API /eval-task to /benchmarks -- Improved documentation and notebooks for RAG and evals - -### Deploy and Monitoring of Agents -Improved production readiness -- Added usage metrics collection for chat completions -- CLI improvements for provider information -- Improved error handling and system reliability -- Better model endpoint handling and accessibility -- Improved signal handling on distro server - -### Better Engineering -Infrastructure and code quality improvements -- Faster text-based chat completion tests -- Improved testing for non-streaming agent apis -- Standardized import formatting with ruff linter -- Added conventional commits standard -- Fixed documentation parsing issues - +## v0.1.3 Release + +Here are some key changes that are coming as part of this release. + +### Build and Test Agents +Streamlined the initial development experience +- Added support for llama stack run --image-type venv +- Enhanced vector store options with new sqlite-vec provider and improved Qdrant integration +- vLLM improvements for tool calling and logprobs +- Better handling of sporadic code_interpreter tool calls + +### Agent Evals +Better benchmarking and Agent performance assessment +- Renamed eval API /eval-task to /benchmarks +- Improved documentation and notebooks for RAG and evals + +### Deploy and Monitoring of Agents +Improved production readiness +- Added usage metrics collection for chat completions +- CLI improvements for provider information +- Improved error handling and system reliability +- Better model endpoint handling and accessibility +- Improved signal handling on distro server + +### Better Engineering +Infrastructure and code quality improvements +- Faster text-based chat completion tests +- Improved testing for non-streaming agent apis +- Standardized import formatting with ruff linter +- Added conventional commits standard +- Fixed documentation parsing issues + --- # v0.1.2 Published on: 2025-02-07T22:06:49Z -# TL;DR -- Several stabilizations to development flows after the switch to `uv` -- Migrated CI workflows to new OSS repo - [llama-stack-ops](https://github.com/meta-llama/llama-stack-ops) -- Added automated rebuilds for ReadTheDocs -- Llama Stack server supports HTTPS -- Added system prompt overrides support -- Several bug fixes and improvements to documentation (check out Kubernetes deployment guide by @terrytangyuan ) - +# TL;DR +- Several stabilizations to development flows after the switch to `uv` +- Migrated CI workflows to new OSS repo - [llama-stack-ops](https://github.com/meta-llama/llama-stack-ops) +- Added automated rebuilds for ReadTheDocs +- Llama Stack server supports HTTPS +- Added system prompt overrides support +- Several bug fixes and improvements to documentation (check out Kubernetes deployment guide by @terrytangyuan ) + --- # v0.1.1 Published on: 2025-02-02T02:29:24Z -A bunch of small / big improvements everywhere including support for Windows, switching to `uv` and many provider improvements. - +A bunch of small / big improvements everywhere including support for Windows, switching to `uv` and many provider improvements. 
+ --- # v0.1.0 Published on: 2025-01-24T17:47:47Z -We are excited to announce a stable API release of Llama Stack, which enables developers to build RAG applications and Agents using tools and safety shields, monitor and those agents with telemetry, and evaluate the agent with scoring functions. - -## Context -GenAI application developers need more than just an LLM - they need to integrate tools, connect with their data sources, establish guardrails, and ground the LLM responses effectively. Currently, developers must piece together various tools and APIs, complicating the development lifecycle and increasing costs. The result is that developers are spending more time on these integrations rather than focusing on the application logic itself. The bespoke coupling of components also makes it challenging to adopt state-of-the-art solutions in the rapidly evolving GenAI space. This is particularly difficult for open models like Llama, as best practices are not widely established in the open. - -Llama Stack was created to provide developers with a comprehensive and coherent interface that simplifies AI application development and codifies best practices across the Llama ecosystem. Since our launch in September 2024, we have seen a huge uptick in interest in Llama Stack APIs by both AI developers and from partners building AI services with Llama models. Partners like Nvidia, Fireworks, and Ollama have collaborated with us to develop implementations across various APIs, including inference, memory, and safety. - -With Llama Stack, you can easily build a RAG agent which can also search the web, do complex math, and custom tool calling. You can use telemetry to inspect those traces, and convert telemetry into evals datasets. And with Llama Stack’s plugin architecture and prepackage distributions, you choose to run your agent anywhere - in the cloud with our partners, deploy your own environment using virtualenv, conda, or Docker, operate locally with Ollama, or even run on mobile devices with our SDKs. Llama Stack offers unprecedented flexibility while also simplifying the developer experience. - -## Release -After iterating on the APIs for the last 3 months, today we’re launching a stable release (V1) of the Llama Stack APIs and the corresponding llama-stack server and client packages(v0.1.0). We now have automated tests for providers. These tests make sure that all provider implementations are verified. Developers can now easily and reliably select distributions or providers based on their specific requirements. - -There are example standalone apps in llama-stack-apps. 
- - -## Key Features of this release - -- **Unified API Layer** - - Inference: Run LLM models - - RAG: Store and retrieve knowledge for RAG - - Agents: Build multi-step agentic workflows - - Tools: Register tools that can be called by the agent - - Safety: Apply content filtering and safety policies - - Evaluation: Test model and agent quality - - Telemetry: Collect and analyze usage data and complex agentic traces - - Post Training ( Coming Soon ): Fine tune models for specific use cases - -- **Rich Provider Ecosystem** - - Local Development: Meta's Reference, Ollama - - Cloud: Fireworks, Together, Nvidia, AWS Bedrock, Groq, Cerebras - - On-premises: Nvidia NIM, vLLM, TGI, Dell-TGI - - On-device: iOS and Android support - -- **Built for Production** - - Pre-packaged distributions for common deployment scenarios - - Backwards compatibility across model versions - - Comprehensive evaluation capabilities - - Full observability and monitoring - -- **Multiple developer interfaces** - - CLI: Command line interface - - Python SDK - - Swift iOS SDK - - Kotlin Android SDK - -- **Sample llama stack applications** - - Python - - iOS - - Android - - +We are excited to announce a stable API release of Llama Stack, which enables developers to build RAG applications and Agents using tools and safety shields, monitor and those agents with telemetry, and evaluate the agent with scoring functions. + +## Context +GenAI application developers need more than just an LLM - they need to integrate tools, connect with their data sources, establish guardrails, and ground the LLM responses effectively. Currently, developers must piece together various tools and APIs, complicating the development lifecycle and increasing costs. The result is that developers are spending more time on these integrations rather than focusing on the application logic itself. The bespoke coupling of components also makes it challenging to adopt state-of-the-art solutions in the rapidly evolving GenAI space. This is particularly difficult for open models like Llama, as best practices are not widely established in the open. + +Llama Stack was created to provide developers with a comprehensive and coherent interface that simplifies AI application development and codifies best practices across the Llama ecosystem. Since our launch in September 2024, we have seen a huge uptick in interest in Llama Stack APIs by both AI developers and from partners building AI services with Llama models. Partners like Nvidia, Fireworks, and Ollama have collaborated with us to develop implementations across various APIs, including inference, memory, and safety. + +With Llama Stack, you can easily build a RAG agent which can also search the web, do complex math, and custom tool calling. You can use telemetry to inspect those traces, and convert telemetry into evals datasets. And with Llama Stack’s plugin architecture and prepackage distributions, you choose to run your agent anywhere - in the cloud with our partners, deploy your own environment using virtualenv, conda, or Docker, operate locally with Ollama, or even run on mobile devices with our SDKs. Llama Stack offers unprecedented flexibility while also simplifying the developer experience. + +## Release +After iterating on the APIs for the last 3 months, today we’re launching a stable release (V1) of the Llama Stack APIs and the corresponding llama-stack server and client packages(v0.1.0). We now have automated tests for providers. These tests make sure that all provider implementations are verified. 
Developers can now easily and reliably select distributions or providers based on their specific requirements. + +There are example standalone apps in llama-stack-apps. + + +## Key Features of this release + +- **Unified API Layer** + - Inference: Run LLM models + - RAG: Store and retrieve knowledge for RAG + - Agents: Build multi-step agentic workflows + - Tools: Register tools that can be called by the agent + - Safety: Apply content filtering and safety policies + - Evaluation: Test model and agent quality + - Telemetry: Collect and analyze usage data and complex agentic traces + - Post Training ( Coming Soon ): Fine tune models for specific use cases + +- **Rich Provider Ecosystem** + - Local Development: Meta's Reference, Ollama + - Cloud: Fireworks, Together, Nvidia, AWS Bedrock, Groq, Cerebras + - On-premises: Nvidia NIM, vLLM, TGI, Dell-TGI + - On-device: iOS and Android support + +- **Built for Production** + - Pre-packaged distributions for common deployment scenarios + - Backwards compatibility across model versions + - Comprehensive evaluation capabilities + - Full observability and monitoring + +- **Multiple developer interfaces** + - CLI: Command line interface + - Python SDK + - Swift iOS SDK + - Kotlin Android SDK + +- **Sample llama stack applications** + - Python + - iOS + - Android + + --- @@ -337,8 +407,8 @@ Published on: 2025-01-22T22:24:01Z # v0.0.63 Published on: 2024-12-18T07:17:43Z -A small but important bug-fix release to update the URL datatype for the client-SDKs. The issue affected multimodal agentic turns especially. - +A small but important bug-fix release to update the URL datatype for the client-SDKs. The issue affected multimodal agentic turns especially. + **Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.0.62...v0.0.63 --- @@ -374,39 +444,39 @@ Published on: 2024-11-22T00:36:09Z # v0.0.53 Published on: 2024-11-20T22:18:00Z -🚀 Initial Release Notes for Llama Stack! - -### Added -- Resource-oriented design for models, shields, memory banks, datasets and eval tasks -- Persistence for registered objects with distribution -- Ability to persist memory banks created for FAISS -- PostgreSQL KVStore implementation -- Environment variable placeholder support in run.yaml files -- Comprehensive Zero-to-Hero notebooks and quickstart guides -- Support for quantized models in Ollama -- Vision models support for Together, Fireworks, Meta-Reference, and Ollama, and vLLM -- Bedrock distribution with safety shields support -- Evals API with task registration and scoring functions -- MMLU and SimpleQA benchmark scoring functions -- Huggingface dataset provider integration for benchmarks -- Support for custom dataset registration from local paths -- Benchmark evaluation CLI tools with visualization tables -- RAG evaluation scoring functions and metrics -- Local persistence for datasets and eval tasks - -### Changed -- Split safety into distinct providers (llama-guard, prompt-guard, code-scanner) -- Changed provider naming convention (`impls` → `inline`, `adapters` → `remote`) -- Updated API signatures for dataset and eval task registration -- Restructured folder organization for providers -- Enhanced Docker build configuration -- Added version prefixing for REST API routes -- Enhanced evaluation task registration workflow -- Improved benchmark evaluation output formatting -- Restructured evals folder organization for better modularity - -### Removed -- `llama stack configure` command - +🚀 Initial Release Notes for Llama Stack! 
+ +### Added +- Resource-oriented design for models, shields, memory banks, datasets and eval tasks +- Persistence for registered objects with distribution +- Ability to persist memory banks created for FAISS +- PostgreSQL KVStore implementation +- Environment variable placeholder support in run.yaml files +- Comprehensive Zero-to-Hero notebooks and quickstart guides +- Support for quantized models in Ollama +- Vision model support for Together, Fireworks, Meta-Reference, Ollama, and vLLM +- Bedrock distribution with safety shields support +- Evals API with task registration and scoring functions +- MMLU and SimpleQA benchmark scoring functions +- Huggingface dataset provider integration for benchmarks +- Support for custom dataset registration from local paths +- Benchmark evaluation CLI tools with visualization tables +- RAG evaluation scoring functions and metrics +- Local persistence for datasets and eval tasks + +### Changed +- Split safety into distinct providers (llama-guard, prompt-guard, code-scanner) +- Changed provider naming convention (`impls` → `inline`, `adapters` → `remote`) +- Updated API signatures for dataset and eval task registration +- Restructured folder organization for providers +- Enhanced Docker build configuration +- Added version prefixing for REST API routes +- Enhanced evaluation task registration workflow +- Improved benchmark evaluation output formatting +- Restructured evals folder organization for better modularity + +### Removed +- `llama stack configure` command + --- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5828250d0..10e3f6cee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -110,25 +110,9 @@ uv run pre-commit run --all-files > [!CAUTION] > Before pushing your changes, make sure that the pre-commit hooks have passed successfully. -## Running unit tests +## Running tests -You can run the unit tests by running: - -```bash -source .venv/bin/activate -./scripts/unit-tests.sh -``` - -If you'd like to run for a non-default version of Python (currently 3.10), pass `PYTHON_VERSION` variable as follows: - -``` -source .venv/bin/activate -PYTHON_VERSION=3.13 ./scripts/unit-tests.sh -``` - -## Running integration tests - -You can run integration tests following the instructions [here](tests/integration/README.md). +You can find the Llama Stack testing documentation [here](tests/README.md). ## Adding a new dependency to the project @@ -141,11 +125,20 @@ uv sync ## Coding Style -* Comments should provide meaningful insights into the code. Avoid filler comments that simply describe the next step, as they create unnecessary clutter, same goes for docstrings. -* Prefer comments to clarify surprising behavior and/or relationships between parts of the code rather than explain what the next line of code does. -* Catching exceptions, prefer using a specific exception type rather than a broad catch-all like `Exception`. +* Comments should provide meaningful insights into the code. Avoid filler comments that simply + describe the next step, as they create unnecessary clutter; the same goes for docstrings. +* Prefer comments to clarify surprising behavior and/or relationships between parts of the code + rather than explain what the next line of code does. +* When catching exceptions, prefer using a specific exception type rather than a broad catch-all like + `Exception`. * Error messages should be prefixed with "Failed to ..."
-* 4 spaces for indentation rather than tabs +* 4 spaces for indentation rather than tabs +* When using `# noqa` to suppress a style or linter warning, include a comment explaining the + justification for bypassing the check. +* When using `# type: ignore` to suppress a mypy warning, include a comment explaining the + justification for bypassing the check. +* Don't use Unicode characters in the codebase. ASCII-only is preferred for compatibility and + readability reasons. ## Common Tasks @@ -174,14 +167,11 @@ If you have made changes to a provider's configuration in any form (introducing If you are making changes to the documentation at [https://llama-stack.readthedocs.io/en/latest/](https://llama-stack.readthedocs.io/en/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme. ```bash -cd docs -uv sync --extra docs - # This rebuilds the documentation pages. -uv run make html +uv run --group docs make -C docs/ html # This will start a local server (usually at http://127.0.0.1:8000) that automatically rebuilds and refreshes when you make changes to the documentation. -uv run sphinx-autobuild source build/html --write-all +uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all ``` ### Update API Documentation @@ -189,7 +179,7 @@ uv run sphinx-autobuild source build/html --write-all If you modify or add new API endpoints, update the API documentation accordingly. You can do this by running the following command: ```bash -uv run --with ".[dev]" ./docs/openapi_generator/run_openapi_generator.sh +uv run ./docs/openapi_generator/run_openapi_generator.sh ``` The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing. diff --git a/MANIFEST.in b/MANIFEST.in index 879a9cbd4..88bd11767 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,4 @@ include pyproject.toml -include llama_stack/templates/dependencies.json include llama_stack/models/llama/llama3/tokenizer.model include llama_stack/models/llama/llama4/tokenizer.model include llama_stack/distribution/*.sh diff --git a/README.md b/README.md index 9a4f1a849..37f1aa0f3 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Unit Tests](https://github.com/meta-llama/llama-stack/actions/workflows/unit-tests.yml/badge.svg?branch=main)](https://github.com/meta-llama/llama-stack/actions/workflows/unit-tests.yml?query=branch%3Amain) [![Integration Tests](https://github.com/meta-llama/llama-stack/actions/workflows/integration-tests.yml/badge.svg?branch=main)](https://github.com/meta-llama/llama-stack/actions/workflows/integration-tests.yml?query=branch%3Amain) -[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb) +[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb) | [**Discord**](https://discord.gg/llama-stack) ### ✨🎉 Llama 4 Support 🎉✨ We released [Version 0.2.0](https://github.com/meta-llama/llama-stack/releases/tag/v0.2.0) with support for the Llama 4 herd of models released by Meta.
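The Coding Style additions in the CONTRIBUTING.md hunk above ask that every `# noqa` and `# type: ignore` carry a comment justifying the suppression. A minimal sketch of that convention is shown below; the module and symbol names are hypothetical and not taken from the Llama Stack codebase.

```python
# Hypothetical sketch of the suppression-comment convention described above;
# the names here are illustrative only.
import importlib
from types import ModuleType
from typing import Optional

# Unused-import suppression with its justification: the symbol is re-exported
# for callers of this (hypothetical) module rather than used here directly.
from importlib import metadata  # noqa: F401  # re-exported as part of the public surface


def load_optional_backend(name: str) -> Optional[ModuleType]:
    """Return the named module if it is installed, otherwise None."""
    try:
        return importlib.import_module(name)
    except ImportError:
        return None


faiss = load_optional_backend("faiss")

# The type checker cannot prove this attribute access is safe because `faiss`
# may be None, so the ignore records the runtime guard instead of being left bare.
index_cls = faiss.IndexFlatL2 if faiss is not None else None  # type: ignore  # guarded by the `is not None` check
```

Pairing each suppression with its reason keeps the pre-commit checks quiet without hiding the rationale from reviewers.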
@@ -70,6 +70,13 @@ As more providers start supporting Llama 4, you can use them in Llama Stack as w +### 🚀 One-Line Installer 🚀 + +To try Llama Stack locally, run: + +```bash +curl -LsSf https://github.com/meta-llama/llama-stack/raw/main/install.sh | sh +``` ### Overview @@ -100,26 +107,29 @@ By reducing friction and complexity, Llama Stack empowers developers to focus on ### API Providers Here is a list of the various API providers and available distributions that can help developers get started easily with Llama Stack. -| **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** | -|:------------------------:|:----------------------:|:----------:|:-------------:|:----------:|:----------:|:-------------:| -| Meta Reference | Single Node | ✅ | ✅ | ✅ | ✅ | ✅ | -| SambaNova | Hosted | | ✅ | | | | -| Cerebras | Hosted | | ✅ | | | | -| Fireworks | Hosted | ✅ | ✅ | ✅ | | | -| AWS Bedrock | Hosted | | ✅ | | ✅ | | -| Together | Hosted | ✅ | ✅ | | ✅ | | -| Groq | Hosted | | ✅ | | | | -| Ollama | Single Node | | ✅ | | | | -| TGI | Hosted and Single Node | | ✅ | | | | -| NVIDIA NIM | Hosted and Single Node | | ✅ | | | | -| Chroma | Single Node | | | ✅ | | | -| PG Vector | Single Node | | | ✅ | | | -| PyTorch ExecuTorch | On-device iOS | ✅ | ✅ | | | | -| vLLM | Hosted and Single Node | | ✅ | | | | -| OpenAI | Hosted | | ✅ | | | | -| Anthropic | Hosted | | ✅ | | | | -| Gemini | Hosted | | ✅ | | | | -| watsonx | Hosted | | ✅ | | | | +| **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** | **Post Training** | +|:------------------------:|:----------------------:|:----------:|:-------------:|:----------:|:----------:|:-------------:|:-----------------:| +| Meta Reference | Single Node | ✅ | ✅ | ✅ | ✅ | ✅ | | +| SambaNova | Hosted | | ✅ | | ✅ | | | +| Cerebras | Hosted | | ✅ | | | | | +| Fireworks | Hosted | ✅ | ✅ | ✅ | | | | +| AWS Bedrock | Hosted | | ✅ | | ✅ | | | +| Together | Hosted | ✅ | ✅ | | ✅ | | | +| Groq | Hosted | | ✅ | | | | | +| Ollama | Single Node | | ✅ | | | | | +| TGI | Hosted and Single Node | | ✅ | | | | | +| NVIDIA NIM | Hosted and Single Node | | ✅ | | | | | +| Chroma | Single Node | | | ✅ | | | | +| PG Vector | Single Node | | | ✅ | | | | +| PyTorch ExecuTorch | On-device iOS | ✅ | ✅ | | | | | +| vLLM | Hosted and Single Node | | ✅ | | | | | +| OpenAI | Hosted | | ✅ | | | | | +| Anthropic | Hosted | | ✅ | | | | | +| Gemini | Hosted | | ✅ | | | | | +| watsonx | Hosted | | ✅ | | | | | +| HuggingFace | Single Node | | | | | | ✅ | +| TorchTune | Single Node | | | | | | ✅ | +| NVIDIA NEMO | Hosted | | | | | | ✅ | ### Distributions diff --git a/docs/_static/css/my_theme.css b/docs/_static/css/my_theme.css index a587f866d..d078ec057 100644 --- a/docs/_static/css/my_theme.css +++ b/docs/_static/css/my_theme.css @@ -27,3 +27,9 @@ pre { white-space: pre-wrap !important; word-break: break-all; } + +[data-theme="dark"] .mermaid { + background-color: #f4f4f6 !important; + border-radius: 6px; + padding: 0.5em; + } diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html index 4c5393947..d88462909 100644 --- a/docs/_static/llama-stack-spec.html +++ b/docs/_static/llama-stack-spec.html @@ -62,11 +62,12 @@ "tags": [ "DatasetIO" ], - "description": "", + "description": "Append rows to a dataset.", "parameters": [ { "name": "dataset_id", "in": "path", + "description": "The ID of the dataset to append the rows to.", "required": true, "schema": { "type": 
"string" @@ -89,7 +90,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A BatchChatCompletionResponse with the full completions.", "content": { "application/json": { "schema": { @@ -114,7 +115,7 @@ "tags": [ "Inference" ], - "description": "", + "description": "Generate chat completions for a batch of messages using the specified model.", "parameters": [], "requestBody": { "content": { @@ -132,7 +133,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A BatchCompletionResponse with the full completions.", "content": { "application/json": { "schema": { @@ -157,7 +158,7 @@ "tags": [ "Inference" ], - "description": "", + "description": "Generate completions for a batch of content using the specified model.", "parameters": [], "requestBody": { "content": { @@ -193,7 +194,7 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": "", + "description": "Cancel a training job.", "parameters": [], "requestBody": { "content": { @@ -211,7 +212,7 @@ "post": { "responses": { "200": { - "description": "If stream=False, returns a ChatCompletionResponse with the full completion. If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk", + "description": "If stream=False, returns a ChatCompletionResponse with the full completion. If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk.", "content": { "application/json": { "schema": { @@ -259,7 +260,7 @@ "post": { "responses": { "200": { - "description": "If stream=False, returns a CompletionResponse with the full completion. If stream=True, returns an SSE event stream of CompletionResponseStreamChunk", + "description": "If stream=False, returns a CompletionResponse with the full completion. If stream=True, returns an SSE event stream of CompletionResponseStreamChunk.", "content": { "application/json": { "schema": { @@ -307,11 +308,11 @@ "get": { "responses": { "200": { - "description": "A ListAgentsResponse.", + "description": "A PaginatedResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ListAgentsResponse" + "$ref": "#/components/schemas/PaginatedResponse" } } } @@ -333,7 +334,26 @@ "Agents" ], "description": "List all agents.", - "parameters": [] + "parameters": [ + { + "name": "start_index", + "in": "query", + "description": "The index to start the pagination from.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of agents to return.", + "required": false, + "schema": { + "type": "integer" + } + } + ] }, "post": { "responses": { @@ -434,7 +454,7 @@ "post": { "responses": { "200": { - "description": "If stream=False, returns a Turn object. If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk", + "description": "If stream=False, returns a Turn object. 
If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk.", "content": { "application/json": { "schema": { @@ -497,11 +517,127 @@ } } }, + "/v1/openai/v1/responses": { + "get": { + "responses": { + "200": { + "description": "A ListOpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Agents" + ], + "description": "List all OpenAI responses.", + "parameters": [ + { + "name": "after", + "in": "query", + "description": "The ID of the last response to return.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of responses to return.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "model", + "in": "query", + "description": "The model to filter responses by.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "order", + "in": "query", + "description": "The order to sort responses by when sorted by created_at ('asc' or 'desc').", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order" + } + } + ] + }, + "post": { + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + }, + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObjectStream" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Agents" + ], + "description": "Create a new OpenAI response.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateOpenaiResponseRequest" + } + } + }, + "required": true + } + } + }, "/v1/files": { "get": { "responses": { "200": { - "description": "OK", + "description": "A ListBucketResponse.", "content": { "application/json": { "schema": { @@ -531,6 +667,7 @@ { "name": "bucket", "in": "query", + "description": "Bucket name (valid chars: a-zA-Z0-9_-).", "required": true, "schema": { "type": "string" @@ -541,7 +678,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A FileUploadResponse.", "content": { "application/json": { "schema": { @@ -643,7 +780,7 @@ "tags": [ "Agents" ], - "description": "Delete an agent by its ID.", + "description": "Delete an agent by its ID and its associated sessions and turns.", "parameters": [ { "name": "agent_id", @@ -661,7 +798,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Session.", "content": { "application/json": { "schema": { @@ -741,7 +878,7 @@ "tags": [ "Agents" ], - "description": "Delete an agent session by its ID.", + "description": "Delete an agent session by its ID and its associated turns.", "parameters": [ { "name": "session_id", @@ -768,7 +905,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A FileResponse.", "content": { 
"application/json": { "schema": { @@ -798,7 +935,7 @@ { "name": "bucket", "in": "path", - "description": "Bucket name (valid chars: a-zA-Z0-9_-)", + "description": "Bucket name (valid chars: a-zA-Z0-9_-).", "required": true, "schema": { "type": "string" @@ -807,7 +944,7 @@ { "name": "key", "in": "path", - "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.).", "required": true, "schema": { "type": "string" @@ -841,7 +978,7 @@ { "name": "bucket", "in": "path", - "description": "Bucket name (valid chars: a-zA-Z0-9_-)", + "description": "Bucket name (valid chars: a-zA-Z0-9_-).", "required": true, "schema": { "type": "string" @@ -850,7 +987,7 @@ { "name": "key", "in": "path", - "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)", + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.).", "required": true, "schema": { "type": "string" @@ -863,7 +1000,7 @@ "post": { "responses": { "200": { - "description": "An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}", + "description": "An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}.", "content": { "application/json": { "schema": { @@ -906,7 +1043,7 @@ "post": { "responses": { "200": { - "description": "EvaluateResponse object containing generations and scores", + "description": "EvaluateResponse object containing generations and scores.", "content": { "application/json": { "schema": { @@ -1090,7 +1227,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Benchmark.", "content": { "application/json": { "schema": { @@ -1115,11 +1252,55 @@ "tags": [ "Benchmarks" ], - "description": "", + "description": "Get a benchmark by its ID.", "parameters": [ { "name": "benchmark_id", "in": "path", + "description": "The ID of the benchmark to get.", + "required": true, + "schema": { + "type": "string" + } + } + ] + } + }, + "/v1/openai/v1/chat/completions/{completion_id}": { + "get": { + "responses": { + "200": { + "description": "A OpenAICompletionWithInputMessages.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inference" + ], + "description": "Describe a chat completion by its ID.", + "parameters": [ + { + "name": "completion_id", + "in": "path", + "description": "ID of the chat completion.", "required": true, "schema": { "type": "string" @@ -1132,7 +1313,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Dataset.", "content": { "application/json": { "schema": { @@ -1157,11 +1338,12 @@ "tags": [ "Datasets" ], - "description": "", + "description": "Get a dataset by its ID.", "parameters": [ { "name": "dataset_id", "in": "path", + "description": "The ID of the dataset to get.", "required": true, "schema": { "type": "string" @@ -1190,11 +1372,12 @@ "tags": [ "Datasets" ], - "description": 
"", + "description": "Unregister a dataset by its ID.", "parameters": [ { "name": "dataset_id", "in": "path", + "description": "The ID of the dataset to unregister.", "required": true, "schema": { "type": "string" @@ -1207,7 +1390,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Model.", "content": { "application/json": { "schema": { @@ -1232,11 +1415,12 @@ "tags": [ "Models" ], - "description": "", + "description": "Get a model by its identifier.", "parameters": [ { "name": "model_id", "in": "path", + "description": "The identifier of the model to get.", "required": true, "schema": { "type": "string" @@ -1265,11 +1449,55 @@ "tags": [ "Models" ], - "description": "", + "description": "Unregister a model.", "parameters": [ { "name": "model_id", "in": "path", + "description": "The identifier of the model to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ] + } + }, + "/v1/openai/v1/responses/{response_id}": { + "get": { + "responses": { + "200": { + "description": "An OpenAIResponseObject.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIResponseObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Agents" + ], + "description": "Retrieve an OpenAI response by its ID.", + "parameters": [ + { + "name": "response_id", + "in": "path", + "description": "The ID of the OpenAI response to retrieve.", "required": true, "schema": { "type": "string" @@ -1282,7 +1510,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A ScoringFn.", "content": { "application/json": { "schema": { @@ -1307,11 +1535,12 @@ "tags": [ "ScoringFunctions" ], - "description": "", + "description": "Get a scoring function by its ID.", "parameters": [ { "name": "scoring_fn_id", "in": "path", + "description": "The ID of the scoring function to get.", "required": true, "schema": { "type": "string" @@ -1324,7 +1553,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Shield.", "content": { "application/json": { "schema": { @@ -1349,11 +1578,12 @@ "tags": [ "Shields" ], - "description": "", + "description": "Get a shield by its identifier.", "parameters": [ { "name": "identifier", "in": "path", + "description": "The identifier of the shield to get.", "required": true, "schema": { "type": "string" @@ -1366,7 +1596,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Span.", "content": { "application/json": { "schema": { @@ -1391,11 +1621,12 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Get a span by its ID.", "parameters": [ { "name": "trace_id", "in": "path", + "description": "The ID of the trace to get the span from.", "required": true, "schema": { "type": "string" @@ -1404,6 +1635,7 @@ { "name": "span_id", "in": "path", + "description": "The ID of the span to get.", "required": true, "schema": { "type": "string" @@ -1416,7 +1648,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A QuerySpanTreeResponse.", "content": { "application/json": { "schema": { @@ -1441,11 +1673,12 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Get a span tree by its ID.", "parameters": [ { "name": "span_id", "in": "path", + "description": "The 
ID of the span to get the tree from.", "required": true, "schema": { "type": "string" @@ -1468,7 +1701,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Tool.", "content": { "application/json": { "schema": { @@ -1493,11 +1726,12 @@ "tags": [ "ToolGroups" ], - "description": "", + "description": "Get a tool by its name.", "parameters": [ { "name": "tool_name", "in": "path", + "description": "The name of the tool to get.", "required": true, "schema": { "type": "string" @@ -1510,7 +1744,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A ToolGroup.", "content": { "application/json": { "schema": { @@ -1535,11 +1769,12 @@ "tags": [ "ToolGroups" ], - "description": "", + "description": "Get a tool group by its ID.", "parameters": [ { "name": "toolgroup_id", "in": "path", + "description": "The ID of the tool group to get.", "required": true, "schema": { "type": "string" @@ -1568,11 +1803,12 @@ "tags": [ "ToolGroups" ], - "description": "Unregister a tool group", + "description": "Unregister a tool group.", "parameters": [ { "name": "toolgroup_id", "in": "path", + "description": "The ID of the tool group to unregister.", "required": true, "schema": { "type": "string" @@ -1585,7 +1821,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A Trace.", "content": { "application/json": { "schema": { @@ -1610,11 +1846,12 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Get a trace by its ID.", "parameters": [ { "name": "trace_id", "in": "path", + "description": "The ID of the trace to get.", "required": true, "schema": { "type": "string" @@ -1627,7 +1864,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A PostTrainingJobArtifactsResponse.", "content": { "application/json": { "schema": { @@ -1652,11 +1889,12 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": "", + "description": "Get the artifacts of a training job.", "parameters": [ { "name": "job_uuid", "in": "query", + "description": "The UUID of the job to get the artifacts of.", "required": true, "schema": { "type": "string" @@ -1669,7 +1907,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A PostTrainingJobStatusResponse.", "content": { "application/json": { "schema": { @@ -1694,11 +1932,12 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": "", + "description": "Get the status of a training job.", "parameters": [ { "name": "job_uuid", "in": "query", + "description": "The UUID of the job to get the status of.", "required": true, "schema": { "type": "string" @@ -1711,7 +1950,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A ListPostTrainingJobsResponse.", "content": { "application/json": { "schema": { @@ -1736,7 +1975,7 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": "", + "description": "Get all training jobs.", "parameters": [] } }, @@ -1744,7 +1983,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A FileUploadResponse.", "content": { "application/json": { "schema": { @@ -1769,12 +2008,12 @@ "tags": [ "Files" ], - "description": "Returns information about an existsing upload session", + "description": "Returns information about an existsing upload session.", "parameters": [ { "name": "upload_id", "in": "path", - "description": "ID of the upload session", + "description": "ID of the upload session.", "required": true, "schema": { "type": "string" @@ -1785,7 +2024,7 @@ "post": { "responses": { 
"200": { - "description": "OK", + "description": "A FileResponse or None if the upload is not complete.", "content": { "application/json": { "schema": { @@ -1822,7 +2061,7 @@ { "name": "upload_id", "in": "path", - "description": "ID of the upload session", + "description": "ID of the upload session.", "required": true, "schema": { "type": "string" @@ -1846,7 +2085,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A VectorDB.", "content": { "application/json": { "schema": { @@ -1871,11 +2110,12 @@ "tags": [ "VectorDBs" ], - "description": "", + "description": "Get a vector database by its identifier.", "parameters": [ { "name": "vector_db_id", "in": "path", + "description": "The identifier of the vector database to get.", "required": true, "schema": { "type": "string" @@ -1904,11 +2144,12 @@ "tags": [ "VectorDBs" ], - "description": "", + "description": "Unregister a vector database.", "parameters": [ { "name": "vector_db_id", "in": "path", + "description": "The identifier of the vector database to unregister.", "required": true, "schema": { "type": "string" @@ -1921,7 +2162,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A HealthInfo.", "content": { "application/json": { "schema": { @@ -1946,7 +2187,7 @@ "tags": [ "Inspect" ], - "description": "", + "description": "Get the health of the service.", "parameters": [] } }, @@ -2008,7 +2249,7 @@ "tags": [ "VectorIO" ], - "description": "", + "description": "Insert chunks into a vector database.", "parameters": [], "requestBody": { "content": { @@ -2026,7 +2267,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A ProviderInfo object containing the provider's details.", "content": { "application/json": { "schema": { @@ -2051,11 +2292,12 @@ "tags": [ "Providers" ], - "description": "", + "description": "Get detailed information about a specific provider.", "parameters": [ { "name": "provider_id", "in": "path", + "description": "The ID of the provider to inspect.", "required": true, "schema": { "type": "string" @@ -2068,7 +2310,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A ToolInvocationResult.", "content": { "application/json": { "schema": { @@ -2093,7 +2335,7 @@ "tags": [ "ToolRuntime" ], - "description": "Run a tool with the given arguments", + "description": "Run a tool with the given arguments.", "parameters": [], "requestBody": { "content": { @@ -2111,7 +2353,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A PaginatedResponse.", "content": { "application/json": { "schema": { @@ -2136,7 +2378,7 @@ "tags": [ "DatasetIO" ], - "description": "Get a paginated list of rows from a dataset.\nUses offset-based pagination where:\n- start_index: The starting index (0-based). If None, starts from beginning.\n- limit: Number of items to return. If None or -1, returns all items.\n\nThe response includes:\n- data: List of items for the current page\n- has_more: Whether there are more items available after this set", + "description": "Get a paginated list of rows from a dataset.\nUses offset-based pagination where:\n- start_index: The starting index (0-based). If None, starts from beginning.\n- limit: Number of items to return. 
If None or -1, returns all items.\n\nThe response includes:\n- data: List of items for the current page.\n- has_more: Whether there are more items available after this set.", "parameters": [ { "name": "dataset_id", @@ -2172,7 +2414,7 @@ "get": { "responses": { "200": { - "description": "The status of the evaluationjob.", + "description": "The status of the evaluation job.", "content": { "application/json": { "schema": { @@ -2319,11 +2561,11 @@ "get": { "responses": { "200": { - "description": "A ListAgentSessionsResponse.", + "description": "A PaginatedResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ListAgentSessionsResponse" + "$ref": "#/components/schemas/PaginatedResponse" } } } @@ -2354,6 +2596,24 @@ "schema": { "type": "string" } + }, + { + "name": "start_index", + "in": "query", + "description": "The index to start the pagination from.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "limit", + "in": "query", + "description": "The number of sessions to return.", + "required": false, + "schema": { + "type": "integer" + } } ] } @@ -2362,7 +2622,7 @@ "get": { "responses": { "200": { - "description": "OK", + "description": "A ListBenchmarksResponse.", "content": { "application/json": { "schema": { @@ -2387,7 +2647,7 @@ "tags": [ "Benchmarks" ], - "description": "", + "description": "List all benchmarks.", "parameters": [] }, "post": { @@ -2411,7 +2671,7 @@ "tags": [ "Benchmarks" ], - "description": "", + "description": "Register a benchmark.", "parameters": [], "requestBody": { "content": { @@ -2425,678 +2685,79 @@ } } }, - "/v1/datasets": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListDatasetsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "description": "", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Dataset" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "description": "Register a new dataset.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterDatasetRequest" - } - } - }, - "required": true - } - } - }, - "/v1/files/{bucket}": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListFileResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" - ], - "description": "List all files in a bucket.", - "parameters": [ - { - "name": "bucket", - "in": "path", - "description": "Bucket 
name (valid chars: a-zA-Z0-9_-)", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/models": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "description": "", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterModelRequest" - } - } - }, - "required": true - } - } - }, - "/v1/providers": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListProvidersResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Providers" - ], - "description": "", - "parameters": [] - } - }, - "/v1/inspect/routes": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListRoutesResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inspect" - ], - "description": "", - "parameters": [] - } - }, - "/v1/tool-runtime/list-tools": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListToolDefsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolRuntime" - ], - "description": "", - "parameters": [ - { - "name": "tool_group_id", - "in": "query", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "mcp_endpoint", - "in": "query", - "required": false, - "schema": { - "$ref": "#/components/schemas/URL" - } - } - ] - } - }, - "/v1/scoring-functions": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListScoringFunctionsResponse" - } - } - } - }, - "400": { 
- "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ScoringFunctions" - ], - "description": "", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ScoringFunctions" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterScoringFunctionRequest" - } - } - }, - "required": true - } - } - }, - "/v1/shields": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListShieldsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Shields" - ], - "description": "", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Shield" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Shields" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterShieldRequest" - } - } - }, - "required": true - } - } - }, - "/v1/toolgroups": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListToolGroupsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "description": "List tool groups with optional provider", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "description": "Register a tool group", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterToolGroupRequest" - } - } - }, - "required": true - } - } - }, - "/v1/tools": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - 
"application/json": { - "schema": { - "$ref": "#/components/schemas/ListToolsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "description": "List tools with optional tool group", - "parameters": [ - { - "name": "toolgroup_id", - "in": "query", - "required": false, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/vector-dbs": { - "get": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListVectorDBsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorDBs" - ], - "description": "", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorDB" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorDBs" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterVectorDbRequest" - } - } - }, - "required": true - } - } - }, - "/v1/telemetry/events": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "description": "", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/LogEventRequest" - } - } - }, - "required": true - } - } - }, "/v1/openai/v1/chat/completions": { + "get": { + "responses": { + "200": { + "description": "A ListOpenAIChatCompletionResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inference" + ], + "description": "List all chat completions.", + "parameters": [ + { + "name": "after", + "in": "query", + "description": "The ID of the last chat completion to return.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "limit", + "in": "query", + "description": "The maximum number of chat completions to return.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "model", + "in": "query", + "description": "The model to 
filter by.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "order", + "in": "query", + "description": "The order to sort the chat completions by: \"asc\" or \"desc\". Defaults to \"desc\".", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order" + } + } + ] + }, "post": { "responses": { "200": { - "description": "Response from an OpenAI-compatible chat completion request. **OR** Chunk from a streaming response to an OpenAI-compatible chat completion request.", + "description": "An OpenAIChatCompletion.", "content": { "application/json": { "schema": { @@ -3142,11 +2803,772 @@ } } }, + "/v1/datasets": { + "get": { + "responses": { + "200": { + "description": "A ListDatasetsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDatasetsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Datasets" + ], + "description": "List all datasets.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "A Dataset.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Dataset" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Datasets" + ], + "description": "Register a new dataset.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterDatasetRequest" + } + } + }, + "required": true + } + } + }, + "/v1/files/{bucket}": { + "get": { + "responses": { + "200": { + "description": "A ListFileResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListFileResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Files" + ], + "description": "List all files in a bucket.", + "parameters": [ + { + "name": "bucket", + "in": "path", + "description": "Bucket name (valid chars: a-zA-Z0-9_-).", + "required": true, + "schema": { + "type": "string" + } + } + ] + } + }, + "/v1/models": { + "get": { + "responses": { + "200": { + "description": "A ListModelsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Models" + ], + "description": "List all models.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "A Model.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "$ref": 
"#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Models" + ], + "description": "Register a model.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterModelRequest" + } + } + }, + "required": true + } + } + }, + "/v1/openai/v1/responses/{response_id}/input_items": { + "get": { + "responses": { + "200": { + "description": "An ListOpenAIResponseInputItem.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIResponseInputItem" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Agents" + ], + "description": "List input items for a given OpenAI response.", + "parameters": [ + { + "name": "response_id", + "in": "path", + "description": "The ID of the response to retrieve input items for.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "after", + "in": "query", + "description": "An item ID to list items after, used for pagination.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "before", + "in": "query", + "description": "An item ID to list items before, used for pagination.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "include", + "in": "query", + "description": "Additional fields to include in the response.", + "required": false, + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + }, + { + "name": "limit", + "in": "query", + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "order", + "in": "query", + "description": "The order to return the input items in. 
Default is desc.", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order" + } + } + ] + } + }, + "/v1/providers": { + "get": { + "responses": { + "200": { + "description": "A ListProvidersResponse containing information about all providers.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListProvidersResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Providers" + ], + "description": "List all available providers.", + "parameters": [] + } + }, + "/v1/inspect/routes": { + "get": { + "responses": { + "200": { + "description": "A ListRoutesResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListRoutesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inspect" + ], + "description": "List all routes.", + "parameters": [] + } + }, + "/v1/tool-runtime/list-tools": { + "get": { + "responses": { + "200": { + "description": "A ListToolDefsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolDefsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolRuntime" + ], + "description": "List all tools in the runtime.", + "parameters": [ + { + "name": "tool_group_id", + "in": "query", + "description": "The ID of the tool group to list tools for.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "mcp_endpoint", + "in": "query", + "description": "The MCP endpoint to use for the tool group.", + "required": false, + "schema": { + "$ref": "#/components/schemas/URL" + } + } + ] + } + }, + "/v1/scoring-functions": { + "get": { + "responses": { + "200": { + "description": "A ListScoringFunctionsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListScoringFunctionsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "description": "List all scoring functions.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "description": "Register a scoring function.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { 
+ "$ref": "#/components/schemas/RegisterScoringFunctionRequest" + } + } + }, + "required": true + } + } + }, + "/v1/shields": { + "get": { + "responses": { + "200": { + "description": "A ListShieldsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListShieldsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "description": "List all shields.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "description": "Register a shield.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterShieldRequest" + } + } + }, + "required": true + } + } + }, + "/v1/toolgroups": { + "get": { + "responses": { + "200": { + "description": "A ListToolGroupsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolGroupsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "description": "List tool groups with optional provider.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "description": "Register a tool group.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterToolGroupRequest" + } + } + }, + "required": true + } + } + }, + "/v1/tools": { + "get": { + "responses": { + "200": { + "description": "A ListToolsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListToolsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "description": "List tools with optional tool group.", + "parameters": [ + { + "name": "toolgroup_id", + "in": "query", + "description": "The ID of the tool group to list tools for.", + "required": false, + "schema": { + "type": "string" + } + } + ] + } + }, + "/v1/vector-dbs": { + "get": { + "responses": { + "200": { + "description": "A 
ListVectorDBsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListVectorDBsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorDBs" + ], + "description": "List all vector databases.", + "parameters": [] + }, + "post": { + "responses": { + "200": { + "description": "A VectorDB.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorDB" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorDBs" + ], + "description": "Register a vector database.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterVectorDbRequest" + } + } + }, + "required": true + } + } + }, + "/v1/telemetry/events": { + "post": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Telemetry" + ], + "description": "Log an event.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LogEventRequest" + } + } + }, + "required": true + } + } + }, "/v1/openai/v1/completions": { "post": { "responses": { "200": { - "description": "OK", + "description": "An OpenAICompletion.", "content": { "application/json": { "schema": { @@ -3185,11 +3607,54 @@ } } }, + "/v1/openai/v1/embeddings": { + "post": { + "responses": { + "200": { + "description": "An OpenAIEmbeddingsResponse containing the embeddings.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inference" + ], + "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiEmbeddingsRequest" + } + } + }, + "required": true + } + } + }, "/v1/openai/v1/models": { "get": { "responses": { "200": { - "description": "OK", + "description": "A OpenAIListModelsResponse.", "content": { "application/json": { "schema": { @@ -3214,7 +3679,7 @@ "tags": [ "Models" ], - "description": "", + "description": "List models using the OpenAI API.", "parameters": [] } }, @@ -3222,7 +3687,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A PostTrainingJob.", "content": { "application/json": { "schema": { @@ -3247,7 +3712,7 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": 
"", + "description": "Run preference optimization of a model.", "parameters": [], "requestBody": { "content": { @@ -3308,7 +3773,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A QueryChunksResponse.", "content": { "application/json": { "schema": { @@ -3333,7 +3798,7 @@ "tags": [ "VectorIO" ], - "description": "", + "description": "Query chunks from a vector database.", "parameters": [], "requestBody": { "content": { @@ -3347,11 +3812,64 @@ } } }, + "/v1/telemetry/metrics/{metric_name}": { + "post": { + "responses": { + "200": { + "description": "A QueryMetricsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueryMetricsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Telemetry" + ], + "description": "Query metrics.", + "parameters": [ + { + "name": "metric_name", + "in": "path", + "description": "The name of the metric to query.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/QueryMetricsRequest" + } + } + }, + "required": true + } + } + }, "/v1/telemetry/spans": { "post": { "responses": { "200": { - "description": "OK", + "description": "A QuerySpansResponse.", "content": { "application/json": { "schema": { @@ -3376,7 +3894,7 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Query spans.", "parameters": [], "requestBody": { "content": { @@ -3394,7 +3912,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A QueryTracesResponse.", "content": { "application/json": { "schema": { @@ -3419,7 +3937,7 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Query traces.", "parameters": [], "requestBody": { "content": { @@ -3566,7 +4084,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A RunShieldResponse.", "content": { "application/json": { "schema": { @@ -3591,7 +4109,7 @@ "tags": [ "Safety" ], - "description": "", + "description": "Run a shield.", "parameters": [], "requestBody": { "content": { @@ -3627,7 +4145,7 @@ "tags": [ "Telemetry" ], - "description": "", + "description": "Save spans to a dataset.", "parameters": [], "requestBody": { "content": { @@ -3645,7 +4163,7 @@ "post": { "responses": { "200": { - "description": "ScoreResponse object containing rows and aggregated results", + "description": "A ScoreResponse object containing rows and aggregated results.", "content": { "application/json": { "schema": { @@ -3688,7 +4206,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A ScoreBatchResponse.", "content": { "application/json": { "schema": { @@ -3713,7 +4231,7 @@ "tags": [ "Scoring" ], - "description": "", + "description": "Score a batch of rows.", "parameters": [], "requestBody": { "content": { @@ -3731,7 +4249,7 @@ "post": { "responses": { "200": { - "description": "OK", + "description": "A PostTrainingJob.", "content": { "application/json": { "schema": { @@ -3756,7 +4274,7 @@ "tags": [ "PostTraining (Coming Soon)" ], - "description": "", + "description": "Run supervised fine-tuning of a model.", "parameters": [], "requestBody": { "content": { @@ -3817,7 +4335,7 @@ "get": { "responses": { "200": { - 
"description": "OK", + "description": "A VersionInfo.", "content": { "application/json": { "schema": { @@ -3842,7 +4360,7 @@ "tags": [ "Inspect" ], - "description": "", + "description": "Get the version of the service.", "parameters": [] } } @@ -3908,7 +4426,8 @@ } ] } - } + }, + "description": "The rows to append to the dataset." } }, "additionalProperties": false, @@ -3961,9 +4480,13 @@ "properties": { "type": { "type": "string", + "enum": [ + "json_schema", + "grammar" + ], + "description": "Must be \"grammar\" to identify this format type", "const": "grammar", - "default": "grammar", - "description": "Must be \"grammar\" to identify this format type" + "default": "grammar" }, "bnf": { "type": "object", @@ -4087,9 +4610,13 @@ "properties": { "type": { "type": "string", + "enum": [ + "json_schema", + "grammar" + ], + "description": "Must be \"json_schema\" to identify this format type", "const": "json_schema", - "default": "json_schema", - "description": "Must be \"json_schema\" to identify this format type" + "default": "json_schema" }, "json_schema": { "type": "object", @@ -4607,7 +5134,8 @@ "type": "object", "properties": { "model_id": { - "type": "string" + "type": "string", + "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." }, "messages_batch": { "type": "array", @@ -4616,22 +5144,27 @@ "items": { "$ref": "#/components/schemas/Message" } - } + }, + "description": "The messages to generate completions for." }, "sampling_params": { - "$ref": "#/components/schemas/SamplingParams" + "$ref": "#/components/schemas/SamplingParams", + "description": "(Optional) Parameters to control the sampling strategy." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/ToolDefinition" - } + }, + "description": "(Optional) List of tool definitions available to the model." }, "tool_config": { - "$ref": "#/components/schemas/ToolConfig" + "$ref": "#/components/schemas/ToolConfig", + "description": "(Optional) Configuration for tool use." }, "response_format": { - "$ref": "#/components/schemas/ResponseFormat" + "$ref": "#/components/schemas/ResponseFormat", + "description": "(Optional) Grammar specification for guided (structured) decoding." }, "logprobs": { "type": "object", @@ -4643,7 +5176,7 @@ } }, "additionalProperties": false, - "title": "LogProbConfig" + "description": "(Optional) If specified, log probabilities for each token position will be returned." } }, "additionalProperties": false, @@ -4746,19 +5279,23 @@ "type": "object", "properties": { "model_id": { - "type": "string" + "type": "string", + "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." }, "content_batch": { "type": "array", "items": { "$ref": "#/components/schemas/InterleavedContent" - } + }, + "description": "The content to generate completions for." }, "sampling_params": { - "$ref": "#/components/schemas/SamplingParams" + "$ref": "#/components/schemas/SamplingParams", + "description": "(Optional) Parameters to control the sampling strategy." }, "response_format": { - "$ref": "#/components/schemas/ResponseFormat" + "$ref": "#/components/schemas/ResponseFormat", + "description": "(Optional) Grammar specification for guided (structured) decoding." 
}, "logprobs": { "type": "object", @@ -4770,7 +5307,7 @@ } }, "additionalProperties": false, - "title": "LogProbConfig" + "description": "(Optional) If specified, log probabilities for each token position will be returned." } }, "additionalProperties": false, @@ -4838,7 +5375,8 @@ "type": "object", "properties": { "job_uuid": { - "type": "string" + "type": "string", + "description": "The UUID of the job to cancel." } }, "additionalProperties": false, @@ -4859,18 +5397,18 @@ "items": { "$ref": "#/components/schemas/Message" }, - "description": "List of messages in the conversation" + "description": "List of messages in the conversation." }, "sampling_params": { "$ref": "#/components/schemas/SamplingParams", - "description": "Parameters to control the sampling strategy" + "description": "Parameters to control the sampling strategy." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/ToolDefinition" }, - "description": "(Optional) List of tool definitions available to the model" + "description": "(Optional) List of tool definitions available to the model." }, "tool_choice": { "type": "string", @@ -5090,15 +5628,15 @@ }, "content": { "$ref": "#/components/schemas/InterleavedContent", - "description": "The content to generate a completion for" + "description": "The content to generate a completion for." }, "sampling_params": { "$ref": "#/components/schemas/SamplingParams", - "description": "(Optional) Parameters to control the sampling strategy" + "description": "(Optional) Parameters to control the sampling strategy." }, "response_format": { "$ref": "#/components/schemas/ResponseFormat", - "description": "(Optional) Grammar specification for guided (structured) decoding" + "description": "(Optional) Grammar specification for guided (structured) decoding." 
}, "stream": { "type": "boolean", @@ -5547,6 +6085,14 @@ }, "step_type": { "type": "string", + "enum": [ + "inference", + "tool_execution", + "shield_call", + "memory_retrieval" + ], + "title": "StepType", + "description": "Type of the step in an agent turn.", "const": "inference", "default": "inference" }, @@ -5588,6 +6134,14 @@ }, "step_type": { "type": "string", + "enum": [ + "inference", + "tool_execution", + "shield_call", + "memory_retrieval" + ], + "title": "StepType", + "description": "Type of the step in an agent turn.", "const": "memory_retrieval", "default": "memory_retrieval" }, @@ -5676,6 +6230,14 @@ }, "step_type": { "type": "string", + "enum": [ + "inference", + "tool_execution", + "shield_call", + "memory_retrieval" + ], + "title": "StepType", + "description": "Type of the step in an agent turn.", "const": "shield_call", "default": "shield_call" }, @@ -5716,6 +6278,14 @@ }, "step_type": { "type": "string", + "enum": [ + "inference", + "tool_execution", + "shield_call", + "memory_retrieval" + ], + "title": "StepType", + "description": "Type of the step in an agent turn.", "const": "tool_execution", "default": "tool_execution" }, @@ -5978,6 +6548,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "step_complete", "default": "step_complete" }, @@ -6035,6 +6614,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "step_progress", "default": "step_progress" }, @@ -6070,6 +6658,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "step_start", "default": "step_start" }, @@ -6140,6 +6737,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "turn_awaiting_input", "default": "turn_awaiting_input" }, @@ -6159,6 +6765,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "turn_complete", "default": "turn_complete" }, @@ -6178,6 +6793,15 @@ "properties": { "event_type": { "type": "string", + "enum": [ + "step_start", + "step_complete", + "step_progress", + "turn_start", + "turn_complete", + "turn_awaiting_input" + ], + "title": "AgentTurnResponseEventType", "const": "turn_start", "default": "turn_start" }, @@ -6192,24 +6816,880 @@ ], "title": "AgentTurnResponseTurnStartPayload" }, + "OpenAIResponseInput": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputFunctionToolCallOutput" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + } + ] + }, + "OpenAIResponseInputFunctionToolCallOutput": { + "type": "object", + "properties": { + "call_id": { + "type": "string" + }, + "output": { + 
"type": "string" + }, + "type": { + "type": "string", + "const": "function_call_output", + "default": "function_call_output" + }, + "id": { + "type": "string" + }, + "status": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "call_id", + "output", + "type" + ], + "title": "OpenAIResponseInputFunctionToolCallOutput", + "description": "This represents the output of a function call that gets passed back to the model." + }, + "OpenAIResponseInputMessageContent": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "input_text": "#/components/schemas/OpenAIResponseInputMessageContentText", + "input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage" + } + } + }, + "OpenAIResponseInputMessageContentImage": { + "type": "object", + "properties": { + "detail": { + "oneOf": [ + { + "type": "string", + "const": "low" + }, + { + "type": "string", + "const": "high" + }, + { + "type": "string", + "const": "auto" + } + ], + "default": "auto" + }, + "type": { + "type": "string", + "const": "input_image", + "default": "input_image" + }, + "image_url": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "detail", + "type" + ], + "title": "OpenAIResponseInputMessageContentImage" + }, + "OpenAIResponseInputMessageContentText": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "type": { + "type": "string", + "const": "input_text", + "default": "input_text" + } + }, + "additionalProperties": false, + "required": [ + "text", + "type" + ], + "title": "OpenAIResponseInputMessageContentText" + }, + "OpenAIResponseInputTool": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolFunction" + }, + { + "$ref": "#/components/schemas/OpenAIResponseInputToolMCP" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch", + "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch", + "function": "#/components/schemas/OpenAIResponseInputToolFunction", + "mcp": "#/components/schemas/OpenAIResponseInputToolMCP" + } + } + }, + "OpenAIResponseInputToolFileSearch": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "file_search", + "default": "file_search" + }, + "vector_store_id": { + "type": "array", + "items": { + "type": "string" + } + }, + "ranking_options": { + "type": "object", + "properties": { + "ranker": { + "type": "string" + }, + "score_threshold": { + "type": "number", + "default": 0.0 + } + }, + "additionalProperties": false, + "title": "FileSearchRankingOptions" + } + }, + "additionalProperties": false, + "required": [ + "type", + "vector_store_id" + ], + "title": "OpenAIResponseInputToolFileSearch" + }, + "OpenAIResponseInputToolFunction": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "function", + "default": "function" + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "parameters": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, 
+ { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "strict": { + "type": "boolean" + } + }, + "additionalProperties": false, + "required": [ + "type", + "name" + ], + "title": "OpenAIResponseInputToolFunction" + }, + "OpenAIResponseInputToolMCP": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "mcp", + "default": "mcp" + }, + "server_label": { + "type": "string" + }, + "server_url": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "require_approval": { + "oneOf": [ + { + "type": "string", + "const": "always" + }, + { + "type": "string", + "const": "never" + }, + { + "type": "object", + "properties": { + "always": { + "type": "array", + "items": { + "type": "string" + } + }, + "never": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "title": "ApprovalFilter" + } + ], + "default": "never" + }, + "allowed_tools": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "object", + "properties": { + "tool_names": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "additionalProperties": false, + "title": "AllowedToolsFilter" + } + ] + } + }, + "additionalProperties": false, + "required": [ + "type", + "server_label", + "server_url", + "require_approval" + ], + "title": "OpenAIResponseInputToolMCP" + }, + "OpenAIResponseInputToolWebSearch": { + "type": "object", + "properties": { + "type": { + "oneOf": [ + { + "type": "string", + "const": "web_search" + }, + { + "type": "string", + "const": "web_search_preview_2025_03_11" + } + ], + "default": "web_search" + }, + "search_context_size": { + "type": "string", + "default": "medium" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "OpenAIResponseInputToolWebSearch" + }, + "OpenAIResponseMessage": { + "type": "object", + "properties": { + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContent" + } + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent" + } + } + ] + }, + "role": { + "oneOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ] + }, + "type": { + "type": "string", + "const": "message", + "default": "message" + }, + "id": { + "type": "string" + }, + "status": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "content", + "role", + "type" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios." 
+ }, + "OpenAIResponseOutputMessageContent": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "type": { + "type": "string", + "const": "output_text", + "default": "output_text" + } + }, + "additionalProperties": false, + "required": [ + "text", + "type" + ], + "title": "OpenAIResponseOutputMessageContentOutputText" + }, + "OpenAIResponseOutputMessageFunctionToolCall": { + "type": "object", + "properties": { + "call_id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "arguments": { + "type": "string" + }, + "type": { + "type": "string", + "const": "function_call", + "default": "function_call" + }, + "id": { + "type": "string" + }, + "status": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "call_id", + "name", + "arguments", + "type" + ], + "title": "OpenAIResponseOutputMessageFunctionToolCall" + }, + "OpenAIResponseOutputMessageWebSearchToolCall": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "status": { + "type": "string" + }, + "type": { + "type": "string", + "const": "web_search_call", + "default": "web_search_call" + } + }, + "additionalProperties": false, + "required": [ + "id", + "status", + "type" + ], + "title": "OpenAIResponseOutputMessageWebSearchToolCall" + }, + "CreateOpenaiResponseRequest": { + "type": "object", + "properties": { + "input": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInput" + } + } + ], + "description": "Input message(s) to create the response." + }, + "model": { + "type": "string", + "description": "The underlying LLM used for completions." + }, + "instructions": { + "type": "string" + }, + "previous_response_id": { + "type": "string", + "description": "(Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses." 
+ }, + "store": { + "type": "boolean" + }, + "stream": { + "type": "boolean" + }, + "temperature": { + "type": "number" + }, + "tools": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInputTool" + } + } + }, + "additionalProperties": false, + "required": [ + "input", + "model" + ], + "title": "CreateOpenaiResponseRequest" + }, + "OpenAIResponseError": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "message": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "code", + "message" + ], + "title": "OpenAIResponseError" + }, + "OpenAIResponseObject": { + "type": "object", + "properties": { + "created_at": { + "type": "integer" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError" + }, + "id": { + "type": "string" + }, + "model": { + "type": "string" + }, + "object": { + "type": "string", + "const": "response", + "default": "response" + }, + "output": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutput" + } + }, + "parallel_tool_calls": { + "type": "boolean", + "default": false + }, + "previous_response_id": { + "type": "string" + }, + "status": { + "type": "string" + }, + "temperature": { + "type": "number" + }, + "top_p": { + "type": "number" + }, + "truncation": { + "type": "string" + }, + "user": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "created_at", + "id", + "model", + "object", + "output", + "parallel_tool_calls", + "status" + ], + "title": "OpenAIResponseObject" + }, + "OpenAIResponseOutput": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "message": "#/components/schemas/OpenAIResponseMessage", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + } + } + }, + "OpenAIResponseOutputMessageMCPCall": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "type": { + "type": "string", + "const": "mcp_call", + "default": "mcp_call" + }, + "arguments": { + "type": "string" + }, + "name": { + "type": "string" + }, + "server_label": { + "type": "string" + }, + "error": { + "type": "string" + }, + "output": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "arguments", + "name", + "server_label" + ], + "title": "OpenAIResponseOutputMessageMCPCall" + }, + "OpenAIResponseOutputMessageMCPListTools": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "type": { + "type": "string", + "const": "mcp_list_tools", + "default": "mcp_list_tools" + }, + "server_label": { + "type": "string" + }, + "tools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "input_schema": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + 
}, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "name": { + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "input_schema", + "name" + ], + "title": "MCPListToolsTool" + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "server_label", + "tools" + ], + "title": "OpenAIResponseOutputMessageMCPListTools" + }, + "OpenAIResponseObjectStream": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseObjectStreamResponseCreated" + }, + { + "$ref": "#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta" + }, + { + "$ref": "#/components/schemas/OpenAIResponseObjectStreamResponseCompleted" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "response.created": "#/components/schemas/OpenAIResponseObjectStreamResponseCreated", + "response.output_text.delta": "#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta", + "response.completed": "#/components/schemas/OpenAIResponseObjectStreamResponseCompleted" + } + } + }, + "OpenAIResponseObjectStreamResponseCompleted": { + "type": "object", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "type": { + "type": "string", + "const": "response.completed", + "default": "response.completed" + } + }, + "additionalProperties": false, + "required": [ + "response", + "type" + ], + "title": "OpenAIResponseObjectStreamResponseCompleted" + }, + "OpenAIResponseObjectStreamResponseCreated": { + "type": "object", + "properties": { + "response": { + "$ref": "#/components/schemas/OpenAIResponseObject" + }, + "type": { + "type": "string", + "const": "response.created", + "default": "response.created" + } + }, + "additionalProperties": false, + "required": [ + "response", + "type" + ], + "title": "OpenAIResponseObjectStreamResponseCreated" + }, + "OpenAIResponseObjectStreamResponseOutputTextDelta": { + "type": "object", + "properties": { + "content_index": { + "type": "integer" + }, + "delta": { + "type": "string" + }, + "item_id": { + "type": "string" + }, + "output_index": { + "type": "integer" + }, + "sequence_number": { + "type": "integer" + }, + "type": { + "type": "string", + "const": "response.output_text.delta", + "default": "response.output_text.delta" + } + }, + "additionalProperties": false, + "required": [ + "content_index", + "delta", + "item_id", + "output_index", + "sequence_number", + "type" + ], + "title": "OpenAIResponseObjectStreamResponseOutputTextDelta" + }, "CreateUploadSessionRequest": { "type": "object", "properties": { "bucket": { "type": "string", - "description": "Bucket under which the file is stored (valid chars: a-zA-Z0-9_-)" + "description": "Bucket under which the file is stored (valid chars: a-zA-Z0-9_-)." }, "key": { "type": "string", - "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)" + "description": "Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)." }, "mime_type": { "type": "string", - "description": "MIME type of the file" + "description": "MIME type of the file." }, "size": { "type": "integer", - "description": "File size in bytes" + "description": "File size in bytes." 
} }, "additionalProperties": false, @@ -6361,7 +7841,7 @@ "type": "object", "properties": { "type": { - "type": "string", + "$ref": "#/components/schemas/ScoringFnParamsType", "const": "basic", "default": "basic" }, @@ -6374,7 +7854,8 @@ }, "additionalProperties": false, "required": [ - "type" + "type", + "aggregation_functions" ], "title": "BasicScoringFnParams" }, @@ -6426,7 +7907,7 @@ "type": "object", "properties": { "type": { - "type": "string", + "$ref": "#/components/schemas/ScoringFnParamsType", "const": "llm_as_judge", "default": "llm_as_judge" }, @@ -6452,7 +7933,9 @@ "additionalProperties": false, "required": [ "type", - "judge_model" + "judge_model", + "judge_score_regexes", + "aggregation_functions" ], "title": "LLMAsJudgeScoringFnParams" }, @@ -6490,7 +7973,7 @@ "type": "object", "properties": { "type": { - "type": "string", + "$ref": "#/components/schemas/ScoringFnParamsType", "const": "regex_parser", "default": "regex_parser" }, @@ -6509,7 +7992,9 @@ }, "additionalProperties": false, "required": [ - "type" + "type", + "parsing_regexes", + "aggregation_functions" ], "title": "RegexParserScoringFnParams" }, @@ -6534,6 +8019,15 @@ } } }, + "ScoringFnParamsType": { + "type": "string", + "enum": [ + "llm_as_judge", + "regex_parser", + "basic" + ], + "title": "ScoringFnParamsType" + }, "EvaluateRowsRequest": { "type": "object", "properties": { @@ -6802,6 +8296,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "benchmark", "default": "benchmark" }, @@ -6843,7 +8348,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "dataset_id", @@ -6852,6 +8356,482 @@ ], "title": "Benchmark" }, + "OpenAIAssistantMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "assistant", + "default": "assistant", + "description": "Must be \"assistant\" to identify this as the model's response" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The content of the model's response" + }, + "name": { + "type": "string", + "description": "(Optional) The name of the assistant message participant." + }, + "tool_calls": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "description": "List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + } + }, + "additionalProperties": false, + "required": [ + "role" + ], + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." 
+ }, + "OpenAIChatCompletionContentPartImageParam": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "image_url", + "default": "image_url" + }, + "image_url": { + "$ref": "#/components/schemas/OpenAIImageURL" + } + }, + "additionalProperties": false, + "required": [ + "type", + "image_url" + ], + "title": "OpenAIChatCompletionContentPartImageParam" + }, + "OpenAIChatCompletionContentPartParam": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + } + } + }, + "OpenAIChatCompletionContentPartTextParam": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text", + "default": "text" + }, + "text": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "type", + "text" + ], + "title": "OpenAIChatCompletionContentPartTextParam" + }, + "OpenAIChatCompletionToolCall": { + "type": "object", + "properties": { + "index": { + "type": "integer" + }, + "id": { + "type": "string" + }, + "type": { + "type": "string", + "const": "function", + "default": "function" + }, + "function": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "OpenAIChatCompletionToolCall" + }, + "OpenAIChatCompletionToolCallFunction": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "arguments": { + "type": "string" + } + }, + "additionalProperties": false, + "title": "OpenAIChatCompletionToolCallFunction" + }, + "OpenAIChoice": { + "type": "object", + "properties": { + "message": { + "$ref": "#/components/schemas/OpenAIMessageParam", + "description": "The message from the model" + }, + "finish_reason": { + "type": "string", + "description": "The reason the model stopped generating" + }, + "index": { + "type": "integer", + "description": "The index of the choice" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "description": "(Optional) The log probabilities for the tokens in the message" + } + }, + "additionalProperties": false, + "required": [ + "message", + "finish_reason", + "index" + ], + "title": "OpenAIChoice", + "description": "A choice from an OpenAI-compatible chat completion response." + }, + "OpenAIChoiceLogprobs": { + "type": "object", + "properties": { + "content": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "description": "(Optional) The log probabilities for the tokens in the message" + }, + "refusal": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAITokenLogProb" + }, + "description": "(Optional) The log probabilities for the tokens in the message" + } + }, + "additionalProperties": false, + "title": "OpenAIChoiceLogprobs", + "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." 
+ }, + "OpenAIDeveloperMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "developer", + "default": "developer", + "description": "Must be \"developer\" to identify this as a developer message" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The content of the developer message" + }, + "name": { + "type": "string", + "description": "(Optional) The name of the developer message participant." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "OpenAIDeveloperMessageParam", + "description": "A message from the developer in an OpenAI-compatible chat completion request." + }, + "OpenAIImageURL": { + "type": "object", + "properties": { + "url": { + "type": "string" + }, + "detail": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "url" + ], + "title": "OpenAIImageURL" + }, + "OpenAIMessageParam": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "user": "#/components/schemas/OpenAIUserMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "assistant": "#/components/schemas/OpenAIAssistantMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam" + } + } + }, + "OpenAISystemMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "system", + "default": "system", + "description": "Must be \"system\" to identify this as a system message" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." + }, + "name": { + "type": "string", + "description": "(Optional) The name of the system message participant." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "OpenAISystemMessageParam", + "description": "A system message providing instructions or context to the model." + }, + "OpenAITokenLogProb": { + "type": "object", + "properties": { + "token": { + "type": "string" + }, + "bytes": { + "type": "array", + "items": { + "type": "integer" + } + }, + "logprob": { + "type": "number" + }, + "top_logprobs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAITopLogProb" + } + } + }, + "additionalProperties": false, + "required": [ + "token", + "logprob", + "top_logprobs" + ], + "title": "OpenAITokenLogProb", + "description": "The log probability for a token from an OpenAI-compatible chat completion response." 
+ }, + "OpenAIToolMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "tool", + "default": "tool", + "description": "Must be \"tool\" to identify this as a tool response" + }, + "tool_call_id": { + "type": "string", + "description": "Unique identifier for the tool call this response is for" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The response content from the tool" + } + }, + "additionalProperties": false, + "required": [ + "role", + "tool_call_id", + "content" + ], + "title": "OpenAIToolMessageParam", + "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." + }, + "OpenAITopLogProb": { + "type": "object", + "properties": { + "token": { + "type": "string" + }, + "bytes": { + "type": "array", + "items": { + "type": "integer" + } + }, + "logprob": { + "type": "number" + } + }, + "additionalProperties": false, + "required": [ + "token", + "logprob" + ], + "title": "OpenAITopLogProb", + "description": "The top log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIUserMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "user", + "default": "user", + "description": "Must be \"user\" to identify this as a user message" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The content of the message, which can include text and other media" + }, + "name": { + "type": "string", + "description": "(Optional) The name of the user message participant." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." 
+ }, + "OpenAICompletionWithInputMessages": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the chat completion" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "description": "List of choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "default": "chat.completion", + "description": "The object type, which will be \"chat.completion\"" + }, + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the chat completion" + }, + "input_messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessageParam" + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages" + }, "DataSource": { "oneOf": [ { @@ -6883,6 +8863,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "dataset", "default": "dataset" }, @@ -6928,7 +8919,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "purpose", @@ -7058,6 +9048,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "model", "default": "model" }, @@ -7094,7 +9095,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "metadata", @@ -7293,6 +9293,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "scoring_function", "default": "scoring_function" }, @@ -7334,7 +9345,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "metadata", @@ -7386,6 +9396,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "shield", "default": "shield" }, @@ -7418,7 +9439,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type" ], @@ -7490,10 +9510,12 @@ "type": "array", "items": { "type": "string" - } + }, + "description": "The attributes to return in the tree." }, "max_depth": { - "type": "integer" + "type": "integer", + "description": "The maximum depth of the tree." 
} }, "additionalProperties": false, @@ -7598,15 +9620,23 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "tool", "default": "tool" }, "toolgroup_id": { "type": "string" }, - "tool_host": { - "$ref": "#/components/schemas/ToolHost" - }, "description": { "type": "string" }, @@ -7645,25 +9675,14 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "toolgroup_id", - "tool_host", "description", "parameters" ], "title": "Tool" }, - "ToolHost": { - "type": "string", - "enum": [ - "distribution", - "client", - "model_context_protocol" - ], - "title": "ToolHost" - }, "ToolGroup": { "type": "object", "properties": { @@ -7678,6 +9697,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "tool_group", "default": "tool_group" }, @@ -7713,7 +9743,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type" ], @@ -7880,6 +9909,17 @@ }, "type": { "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group" + ], + "title": "ResourceType", "const": "vector_db", "default": "vector_db" }, @@ -7893,7 +9933,6 @@ "additionalProperties": false, "required": [ "identifier", - "provider_resource_id", "provider_id", "type", "embedding_model", @@ -8015,7 +10054,8 @@ "type": "object", "properties": { "vector_db_id": { - "type": "string" + "type": "string", + "description": "The identifier of the vector database to insert the chunks into." }, "chunks": { "type": "array", @@ -8023,7 +10063,8 @@ "type": "object", "properties": { "content": { - "$ref": "#/components/schemas/InterleavedContent" + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the chunk, which can be interleaved text, images, or other types." }, "metadata": { "type": "object", @@ -8048,7 +10089,15 @@ "type": "object" } ] - } + }, + "description": "Metadata associated with the chunk, such as document ID, source, or other relevant information." + }, + "embedding": { + "type": "array", + "items": { + "type": "number" + }, + "description": "Optional embedding for the chunk. If not provided, it will be computed later." } }, "additionalProperties": false, @@ -8056,11 +10105,14 @@ "content", "metadata" ], - "title": "Chunk" - } + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." + }, + "description": "The chunks to insert. Each `Chunk` should contain content which can be interleaved text, images, or other types. `metadata`: `dict[str, Any]` and `embedding`: `List[float]` are optional. If `metadata` is provided, you configure how Llama Stack formats the chunk during generation. If `embedding` is not provided, it will be computed later." }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "description": "The time to live of the chunks." } }, "additionalProperties": false, @@ -8147,7 +10199,8 @@ "type": "object", "properties": { "tool_name": { - "type": "string" + "type": "string", + "description": "The name of the tool to invoke." }, "kwargs": { "type": "object", @@ -8172,7 +10225,8 @@ "type": "object" } ] - } + }, + "description": "A dictionary of arguments to pass to the tool." 
} }, "additionalProperties": false, @@ -8293,38 +10347,6 @@ ], "title": "Job" }, - "ListAgentSessionsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Session" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListAgentSessionsResponse" - }, - "ListAgentsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Agent" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListAgentsResponse" - }, "BucketResponse": { "type": "object", "properties": { @@ -8372,6 +10394,91 @@ ], "title": "ListBenchmarksResponse" }, + "Order": { + "type": "string", + "enum": [ + "asc", + "desc" + ], + "title": "Order" + }, + "ListOpenAIChatCompletionResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the chat completion" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "description": "List of choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "default": "chat.completion", + "description": "The object type, which will be \"chat.completion\"" + }, + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the chat completion" + }, + "input_messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessageParam" + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages" + } + }, + "has_more": { + "type": "boolean" + }, + "first_id": { + "type": "string" + }, + "last_id": { + "type": "string" + }, + "object": { + "type": "string", + "const": "list", + "default": "list" + } + }, + "additionalProperties": false, + "required": [ + "data", + "has_more", + "first_id", + "last_id", + "object" + ], + "title": "ListOpenAIChatCompletionResponse" + }, "ListDatasetsResponse": { "type": "object", "properties": { @@ -8422,6 +10529,130 @@ ], "title": "ListModelsResponse" }, + "ListOpenAIResponseInputItem": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInput" + } + }, + "object": { + "type": "string", + "const": "list", + "default": "list" + } + }, + "additionalProperties": false, + "required": [ + "data", + "object" + ], + "title": "ListOpenAIResponseInputItem" + }, + "ListOpenAIResponseObject": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" + } + }, + "has_more": { + "type": "boolean" + }, + "first_id": { + "type": "string" + }, + "last_id": { + "type": "string" + }, + "object": { + "type": "string", + "const": "list", + "default": "list" + } + }, + "additionalProperties": false, + "required": [ + "data", + "has_more", + "first_id", + "last_id", + "object" + ], + "title": "ListOpenAIResponseObject" + }, + "OpenAIResponseObjectWithInput": { + "type": "object", + "properties": { + "created_at": { + "type": "integer" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError" + }, + "id": { + "type": 
"string" + }, + "model": { + "type": "string" + }, + "object": { + "type": "string", + "const": "response", + "default": "response" + }, + "output": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutput" + } + }, + "parallel_tool_calls": { + "type": "boolean", + "default": false + }, + "previous_response_id": { + "type": "string" + }, + "status": { + "type": "string" + }, + "temperature": { + "type": "number" + }, + "top_p": { + "type": "number" + }, + "truncation": { + "type": "string" + }, + "user": { + "type": "string" + }, + "input": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInput" + } + } + }, + "additionalProperties": false, + "required": [ + "created_at", + "id", + "model", + "object", + "output", + "parallel_tool_calls", + "status", + "input" + ], + "title": "OpenAIResponseObjectWithInput" + }, "ListProvidersResponse": { "type": "object", "properties": { @@ -8595,6 +10826,15 @@ } } }, + "EventType": { + "type": "string", + "enum": [ + "unstructured_log", + "structured_log", + "metric" + ], + "title": "EventType" + }, "LogSeverity": { "type": "string", "enum": [ @@ -8643,7 +10883,7 @@ } }, "type": { - "type": "string", + "$ref": "#/components/schemas/EventType", "const": "metric", "default": "metric" }, @@ -8680,7 +10920,7 @@ "type": "object", "properties": { "type": { - "type": "string", + "$ref": "#/components/schemas/StructuredLogType", "const": "span_end", "default": "span_end" }, @@ -8699,7 +10939,7 @@ "type": "object", "properties": { "type": { - "type": "string", + "$ref": "#/components/schemas/StructuredLogType", "const": "span_start", "default": "span_start" }, @@ -8753,7 +10993,7 @@ } }, "type": { - "type": "string", + "$ref": "#/components/schemas/EventType", "const": "structured_log", "default": "structured_log" }, @@ -8788,6 +11028,14 @@ } } }, + "StructuredLogType": { + "type": "string", + "enum": [ + "span_start", + "span_end" + ], + "title": "StructuredLogType" + }, "UnstructuredLogEvent": { "type": "object", "properties": { @@ -8824,7 +11072,7 @@ } }, "type": { - "type": "string", + "$ref": "#/components/schemas/EventType", "const": "unstructured_log", "default": "unstructured_log" }, @@ -8850,10 +11098,12 @@ "type": "object", "properties": { "event": { - "$ref": "#/components/schemas/Event" + "$ref": "#/components/schemas/Event", + "description": "The event to log." }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "description": "The time to live of the event." } }, "additionalProperties": false, @@ -8863,192 +11113,6 @@ ], "title": "LogEventRequest" }, - "OpenAIAssistantMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "assistant", - "default": "assistant", - "description": "Must be \"assistant\" to identify this as the model's response" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The content of the model's response" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the assistant message participant." - }, - "tool_calls": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" - }, - "description": "List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." 
- } - }, - "additionalProperties": false, - "required": [ - "role" - ], - "title": "OpenAIAssistantMessageParam", - "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." - }, - "OpenAIChatCompletionContentPartImageParam": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "image_url", - "default": "image_url" - }, - "image_url": { - "$ref": "#/components/schemas/OpenAIImageURL" - } - }, - "additionalProperties": false, - "required": [ - "type", - "image_url" - ], - "title": "OpenAIChatCompletionContentPartImageParam" - }, - "OpenAIChatCompletionContentPartParam": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - }, - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam", - "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" - } - } - }, - "OpenAIChatCompletionContentPartTextParam": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "text", - "default": "text" - }, - "text": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "type", - "text" - ], - "title": "OpenAIChatCompletionContentPartTextParam" - }, - "OpenAIChatCompletionToolCall": { - "type": "object", - "properties": { - "index": { - "type": "integer" - }, - "id": { - "type": "string" - }, - "type": { - "type": "string", - "const": "function", - "default": "function" - }, - "function": { - "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "OpenAIChatCompletionToolCall" - }, - "OpenAIChatCompletionToolCallFunction": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "arguments": { - "type": "string" - } - }, - "additionalProperties": false, - "title": "OpenAIChatCompletionToolCallFunction" - }, - "OpenAIDeveloperMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "developer", - "default": "developer", - "description": "Must be \"developer\" to identify this as a developer message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The content of the developer message" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the developer message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAIDeveloperMessageParam", - "description": "A message from the developer in an OpenAI-compatible chat completion request." 
- }, - "OpenAIImageURL": { - "type": "object", - "properties": { - "url": { - "type": "string" - }, - "detail": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "url" - ], - "title": "OpenAIImageURL" - }, "OpenAIJSONSchema": { "type": "object", "properties": { @@ -9093,35 +11157,6 @@ ], "title": "OpenAIJSONSchema" }, - "OpenAIMessageParam": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIUserMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAISystemMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIAssistantMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIToolMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" - } - ], - "discriminator": { - "propertyName": "role", - "mapping": { - "user": "#/components/schemas/OpenAIUserMessageParam", - "system": "#/components/schemas/OpenAISystemMessageParam", - "assistant": "#/components/schemas/OpenAIAssistantMessageParam", - "tool": "#/components/schemas/OpenAIToolMessageParam", - "developer": "#/components/schemas/OpenAIDeveloperMessageParam" - } - } - }, "OpenAIResponseFormatJSONObject": { "type": "object", "properties": { @@ -9192,115 +11227,6 @@ ], "title": "OpenAIResponseFormatText" }, - "OpenAISystemMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "system", - "default": "system", - "description": "Must be \"system\" to identify this as a system message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." - }, - "name": { - "type": "string", - "description": "(Optional) The name of the system message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAISystemMessageParam", - "description": "A system message providing instructions or context to the model." - }, - "OpenAIToolMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "tool", - "default": "tool", - "description": "Must be \"tool\" to identify this as a tool response" - }, - "tool_call_id": { - "type": "string", - "description": "Unique identifier for the tool call this response is for" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The response content from the tool" - } - }, - "additionalProperties": false, - "required": [ - "role", - "tool_call_id", - "content" - ], - "title": "OpenAIToolMessageParam", - "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." 
- }, - "OpenAIUserMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "user", - "default": "user", - "description": "Must be \"user\" to identify this as a user message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The content of the message, which can include text and other media" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the user message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAIUserMessageParam", - "description": "A message from the user in an OpenAI-compatible chat completion request." - }, "OpenaiChatCompletionRequest": { "type": "object", "properties": { @@ -9313,11 +11239,11 @@ "items": { "$ref": "#/components/schemas/OpenAIMessageParam" }, - "description": "List of messages in the conversation" + "description": "List of messages in the conversation." }, "frequency_penalty": { "type": "number", - "description": "(Optional) The penalty for repeated tokens" + "description": "(Optional) The penalty for repeated tokens." }, "function_call": { "oneOf": [ @@ -9350,7 +11276,7 @@ } } ], - "description": "(Optional) The function call to use" + "description": "(Optional) The function call to use." }, "functions": { "type": "array", @@ -9379,46 +11305,46 @@ ] } }, - "description": "(Optional) List of functions to use" + "description": "(Optional) List of functions to use." }, "logit_bias": { "type": "object", "additionalProperties": { "type": "number" }, - "description": "(Optional) The logit bias to use" + "description": "(Optional) The logit bias to use." }, "logprobs": { "type": "boolean", - "description": "(Optional) The log probabilities to use" + "description": "(Optional) The log probabilities to use." }, "max_completion_tokens": { "type": "integer", - "description": "(Optional) The maximum number of tokens to generate" + "description": "(Optional) The maximum number of tokens to generate." }, "max_tokens": { "type": "integer", - "description": "(Optional) The maximum number of tokens to generate" + "description": "(Optional) The maximum number of tokens to generate." }, "n": { "type": "integer", - "description": "(Optional) The number of completions to generate" + "description": "(Optional) The number of completions to generate." }, "parallel_tool_calls": { "type": "boolean", - "description": "(Optional) Whether to parallelize tool calls" + "description": "(Optional) Whether to parallelize tool calls." }, "presence_penalty": { "type": "number", - "description": "(Optional) The penalty for repeated tokens" + "description": "(Optional) The penalty for repeated tokens." }, "response_format": { "$ref": "#/components/schemas/OpenAIResponseFormatParam", - "description": "(Optional) The response format to use" + "description": "(Optional) The response format to use." }, "seed": { "type": "integer", - "description": "(Optional) The seed to use" + "description": "(Optional) The seed to use." }, "stop": { "oneOf": [ @@ -9432,11 +11358,11 @@ } } ], - "description": "(Optional) The stop tokens to use" + "description": "(Optional) The stop tokens to use." }, "stream": { "type": "boolean", - "description": "(Optional) Whether to stream the response" + "description": "(Optional) Whether to stream the response." 
}, "stream_options": { "type": "object", @@ -9462,11 +11388,11 @@ } ] }, - "description": "(Optional) The stream options to use" + "description": "(Optional) The stream options to use." }, "temperature": { "type": "number", - "description": "(Optional) The temperature to use" + "description": "(Optional) The temperature to use." }, "tool_choice": { "oneOf": [ @@ -9499,7 +11425,7 @@ } } ], - "description": "(Optional) The tool choice to use" + "description": "(Optional) The tool choice to use." }, "tools": { "type": "array", @@ -9528,19 +11454,19 @@ ] } }, - "description": "(Optional) The tools to use" + "description": "(Optional) The tools to use." }, "top_logprobs": { "type": "integer", - "description": "(Optional) The top log probabilities to use" + "description": "(Optional) The top log probabilities to use." }, "top_p": { "type": "number", - "description": "(Optional) The top p to use" + "description": "(Optional) The top p to use." }, "user": { "type": "string", - "description": "(Optional) The user to use" + "description": "(Optional) The user to use." } }, "additionalProperties": false, @@ -9630,35 +11556,6 @@ "title": "OpenAIChatCompletionChunk", "description": "Chunk from a streaming response to an OpenAI-compatible chat completion request." }, - "OpenAIChoice": { - "type": "object", - "properties": { - "message": { - "$ref": "#/components/schemas/OpenAIMessageParam", - "description": "The message from the model" - }, - "finish_reason": { - "type": "string", - "description": "The reason the model stopped generating" - }, - "index": { - "type": "integer", - "description": "The index of the choice" - }, - "logprobs": { - "$ref": "#/components/schemas/OpenAIChoiceLogprobs", - "description": "(Optional) The log probabilities for the tokens in the message" - } - }, - "additionalProperties": false, - "required": [ - "message", - "finish_reason", - "index" - ], - "title": "OpenAIChoice", - "description": "A choice from an OpenAI-compatible chat completion response." - }, "OpenAIChoiceDelta": { "type": "object", "properties": { @@ -9686,28 +11583,6 @@ "title": "OpenAIChoiceDelta", "description": "A delta from an OpenAI-compatible chat completion streaming response." }, - "OpenAIChoiceLogprobs": { - "type": "object", - "properties": { - "content": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITokenLogProb" - }, - "description": "(Optional) The log probabilities for the tokens in the message" - }, - "refusal": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITokenLogProb" - }, - "description": "(Optional) The log probabilities for the tokens in the message" - } - }, - "additionalProperties": false, - "title": "OpenAIChoiceLogprobs", - "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." - }, "OpenAIChunkChoice": { "type": "object", "properties": { @@ -9737,61 +11612,6 @@ "title": "OpenAIChunkChoice", "description": "A chunk choice from an OpenAI-compatible chat completion streaming response." 
}, - "OpenAITokenLogProb": { - "type": "object", - "properties": { - "token": { - "type": "string" - }, - "bytes": { - "type": "array", - "items": { - "type": "integer" - } - }, - "logprob": { - "type": "number" - }, - "top_logprobs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITopLogProb" - } - } - }, - "additionalProperties": false, - "required": [ - "token", - "logprob", - "top_logprobs" - ], - "title": "OpenAITokenLogProb", - "description": "The log probability for a token from an OpenAI-compatible chat completion response." - }, - "OpenAITopLogProb": { - "type": "object", - "properties": { - "token": { - "type": "string" - }, - "bytes": { - "type": "array", - "items": { - "type": "integer" - } - }, - "logprob": { - "type": "number" - } - }, - "additionalProperties": false, - "required": [ - "token", - "logprob" - ], - "title": "OpenAITopLogProb", - "description": "The top log probability for a token from an OpenAI-compatible chat completion response." - }, "OpenaiCompletionRequest": { "type": "object", "properties": { @@ -9826,46 +11646,46 @@ } } ], - "description": "The prompt to generate a completion for" + "description": "The prompt to generate a completion for." }, "best_of": { "type": "integer", - "description": "(Optional) The number of completions to generate" + "description": "(Optional) The number of completions to generate." }, "echo": { "type": "boolean", - "description": "(Optional) Whether to echo the prompt" + "description": "(Optional) Whether to echo the prompt." }, "frequency_penalty": { "type": "number", - "description": "(Optional) The penalty for repeated tokens" + "description": "(Optional) The penalty for repeated tokens." }, "logit_bias": { "type": "object", "additionalProperties": { "type": "number" }, - "description": "(Optional) The logit bias to use" + "description": "(Optional) The logit bias to use." }, "logprobs": { "type": "boolean", - "description": "(Optional) The log probabilities to use" + "description": "(Optional) The log probabilities to use." }, "max_tokens": { "type": "integer", - "description": "(Optional) The maximum number of tokens to generate" + "description": "(Optional) The maximum number of tokens to generate." }, "n": { "type": "integer", - "description": "(Optional) The number of completions to generate" + "description": "(Optional) The number of completions to generate." }, "presence_penalty": { "type": "number", - "description": "(Optional) The penalty for repeated tokens" + "description": "(Optional) The penalty for repeated tokens." }, "seed": { "type": "integer", - "description": "(Optional) The seed to use" + "description": "(Optional) The seed to use." }, "stop": { "oneOf": [ @@ -9879,11 +11699,11 @@ } } ], - "description": "(Optional) The stop tokens to use" + "description": "(Optional) The stop tokens to use." }, "stream": { "type": "boolean", - "description": "(Optional) Whether to stream the response" + "description": "(Optional) Whether to stream the response." }, "stream_options": { "type": "object", @@ -9909,19 +11729,19 @@ } ] }, - "description": "(Optional) The stream options to use" + "description": "(Optional) The stream options to use." }, "temperature": { "type": "number", - "description": "(Optional) The temperature to use" + "description": "(Optional) The temperature to use." }, "top_p": { "type": "number", - "description": "(Optional) The top p to use" + "description": "(Optional) The top p to use." 
}, "user": { "type": "string", - "description": "(Optional) The user to use" + "description": "(Optional) The user to use." }, "guided_choice": { "type": "array", @@ -10000,6 +11820,139 @@ "title": "OpenAICompletionChoice", "description": "A choice from an OpenAI-compatible completion response." }, + "OpenaiEmbeddingsRequest": { + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint." + }, + "input": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings." + }, + "encoding_format": { + "type": "string", + "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"." + }, + "dimensions": { + "type": "integer", + "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models." + }, + "user": { + "type": "string", + "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse." + } + }, + "additionalProperties": false, + "required": [ + "model", + "input" + ], + "title": "OpenaiEmbeddingsRequest" + }, + "OpenAIEmbeddingData": { + "type": "object", + "properties": { + "object": { + "type": "string", + "const": "embedding", + "default": "embedding", + "description": "The object type, which will be \"embedding\"" + }, + "embedding": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "number" + } + }, + { + "type": "string" + } + ], + "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")" + }, + "index": { + "type": "integer", + "description": "The index of the embedding in the input list" + } + }, + "additionalProperties": false, + "required": [ + "object", + "embedding", + "index" + ], + "title": "OpenAIEmbeddingData", + "description": "A single embedding data object from an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingUsage": { + "type": "object", + "properties": { + "prompt_tokens": { + "type": "integer", + "description": "The number of tokens in the input" + }, + "total_tokens": { + "type": "integer", + "description": "The total number of tokens used" + } + }, + "additionalProperties": false, + "required": [ + "prompt_tokens", + "total_tokens" + ], + "title": "OpenAIEmbeddingUsage", + "description": "Usage information for an OpenAI-compatible embeddings response." 
+ }, + "OpenAIEmbeddingsResponse": { + "type": "object", + "properties": { + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "The object type, which will be \"list\"" + }, + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIEmbeddingData" + }, + "description": "List of embedding data objects" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the embeddings" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIEmbeddingUsage", + "description": "Usage information" + } + }, + "additionalProperties": false, + "required": [ + "object", + "data", + "model", + "usage" + ], + "title": "OpenAIEmbeddingsResponse", + "description": "Response from an OpenAI-compatible embeddings request." + }, "OpenAIModel": { "type": "object", "properties": { @@ -10214,16 +12167,20 @@ "type": "object", "properties": { "job_uuid": { - "type": "string" + "type": "string", + "description": "The UUID of the job to create." }, "finetuned_model": { - "type": "string" + "type": "string", + "description": "The model to fine-tune." }, "algorithm_config": { - "$ref": "#/components/schemas/DPOAlignmentConfig" + "$ref": "#/components/schemas/DPOAlignmentConfig", + "description": "The algorithm configuration." }, "training_config": { - "$ref": "#/components/schemas/TrainingConfig" + "$ref": "#/components/schemas/TrainingConfig", + "description": "The training configuration." }, "hyperparam_search_config": { "type": "object", @@ -10248,7 +12205,8 @@ "type": "object" } ] - } + }, + "description": "The hyperparam search configuration." }, "logger_config": { "type": "object", @@ -10273,7 +12231,8 @@ "type": "object" } ] - } + }, + "description": "The logger configuration." } }, "additionalProperties": false, @@ -10347,24 +12306,38 @@ "type": "object", "properties": { "query_generator_config": { - "$ref": "#/components/schemas/RAGQueryGeneratorConfig" + "$ref": "#/components/schemas/RAGQueryGeneratorConfig", + "description": "Configuration for the query generator." }, "max_tokens_in_context": { "type": "integer", - "default": 4096 + "default": 4096, + "description": "Maximum number of tokens in the context." }, "max_chunks": { "type": "integer", - "default": 5 + "default": 5, + "description": "Maximum number of chunks to retrieve." + }, + "chunk_template": { + "type": "string", + "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n", + "description": "Template for formatting each retrieved chunk in the context. Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict). Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n\"" + }, + "mode": { + "type": "string", + "description": "Search mode for retrieval—either \"vector\" or \"keyword\". Default \"vector\"." } }, "additionalProperties": false, "required": [ "query_generator_config", "max_tokens_in_context", - "max_chunks" + "max_chunks", + "chunk_template" ], - "title": "RAGQueryConfig" + "title": "RAGQueryConfig", + "description": "Configuration for the RAG query generation." }, "RAGQueryGeneratorConfig": { "oneOf": [ @@ -10448,10 +12421,12 @@ "type": "object", "properties": { "vector_db_id": { - "type": "string" + "type": "string", + "description": "The identifier of the vector database to query." 
}, "query": { - "$ref": "#/components/schemas/InterleavedContent" + "$ref": "#/components/schemas/InterleavedContent", + "description": "The query to search for." }, "params": { "type": "object", @@ -10476,7 +12451,8 @@ "type": "object" } ] - } + }, + "description": "The parameters of the query." } }, "additionalProperties": false, @@ -10495,7 +12471,8 @@ "type": "object", "properties": { "content": { - "$ref": "#/components/schemas/InterleavedContent" + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the chunk, which can be interleaved text, images, or other types." }, "metadata": { "type": "object", @@ -10520,7 +12497,15 @@ "type": "object" } ] - } + }, + "description": "Metadata associated with the chunk, such as document ID, source, or other relevant information." + }, + "embedding": { + "type": "array", + "items": { + "type": "number" + }, + "description": "Optional embedding for the chunk. If not provided, it will be computed later." } }, "additionalProperties": false, @@ -10528,7 +12513,8 @@ "content", "metadata" ], - "title": "Chunk" + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." } }, "scores": { @@ -10545,6 +12531,147 @@ ], "title": "QueryChunksResponse" }, + "QueryMetricsRequest": { + "type": "object", + "properties": { + "start_time": { + "type": "integer", + "description": "The start time of the metric to query." + }, + "end_time": { + "type": "integer", + "description": "The end time of the metric to query." + }, + "granularity": { + "type": "string", + "description": "The granularity of the metric to query." + }, + "query_type": { + "type": "string", + "enum": [ + "range", + "instant" + ], + "description": "The type of query to perform." + }, + "label_matchers": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + }, + "operator": { + "type": "string", + "enum": [ + "=", + "!=", + "=~", + "!~" + ], + "title": "MetricLabelOperator", + "default": "=" + } + }, + "additionalProperties": false, + "required": [ + "name", + "value", + "operator" + ], + "title": "MetricLabelMatcher" + }, + "description": "The label matchers to apply to the metric." 
+ } + }, + "additionalProperties": false, + "required": [ + "start_time", + "query_type" + ], + "title": "QueryMetricsRequest" + }, + "MetricDataPoint": { + "type": "object", + "properties": { + "timestamp": { + "type": "integer" + }, + "value": { + "type": "number" + } + }, + "additionalProperties": false, + "required": [ + "timestamp", + "value" + ], + "title": "MetricDataPoint" + }, + "MetricLabel": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "name", + "value" + ], + "title": "MetricLabel" + }, + "MetricSeries": { + "type": "object", + "properties": { + "metric": { + "type": "string" + }, + "labels": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MetricLabel" + } + }, + "values": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MetricDataPoint" + } + } + }, + "additionalProperties": false, + "required": [ + "metric", + "labels", + "values" + ], + "title": "MetricSeries" + }, + "QueryMetricsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/MetricSeries" + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "QueryMetricsResponse" + }, "QueryCondition": { "type": "object", "properties": { @@ -10602,16 +12729,19 @@ "type": "array", "items": { "$ref": "#/components/schemas/QueryCondition" - } + }, + "description": "The attribute filters to apply to the spans." }, "attributes_to_return": { "type": "array", "items": { "type": "string" - } + }, + "description": "The attributes to return in the spans." }, "max_depth": { - "type": "integer" + "type": "integer", + "description": "The maximum depth of the tree." } }, "additionalProperties": false, @@ -10644,19 +12774,23 @@ "type": "array", "items": { "$ref": "#/components/schemas/QueryCondition" - } + }, + "description": "The attribute filters to apply to the traces." }, "limit": { - "type": "integer" + "type": "integer", + "description": "The limit of traces to return." }, "offset": { - "type": "integer" + "type": "integer", + "description": "The offset of the traces to return." }, "order_by": { "type": "array", "items": { "type": "string" - } + }, + "description": "The order by of the traces to return." } }, "additionalProperties": false, @@ -10682,22 +12816,27 @@ "type": "object", "properties": { "benchmark_id": { - "type": "string" + "type": "string", + "description": "The ID of the benchmark to register." }, "dataset_id": { - "type": "string" + "type": "string", + "description": "The ID of the dataset to use for the benchmark." }, "scoring_functions": { "type": "array", "items": { "type": "string" - } + }, + "description": "The scoring functions to use for the benchmark." }, "provider_benchmark_id": { - "type": "string" + "type": "string", + "description": "The ID of the provider benchmark to use for the benchmark." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The ID of the provider to use for the benchmark." }, "metadata": { "type": "object", @@ -10722,7 +12861,8 @@ "type": "object" } ] - } + }, + "description": "The metadata to use for the benchmark." } }, "additionalProperties": false, @@ -10743,7 +12883,7 @@ "eval/question-answer", "eval/messages-answer" ], - "description": "The purpose of the dataset. One of - \"post-training/messages\": The dataset contains a messages column with list of messages for post-training. 
{ \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}, ] } - \"eval/question-answer\": The dataset contains a question column and an answer column for evaluation. { \"question\": \"What is the capital of France?\", \"answer\": \"Paris\" } - \"eval/messages-answer\": The dataset contains a messages column with list of messages and an answer column for evaluation. { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, my name is John Doe.\"}, {\"role\": \"assistant\", \"content\": \"Hello, John Doe. How can I help you today?\"}, {\"role\": \"user\", \"content\": \"What's my name?\"}, ], \"answer\": \"John Doe\" }" + "description": "The purpose of the dataset. One of: - \"post-training/messages\": The dataset contains a messages column with list of messages for post-training. { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}, ] } - \"eval/question-answer\": The dataset contains a question column and an answer column for evaluation. { \"question\": \"What is the capital of France?\", \"answer\": \"Paris\" } - \"eval/messages-answer\": The dataset contains a messages column with list of messages and an answer column for evaluation. { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, my name is John Doe.\"}, {\"role\": \"assistant\", \"content\": \"Hello, John Doe. How can I help you today?\"}, {\"role\": \"user\", \"content\": \"What's my name?\"}, ], \"answer\": \"John Doe\" }" }, "source": { "$ref": "#/components/schemas/DataSource", @@ -10773,7 +12913,7 @@ } ] }, - "description": "The metadata for the dataset. - E.g. {\"description\": \"My dataset\"}" + "description": "The metadata for the dataset. - E.g. {\"description\": \"My dataset\"}." }, "dataset_id": { "type": "string", @@ -10791,13 +12931,16 @@ "type": "object", "properties": { "model_id": { - "type": "string" + "type": "string", + "description": "The identifier of the model to register." }, "provider_model_id": { - "type": "string" + "type": "string", + "description": "The identifier of the model in the provider." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The identifier of the provider." }, "metadata": { "type": "object", @@ -10822,10 +12965,12 @@ "type": "object" } ] - } + }, + "description": "Any additional metadata for this model." }, "model_type": { - "$ref": "#/components/schemas/ModelType" + "$ref": "#/components/schemas/ModelType", + "description": "The type of model to register." } }, "additionalProperties": false, @@ -10838,22 +12983,28 @@ "type": "object", "properties": { "scoring_fn_id": { - "type": "string" + "type": "string", + "description": "The ID of the scoring function to register." }, "description": { - "type": "string" + "type": "string", + "description": "The description of the scoring function." }, "return_type": { - "$ref": "#/components/schemas/ParamType" + "$ref": "#/components/schemas/ParamType", + "description": "The return type of the scoring function." }, "provider_scoring_fn_id": { - "type": "string" + "type": "string", + "description": "The ID of the provider scoring function to use for the scoring function." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The ID of the provider to use for the scoring function." 
}, "params": { - "$ref": "#/components/schemas/ScoringFnParams" + "$ref": "#/components/schemas/ScoringFnParams", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval." } }, "additionalProperties": false, @@ -10868,13 +13019,16 @@ "type": "object", "properties": { "shield_id": { - "type": "string" + "type": "string", + "description": "The identifier of the shield to register." }, "provider_shield_id": { - "type": "string" + "type": "string", + "description": "The identifier of the shield in the provider." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The identifier of the provider." }, "params": { "type": "object", @@ -10899,7 +13053,8 @@ "type": "object" } ] - } + }, + "description": "The parameters of the shield." } }, "additionalProperties": false, @@ -10912,13 +13067,16 @@ "type": "object", "properties": { "toolgroup_id": { - "type": "string" + "type": "string", + "description": "The ID of the tool group to register." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The ID of the provider to use for the tool group." }, "mcp_endpoint": { - "$ref": "#/components/schemas/URL" + "$ref": "#/components/schemas/URL", + "description": "The MCP endpoint to use for the tool group." }, "args": { "type": "object", @@ -10943,7 +13101,8 @@ "type": "object" } ] - } + }, + "description": "A dictionary of arguments to pass to the tool group." } }, "additionalProperties": false, @@ -10957,19 +13116,24 @@ "type": "object", "properties": { "vector_db_id": { - "type": "string" + "type": "string", + "description": "The identifier of the vector database to register." }, "embedding_model": { - "type": "string" + "type": "string", + "description": "The embedding model to use." }, "embedding_dimension": { - "type": "integer" + "type": "integer", + "description": "The dimension of the embedding model." }, "provider_id": { - "type": "string" + "type": "string", + "description": "The identifier of the provider." }, "provider_vector_db_id": { - "type": "string" + "type": "string", + "description": "The identifier of the vector database in the provider." } }, "additionalProperties": false, @@ -11018,13 +13182,15 @@ "type": "object", "properties": { "shield_id": { - "type": "string" + "type": "string", + "description": "The identifier of the shield to run." }, "messages": { "type": "array", "items": { "$ref": "#/components/schemas/Message" - } + }, + "description": "The messages to run the shield on." }, "params": { "type": "object", @@ -11049,7 +13215,8 @@ "type": "object" } ] - } + }, + "description": "The parameters of the shield." } }, "additionalProperties": false, @@ -11077,19 +13244,23 @@ "type": "array", "items": { "$ref": "#/components/schemas/QueryCondition" - } + }, + "description": "The attribute filters to apply to the spans." }, "attributes_to_save": { "type": "array", "items": { "type": "string" - } + }, + "description": "The attributes to save to the dataset." }, "dataset_id": { - "type": "string" + "type": "string", + "description": "The ID of the dataset to save the spans to." }, "max_depth": { - "type": "integer" + "type": "integer", + "description": "The maximum depth of the tree." } }, "additionalProperties": false, @@ -11176,7 +13347,8 @@ "type": "object", "properties": { "dataset_id": { - "type": "string" + "type": "string", + "description": "The ID of the dataset to score." 
}, "scoring_functions": { "type": "object", @@ -11189,10 +13361,12 @@ "type": "null" } ] - } + }, + "description": "The scoring functions to use for the scoring." }, "save_results_dataset": { - "type": "boolean" + "type": "boolean", + "description": "Whether to save the results to a dataset." } }, "additionalProperties": false, @@ -11312,10 +13486,12 @@ "type": "object", "properties": { "job_uuid": { - "type": "string" + "type": "string", + "description": "The UUID of the job to create." }, "training_config": { - "$ref": "#/components/schemas/TrainingConfig" + "$ref": "#/components/schemas/TrainingConfig", + "description": "The training configuration." }, "hyperparam_search_config": { "type": "object", @@ -11340,7 +13516,8 @@ "type": "object" } ] - } + }, + "description": "The hyperparam search configuration." }, "logger_config": { "type": "object", @@ -11365,16 +13542,20 @@ "type": "object" } ] - } + }, + "description": "The logger configuration." }, "model": { - "type": "string" + "type": "string", + "description": "The model to fine-tune." }, "checkpoint_dir": { - "type": "string" + "type": "string", + "description": "The directory to save checkpoint(s) to." }, "algorithm_config": { - "$ref": "#/components/schemas/AlgorithmConfig" + "$ref": "#/components/schemas/AlgorithmConfig", + "description": "The algorithm configuration." } }, "additionalProperties": false, diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml index a24f1a9db..7638c3cbd 100644 --- a/docs/_static/llama-stack-spec.yaml +++ b/docs/_static/llama-stack-spec.yaml @@ -27,10 +27,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - DatasetIO - description: '' + description: Append rows to a dataset. parameters: - name: dataset_id in: path + description: >- + The ID of the dataset to append the rows to. required: true schema: type: string @@ -44,7 +46,8 @@ paths: post: responses: '200': - description: OK + description: >- + A BatchChatCompletionResponse with the full completions. content: application/json: schema: @@ -61,7 +64,8 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Inference - description: '' + description: >- + Generate chat completions for a batch of messages using the specified model. parameters: [] requestBody: content: @@ -73,7 +77,8 @@ paths: post: responses: '200': - description: OK + description: >- + A BatchCompletionResponse with the full completions. content: application/json: schema: @@ -90,7 +95,8 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Inference - description: '' + description: >- + Generate completions for a batch of content using the specified model. parameters: [] requestBody: content: @@ -115,7 +121,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Cancel a training job. parameters: [] requestBody: content: @@ -129,7 +135,7 @@ paths: '200': description: >- If stream=False, returns a ChatCompletionResponse with the full completion. - If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk + If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk. content: application/json: schema: @@ -164,7 +170,7 @@ paths: '200': description: >- If stream=False, returns a CompletionResponse with the full completion. - If stream=True, returns an SSE event stream of CompletionResponseStreamChunk + If stream=True, returns an SSE event stream of CompletionResponseStreamChunk. 
content: application/json: schema: @@ -197,11 +203,11 @@ paths: get: responses: '200': - description: A ListAgentsResponse. + description: A PaginatedResponse. content: application/json: schema: - $ref: '#/components/schemas/ListAgentsResponse' + $ref: '#/components/schemas/PaginatedResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -215,7 +221,19 @@ paths: tags: - Agents description: List all agents. - parameters: [] + parameters: + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of agents to return. + required: false + schema: + type: integer post: responses: '200': @@ -288,7 +306,7 @@ paths: '200': description: >- If stream=False, returns a Turn object. If stream=True, returns an SSE - event stream of AgentTurnResponseStreamChunk + event stream of AgentTurnResponseStreamChunk. content: application/json: schema: @@ -330,11 +348,90 @@ paths: schema: $ref: '#/components/schemas/CreateAgentTurnRequest' required: true + /v1/openai/v1/responses: + get: + responses: + '200': + description: A ListOpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/ListOpenAIResponseObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + description: List all OpenAI responses. + parameters: + - name: after + in: query + description: The ID of the last response to return. + required: false + schema: + type: string + - name: limit + in: query + description: The number of responses to return. + required: false + schema: + type: integer + - name: model + in: query + description: The model to filter responses by. + required: false + schema: + type: string + - name: order + in: query + description: >- + The order to sort responses by when sorted by created_at ('asc' or 'desc'). + required: false + schema: + $ref: '#/components/schemas/Order' + post: + responses: + '200': + description: An OpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIResponseObject' + text/event-stream: + schema: + $ref: '#/components/schemas/OpenAIResponseObjectStream' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + description: Create a new OpenAI response. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateOpenaiResponseRequest' + required: true /v1/files: get: responses: '200': - description: OK + description: A ListBucketResponse. content: application/json: schema: @@ -355,13 +452,14 @@ paths: parameters: - name: bucket in: query + description: 'Bucket name (valid chars: a-zA-Z0-9_-).' required: true schema: type: string post: responses: '200': - description: OK + description: A FileUploadResponse. content: application/json: schema: @@ -432,7 +530,8 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Agents - description: Delete an agent by its ID. + description: >- + Delete an agent by its ID and its associated sessions and turns. 
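
The new GET /v1/openai/v1/responses route described above lists OpenAI responses with after, limit, model, and order query parameters and returns a ListOpenAIResponseObject (data, has_more, first_id, last_id). A hedged sketch of paging through it; the base URL is an assumption, the parameter and field names come from the spec.

# Hedged sketch: paging through GET /v1/openai/v1/responses.
import requests

BASE = "http://localhost:8321"  # assumed server address

params = {"limit": 20, "order": "desc"}  # optionally also "model" and "after"
page = requests.get(f"{BASE}/v1/openai/v1/responses", params=params).json()

# ListOpenAIResponseObject: data, has_more, first_id, last_id, object == "list"
while True:
    for response_obj in page["data"]:
        print(response_obj["id"], response_obj["status"])
    if not page["has_more"]:
        break
    params["after"] = page["last_id"]   # cursor: id of the last response already returned
    page = requests.get(f"{BASE}/v1/openai/v1/responses", params=params).json()
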
parameters: - name: agent_id in: path @@ -444,7 +543,7 @@ paths: get: responses: '200': - description: OK + description: A Session. content: application/json: schema: @@ -501,7 +600,8 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Agents - description: Delete an agent session by its ID. + description: >- + Delete an agent session by its ID and its associated turns. parameters: - name: session_id in: path @@ -520,7 +620,7 @@ paths: get: responses: '200': - description: OK + description: A FileResponse. content: application/json: schema: @@ -542,14 +642,14 @@ paths: parameters: - name: bucket in: path - description: 'Bucket name (valid chars: a-zA-Z0-9_-)' + description: 'Bucket name (valid chars: a-zA-Z0-9_-).' required: true schema: type: string - name: key in: path description: >- - Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.). required: true schema: type: string @@ -574,14 +674,14 @@ paths: parameters: - name: bucket in: path - description: 'Bucket name (valid chars: a-zA-Z0-9_-)' + description: 'Bucket name (valid chars: a-zA-Z0-9_-).' required: true schema: type: string - name: key in: path description: >- - Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.). required: true schema: type: string @@ -592,7 +692,7 @@ paths: description: >- An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you - can check model metadata using /models/{model_id} + can check model metadata using /models/{model_id}. content: application/json: schema: @@ -623,7 +723,7 @@ paths: responses: '200': description: >- - EvaluateResponse object containing generations and scores + EvaluateResponse object containing generations and scores. content: application/json: schema: @@ -749,7 +849,7 @@ paths: get: responses: '200': - description: OK + description: A Benchmark. content: application/json: schema: @@ -766,10 +866,40 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Benchmarks - description: '' + description: Get a benchmark by its ID. parameters: - name: benchmark_id in: path + description: The ID of the benchmark to get. + required: true + schema: + type: string + /v1/openai/v1/chat/completions/{completion_id}: + get: + responses: + '200': + description: A OpenAICompletionWithInputMessages. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAICompletionWithInputMessages' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inference + description: Describe a chat completion by its ID. + parameters: + - name: completion_id + in: path + description: ID of the chat completion. required: true schema: type: string @@ -777,7 +907,7 @@ paths: get: responses: '200': - description: OK + description: A Dataset. content: application/json: schema: @@ -794,10 +924,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Datasets - description: '' + description: Get a dataset by its ID. parameters: - name: dataset_id in: path + description: The ID of the dataset to get. 
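
The GET /v1/openai/v1/chat/completions/{completion_id} route added above returns an OpenAICompletionWithInputMessages, i.e. the stored completion plus its input_messages. A hedged sketch of fetching one; the base URL and completion id are assumptions.

# Hedged sketch: describing a stored chat completion by its ID.
import requests

BASE = "http://localhost:8321"          # assumed server address
completion_id = "chatcmpl-abc123"       # hypothetical completion id

completion = requests.get(f"{BASE}/v1/openai/v1/chat/completions/{completion_id}").json()

# OpenAICompletionWithInputMessages adds "input_messages" alongside the usual fields.
print(completion["model"], completion["created"])
for message in completion["input_messages"]:
    print(message["role"])
for choice in completion["choices"]:
    print(choice["finish_reason"])
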
required: true schema: type: string @@ -817,10 +948,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Datasets - description: '' + description: Unregister a dataset by its ID. parameters: - name: dataset_id in: path + description: The ID of the dataset to unregister. required: true schema: type: string @@ -828,7 +960,7 @@ paths: get: responses: '200': - description: OK + description: A Model. content: application/json: schema: @@ -845,10 +977,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Models - description: '' + description: Get a model by its identifier. parameters: - name: model_id in: path + description: The identifier of the model to get. required: true schema: type: string @@ -868,10 +1001,42 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Models - description: '' + description: Unregister a model. parameters: - name: model_id in: path + description: >- + The identifier of the model to unregister. + required: true + schema: + type: string + /v1/openai/v1/responses/{response_id}: + get: + responses: + '200': + description: An OpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIResponseObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + description: Retrieve an OpenAI response by its ID. + parameters: + - name: response_id + in: path + description: >- + The ID of the OpenAI response to retrieve. required: true schema: type: string @@ -879,7 +1044,7 @@ paths: get: responses: '200': - description: OK + description: A ScoringFn. content: application/json: schema: @@ -896,10 +1061,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - ScoringFunctions - description: '' + description: Get a scoring function by its ID. parameters: - name: scoring_fn_id in: path + description: The ID of the scoring function to get. required: true schema: type: string @@ -907,7 +1073,7 @@ paths: get: responses: '200': - description: OK + description: A Shield. content: application/json: schema: @@ -924,10 +1090,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Shields - description: '' + description: Get a shield by its identifier. parameters: - name: identifier in: path + description: The identifier of the shield to get. required: true schema: type: string @@ -935,7 +1102,7 @@ paths: get: responses: '200': - description: OK + description: A Span. content: application/json: schema: @@ -952,15 +1119,18 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Get a span by its ID. parameters: - name: trace_id in: path + description: >- + The ID of the trace to get the span from. required: true schema: type: string - name: span_id in: path + description: The ID of the span to get. required: true schema: type: string @@ -968,7 +1138,7 @@ paths: post: responses: '200': - description: OK + description: A QuerySpanTreeResponse. content: application/json: schema: @@ -985,10 +1155,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Get a span tree by its ID. parameters: - name: span_id in: path + description: The ID of the span to get the tree from. required: true schema: type: string @@ -1002,7 +1173,7 @@ paths: get: responses: '200': - description: OK + description: A Tool. 
content: application/json: schema: @@ -1019,10 +1190,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - ToolGroups - description: '' + description: Get a tool by its name. parameters: - name: tool_name in: path + description: The name of the tool to get. required: true schema: type: string @@ -1030,7 +1202,7 @@ paths: get: responses: '200': - description: OK + description: A ToolGroup. content: application/json: schema: @@ -1047,10 +1219,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - ToolGroups - description: '' + description: Get a tool group by its ID. parameters: - name: toolgroup_id in: path + description: The ID of the tool group to get. required: true schema: type: string @@ -1070,10 +1243,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - ToolGroups - description: Unregister a tool group + description: Unregister a tool group. parameters: - name: toolgroup_id in: path + description: The ID of the tool group to unregister. required: true schema: type: string @@ -1081,7 +1255,7 @@ paths: get: responses: '200': - description: OK + description: A Trace. content: application/json: schema: @@ -1098,10 +1272,11 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Get a trace by its ID. parameters: - name: trace_id in: path + description: The ID of the trace to get. required: true schema: type: string @@ -1109,7 +1284,7 @@ paths: get: responses: '200': - description: OK + description: A PostTrainingJobArtifactsResponse. content: application/json: schema: @@ -1126,10 +1301,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Get the artifacts of a training job. parameters: - name: job_uuid in: query + description: >- + The UUID of the job to get the artifacts of. required: true schema: type: string @@ -1137,7 +1314,7 @@ paths: get: responses: '200': - description: OK + description: A PostTrainingJobStatusResponse. content: application/json: schema: @@ -1154,10 +1331,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Get the status of a training job. parameters: - name: job_uuid in: query + description: >- + The UUID of the job to get the status of. required: true schema: type: string @@ -1165,7 +1344,7 @@ paths: get: responses: '200': - description: OK + description: A ListPostTrainingJobsResponse. content: application/json: schema: @@ -1182,13 +1361,13 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Get all training jobs. parameters: [] /v1/files/session:{upload_id}: get: responses: '200': - description: OK + description: A FileUploadResponse. content: application/json: schema: @@ -1206,18 +1385,19 @@ paths: tags: - Files description: >- - Returns information about an existsing upload session + Returns information about an existsing upload session. parameters: - name: upload_id in: path - description: ID of the upload session + description: ID of the upload session. required: true schema: type: string post: responses: '200': - description: OK + description: >- + A FileResponse or None if the upload is not complete. content: application/json: schema: @@ -1242,7 +1422,7 @@ paths: parameters: - name: upload_id in: path - description: ID of the upload session + description: ID of the upload session. 
required: true schema: type: string @@ -1257,7 +1437,7 @@ paths: get: responses: '200': - description: OK + description: A VectorDB. content: application/json: schema: @@ -1274,10 +1454,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorDBs - description: '' + description: Get a vector database by its identifier. parameters: - name: vector_db_id in: path + description: >- + The identifier of the vector database to get. required: true schema: type: string @@ -1297,10 +1479,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorDBs - description: '' + description: Unregister a vector database. parameters: - name: vector_db_id in: path + description: >- + The identifier of the vector database to unregister. required: true schema: type: string @@ -1308,7 +1492,7 @@ paths: get: responses: '200': - description: OK + description: A HealthInfo. content: application/json: schema: @@ -1325,7 +1509,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Inspect - description: '' + description: Get the health of the service. parameters: [] /v1/tool-runtime/rag-tool/insert: post: @@ -1370,7 +1554,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - description: '' + description: Insert chunks into a vector database. parameters: [] requestBody: content: @@ -1382,7 +1566,8 @@ paths: get: responses: '200': - description: OK + description: >- + A ProviderInfo object containing the provider's details. content: application/json: schema: @@ -1399,10 +1584,12 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Providers - description: '' + description: >- + Get detailed information about a specific provider. parameters: - name: provider_id in: path + description: The ID of the provider to inspect. required: true schema: type: string @@ -1410,7 +1597,7 @@ paths: post: responses: '200': - description: OK + description: A ToolInvocationResult. content: application/json: schema: @@ -1427,7 +1614,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - ToolRuntime - description: Run a tool with the given arguments + description: Run a tool with the given arguments. parameters: [] requestBody: content: @@ -1439,7 +1626,7 @@ paths: get: responses: '200': - description: OK + description: A PaginatedResponse. content: application/json: schema: @@ -1468,9 +1655,9 @@ paths: The response includes: - - data: List of items for the current page + - data: List of items for the current page. - - has_more: Whether there are more items available after this set + - has_more: Whether there are more items available after this set. parameters: - name: dataset_id in: path @@ -1496,7 +1683,7 @@ paths: get: responses: '200': - description: The status of the evaluationjob. + description: The status of the evaluation job. content: application/json: schema: @@ -1599,11 +1786,11 @@ paths: get: responses: '200': - description: A ListAgentSessionsResponse. + description: A PaginatedResponse. content: application/json: schema: - $ref: '#/components/schemas/ListAgentSessionsResponse' + $ref: '#/components/schemas/PaginatedResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -1625,11 +1812,23 @@ paths: required: true schema: type: string + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of sessions to return. 
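
The session and agent listing routes above now return a PaginatedResponse driven by start_index and limit query parameters, with items in "data" and a "has_more" flag. A hedged sketch of walking all pages of an agent's sessions; the base URL, route path, agent id, and page size are assumptions for illustration.

# Hedged sketch: client-side pagination with the new start_index / limit parameters.
import requests

BASE = "http://localhost:8321"          # assumed server address
agent_id = "agent-123"                  # hypothetical agent id
page_size = 50

start_index = 0
while True:
    page = requests.get(
        f"{BASE}/v1/agents/{agent_id}/sessions",   # path assumed from the session-listing route in this hunk
        params={"start_index": start_index, "limit": page_size},
    ).json()
    for session in page["data"]:        # PaginatedResponse carries the current page in "data"
        print(session)
    if not page.get("has_more"):        # "has_more" signals whether another page exists
        break
    start_index += page_size
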
+ required: false + schema: + type: integer /v1/eval/benchmarks: get: responses: '200': - description: OK + description: A ListBenchmarksResponse. content: application/json: schema: @@ -1646,7 +1845,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Benchmarks - description: '' + description: List all benchmarks. parameters: [] post: responses: @@ -1664,7 +1863,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Benchmarks - description: '' + description: Register a benchmark. parameters: [] requestBody: content: @@ -1672,472 +1871,61 @@ paths: schema: $ref: '#/components/schemas/RegisterBenchmarkRequest' required: true - /v1/datasets: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListDatasetsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Datasets - description: '' - parameters: [] - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Dataset' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Datasets - description: Register a new dataset. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterDatasetRequest' - required: true - /v1/files/{bucket}: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListFileResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - description: List all files in a bucket. 
- parameters: - - name: bucket - in: path - description: 'Bucket name (valid chars: a-zA-Z0-9_-)' - required: true - schema: - type: string - /v1/models: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListModelsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Models - description: '' - parameters: [] - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Model' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Models - description: '' - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterModelRequest' - required: true - /v1/providers: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListProvidersResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Providers - description: '' - parameters: [] - /v1/inspect/routes: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListRoutesResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inspect - description: '' - parameters: [] - /v1/tool-runtime/list-tools: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListToolDefsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolRuntime - description: '' - parameters: - - name: tool_group_id - in: query - required: false - schema: - type: string - - name: mcp_endpoint - in: query - required: false - schema: - $ref: '#/components/schemas/URL' - /v1/scoring-functions: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListScoringFunctionsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ScoringFunctions - description: '' - parameters: [] - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ScoringFunctions - description: '' - parameters: [] - 
requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterScoringFunctionRequest' - required: true - /v1/shields: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListShieldsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Shields - description: '' - parameters: [] - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/Shield' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Shields - description: '' - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterShieldRequest' - required: true - /v1/toolgroups: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListToolGroupsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - description: List tool groups with optional provider - parameters: [] - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - description: Register a tool group - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterToolGroupRequest' - required: true - /v1/tools: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListToolsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - description: List tools with optional tool group - parameters: - - name: toolgroup_id - in: query - required: false - schema: - type: string - /v1/vector-dbs: - get: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/ListVectorDBsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorDBs - description: '' - parameters: [] - post: - responses: - '200': - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/VectorDB' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - 
VectorDBs - description: '' - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterVectorDbRequest' - required: true - /v1/telemetry/events: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - description: '' - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/LogEventRequest' - required: true /v1/openai/v1/chat/completions: + get: + responses: + '200': + description: A ListOpenAIChatCompletionResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListOpenAIChatCompletionResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inference + description: List all chat completions. + parameters: + - name: after + in: query + description: >- + The ID of the last chat completion to return. + required: false + schema: + type: string + - name: limit + in: query + description: >- + The maximum number of chat completions to return. + required: false + schema: + type: integer + - name: model + in: query + description: The model to filter by. + required: false + schema: + type: string + - name: order + in: query + description: >- + The order to sort the chat completions by: "asc" or "desc". Defaults to + "desc". + required: false + schema: + $ref: '#/components/schemas/Order' post: responses: '200': - description: >- - Response from an OpenAI-compatible chat completion request. **OR** Chunk - from a streaming response to an OpenAI-compatible chat completion request. + description: An OpenAIChatCompletion. content: application/json: schema: @@ -2166,11 +1954,546 @@ paths: schema: $ref: '#/components/schemas/OpenaiChatCompletionRequest' required: true - /v1/openai/v1/completions: + /v1/datasets: + get: + responses: + '200': + description: A ListDatasetsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListDatasetsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + description: List all datasets. + parameters: [] + post: + responses: + '200': + description: A Dataset. + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + description: Register a new dataset. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterDatasetRequest' + required: true + /v1/files/{bucket}: + get: + responses: + '200': + description: A ListFileResponse. 
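The new GET /v1/openai/v1/chat/completions operation above lists stored chat completions with cursor-style paging (`after`, `limit`, `model`, `order`) and returns a ListOpenAIChatCompletionResponse carrying `data`, `has_more`, and `last_id`. A sketch of following the cursor, assuming the same local server as above:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed local Llama Stack server

def list_chat_completions(model: str | None = None, limit: int = 20):
    """Page through stored chat completions using the `after` cursor."""
    after = None
    while True:
        params = {"limit": limit, "order": "desc"}
        if model:
            params["model"] = model
        if after:
            params["after"] = after
        resp = requests.get(f"{BASE_URL}/v1/openai/v1/chat/completions", params=params)
        resp.raise_for_status()
        page = resp.json()
        yield from page["data"]
        if not page["has_more"]:
            break
        after = page["last_id"]  # continue after the last item returned
```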
+ content: + application/json: + schema: + $ref: '#/components/schemas/ListFileResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + description: List all files in a bucket. + parameters: + - name: bucket + in: path + description: 'Bucket name (valid chars: a-zA-Z0-9_-).' + required: true + schema: + type: string + /v1/models: + get: + responses: + '200': + description: A ListModelsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListModelsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Models + description: List all models. + parameters: [] + post: + responses: + '200': + description: A Model. + content: + application/json: + schema: + $ref: '#/components/schemas/Model' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Models + description: Register a model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterModelRequest' + required: true + /v1/openai/v1/responses/{response_id}/input_items: + get: + responses: + '200': + description: An ListOpenAIResponseInputItem. + content: + application/json: + schema: + $ref: '#/components/schemas/ListOpenAIResponseInputItem' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + description: >- + List input items for a given OpenAI response. + parameters: + - name: response_id + in: path + description: >- + The ID of the response to retrieve input items for. + required: true + schema: + type: string + - name: after + in: query + description: >- + An item ID to list items after, used for pagination. + required: false + schema: + type: string + - name: before + in: query + description: >- + An item ID to list items before, used for pagination. + required: false + schema: + type: string + - name: include + in: query + description: >- + Additional fields to include in the response. + required: false + schema: + type: array + items: + type: string + - name: limit + in: query + description: >- + A limit on the number of objects to be returned. Limit can range between + 1 and 100, and the default is 20. + required: false + schema: + type: integer + - name: order + in: query + description: >- + The order to return the input items in. Default is desc. + required: false + schema: + $ref: '#/components/schemas/Order' + /v1/providers: + get: + responses: + '200': + description: >- + A ListProvidersResponse containing information about all providers. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ListProvidersResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Providers + description: List all available providers. + parameters: [] + /v1/inspect/routes: + get: + responses: + '200': + description: A ListRoutesResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListRoutesResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inspect + description: List all routes. + parameters: [] + /v1/tool-runtime/list-tools: + get: + responses: + '200': + description: A ListToolDefsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListToolDefsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolRuntime + description: List all tools in the runtime. + parameters: + - name: tool_group_id + in: query + description: >- + The ID of the tool group to list tools for. + required: false + schema: + type: string + - name: mcp_endpoint + in: query + description: >- + The MCP endpoint to use for the tool group. + required: false + schema: + $ref: '#/components/schemas/URL' + /v1/scoring-functions: + get: + responses: + '200': + description: A ListScoringFunctionsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListScoringFunctionsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ScoringFunctions + description: List all scoring functions. + parameters: [] post: responses: '200': description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ScoringFunctions + description: Register a scoring function. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterScoringFunctionRequest' + required: true + /v1/shields: + get: + responses: + '200': + description: A ListShieldsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListShieldsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + description: List all shields. + parameters: [] + post: + responses: + '200': + description: A Shield. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Shield' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + description: Register a shield. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterShieldRequest' + required: true + /v1/toolgroups: + get: + responses: + '200': + description: A ListToolGroupsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListToolGroupsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + description: List tool groups with optional provider. + parameters: [] + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + description: Register a tool group. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterToolGroupRequest' + required: true + /v1/tools: + get: + responses: + '200': + description: A ListToolsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListToolsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + description: List tools with optional tool group. + parameters: + - name: toolgroup_id + in: query + description: >- + The ID of the tool group to list tools for. + required: false + schema: + type: string + /v1/vector-dbs: + get: + responses: + '200': + description: A ListVectorDBsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListVectorDBsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorDBs + description: List all vector databases. + parameters: [] + post: + responses: + '200': + description: A VectorDB. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorDB' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorDBs + description: Register a vector database. 
+ parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterVectorDbRequest' + required: true + /v1/telemetry/events: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + description: Log an event. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LogEventRequest' + required: true + /v1/openai/v1/completions: + post: + responses: + '200': + description: An OpenAICompletion. content: application/json: schema: @@ -2197,11 +2520,43 @@ paths: schema: $ref: '#/components/schemas/OpenaiCompletionRequest' required: true + /v1/openai/v1/embeddings: + post: + responses: + '200': + description: >- + An OpenAIEmbeddingsResponse containing the embeddings. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIEmbeddingsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inference + description: >- + Generate OpenAI-compatible embeddings for the given input using the specified + model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiEmbeddingsRequest' + required: true /v1/openai/v1/models: get: responses: '200': - description: OK + description: A OpenAIListModelsResponse. content: application/json: schema: @@ -2218,13 +2573,13 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Models - description: '' + description: List models using the OpenAI API. parameters: [] /v1/post-training/preference-optimize: post: responses: '200': - description: OK + description: A PostTrainingJob. content: application/json: schema: @@ -2241,7 +2596,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Run preference optimization of a model. parameters: [] requestBody: content: @@ -2283,7 +2638,7 @@ paths: post: responses: '200': - description: OK + description: A QueryChunksResponse. content: application/json: schema: @@ -2300,7 +2655,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - description: '' + description: Query chunks from a vector database. parameters: [] requestBody: content: @@ -2308,11 +2663,46 @@ paths: schema: $ref: '#/components/schemas/QueryChunksRequest' required: true + /v1/telemetry/metrics/{metric_name}: + post: + responses: + '200': + description: A QueryMetricsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + description: Query metrics. + parameters: + - name: metric_name + in: path + description: The name of the metric to query. 
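The /v1/openai/v1/embeddings operation added above generates OpenAI-compatible embeddings and returns an OpenAIEmbeddingsResponse. A minimal sketch; the request fields (`model`, `input`) are assumed to mirror the OpenAI embeddings payload, since OpenaiEmbeddingsRequest is defined elsewhere in the spec:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed local Llama Stack server

# Assumed request fields mirroring the OpenAI embeddings API.
payload = {
    "model": "all-minilm:latest",                  # hypothetical embedding model id
    "input": ["llama stack", "vector search"],
}
resp = requests.post(f"{BASE_URL}/v1/openai/v1/embeddings", json=payload)
resp.raise_for_status()
embeddings = resp.json()  # OpenAIEmbeddingsResponse containing the embeddings
```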
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsRequest' + required: true /v1/telemetry/spans: post: responses: '200': - description: OK + description: A QuerySpansResponse. content: application/json: schema: @@ -2329,7 +2719,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Query spans. parameters: [] requestBody: content: @@ -2341,7 +2731,7 @@ paths: post: responses: '200': - description: OK + description: A QueryTracesResponse. content: application/json: schema: @@ -2358,7 +2748,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Query traces. parameters: [] requestBody: content: @@ -2464,7 +2854,7 @@ paths: post: responses: '200': - description: OK + description: A RunShieldResponse. content: application/json: schema: @@ -2481,7 +2871,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Safety - description: '' + description: Run a shield. parameters: [] requestBody: content: @@ -2506,7 +2896,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Telemetry - description: '' + description: Save spans to a dataset. parameters: [] requestBody: content: @@ -2519,7 +2909,7 @@ paths: responses: '200': description: >- - ScoreResponse object containing rows and aggregated results + A ScoreResponse object containing rows and aggregated results. content: application/json: schema: @@ -2548,7 +2938,7 @@ paths: post: responses: '200': - description: OK + description: A ScoreBatchResponse. content: application/json: schema: @@ -2565,7 +2955,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Scoring - description: '' + description: Score a batch of rows. parameters: [] requestBody: content: @@ -2577,7 +2967,7 @@ paths: post: responses: '200': - description: OK + description: A PostTrainingJob. content: application/json: schema: @@ -2594,7 +2984,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - PostTraining (Coming Soon) - description: '' + description: Run supervised fine-tuning of a model. parameters: [] requestBody: content: @@ -2635,7 +3025,7 @@ paths: get: responses: '200': - description: OK + description: A VersionInfo. content: application/json: schema: @@ -2652,7 +3042,7 @@ paths: $ref: '#/components/responses/DefaultError' tags: - Inspect - description: '' + description: Get the version of the service. parameters: [] jsonSchemaDialect: >- https://json-schema.org/draft/2020-12/schema @@ -2701,6 +3091,7 @@ components: - type: string - type: array - type: object + description: The rows to append to the dataset. additionalProperties: false required: - rows @@ -2749,10 +3140,13 @@ components: properties: type: type: string - const: grammar - default: grammar + enum: + - json_schema + - grammar description: >- Must be "grammar" to identify this format type + const: grammar + default: grammar bnf: type: object additionalProperties: @@ -2834,10 +3228,13 @@ components: properties: type: type: string - const: json_schema - default: json_schema + enum: + - json_schema + - grammar description: >- Must be "json_schema" to identify this format type + const: json_schema + default: json_schema json_schema: type: object additionalProperties: @@ -3199,22 +3596,34 @@ components: properties: model_id: type: string + description: >- + The identifier of the model to use. The model must be registered with + Llama Stack and available via the /models endpoint. 
messages_batch: type: array items: type: array items: $ref: '#/components/schemas/Message' + description: >- + The messages to generate completions for. sampling_params: $ref: '#/components/schemas/SamplingParams' + description: >- + (Optional) Parameters to control the sampling strategy. tools: type: array items: $ref: '#/components/schemas/ToolDefinition' + description: >- + (Optional) List of tool definitions available to the model. tool_config: $ref: '#/components/schemas/ToolConfig' + description: (Optional) Configuration for tool use. response_format: $ref: '#/components/schemas/ResponseFormat' + description: >- + (Optional) Grammar specification for guided (structured) decoding. logprobs: type: object properties: @@ -3224,7 +3633,9 @@ components: description: >- How many tokens (for each position) to return log probabilities for. additionalProperties: false - title: LogProbConfig + description: >- + (Optional) If specified, log probabilities for each token position will + be returned. additionalProperties: false required: - model_id @@ -3297,14 +3708,22 @@ components: properties: model_id: type: string + description: >- + The identifier of the model to use. The model must be registered with + Llama Stack and available via the /models endpoint. content_batch: type: array items: $ref: '#/components/schemas/InterleavedContent' + description: The content to generate completions for. sampling_params: $ref: '#/components/schemas/SamplingParams' + description: >- + (Optional) Parameters to control the sampling strategy. response_format: $ref: '#/components/schemas/ResponseFormat' + description: >- + (Optional) Grammar specification for guided (structured) decoding. logprobs: type: object properties: @@ -3314,7 +3733,9 @@ components: description: >- How many tokens (for each position) to return log probabilities for. additionalProperties: false - title: LogProbConfig + description: >- + (Optional) If specified, log probabilities for each token position will + be returned. additionalProperties: false required: - model_id @@ -3365,6 +3786,7 @@ components: properties: job_uuid: type: string + description: The UUID of the job to cancel. additionalProperties: false required: - job_uuid @@ -3381,17 +3803,17 @@ components: type: array items: $ref: '#/components/schemas/Message' - description: List of messages in the conversation + description: List of messages in the conversation. sampling_params: $ref: '#/components/schemas/SamplingParams' description: >- - Parameters to control the sampling strategy + Parameters to control the sampling strategy. tools: type: array items: $ref: '#/components/schemas/ToolDefinition' description: >- - (Optional) List of tool definitions available to the model + (Optional) List of tool definitions available to the model. tool_choice: type: string enum: @@ -3574,15 +3996,16 @@ components: Llama Stack and available via the /models endpoint. content: $ref: '#/components/schemas/InterleavedContent' - description: The content to generate a completion for + description: >- + The content to generate a completion for. sampling_params: $ref: '#/components/schemas/SamplingParams' description: >- - (Optional) Parameters to control the sampling strategy + (Optional) Parameters to control the sampling strategy. response_format: $ref: '#/components/schemas/ResponseFormat' description: >- - (Optional) Grammar specification for guided (structured) decoding + (Optional) Grammar specification for guided (structured) decoding. 
stream: type: boolean description: >- @@ -3896,6 +4319,13 @@ components: description: The time the step completed. step_type: type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. const: inference default: inference model_response: @@ -3928,6 +4358,13 @@ components: description: The time the step completed. step_type: type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. const: memory_retrieval default: memory_retrieval vector_db_ids: @@ -3989,6 +4426,13 @@ components: description: The time the step completed. step_type: type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. const: shield_call default: shield_call violation: @@ -4020,6 +4464,13 @@ components: description: The time the step completed. step_type: type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. const: tool_execution default: tool_execution tool_calls: @@ -4182,6 +4633,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: step_complete default: step_complete step_type: @@ -4220,6 +4679,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: step_progress default: step_progress step_type: @@ -4247,6 +4714,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: step_start default: step_start step_type: @@ -4291,6 +4766,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: turn_awaiting_input default: turn_awaiting_input turn: @@ -4306,6 +4789,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: turn_complete default: turn_complete turn: @@ -4320,6 +4811,14 @@ components: properties: event_type: type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + title: AgentTurnResponseEventType const: turn_start default: turn_start turn_id: @@ -4329,23 +4828,586 @@ components: - event_type - turn_id title: AgentTurnResponseTurnStartPayload + OpenAIResponseInput: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' + - $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput' + - $ref: '#/components/schemas/OpenAIResponseMessage' + "OpenAIResponseInputFunctionToolCallOutput": + type: object + properties: + call_id: + type: string + output: + type: string + type: + type: string + const: function_call_output + default: function_call_output + id: + 
type: string + status: + type: string + additionalProperties: false + required: + - call_id + - output + - type + title: >- + OpenAIResponseInputFunctionToolCallOutput + description: >- + This represents the output of a function call that gets passed back to the + model. + OpenAIResponseInputMessageContent: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseInputMessageContentText' + - $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage' + discriminator: + propertyName: type + mapping: + input_text: '#/components/schemas/OpenAIResponseInputMessageContentText' + input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage' + OpenAIResponseInputMessageContentImage: + type: object + properties: + detail: + oneOf: + - type: string + const: low + - type: string + const: high + - type: string + const: auto + default: auto + type: + type: string + const: input_image + default: input_image + image_url: + type: string + additionalProperties: false + required: + - detail + - type + title: OpenAIResponseInputMessageContentImage + OpenAIResponseInputMessageContentText: + type: object + properties: + text: + type: string + type: + type: string + const: input_text + default: input_text + additionalProperties: false + required: + - text + - type + title: OpenAIResponseInputMessageContentText + OpenAIResponseInputTool: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch' + - $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch' + - $ref: '#/components/schemas/OpenAIResponseInputToolFunction' + - $ref: '#/components/schemas/OpenAIResponseInputToolMCP' + discriminator: + propertyName: type + mapping: + web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch' + file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch' + function: '#/components/schemas/OpenAIResponseInputToolFunction' + mcp: '#/components/schemas/OpenAIResponseInputToolMCP' + OpenAIResponseInputToolFileSearch: + type: object + properties: + type: + type: string + const: file_search + default: file_search + vector_store_id: + type: array + items: + type: string + ranking_options: + type: object + properties: + ranker: + type: string + score_threshold: + type: number + default: 0.0 + additionalProperties: false + title: FileSearchRankingOptions + additionalProperties: false + required: + - type + - vector_store_id + title: OpenAIResponseInputToolFileSearch + OpenAIResponseInputToolFunction: + type: object + properties: + type: + type: string + const: function + default: function + name: + type: string + description: + type: string + parameters: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + strict: + type: boolean + additionalProperties: false + required: + - type + - name + title: OpenAIResponseInputToolFunction + OpenAIResponseInputToolMCP: + type: object + properties: + type: + type: string + const: mcp + default: mcp + server_label: + type: string + server_url: + type: string + headers: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + require_approval: + oneOf: + - type: string + const: always + - type: string + const: never + - type: object + properties: + always: + type: array + items: + type: string + never: + type: array + items: + type: string + additionalProperties: false + title: ApprovalFilter + default: never + allowed_tools: + oneOf: + - 
type: array + items: + type: string + - type: object + properties: + tool_names: + type: array + items: + type: string + additionalProperties: false + title: AllowedToolsFilter + additionalProperties: false + required: + - type + - server_label + - server_url + - require_approval + title: OpenAIResponseInputToolMCP + OpenAIResponseInputToolWebSearch: + type: object + properties: + type: + oneOf: + - type: string + const: web_search + - type: string + const: web_search_preview_2025_03_11 + default: web_search + search_context_size: + type: string + default: medium + additionalProperties: false + required: + - type + title: OpenAIResponseInputToolWebSearch + OpenAIResponseMessage: + type: object + properties: + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIResponseInputMessageContent' + - type: array + items: + $ref: '#/components/schemas/OpenAIResponseOutputMessageContent' + role: + oneOf: + - type: string + const: system + - type: string + const: developer + - type: string + const: user + - type: string + const: assistant + type: + type: string + const: message + default: message + id: + type: string + status: + type: string + additionalProperties: false + required: + - content + - role + - type + title: OpenAIResponseMessage + description: >- + Corresponds to the various Message types in the Responses API. They are all + under one type because the Responses API gives them all the same "type" value, + and there is no way to tell them apart in certain scenarios. + OpenAIResponseOutputMessageContent: + type: object + properties: + text: + type: string + type: + type: string + const: output_text + default: output_text + additionalProperties: false + required: + - text + - type + title: >- + OpenAIResponseOutputMessageContentOutputText + "OpenAIResponseOutputMessageFunctionToolCall": + type: object + properties: + call_id: + type: string + name: + type: string + arguments: + type: string + type: + type: string + const: function_call + default: function_call + id: + type: string + status: + type: string + additionalProperties: false + required: + - call_id + - name + - arguments + - type + title: >- + OpenAIResponseOutputMessageFunctionToolCall + "OpenAIResponseOutputMessageWebSearchToolCall": + type: object + properties: + id: + type: string + status: + type: string + type: + type: string + const: web_search_call + default: web_search_call + additionalProperties: false + required: + - id + - status + - type + title: >- + OpenAIResponseOutputMessageWebSearchToolCall + CreateOpenaiResponseRequest: + type: object + properties: + input: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIResponseInput' + description: Input message(s) to create the response. + model: + type: string + description: The underlying LLM used for completions. + instructions: + type: string + previous_response_id: + type: string + description: >- + (Optional) if specified, the new response will be a continuation of the + previous response. This can be used to easily fork-off new responses from + existing responses. 
+ store: + type: boolean + stream: + type: boolean + temperature: + type: number + tools: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseInputTool' + additionalProperties: false + required: + - input + - model + title: CreateOpenaiResponseRequest + OpenAIResponseError: + type: object + properties: + code: + type: string + message: + type: string + additionalProperties: false + required: + - code + - message + title: OpenAIResponseError + OpenAIResponseObject: + type: object + properties: + created_at: + type: integer + error: + $ref: '#/components/schemas/OpenAIResponseError' + id: + type: string + model: + type: string + object: + type: string + const: response + default: response + output: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseOutput' + parallel_tool_calls: + type: boolean + default: false + previous_response_id: + type: string + status: + type: string + temperature: + type: number + top_p: + type: number + truncation: + type: string + user: + type: string + additionalProperties: false + required: + - created_at + - id + - model + - object + - output + - parallel_tool_calls + - status + title: OpenAIResponseObject + OpenAIResponseOutput: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseMessage' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' + discriminator: + propertyName: type + mapping: + message: '#/components/schemas/OpenAIResponseMessage' + web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' + mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' + mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' + OpenAIResponseOutputMessageMCPCall: + type: object + properties: + id: + type: string + type: + type: string + const: mcp_call + default: mcp_call + arguments: + type: string + name: + type: string + server_label: + type: string + error: + type: string + output: + type: string + additionalProperties: false + required: + - id + - type + - arguments + - name + - server_label + title: OpenAIResponseOutputMessageMCPCall + OpenAIResponseOutputMessageMCPListTools: + type: object + properties: + id: + type: string + type: + type: string + const: mcp_list_tools + default: mcp_list_tools + server_label: + type: string + tools: + type: array + items: + type: object + properties: + input_schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + name: + type: string + description: + type: string + additionalProperties: false + required: + - input_schema + - name + title: MCPListToolsTool + additionalProperties: false + required: + - id + - type + - server_label + - tools + title: OpenAIResponseOutputMessageMCPListTools + OpenAIResponseObjectStream: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated' + - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta' + - $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCompleted' + discriminator: + propertyName: type + mapping: + response.created: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated' + 
response.output_text.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta' + response.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseCompleted' + "OpenAIResponseObjectStreamResponseCompleted": + type: object + properties: + response: + $ref: '#/components/schemas/OpenAIResponseObject' + type: + type: string + const: response.completed + default: response.completed + additionalProperties: false + required: + - response + - type + title: >- + OpenAIResponseObjectStreamResponseCompleted + "OpenAIResponseObjectStreamResponseCreated": + type: object + properties: + response: + $ref: '#/components/schemas/OpenAIResponseObject' + type: + type: string + const: response.created + default: response.created + additionalProperties: false + required: + - response + - type + title: >- + OpenAIResponseObjectStreamResponseCreated + "OpenAIResponseObjectStreamResponseOutputTextDelta": + type: object + properties: + content_index: + type: integer + delta: + type: string + item_id: + type: string + output_index: + type: integer + sequence_number: + type: integer + type: + type: string + const: response.output_text.delta + default: response.output_text.delta + additionalProperties: false + required: + - content_index + - delta + - item_id + - output_index + - sequence_number + - type + title: >- + OpenAIResponseObjectStreamResponseOutputTextDelta CreateUploadSessionRequest: type: object properties: bucket: type: string description: >- - Bucket under which the file is stored (valid chars: a-zA-Z0-9_-) + Bucket under which the file is stored (valid chars: a-zA-Z0-9_-). key: type: string description: >- - Key under which the file is stored (valid chars: a-zA-Z0-9_-/.) + Key under which the file is stored (valid chars: a-zA-Z0-9_-/.). mime_type: type: string - description: MIME type of the file + description: MIME type of the file. size: type: integer - description: File size in bytes + description: File size in bytes. 
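The Responses API schemas above define CreateOpenaiResponseRequest (`input` and `model` required; `instructions`, `previous_response_id`, `store`, `stream`, `temperature`, `tools` optional) and a stream of `response.created`, `response.output_text.delta`, and `response.completed` events. A sketch of a streaming call; the create path (/v1/openai/v1/responses) and the SSE framing are assumptions, as only the input_items sub-path appears in this hunk:

```python
import json
import requests

BASE_URL = "http://localhost:8321"  # assumed local Llama Stack server

payload = {
    "model": "llama3.2:3b-instruct-fp16",  # hypothetical model id
    "input": "Summarize the Llama Stack Responses API in one sentence.",
    "stream": True,                         # opt in to the response.* event stream
}
with requests.post(f"{BASE_URL}/v1/openai/v1/responses", json=payload, stream=True) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        if not line or not line.startswith(b"data: "):
            continue  # assuming SSE framing; only data lines carry events
        event = json.loads(line[len(b"data: "):])
        if event["type"] == "response.output_text.delta":
            print(event["delta"], end="", flush=True)
        elif event["type"] == "response.completed":
            break
```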
additionalProperties: false required: - bucket @@ -4473,7 +5535,7 @@ components: type: object properties: type: - type: string + $ref: '#/components/schemas/ScoringFnParamsType' const: basic default: basic aggregation_functions: @@ -4483,6 +5545,7 @@ components: additionalProperties: false required: - type + - aggregation_functions title: BasicScoringFnParams BenchmarkConfig: type: object @@ -4522,7 +5585,7 @@ components: type: object properties: type: - type: string + $ref: '#/components/schemas/ScoringFnParamsType' const: llm_as_judge default: llm_as_judge judge_model: @@ -4541,6 +5604,8 @@ components: required: - type - judge_model + - judge_score_regexes + - aggregation_functions title: LLMAsJudgeScoringFnParams ModelCandidate: type: object @@ -4571,7 +5636,7 @@ components: type: object properties: type: - type: string + $ref: '#/components/schemas/ScoringFnParamsType' const: regex_parser default: regex_parser parsing_regexes: @@ -4585,6 +5650,8 @@ components: additionalProperties: false required: - type + - parsing_regexes + - aggregation_functions title: RegexParserScoringFnParams ScoringFnParams: oneOf: @@ -4597,6 +5664,13 @@ components: llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' regex_parser: '#/components/schemas/RegexParserScoringFnParams' basic: '#/components/schemas/BasicScoringFnParams' + ScoringFnParamsType: + type: string + enum: + - llm_as_judge + - regex_parser + - basic + title: ScoringFnParamsType EvaluateRowsRequest: type: object properties: @@ -4759,6 +5833,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: benchmark default: benchmark dataset_id: @@ -4780,13 +5864,375 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - dataset_id - scoring_functions - metadata title: Benchmark + OpenAIAssistantMessageParam: + type: object + properties: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: The content of the model's response + name: + type: string + description: >- + (Optional) The name of the assistant message participant. + tool_calls: + type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionToolCall' + description: >- + List of tool calls. Each tool call is an OpenAIChatCompletionToolCall + object. + additionalProperties: false + required: + - role + title: OpenAIAssistantMessageParam + description: >- + A message containing the model's (assistant) response in an OpenAI-compatible + chat completion request. 
+ "OpenAIChatCompletionContentPartImageParam": + type: object + properties: + type: + type: string + const: image_url + default: image_url + image_url: + $ref: '#/components/schemas/OpenAIImageURL' + additionalProperties: false + required: + - type + - image_url + title: >- + OpenAIChatCompletionContentPartImageParam + OpenAIChatCompletionContentPartParam: + oneOf: + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + OpenAIChatCompletionContentPartTextParam: + type: object + properties: + type: + type: string + const: text + default: text + text: + type: string + additionalProperties: false + required: + - type + - text + title: OpenAIChatCompletionContentPartTextParam + OpenAIChatCompletionToolCall: + type: object + properties: + index: + type: integer + id: + type: string + type: + type: string + const: function + default: function + function: + $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' + additionalProperties: false + required: + - type + title: OpenAIChatCompletionToolCall + OpenAIChatCompletionToolCallFunction: + type: object + properties: + name: + type: string + arguments: + type: string + additionalProperties: false + title: OpenAIChatCompletionToolCallFunction + OpenAIChoice: + type: object + properties: + message: + $ref: '#/components/schemas/OpenAIMessageParam' + description: The message from the model + finish_reason: + type: string + description: The reason the model stopped generating + index: + type: integer + description: The index of the choice + logprobs: + $ref: '#/components/schemas/OpenAIChoiceLogprobs' + description: >- + (Optional) The log probabilities for the tokens in the message + additionalProperties: false + required: + - message + - finish_reason + - index + title: OpenAIChoice + description: >- + A choice from an OpenAI-compatible chat completion response. + OpenAIChoiceLogprobs: + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/OpenAITokenLogProb' + description: >- + (Optional) The log probabilities for the tokens in the message + refusal: + type: array + items: + $ref: '#/components/schemas/OpenAITokenLogProb' + description: >- + (Optional) The log probabilities for the tokens in the message + additionalProperties: false + title: OpenAIChoiceLogprobs + description: >- + The log probabilities for the tokens in the message from an OpenAI-compatible + chat completion response. + OpenAIDeveloperMessageParam: + type: object + properties: + role: + type: string + const: developer + default: developer + description: >- + Must be "developer" to identify this as a developer message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: The content of the developer message + name: + type: string + description: >- + (Optional) The name of the developer message participant. + additionalProperties: false + required: + - role + - content + title: OpenAIDeveloperMessageParam + description: >- + A message from the developer in an OpenAI-compatible chat completion request. 
+ OpenAIImageURL: + type: object + properties: + url: + type: string + detail: + type: string + additionalProperties: false + required: + - url + title: OpenAIImageURL + OpenAIMessageParam: + oneOf: + - $ref: '#/components/schemas/OpenAIUserMessageParam' + - $ref: '#/components/schemas/OpenAISystemMessageParam' + - $ref: '#/components/schemas/OpenAIAssistantMessageParam' + - $ref: '#/components/schemas/OpenAIToolMessageParam' + - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' + discriminator: + propertyName: role + mapping: + user: '#/components/schemas/OpenAIUserMessageParam' + system: '#/components/schemas/OpenAISystemMessageParam' + assistant: '#/components/schemas/OpenAIAssistantMessageParam' + tool: '#/components/schemas/OpenAIToolMessageParam' + developer: '#/components/schemas/OpenAIDeveloperMessageParam' + OpenAISystemMessageParam: + type: object + properties: + role: + type: string + const: system + default: system + description: >- + Must be "system" to identify this as a system message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: >- + The content of the "system prompt". If multiple system messages are provided, + they are concatenated. The underlying Llama Stack code may also add other + system messages (for example, for formatting tool definitions). + name: + type: string + description: >- + (Optional) The name of the system message participant. + additionalProperties: false + required: + - role + - content + title: OpenAISystemMessageParam + description: >- + A system message providing instructions or context to the model. + OpenAITokenLogProb: + type: object + properties: + token: + type: string + bytes: + type: array + items: + type: integer + logprob: + type: number + top_logprobs: + type: array + items: + $ref: '#/components/schemas/OpenAITopLogProb' + additionalProperties: false + required: + - token + - logprob + - top_logprobs + title: OpenAITokenLogProb + description: >- + The log probability for a token from an OpenAI-compatible chat completion + response. + OpenAIToolMessageParam: + type: object + properties: + role: + type: string + const: tool + default: tool + description: >- + Must be "tool" to identify this as a tool response + tool_call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: The response content from the tool + additionalProperties: false + required: + - role + - tool_call_id + - content + title: OpenAIToolMessageParam + description: >- + A message representing the result of a tool invocation in an OpenAI-compatible + chat completion request. + OpenAITopLogProb: + type: object + properties: + token: + type: string + bytes: + type: array + items: + type: integer + logprob: + type: number + additionalProperties: false + required: + - token + - logprob + title: OpenAITopLogProb + description: >- + The top log probability for a token from an OpenAI-compatible chat completion + response. 
+ OpenAIUserMessageParam: + type: object + properties: + role: + type: string + const: user + default: user + description: >- + Must be "user" to identify this as a user message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: >- + The content of the message, which can include text and other media + name: + type: string + description: >- + (Optional) The name of the user message participant. + additionalProperties: false + required: + - role + - content + title: OpenAIUserMessageParam + description: >- + A message from the user in an OpenAI-compatible chat completion request. + OpenAICompletionWithInputMessages: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChoice' + description: List of choices + object: + type: string + const: chat.completion + default: chat.completion + description: >- + The object type, which will be "chat.completion" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + input_messages: + type: array + items: + $ref: '#/components/schemas/OpenAIMessageParam' + additionalProperties: false + required: + - id + - choices + - object + - created + - model + - input_messages + title: OpenAICompletionWithInputMessages DataSource: oneOf: - $ref: '#/components/schemas/URIDataSource' @@ -4807,6 +6253,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: dataset default: dataset purpose: @@ -4833,7 +6289,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - purpose @@ -4932,6 +6387,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: model default: model metadata: @@ -4950,7 +6415,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - metadata @@ -5086,6 +6550,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: scoring_function default: scoring_function description: @@ -5107,7 +6581,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - metadata @@ -5146,6 +6619,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: shield default: shield params: @@ -5161,7 +6644,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type title: Shield @@ -5208,8 +6690,10 @@ components: type: array items: type: string + description: The attributes to return in the tree. max_depth: type: integer + description: The maximum depth of the tree. 
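The message schemas above form the role-discriminated OpenAIMessageParam union (system, user, assistant, tool, developer), where `content` may be a plain string or a list of `text` / `image_url` parts. A sketch of building such messages for the OpenAI-compatible chat completion endpoint; request fields beyond `messages` and `model` are assumptions about OpenaiChatCompletionRequest:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed local Llama Stack server

payload = {
    "model": "llama3.2:3b-instruct-fp16",  # hypothetical model id
    "messages": [
        {"role": "system", "content": "You are a terse assistant."},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
            ],
        },
    ],
}
resp = requests.post(f"{BASE_URL}/v1/openai/v1/chat/completions", json=payload)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])  # assistant message from the first choice
```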
additionalProperties: false title: GetSpanTreeRequest SpanStatus: @@ -5276,12 +6760,20 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: tool default: tool toolgroup_id: type: string - tool_host: - $ref: '#/components/schemas/ToolHost' description: type: string parameters: @@ -5301,21 +6793,12 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - toolgroup_id - - tool_host - description - parameters title: Tool - ToolHost: - type: string - enum: - - distribution - - client - - model_context_protocol - title: ToolHost ToolGroup: type: object properties: @@ -5327,6 +6810,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: tool_group default: tool_group mcp_endpoint: @@ -5344,7 +6837,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type title: ToolGroup @@ -5458,6 +6950,16 @@ components: type: string type: type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + title: ResourceType const: vector_db default: vector_db embedding_model: @@ -5467,7 +6969,6 @@ components: additionalProperties: false required: - identifier - - provider_resource_id - provider_id - type - embedding_model @@ -5546,6 +7047,8 @@ components: properties: vector_db_id: type: string + description: >- + The identifier of the vector database to insert the chunks into. chunks: type: array items: @@ -5553,6 +7056,9 @@ components: properties: content: $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the chunk, which can be interleaved text, images, + or other types. metadata: type: object additionalProperties: @@ -5563,13 +7069,32 @@ components: - type: string - type: array - type: object + description: >- + Metadata associated with the chunk, such as document ID, source, + or other relevant information. + embedding: + type: array + items: + type: number + description: >- + Optional embedding for the chunk. If not provided, it will be computed + later. additionalProperties: false required: - content - metadata title: Chunk + description: >- + A chunk of content that can be inserted into a vector database. + description: >- + The chunks to insert. Each `Chunk` should contain content which can be + interleaved text, images, or other types. `metadata`: `dict[str, Any]` + and `embedding`: `List[float]` are optional. If `metadata` is provided, + you configure how Llama Stack formats the chunk during generation. If + `embedding` is not provided, it will be computed later. ttl_seconds: type: integer + description: The time to live of the chunks. additionalProperties: false required: - vector_db_id @@ -5617,6 +7142,7 @@ components: properties: tool_name: type: string + description: The name of the tool to invoke. kwargs: type: object additionalProperties: @@ -5627,6 +7153,8 @@ components: - type: string - type: array - type: object + description: >- + A dictionary of arguments to pass to the tool. 
additionalProperties: false required: - tool_name @@ -5699,28 +7227,6 @@ components: - job_id - status title: Job - ListAgentSessionsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Session' - additionalProperties: false - required: - - data - title: ListAgentSessionsResponse - ListAgentsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Agent' - additionalProperties: false - required: - - data - title: ListAgentsResponse BucketResponse: type: object properties: @@ -5755,6 +7261,73 @@ components: required: - data title: ListBenchmarksResponse + Order: + type: string + enum: + - asc + - desc + title: Order + ListOpenAIChatCompletionResponse: + type: object + properties: + data: + type: array + items: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChoice' + description: List of choices + object: + type: string + const: chat.completion + default: chat.completion + description: >- + The object type, which will be "chat.completion" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + input_messages: + type: array + items: + $ref: '#/components/schemas/OpenAIMessageParam' + additionalProperties: false + required: + - id + - choices + - object + - created + - model + - input_messages + title: OpenAICompletionWithInputMessages + has_more: + type: boolean + first_id: + type: string + last_id: + type: string + object: + type: string + const: list + default: list + additionalProperties: false + required: + - data + - has_more + - first_id + - last_id + - object + title: ListOpenAIChatCompletionResponse ListDatasetsResponse: type: object properties: @@ -5791,6 +7364,96 @@ components: required: - data title: ListModelsResponse + ListOpenAIResponseInputItem: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseInput' + object: + type: string + const: list + default: list + additionalProperties: false + required: + - data + - object + title: ListOpenAIResponseInputItem + ListOpenAIResponseObject: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseObjectWithInput' + has_more: + type: boolean + first_id: + type: string + last_id: + type: string + object: + type: string + const: list + default: list + additionalProperties: false + required: + - data + - has_more + - first_id + - last_id + - object + title: ListOpenAIResponseObject + OpenAIResponseObjectWithInput: + type: object + properties: + created_at: + type: integer + error: + $ref: '#/components/schemas/OpenAIResponseError' + id: + type: string + model: + type: string + object: + type: string + const: response + default: response + output: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseOutput' + parallel_tool_calls: + type: boolean + default: false + previous_response_id: + type: string + status: + type: string + temperature: + type: number + top_p: + type: number + truncation: + type: string + user: + type: string + input: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseInput' + additionalProperties: false + required: + - created_at + - id + - model + - object + - output + - parallel_tool_calls + - status + - input + title: 
OpenAIResponseObjectWithInput ListProvidersResponse: type: object properties: @@ -5907,6 +7570,13 @@ components: unstructured_log: '#/components/schemas/UnstructuredLogEvent' metric: '#/components/schemas/MetricEvent' structured_log: '#/components/schemas/StructuredLogEvent' + EventType: + type: string + enum: + - unstructured_log + - structured_log + - metric + title: EventType LogSeverity: type: string enum: @@ -5937,7 +7607,7 @@ components: - type: boolean - type: 'null' type: - type: string + $ref: '#/components/schemas/EventType' const: metric default: metric metric: @@ -5962,7 +7632,7 @@ components: type: object properties: type: - type: string + $ref: '#/components/schemas/StructuredLogType' const: span_end default: span_end status: @@ -5976,7 +7646,7 @@ components: type: object properties: type: - type: string + $ref: '#/components/schemas/StructuredLogType' const: span_start default: span_start name: @@ -6008,7 +7678,7 @@ components: - type: boolean - type: 'null' type: - type: string + $ref: '#/components/schemas/EventType' const: structured_log default: structured_log payload: @@ -6030,6 +7700,12 @@ components: mapping: span_start: '#/components/schemas/SpanStartPayload' span_end: '#/components/schemas/SpanEndPayload' + StructuredLogType: + type: string + enum: + - span_start + - span_end + title: StructuredLogType UnstructuredLogEvent: type: object properties: @@ -6050,7 +7726,7 @@ components: - type: boolean - type: 'null' type: - type: string + $ref: '#/components/schemas/EventType' const: unstructured_log default: unstructured_log message: @@ -6071,149 +7747,15 @@ components: properties: event: $ref: '#/components/schemas/Event' + description: The event to log. ttl_seconds: type: integer + description: The time to live of the event. additionalProperties: false required: - event - ttl_seconds title: LogEventRequest - OpenAIAssistantMessageParam: - type: object - properties: - role: - type: string - const: assistant - default: assistant - description: >- - Must be "assistant" to identify this as the model's response - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' - description: The content of the model's response - name: - type: string - description: >- - (Optional) The name of the assistant message participant. - tool_calls: - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionToolCall' - description: >- - List of tool calls. Each tool call is an OpenAIChatCompletionToolCall - object. - additionalProperties: false - required: - - role - title: OpenAIAssistantMessageParam - description: >- - A message containing the model's (assistant) response in an OpenAI-compatible - chat completion request. 
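Because OpenAIAssistantMessageParam (above) can carry a tool_calls list, a short hedged sketch of a tool-use round trip may help; the tool name, arguments, and ids are invented, and the tool message's tool_call_id must echo the id of the assistant's call.

```python
# Hypothetical tool-use round trip; values are illustrative only.
assistant_msg = {
    "role": "assistant",
    "content": "",
    "tool_calls": [
        {
            "index": 0,
            "id": "call_0",
            "type": "function",
            "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
        }
    ],
}

tool_msg = {
    "role": "tool",
    "tool_call_id": "call_0",  # must match the id of the assistant's tool call
    "content": '{"temperature_c": 21}',
}
```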
- "OpenAIChatCompletionContentPartImageParam": - type: object - properties: - type: - type: string - const: image_url - default: image_url - image_url: - $ref: '#/components/schemas/OpenAIImageURL' - additionalProperties: false - required: - - type - - image_url - title: >- - OpenAIChatCompletionContentPartImageParam - OpenAIChatCompletionContentPartParam: - oneOf: - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' - OpenAIChatCompletionContentPartTextParam: - type: object - properties: - type: - type: string - const: text - default: text - text: - type: string - additionalProperties: false - required: - - type - - text - title: OpenAIChatCompletionContentPartTextParam - OpenAIChatCompletionToolCall: - type: object - properties: - index: - type: integer - id: - type: string - type: - type: string - const: function - default: function - function: - $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' - additionalProperties: false - required: - - type - title: OpenAIChatCompletionToolCall - OpenAIChatCompletionToolCallFunction: - type: object - properties: - name: - type: string - arguments: - type: string - additionalProperties: false - title: OpenAIChatCompletionToolCallFunction - OpenAIDeveloperMessageParam: - type: object - properties: - role: - type: string - const: developer - default: developer - description: >- - Must be "developer" to identify this as a developer message - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' - description: The content of the developer message - name: - type: string - description: >- - (Optional) The name of the developer message participant. - additionalProperties: false - required: - - role - - content - title: OpenAIDeveloperMessageParam - description: >- - A message from the developer in an OpenAI-compatible chat completion request. 
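The content-part schemas above let a user message interleave text with image references; the sketch below shows that shape, with the URL and detail level as placeholders rather than values taken from the spec.

```python
# User message whose content is a list of content parts (text + image_url).
user_msg = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is shown in this image?"},
        {
            "type": "image_url",
            "image_url": {"url": "https://example.com/photo.jpg", "detail": "low"},
        },
    ],
}
```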
- OpenAIImageURL: - type: object - properties: - url: - type: string - detail: - type: string - additionalProperties: false - required: - - url - title: OpenAIImageURL OpenAIJSONSchema: type: object properties: @@ -6237,21 +7779,6 @@ components: required: - name title: OpenAIJSONSchema - OpenAIMessageParam: - oneOf: - - $ref: '#/components/schemas/OpenAIUserMessageParam' - - $ref: '#/components/schemas/OpenAISystemMessageParam' - - $ref: '#/components/schemas/OpenAIAssistantMessageParam' - - $ref: '#/components/schemas/OpenAIToolMessageParam' - - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' - discriminator: - propertyName: role - mapping: - user: '#/components/schemas/OpenAIUserMessageParam' - system: '#/components/schemas/OpenAISystemMessageParam' - assistant: '#/components/schemas/OpenAIAssistantMessageParam' - tool: '#/components/schemas/OpenAIToolMessageParam' - developer: '#/components/schemas/OpenAIDeveloperMessageParam' OpenAIResponseFormatJSONObject: type: object properties: @@ -6299,93 +7826,6 @@ components: required: - type title: OpenAIResponseFormatText - OpenAISystemMessageParam: - type: object - properties: - role: - type: string - const: system - default: system - description: >- - Must be "system" to identify this as a system message - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' - description: >- - The content of the "system prompt". If multiple system messages are provided, - they are concatenated. The underlying Llama Stack code may also add other - system messages (for example, for formatting tool definitions). - name: - type: string - description: >- - (Optional) The name of the system message participant. - additionalProperties: false - required: - - role - - content - title: OpenAISystemMessageParam - description: >- - A system message providing instructions or context to the model. - OpenAIToolMessageParam: - type: object - properties: - role: - type: string - const: tool - default: tool - description: >- - Must be "tool" to identify this as a tool response - tool_call_id: - type: string - description: >- - Unique identifier for the tool call this response is for - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' - description: The response content from the tool - additionalProperties: false - required: - - role - - tool_call_id - - content - title: OpenAIToolMessageParam - description: >- - A message representing the result of a tool invocation in an OpenAI-compatible - chat completion request. - OpenAIUserMessageParam: - type: object - properties: - role: - type: string - const: user - default: user - description: >- - Must be "user" to identify this as a user message - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' - description: >- - The content of the message, which can include text and other media - name: - type: string - description: >- - (Optional) The name of the user message participant. - additionalProperties: false - required: - - role - - content - title: OpenAIUserMessageParam - description: >- - A message from the user in an OpenAI-compatible chat completion request. 
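The response-format schemas above plug into the response_format field of the chat completion request defined next; as a minimal sketch, the two simplest variants look like this (only the type field is required by these schemas).

```python
# Response format payloads matching the schemas above.
json_object_format = {"type": "json_object"}  # OpenAIResponseFormatJSONObject
text_format = {"type": "text"}                # OpenAIResponseFormatText
```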
OpenaiChatCompletionRequest: type: object properties: @@ -6398,11 +7838,11 @@ components: type: array items: $ref: '#/components/schemas/OpenAIMessageParam' - description: List of messages in the conversation + description: List of messages in the conversation. frequency_penalty: type: number description: >- - (Optional) The penalty for repeated tokens + (Optional) The penalty for repeated tokens. function_call: oneOf: - type: string @@ -6415,7 +7855,7 @@ components: - type: string - type: array - type: object - description: (Optional) The function call to use + description: (Optional) The function call to use. functions: type: array items: @@ -6428,52 +7868,52 @@ components: - type: string - type: array - type: object - description: (Optional) List of functions to use + description: (Optional) List of functions to use. logit_bias: type: object additionalProperties: type: number - description: (Optional) The logit bias to use + description: (Optional) The logit bias to use. logprobs: type: boolean - description: (Optional) The log probabilities to use + description: (Optional) The log probabilities to use. max_completion_tokens: type: integer description: >- - (Optional) The maximum number of tokens to generate + (Optional) The maximum number of tokens to generate. max_tokens: type: integer description: >- - (Optional) The maximum number of tokens to generate + (Optional) The maximum number of tokens to generate. n: type: integer description: >- - (Optional) The number of completions to generate + (Optional) The number of completions to generate. parallel_tool_calls: type: boolean description: >- - (Optional) Whether to parallelize tool calls + (Optional) Whether to parallelize tool calls. presence_penalty: type: number description: >- - (Optional) The penalty for repeated tokens + (Optional) The penalty for repeated tokens. response_format: $ref: '#/components/schemas/OpenAIResponseFormatParam' - description: (Optional) The response format to use + description: (Optional) The response format to use. seed: type: integer - description: (Optional) The seed to use + description: (Optional) The seed to use. stop: oneOf: - type: string - type: array items: type: string - description: (Optional) The stop tokens to use + description: (Optional) The stop tokens to use. stream: type: boolean description: >- - (Optional) Whether to stream the response + (Optional) Whether to stream the response. stream_options: type: object additionalProperties: @@ -6484,10 +7924,10 @@ components: - type: string - type: array - type: object - description: (Optional) The stream options to use + description: (Optional) The stream options to use. temperature: type: number - description: (Optional) The temperature to use + description: (Optional) The temperature to use. tool_choice: oneOf: - type: string @@ -6500,7 +7940,7 @@ components: - type: string - type: array - type: object - description: (Optional) The tool choice to use + description: (Optional) The tool choice to use. tools: type: array items: @@ -6513,17 +7953,17 @@ components: - type: string - type: array - type: object - description: (Optional) The tools to use + description: (Optional) The tools to use. top_logprobs: type: integer description: >- - (Optional) The top log probabilities to use + (Optional) The top log probabilities to use. top_p: type: number - description: (Optional) The top p to use + description: (Optional) The top p to use. user: type: string - description: (Optional) The user to use + description: (Optional) The user to use. 
additionalProperties: false required: - model @@ -6599,30 +8039,6 @@ components: title: OpenAIChatCompletionChunk description: >- Chunk from a streaming response to an OpenAI-compatible chat completion request. - OpenAIChoice: - type: object - properties: - message: - $ref: '#/components/schemas/OpenAIMessageParam' - description: The message from the model - finish_reason: - type: string - description: The reason the model stopped generating - index: - type: integer - description: The index of the choice - logprobs: - $ref: '#/components/schemas/OpenAIChoiceLogprobs' - description: >- - (Optional) The log probabilities for the tokens in the message - additionalProperties: false - required: - - message - - finish_reason - - index - title: OpenAIChoice - description: >- - A choice from an OpenAI-compatible chat completion response. OpenAIChoiceDelta: type: object properties: @@ -6644,26 +8060,6 @@ components: title: OpenAIChoiceDelta description: >- A delta from an OpenAI-compatible chat completion streaming response. - OpenAIChoiceLogprobs: - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/OpenAITokenLogProb' - description: >- - (Optional) The log probabilities for the tokens in the message - refusal: - type: array - items: - $ref: '#/components/schemas/OpenAITokenLogProb' - description: >- - (Optional) The log probabilities for the tokens in the message - additionalProperties: false - title: OpenAIChoiceLogprobs - description: >- - The log probabilities for the tokens in the message from an OpenAI-compatible - chat completion response. OpenAIChunkChoice: type: object properties: @@ -6688,49 +8084,6 @@ components: title: OpenAIChunkChoice description: >- A chunk choice from an OpenAI-compatible chat completion streaming response. - OpenAITokenLogProb: - type: object - properties: - token: - type: string - bytes: - type: array - items: - type: integer - logprob: - type: number - top_logprobs: - type: array - items: - $ref: '#/components/schemas/OpenAITopLogProb' - additionalProperties: false - required: - - token - - logprob - - top_logprobs - title: OpenAITokenLogProb - description: >- - The log probability for a token from an OpenAI-compatible chat completion - response. - OpenAITopLogProb: - type: object - properties: - token: - type: string - bytes: - type: array - items: - type: integer - logprob: - type: number - additionalProperties: false - required: - - token - - logprob - title: OpenAITopLogProb - description: >- - The top log probability for a token from an OpenAI-compatible chat completion - response. OpenaiCompletionRequest: type: object properties: @@ -6753,52 +8106,52 @@ components: type: array items: type: integer - description: The prompt to generate a completion for + description: The prompt to generate a completion for. best_of: type: integer description: >- - (Optional) The number of completions to generate + (Optional) The number of completions to generate. echo: type: boolean - description: (Optional) Whether to echo the prompt + description: (Optional) Whether to echo the prompt. frequency_penalty: type: number description: >- - (Optional) The penalty for repeated tokens + (Optional) The penalty for repeated tokens. logit_bias: type: object additionalProperties: type: number - description: (Optional) The logit bias to use + description: (Optional) The logit bias to use. logprobs: type: boolean - description: (Optional) The log probabilities to use + description: (Optional) The log probabilities to use. 
max_tokens: type: integer description: >- - (Optional) The maximum number of tokens to generate + (Optional) The maximum number of tokens to generate. n: type: integer description: >- - (Optional) The number of completions to generate + (Optional) The number of completions to generate. presence_penalty: type: number description: >- - (Optional) The penalty for repeated tokens + (Optional) The penalty for repeated tokens. seed: type: integer - description: (Optional) The seed to use + description: (Optional) The seed to use. stop: oneOf: - type: string - type: array items: type: string - description: (Optional) The stop tokens to use + description: (Optional) The stop tokens to use. stream: type: boolean description: >- - (Optional) Whether to stream the response + (Optional) Whether to stream the response. stream_options: type: object additionalProperties: @@ -6809,16 +8162,16 @@ components: - type: string - type: array - type: object - description: (Optional) The stream options to use + description: (Optional) The stream options to use. temperature: type: number - description: (Optional) The temperature to use + description: (Optional) The temperature to use. top_p: type: number - description: (Optional) The top p to use + description: (Optional) The top p to use. user: type: string - description: (Optional) The user to use + description: (Optional) The user to use. guided_choice: type: array items: @@ -6876,6 +8229,118 @@ components: title: OpenAICompletionChoice description: >- A choice from an OpenAI-compatible completion response. + OpenaiEmbeddingsRequest: + type: object + properties: + model: + type: string + description: >- + The identifier of the model to use. The model must be an embedding model + registered with Llama Stack and available via the /models endpoint. + input: + oneOf: + - type: string + - type: array + items: + type: string + description: >- + Input text to embed, encoded as a string or array of strings. To embed + multiple inputs in a single request, pass an array of strings. + encoding_format: + type: string + description: >- + (Optional) The format to return the embeddings in. Can be either "float" + or "base64". Defaults to "float". + dimensions: + type: integer + description: >- + (Optional) The number of dimensions the resulting output embeddings should + have. Only supported in text-embedding-3 and later models. + user: + type: string + description: >- + (Optional) A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. + additionalProperties: false + required: + - model + - input + title: OpenaiEmbeddingsRequest + OpenAIEmbeddingData: + type: object + properties: + object: + type: string + const: embedding + default: embedding + description: >- + The object type, which will be "embedding" + embedding: + oneOf: + - type: array + items: + type: number + - type: string + description: >- + The embedding vector as a list of floats (when encoding_format="float") + or as a base64-encoded string (when encoding_format="base64") + index: + type: integer + description: >- + The index of the embedding in the input list + additionalProperties: false + required: + - object + - embedding + - index + title: OpenAIEmbeddingData + description: >- + A single embedding data object from an OpenAI-compatible embeddings response. 
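To illustrate the OpenaiEmbeddingsRequest and OpenAIEmbeddingData shapes above, here is a hedged sketch of a request body and of one entry from the response's data list; the model id and the vector values are placeholders.

```python
# Hypothetical embeddings request body matching OpenaiEmbeddingsRequest.
embeddings_request = {
    "model": "all-minilm:latest",  # must be an embedding model registered with Llama Stack
    "input": ["first sentence", "second sentence"],
    "encoding_format": "float",    # "float" (default) or "base64"
}

# Shape of one entry in the response's data list (OpenAIEmbeddingData).
embedding_data = {
    "object": "embedding",
    "embedding": [0.01, -0.02, 0.03],  # list of floats when encoding_format="float"
    "index": 0,
}
```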
+ OpenAIEmbeddingUsage: + type: object + properties: + prompt_tokens: + type: integer + description: The number of tokens in the input + total_tokens: + type: integer + description: The total number of tokens used + additionalProperties: false + required: + - prompt_tokens + - total_tokens + title: OpenAIEmbeddingUsage + description: >- + Usage information for an OpenAI-compatible embeddings response. + OpenAIEmbeddingsResponse: + type: object + properties: + object: + type: string + const: list + default: list + description: The object type, which will be "list" + data: + type: array + items: + $ref: '#/components/schemas/OpenAIEmbeddingData' + description: List of embedding data objects + model: + type: string + description: >- + The model that was used to generate the embeddings + usage: + $ref: '#/components/schemas/OpenAIEmbeddingUsage' + description: Usage information + additionalProperties: false + required: + - object + - data + - model + - usage + title: OpenAIEmbeddingsResponse + description: >- + Response from an OpenAI-compatible embeddings request. OpenAIModel: type: object properties: @@ -7034,12 +8499,16 @@ components: properties: job_uuid: type: string + description: The UUID of the job to create. finetuned_model: type: string + description: The model to fine-tune. algorithm_config: $ref: '#/components/schemas/DPOAlignmentConfig' + description: The algorithm configuration. training_config: $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. hyperparam_search_config: type: object additionalProperties: @@ -7050,6 +8519,7 @@ components: - type: string - type: array - type: object + description: The hyperparam search configuration. logger_config: type: object additionalProperties: @@ -7060,6 +8530,7 @@ components: - type: string - type: array - type: object + description: The logger configuration. additionalProperties: false required: - job_uuid @@ -7115,18 +8586,41 @@ components: properties: query_generator_config: $ref: '#/components/schemas/RAGQueryGeneratorConfig' + description: Configuration for the query generator. max_tokens_in_context: type: integer default: 4096 + description: Maximum number of tokens in the context. max_chunks: type: integer default: 5 + description: Maximum number of chunks to retrieve. + chunk_template: + type: string + default: > + Result {index} + + Content: {chunk.content} + + Metadata: {metadata} + description: >- + Template for formatting each retrieved chunk in the context. Available + placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk + content string), {metadata} (chunk metadata dict). Default: "Result {index}\nContent: + {chunk.content}\nMetadata: {metadata}\n" + mode: + type: string + description: >- + Search mode for retrieval—either "vector" or "keyword". Default "vector". additionalProperties: false required: - query_generator_config - max_tokens_in_context - max_chunks + - chunk_template title: RAGQueryConfig + description: >- + Configuration for the RAG query generation. RAGQueryGeneratorConfig: oneOf: - $ref: '#/components/schemas/DefaultRAGQueryGeneratorConfig' @@ -7176,8 +8670,11 @@ components: properties: vector_db_id: type: string + description: >- + The identifier of the vector database to query. query: $ref: '#/components/schemas/InterleavedContent' + description: The query to search for. params: type: object additionalProperties: @@ -7188,6 +8685,7 @@ components: - type: string - type: array - type: object + description: The parameters of the query. 
additionalProperties: false required: - vector_db_id @@ -7203,6 +8701,9 @@ components: properties: content: $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the chunk, which can be interleaved text, images, + or other types. metadata: type: object additionalProperties: @@ -7213,11 +8714,23 @@ components: - type: string - type: array - type: object + description: >- + Metadata associated with the chunk, such as document ID, source, + or other relevant information. + embedding: + type: array + items: + type: number + description: >- + Optional embedding for the chunk. If not provided, it will be computed + later. additionalProperties: false required: - content - metadata title: Chunk + description: >- + A chunk of content that can be inserted into a vector database. scores: type: array items: @@ -7227,6 +8740,109 @@ components: - chunks - scores title: QueryChunksResponse + QueryMetricsRequest: + type: object + properties: + start_time: + type: integer + description: The start time of the metric to query. + end_time: + type: integer + description: The end time of the metric to query. + granularity: + type: string + description: The granularity of the metric to query. + query_type: + type: string + enum: + - range + - instant + description: The type of query to perform. + label_matchers: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + operator: + type: string + enum: + - '=' + - '!=' + - =~ + - '!~' + title: MetricLabelOperator + default: '=' + additionalProperties: false + required: + - name + - value + - operator + title: MetricLabelMatcher + description: >- + The label matchers to apply to the metric. + additionalProperties: false + required: + - start_time + - query_type + title: QueryMetricsRequest + MetricDataPoint: + type: object + properties: + timestamp: + type: integer + value: + type: number + additionalProperties: false + required: + - timestamp + - value + title: MetricDataPoint + MetricLabel: + type: object + properties: + name: + type: string + value: + type: string + additionalProperties: false + required: + - name + - value + title: MetricLabel + MetricSeries: + type: object + properties: + metric: + type: string + labels: + type: array + items: + $ref: '#/components/schemas/MetricLabel' + values: + type: array + items: + $ref: '#/components/schemas/MetricDataPoint' + additionalProperties: false + required: + - metric + - labels + - values + title: MetricSeries + QueryMetricsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/MetricSeries' + additionalProperties: false + required: + - data + title: QueryMetricsResponse QueryCondition: type: object properties: @@ -7263,12 +8879,16 @@ components: type: array items: $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. attributes_to_return: type: array items: type: string + description: The attributes to return in the spans. max_depth: type: integer + description: The maximum depth of the tree. additionalProperties: false required: - attribute_filters @@ -7292,14 +8912,19 @@ components: type: array items: $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the traces. limit: type: integer + description: The limit of traces to return. offset: type: integer + description: The offset of the traces to return. 
order_by: type: array items: type: string + description: The order by of the traces to return. additionalProperties: false title: QueryTracesRequest QueryTracesResponse: @@ -7318,16 +8943,25 @@ components: properties: benchmark_id: type: string + description: The ID of the benchmark to register. dataset_id: type: string + description: >- + The ID of the dataset to use for the benchmark. scoring_functions: type: array items: type: string + description: >- + The scoring functions to use for the benchmark. provider_benchmark_id: type: string + description: >- + The ID of the provider benchmark to use for the benchmark. provider_id: type: string + description: >- + The ID of the provider to use for the benchmark. metadata: type: object additionalProperties: @@ -7338,6 +8972,7 @@ components: - type: string - type: array - type: object + description: The metadata to use for the benchmark. additionalProperties: false required: - benchmark_id @@ -7354,7 +8989,7 @@ components: - eval/question-answer - eval/messages-answer description: >- - The purpose of the dataset. One of - "post-training/messages": The dataset + The purpose of the dataset. One of: - "post-training/messages": The dataset contains a messages column with list of messages for post-training. { "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] } - "eval/question-answer": The dataset @@ -7387,7 +9022,7 @@ components: - type: array - type: object description: >- - The metadata for the dataset. - E.g. {"description": "My dataset"} + The metadata for the dataset. - E.g. {"description": "My dataset"}. dataset_id: type: string description: >- @@ -7402,10 +9037,14 @@ components: properties: model_id: type: string + description: The identifier of the model to register. provider_model_id: type: string + description: >- + The identifier of the model in the provider. provider_id: type: string + description: The identifier of the provider. metadata: type: object additionalProperties: @@ -7416,8 +9055,10 @@ components: - type: string - type: array - type: object + description: Any additional metadata for this model. model_type: $ref: '#/components/schemas/ModelType' + description: The type of model to register. additionalProperties: false required: - model_id @@ -7427,16 +9068,27 @@ components: properties: scoring_fn_id: type: string + description: >- + The ID of the scoring function to register. description: type: string + description: The description of the scoring function. return_type: $ref: '#/components/schemas/ParamType' + description: The return type of the scoring function. provider_scoring_fn_id: type: string + description: >- + The ID of the provider scoring function to use for the scoring function. provider_id: type: string + description: >- + The ID of the provider to use for the scoring function. params: $ref: '#/components/schemas/ScoringFnParams' + description: >- + The parameters for the scoring function for benchmark eval, these can + be overridden for app eval. additionalProperties: false required: - scoring_fn_id @@ -7448,10 +9100,15 @@ components: properties: shield_id: type: string + description: >- + The identifier of the shield to register. provider_shield_id: type: string + description: >- + The identifier of the shield in the provider. provider_id: type: string + description: The identifier of the provider. 
params: type: object additionalProperties: @@ -7462,6 +9119,7 @@ components: - type: string - type: array - type: object + description: The parameters of the shield. additionalProperties: false required: - shield_id @@ -7471,10 +9129,15 @@ components: properties: toolgroup_id: type: string + description: The ID of the tool group to register. provider_id: type: string + description: >- + The ID of the provider to use for the tool group. mcp_endpoint: $ref: '#/components/schemas/URL' + description: >- + The MCP endpoint to use for the tool group. args: type: object additionalProperties: @@ -7485,6 +9148,8 @@ components: - type: string - type: array - type: object + description: >- + A dictionary of arguments to pass to the tool group. additionalProperties: false required: - toolgroup_id @@ -7495,14 +9160,21 @@ components: properties: vector_db_id: type: string + description: >- + The identifier of the vector database to register. embedding_model: type: string + description: The embedding model to use. embedding_dimension: type: integer + description: The dimension of the embedding model. provider_id: type: string + description: The identifier of the provider. provider_vector_db_id: type: string + description: >- + The identifier of the vector database in the provider. additionalProperties: false required: - vector_db_id @@ -7539,10 +9211,12 @@ components: properties: shield_id: type: string + description: The identifier of the shield to run. messages: type: array items: $ref: '#/components/schemas/Message' + description: The messages to run the shield on. params: type: object additionalProperties: @@ -7553,6 +9227,7 @@ components: - type: string - type: array - type: object + description: The parameters of the shield. additionalProperties: false required: - shield_id @@ -7573,14 +9248,20 @@ components: type: array items: $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. attributes_to_save: type: array items: type: string + description: The attributes to save to the dataset. dataset_id: type: string + description: >- + The ID of the dataset to save the spans to. max_depth: type: integer + description: The maximum depth of the tree. additionalProperties: false required: - attribute_filters @@ -7635,14 +9316,19 @@ components: properties: dataset_id: type: string + description: The ID of the dataset to score. scoring_functions: type: object additionalProperties: oneOf: - $ref: '#/components/schemas/ScoringFnParams' - type: 'null' + description: >- + The scoring functions to use for the scoring. save_results_dataset: type: boolean + description: >- + Whether to save the results to a dataset. additionalProperties: false required: - dataset_id @@ -7727,8 +9413,10 @@ components: properties: job_uuid: type: string + description: The UUID of the job to create. training_config: $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. hyperparam_search_config: type: object additionalProperties: @@ -7739,6 +9427,7 @@ components: - type: string - type: array - type: object + description: The hyperparam search configuration. logger_config: type: object additionalProperties: @@ -7749,12 +9438,16 @@ components: - type: string - type: array - type: object + description: The logger configuration. model: type: string + description: The model to fine-tune. checkpoint_dir: type: string + description: The directory to save checkpoint(s) to. 
algorithm_config: $ref: '#/components/schemas/AlgorithmConfig' + description: The algorithm configuration. additionalProperties: false required: - job_uuid diff --git a/docs/getting_started.ipynb b/docs/getting_started.ipynb index b764d4d34..cdaf074b8 100644 --- a/docs/getting_started.ipynb +++ b/docs/getting_started.ipynb @@ -1050,8 +1050,6 @@ "text/html": [ "
ToolGroup(\n",
               "identifier='builtin::code_interpreter',\n",
-              "provider_id='code-interpreter',\n",
-              "provider_resource_id='builtin::code_interpreter',\n",
               "type='tool_group',\n",
               "args=None,\n",
               "mcp_endpoint=None\n",
@@ -1061,7 +1059,6 @@
             "text/plain": [
               "\u001b[1;35mToolGroup\u001b[0m\u001b[1m(\u001b[0m\n",
               "\u001b[2;32m│   \u001b[0m\u001b[33midentifier\u001b[0m=\u001b[32m'builtin::code_interpreter'\u001b[0m,\n",
-              "\u001b[2;32m│   \u001b[0m\u001b[33mprovider_id\u001b[0m=\u001b[32m'code-interpreter'\u001b[0m,\n",
               "\u001b[2;32m│   \u001b[0m\u001b[33mprovider_resource_id\u001b[0m=\u001b[32m'builtin::code_interpreter'\u001b[0m,\n",
               "\u001b[2;32m│   \u001b[0m\u001b[33mtype\u001b[0m=\u001b[32m'tool_group'\u001b[0m,\n",
               "\u001b[2;32m│   \u001b[0m\u001b[33margs\u001b[0m=\u001b[3;35mNone\u001b[0m,\n",
diff --git a/docs/getting_started_llama_api.ipynb b/docs/getting_started_llama_api.ipynb
new file mode 100644
index 000000000..128e9114a
--- /dev/null
+++ b/docs/getting_started_llama_api.ipynb
@@ -0,0 +1,907 @@
+{
+    "cells": [
+      {
+        "cell_type": "markdown",
+        "id": "c1e7571c",
+        "metadata": {
+          "id": "c1e7571c"
+        },
+        "source": [
+          "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started_llama_api.ipynb)\n",
+          "\n",
+          "# Getting Started with Llama 4 in Llama Stack\n",
+          "\n",
+          "\n",
+          "[Llama Stack](https://github.com/meta-llama/llama-stack) defines and standardizes the set of core building blocks needed to bring generative AI applications to market. These building blocks are presented in the form of interoperable APIs with a broad set of Service Providers providing their implementations.\n",
+          "\n",
+          "Read more about the project here: https://llama-stack.readthedocs.io/en/latest/index.html\n",
+          "\n",
+          "In this guide, we will show how you can get started with Llama 4 in Llama Stack.\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "4CV1Q19BDMVw",
+        "metadata": {
+          "id": "4CV1Q19BDMVw"
+        },
+        "source": [
+          "## 1. Getting started with Llama Stack"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "K4AvfUAJZOeS",
+        "metadata": {
+          "id": "K4AvfUAJZOeS"
+        },
+        "source": [
+          "### 1.1. Create Llama API account\n",
+          "\n",
+          "In this showcase, we will use [Llama API](https://llama.developer.meta.com/) as the inference provider, so you will first need an API key from Llama API if you don't have one already.\n",
+          "\n",
+          "\n",
+          "\n",
+          "> **Note:** Set the API key in the Secrets of this notebook.\n",
+          "\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "oDUB7M_qe-Gs",
+        "metadata": {
+          "id": "oDUB7M_qe-Gs"
+        },
+        "source": [
+          "### 1.2. Setting up and running a Llama Stack server\n",
+          "\n",
+          "Llama Stack is architected as a collection of APIs that provide developers with the building blocks to build AI applications.\n",
+          "\n",
+          "Llama Stack is typically available as a server with an endpoint that you can make calls to. Partners like Together and Fireworks offer their own Llama Stack-compatible endpoints.\n",
+          "\n",
+          "In this showcase, we will start a Llama Stack server running locally.\n"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": null,
+        "id": "J2kGed0R5PSf",
+        "metadata": {
+          "colab": {
+            "base_uri": "https://localhost:8080/"
+          },
+          "collapsed": true,
+          "id": "J2kGed0R5PSf",
+          "outputId": "2478ea60-8d35-48a1-b011-f233831740c5"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "Requirement already satisfied: uv in /opt/homebrew/Caskroom/miniconda/base/envs/l4/lib/python3.10/site-packages (0.6.12)\n",
+              "\u001b[2mUsing Python 3.10.16 environment at: /opt/homebrew/Caskroom/miniconda/base/envs/l4\u001b[0m\n",
+              "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 83ms\u001b[0m\u001b[0m\n",
+              "Environment '/Users/erichuang/projects/internal-llama-stack/.venv' already exists, re-using it.\n",
+              "Virtual environment /Users/erichuang/projects/internal-llama-stack/.venv is already active\n",
+              "\u001b[2mUsing Python 3.11.11 environment at: /Users/erichuang/projects/internal-llama-stack/.venv\u001b[0m\n",
+              "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 387ms\u001b[0m\u001b[0m\n",
+              "Installing pip dependencies\n",
+              "\u001b[2mUsing Python 3.11.11 environment at: /Users/erichuang/projects/internal-llama-stack/.venv\u001b[0m\n",
+              "\u001b[2K\u001b[2mResolved \u001b[1m123 packages\u001b[0m \u001b[2min 1.13s\u001b[0m\u001b[0m                                       \u001b[0m\n",
+              "\u001b[2K\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)                                                   \n",
+              "\u001b[2K\u001b[1A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)-----\u001b[0m\u001b[0m     0 B/9.53 KiB                     \u001b[1A\n",
+              "\u001b[2K\u001b[1A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)-\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB                    \u001b[1A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2K\u001b[2A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m     0 B/44.00 KiB                     \u001b[2A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2K\u001b[2A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB                   \u001b[2A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m\u001b[2m------------------------------\u001b[0m\u001b[0m     0 B/34.43 KiB\n",
+              "\u001b[2K\u001b[3A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB                   \u001b[3A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2K\u001b[3A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB                   \u001b[3A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m\u001b[2m------------------------------\u001b[0m\u001b[0m     0 B/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB                   \u001b[4A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB                   \u001b[4A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2K\u001b[5A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m     0 B/85.81 KiB                     \u001b[5A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2K\u001b[5A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB                   \u001b[5A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[6A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m     0 B/3.08 MiB                      \u001b[6A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m-------------\u001b[2m-----------------\u001b[0m\u001b[0m 14.83 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[6A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.91 KiB/3.08 MiB                    \u001b[6A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m---------------------------\u001b[2m---\u001b[0m\u001b[0m 30.83 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[6A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.91 KiB/3.08 MiB                    \u001b[6A\n",
+              "\u001b[2meval-type-backport\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 5.69 KiB/5.69 KiB\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 34.43 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[6A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.91 KiB/3.08 MiB                    \u001b[6A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 34.43 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[5A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 14.91 KiB/3.08 MiB                    \u001b[5A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtabulate  \u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 34.43 KiB/34.43 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[5A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 30.91 KiB/3.08 MiB                    \u001b[5A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 30.91 KiB/3.08 MiB                    \u001b[4A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 46.91 KiB/3.08 MiB                    \u001b[4A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 62.91 KiB/3.08 MiB                    \u001b[4A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 78.91 KiB/3.08 MiB                    \u001b[4A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------\u001b[2m------------------------\u001b[0m\u001b[0m 16.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 94.91 KiB/3.08 MiB                    \u001b[4A\n",
+              "\u001b[2mshellingham\u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 9.53 KiB/9.53 KiB\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m-----------\u001b[2m-------------------\u001b[0m\u001b[0m 14.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------------\u001b[2m------------------\u001b[0m\u001b[0m 32.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[4A\u001b[37m⠙\u001b[0m \u001b[2mPreparing packages...\u001b[0m (0/6)----\u001b[0m\u001b[0m 2.62 MiB/3.08 MiB                     \u001b[4A\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m----------------------\u001b[2m--------\u001b[0m\u001b[0m 30.88 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------------\u001b[2m------------------\u001b[0m\u001b[0m 32.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[3A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)----\u001b[0m\u001b[0m 2.62 MiB/3.08 MiB                     \u001b[3A\n",
+              "\u001b[2mtyper     \u001b[0m \u001b[32m------------------------------\u001b[2m\u001b[0m\u001b[0m 44.00 KiB/44.00 KiB\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------------\u001b[2m------------------\u001b[0m\u001b[0m 32.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[3A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)----\u001b[0m\u001b[0m 2.62 MiB/3.08 MiB                     \u001b[3A\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m------------\u001b[2m------------------\u001b[0m\u001b[0m 32.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[2A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)2m--\u001b[0m\u001b[0m 2.80 MiB/3.08 MiB                     \u001b[2A\n",
+              "\u001b[2mtogether  \u001b[0m \u001b[32m-----------------\u001b[2m-------------\u001b[0m\u001b[0m 48.00 KiB/85.81 KiB\n",
+              "\u001b[2K\u001b[2A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)2m--\u001b[0m\u001b[0m 2.81 MiB/3.08 MiB                     \u001b[2A\n",
+              "\u001b[2K\u001b[1A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)----\u001b[0m\u001b[0m 48.00 KiB/85.81 KiB                   \u001b[1A\n",
+              "\u001b[2K\u001b[1A\u001b[37m⠹\u001b[0m \u001b[2mPreparing packages...\u001b[0m (3/6)2m--\u001b[0m\u001b[0m 80.00 KiB/85.81 KiB                   \u001b[1A\n",
+              "\u001b[2K\u001b[2mPrepared \u001b[1m6 packages\u001b[0m \u001b[2min 365ms\u001b[0m\u001b[0m                                                 \u001b[1A\n",
+              "\u001b[2K\u001b[2mInstalled \u001b[1m6 packages\u001b[0m \u001b[2min 50ms\u001b[0m\u001b[0m                                \u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1meval-type-backport\u001b[0m\u001b[2m==0.2.2\u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1mfaiss-cpu\u001b[0m\u001b[2m==1.10.0\u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1mshellingham\u001b[0m\u001b[2m==1.5.4\u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1mtabulate\u001b[0m\u001b[2m==0.9.0\u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1mtogether\u001b[0m\u001b[2m==1.5.5\u001b[0m\n",
+              " \u001b[32m+\u001b[39m \u001b[1mtyper\u001b[0m\u001b[2m==0.15.2\u001b[0m\n",
+              "torch torchvision --index-url https://download.pytorch.org/whl/cpu\n",
+              "\u001b[2mUsing Python 3.11.11 environment at: /Users/erichuang/projects/internal-llama-stack/.venv\u001b[0m\n",
+              "\u001b[2mAudited \u001b[1m2 packages\u001b[0m \u001b[2min 32ms\u001b[0m\u001b[0m\n",
+              "sentence-transformers --no-deps\n",
+              "\u001b[2mUsing Python 3.11.11 environment at: /Users/erichuang/projects/internal-llama-stack/.venv\u001b[0m\n",
+              "\u001b[2mAudited \u001b[1m1 package\u001b[0m \u001b[2min 63ms\u001b[0m\u001b[0m\n",
+              "\u001b[32mBuild Successful!\u001b[0m\n"
+            ]
+          }
+        ],
+        "source": [
+          "import os \n",
+          "import subprocess\n",
+          "import time\n",
+          "\n",
+          "!pip install uv \n",
+          "!uv pip install requests\n",
+          "\n",
+          "if \"UV_SYSTEM_PYTHON\" in os.environ:\n",
+          "  del os.environ[\"UV_SYSTEM_PYTHON\"]\n",
+          "\n",
+          "# this command installs all the dependencies needed for the llama stack server \n",
+          "!uv run --with llama-stack llama stack build --template llama_api --image-type venv \n",
+          "\n",
+          "def run_llama_stack_server_background():\n",
+          "    log_file = open(\"llama_stack_server.log\", \"w\")\n",
+          "    process = subprocess.Popen(\n",
+          "        \"uv run --with llama-stack llama stack run llama_api --image-type venv\",\n",
+          "        shell=True,\n",
+          "        stdout=log_file,\n",
+          "        stderr=log_file,\n",
+          "        text=True\n",
+          "    )\n",
+          "    \n",
+          "    print(f\"Starting Llama Stack server with PID: {process.pid}\")\n",
+          "    return process\n",
+          "\n",
+          "def wait_for_server_to_start():\n",
+          "    import requests\n",
+          "    from requests.exceptions import ConnectionError\n",
+          "    import time\n",
+          "    \n",
+          "    url = \"http://0.0.0.0:8321/v1/health\"\n",
+          "    max_retries = 30\n",
+          "    retry_interval = 1\n",
+          "    \n",
+          "    print(\"Waiting for server to start\", end=\"\")\n",
+          "    for _ in range(max_retries):\n",
+          "        try:\n",
+          "            response = requests.get(url)\n",
+          "            if response.status_code == 200:\n",
+          "                print(\"\\nServer is ready!\")\n",
+          "                return True\n",
+          "        except ConnectionError:\n",
+          "            print(\".\", end=\"\", flush=True)\n",
+          "            time.sleep(retry_interval)\n",
+          "            \n",
+          "    print(\"\\nServer failed to start after\", max_retries * retry_interval, \"seconds\")\n",
+          "    return False\n",
+          "\n",
+          "\n",
+          "# use this helper if needed to kill the server \n",
+          "def kill_llama_stack_server():\n",
+          "    # Kill any existing llama stack server processes\n",
+          "    os.system(\"ps aux | grep -v grep | grep llama_stack.distribution.server.server | awk '{print $2}' | xargs kill -9\")\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "c40e9efd",
+        "metadata": {},
+        "source": [
+          "### 1.3 Starting the Llama Stack Server"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": null,
+        "id": "f779283d",
+        "metadata": {},
+        "outputs": [],
+        "source": [
+          "server_process = run_llama_stack_server_background()\n",
+          "assert wait_for_server_to_start()"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "90eb721b",
+        "metadata": {},
+        "source": [
+          "### 1.4 Install and Configure the Client\n",
+          "\n",
+          "Now that we have our Llama Stack server running locally, we need to install the client package to interact with it. The `llama-stack-client` provides a simple Python interface to access all the functionality of Llama Stack, including:\n",
+          "\n",
+          "- Chat Completions ( text and multimodal )\n",
+          "- Safety Shields \n",
+          "- Agent capabilities with tools like web search, RAG with Telemetry\n",
+          "- Evaluation and scoring frameworks\n",
+          "\n",
+          "The client handles all the API communication with our local server, making it easy to integrate Llama Stack's capabilities into your applications.\n",
+          "\n",
+          "In the next cells, we'll:\n",
+          "\n",
+          "1. Install the client package\n",
+          "2. Set up API keys for external services (Together AI and Tavily Search)\n",
+          "3. Initialize the client to connect to our local server\n"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 3,
+        "id": "2e68e32a",
+        "metadata": {},
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "\u001b[2mUsing Python 3.10.16 environment at: /opt/homebrew/Caskroom/miniconda/base/envs/stack\u001b[0m\n",
+              "\u001b[2K\u001b[2mResolved \u001b[1m31 packages\u001b[0m \u001b[2min 284ms\u001b[0m\u001b[0m                                        \u001b[0m\n",
+              "\u001b[2mAudited \u001b[1m31 packages\u001b[0m \u001b[2min 0.04ms\u001b[0m\u001b[0m\n"
+            ]
+          }
+        ],
+        "source": [
+          "!pip install -U llama-stack-client"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 3,
+        "id": "E1UFuJC570Tk",
+        "metadata": {
+          "colab": {
+            "base_uri": "https://localhost:8080/",
+            "height": 1000,
+            "referenced_widgets": [
+              "75307e3dee604d30aa44713e6e293e64",
+              "5ce87402a79342af995df41ac3940d55",
+              "fbbcc19886cc43b38424fbb184162c61",
+              "29212208db6b432eb4f708cd64258954",
+              "50dd8994a4cf486ebbec5ffd4322992a",
+              "f9b768c703494dd198f2978aff4892e8",
+              "1231b9e4cab34c33a38bee63543f1e75",
+              "754deb3970604d48a522bc9f021ad945",
+              "f6ecca7a1a8340fbbe056235a2714fc3",
+              "ef4f63fe9d8f4683a9d20becb6e4e2cb",
+              "7508f10c13634e7aa682cfb29c48d9e7",
+              "26f1430ca7cb4ad5b1b8df1ffdbd32a9",
+              "7cd2d9c9ea7b4d70902ffaff33033078",
+              "101288236cff40b8bb9dbad80dbbc7ee",
+              "d5c9977838a249eeab6ef628279b8155",
+              "d032d1e7b4b54ba28ac83c1a12b23876",
+              "321fce57c158432abeae496ae8a947aa",
+              "3ebe00201bdb4e119e3b74f684a58345",
+              "0f8bab6b8ed04774b386fe952aae66f1",
+              "cfcb6e456c354d99be91f161552f3376",
+              "61bd0d490c0e4c04a331cf9ce6b7d38f",
+              "7d8653fca29f4df3a7487733ff9db60b",
+              "943f8fcb66614353a51f32f8344b6122",
+              "0e695245b97c4bbc85e349fda3dc07b9",
+              "bb0d168c41f540b8ae42239d3938483a",
+              "87700a80125348f28c4f249bdf8b0a8d",
+              "8902c3622da540e496ed5b1524bd01ca",
+              "90432ec1c24b4607a935c94e130cd68d",
+              "464147b149824f20afc727751a702fc7",
+              "67e37a088be64a2ba786ca923b1017dd",
+              "98786f52ef5345b0b9164b9c1f2b8e18",
+              "0e1b9910a77d4b7fa69cb8926e6547d7",
+              "0b276315be4345be83da1e03905c8495",
+              "e11f8c3891284e07bd2572257afd5e1b",
+              "ee18d96394994d01b49d5b03b3d9a019",
+              "844b06df5749441fab6f61656ce581a9",
+              "e1c6b9a20e074f17aeba976b24e80c65",
+              "c690da8daa1e4f9ea73bcacdd92e8a6d",
+              "d0b161ae25c441e8b3caf7a3d88c1b05",
+              "47cf4b6b835d43388576a2abf4cc54f8",
+              "03bbebd659e64b5d9c29a73570c34854",
+              "b68e5097d2504d2cbd7e19aa1aac3a04",
+              "22a665deff88477b9372c0350c4c572b",
+              "5e535ed2b83e496ab57b1c80b615ab0c",
+              "d9de065c7f81443e98ddf066c7b5bd54",
+              "1e836106837c4ac7a11b36e700c46b64",
+              "55591e8179084fcfa3a61c8bd8d09dcb",
+              "de1ef93c41364eda9b4b111231057348",
+              "23b0b2f4f82c4a21846e91d7cea91da5",
+              "9e4d0fbb51284a7487c495c7b95a293d",
+              "b0f8cf1f79e04b5fb47a810f2c81bd7e",
+              "0c359bc4c94c46acbc9094354a15c33d",
+              "59d0b59b6c2248508d0601ff13878d33",
+              "891cb726d45c4fef8f2c74a56df5532b",
+              "fa39189070334939aea5fa4a7de5ec8b",
+              "f0e107dd6d54483aa367da0e337a97cd",
+              "861a00796f55470e85d94733eeee9a5f",
+              "5459633eb6e94ec391d13fcf67425726",
+              "b7b7467ece304ffbbd352b9b96a03aad",
+              "9dece059f1204e29b106fca9e191ddb3",
+              "e2e49c25d6fc4592b317e94cfabc2e5e",
+              "76d37a48a73946bab2821f097cf2605f",
+              "8e81ae00681347cb906b392c3656a64a",
+              "74bedc38b7da4e8a83b0c892d7aa59b5",
+              "d1e67c28b4664e8098dce8f5e80b8779",
+              "abe6cf39b784436993fcbe92221c31a3",
+              "d021a18ab70b4c7e8aec43932a124c36",
+              "72e7c092fb054b7ea0dcd2782b5d8a7d",
+              "8b1ea80221174fae943d5c9f997dfb57",
+              "f8073d625f80415dbf712cee434f6e3a",
+              "5f6014ba13fa4a659b9eb1b5f83599a7",
+              "327ff8f5292d47afbfebd3beea187739",
+              "988cac4341b646079fc73719f3f88ad7",
+              "900a4dac08f540dfb35c29f63236a12c",
+              "1e6009b9b0684b8fbaa379ea96f111ee",
+              "541b9b4e74614e2cb855bb90f03df538",
+              "ff256b2275f740ed82bca4f43b4d6fd2",
+              "3703041a499c426bb427ee008c81cde5",
+              "4b22bbacb995425fb32a2368f3685a92",
+              "49a66eeb9ef74de5ab8904fd90eb7558",
+              "08f9d125018b41c582a0fa1e234315f9",
+              "736c770230644894b85dbc34bd8f1d52",
+              "b67cbbf32f844a19b219be612d5038c9",
+              "774b513d64524ac7823a2cf13efa8d41",
+              "1e56da93bcf64ff490416d2b66cd3dc0",
+              "b7e35038ce344110b785753b655130f5",
+              "5472af91737446f4a4a2d92a3f684a45",
+              "9fb4368802da4a5a8101ba200d98403a",
+              "2e713bcc372e48b2a006558db4d1df68",
+              "1a277abd5ea44253bc6894bef258b52b",
+              "b3eedd82e7da4ce8b3ded70e49a2afd0",
+              "6f5c18cb8002471f8b3764effee37324",
+              "3bebac362b344e8d9103c5011613f1ea",
+              "670905a55b19458da69f83c8bcd511d1",
+              "ff54451a48394faaaa9d8cdb690d0718",
+              "36b5bc19b2d0407f8ab28ff0da2ce12d",
+              "879e48d9a9e04183903d94ffe98313d2",
+              "abce503d70594c2ca9afdc47847c125b",
+              "028e291ee53947bbbbc4bfb68c695f5f",
+              "a530662719374c95a9bef12e59e28c85",
+              "bffc0f4b12f141398535990709fd4f2c",
+              "04804c74e1dd43449d5f758cf5d0ba5e",
+              "95a506c3007c4525b01ee4e1600d671b",
+              "a0d6b0caeb2340fe96c8f5569e3d3ae4",
+              "30798f87a8b848d783fdacd71af5dc04",
+              "07ce54c75e76488ba4019a20b3707061",
+              "f023175de68445f98a6b01bb40ccdc6d",
+              "7389b79a0ff44cd68c7866995d728023",
+              "8e2b70ffe4eb4974bd6393fcc1292267",
+              "13eee164dc534424acb9dc9ee37a9465",
+              "722a7fe16af3422585a20c651345cfa4",
+              "f5596c1c9c4d42f3bc171961f9582eff",
+              "85d66e615b5742e78657b1e60c75fc72",
+              "731c02dc5dd446c3b22765575148e256",
+              "254ce460ce244c99a5afe39d5d51f6b7",
+              "4cf1dc345ace4da59f978f661487f975",
+              "8f30fca71bf24e5ca26e17c2321f893c",
+              "dd85d37dd1d14c7ea4592f8e11b2d2c8",
+              "3cb06377e4454f009d6b2aa7aa6ff0a9",
+              "4502477db4d948e693012364c2dcb370",
+              "52fe404ec9c14db2a7279b4c154eef3d"
+            ]
+          },
+          "collapsed": true,
+          "id": "E1UFuJC570Tk",
+          "outputId": "aebb69d4-c167-4de5-eb8a-dd19dd538f63"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "Not in Google Colab environment\n"
+            ]
+          }
+        ],
+        "source": [
+          "import os\n",
+          "\n",
+          "try:\n",
+          "    from google.colab import userdata\n",
+          "    os.environ['LLAMA_API_KEY'] = userdata.get('LLAMA_API_KEY')\n",
+          "except ImportError:\n",
+          "    print(\"Not in Google Colab environment\")\n",
+          "\n",
+          "for key in ['LLAMA_API_KEY']:\n",
+          "    try:\n",
+          "        api_key = os.environ[key]\n",
+          "        if not api_key:\n",
+          "            raise ValueError(f\"{key} environment variable is empty\")\n",
+          "    except KeyError:\n",
+          "        api_key = input(f\"{key} environment variable is not set. Please enter your API key: \")\n",
+          "        os.environ[key] = api_key\n",
+          "\n",
+          "from llama_stack_client import LlamaStackClient\n",
+          "\n",
+          "client = LlamaStackClient(\n",
+          "    base_url=\"http://0.0.0.0:8321\", \n",
+          "    provider_data = {\n",
+          "        \"llama_api_key\": os.environ['LLAMA_API_KEY']\n",
+          "    }\n",
+          ")"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "635a7a6f",
+        "metadata": {},
+        "source": [
+          "Now that we have completed the setup and configuration, let's start exploring the capabilities of Llama 4!\n",
+          "\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "0fc75d73",
+        "metadata": {},
+        "source": [
+          "## 2. Running Llama 4"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "7dacaa2d-94e9-42e9-82a0-73522dfc7010",
+        "metadata": {
+          "id": "7dacaa2d-94e9-42e9-82a0-73522dfc7010"
+        },
+        "source": [
+          "### 2.1 Check available models\n",
+          "\n",
+          "All the models available are programmatically accessible via the client."
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 13,
+        "id": "ruO9jQna_t_S",
+        "metadata": {
+          "colab": {
+            "base_uri": "https://localhost:8080/"
+          },
+          "collapsed": true,
+          "id": "ruO9jQna_t_S",
+          "outputId": "ab1722a7-62ab-43bb-9cab-4e45bf62068a"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "Available models:\n",
+              "- Llama-3.1-8B-Instruct\n",
+              "- meta-llama/Llama-3.1-8B-Instruct\n",
+              "- Llama-3.2-11B-Vision-Instruct\n",
+              "- meta-llama/Llama-3.2-11B-Vision-Instruct\n",
+              "- Llama-3.3-70B-Instruct\n",
+              "- meta-llama/Llama-3.3-70B-Instruct\n",
+              "- Llama-4-Maverick-17B-128E-Instruct-FP8\n",
+              "- meta-llama/Llama-4-Maverick-17B-128E-Instruct\n",
+              "- all-MiniLM-L6-v2\n"
+            ]
+          }
+        ],
+        "source": [
+          "from rich.pretty import pprint\n",
+          "\n",
+          "print(\"Available models:\")\n",
+          "for m in client.models.list():\n",
+          "    print(f\"- {m.identifier}\")\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "86366383",
+        "metadata": {
+          "id": "86366383"
+        },
+        "source": [
+          "### 2.2 Run a simple chat completion with one of the models\n",
+          "\n",
+          "We will test the client by doing a simple chat completion."
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 14,
+        "id": "77c29dba",
+        "metadata": {
+          "colab": {
+            "base_uri": "https://localhost:8080/"
+          },
+          "id": "77c29dba",
+          "outputId": "4857974f-4c70-4bc4-f90a-6ae49dc9c41e"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "Here is a two-sentence poem about a llama:\n",
+              "\n",
+              "With soft fur and gentle eyes, the llama roams with gentle surprise, a peaceful presence in the Andean skies. Its calm demeanor and soft humming song bring serenity to all who belong.\n"
+            ]
+          }
+        ],
+        "source": [
+          "# TODO: update this with a vision model\n",
+          "model_id = \"meta-llama/Llama-4-Maverick-17B-128E-Instruct\"\n",
+          "\n",
+          "response = client.inference.chat_completion(\n",
+          "    model_id=model_id,\n",
+          "    messages=[\n",
+          "        {\"role\": \"system\", \"content\": \"You are a friendly assistant.\"},\n",
+          "        {\"role\": \"user\", \"content\": \"Write a two-sentence poem about llama.\"},\n",
+          "    ],\n",
+          ")\n",
+          "\n",
+          "print(response.completion_message.content)\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "7737cd41",
+        "metadata": {},
+        "source": [
+          "### 2.3 Running multimodal inference"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 15,
+        "id": "e7b1baa7",
+        "metadata": {},
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n",
+              "                                 Dload  Upload   Total   Spent    Left  Speed\n",
+              "100  275k  100  275k    0     0   847k      0 --:--:-- --:--:-- --:--:--  845k--:--:-- --:--:--     0\n"
+            ]
+          },
+          {
+            "data": {
+              "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/4QmWaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/PiA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA0LjQuMC1FeGl2MiI+IDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+IDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiIHhtbG5zOmlwdGNFeHQ9Imh0dHA6Ly9pcHRjLm9yZy9zdGQvSXB0YzR4bXBFeHQvMjAwOC0wMi0yOS8iIGlwdGNFeHQ6RGlnaXRhbFNvdXJjZVR5cGU9InRyYWluZWRBbGdvcml0aG1pY01lZGlhIi8+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0idyI/Pv/bAEMAAgEBAQEBAgEBAQICAgICBAMCAgICBQQEAwQGBQYGBgUGBgYHCQgGBwkHBgYICwgJCgoKCgoGCAsMCwoMCQoKCv/bAEMBAgICAgICBQMDBQoHBgcKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCv/AABEIAwADAAMBEQACEQEDEQH/xAAfAAABBQEBA
QEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APxxgtYgAAtfLxrVGkfVe3qvqXILSMDOwUSqzLVWrbcmht4mfG0GpdSfcqNao+pI9tEvzKgNT7SfcbrVF1LumwROmcVnOpPuaQrVWtyxBbRiXIXP4VDqTLjWq33J/IjLY2A1Dqz7l+2q33B4o1b7n5U/aTtuL29VdS1p1sj5+X8aznUmVCvVfUstCgOAtR7SZft6vcIIo/MOVoc5gq9W+5dsYkL52/jUSnM1hXqX3LEsCk8rwKlVJ9zSVap3IvsqHkoB+FN1J9yPa1X1ITaIWYADkelTOpNDVaqnueEfF21ji8WMNoxu5r67KKtWVA+PzXEVXidzuvhbDaSWUQSLoBXn5jRn7S8z38BWq+xVmemxQqsK4TtxXiuTTsj0/bVUtxfIUuAV7/lSc523E61W+5JqUCC2UbeamE5t2Q6leqorUrw26sgG0UnUnfcI1qltxViUttA/Gp9pMr21RdQuLZCu4qM+lONSb0uEqtVK9ySSyF3YFQoOBR7WaluQ61Vx0ZV0uAwxmIjGDitJTk9TOlXqrqXLS1BnL7azlUkkbwr1b7kd2P3u0j2ojOdgliKqluP8hPLBIGcVHtJX3NPbVLbiGJScBRSdSY/b1e5JHbocfL1qXUn3KVap3LFvbp5g+XuKl1Jle3qrqbSxqZF46ADpXRCU3RbM5Yir7TcsxwJn7o/KuSVSfc3Ver3J0iUjoKh1J9y1XqdxkkKZ4Wlzy7h7ep3IzBGP4R+VHPIPb1O5FPGozhaanJ9ROvUXUjiRTxsGPpTc5i9vV7kbIok6VSnK24e3q33C7CCPGB04pKpLuKVerbcjto1I3Y+tDqTYo16vckeJSfujFLnnuV7er3GiJCQABT55tbi9vU7kkkKmLIWpU5jdepbcgghViRj9K055mca9V9R/2RNhJWiNSV9wdeq+pRitF+0k46H0rWVSXLuYxrVFPctXMaBMFR0rLnkdEq9VdSBYEbkDjvxR7SXcSrVO49IE6EfjUOpJ63LVep3GvHHu+7UupJLcft6j6ixQpnO2p9pN9S1WqdyRoF24I61KnO+5brVO5DHBH5vC/pWvtJ2Od1avNudJ4ShjE2Qo69axlUnfc0hXqqVrieMbaNroEr39K0p1J2M69eqpWuUtVt4z4clXA+4ePwqHVmp3G69WNHRnyv4ttIl8cXCmMf6yvuMHXqPBp3PicTiKrxb1Om0K2jUIdnp2rmqSqT6nrYWtPld2d34fgjMakJXj1p1E9zup1aqe5uRwx/3RXO6k+50+2qW3LlpbxkjC9azlUn3LjWqdzQggjBB2/Soc5s0daqupfECeVnaAPWp55sp1a1hIbeMoTihzmnuJVqvcqLErzMAPxxVc8jNV6re5FJaoJOB071ftJ23EqtW+40W0ZVuB0qXOdx+1q66mfYWMP28sE7+lbe1nynJCtV9puab2y78bahznbc6nWq9wmt0EX3e1R7SfcbrVe5FYWyNNkKOtN1JdxQrVb7jdThTzApWmpza0FVr1U7XIbuGMWnKinGc7ilWqqF7mPbxIZSNvfmtXKZhCvVfUvQ2yEcLn3rNzmjZVqvchliQvwtNVJkurV7kZt0xkLVe0mL2lXuV5YRu+5Ve0n3E6lW9rkUkSjkpRzzZLqVV1IZY1IO0Cr5pcl2Eas7XbPof/AIJ8+HEW/wDEnidlwdsFpG//AH07fzFf0F4I4BfV8VipbNqP4H8O/SrzqpXzjBYFPSEHJ/N2R+gXwH0yL/hWOvXEvzFlAXNfuc604VoRi9Ln8aYyk69KvVf2FG33nyr8f9EimvrtWT+Jq4s1qSnFn6LwljasaUHc+Iv2gPA8VxHdKEOSpIxX5LncZ6rof09wjnFWEoO5yXg7UDrXhW1vJzmSJTDOWP8AEhx/LBr8AzOjLCZlUg9r3Xof1dk2Z18Zl0W5Xa0LEsCE9B7VlGcrHoOtV7jWtYzHnaKaqTF7WrbcpNbR+ZwBxWvPUsZqtWvucn8UrdBZqdo+telldaftLXPJzbEVVHc4W2to/MXC817rrTfU8mlWnzJtnd+FoUa2A29Bya8bEuo5Xue/Rq1GrxehrG3jJwFFcLqzXU19vV7lS5tkEhG38K2hVmzGVWt3IpbVBHnaPzrVOo+o1Uq23KciR9NnzfwkVTpubvIMRUnGGhv2i7wDntXO6dOGjNXSpqTVy/Ase3aWrnnZbEaJkkATfjcMH0qXsEVdk1yVRMhhShe5pKKvZFrRdpTDnAPvWddJbMulGFi0NqTHa3TvWW6HsyZAhwxYVN7HRCEZLzI7qQKSY8Y+tXBJoUqT6l7RzmLJYdOazqxSejKpQp/MnlaJWO5xn61KuW6TvoRW84MxXitGrRJjBKRpaafmyxwO1YVLWNYxgtS1JyRgjpUKw0k5akbsqrk8/hVKzdjV00tSC3dDKd3p3rapStFM57S9oeE/GotN4yMcWNuetfXZVKNPDLufL5jQtiLyO8+FFvHDpsZB5wOa8XMqlSrVZ7eAcY0bHpEDO8CknjHGa8V+47M9KXK4qw5FYyAn8eKTasQtZWZPqkZ+yKw5xUUpJSNp000itao5i+YYAHHHNXKK6mduV2EYfOc8+vFQkjSEOZXY+7+W33L1Fa04LmM5dhdJufMiKYGSO9OrSUdUaUow6kMkc0U8hEfHfiiFpKxlOnGN3EtWNxCM7h1GKyrQtsVRlHqVrwM1xvQdT6VVN2iN01J3JimIvfHpWcoxi7gm3oNRDnLDn6VNk2aWsieNegx3olCKBPUnjIR1Y9jWdkNtI07WdJphgiuhK1OxinzVS+pVSe+a5XGx1bD1bPVcn6VLVtykmxCpPRf0qWkPlsMKknG3mhxSVws2yK5t5yMqn40RcS1TbY23tLhjwvP0rbliQ4yTegraReNICqnGeeKpRp9xKMmWJ/Dd3JFvzjHtXPGUVLRmvsnIhg0r7P8Au2lJb6VvyQtdshxcdESf2PNJznAPcCsZNKWhoqMm
iMaPcK+Bzirjytak+ybZLJpcnlc+npWX2tCnRlYrxaXODkc/hW9lZXOfk5W0NlQwxnzODg4GKapXehbilEzIGllvCFXODyfSt6lLk+I5owu7ot3lrOYxx+lZqMTaMefRkUVpcAhSuSe1S4wNXTstBy2twDtaL9KzlGCWhVOk5A1hcsSFTj1xWas9yZwlFiJZXgbHlkfhV8lNFxg2iV7C7EeRH+OKxaV7BZ8xWSKaOXEi85rpVOPKTKCjK50vhFR52PzrlqwtqghZz1H+MIx9oAUd6KTj1CvGPPqUNTjzoEoYfwH+VNqLejKcIOmfL3im1eTxzckAf6w4/OvtMFGP1NXPjMVCh9bdmdVoFg+E3Edq58RKMY+6ztpQvojtNHtxFGCrYwK8erNvRnq0lBKzNe3jyeSPyrnlY1ajfQtwoBgZFSrGtOMWy9bEkgggCqjBLUupBQRcyBEV3D6UWT0LjNONhFnjSIgtj04qZwSepFRKCKUMgaVhu6mnKEUtyKcFJXFmxnCGhRsyE+WepAkyorZOcjvVummbPlaKmmTg3xJ9ac6bS0OKMH7XQ05WDZcMP8KlQN9b6kM1wPL2hucdKHSinqVJRtuN02QF8k/pWcox0dyqVLuR6nMhmwGHvWkIwtuc87upZkN1IhtvvdO1aJxTOicUqdjKhaMyli9aNpvRnFRbvZIuwSxrHwwI9TUSipHY6aauQNIXkySOe9Hs42OeyTaCQlD7UlCI4pSe5Wc7nwT9Dir5Ioc4JK5Hc/d4bOPatoxMYz5SmJcngj86VS3LsW/fWp9cfsMaOLH4VtqG3DX+qTPz3ChVH8jX9Q+D2GlR4RU39ucn+n6H+cX0jcbHE+IlaCf8OMI/hf8AU+3vgzbywfDDU8ZAkzxjrxX6dVilXppn89uUZYDF2fRHzR8cbDdqFy23qTXPmMFys+h4Xq2oxPkf45aP5bSSFMqwPavz3N8LCcWf0NwriINJXPAPBtwNK8Sat4WlOFkYXVsPXsw/lX4fxhlsKU4YiPoz+suBsV7bDOnfdfkbU5Cnrz6V8dTacrXPuYxUpWIzcRxoWaQAe5rVPWxdflhHUoyXFuZt0cynJ6ZroV+XVGFCopSstTlvilIn9nBmIwK68upSdbQ8vOIKyscJZedPKoRRjI5r6OUKdJXkzy6dJaXPQPDSxRWi+c2OPpXzuKqy9o7bHuYdQpI1AYiTtkH4Vwtu5cVGUtyjcn98SzD2rqp3gjphTjErX2q6dYxZurhV7YJrohCrU+BHBiKtOFWzZDbXFrdfvLd1ZT6Cs66qxXK0ac9OS5pHXWfhV1jUGftXFVxMXK56EsHeTdy7H4WIPFz+RrJ11bYyWEcnuTxeEgW3G4P4GlKukrpFrB2ejJn8JBhtE5NZQxL7G6waa1ZNaeFni4ExA9Qa1nVhKJmsHJS0ZbTwuuc+cScda5/aK50fVNNyxbeGCx+ab9aznVS2COHaejFuPCYZsJN7GiFfubexbjqT2nhlowFWUj1IrSpWp8uxgsLJO9y3/wAInG/Lzc4rjVexuqEu5EvhJVfKyc9q6IV7rUU8N5k8Hh5oiCHPvzTnUhJWsZxw0l1LI0iToZDXPJxR0Rw73uMbQpSCBKfxqfapHR7LQaugSwHeRnIrZ11OFjOVFx2PO/GXwM1DxPrx1OO62rnoK9LCZrHD0uVo+dxmVVsRW5uY6fwd8OZvDtqI5p87R3rOvjadWVzqwuDnSjys6OC1ZIhHnIHeuWo4Se56EKMrWJ4Ik3KSnQdqyaS6m8aSW5PIiXEflOvSsrcrvc0UF1GxWUKHBWtHUTREqcbjnsbUSfMmD1GazjNpXNlGKWhDe3WlWMX+kkYx0NaU5TqStE463JF6odok2magCbaAAHoRVV5zjo2bYdUpLQ000qAgl4wfauSFWVzpdKFtiS30jTUOPJyamrVm+pKoQ6IedK08Hd9nFKlUa6mrpwUbWJYtN04rt8pevcVdSUpLcinShzbEqaDpzHcUXB74rFTcTaVOmyaPQNLA6D6EVLnKRmqdIevh7SmGCBU88l1L9jSkTQ6BpcB3IRVRrS2uJUKUXoWItMsM8sPzpSqNLc0jSp3LCadpqDO7rWPPJlctNCSWtgOg5xVJu25FoX2GpBaKf4cGpnK/U0Sh2FkgtCMFFIrNSsyrwS0INlohyBj0rp9ppqZPlfQXzIs/KfxHFR7VRZPKr6Djl1y05xVKvT/lK5JLZkUltETuZ8n1qpV01YFFX1Ii0UXCseOxNLmiDlYT7ZCvXnNHMQpa3Ip9RiAw2OParhYtziyu+rWqNuxjjFdCszgqTakQXF9b3g2bRk+1aJcqumEZqWjKwFtYP5yJ1PNaRftNGy3aEbpEU/iSxUlWTk8dK0jh1JnH9YfNsSW2t2JILYHHWoqUY9DqWJioki63ZFuxx6Cs1h09yaeLvJjm8QabGucDntQ8PFuyKq4rsiNPE2nvkrEPxq3hVsFPF2Wor+JLIjAUAVLwKT3JlX5myOe8guo98Sjgfw9qToSS0IeIWxq+DZiZNpGea4qseWVjow8efVljxkzLcAkY5FZw1VhYlOMyhqbr/wAI/Kcj7nrVUqTcrMqzdJ2Pl/xQks3j2ZYyV+evucPCNPAbnx1bCSnjXqdp4a0m5MYLuRwO9eLiK9NaW1PXo4VwW50tnDcQrhZMj1rklKDjqdUKMpbM0YvtAHJNZRlTN/q8l1JohdNyHPtUyqQj0NorlHT3l9aJvDZqY1oSdrCrKTjuV7XxHfXjGNWxjjNdU/ZUkclOck7DrjUr+Pjfk4qYToSepVV1KmxENRv4FEzn6VTlRY4TnCNipP4zeF2Lg/L1rspYeE1c82riKvO9B1t4rS4bdnr09qdSgoHXSxEWtWKviCGCffn8azcOaFrGsasU7jLjx1ZwPiacAHtmrp4SVTaJyYjFKEhbbxSt+NlrJke1Z4ikqK1Rvh60aivcu22oXSDAb6nFcDdJnV7aUXoNmurmSQMzZI6VUVGxm4SlLmEuHupYSA5GRWbqQjKzNW5WsZyW13HMW80nJ69q19tTa0RjKm4LmRK8t2nrx2xRGUGtWTGU2V2uL5TuOQPcVsnTtuVaS6EbarO3yljke1HKkYKfJO5Vu9VvIR5pQkemaqHI5WbLq1HyMypPFV3cu0cUbZB5yetetDCxpw5mzyY4i83Ysx39+bbzMAcZ61xVYU+bc1+tVJrY+/v2UNEOjfBTw5byLh5LETPx3di39a/sTgXCQwPCmFpJfZT+/U/y18VcxlmfHWY1273qSS9FofYXwwtmi+F07KSFcN+Py19LiV/tUEfmNG/9k4qTe7t+B85/GiwElzO2MfMcVnj43iexw3XfJFHy/wDGPQEuLWVSnQHjFfF5hC6aP3PhnF8lSJ8mfEO3/wCEc8XW2ux4QRSFXP8Astwa/LeIculisLUp/P7j+neDs3lh5wce5Fe6vcOzKs2OevtX5bRo04S94/ao1KjlzIz9Qju7m2JF4RjqPWuqjOjTqJuNzLEOdeHKVdG03UIJxcS3e5Sfu1WMr0qmkYmOHpTodRPGOkX
mswC3jBAx3pYOosOm2bVqbxEe5g2XgTVrdgxJ46HFdTzCnUdmeQsJXU2bVvpup2wVc5x2xUTlQcb9TupUK83YuRLfBcFSCe9cLdK53woThqQXlnf3ERCEjjitHUpRtcqftEjlta8LazdTbnZnXPAr0sNj8PTjY8ivg61eTdjQ0DTb7TVzcK2MdKmtXoVfebOaFKvHc9atcBA27qPWvlHB31Pra0p+0aLcKDjDjrUVJ6WQoSadi1Eg/v8A6VHtNLGimTRoBwT2qOaxfO2Txrzgt+lVz3Qc7RKoUdHFQ5K4c82ToRxuNQ5IuMpImQLjk0uYvnZLGwU5Bx+VRJ3BTZOrgjJP5GkrFqUujHBwBwfzrRNInm11HKynvQ5pGkXF7DhIucZH1qG29Sm5WGPNtPWr5boqnK+4Rzh85b6VPK4suUmWISMfeHtSaSZg7ykN3HJBlH0ptpI0jRas7jti7QWcH2rL2rYno9BokgXgYP41Sk2TzNjhND1bHPTk0pK61HzMeskb8KePrWfNYHqOEKu4Zjx9KUqlkXDUzfEnh+LUovLB5xwQK1oYiVN3KqUFVjYf4P8AD95pShJGyvrV16kaupy0aFSlN9jqIY1Y/vH49K5Jy5dEd8WupL5NmvLyL+JrLnm0bxSkCrZOdqyrx70RUmwqRUUEiWiHHnD6VquexNNRb3HRvbE7TcD86xqcyKmoomSK3b/lv+tY88kQoxfUebeMni4/Wj2ja1G4We49LRCRib9aFJIpU49ST7GoH+t49zQ53D2aJY7VM5Mw/Opchqmhz20WMCcfnQ6jtZh7OPcjMKA/64fnScx8iAQxscecKlzGqavuI9rGOso/Omqg3CKIXhiBx5oq+e6I5EKI0UYDfjmk5lcqGvGp5z+tHOZuFxnkRnqw/E0nNjVJMhkhgzgsB+NUpsUqaQz7LaP8pkX8TR7SSEoRZDdabYEYLrn2NVGtU7l+wiykbOJJQY5x+ddCqVOpyyw+ug99OjmXbJKv51lPFST0NY0boqSeHLKST5pV/Oqjiq0tmafVKbjqTL4dsNv+tXH1pe2rLqc31WLeoLoWnqcGZfzo+sVktxvB046jbjQdMCZ80ZqFi619zSFCmyFdL0iIbHkHPvW8a1fmvczqYamnoVNafRrGJWEn611UnWrysc1WMYosaTc28to0kWMY4ya3k3B2uKnRTV7G34P+a8O0cZrmr1EzuoRjFk3jbcs4BPGe9Z0mc+LSc0Z18N3h+UNz8v8ASuiL982ikqWp86a3bxjx5KZCCS3H519NRU3gtWfI1sQnjmoo7nw+HMYRHxwOoryKyhHdanrUY1Jam7bqIiBI4+mK4KtVNWOxTUdiyvK53j24qITWzKTqMhvdXj06PzJcYrphS9s7IitNU43ZDp/ie01omKOQHBxWVfCTwr1McNX+suxoWtjbROCzJk89Kz9pKUdTrqUILUsta2knG9eenFczquLsghGCGy2ds67PNT6YputKLD2cXIy7vwvZyyljKnI7100sdVSsCwcZXYtt4Vs41wJkqni6j3ucksHaTHP4WsZThpxz1rKWNqR0RrDDR5TN1T4f6fctn7Qv410Uc2xFPYp5dSq7ljSfC9ppagLcJx0FTUxdWu7yMFg40Z6M0VW2U5LrjFYTqPY6FCC1ZFLdWcLckEe1aU7yKdSK2K/9s2TsYt2PrRUpVIasyTu9R2bdyCJhU020tTeShKGhKkMDn5nGampUeyMI04jZLS2YY81eahTkU1Eoz6ZbiTargfjXXCo0tTGdKMxz6LBJDsaZcYrJ4i0roPYJxsZn/CK2cM5cTrya7Y46pOKXYxngKaV0OutJtkjEUEoJdgoA9ScVdKpLE1owitZNL72cGNorBYGpXk9Ixb+5Nn6M/CzTBpXhTS9JRSFtrGKMLj0QCv7qyqisNgqNH+WKX3I/yJ4jxDxOZ16z3lKT+9tn018PraWL4fN3Romxkd8V24lp4mK6nxmH9pLAYmT2ueD/ABdsvMeZv9o0Y2LcT1uH6nLynzf8T9LEsMyleoOK+UxlJSufsuR1+WUT5I+OPhkzi4XbzyVr4bMocsmf0TwnilFxbZyfhGzj1rQorqQgyxExTexHH8sV+F59CrgsznBbPVH9KZNi6eOwCfVaMnvvDzPEyQybSRwc159HFSi7S1PR+rqexR03w/qEU2J7jcF6c131cThnC6WpnDB1FN3ZuQWSYG8Z2jnivPlXvsdcYRoaWHSwwL8rLxWcJSTvchQjUldGdcXFnDdiJkH0A611yjWnS5k9DOpUjTmoomNtA3KqMYzjFcfNJHbS1jdhHawLkNj6YpOc5aJinCDI5tPimY4Ax24q4qoiXyQgVJNORA3HQdK1qPkhZHOsPGUtStD8W7BQNoTn1NdkcsnVepxwzWGImy9B8V9NCB5FQY965p5ZK9kOeY0obFiP4v6P/EU/Os3llQxWbUyaL4uaMy53pzSeWVGbRzSla5Ivxf0c8F19uaHllQl5tTeg9fjDpP8AeWoeWVB/2tBEsXxn0sfxLSeV1RrNYMmX4z6X1ytR/ZdUr+1KZIvxl07HG2h5ZV7lLNIWFT4zaavULS/s2oNZpAd/wurTC2zcuT2NH9m1TSGPjN36E9v8WrOc4QqfTApPL6iOn+0aUVZEo+J8G7n8iKby+pylfX1KFxk/xQh2HOPbitKOBlcini7vUqt8WIIuuPyraeX3Z1xxcEhg+N+mISskwBPqapZZKTtY8/EZnCFayIn+NOklsi8GD1BarllnLpJHXRx3MrtliP4xae6DF0v/AH1Xn1MtfNZI56uYxU7Eq/FfTiNz3S/99VP9nzQ1mUIokX4taSOTdL+dJ4Cpcn+0qbJI/i1pYwwuV/76qHgJlrMItEg+MGnIc/a1/wC+ql5dMHmUYu5HL8X9Pc5+2D/vqtaeXyTKjmysCfGmyhPyz5/Gtp4OytYzeapsk/4XbHIfllGPrXK8A2y4Y/mY4/ErVL+Fri2yVHcVVPAJO0jaOZSTsisnxRukJ82Vht64Jrs/s+nBGk8wTjqLL8arUKEa55z/AHqUctb1ZyUsx5p2Q+D4x2rjcLnj/erCtlyex3zx8Iw1ZYj+NVoP+Xsf99Vyf2XJvRHFDM1zEg+N1ooyLz/x6tFlNTsaVc1gpWCL49Whk8tLvPr81XLJuSN5GlHMeZ3Lf/C7YP8An7/DdXO8rcn7qLqZktkOX42W68tef+PULKZvoRHMPMa/xwgH/L2P++qiWWOL2IeZq+40fG23Jz9rH/fQpf2a+w/7SQo+NsI63Y/76o/suTD+0ra3Eb44Rnpdj8TR/Zj7E/2onuxv/C7EY8Xa+/zU/wCzGCzJdxR8bGbhbkE+zUPK2DzPzA/GaUrkz/8Aj1X/AGVIP7RklcjHxiJPM/8A49R/ZbbCOZ6kNx8YIwebsD/gVP8Asxp6oKmZruRD4txvyLwYH+1Tjlt3sFPMU5bjZPi5CFy12P8AvqrlliXQdXM1GVrjI/izBIcC54PvQsva6EQzHme4+X4swRD5bsfi1KeWN62NJZiodSu/xbhd932vHPrVQy
9R2RLzh8th6fFlMcXo/FqcsvUyP7SW4rfFmNFybwf99VEsqjYHmXdiJ8XoWOPtX61m8simOGaa6Edx8ULdut9jP+1XdSy9ON7HbDGqpHUoah48t9RQK2pA47ZrSnhnSnexwVputOxu+HvHMRshB5gOAOc1yYjDzcmdscTTpU+W56h8LrsakDMORnINebVoSi3c1w9d1GXPHgK3QyO/NEXFLQMQpc9zMvyV0GR06bK1i1zXZsoTq0T5r8Uaxa2XjmaW5lAAb1r63DOUsHaJ8riPZYXFNvc2rD4laTCAkVwhz15rknldaory2O6jjY1UaUXxN07GTcL+dedUy1xlZBUx1OE7JkyfFPTApAuUP40QyyftLI6aWLS1ZT1Lx/p2pIYjcA59DXcsDOj7yHWxNOcbFPS/FOn6TMXjmHJ9ac6E8T8RhQrwormNX/hY9twTcjjoc1xVMByuyM55ipPckh+JNtzm6Hv81Zf2c29i6WOjJ7g/xLtf+fofnTeXOL1QVMdGEtxv/CybRz/x9Dj3p08A1LY0pZom7XGn4j2yk/6WOP8AarepgJKOxWIxsIxvcVPiXblsC7B/4FXK8v7o5o5ir7iy/Ea1bBa7H/fVOOB5XdI6HmkYIj/4WJadftgP/Aq1eFdrWOeWZRmxr/EO16faV/76qHgX2JePiRt45tZutwPb5quODkmXSx0WyGbxfZg7luQD6g1rLDTvYdbFwtoFv48hU4N0PzrKWCdtDCGNu7XJW+IMC8C5X/vqp+o69y3jYrqIfiHB3uR/31VfUH2E8dHuNHxAtXODdL+BoeBdiFjot6MlPju02Y+1qM/7VCwVnsbfXow6ld/HlmrYW6BP+9XSsI1HY1ji3W3Nz4Z6hF4r+JPh7w+swdrzWLePZnORvBP6V6fDOVVMbxHhaaWjnH8z47xJzqGW8D4+qnqqUvxVj9OvC8QQIingYAxX9q0ocskj/JrHzcm2z6I8GQBPAoBx80TfxEdvSqxD/wBrifPUFfLaz831PFPilbLJ5yg9GPatsTG8DuyWdnE+eviLpxdX445r5jFRV2frmT1rNWPmT416BhpJVTjntXxWbwitT9x4XxMpJI8G07WU8I+ILzTbhsQXQEkeTwHHX9P5V+ScV4RYnkqQWq0P6d4Nx1KnQcJvdfkaE3j7SRgSXKj2zXykMsrPofXLHQc3y6kR+IWkRkhZ1P5VNTLqiVjup4iDV2LF8Q9OZ/8AXr+dEMrqbEYjFU1TbEu/Hlgy7hKvHcV0wyySdjzoY+F7Gc/jXT7iUSblJHTmtKmEdOPKjf21NvmY/wD4T2JTsYrisll6lFs1ljFy6CP4/iYfLjgVH9nKLOOGMftNWQN8QIkyGYZI7U54F8tjpr4pSV0NTx5By8jDPYetZzwFSqvdRzVcypw0uVYPg/clV3XBBxXbHMVTm1Y4o5U8PUety5/wqOVItxuCePWn9eg+gPLvaMavwmlYZ8+sXjlcz/sppksfwolxhZx+dWsZBGiyuRIPhHOeftA/E1lUx8U9A/sqVyZfhFMMYuB9c0ljoNFrKpEyfCOccC4H0zR9ep3L/sqRKPhJKBua5H51lPHxTBZVIsR/CGYpvFwMfWiGPg9zVZY7E0HwakkGTdis6mZRi9EEcslfctQfBFXGftq5HvXM80lfY6P7NaVkdF4R+FNjYO3nurketTWxrqRReGy/37M25Phzo8khxGoP0rFY2SjY9iGCpRjYY3wy0lsKUU/hUQx0kafU6S6EN18LNDMDlo14B6U62PqK1mL6vSTPAfixpCaJr7Wtq+F3dq+lyms61HmZ8tmtKHtdEM8O+Cb3WYBNECeOuTWtaqlLVnHThVlojdt/hZq7cAt7cmuaWJpRR0wwNabuWF+E2sk4Dv8AmaFjKNjR5bVkia3+D+qSSYaZhzyCTXLPMKavYiGX1L2aNGH4L6kwCrcN+ZrGOPhe7O2GXTlsSL8D9WLcTn863/tCg0W8sk0WIPgTqUjY881yvMqakQssqIlT4A6mz4Nw2D71U80pcmiG8sm0WrP4DX6XAR52wD61zf2jFk/UKsXZHWzeDofCujCC4TJZcg1j9YdasmjseHeGp3kU7HwFBfaLPdvHhipIOK1rYlxq2Zlh0qtNuxxVn8HbnVbl5hIdu4966pYxpWRbwPuc0VqX1+BFx9xZTk+5qFjOXVmccDUqSsxw+At4OBKffmkszhzHX/ZUbDZPgDqrgmO4IxWzzWnFXsZ1cr0ukSaN8AtVubryi546nNclXMeaN0c0MJNS5TZb9nHVTjErfTmojmajE7qOWTnLUcv7N2rEEl3/AFrSnmkWjq/smwz/AIZy1MEhmb6ZNRVzKPQ5p5S29Bsv7OuoJzvb9aiGZx6lRyh21K8n7PmqJ92Vv1roWY02hyyrQik+BOqIMbz+ZrmqZiovQ4Xl0lKyEX4Gap/AM8+9OnmMZPU0/s6aWwi/BjXEfy1Sqnj6aZvHK5WFf4M+JFPEZxXSsfQcSnl0trDT8GPEL8FSKyjmNGMiY5TNasjb4F61K2ZC35GrnmVLl0B5U5O1gb4CascBWYfnWVHM4Ju4LJ5JkU/wG1iD/WSN+tb1cypuN0c2IyqUZXJYvgPqjw5jlbPWuenmUPaWZrTyqVrkY+BGuF9rSN+ddU8zo2LllMp7Cy/ALWVGTK2KlZjRcdDN5PPlGD4F6mp2mds+nNcn9qxUrE08pm9yNvgfq+/Hmt14Ga7FmVFwuazyp2sPPwP1ZV3LIc98VySzKClYVHKHcWH4Has7Zd2NU82gqbsbzy2UdEB+BuqxuW3n9axWbprUUsBOMLo1vDnwr1SC4AnkOwHmnVzCm4X6nFHCVnPU9w+GeippNusCcAAA14dbESqT0PfweG9mg+IBAuwpHGfzopXuPGJRaRQuIRJoEgH9w1MpSWprSlakfIHxk0u4/wCE3uPKlPLcAfWvusjqx+qK6PiM1g6+L5SnoHg/ULsAhmOevNd1fEX0Rzxpzh7qN6H4a6rPjaX6eprlniacI6lrCVKkrlqz+EOsSNy78+5rl/tCEZXsezSwUpRL0Xwa1gHKyN+dbSzGlKOo44GXMPb4Oa8xwJGNZ08worQK2AqWshR8HdazteR/zqa2OptXRzPKqjkPPwZ1hgBHM/PXk1lQzGnfU6KeVTiRv8GdcQ7TO351vVx1JxuYYjLKnNdDm+DWsFPluG59656WYQ9psXTyqe5A/wAF9eX/AJbsfXmu6eYUXEqtlk5xtcIPg3rTMVE7ZHUZNcDzCHY4f7MqxGyfCPXPM8syP+ZrqljaKp3N3llScRW+D2uAZEr5+tRSx1BuzCOU1ENPwk1xOS7/AJmtpYuhYmWV1G9Bf+FU60FyHf8AM1lDHUeazLWV1Yif8Ku1lhy7/nWs8ZQKeXVHoMf4W6wOVL/nRHF0GjCWWVb6DG+F+s55Z/zNX9bw/kCyyqRSfDDWMcO/51LxdFomWW1H1GD4YayPmEj/AJ0oYui5WMv7Nq30I5fhrrgGA75+pro+sYffQqWW1N7jI/hjrynczuc+prGpjaLdkS6FWMeVHq/7E
Xww1af9qPwzPfszw2LT3bg9AUjbH6kV9v4c1KWI4qowir8t5fcj8W8d69TLfD3Ecz1qOMF83r+CP038NZEiA+ozxX9QQ5nM/wA68ak4s+h/DKSDwbGGUoDB1KdaKyviEz5yjKUcBUi9L3PG/iPHvkmP+0cGunEK8DpyiVlE8K8d2RbfuODz0r5nFx95n6nlNWzR8+/GPRo5YHO3nnPFfG5pT54s/ZOGMU4VEfK/xV8LecZGVtrIcoRX5tmUHKLjY/oTh/MFDlb2PPl8Maq0p3F2APFfKfW4yVkz9SeCkoc8epZTwlqUowIWyelSpxerZzQp15SsmypqfhzV9HXz50YD61o8RSlK0WddfCVPZ6szjcSzuFEjD15rSM+U4IRhT1bO2+Gnguz1/D3MuDu7niuLESnfU6aFqy0Opu/A2jWk/ksgJBxmuCeIlsmehToJblKXwto8WSEH0zW1KcpPVmlXCwdmitdeFdINuZ/LXPoT0q5zmp6EypKNKxz11oUGSqKMfWtIYh00eNPCqcj1aWEGNdpIryaSi56n0mN5vatItwWRNvhieR1rCpNc1kaUYXRLFpmUOemKwnNJmjppFi00v5sGs5Vi1CLRKdKy4HT8Kl1bgoRuTx6QAPmH4VPtbGns0tSSHStpyFHPtUOqi4xW5LJpvTcMc+lJTu9SVFKRdttOH2bGB07Cl7TlZq4xSuT21iCmB/KspTdyIxTLlpYbcjH6UKcWbQo3RYFksPzAd+SKHO6sgUPZyLENup4x6c1lzNHRGVx0luG4ZeQO1EZe8bNaFe+URwOT/drWpHntYwad9D5p+N0Bl8TFkx96vr8lpyjQ1Pk80nGNbU6n4W2bx6Uuecis8fJe0sbYTllC6PQbGyHloxXqPSvAq1L6HtUrKyNa3sEEZLDPFc3tJLQ3nFRVyGxtl+2lSc5PTFU6bavc56UeeRs21pGkw+QY78VE9EdtOPLI04LONlPyAenFYc7RpJWdwtrUCc7RxUPuQpRehZFuFk2gde9DbaFdOVhFtD5wkznB4q4pA6VpmL4zszfkRYGABxXRhfclc58dT54WHTmDRfCzq525j4461o2qtax56p+xoPoY3gJxeQuwXhiTzV4h+zWp3YBynT1OkSAJNnaPauCVS+x2wUYy1LTQbeq9elYXludVtCWO3/ck5xxQp8zszFtK9yz4WtVN3uA5D8mumy5DippOsdStkuThc/hXFOT2PYilEnSxymOOlTBu5p0KlzZ7JOneqm1YxcrSsVrq1JTOMYrNM0eqK5twU5WtoOyJaujOvLYAkH0p2uzit74WVsGX5k7UW5Tq5E1oOj09ftBfYMZ61m5NoItXsWprBNowg6dxTjN2NHErfYVB4GPpUNu5HOrjktAWzt/CqbfLY0S1uSLbIGHFRDVlNWINbtFMOSMcd67IK+h5+Jb6kGmwAwnI7VnJcrN6NlAlS1AlyOv0rOUrlRmnIsPaqyYb05ojJpGs1pcotaJ5nAH5Vm02zOla9hJbRGIGzHPpWik0rXHNWkRtaKAQAOawb1KTUVcWO12jn8TU6sPdmx72qMhOPxFVFWd0KpG0SpDbKsjEKPxrodmtTlpwi2dX4UiJcL7VlJRTOymrGZ8RE23gx61rRaR5mNbdQqEH/hH5f9w9fpSnqx03+6PlD4sxtN49kCjjca+2yam1gj5HG1IQxl2bPg3TnRVI79qvEzib0nGo7nf6NYZiHGa8atNJ2O+mkdLoulqSGK8n1FcE5I9LDs11tYoziSMe3y1hKc7WuaSkoyLljYRTcmMYx6VjzSizog1NCSaZEZSPKX8q0lUdiG0pE1tpMO7mJRjsRWSk0zoWupDf6dEH/wBSv/fNdLcpQOaqJDpsBXIgU/UVz3aZpTalHQlbTLcpgwr0/u1rGbtuElYg07SYBcljEvX+7UO9jGCUpahd6TbC4O2FfyFaOb5bFNqEgk0yEAful57YFZxbT0LTUxl3pUCxgiFc/StfaSa3Mp2gyOPS7fZkxL+VZ3d7mqScSIaXAW5hUD6VUqjfUyVrjZdJtgM+UuO/FOM5dGXZEEulW4GPLX8qpVJdyJJFdtPtySphXgd1q1KTW5hNJakDafb7uEXHcYqVKSe44KMtSOfS4Uw4jGP92t+eTjuRW90rSW0ajoOv92lST1dzl5E5HqX7FOlJP8Zr3UhF/wAeejMA2Ohd1H8ga/ZvBfCwq8QV67XwU7fNv/gH8ufSlxrp8N4PCp/HUb/8BX/BPtLwvFmZAfUYr+m6TXMj+Asc9Gz6H0NHbweqySbituAoPGBSnriLpHztNyngJuTvbZeR498QIw0swzkZOc111fhsdOVy0jY8V8b2gJcjv0yK+excdT9Jyupojw/4nafHJHLuXse1fK5hC8WfqWR15RlGzPmT4p6YFuJVVOue1fnuNwkuds/e8gxadJXZzHhaL7bogYRqXgkMTkr0x0/TFflOb4Z4HM5Rvo9Uf0FkePhjMriusdGaNtaBpQrqMA/3a4ZVLLRnq0qcd7GX8TLS3OkZCgZXpVYTm9vcyxn8PU8sttLd5SQeCfSvp6fK1dniSwsKlNu56D8N9PlsogVlIPXGa4cdWjJWNcBhpUzoLi0nuZCXkOSeua8SpKy0PTlCXQrXWnMCFHUVpGs1Y6acW0QS6VJLAVOcEVusRdainTbVjJutEaFG5p+0jKokcFSiqcj0W2tTKFFeepcsmezWpc9Vl7aqbYwgyPUVk31OeMnCROkWEz2rGqzafM1ctWUfPK+nNc8iYbkoi/ffMPpU30Lt7xOEbbhl461LlqarYlii5Ax3oS5i0rK464g55HGacU0yGW4IyLXB9Kyne5bTcSazhJTntUSbuVTWhbtYyXwB6VUFodsI2iTXSEHHr6VRhUXvD7VCV5HYVE7WOinFOI8qfMxjGRWcfiNraFTVVC2r+wrp6Iwe7R80/GVwfEmCON9faZN/u58NnbaxFjtfhfGG0uMY7V5eYNqqztwEf3aPQ7CD92gK4x0rwZXctT36EE9zUtoT5e1hgEdTTejOirFKBWhtWS8HycHrW104WOej7s9DYskWSXjqK46rtojpablc1IUDR5AxWFmzRqTQWsRWdjircVymFveJljZpSB6d6zbsaQScx7RFQWxj2FXDc65WSuZN1ZNd3Dbuv0rV1OVHJUXMzH8VaJfahbLZiUhcYwK0oVUpXOerRlVjylvwb4fGkWnkuO3TFRXcqsrs0w9KVHQ0po9knPGPWslE1TtO5YYboQSKmavodq1iSIMwt8o6Vza3OepdJl3wgu66wwH3q6EpclzloL96deIeeRiuaex6kiykAKgFQPSpje5rDUoahEQ5GPrmqZjONpFV0JiyRzioBNlQodprWGxstjMvYzvbjqKq9mcMviF09CqgEelEm7HZTs4lgQlLkntXO2zF6SLU0ZaMEgcdaqDudMNUVJYtrHjtVnPU0mJbrk8jtik1c6I6of5ahge49BUU7ph1INdXdF97jvXXA4sYivpyARbcdqyk2VS0pEgGLjBHfis+hK0mWZF3RcjHHWneyO56w0M8
g+ZtwPxrNNnND4wljZeMY+tDkbVfIbsJGMdRWWtzJJsQKwHPpVJF0/iHsn7pgfTmtkVW2K1uuZmBHANa6NHNSXvM6bwsCsgUjnHWsLO52KOhlfElh9sX6itaWjPJxy98qbQfD8v+4f5VM22wh/BZ8qfE9B/wnkn+8f5193lF1gT4jHJ/XDpfBsBaNOPpXHinJyuehhl7p3+kW5EeMYFeNWbvqepBHUaDBgKNoPNcc2dlLY2G0sSDIH41ldm0oc2pZsLHy02FRj6VL3NoLlQS2xE2SPxos7Gbs5XJII1HJxzQlLqdMG3oRaha7hyO3StuZNWIqr3SO0iG3bj2rF3UjOk7MsC3JiOB0HBrWLujWpqivYxf6QcevNKzsc1O/tBbi3xcMxHSh7GlZWYySEswAH1pR3CjuF9DiEDHb0ptkV/iIYocp0qQhflI/s+HzjtzQZPcWa3+XcV59aqJo20VZbbPLL+NNPUzV2yq0J3HK4Hat47DqRsis0Z3kgYwemKdtSKbfNYbdxHyxheMVd7JmldNRM94TnHSqjNxWhyQ+I9x/YX0fGpeItcdPvNbwK303Mf5iv6C8EcJL6vi8S+sox+5X/U/ib6VOYc+a4HBp/DCUv8AwJ2/Q+sPCiD7VGT/AHhX79SV5H8X49/u2fRGnrCvg9JIZA3+iqGIA4PpUa/WOXzPGUYLLHKD6anjfj1N0shB53HtXfU1joGVu0UePeNbZiXyO57V4eKifouWTVkeN/EKzaSKUFcj1xXy+Nje5+kZNU5ZRPm74s6U6ysxQEc44r47MaVkz9q4fxCaSR5j4Kkaz8U3uhSnCXcPmxAnjevX9D+lfknFuGk4xrr7Ls/mfvXB2M990X9pfidHHAVkwR3718epNn6NSRz/AMUYyNMAzjivayxpz1MMbZ0Tg9LiTeBXsVNDwIStdHoPhO3Cwqy/pXiYiq+Zo9bBq7NgRorFj0rz6kpnfPkTsQXS+Y544HfFEbJGVOfLOyFjtwbcEp+YojP3rHXNGbqNp5kb4H4CuiM7VEctWCcrs7DT4sRg+1c0nqehL+Ix93G3mhQ2OayjqcT0kaFtGTbAHj3rKpds63ZwRZs4sHkZHasZGcYpMsRwkyEkc1F9C+XUkWE7jxxU7s05SSOMbh9eK1Xuo6OX3B1wpzyPShNHO1rYuW8f+jZ29RWFR6nRyrlJrRP3XI/OsZbkRLFgnz5I71onyxO1bBesVcqRx9KSd9TnavMs2SkR/MOe1TJtnQmox0Gyj95j9KI/EaPYraqA1pJ/unit3eyMHq2fMnxnDf8ACVY/26+0yXTDHw2cx/2g7z4Wqf7Jjbj7ory8xv7Zo9LL43pqx6LYRsY1LH3rxpLlZ9FSjFJGjNKbW2zg8jrisdJTsiqy9x6lC2kuruXgFeeDW8rUk4y3OSknubmj20kMeZDk+prkqe/LQ64SvubNqh8pge561m5cpvzJIIQFlYGocm0cz+JksKGSfkcZ6g0krm1BLmuWLyMLDtHBI9K1iXWujNtE3St259auUbq5MWm7kOpWrGcMc8GiKSRFSXLInt4QsY54Heoc+hvSXMrkV+mx844BzTT6mNWPJK4+JzJFjHGOKUtjejJNWJohi3Yk965pfEKstGX/AAaM3fvu61vF+4cdBfvTsiv7wcfWuWpqeoy1Gg2YpRNoKxR1SFuT+RFORlWWqKCjdCcjp3rPqQiqEBJGK0baRvsjLu1YSsMd6Iyu9TlcLu5JZRgN8xxmrfY0jO2haljO/IHGBUOOg+XmdywV+QHHWpjozeCSKV2PLwGPb86blqYVY3lcZaksMBeKTlY1pqyJGBDgEY5pRG1qQa2v7jkdAOa3g9Tlrq7sVtOUhMkcVckhNWpgxxcYOOvSs1EiKvI0PLBgyR2rGejO9L3TPlUrJ9elEFpqYNJMV1DJz7Up6Ie5EEIO3AqUluaxS5RMH7pXn6UX1M425xw5jY4PA70+bU1qrQq2WXuGGO/et3JKKOSkrSudT4bTEorJvU6k1bQxPiSh+0qSckHmtqVtTx8YnzkMMYfw9J/1z/pWbumaUo3os+WPijAV8fSZP8Z/nX3eVzX1KyPkcxgvrdzpvBkeETA9K4sRpJs6MPax6FpEY2g4rxqrdz04JHTaLEVAJXvya5JnXTR0NrEWQj2rM7IomWMRDgjp3oB3ZHNC0h3oOnWq5+UpU1a7CGAxMN3pWTcm7F8ySHTRrJ8v48VaVlcStKOpVaF4ZOcYOMcUrqWphJWehaWL9ycnPHNOL1sauzgU7EA3ZX/arpS0MqaXMTXaATEleMc1jJq5piFsRxxiSTb78cUk7EUVqM1CPAI29BUJ3JrayI4IjtB21fQcFaBG0YLsMfnUmSSbFmjIXJXtWiRrUjaJXZPk3EChL3jGCTkU5IcA5HTpgVurIuqroolD5hB9RxV3Oek1zjrpD5IyOMVLkjorfCZ5j559e9KL0OGLPpL9irRxa+BLrUNuDdalIc47KFFf1b4OYV0uEfaW+Ocn92n6H+eH0ksd9a8QalP/AJ9whH8L/qfR3hS3H2yIE/xCv1yiveR/L+Pk/Zs+jtNSyl8KwosCBltMHYuN3Hf3rmqKUcS2n1M6UaE8rVoq6i726+p4r45T/SZdw7ng16cneJ5uXNcqPJvGNsWD89+K8fEpXPvMtnax5L44ssrKNvUHtXzeMhe5+hZVV2Pnz4saTujdivrmvk8wp3R+wcO4i0kjwPxA8mi+ILfWYRg20wJ916EflX59nWFWJoTpPqj9pyXGPDVYVI9Hc68sjv5iNlWGVPqDyK/JIrlbi+h+40aiqQU47NHNfEx92nDPp6V6uXztU0OXHu2HZw2lDLjPrXuVHeLPCpp2PR/CMObda+frRam2z3cDG7NeWDa3C/WuSb1OqtG0xi2oI3HnNRuOlBXuOFudhT2pKXLI62tDPu4AVJxWvP76ZyYle47HSWOfJB9qmXxHZLSpIffg7gR3706Nupyte9c0NPQtbAOayruz0NU1Yt24/vCuRvuOJYgX5v61D2LsSouH96RoSeX/ABgdetDk27Gy1iNkRmb5RwfWmmzO1nc0LdCbYAelZT0epXNdEttGQm2odxxRZsY9pJbrVWlY3c1siG5fdckdxTUHYasW7YnAJGOKcvdQS0QyVf3vGfeoT1NW7orako+ySY/u810LZEdz5o+NSL/wlO7/AG/619nk3+7nw+c/xzuvhYpbSUwP4R1ry8xX71no5Z8CPSNMXKJlegrxqklFHvqVkjRubZpbfB9OBXGn7xTXOhuk2ojdV2cbueOtbtXWocisbSQBQdq8duKyk0loNKxes1/dEGuaVylZsBDumJZc8dKpK61LlT0uSQxhZwh4FO6SsFJqMh+ozI0e0cYoTkzSu7rQoWGTcEMO/StJcyRFCKTLGrQAgOorFSbdiMQve0IoR+6yOlLVM0oP3Srqe7yySOR7U1J3HXjfYgsJpSgVyOnFVOTWhFKUYF+Mny2TvWfK73NKvvRujR8Ggi7wf79dCj+7OOlpVO18vLHmuSex63YsxFVUZHPp61EdzoS0Kt+
m6Mj07CiSZnUimZYTaSDUnMtyFkw5AXjvVTeh0vSJmXKgSnNZxu2c8gto2lYqPrXQ5KMdRwhY0VgULlhyVrBtyZrJpbEkcYKYI+uab91ChJlPVLRVUMByD2pRZUtRlnEAMKOKfLcy5tQuFIYqD9TVQvsaxdyvqoJgyR/COK2ppp3OXEO0irZ5WLAPWrk7F6cgwlWnAb161HOrGMW+fQ1UB+zDjnHWueTu9D0UvcM6dSZCaqOiOa92IAxXB/E1nJXHKIIoGC3pxSadjSm9BrRNnn9KhExi+ck8jEBHtxxU3szZlOwjCXBPbNdkVzQRyzVlodN4c5lABqXZF0dTG+Jhxc5963oL3tDz8w0kkV7Xnw9Jj+4f5VlWlqVTf7lnyz8VXH/CfP8A7x/nX2WVP/Yz47MZXxdjpvBAzEmPascS9Tpw2yPRNGHy4AGcDFeLV3PThudXpMfy9M5xXJI76aN6zUqpwPpmpZ1pIeIN7YYdfWplK2iLaWyHiMKMe9ZxjKpLlirvsJu2h33w+/ZX+PXxSu7GHwX8MtTnXUifsV3LbmOF1BGX3tgbRkZIr6nB8G8TY2CnTw7UW1q9EEcLiq13GDsjQ+OH7J/xW+CuoJJ4g8HXMGn3l79l0qWSQPJduMLlUHzYZgdvHQivQzjgjOspofWJRvTbtdO+p2vLcVQpc0tbbtHmWuaReaPdy6dqdnJBc20zRTwTJteN1OGVgehBBBFfFTjKE3FqzR58rLchXHkfUUr2dxLYoaejfb2z/errg7xM4O1SxZv4yXbnqa5m9TWuhdPgJP8A9am07E0UkR6gm+Ug+tQiJWlIYkexPmX9K2lsXJWiQRp5kp3fhWaMI/EFyu0cjBArS9jevpAqzKdhAP19qIvU5ofEV3U7CdvWqk3c1nsZ6xu8x9DVp+6YRjyyuSXMY8rGO3OalO5pValAzZIwuSK0iklocG6PrT9ljSv7P+FelKVwZkeVsjrucn/Cv7R8OsK8FwdhKbW8eb73c/y98Zsw/tDxAzGqv+fjj/4Dp+h7l4Qh36hEB/fGOK+6pr3kfhuYStSZ9Cp5ceiIRbbQ1sM7DxkDqa4226u/UnnjHLVJRtePQ8Z8dwv9skJIzk8gV6k17p5uWS9xHlvi22B38da8nERPuMvnseWeNLQssnHUda8HFQbR93ldTVHhnxN0wyRyBh69q+Wx0bH6tkVflkrHzv4/0kJcSrs4Oe1fD4+DU7n7JlddypxH+C746l4fiDH95bEwyZ9un6Yr8jzuh9UzKfLs9UfuXDeL+sZaoveOny6Gd8SY/wDiVjvWeXSvV1PWxqXsTiNGX96oPrX0M9Inh09T03wen7hcj6V4OIvzM97AGvcqRwRzXC22zpr/ABBBEWizisnLlbFSaQjJtUgjvxxU36s7I2ZQnjyCSOh7CrlK5y4hWize09MRDPpV1L8x01NJsddOCo45HtVUk0rmKSaNHTCTa9O1Z10hW0LtooLYNcctjSO5bijy/K/hU3drHQ0h7AbxzUttCvYlkX5QSMZFQneRrH4RChMYyK1joiaj7F225g+7xWM3eQQi5Ilt9oHHNEYmzVtCSOXbnd09RWqaQpe5qQKVec89+tNy0Kppz1ZegXGB3A5Nc85XRrOPujZgWf8ArSi9RxINQj3Wkh/2a6L7Catc+ZvjaCviccfx/wBa+0yZ/wCznwuc3+sHc/CbLaVH9BXmZimqrPSyxfu0enWKhLZVK4OK8GpK7se02tjTs1EkRDL9Kwsr3NYSaRNp9uBNnHGetOVV2sPmbZpupYkYx0rBu5qotk0W6HK4x0oauGzJUQEh8U3JctkbT+G5HGHkmPrntUx+GzMqceeZLcW2SFkOPrWkUVUg1Ipoqx3O0DBz1rbRolS1si5cgSRAMB061yy+IucFYgiQKDkAccZon8JcVaNyG+VZNyMBzis4pha8ioIBCgyuOPzroUUtTKpFJ6E9kQ5K/wA6cnZFwi5o1PCw23xwMfPVRleNjnUbVjtkyG6fWuSpueolZIsquFAH51mtzdP3SCQBoyMdqp6ol6oy7hDFKeOvesznatIheMbScHpSk7s2voZVxHvmIFVB2MZJouaZpdxcTJBbQs7t91VBJP4Unebt1FdQV2eofAn9lr4p/H7xRpXh3wXobeXqdw0S6hP8sEQRlDszdMLuGfrX0uRcJZvnic6UbQW8mNU61WnKpH4URfG39m34mfs/eNr3wV498PTwy2czLHciE+VcIGIEiN0KnHBrfOuEM3yafvw5oPaS1Xf5M7pYOpCnGotYvqjz3VLJjHkj6ZFfJNOErM55qxStYmQYI5703K6MYx1GXaEOc/rV09maJWdynq7ZhCjriuqiuY566TZTswfLxU1JWY4Jcuo0Rf6SGYkc9KzXvIm/v6GurHyNnTjvWcklqdkW3EpMPn5HHrWSZztNO4MpUEnv2qm1Y3klyjeRgd/Wjczp3HFCVzxU6JnQl7w9RiNl9PWspK8hVNEUoIyZ2PfdXZDSJzXvG50nhtMSgiom9UaUlZGJ8To/34B5wa1otanlY+7mV7YAeG5c/wDPOsJ35iqd/YM+V/imP+K/fjPzH+dfdZSn9SPjsbF/Wm2dV4HBEKfhXNiXqzuwy0R6Joa5+Ujj1rx6q1PUprU6/SV+QZHUDmuKZ3Q0Ogso8g4HpmsZao6FbqPKqJdh4J6ipUerLv2Po39h34NeDdXk1T40/EHSrfUNO8PTwrDYXhxDI7N8zN6hVDMB3IA96/oDwd4UwmJpTzPERTeqjdXtZbn0GTYGFRurUTd9Fbv3Pp4/8FFba61P7JodraWul6ezLbRWduixxuoISJemMkZav22eW4BR5bt38+p9JTyjDUab523J73Z5F8Xf2+tO+I/xF8M6dqniOzbxhbTSz2WuXUBuTYXMvyiQRsdpkUH5SQdpIPWvNznDYCjlUsLRtFtaeVjz8fDA/Vng6N1B291abdDwX4//AA9+FvgA6pptz4p1LUPE3niTETpLHDubLPdSgsDNLywjU/KCM85FfydxHlmHweKqONRylffp5r1Pj3GHs23Fxs2rO3R2T0b0e6623Seh4+02ID0r5eMJPcwjK6KOlsXvmz/ertjHlgQo/vLl/VF27sDvXI2uYusx+mL+63kfjTcrBR1TK1wrPOc1KZk/iHyoViOPSqlK5rN+6VoY8NkjvQc8dHcbdKSpP05oT1LqvmKkiEr05qo7mcNyGdcREdPWnJalTM5AQ/A59cV0QWhLRJdEmLBHIFCirky0izKulba2D1FVFc0uXucFaapUXN9E39x9s/BrRjpPgrStOC/6qxiBHvtBNf3hktJYbKqFJL4YRX3JH+RfF+N+u55icQ/tzm/vkz1fwXATfxEDHzivZpu8j85zKX7po99haJtJjjIH+qAbI9jXHKMva3Xc9DDwpSy6MZLXlseP/EG1EV/IFPGTjjFetfmijwsC1FuK6M8v8UW+Sx6H0rzcRE+wwM7WPM/GNqy7yRjPt1rwsTE+3y2pqjxn4iad5gclfXpXzWMgnc/S8mrWaPnz4maT5czsydSe1f
G5jR1P2LIsRzwszi/AU4svEt5o8jYW6i8yIHpvXr+hP5V+X8V4W9ONZfZdn8z9m4NxiVZ0n9pfiiT4jMDpmD2r5vAfxlY+6xz/AHBw2igfaBn1r6Ccm1Y8Wgrtnp/g07rdPTvXiYu6bPeweht3EIk4HT1ry1LU6J3lIWBCsRXHSqlFbjceWJEELhgtZt2NqL0KV7GV4FOLuzDEu6ZuWuFjBLdq2qS947K3xMYzBnAxyema0pv3dTmjF81zX0yM/ZjgVzVZ3ZvKOly1ZKd+AO/XFc71Qobl6IEEnHPrUtWN2mP8os2SetZy0ElckKFkAOMipWkjZaIWRCEAH4U9WzNq5ZiUiEAk+9VFO51RhamPUhE47Hir23Mk0ndlaS5aRmRW4Jwah33sTf2tQs2sAiGW56Go1kb35VYuwHcc4qJq2hb+Ajk5fG7OP0pQ+IcdGR3qj7NISOdhrp6IGtWfM3xzXHinP+3X2WS/7ufD5yv353XwhTOlJj0rzcyb9qz0cr1gkenW6nyE6fd614E/iPbkrI1dPGLfGPxrFgloWNLXMzFl70nFjp6yNCJS0+GxT5bHfBKxJeDYMjtii1zmraSJLVzJD8opTjrY6YWnALMH7V5bLxmptaOoQiozF1qZoRwuOetKDuTiE+hRso5bqdtxrSpOUFZGVOK5rsv3ERVQo7Vild3ZpJ3ZEUbGQKc9jWXwaFSVGaTB6Y61MWkjKEmQ3kEoXAOPrTU9SmuYn0WAKx388dxQ7thGXLoanh9f+JmQBxuFWtEcsZXr2O1GMgdOnNc82z1X8KLKjMZBH4Vk7otPQgVcjkdKE20KL1M/U4irkYqrEVFaRWXmIgmoadynojLlhZbv0BNaxi0jKTvsdn8NNO8Zafr1l4p8Lz3NlJYXkbxarDGcW0oOVJboDnsetehltCt7ZVafR7i9j7f3JLQ/S/4WeK/+Ed/Y9/s7w1o9nYeLtSa51C6udPgCLeoWHnsoHEZY4YqoA7gV/VHC6XJTqOK5OXVW+13Pq8swtOji4VJNOmkly+fR+ZyHw9+P+meMX0zSfilJbarY6fvt9Vt9QtUlEwIZUiYsN2xSQcgggnuK+srYOhicPVhyr3tl+Z3YuFOaqQjHl5trfn2u9jwv9rX9mr9nvRfDdz4p+GfxjsbjxEIVu7vw3a2Rjt41b76ROTyVPQelfjfGvh3TxWHr4/BYd0eTW117yXW3Q8itgauIpSqex9morrJO/n/X3HycsKhskc5r+dkpbHgxaILyMM/I61vBWiVN2VzMv4mlj5bgVtSk07M5qkk0VIsQLhzjPetZxTM0pNCS6pplpIDJIM+hNP2b6ImFenCfLI1La6iv7TzISMY6iuWqpKVj0IzhylQqfN254rKxLs4j3XII+maGrBe6K5RgwHbtVpaChoTKCUwPx9qylpI0hJuY+NMRsO+Kyk9TSrblKdqhM7D34rrhfl1OWC0Ol8NgeeAQOOtZyepvFaGN8TYwbkfUVtRWp5WOj7xUiTHhyUH+5/Spl8RVOP7lnyt8UAf+E9fP94/zr7nKn/sR8hmH+8nV+BlxEmB3FcmJvqdOGPRtDTaBxxxzXi1XuerCyOr0lfl247DGa45O7OqGp0dgcIQBk46VLVjqhFtEogXzNxPGe/asas3yNI2jZH1L4LupPBf7I8mhy2fkz3d1DcW534aczFl6d8BQB/vGv6/8OcHiMFwlQjBPmkvz2Ps8LWVDBUXDzbPn3x74/wBL+C+iXOr6r5iW+iWsq29tGQfteqSkEKR325JNd+Oxry3mjNOLV9LdW9dO99/M83NM6q0sPKpzXb0R81fBfxX4s8XfGaDXvFF9MXa6af8Adv8AOF68A9+mBXzjxteVOdao/Q+OwuPxFXE+1kz6C+J+u+ItcaFbqOGyslzJDo9qDtjPeWQnmSVupZifQYAxX4HxDja+NxbTVld6L8zR1J1puUnds5NpN8ZxxXza3OhRUUQaOcX/AOI610WvC5zpv2hqaoNzEY4rz3uy56k+mxAWxGKbWhUFywuVHTMuSO9OKbVibXkPnB2ciiUbFNakDRkHp1q4pJEzSRFMu4HP4VDfvEW90qSDjOK0huRH4iG5UiMkDim9y6mxnpGd/I963j8JDauLcJmIg/kaUXqKTWxBpmmtqOr2WnKCTcXUaY+rCvVyPCvHZ5h8P/NOK/FHynGOOWWcLYzFP7FKb/8AJWfdvgyyWGBLdBwihQPoMV/dtKKhHl7H+QmaVXKbk+p6H4Ih8vU4mwMqwPSuyilzHyOPqWhc9se7W8s9wCq7KPurwK5eRxlZHq1MbDEUOZKzaPK/iDGXuHkY7juOTnJr01pBHz2AquU3fe55f4mt9xdc1wV1c+xwU7JHm/iy23K4Zs49a8TEK59ngJ2aPJ/HNgGVzt49u1eBioJo/Qsqq6o8J+J2kGRGJTpntXyOZR0aP1fIcVyHiesvPoWuwavGCDbzBjjuO4/LNfBZlhfrOHnSfVH6vkWMdCvCpF7NGh8SJYpNP82I5RxuQg9Qea/OcDCUa3K+mh+xY2onQUls9ThNJl2zj3NfSKmlHU87CwlO7PU/AvNogPpXz2NSUme/hlqdD5ZfOa8mWjOlx94Ux7IyO1aSehVaNooht1GCzVjJhRWhUvkGCSKE7GdePus17eAPAB0yOtazl+8Oup8bQ1LfEmAOe3FbJc0TKrGy0NvTeISD0x61z1YpO5MG2tSa0B8wjPesm0kXH4i6GC8r+IrNts3lK5JDyCSO/asp7hElQMZMY6VLRVwlGcL3z+VXAuMbO5ZC4gwBz3rbZHVJ+4Ub+/EK+TEcnPQVmtXqcE25OyJdHtJJCJpR17VMmtkdEFyRv1L04Mb4UHoM1UdgTuyzaYVcEZxWFTc6ErxImB8056npRAq3UbdAtbuP9jrXT0Qktz5o+O6lPEoOP46+yybTDs+IzuyrHc/B4Z0pDj+GvLzL+Kzuyr4UenW4P2dM/wB3pXhVNGe9NaI1NPObchelYp6hTSZZsSIpTvPBParlJ8ug/djLQsRzfvOuKhzk1qbUql3qOu58x5JJ4qU22KvFt3JtEvk2FZCD2FObaNcPKMYliBc3JZSDUSnJQsVdc1yHVna5baPXnNRG6WpDbnIXTYjC/Hr1rW11cLLnsT3bkjPf3rOUrbETspEYGU4HFQ22dENaZVIKyEkd+lVbQwXxDbwExggZGMgmpimmbok0kFCcjAHetm7IxluX/DYDav8A8CoSbRy02vrB2zJtO4+1YPc9m6ZZQAx9D7VjO4m7EaLhyw/lSg7McGUdVj65yeBWjY6q2ZnxgBCT3pN6kSehRkyLjcRnBq27IzvbY9N/Z8Hiy98faZpvh3xFLpceoyG2kuFAaKcnkRSo3yupxjaQa+k4ZwdbG45U4ysmdlNScLn3H8efiN4R/Z/1r4Y+CPF9mdPa70VpNWh0qQxrDJOSUcLyEQcEg8Y4r+n8lw8aOW6vrZdNjbB4qv7Jyi7q9lf8THl+Hfhy7g1PxTbSW1sIJd0lvG+5W3jPnI2MFCOvpn2492GLXMlbpuevCtUnUjTim
2z5E+Lqj4f/ABG1g3mmvPFHZrBYNeXZwzyE/wCrXILADnkVHFeJWD4YxWLm3pBpK+l2dGNxapUpSm9WrHl8gzkHqeeO1fw8m3K7PkqcPduyKZdynJ6dK1NG7qxmXWwSeTx14zVJpHHzRjJpmNr8r2doZD8oKn5iK0Sc1oTVdqbaPLNa1/ULzVjbxzEjeMMK9aMKcaOq1Pnp+0lV5j1bwF5zaGokJ+51PWvIrcqdz28LKdSOpoMmHI965b3kdyXKrCycLnFE2b8vuEDEnAHrUxZjFEkXA5HXtSmjaMfeJ1X5GGOo61hZ3NKiumUYlIuGGOM9a7IbHHF2VjpPC+1p1DHGKie5001oZfxMjUXinI5Irek00edjV76M8bf7Bl7fuz/KsqmkgT/cux8r/FMD/hO2IP8AEa+1yl/7HY+Lx7vijq/Aw/dJ+Fc+KTTudmGWiPR9FUgDjkDnNeLW0PUjsdTpfyjYOvBzXPy6anVSTZ02lwkrlv4h1FYVJdjthex0/gX4W+Pvibrn9i/D/wAKXWrXSjc8NrDu2r6segFellGRZlneIVPCQ5tdexpGjWrS5aaufQfx08Pa5pXiHwr8M9XgntLiHR4DNE0e0QMics3rtG4/Wv7Ty2ksuyGjSmtYxX3o+xkv7PyyLl21Ph39rrxhazeKpLrStNEul2TtHZG6YskDZJe5kXgySsegGcDHpXw2ZYl47EuXT+tT89zDGRrVnbWJ5p+zhrKz/GKzuwHu08wAXEybMfQdh7VhWpQlg5uOyRx4K9WraGx9KeLtNutbjuNW0TSJWt43P2jULhAAzf3FLHn6CvwfNMHWqVJShHrv3PUhFRlynIhMR7n7CvmU7MU5NOxFpKj7cfrzXUpXpkU1zO5rXiF5QpHOeK4HbUp3uXYoxDaZx161V77nS42pGey7mPcZP4UQ0MI6yJJIwIwO9EmazVkQvkDJFZ3Zg1cgeM4we3tS2HbQp3AO4j6VtBmS+Iiuh+54H1FH2hz3KESFnwPrnFbr4SZJXC4TGVIxx6U49zKW5s/CTSv7V+Keg2ZXIF8JHHsuW/pX2/hzhfrfGuEX8rcvuVz8j8csweXeGePnfWUVBf8Ab0kvyPtnwjAWiVuhIr+zYs/yuzGa5meg+Bo1j1WIsuQGGRiuujHmufKY53geuXCSJZvHDgblAHesI6z1PQxLqRwzjS0ujzDxlAwmcSLgjOT616NvdPFwL5XY828SQAFgPfmuCsj7DBzvY878UQHD8Y968bEq6PscDLY8t8YWjkuMYPpXh4hWR91l1RKx454/0sSK4I9eK+Xx1LmTP0jKKzVmeD/EPRyHkUD17V8fWo++freR1lJq5zGoag1z4OjgmfMlu5ibPoOn6fyr4TMMJHD5tLl2lqfsuExLxWVRu9Y6HPaRGRcDnqe1bVLKNrnfhJ80LI9V8CqRapn0r5jHP3me1hkdKhAOT09a8pq7O1R/eDpAfLOfy9aJdhYjZFVMhSFH4VnLciiVb0EIcn60InEP3WbdqAsYU+mK3cFKdy3U5p3RLHAxYELV3UFY1nqjStF2jHr1rmqTuZR1ZPFGF5HXHFZNrlsaJWehYjDN+FZy7G1OPMyeMbEGPyqZWKasOTIP40uli6ceZj5SVIb2raCsKpbmSQXuoLb22Oh7j1pOPMx1alocqM+wt3u5vOkOR2zSlK2gUoWV2bVo4j4xgA9qXLyop6q4skoaXaxFSmTBcxYgzjjgYHNRJq522UYg6rvJHepi/eE2RyD9y4I/h61u37qBM+bPj9GB4iU9Pnr6/JHeiz4XPH++O0+DvOloPYVwZl/FZ6GVaxR6fFkWycfw14NXc+gnblL+mSMIuawSuwpLqWWLI+V9jmtlBNGctZFm3DSgSA9etROFtjppxSRdFtGItpANZxjZ3KrR90ovCIpv3fyjPaupOPLYwiktjT01mCEsefWuWra2h0Qg3uKYFaQu4HXioSuU1yahbg+aexrV/CKGsri3CFyVH51ildiqRfMC5VAGAocFua09NCrMpWXBGPemmrGM42kNeMlBkUXNou8SazQRk56U07mL95k/hjzTq2FXjdXQ2oQuYUqf7+53iQgYJ9OledOTbPWukShty49uMChJtEvXYckWTkg89azejEtGVNUiwmfQU+YqbujIlUqSuKSbZlK9yrMqtIOCfm7V0JLl1KUF0Pev2JI01P46eGtIg0eG5zq0QVYoiVbJAIlUjA4JIYdD9a+w4S9r/asXCLtbex6EJ044WfO9kz1r/gqv47hu/wBqbXrLUPG19pWm6Vbx2V7BprMjXFqkYHkZUHOSAMYxkgngcf0tTpxw+R0HKN/teafcyjajk9Fw66v7zf8A2NviZ4M8beEhpfhS9v5NItQtvHFq7hrqzzwUk6F1zznFdUcU6qi1vYv61L2V1ueBf8FG/CGk2nxS0LXbeNg0Vy9sAE4ZvLznOOnUj61z8Vxni+CMTTau7G9bmng1KerPEWQgbvzr+NZLllY86LtoI8ZaNsA89KcpWQ7KRzmqJcfbS+Mbf8aqFuU4atNxndEeuJHqGkeRs3HaRtxW1FSvcio+enY4fT/h3cxah9rnQ7C2QD2retW5vdTOCGHs/ePQtEhSzsxbxjAC4zXDODR6uHjyxsKfv59aw2NZS1FxuGOMUpNtHUrONiMrhixBJognY53pIIs7ifWqexvB6lmOM+UxwelYydmby+EoRhvtDL2Jrog/dPOUbO50Hh3KzCs5XbOqDRlfEckzISeR610UUtTzcbfmRRU58Pyf9cz/ACrOprKwJf7Oz5b+KKH/AITth/tH+dfa5SrYM+KxqX1k6vwQoWJM9wK58Um2z0MKro9J0FSzDA7V4lXWR6cUdRpCIzDAzjjmsamkTrpJ20OstLeaG0F6I22dFbHDH0rnjTlPRI6veWx9r/sxXsnwW+ANqY7WTTtT1+Vrm5uIn2XN2qYIiB/hjxwfUnjnp/Xnhpw3RyvJIOrFKb95t29ba+X/AANT7nKKGEwODVWtG8n36X2OK/bB+J+v+HdBk0/xKou/Eup2hkcPiY2VsVJSBTjKyEcnngYr188x9LlcaTsvI+U4gzZ1ZOMHaC6dz8rvjLea8PHMt7r1jdz28zlzb3HiBZIxz3jiIKduOor8+k68qq8+zPzrETlVqpR0Rvfs1aZ/xc2xktlQKZQ2xmOMenJr3qlN08BP0PsMuhGjS1Ppn4oaNqutzPrHiWfUZFgCixVohb20K9gqnBbPqBzX8+Z5Kc6sueTtcULSldM4l5n2bX49q+V9xsmV7sdo+ftpfb3rsX8MdF+9Y2ZBvnxjjPGK5GtToULT1Ls/ywBMc4qG+iN6vwFCNDn8aIvU54qzJZh8uPUVbLm7orSYxU8tzJK5E5BzxxQ0S3Z2Kdwu1yCOe1VBaCjG7uQXH+px0oXxEztzFWFArgsPwrqfwkyGTpufHepTsjJrU7r9mfSftvxTS7ZeLSykfnsWG0fzr9c8FsKq/FVSq/sU397aR/Nf0ocw+rcC0sMnrVrL7opv/I+vfCy7YVHpX9VQP83se7yZ6D4BUS6s
q+Xu6cYrrofC2fL46LlFJdWeq3Muy1bjnCjAFYxXvHp4ut7LDtLfRHmnjPfJPIzsSQT1rutZHiYN63fU848RR5LE8e1cVY+twb0RwHia2yXB9OleTXjc+twU9rHmfi+zdt4x9Aa8fERS1Z9xl1RKx5N47tFhDGXqc7R6183jU5n6DlNVzaseH/EbSZMtMY8H+7618pi6ahJs/VsgrxUrXPJfESXNjHcRCP5JQG+jCvhc7pqpUjUXTQ/X+Hq8anPRb3V0Z/hmJpZwZSQc8V4+Ik+TQ+tw79jues+D41S2UL0r5/ENvc9vDS5nc6CNcHkDmuE9BbizkCPpgkcVk3dmVd3K6AKpyO9TLcVKNkU7s7lYEfhTtYivbkZs6eDOVc/dIziuiclDQunBQjqaaKgXpjjisYqUncpvm0LECHBIFYyVmSlZlmNdqg+o4rNs0VieCMhsnj6UX0ub0HYlK4OO1ZNhLWQICOetbU1c3hZRC5mEEQkbqOme9Xd3sjnrvl1M4GXUbkAk4Bxirm3TVmtTOhB1JczNe3gW2t9qisEru51TktkT26EoTmpqTLdlGwyBD553NnB7ik9gilBGjDDlTj09KxloaqV0MdMMQSdvrThrIFdu414w0LfQ1tfQo+bP2gSP+EkCgdH/AK19hkelA+Czu/tzsfg4caYn+7XBmb/es9LKvhR6dCGaFVJHSvCqWTPoJr3UXLBtgAP4CoWrNKVlEsXMxjjVs846VpGTeyMqu5b065Vk3EYU9qmcjopfDqWLq/8ALiIB5HSsbXeg6t2jPtLma6u+e5wQa1qNRRz0YtTdzoLRFhiDE9O1c6vLc74tNhNMASM8ZqnKysRVd9CKG5XeTt7+tKUu5NKyepL5yg7mwT2zWXM+hVSVw3oxxgVMpMKbsVr1G3ZUU4MqUL6iwLuiG4cnrTabZnflHRIxkK84PpWiaigiang+zkm1oQxRlmJ4AGSaicnJGUHy1T1fwt8JvHHjW/h0zw74curmSWB5h5UJIEajLOT6DHWtqOBxFf4Y+ZvKvBK7Z2nhn9jv4uaj4Dvfilr+gT6XoNjp5vLi+vIiuIixWFVH8TysMIo5x83Su+lk+IdGVSaskrhHGUFVVJO8n0POLvR9Q0+CC4vdPmt47qMyWzzIV81ASNwz1GQRmvFq0pws2tGbpxlJpPYzNQiyv8/asS2tDEvYSmSBj3rSmr6mUlqVobf7Rdxw93kA+vNaTvojaFrn2F/wTLsLiz/ae8P6FI02nym8Aeyugsq3CLhmA4+TGAQTjPY9a/R+CYSWKknf4e2jLxShPDVYvSyOT/bs1OLxF+1t428QGaKZU1hoVtLpQYud6qWBHK5xn1xX9I4mj/wjUodomuLlGjgqUI62ijK/YW+Kn9na1e+EfEFtp935eom2v9XZjFuKjKR2zHPmoBtwh27e2a8bAxUVfqeAsZXr4lRi32d+x6Z/wUM8JS+NvhLa/FfSrIxxWRjunwg/5YNtk/ONia9eg44ihUwtXVSTv8z6fC02sPKm3qtT5CvYEU5gbcpGQexHUV/H+fZdPLMzq0JL4W7ehwVIcruVmH7sj9a8ezkJNNmZrEMcaiTZzxzjrWtOLTuYYiSiZVvC08md3GeB6V1cySsctO83dFi6tgoGT09a572d2XUiyWygLJmsp1GbUWnEZLGUfAB96werLcR6oSv1oaaRvTkmQy5VuRinF9DOa94IEO4ArVPbQ1ptFyFf3TAkcisJbnRJe6ZpXFyTjjNdFNaWPO57uxu+HTvmHanONkbU9DI+JLbJhn8a3oQ0ODGSXMkUrUh9BkAYH5Dj8qyqRfPYcbyw70Pl74syGD4gsgXPzdq+6yyCWDWp8ViaUpYrU63wDC0kSM/tjNeZjaiTsj28PCMIHpWgrt6DkjgV5L21OqKcpaHUaREVlB7nsa4q1SPModzsh7qse6fszeCPFnxJ+JXh7whpunQSafdXLk3N3biSO1kjUOzc8AlAeDxg19xwLktXNsyhBr3E02ell9H63X5LXS3PoDVvG1lfeK/E3xMu1tpNF8GQJa+H7SPG2S5GVRD+ILkfjX9Z42Ussy+FCNnGST6Nq11buuunVWfY9zO8YsPT9lHoj4B/aj+K3jD4g61eappM2pSrBI5vZ4xta8c53hZGZQq9s8nA4r85x2LjXnJvZH5ZmGNnJqV1ZPW/U+Sbmyjk8RyXMuhNaSSvkp9sMxOT1LZNfP4CKq4nmSObB0fbVue1j2j9mPRzdfE3TrFohtJzKGXcGH07/Svr8wrxo5ZU923LHfvv+P8AwD6ulKUI+R9ReK5fCr2byXPh7U4HLHN7JcRNI5HQBHXKr9K/mjH4qjWqS5oNa73N+RuSaaPLPEcFxHme2zjPfrivFpwUnuYVVJ7Ffw7qW+5IkIXnvXTL3Y2RNGShK7OlhIeTzAQRmuV3PQjKMmXbxv3YGew6Vzyb5hyK0QBxxznrVwiyHHS4XHCbc9q0k7IiT0KzLlcZAx61ClYUdHchcY6n6UORFTVkEybhnb0qoy0CDKt4hC4zz2NVF6mVValeGMj/AD0reUlykxegyRfmOevfFZpuzId7nrn7I+lGXVtY1YpwohhU/iWP8hX9CeBWEdsbin3hFfi3+h/Fn0scz/fZbgk9o1Jv5tRX5M+nvD0QCque1f0NA/hfGSu2ei/DqJ/7TVkYAjGDiu6lb2bPmsU25xt3PSJpsRMkg3cg5HQGsUlzHdiK6hSkpq7PO/F0cstxLMzgjJ4rrs3G7PHwctFc868RRtuYgVx1j63BPY4LxJHjcMg5ry62iPq8E9jzjxarh2igTc+OT2Hua8LE80nofZZe00nJ6HmPizSSGeVjvc/xV42IjpaJ9zl+IvZLRHkHxC0nekhK/WvmsfR91n6Tk2JcWjxTxrpx+zzIF5U5FfEY6hzwaP1vJcV7HEU6iOY8PyYuFXb/ABenSvnZwXsz9RqWnZo9a8IAm3X0IFfM4pu7R7OCtynQxpzzXnT0R6Em1qhHQlME9KwvqZ25iq2RnjAq2vdubRVkUbrkNzQ3octfWLPSfh38MNV8ba9Z+GtJmt4Gu5/Igur1ikLSn7se7GNx7CppxniZJodWpGjpLc9m0r/gnV8bvEmkWGqeFLVbs3dnc+dbFCsttfQZL2bj+F2UZQnhq9yllVSUdGcNHHL2tpKx5v8AED4OeMPhbrcuia9YNJGLaK5gvIYyY5oJR8jgkccgqQeQysp5FeZi8JVoS1Wh6vNCaumc+tsV+Ug++a4ZRaHFWY6NTEwyKye1i1oxzEn5QetOMbnQo2V2OQqOv41t8KJjK2rMzWL5pnFvCeSaIrqznnJ1Z2L2k2gt4Azr8xpfEzqiuWNkXWYsnJqZys7IiWjJbVgI/p1rB67myvbUbCrfaDxxmtI/CD6GlbYAOfSsZp3LjJJWIZRmTJ/ECiKszaKdh6RF0Yf7JrYLq582/tD2wi8Qq7f36+vyT+BY+DzqV8RZHV/BmRJNNUIvIXnNcWaRUajbPVyqlPkTPUbRCYgD2WvAqyi9j3ZxaSLFspJwc47Gs76EQdmWdRt
1MYPbHWqg22ays0T6QuYwpqZp3Jg2noTX0SlNg/Os4t3Nt2MsLQRuWHQniqlHmWpE1yyujYi5ADd+2aTfKrIqErMV7QOCefzrHmbZ0WcmPh05B0P1JquVvczlBpj3soxycc1KTuUoXiRiBQ3y8U5RVjNXixtzACASKzjudF7K41FWNQCOT7Vra5hbmZLbwkvuxx3qKjSZfKkz2v8AYP8AhNf/ABJ+O+nwWtgbgBm8iLy9weXhUQ54OWYV62W4P6xVSseXip+zi5PY/fL9nb9jD4OfAf4f2Kav4esZ9Qh0EWN/eXMahfLPzOv0JJz6195ChCjBU4K7SsfD4nMq9Wo0nZX0Nrx14B+Anx38Ox/DK5u7P7DbEOlpaIqLwuwbeMBgp2gjle2Dgjo+r81LlnHQinjcRhavtE7yPy1/4KcfADTbPUrbWtM0yDT7ttbbR9D0iFX8xLOGMLb28EAGWZ2LMW6cepr4viGjGKv1vaK8j6/Jca5vls7NXb835nxt8W/hX4k+Emvnwr4y+zQ6msKyXVhFcrJJaEjISUKTsf1U8jvivkJxlTnyy3PpqVRVYc0djg79QY+aum/esNpWKIEZcEgcMM56VrUvZWFHm5j7O/4JoeO/EfhT45+G4tejmuLS7uY44EutNh+ReBlH5kUDOeymv0rgmrUWLcJyesXbsa4nDVMRRmm7aHk/7Z7tD+0Z8Q7m9gY51S5WMY5dfNYKw9SOeK/qWtBLK6Epx0cV8+n56G+Jw7hRpc38qPOP2fvilFofxBl07xLolvquosyLDqdzdGOLTYlwI/KiGFaTGcE85Jr42rWVHEqMHqeHLkp1NXZn6R/Drw58O/jb8AfEHgyFZ2hEDyWw1RcvnaQ6ZKjdkHqABV4epXo4uEqjvfRmkswrxxUXT+F/kfmR4o0OfwhqV14V1D/W6LfPYTEA8qp/dOfQMmB9RX5/4q8MRqwWZ4dbaS/Q9eUVOnoZsigKzetfgSjynPHS9zF8Qyu0Y2np6VVOT5jkxEHLUo6JDJy7Grmww9o6Fq7XBOfxrKUi6tiSzXEfGQKxmrk03YSaM5Pt0pKOtzXm0EjZc4A71UloXCLvchu1QvyOlYRTuObsxsQZmCp0HWttIajpx6l5ExET7dawbuzeU/dsjKkfbcsq+tdULqOpw8t5XN/w0u2QH86iUm2a82lkYPxSl3TKievIr0cOrQOGvTvK7M7Szs0sxtxlfWuStL3zeEkoWR4L8YfBGpx+Lv7cihzCTyfSvpsDjYyw3Ij5XHwqPEXWxseCU2RKG644rGrCN/eNaHNM9K8OWvmBWYDpXl16ii+VHr00ox0Ox0PSri9u44LaFnJYDCjJNcsIOpUUVuy23sj7n/Z48Pa/+y/+yj4v+Mnie0+z3HiEi18MWksYDHCYe4TuAQce9f0/4W8PTy7D+1rKzer/AER9dlGE+rXqS3Suzyn4462fhP8As56B4Lu7xrXUtaaXWNXQW/mybphgDaeM7OhJ4zX02b5hWjNuk9XdfJ6P8D57PMQ5zcoPf9T85fjrqq6rqdxLqLDUEUnbDqmsGBVHYpFGRz7V8BjJU+X3rN+p+eV+RxcJfEeb+FrYNMCkAjySdoYkL+fOK78mw8IrmasexltKpThqfRP7JunrB4uTW5nkKWdoXZ4s5LHgdBXbxTjaeGyKbTvdaeZ7MU5KzPV9bu/t11JeSyvvZiSGV+R7lySa/mXFVlVm5JWudsNFyoxLwCYEFfwrmhdMtpQMG90ya3b7VBxg5wK6ozhf3zkrUXKN4l/w94jJPlT8HOCDUzh2MaVWUHqb73fnxB1IIIrnlA9CFRVBYQQmSOtOOhq+wlwpYD8qcmkibJakfl7F5rJXbIlJFeVG3dO/FaOJnNNkQGQRjqamz3JUbPUrX6ELtHbrThuTKxWjjIGDXRYzaSZE4xJgUnZIhu1z3v8AZE00x+E7m+K/8fF+xB9lAH9a/qjwUwvsuEp1rfHUk/kkkf53fSfx/wBY4+VFP+HRgvm25fqfQeix42Kf5V+wxR/KOKe56H8PVP28DfxgcEda7qHwM+bxTvOOnU9Au5Jfsx3BVAGNmazUVzpp/wDBOnGSlGg3JW8jgfEjBZZSG9eK6G2ebhrtI8/8QpuLHHeuWofU4N7HCeJoQQ20Zry66ufV4KW1zzvxJpxjd3inJ3feU15NWn0R9hgq3Mkmjz3xRal967cY6GvJxEEj6/A1LWZ5V4408Or/AC889q+exkbxPv8AK6zTR4h4z04rcvGy9TXxmLp8tQ/V8sxDdJHB2VkLPWXt8fdkyM+lfIY6Eqc2j9ayjFPFYOEn6Hqvg1f9HTjOQK+RxWsj7DBrQ6GMcEYzzmvNqbHoTGyghC3fFYRV5ELSRSkJCn5eD3rpkrI6GUX+fOBWUkcVZaM/Vr/gmX+zD4H+KllJ4NvILfXNKZgZtMvdJkjktnyfnEpB+YZx1Ar7DKcFh4LXWP6nk5pKpzvpY/Sr4f8A7IfgT4ZQBhqKoxaPzGuZdzMqfcJJ+8y9ATzivedOkp2ijyniOaOpxH7TX/BPf4afEbwxe3mnaTaLHNaXKF4otwMcxDNgDpiQCQD1B9a4sXhoV3ZoFmVWLSvoj8MPjP8ACrxB8IviNr3gTXrIxzaNq0loxYdQMlT+K818Ri8JKhVknsj6zB1Pb01JHHOh278cD1rzpRSlZHoqnFiKuAQ3fpxVaRRu1aNipqOoJAvlxdT2oh77u9jhqOV7Ii0jTpLqYXE46daJytojWnBR1ZtFFBAToOuBSbtEpuzuDKdnHTNYLcEnNktoNoK9qJKViuZbCxL++JPTvVxTsU3cuxsfuoBjsaGkty4xuBj+bJ45rJu70NXJRViS3AyxI4K1d2kZyd3ofNP7SbyP4lWBB0l9fevtMl5YYdyZ8bmkUq3MzsfgtZiLS43xztrxMzrOrWaR7WAmo0UemQ7vLGB26V4/LZanqRfMixaR7+c4FWkmjN6SJ7su8QjBzx0IrWKUVdlKDauyxpqCKPk4z19qxqSc3oP4SWQF5PkGR9KIxUVdlwTvdk0CYOc1Dn2Lm0y5bglh/OspMzjuW/mY/wBKUY31OuD0HrgDIz15rQibaYkgyDg9cUrInmZXeN2YE5H0pSWhWhL5Rxhhz71nGOoNuSGx2hZ9xBwP1qpy5VoWlZGhYaZLqF1HZwIS8rhQoGazhFzlYirJKJ+pn/BCn9jbxhbfFVfjT4stFXQrDRUubGFk+9dSlghPHUIm/wDFa+7yPCOgnVfbT5nzGd4qEMJyLdv8j9Av2lPGMlxFPYPPImnWR2SJE+DPLj7v0FfS0JqGjR8lCDi7tHzN4Z+OGm6L8XLXwkZJprrzFkAadY7eEZyFJYfMfbFdyn7urdjrp0PbPsdX+2h4l+G9jYHxx4hNro988BEmreHXtxqCoyfNturkhbUEcFogZDnjHJHzOaV6CjJN9Pn8j28JTrwlThTg5puz2tHRu71V100u7taWu1+PXx18R+ANa8aXh+HGhWtnYCVsNBdS3MkzZ5eWeU7pXJ5LcCvzrFRoOpenGyPt6blCkoyd2j
ze7cliG6nrWcYqOoOPcqIBvOG4zxVVLtWIcuV6H0j+wV4zTw58c/CpuPDwnshfKLq7jtkj8vJHLuzBnHsM8npX2PB1Z0syhzaK251QlVq0ZKL1sWv+CnXgu18IftKeNkWJ0jvZRdWeeMkgSKw9iAw+or+tsJF4rIKFR32/I6K9aVbLaU+trHzd8Oohrt8PFHgC4inkivVmt9GuGRbcyYxJMzu4CtwACQcDkYxXxuKhOOK5ovqfG4lS9u5tf5n6s/sP+I/FPi3wRaaf43u9Iv4mO2NbG8huJLbK9GkR8tjpzmuWtO1S8ZbGns4Qj7SDafmfBf7ffgVfh/8AtW6vobyra22uWzI8phVyJEyUYB8DPbPUZ4r6NUaeYYFQqrmjJWaPp43rYaM1+B4xHvk0yC8OCJVIJBJ+YHB6gfyr+XOLshrZDmMotfu5axf6HPUkpXsZur2/nRcDpXydPcxlqippUZRipXjNdErJGdODbO++BH7M/wAYP2qvH7fDT4J+GU1TWFsZbs28l3HAPLjGW+aRguegAzySBV4DA18wrSjS+zuPEOnQp883Zdepx13pWs6Bqd34f1/S5rK+sLl7e9tLhCrwyoxVkYHoQQRSxeGqYStKlVVpIdNU7XTuQSjf36HtXLdJG65Yka4ViFPJqG3IpzSN3wp8IPiJ8R9B8S+KvBnhyS9sfCGlJqXiGeNgPstq0qxCQgnJG5gOOcZPauzC4CviaVSpT2huZ+0purGDestl3MnQdB1rX9VtNA8P6Tc39/fTLDZ2VnCZZp5GOFRFUEsSegHNcHJUrVFCK1YOuqdNzlokX9a8N6/4W1S98N+KdEutO1HT53gvrG9gaKWCVThkdGGVYHqDVyozo1OSaszSM4zgpJ6M5qG1vNQ1hLHT7V5pp5AkMUabmdieAAOpraFGrWmqVKLlJ9FuZq50WjQS28pjlQq6nDKeoI6isuVxk4yVmtzelFHNfEVlNyAWHB5J7V2UeeVlY8/HX51E9Ak/Znfwj+y3eftF/Fb4hW/hqfUokk+H3hCTT3mv/EUAlVJrxgCPstqoLbJWB8xlIUY5r6OHCuPxOAqYpRdoq5nTo4uu5vDwcqcF78tkvLzPnD4iXgu9MLEAjOQe1eVl8HBnj1pKqjK8D2LSyBmGMHiuzGVUlZGuHiken+HrKSUqscZOOuB2ryPZuctTu62R92/8E7v+CfsPxj1RPi78Q7WeHwhpF3Fc2NyxaGS+kC/NFjOCmTye/QV+ycE8I0qVsbi43k/gi/zPbwGDjSn7WprJ/Cv1Z6P+2p4otfjZ+0H4Y+AXh63jj0azuUja3hfEUEURDP0H90Yx71+6RVLLspkpxfNNaWdrO63VtVa6tprZ30s/Yx1V4PCcl9Xqz48/4KFeINP8XeL9Su4/D97PBBH5EOy/FrGkSDaFMjbcKAB0JzX53jMVzVGr7H51meLmo3g7n5yeP5NFuddNpYadpkbh+tncvcOf96Rjgn6V85Upwr4hLQ+XoWr4pXLXhGxuJb/ylO1SuGdu3rX1+W03TjqfWUozS0Wh9Rfs86DqGjeD7nXrQiKWd/KjZSM7B9cV8X4iZhNYeNClKzPRoWlLU6e9e7lXN3cF3J6EV+FzlJy953Oumlcy7hwueMAU20KsrakUW2ViNoINTN3WgUpXVjP1bRDG32iz4I5OKqliLe7PYyxGGT96O47RNfZD9nuOCOMGt5WkrxRw05ypyszorSeOWPcrcEflWEkerGopx0H+ZtJLD6UJ6ag4NvUxb7xhpdte/ZGmUMTggkVpGlOesUc061KE+W+poQz293biWNsgjIrOamnY6HONiB+pAwBSs0jlk22Vrghvx9aSdiLNlc4Ude9bxkmhNOLISBk80pbE1E+U+nv2ZNJGn/DrTxjBl3yEEerH/Cv7R8NMH9S4IwkGtXHm/wDAm2f5Z+OmZrMvEbMKkdUp8q/7dSj+h7No8QyBnmvvIrQ/B8TLQ9A+Hsb/AG0Mq7iAMDFddFrkZ8/Xb9rGy1udxfW+ozQs0RBVVzIfQelKDgpasvGUcVVg5fZW5wfiBQDJ6k1tO3Q5cNrY4PXwxLdiK5Knc+nwmhxHiFclsn8RXnVtWfT4NnCeI4Q7MX/AivNrNH1WDnZJI4DxLbo+8Bfzrx8Qrn1uCm1Y8y8Z2Pyvxxzwa8DFRufc5bVV0eK/EDSzvaUAcE84r5TMKWtz9PybEe7Y811S0MOsRXQHEnB+or4/OYWpqaP1XhfF/vHQfqj0XwbzZoM84A4r8+rzUps/UsGrx1OjOMZC8964JvWx2z3EkT9znFZx1kCWpm3GApH610z1N+5QiJEuGHU1nPY4K8tWj+in/gkzpCxfC3UdXt/CMWn3UFuXjWG584NgZzyeK/R8v5JYazseBnnOq1zp9V+Ndz4w1TULB7q4kmtGxeRKdghBJAyeOTjitqbine55dOMpU7vY9E+GnxA1LRbaKzvLn7bpVxH/ABndtzxzVyipEuKWp8pf8FV/+Cad18X9D1L46/CHTBc3981tPqFvCMtviDqW/FG6+wr5vOcM61G0Vqe3luZOnUjCS0PyU8VfCbxt4YTzNX8PXECO8wUvGePKcK+fTBI6+tfHfV60ZXa2PpvrEHLc5W5geOMrjNYOTlLQ6lO8ShDpfn3Pny/d/lV875bIhQ+0akaJEmyNQMelOPu6shzuxVU8nPNZTd2NXY4KTwBweaUVdmyaiiWCMhTnNaNpoyejBSFk9T6U0rIcE2y/ZWV1eSxWdnbySyyuFjijUszseAABySfSueo25G0p8ur0R6D+z9+zzfftCa5rPgzSPFtrpev2mmyy6DpuoRNjVbuMgvZhh/qZCm4qWGCyheCRXTg8N9Zm4t2fmcWIxUqMo2V0932OQ8O+EvFHiPW5PCWk6FcPq0azCXTmTbKjRIzuhDY+YBG+XqSMAZpOjU9q6dtUdkeSUOa+h8t/H1A/iFbiQH/Xf1r6LAVL0eVHymcyhGrY+rf2G/2RfA37Snw213Wvht8Z5Br3grw7JqHjLwjqeg+XejEhUS2IWVhd26AqZWPlyJnIRgRRLAUasZVJyafZK/p127mVDMnQnySjfsaVl8FPjB8Pfi5pGgt4HS+vYrc69p2UEtnqdhbxtctOjHiSLy4XJB5+VlIDAivGWHrfWVCKvbX5I+khVhVpSjs7foexftV/sa+L9b/ai8XRfAbwBa6f4Zl0O08XIkl/DBZ6RYXsMcyxNK7bEAklMaqTk4AA5Fd1TK6zry5FpucGCx9NUUpu7vb1PmnULe40q9k03VIDDcwNtmibqp9K8iesmj2FUTjdHqv7Nn7JXxW/abtvF2qeAP7PttL8C+FrjXvEmsatcGK3treJGYR7gDmV9pCrjnB6AV6GByyti4ynHRI4MVjqWHqRjLeR5xYyJcIpQY3AH868qoveseiproeo/BT9lb4j/G/4c/EL4teHLiws/D/w30VL/W9R1KYxRyyu4WO0ibGGnYbiF44X3GeqjgK1WhOstIxPOxeZUsNiYUXq5duh57AwJznj61xct1c9KGrLKsGbH5cUm+VHU
vdjqDOeq9KlSIVpPUbvKgFh2pttky0Y5HBHPX1FNXY1dkyI0p4U0m1FGsYpLUtw2uAFA696xbu7g2fSv7AX7IHiT4/fES1eHSpJYWmEUaqnVdod29MFQyg/3jXtZZg6lWomlr+h5eMxCpR53sj92fhR4B8Lfsu/BO30WUwQNBAJLwx8B5yoARfYABR7KK+7oQjCKiv6Z8LXrPG4ty6XPm/4z/FvS9XhvI7e6lljgV5Lm4jI2xsckku3yK3uTxXRKcYy1FOm9bHw14b/AGhLH4gftDDw/wCG76H7Bp0hBTTJd4d8/ellwTIcemBWka0pU7LY76FKcaXtD2T9rDRtO8bWdqbP4c6r4w1GK0Urb3O+PTrXj70jE8+/SvnM2um58ik0j2sBKpGzvZH55fGXw3qHh7xTNa6zqWitcsSWstCZWgtR2TK8ZH1NfCV7892fUULKOupwN2mHJ7DvipTui6jvsU4tzSnaep6VTk1EIwW7PSvgx460X4f69Zas2nWjXPnLi4u4WuGHI4VB93616OXZhTwteLjC7v6m6rRpRtFanu37dOj3XjLXtP8Ais267i8SeC8KQ3EdxbYJQZ6ZADc88mv694SxbxOUWTdkrnbhY062G5Xpa58UC28QaRLew+G71LCys7tZpjcQCSG3DYw5Qg8tjp3x7V4GYxcqkvet6nyWYRnGUpJfNH6G/wDBNr4u6tfaLHEmv+EtQhV1Ah0S1hsZh6lgqqzH2JNckI0+V6nHB2pO99e7uYv/AAWa+HL217ovxh0qFo0TZJJIqbiGU8g/hX0eTTlPDSjfY9XLsZKVH2aPh1rvT7PxRczM3lWN4Ulk2RY+8PlkGST1PIFcPFXC+FzzL5Uais3qn2Z6qjy0/e1LOo2E1pJ5M6/eUMhxwwPINfyzmuTYzJMdLDYlWkvxXdGaiuS/cpwwqj8cc15tS7Ri3bY9g+DP7PfjD4j/AAi1P4ofCH4nxQeJtB1tI77wpZXZg1CSxMYYXcIyDMobcGVeRtBr9W8LcFODq1cNU/fytaNk00une/yOvJs4xGXZpyte5JW12+dyr8d7rV5ptE/aJ1vRbW/nnmjg8VW12hMdzfW5G7zQMHE0agk9c7u9dPiPklWhmNLOFS92VlUVtE1uejmWWcuNdS1oz102uVf2ofhb4P8AA66V8bPg1azTfDnx7ZSX/h2OWbzJtIuU/wCPjS5jnLPE+QrHlkKn1r89z3Ko0OTFYdXpz2t37HlxwlWPuVN09X0a6WOz1v8AYa8O3niPwF8Kvhb8VLnV/H/i3QINQ1jQNR0j7Lb6MZIvMxJOWICgYG4juPWvtl4Z8+CU41nGryqXK1dNeq27HZDJsQ8JXxNZqEYfD1cl5W/Ix/hBovxf+BHxU+JHwC8WWUuk3Wq+Abqx161PzpNBHNHKWVh8roQuVYZBzVcJZDicDmuIweNpaTpy1+W6+89ngvA4TFZrH63T5otPlb6SadjW+Ctp4l/ZU/Z38QftieHCp8Xajqf/AAi3wyuwoJsrmQZuL6MH/lqkR2IezSEjkCteGOFaODp1cfiFzWdonHPh2Cqyw+J1jFuTVt0npfyf6Gd8Xfg9478TeALT9oq61u61+8vLW2j+JE10f3+ka1LkeVOWOS8gUP65PPUVrx7wfOcoZrhVa8E5Q7WSu7f19x1ZhgqFbFKNCCp+7dRXWKW6XY5n9kcJ8Nvi8nx18U6I76L4U0u71GC6kg3QvdouyJDng/vHTI6jIryvDTLVDF1c5xVN+whGSjKzs5K10ns2rq6vpdX3PJwWEhXdVVvdSjf11NP4Afs3+IPi/wDDLxT+0D4x8Uw6FoGnXRtrAvbb59Y1WVspaQrkAKM7nkJwi9ieK8rB8L4nOniMfVbjFuUvXr/wCKNOtUxUaVON3L8F3Lmi/AL4E+FPCuoftFeL/iLB4/fRvEraJ4U+Hem6dNHF4r1YeXsxLkSSWilsuFRS4Crkbzj28n4ew2EwNLFV0+dtvlaVktLapu736WVt2KeAlUxzg17iV3K9rPqrW/rqtNZ/2/fiZb+FPibqvhTxtYTap8X/ABT4UsbTxBotxdeZpnga2ECmSGLaFUSBQAkQGyEEqNzHNfouJxuBjh3Qw0bc0LWv5avob0syVPK3gsJ8ErttK115+fn+R8WeJPDPiC++Hk3xEt9LuToEOsjSk1NosRPdeWZPKBPVgg3HHQYz1FfiU8JWw8XOSsr2R8TUhyNqw74daelxaiZmCqMbmPQZrzqic52NKNlG5+gX/BN7/gnVqn7Qk9r8Rvinpcmk+ENKut7TFismrgdEXP8AB6t36Cv1HhPhFTUcZioafZi+vr5Hu4TDR9nGpJa9Eff/AMefij4f+Ffwvl0XwTpdtZ6Zp1qLbT7WIbE34KooHTPQ1+35Tl371Tqf0j6fL6FqnPU3PiLwXe3Wl3fjL4y+Jr+Mta2/9naZMreaGnkXdMygZ56A/QVOfYydT93F+6r2Pnc5xjxFZpNpK58F/tX6wviHU7i5vtAvNUVnZg+sXskFrH77cID+tfm2MTUuj9T89xsrtxR8oTSjUdde1abT440biLTowIk/4EOW+tceXJzr/wCROXUoQndu7Ox+Ffh6fUtQbajO3mBYy3Qljivs6clSpNvSyvc9+nzK7vofVFlpdpomlWuiQwBRbQBSSg5OOfrzX87cWZiswzac+2iPWw3u0xs54ICkV8k7Jm0W+Yzb2JyhO3t0xTvd6lVI8yKmlGUSZZ+M806l4mdNqErGowQDaeQawUWzSpMxdb0XcxubUYYc8V10ZuOkmctWipxulqGha3JE3kT8EHHNbzUbXijmo1JUp2Z0Ec0dyhK45Fc9rnqJqaujxf4ueF/EVv4hXWtMuXARiQmTg17+DxFCnQ5ZRufL5nh6sKqqRep6z+zp8N/iT8Xfsuj6NbqbmciOJdhYufQADJNeJmGNw9BNqN2uh25XSxmMR7t8ZP2YvBPwK+HqzeM/GjHxWzhZNFdCjRDGckGvlMvzTNMyxcn7PlpLTzPfxGDw2GoJqfNPqeAzuqt26etfTwhzM8tyWxUkmLGuhw5YFWuRu5HB6mstZy5V10OfF1Y0cNKb6Jv7lc+xPhFpP9meE9Os+nl2cYIx32gn9TX985Jh1g8nw9BfZhFfckf478XZhLMc6xOJe86k5ffJnpGkR4KjPfrivXWx8FiXoz0DwFGyyMTLs4HzeldVKyi9DwK1nUWtjrbtnitWVJDt28nNCXNMjGSlTp8kXocRrjsSwJ9cH1rR2sLDrVI4TXwxLljXLVdz6fCdDitfQ7myK86rc+mwj0OJ8QJu3ZP415lVan02EaOE8Q27fMpI9q8ysmz6rBzWljzzxbbeasgK84rxMRHU+wy6dmjyHx7p+5H/AHfr2r5/GUudH6Nk9azR5N4hh8sOQvMb7hXy+PwftaMoH6VlOJdDFU6iOz8GsklpHJGflZQQRX5Bif3deUH0P33CKLpqUdmjo5M7RkCuGTvI2ndscwP2fBHaphfmKgmzOkj3IRnjNazdmaSlZMotDtffUSfunDUV7s/or/4I06Vq
2mfDjULG68Bx6UskDAk6ms7t8p7ehr9CyuEZ02mjw8+k5VeU8u+JV3Ja/EbXFsbiK21ZLuY2q3LeXb3TKTsjfHoehPrXpThTpux5sqE1TSWx7J+zz8SdM+IHhNLTULZ7DVbdQt9pq27FUkAwwVxwwzyCOKSqRmuVGChJSsz3j4Z+Ozplp/ZlwFeAtsImQhX/ANkhq5p0VN6FTThqjk/2i/8Agn7+z/8AtIeF9Vn0PQbbStZvNNuYTNFGFQNMoBYDpnKr+VctTDUXGUGt/LuXRxdalNPdH4d/t7fsur+yh8YF+FcVw9wttYI8l0y4Esh+9j2FfFZngI4KrFR2Z9dl+NliY3PCChxgdPpXDZR1PX1cdB+W24I/HFZuTZzj0V9oPvU8qZ1QcbD0Q7uD2qkrGc/iJCSowoHTpinZIcVzCRxYYu3FZVaj5bJmnw6GpoWs6zoms2mu+GdVlstRsLqO4sbq2fbJDMjBkdT2IIBrKDknzLdGNaUZwce591f8E+NP0X4h/ts6X+094++H17Za5qUuoDxvFFa7dNF0NNnna8CFDtNwCrsgdAjq+0FXUJ9LlVWjin7TeXfp/XzPncdhamHwMqEZ6/iehfBb9lPTPitrPiP9oCHwxDNfLpFjN4msFiYyQapaXCTpMCOdl5YswDjgvuU85FdcsLOvN1la73M4Y2vTpKF7PbU+If2+P2KP2TfgJ8ZtT8F/G/VfE+lnXbqLUPh/qNhAo0i+t5WYos0xBe34ZQz7W2FWyDxXZhMvhQpt332fQ5LyxFROauluegf8EqJtV+Bnxw8XaX+0RYQN4p8I6XBL4Hu3wZ305+J7WWdI1W9tJ7ec7ZcttZVIAU5CqciqezsnLa/QTw03Fzi/kfcen/soa/rf7Pfxh+F9g8P9o+B/EN23gDVoUDSw6LfW6tNbBhztaKTnsWDGnQwNlKzs7aP818ylXU8VSb+F7rzRp/8ABUbwx4d0/wDYo03wf4U0Sa1ufEHhHS18YXmnwFpLqK1tmSwhxniPziGOM9BnoMaZhUqQw/sqXVamuXqEcQ1L7L0PiH9nz/gn/wCKvFnxLvNF1/xLprQ23w4nsr3xLrOmvEkuqyxeTcHBL5a3mmVN5+Y7R3FeDgcC6tXVWX3nq5ljVyJRv33Prn47/s36V+xR/wAE+tG/Yc+FGtG58V/GLV47zxl4imh8oyaZGA0jspbckKoAADz1GMvX0OIpxw2FWHpOzlu/I8enUq4zGKu9kvXU8X/YV/4JB6x+1B4/1j4neKLG68P/AAu01LiPT9Z1iM26X0gUpHLGpILohJc4wGIC7hk14uV5bTli268OaFn5avZ/Lc7cwzeNHD8lN++fSv7TP7KPgX9njwT8MP2cvhX4Ge9+GPh/Un12bQtSuUS9+JPiDy2Km4LD5LWJf3k00gWOOPgc7AfeeHpwjGnCPuLWx4uBVfFV5Vpy956X7H5C6h5q6xeiY2o23sqkWL7oM7zxG38SehHUYr4DEyiqslE/Q6FqdNJj0AXp1rlV5Gsql42JI14yB1NKzJT6gFBX1PbitVG2rGk5MktbVmk3MMA9qU59Ea25DRt7TBwqg57Vz6yYpS5Uet/s7/ss/ET46+LrXw94Y0G4nAvreO9MMRYwRyOF8wgc7RnNengMtqY2uqadtVfyXfucGKxKow5pbH7rfsWfsreB/wBkT4RWGq6rp8NtqsWhw29/KVGV2FmOPclv0FfbYLCOhTV17zWp8XmWMliqnsoO8U2cD8cv2gb7xlrU001qTpUKMIYZFZowARwQnJY9ePQ9OK9eFLklsRQoKET4x/bB/aAFzpNzYabogltLeIuNOWyggtkbuwW5kCs3uQ3XpUYh05yutDojSVWVo6PzPnX9jOSbxN8Sm1+5tEt5JpdywFIRgZxgeSirxyeBXRCmvYNJ2stPP+t9TrnGTiqa2Pqz9pfw9qfjW1TS9T+IfjC+hSJVTRfDGkuwUY6E8KT7818bndGtKLabt5I97AU1CKtb5nwd8bfBE3gjxE1lJ4U1nSkcnYmtyjznHqVH3a+JmnTdtT3IWlE851BkVDnpRFNsHZlG3OZgR68VrpYhNvQ6nwhq1tourwX80cTbD8onciP/AIHt5Yf7PeunBV1hMRGZvThG92fT7+K9Q+LfwD1H+0ENxcaFMt9Y3A05beOSEjZOkUYAwgQg/hX9G+GmfTxcqlKSt8rJ37LsdtOp77S0Pjn4m6Te+DPiY6rOfslxAIwVTIYEDy2x0OVx19K+kzpSoz97ZnzWYyVFtdWe/wD7CuufEe01eMQ+DNEu7O3ulCXGkQ7L0g/xMiHkj3NeLhcPP2nvbHjQrVKi5X0PuD9qH4Zz/HD9mDVfD+r6NcJe21s81oLyM+YRjnIOcfTNezgcTChimqcrxZ6+XQjTrLsz8gtTE2mWws9QbdPp80lheDBXgE7cnjt/Kvqoz542ep7c4S5+W5teCvEVnqkC+E/EsypjBtbrdkx7sAE+q+3tXyHFXCmD4iwzpSsq0VeL6/PyGoprXYu6h4evtJvjZ3sW1uqspyHB6EHuDX8wZrluMynFyw2JjaS/HzRm6fY1fCdxfaHrVrqOm6lcWDQzKTfWZIlhGeWQ5HzAZrmy/G4rLsXHE0JOMou+mhrCSpp3V2fWut+HPhp+0DomqaP4Q13UdS0DWLU2kep6/Yw299JfRrlZpkiZkDnJwQeR1yeT/VOUZlHjXhNrExV5q0ra+966fkj6jL6lTG5aqVRJPrZtpJ9rnkn7Mnwv8TeIfhv8Xf2U/iK6iDQ4U8Q+Hzeg4jvIz8wjz/z0TKkDrxXwWRcJ4x08Tl+Jp3jF3pvzRhLD14NU3G6T0fkdL+yL4yX42an458L/ABOvbzXIBq9ol/eWFuItQfRoHVfsyyDLIhjzuQHGQMkha/ReE81q4zAynXaWJopwWl1t8r+Ttc9/LIVZ4ZzhJKpT5nHm2vbS6Ou8LaxF4j8Yx/DK88OC5h8Da+um+HdauU/0q48PX8r25tpf72zKOM/d2kZx17MZTqY7EKtLSooe9pprudE4zWKWOvaU4JtLbnWt1/Wpk/Eb4PfE7T/Cfg/9jnSAl2+mfFi4vLbUcHbbxoiSLMT2G0r7ZJ715GHyv2GXUqKl1u35JtorHuOOm8XradNXt63Nz9q3w/4g+FXwgu/hSmtwi8vPG8nirWWugyxarfZhEdsT/EWCsEX1IxXNxJOp9XlUpSbqOybls1s0Y4HD0KlaWPs2/ZqEddl3K/jP9lvxX49+GOv/AA/8J+D7zQD8QPiTDcWmkcyNZ2UUKvIWPCxIZvlZjwAo44xXFgcnw9PI5YXmfLL3rJaXa7X2el/LueEsPSq0VCvUbUYt3S3fRb/15npXxu/Z2n8V2Xg/9nnwl4mXRfBHhbTJH8SeJlvFje91KZ905gUfNJIwGN2MYzkjodaeS1K+XxwdNcsNLpaXQZW8RQo1arXvzaSSW0UtPQ5D49eFvBvwj1TSj8H/AAwniPxhYxJYfC6GG28mw8Jxplnu3fjzbgkl/MkH3+nau3EcO15YeEILVafL0FLB1acL695
X1u/0R8uftHfsR/t1a14bl8ZeE/gZaak+qzNe65rtrfyzahrMzN80s0shO4ZJIRQBkmufG5BjYYD2VGMJVI9b2fp/SPn67xDgqSUU1pvZv19Oh8q6wPHek+Fk+DPjW81O1sdF1G4ntvDdyhVYL+ZVSSTZ3dgirk84AFfkeaYXGTr/AFasmnF/D5s+ZxW7j1PvX/gl1/wSu1LxfoVl8Zf2jtPNlokbCS00ZxhroDkeYD/D04r7rhfgmnQccTi43l0j/mdeX4KVlKa17H6FeMvGVrp+l23g3wNZx21hbqIILe1jCqqjgYHA4r9cweEp00pTWx9Xh8M6b5pnzn+2B43tvC/hdzLbSS/2TIRHbyy7jeahL8qIFPPyZz7V3UIww1CcoN+829W3v2u3ZdktF0R0V8QqVByi9WeHfF7xRF8Ovgxpfw+0rTrma7itjPqjW+pGL7RcyfM5IRCeDx+FfB5pi68arimfm+YYmpKo7M/OL9o7xNr15rE8978N7NYBuMc+s3F7OVPsJCo/8dr5TF1qsorZnydao7uz1PE9E0/7dKzyxJG078iFAij2AHSu7J6XK+aW7PVy7BPku92fRH7OngVrbUhrF1EhSwQZR2O0y9uDxwDk1pxdmMcrymfK/flotT3qdN39metXUrO/b6AV/Olecqs25bvU9CEPZRUVsipJJk9OB61yOGh1RimrkF1go2B271jsxPcy7DeLhgP71dXuunsYON53LdxObckHisbq5dVKIQTxXAIHPtRJ2QUdTO1rSWjb7TAORycVVKq+az2Ma1KEndLUbo+ryxuElOMdc1rZDpy5NGaGq6ba61BuZVJI9KaqezdkFeFOsrMn8CePPGvwtjktPC+sS20bnOI2KlT7FSD+Fc2IwWFxkuaotRYaVbA3VN6Mr+IfGHiLxjqjax4k1ie8uXHMtxIWOPatqeGpYelywVkRKrf1KLyiQYB5xWkGxKN1dlcFt2Ofxrpkk4ChqyzpNm2pa3ZWKrkz3KJj6kV2ZBgvr/EGGw6+1UivxR8rx7j1lXCONxW3JSm/nytI+2PCFt5VuiKAAqgD8K/vSCUEkf4/ZlO83c7LSUOV5rdbWPmsQzu/BvmoH+bA47V007cp87inaSZ0d+bhoPJlQHPKkck1UEk7mWIlNRUai87nH66pRmVjyKJnVhmm1Y4rXV+ds1y1NT6TC7I4zXUyWwec159VH0uFkjjNfibexGPcV59VWPpMK1Y4jxFB1+XjFedUV0fT4SWxwHiiDAfK5yK8bExPq8BPVHlfjiyZ0cdueorxa0eh+g5VVSaPH/EVjHHfNGw4bOeK8LFQsz9HwlVeyTNv4XSo+lyWxPz20pQ59DyP0r8a4jwksNmcpdJan7pwjj/r2VqLesdPkdU3zAjNfPySPpprUc6jyOR1FTB+8XFWM2fcMgevTFaztfUGkyq6YXkVnI5Kzsmfuv8A8ERL/UfC2qGy1fR/C1mJzgrZ+IfOnI9QCSPwr7DLKs/aNL8zzs4hpZo0P26fAVppvxj8RaXqWBZ3skjDKcpvyVbH1xX0Ps7xV2cKalSTR5B+zv8AtK/Eb4U67B8KtT1dpI7dDb6ZpWm/6LD5MeR59xO7ARqBgcYFYurTpS5ZdDycTFpuR9t/DH4xSeOPDVpeyxpcW5YJGbK0xGx9pXOX+ozmuuCja6YU1KejPS/DHjiTw9feTPJKtuXG9LmJgyA/hyKmfK1Yv2Op8Af8FtP2WNa+I1xY/ErwVp5vLy2uCJEgQl5bdx19Tg4/CvBzvCxr4PmXxI9zKKkKMnCT3Pyw1LQ7jTJZYrqJkeKYxyK4wQw6g+lfAzk7n0vOraFGZcLjH4UkKSVrjrcjbtIxmh3JTsPZSijAzWkNUUldksVuSC7d/WoqTtojdWS0Ox8A/s7/ABw+LPh+98T/AAr+H13r8GnybbyDSpYprqIAAlvswfzmXBHzBCPetqGX4nFQcoK55+IxdGlLlmz7O8H/AAD/AGZf269f0fSvjN8XZfhz8SdJ8NW9tqP9keDZo7TUoLaLHnTwSRQtDcIo2yOuUOwMCRyfoI5Tg6llWlyysvJXa21S16Ppfa6szwnUxWCVqC54777H1p+w7/wTy+HPwY8PeJrTRf2kF8c+HvEOiG0GradZzQy20vz+TJ+7cAhQzLznAYqSFJFe1hMDg8JR/dyv9xwVsfUxM4txtJGp8Irj4hfsaeI7T4bWVzN9mnvFthpes2Qkiu7AuXC2l1j54xuOLeRi6/wEDCnCEadOSaf/AAx01YLGQvL5PzOq/bd/Yl+HX7Y/wo1jwBdaZZTWes6Q9/4LVrfabK7RS0lsMc4YncBxg5wK9OqqSw7gtnsc9Kr7K0Z9Nz4Q/wCCWfhrUPiDfXfwF8feAp4PEfwnupbLQ5NRuVnmn00BE1DTpHKgtH+8W5t8jISQLklTXzuGhOVe0pXa26aGlesow91NJ/muvz39D9OPA/hzRvhb8QrjQRoYA19rO1uUZ8iULYhCSPTCH8MV70oxi2oxOb2c6lJSSehwv7RmgaXpv7T/AIW8C6vpqXlhd6KNP/s2UB1eNMvuGeBsKrj3b2rCdOC1k9dreRvCMo0nJep3H7NX7OWk+CtJ8Q+JfHdn/as2r+MpNVsBeDdIjlmYyFjyWZyWOe7DHaqwkKWFpWirWMq3Piqiv0Ru2nwT8JeLvjR4i/ah+Nzw6jpOg6Sum6Tp1xDmEJHlpCUb5WLPjC8jgc5zV16FOo1VvfTZdPU2q1fYYaNCmrPqVfgX4s8R/tXfEy/8T69JJYeAvCc3k2Ph2C1EVp5q4Kh2DfvXUcsMbV4A61eGdCVBSg3e7TVtPKzvr56fNnk1KTU7NavrfXz0Pk/9sj9mn9s//god+0nraeFfE1t4S+E8cAsH8TXM7QxNao2GiLFkZkLclE4YnkmvNx1OpiJtRm1FrpofQ08VhMHQjTS5pfqfHvx6/wCCdmj/ALLX2zVvhh48f4lf2azRvr1xoX9l6Boblwgae6uH2XEoydsabsttzu+6fFqZOovmparzOmhmtWp7lZcvVWd2/kYHxG/4JzfE/wCEv7I95+0Z8Wl03w1K2swroCa1rqfafElvJwfslrGpIxuVyXYfKOKxq5R9XwjqS3NKOcxr42NKndq2uh84AEAJj614tup9LBdyW1tQTkg9eKynK7LT1NjSdBvNQkC21s7jeqFlUkAnpUxi5PQzqVVE+vv2Jf8Agmx4o/aT1XWdCtLNxLHp0M1pfXERFtAzEcu+MfgMk+lezl+WVa6do3T69EeTjcxp4S0p6p9D9ev2R/2GfhR+yho8OoaRYxXnieXTIrXVdb2bPNVOcKucKufx9TX2ODwVHCRtBavd9z5HGZlWxnut+70RyH7Tvxjt9T1G7gtL6ddOtIzbmS2Vm3dzgKCck8Z9BXfBR6FUIqnC/U+Ev2iv2kNG0TQrmDUtbluIY3Li2GhXjIMf7IZc8d6upVaXKmd9Jyqx5dUfAvxX+OFp+0H4pGieGfDmhrpTSIjXUGkywXKzhvmU+azEDGORisqPNOo72sjoUW2kuh9I/sQeGl
XxdCoi3xRERPu6Y2g/lz+tdrqQlCSjvHT8LnVOlGVOz2Z6l+1h8Rm+zTad4h+Md9Z2xUolvpcV5JImONoVGhT8ya+LzaqneMnZPrrdfc/zO7Bpy0ij4W8X3Wmya5O2l6ld3cTMds9+hWVvcgu2PzNfFVVTU3yO67nvwcpRtY5+9O5SSeaqL0LmuVFeyYeaOf0pN6mVPfU2dNufIukmDgFSCCVyBUuLTumaSk+h9DfAX4hTabqVrqHiC8imtZojb3EV9c7pLuNxtMUUC8AEHrX6bwVmdTLMxjWnPRq2r1+SIbnLXY4z9vb9nq88NeGYdS0Ey4sYfPs7lOs9mG3x546rkxkdsV+85xOGPwMatNvSz0+/8dmZYyjTq0VNannn7L/ivxPrmvWGs+HPE9xp80a+VjS/IsBIM4KyXJwRx1yDmvnMPjG6lqcrNaadn0Pka1R06z00P1V/Z/1LVLnwWlr4h1mK5jmh8uSN9UF6xBGDlgOn6Culxp4f95LRLf8Ar+rb7HpU8Q6qTitT8yf2/fhNc/BL9oTU7fC21hrM3nQSBPl80HKkE+vSvrqVdOUX0Z71LEutTu0eNwJA1yqxyMpjx9mlZCpbAy647nPA/pXs04KcWnv0NoSna0keifDnXdI8UWEPg/xNdGMMSsF6Vy1u+ef95cda+M4w4QwnEWAcZK1VfDK3Xt6Hp0kpwaaN7xV4UuNA14+Hp7aUWcJH2SRUz9oU9JOOCW64zx0r+YMxyvMMsxv1PExaaeiWt/NepyVYSUkpKx7B8BdD8YfDyFNf8SeFr2z0q/gTUNNluflS4WGYJIVGemGce5XA5r9e8LI5jlzxFDERahNKUb9GvyPf4dlzVqtBb2Xy6nvEuteD7PWr/wDsjRbOWXU9HWDzmjHmTWwYMrZ7lWwMj1wetftcJRVRRUlzNX83bR/LX8UfRRwVWUISd/dlfyvtqcR8NfBGh/Cnxz4j8c+BmW1n8R6G8Gp2EkY/0eYZYlcDkMCea58NgcLhqsp8tru7sKdCEJOWu9zE+DXj+C90vxB8Qb5Ior+80+IwEkbt8byJuI7HzNx/Wrr1KdS7hombQlGrJKOqT/NX/I9Y8GarPreoXHjXVjFLKmoyW6XAPJ/cxgnPvgflXncl5cvYeJnGko0odtg+JsfhjWrSwk8dafa3/l2jTrJqFvvRZArBJEXu4b7vYGtqWEjUoqNV3a3duv6XJpe0pp8q07GWvjLxRaaH/wAIxe+KJoUutLji2rIUl8ojHzdlLdcAd66q+AoYjBuhK7Tja+z1Vrq2z9OpnSlS+sc0Y+diZ1TUNTeDU7iK9mtUE9rGGDLaPt4wf721mBPbJreMoqKO181FOMNL7/n+ZpWGmeHX83xN4ytbaGwtomYylQwnQZ3Zz1BORjp1q+dRj7j1/I4cRzXUYPU8i+O3gDxP+0T8VtFm0z4863o2nWejMvh/wt4RtHXMqrujEiJ/q4gAMtgfWvjM0niZ1VGlW5N2+7Z8lmmHUqntY3TW77/M539jb/gm1rviz4pXvx3/AGpb2Rms5x9lgnw5Zl4Er7hhm4yBg1y5Lw/W+uvGY1+0n0v+p5H1PnxSqS18u59ueL/HC30UXhrQEjt7SLCQKh2hVHAz6GvuqVCNJXe57FKlHDR5upi6Sp86aW6vYLa20+Jprq9fkQgHliQevoKK9WFON3u+hnUxUYLmbevQ+SvHXxU0z9ov4+y+KpZlXwr4Slc6dBJLhLu5HG8k/eOR1NcmJxCp4dKL9Tw8dmEKj5Y7WPnf9qf4i31ppt1cabpl0LGJikosxeSbF6AAwKMfia+BzKu8RUcr6t6nwlf2ODpQoUtIxSSXZLY/Pz4keJfD3ibWpVtLHV0nLnbJeXkpA56bZOcfjXgezjOty2Z5vK6uIUYI1fhv4av9T1OG0tIN88kojt1I4Zj3+g6/hX12CdPDUJVJacv+R9fhqcqUE2fVXh/w7beCfDFvotpPGzxLuuGMZzI5+8civxLi7PZ5vj5crXLHY9XCwtdsj/tGOVsM21vQ5r4hu5tOw4yq/wAw4Hes73ClJEcxDIR7VjL4jSUbsp6Oga7MbDvW9m4HO175o6vpAkjJU8gVyqTjLU6ZLnVjKtYHtH9++auVps5nenoXAgul+UA+oouoKxpTs9WZupaMY8zQDkdQBVUqrcjDEK+wzS9SZH8qUnI9a2qQ6nLTnJPUvzxpcLkDr3qYyaOxSi46mdc2MkYLJwK39pGSsYKKvcp+c8XU01ZLQU5SSJEm8wfLyKFKw6SV7nTfBvTTqvxP0e3dcqlz5jD2UE/0r7bw0w/1vjfCq3wty+5M/H/pBY/6h4X41p2c+SH/AIFJX/A+xPCsTC2THYV/aMddz/K3MJJ1GdbpIIZT19QK1R8/iHod34PwInY+gxxXVD4T53FuzRu38khiJZsbR8vNXFK5y1JSnJc5yGtZJZmJJ9aU2ejh3rZHHa6Mlua46h9HhbOxx2uISSa4qiPo8K7JHG68hLnsa4Kp9JhWcZr8R+bA7815tTRH0mFlocJ4kg++MHkV5OIVz6jBT2PNPGNkXDblrx60Ve59tltVK1jyHxnpJW5Mo7GvDxtlufpOVVlUhYpeArz+yfGH2GUgRajFhc/89F5H6Zr804zoOphlXivhP1HgbMPY490G9JaHeyx/PyMV+eQkpWufr8kuUUqdhXpnpRflkWrWuUp4MDcRjHrVyV9TCdSzdim6B+M8j2pygkrs5Jqck2fqD/wSc1fW/CfijStQXxH8M7RWkASIThp355GSCc/jX0+W0bVeZNHHmVKvVv0R+hH7efgqHxTPpHj22tklGo6cIrmWMfLvA6g/lX08ZrlseVh4TUeVs+APi34Ga78Uf29BaJEYNLEsDMpeOS5SUrh1zzjcOPpxzmsKkOZ7CqUk7pdTK+Bv7UXjT4U+Nbrw78RdX1PWtdWQIvk3SxmNTyBGWwttEox9xST/AHu1OhW9lFqo7ihhVD32z7x+D/7Rem+J7TTxqA0yWe6TaVtdTluLnHo2AQD7nimp+1leJz1aqTseqfGvwxPrngW21Tw4k0lxpiC6hNxAAWA5ZG7Hj/8AVWMoLmtIlVJKzifG/wAe/wDgmF8If2orf/hM/hjqyeGNevb9bzULMoDBdnHzBeyE/lmvJxuS0MQ+eGmux3YbNKtF8s9Uj4A+M/7E3x5+Evim68O+Jvh9fWsqPcyoZIvkFtETh9w45XB6183WyuvTm9ND3aOPp1Y6M8gFhNAw82MrnkEjqPWuGVNxdmdimmSKmXAI698USjaJvTTZteH/AAb4p8Swm70bwvqlzZJcLFdX9ppU88Vux6bjGpxx261lTw9Wq/dTYsRXpUVyuVmfaP7NX/BO+L463Gn/ABJk17V/A2uRxxy6frfhzSLiLRdRiUBQ8r7leGQYxInyEHnvmvq6GAhUoczcqT7q363X4Hzs8XTpzSsqq3s/+Br9x9m/BX9hz4jabcQt+07Zaf42vLXyn0fx3osJLuI33JFcMr5YEZUknJU4IINejJ127NqS76HJGtBybpNpvdM9d8Nfs7S/C
jV4PHH7LbP4deO4afXfh/eKpsdUViBL5L43RScZUBtmc8DcTWPs3zc8L37EQk2406yuu/X797fl06ntWqaPoHj/AEy3XVNKiaB4ln077TH+8tnHWMnqCp4HpXbTmkioxlTmysNFFno5gsIFMun3iXNmc42sOGH0NRVm1HQU4RmeefBf9k/wJ4J+PPiv9oXT/DUFtqOvxRo/lrtWXBcqzjpvXzXQN/c2jtWVCjBS5+xy1bytDoj1A+AbfUvFa+JbuLdKsh8rJ6cEZ9uGI/GuxVLNnbSvGjZMwfEfwisPF37Q8XxO1S1Vk0XTTFZqx/5aNjJ/ICueonKqZySUFE9Be0MsGPLA2MMY4yRz/OtJdxwSRa8Q/DFvHPhe28InV7jT7JGEt1LaNtleTO75W7H36/lXVFNRTi7Na6dzlnWjGpKctX0NnRfBfw9+HnguHwNoelwWmlW6bRaIDh+5Ld3JOSSckknOayjy01Y4IOtKrzLVnB/En4bfDP4t3MVh46udX1HTbJleLQbW6NtYqB0EuwgN9Ce+MVnUjSlJXOtSxFON6as+r6lfxX+z38GPFg0zWbr4T2msx6MyvothqsZk07T5F6TJb8q8g/vbS3uK3l7kLJGdOE5yu5WffqfDf/BQr9iDTfi/4pb9of8AaF/bE1GO1tlNlp1rfeB5lttKhGTssbVBmSQnADHr1LHivJxmDWIaVSfy30/zO3K8Tyxao0tbtPWzdnbr07W0e6uj8u/iN8K9W8EfEDUPDUWl62tsJmk0yTxBpDWV3c2xyUmaEkldw5Ar4/GwhSqtQeh9vhK061JXWvk7ln4f/CXxH49mtotItGAnnVQSP4S20n8DgfiKwpUXOW2g6uIUYvl3P0l/4J8/8EjfEGvWh1j4saJNp+gTXMV3b3k48q6mxghEjOcDr8ze2AetfTZdkk5JSrLlj26s+dxucRpXUHeX5H6g/Df4beBvhB4StvBXw+8OW+mafaoFjhgTGf8AaY9WY9yea+np0qdOKjBWR8pUqzr1HObuzD+PfxHi8D+Bbv7JcqLu4TywQ3MSnq2B7cD3NEm+ZRRVCLnUu9j85v2qNf1XW7ZtM07X7E2LwkiyvZTs388s0cyNn/ewParahbc9WMVPU/MT9r6H4gadqDtpVpbw3TTLFFdaTqdxGYyxwCCZHDfTg15Uqk5VUlqdkabXwifAvwpM10L+UtNNE3+ukGTLNkbnJPXJJFe3QhK/MehSi4Ru9z7a/Zt8N3Ph7RJdYt9OaSRLfEMQZV8x8dMnAp4utGlSaRaXNKx5D+0T4713SruZfGHwA0xvNLKLjVonlMfPDIUkx+NfnOYYiu5tumrHrYaCmtHsfOV/cJLM0kUSxqxyI0GAvsPavn7XZ69JWIJkMkJYdBV3UQm7lOzIE2Pek+5m1Y1k5HSo55K6N4Jcp2Hwx8b2vgfU01GPV4dPdj81xFame6YeiZ4WvWynGLC101Ll76XZlUcUuW1z7A8M6TH+0J8JJ/B2paS8N5DaPN4eg1GQNcXMZGZo39N4GQP7wFfv3CubvGYN0J3Se192jilUaTjumfnh4m+F8fwu+L118P8AxTotxe6dd3XmabbpqJtYsE8lmA4xgZ+la4nCUMBiPe1TPncZh4puUtz9B/2FvG/hHSNNg0xPGfhrT3QLEtgvitriTI9VC8/ia9PD4iFeNoInCOck4bmt/wAFQPgRZfGH4Vr4y0q3inmsI/8Aj5gXJYA53ZPI5717mXtuLptvU9/DR5qPs9nc/NO60rV9Bv49M1WVZroKrxT2nWQNww46P0BBHavpqUrU7NnoYX2ilaWh0fhHTli1O3ubYNl5PKEanAZ+flHtyMk8k/StHUhXmowu29LefkelGs4LU+mf2fdVvvFdjB4JuTDPLt3W73Vup2PgjCsSMZA9q86eWYWvyynFOS6tLT0Z7GGVKtJe0jdHvfwd1G38C6dq3gq00/QbFLpJA2n3cCXqTmRWEsitMC0MmSThSc+tZTyvDxaSVknfTTf/AIJ9H9QpYlQnLm922qbi9Nk7bq3f7iD4b2V3NCz+K47NZrEvBavCekecr0xhW4BAz61306bUk7eR6l6fwRbs9/U2IIfDevyLNqOmvo88RZAzssigDtuXJZW7ZHHtVRjOau7q19/L0vvuvXVJkYiPs/dXvI888b/svwXtnrHiD4Z3kFhLfxETxSs3kzOf4kYfdJ56+tcVWLaahpc4JYhwSutEanwVsr6PQ9U8PeIbC4sbiyvxN9jl4aYkAFge4yCc0QtGNupg5ynJTZ2vwu8K+E/Geur4h+IF+4js7iW08PabBLua6mUZMm08FFyO3Gee1Z15TTXLpfuVXq4iFK1NX7s6+/8AhR+zRp3il/FPibUdVv8AUoLdYFSe9j8m4Y5LOoC4yvr6niodTMKseWCSR5sa2ZuacIpL0H+H9P8A2YNXvJ9A8I6dcSfaZVW5M16u+RuflVgucc8gde/Ss5wzGEOao0kd/t8xjG85RXy/4J2Wt/s4fDG98K3Wh+Mvhzrk2mTspW2tNVl2lQBtGNoAHfHPU81z0sbXlJqNWN/NHDLHV6s17KrC/mv+Cc54h/Yu+Gev6pL4x8L+OvEmi3DxwpqNi8sYjnhTO2MsoBwMnhcdTmuOcYzxKlVin5o4K9bEc3LOKd+qf6Br+mXOhaSfDugXQksLNB5MglJ85j3w2CTn8q+uwtWkkmlqRCEvtR1ONutQlt7tbZYWkmjbDLu5Mh7VtXdGSUrbHNWnJbs8P/bU/aA1Kys0/Z7+Hc80d1qYRtevLdwChBBMfPXAz+JFeDiKt6nM9+h81jcZZvm36Himta/oXwt8DR2lwNX0yyEZMuojTXlUHHLFk+77kggV89j8e4/u0z5upWv7t9T4l/ad+LOi3N5O3g79oy6m3yESWGnXzRK455J2nJ9uBXz1T2TTl7SzPBxU17SUZLU8N8P2Op+I75rjULye4Yn5rieUu+PqeprTAYSWIb956rfqj1MmwcpTVRo+oP2e/hc3h60Xxjqlr/pLJstYccpH/ia4OMs4WBwX1ak/ee59PKCvY9DuZopSWjllUk8xSdq/B6z5pOWup10bmZqViLhd8cYDD0HWuNTs9TWUVNGSt3Nby+XIMfWtGla6ORRcJal6F1ljOWB44rNJt6m7qK2hFpaYvTtH8XJrouuQypvnqG9KAykbeOhrha947eWzKM+nLMN2MHsRWikooxqWkP0fQry91COytkwztgE1CjKrKyOaU/ZrU+ovgv8Ash6V8bfB58Mr4Qaz1SKImO7IP+kE9MN0H0PWvpMFl1OrSs1ZnlzrVI1eZv3Tyf4z/wDBPr49/DLWJhZ+FLi/gjLFGhjO/A65WlXyrFU37qujVYnD1I3TPI77Q9e8PlbfW9KuLVnB2iaMrnHXGa8qpSlD4lY2jOEo6MgdlZPmHBHOawacXcqMkZ2o2YzhR9DWkJXL5ebcqW4aM7Txg1ra6uYSvCdkelfsx6d9u+JD3hXi1smP0LfL/Wv1zwUwarcU1azXwU397aR/Mn0qcy+r8E4bC31q1k/lCLf5
tH1f4ch2wKAf0r+rIbH+cuNleTOn0wfMOK2R4lfY7rwiyrEx8vniuqMfcPnsVpNM1dQmDhmkPPQGrSOWTnUndnKa02GO8+tZzPUw3kchrRyW5rmqH0eGWxyGtDls1xTPocM9DjddUFmHOPWuGqj6LCvQ4/XIgzHnn1rzaqPosJLQ4vxDbk7uPpXmVo3R9JhJWsee+KrTIbI4xwRXk1oo+vwFS1jy3xfpZkVzjvXz+PjdH3+VYjlaOI1PTLuOz/tmxY+dp0olUDqQDmvncbgI4/AVKb7H2eAx/wBSzGnNaXaPRrK7ttUsodTtWzHcRCRCPQivw50p0arpy3Tsf0bQrxxOHjUjs0SmNVGc8Vdrs1TZTuFMgKov4it+ZRRSppvUrvaiIZxk1zTlKorGdRqKZ9O/sD/Eb4L/AAl8QW8l34r8TRX9zLgw20Nu2eeAhaN2DehGPwr3cJjcNFpRumGY0/Zwdz9sPhb4x0f9ob9mV9N0nTtaWfS4RPbya8hM8vHPJAzX1WD9+F2fFYivKFe6Pj743eCZmjmsLVmS6NpO6MkfBbB3jB6Zwp/Ou+K599WdkG17yPBfHHhzxLrI1jxJo8cMeoLotrcSSRA7JYgVSQSAfwlsDB45FcOKpwVpGknUqrlPVv2Kfif4ihuDZaNofiy2lumWKQzW5+yRjPcrxgcHADDHfjFVh8U6cbK6vo/M8+pR96/Y/Qv4M67rFlZDw/4nke4glQrLczuCZM91UKOB74NVK8mxxpKx5/4j0Cf4d+NL7RrUv5PmG809lzh4ycso9wea2pRSj7xnOKOv0fxl4L+Ivha48CfFnw3bavpWoWz203nKPMETjDBX6jr2p1KEK0bNGUJVabvFnyD+2j/wRS0DXdGf4gfseX32y0sNGEX/AAis7/6QroxYMrH73Bx+FeJjcmpyg5RWqR7eBzG0v3p+aXjH4V+L/AHiK58LeK9BubG/tZfKmtrmIqwbPTnqPeviMRGVOTi+h9TSqwnT5oanq/7HXwh/aP1/4sWNh8FPF2v6LNcgm4bSr3VIoSQMr5wsrebI7cjvzgc1vl31qVRezk0v68mcWOnheW9S1/M/VD9n34J/tV6vpUKftGa14Ea0hYpFp/iHw3LJdS88yGaWUS59G2gEHoOlfWr61OP7yd/Jnzt8Hd+zTTPqD4ZeBfBfhC2MPgnUraxLIC9ppd032Z27/I2cCtaVOnB3QqlSTS5lqddPpVtK0ax2aRyL8wKDbye446e1XUnpYhNXuMls5HAxndHJuYY7nrWN76m68yW+09XTzgq4bGT681q0mtRap2L8OnItuqxqqbowCVXqfWlbl2MUlfUmWxaKPAjAPRW6cVKhZ3Zp7VPREE1qiRvIo/1snJzVhJM0bexKWqXUg2og3E/3j6U5Nbsz9qlJx6sstq8yW42y7Sv3kDDkmqVVuNjGVOKlqjD1qeeXzJri7WGEj968j44z0Hfr2rJJRk5J79/60/p7m0KalokW9H8K6fOkVyqvcvnKi4OIsdyF9PfBrX3KkbIzlUcLouaz4I8Q63JGk/in7Paoc/ZILVSregOeMD0xXVBRjGxhDEUqbaUdTkf2i/AHiHWvhpPpHg/Sr2W/MTImo6WLZLyAEYJiaVdsbEcbhyO1Y15yhTfJuZU6vLO7+53t8z8rNT/4J+/Gnxx+0BNpV34Y12S41KEss0viNdXvV+UruuZndduOMgYAz+FfJyyupWxFpt6p9n6H2VLMqOGw6ldR9Fpsffv7Hv8AwTM+HPwF0rQtV8aaLp95q2kWxW3jhVmQSMwZpJNxxI+QMcYH619BgctpYaKc9ZHzGLzSpiVaLsvzPqpY44UAUBVA4A4xXptuTPKbuZ0uvWlzdPaaZtnkh/1rhsJF7saG+U05eSN5HyF+1V8W7G88S3Npp+q6a1pAzENNdbVll6MxJ6dMDtgcVrRoprme53YaDqI+Bf2oLbwf4t+1pqvh/SrlWjO06X4tbc+f4SvHU+9efj5U4ux7VOjaPJFHw3q/gbwnpvjOebw1ot3aXkjG38m41J7gITySoLEDA4BHqelY4Ci5S54nRCHsvU91+AfgAXupWem2ULFYGAHOAzY5J9ea9ufLFKbdmr9dPn3LbcrI+gfjLqOneEPh9H4a0nxPoM10se640q83o+cdVcEYP1r5LN8ddtJr5nZRpPc+M/GmofbtVklme6jcMcwtdmWMfQ5r4qrVi46Sd/wPaoQV9Ec1cOGf5f51hBNnf8KJoxut2qKlyFuZqqUuOBitF8Ipo1oDuXb3IqLLqVC9jT0LU7jR7xb208tZlPyyvEH2e4B71dOpKjPmiPlV7s97/Zn+LGsW/jS0/s2ae4vmlVpWUmWY4P35ZPuxqP7o4r77hjOFQxUWrtv5/ec1eCcX0PRf23/2S/D37SngbUvid8MjDNqliPN1K0szwsuCXK7edjHk46HPrX7RCrgs/wAJy396Oh4uJpc8VGrp28z5r/Yo+L/w++GnjK28Ia7pM1z4mLGE6BoeikujbsZklf6dS2AKeCnRwT+rz0kebLlwknFLU/Tax0bUvjB8Krm01SztLOG5siFsFnSV0yOCxGQD7Zr2aMlQxKnzO3bp69z1sNU95Se5+X3x2+A+t+DvifeaIqPJNNhLZZHKKzox29sDOeT3Ar26+NjKPu7HtyjGUvaK+ptfCr4Bp8RPtEt6IYbgzG10y80u5SW2nkjxu3qMPEDnG8gA89wRWOFxDm9jspKVd8qurLW6PrP9l39kTWtN8VHU/HFqkdnZ2vnywD5MqAVVffI5z3yK7q+Mp0qCUHds+iw3JhaafV6I9X8Wap+zRp1k+r6r4ekTU5RHFcNburRqQDkgsASe3v7VhTp5hUa95WPoaMc4lU5Yyjyea1K2g6h+ztrkjvaeHdRt7G7XdPeXjeUiFRxsBXkHHPPaprrHUVfmRvKOYUKTlKUbrpbcisvCn7KnxH8TXPhnT/G2oWurTIoWT7QPs4IyQQBjGfU1TxePpUudxTj1tucOJxWcU0qnJGUVul8Rg2Om/Drw346Pw+b4n6lb39pZi6vknt1a1kh37SwPfqvINZ1qlWXv8qs9kXW9tKDmoafiTnT9J+J/iHUrL4U65Drd1oblESEqjjg4JVdxwQRxk47VHtPZ006lk2Yfu6dJTq+7ffXY4t7j4n6BYanok15pE0+nSi50+3u5jbyPkqBH5mNuWJJBO0ZXn3JV6iaitU39wpe/JNXs9B1/4W+MfjTxBdw6J4Pu71pbyGLTI/tETpEmCHLMrYTafXrknjpXfTr0acLy0SLl7GhSdSpJrXReR6dpPw48M/sh+DbjxHr6xa34nLmVVkl/caeSM7kU8Fh/exXJ7Svmlqd2qSu7dzgpqtnE24tqH4swP2RP2hfjZ+0j4nu75H1FhdazPBoUmoXyxrNFG2GcRhiApAPzY4x1NaYvA5dhMH7XlSit9NTTEf2ZhcsnVq0+WMfLVn0TN8Q/Aeh+LtQ+FGv3+j3WpWjRSatHaRAtCzY2lyB6/ieDXz8MF7SCr0YtJ6+v9JHg0IVsdRWJpOSVtLv
oSX/hX4UaZrlpqfjK1luoFlZ9lpahI3VgfmJYkggehA596cq+Z1KDhh7KXmU6+ZTw0oYayfm7s8D+K/iP4O6BfeIr34daZqW+ELqGkyXOpR7YoAnIaERl1Yuwxk8gZ78d+GebxUHiJLlSfMktb9Nf6/DXyMW8fGnGWIauk727+p+f9uuu6h4q1f4r6tpWr3kEtwSQIt4Bzksdq7lB9u1cFabhWlVUna1uXS3rte/zsfI4vERkrHkH7RP7QdnJp949l8ULnw7JG2BYWzPLGpGeWSbcSPUjPXpXzGNrxq1G+blv0PmMXVmtlfzPiPxRr2t+N/FUr3Or2uo73P8Apltp8cO8Zzk7AK4KVGdapFQfMn1JwuHqYmokke4fs5/B+S+mg1zV7Ui2jO+Eyp/rG/vH2r2MZj6WRYByT97ofoGDwywtJXWp79c/8SyIQy2YeDoWhmyp9wR0Nfh2d5jWxmJlUqa3NpwkzPkmDTFoyxU/d3nJr5qpPmZdO9rDo8OQD+FcVTc6LcqKuqaTFdKWRfm+nWrpTadhTgqkTJImsXKOMYPXFdEmjh9nKMrE+h75rstkZ3VMp2jYqjyxqnRSIUy3FYLVnfN3REu7dwPwquSNtTFRbOu+EVib7xjaQf2bJcI8oEixJuOK6ME4RrK5zYmEXC7P2d/ZV8A/Df4V/BrTfGXiGBMXEY8ozjBHsc1+i0aEXSi0j47G1ayqckWd/cf8Kq8Z3K30FrCsxyUfhlIPY+1digrWZyKNaC3PkP8A4KkfsO+FPGP7Pl78QPhl4fiTVdDuGu5IrSPlkP3wMdR3ryczy2OJw7dNao1wOKqU8Sk3ofkVMkisysCMcEEcg18LNJaPc+tcYqN0RNlk2MBwOKSjyoFN2K0sWDkVvGT5GiXLmlqet/si2BfVdX1JxwDDED+bH+Qr+gPAzCNU8bibbuMV8k3+p/FX0tMwUsVluCT+GE5v/t5pL8mfTmiqQg5/Sv6Dpn8M4l6nRaYmSMmumGp41dnceE0lSIvvwFx1rsSSp6nhYiS59DR1QxuzFRgnvTWxzP3p3OV1rcXYYz9aiZ6mGscjrSkAmuWaPosK9jkdaU85PWuKofQYZo5HW0JLZ/SuKpqfQYZo5HWImLMS1edVR9BhpWRx3iCL7wJrzqx9FhZaHB+JLfcGJ6/SvJxCPqsFO1jznxTZ/eHqOteJiYcyZ9vltVKxyGiW0C+IH068H7q5UowPvXBhEoVuV7M+ix1WbwinDeOpL4Dml0mXUvAl1J++0u5JhB7wscjH0NfkHFmXPBZnKSWjP6A8Ps0Wa5PFN6o6ERSy8nOBXykqii9D79QURJIxEvNZOTlqRJ21Z9cf8ElP+CZWv/t3/FuLxT46sLmz+Gfh+6V9f1LBT7e6nIs4W7s38TD7q57kV7+R5RPH1ueatBfifK55mrw1JwpayPnf4GXQ0/xlaE3l9DvbaRp19DayN7edN8qD3rzMLKNGtdn0uY0quId4n7Af8E5f2iNI8Li18O6udNtmlCpIJPiDFq11IuMfMq5Az7Yr6vC4yMpKMfzPnMXl8KDu3+B6X+098Oo/Dvi1PEOi7G06+DS20pQHajA7l9OMnj3r3aMpx2OWFdLRI+OPjD4Om8HQXGt2GVZ9Huo3hjBKuVJcxnHYgZHqPpWteEZQWpT9pJ+6VfhNc+KdQ0Ia1o2ua/q2qWlqrT2ui5W0hVjkMT5ilRztCgc46E1xRowjK9/68iVGpKGq2Ps/9lfX/G1posD/ABJFlYh41McZO+5P+9kk7q6klYhyU07Ht/xJ0JPHHhGLV9EDrfab+8tjLGdxUdVPqCKycn0MVTu7HlEAgvoV1Owv2gh3/vYT/wAu8oPKn0BrfnkluTKnKOjOo8I+KPEGkSQ3OnXgSFCSbhJiSx4xx6UVKnPFRt8yJJSjZFX4z/s8/s5/td6asXxe8OQw6quPs2u2QEc5YcbnC/e59a8rGZbh8VHVa9zow2Lr4TSMtD518N/8EwPHnwG+NWg6v4V+MWoHw4dUe51W6ttansbZ7VRlYJBC4ck9CQynAOOTXj08lrUKjcJtJ9v6t+B21MwjiqMlKPvPbQ+sv2VPhf8ACrwzrV3ceCdE1nxTqTzM9/4g1QXRtw2fuwyXLM5VcYGDt7969hUaKs1G76mcpVuT3tF8j6Lh0rSpVCnQoIpm+80Y2kk9e3X3qZtR6GLu+pbW0kRDBLHJgHCuTytc0m07MEr6jorfcojmcbgTySKEmzWLfQnfS2v7T7CUaME/6z0rZJtco+ZU3zXuXL+50vw3pfkwp5siL3OTW1SpSpQsZQp1MTO70RUu9aSPSotQupFaCQYJIwY29DWLqq1yI00qzhHdfiQ2k1vdSsyyhkQZIB49qj2kVudjhJRLfiLVJbbTorW1jJLABI8clj0qJylKyRy0oQdVzkatsmk+E9KSS8G5yBvcrlmNdjlHD0rs5K3tMXUtDYo3HiDwFq+px6fe28DXcjDy0mhG4nGfzArCFfD1qij1NIUsVRpcyehPq+laNaSjVZp5UcALEpuCqA9sDpXVOEKUbhSqVJvlRnapqkUagC9nZimGQ3ZCqe3I5/HFRSq233No4d3baMfXfA6eN9BfRdL8WXNtOTmVBfM/HcZ7jn9e1aVKUa0ddiZNU/flHU0fhZ8KfDPwqsmg0eECSQDzp95LSn1bPelTowpR0OSvVlW06HXXF/a2URurudUT1Jra3NscsITnLlijn7nxkviTUH8P+HoS5HE8zZCqveqaVKN2dbpRw8bzep5D+018fNJ8F+Gp/h54BuVM7qUvbmFh+KKT3PQnt0qqFCVR88vuM6cJVp8z2Pgn43/FvWLezklk0jVEUghiNEhvVznuFOcV1VJKCsz3qMYQp2Z8EftQ/G7wDeNJpU+maA2qXG5YbabwjdWFxJz1VlIUHvzXjVowcr7nVTlGkrp3ZyPwt0jMUE5mdriQeXBvJJ9S3PPtXoYKFo2RulPdn118CvCGk+G9Ph8V+LtXjskYB4JJ0Yjd6nArLMqyjTak9Tow1PmbbOE/ao13Vm1N/EEGi+HtdsZRtkuIgZMejZVgyH618BmLkpcySkj16UeZW2Pn2a8S4dpYoBErHiNWJC+3PNfOtKUrpWPRpR01KrZeRV71stEaTdi6BiA4HWuaoyofCZrkiYkAda0jsRJdTSsCWQM3pUyNKdrFwA554z1PrTjyy0YSR03hLxTr0US6HYa0mlWBYG7eFMNKPQ7fmc+1dWHr4m/s4PlXUxqSUFfsfWf7L/xqh8EarZaNpiNIsq7JdPcb5JkYfM03ZRjtniv1LhfNI4SrCEW30svzZ5OMl7dWd0cb/wAFAP2HYNN1NP2ovgppd7LpTuJNf0jRtQNtM4xkp5iqSoznnHI4r9mp4fCZtB1Z354p2s7XdtOj2e66rS63POnTniLpaTW11f8AyPQv+CfHx10nUvCNnoeqWdvpNnMpSx0xr5neUA4OQ3zSNnqTwOmDT5KFWiqV7ytaSZpSquPuXfMvItftzfBSHxJdR+JbTT0VF2ExhcZUHoQOn09K9ClRpvDcqd
rH0GFqt0FHVu5ofsgv4M8S6hc+HNB+C+lJNa3YTUtSgDRpEEHJdwojkfIONhwB2PBPM6nK5cjafRW3PbowftZJJq2l9Gm/K39eZ9D/ABh8ceFdJ8A6nqOnXAtrS6titjcBMlygJbJHUE4HTvRl+HxM8YlUe2tj2MuwddV4KprKO6/LQ83+Dfws+Efijwxf+LNH8Kalq+ryyK13F9r8to2yfuDGVAznpivZxdbEwcU3GMXs9z6CrisTh68VOpGEH1av+pxPxL/Z1+FnxBtNQttd8R+PdFvbyPdHYafqLSW9yw6GQjA2juSOMdaU1i1Dli48j36fh+R04p4qVO1KacNLu9vw1ueY65+x344/Zn1+z1qDU9avtMvNMe2XU7SP7cs8kmfJXahUpyQNxJAHPPSssM8NNWoN3Ss1J267r5f128fCVVWqPlk79b6WPV/CP7AGtfETT4tZ+LXiWXRHbS4rSO30e8G5LcHLB8dzheQegI5q62MoQTUVeRVfNsNSXu3nPr2PY/A3w+/Zy+B1/Hpfwu8OQW+u3KSQHWonWCWcxrjL/LhskcsQck5OSa8mVPFVm6k0kt7HlcuaYxurXSUNLxt/l/w5m/EL4RfAHx14p03xbdXuppLrmhTWdzZrGktjcRFds+VI2oygllbIZWwy8gYKUMROMlJL3X3szanHM4wlCaTUZXWrT7r18+jWjOl8T+HvhZ8KvhxHpfg/wPsg1a2jt7HbcN9su0CEkyggEKozzk5B5xW+G+s4mu+d3tvpp/wRYR5ljcZzVJ3lFu6S91drPqcv8HvBOr/ERPEvjX40eBoL7wzPfpbaFp+qqYZJoRxJKxUNhfvY45GM4zkPG4ucZxoYeVnZ3aV9ei6FYvHYjDxVDAySqde2/wA/l+h6zow/Z78C6J/wifgSK28J2kVqXjvbGWKRn3BsxpySMfkTj0rzI0s4qPnrLn8tkeFVw+f4mXta69r/AHbOKXmz43/aY/ad8M/Db4pWHwr/AGf7G3E+s6qs/iHWdTfNxqDLHku8rHhQDtC9ATxgCvewtGo0pYh+/ayXRI9PD0qs5qeJfvPRJbJHYeIPjBbfELwGfCmt61JpsGq6VI97qz6iYmsX6IYlCnfk44ODz0NdSwM6MpSPSrwpUsPJ0r81rLQ+Xfifo8nwI+CT+GNR8bHVvFHiqdoX1SG5Z/Os4y3lM+7Hl5BVOBjCZwSTnzquIjgoyhVk9b20vbT5dd+2+ux+f4rEVaLkm7tnznrvizTvDGhnWbnxRHo1zHGUvJ9Cu2ukYDOBLlAVx7p+Jr5vEVYSf8Sx8Hi8VUk0qsbPsn+un5HyN+0b8VLrxlqjJB4n8P6/5r7VlsYWWYA9CQyqVPqMn2r5+tCpXq2TT/M8tUZ1attVc1P2avgBc+JrxNU1iJktkYMwcYMp9Bntmu2tVw+SYTmb1PuMqwMMNTU5bn0tDbW2g240y1gktmjGECKFx7YPUV+PZ7ndfGYiSmevzOcrplG5ncBmRcZ+8q8V8bVquUiprmIrVd7ZbgGuaU7KyCK5S0FUKMisndluSY6NQzbSeBVLbQaTG6pp8E1uzMoBx1FNOSkOULq6MPRMxXxjxgBuuK6+VOJ5/J++0OjkJcEEdetYNJM9BK0dRkYxw3pxnvRLUzcktj0P9nabWj8QrJNDldZGmABjAJ6+h611ZfD9/e5wYxOVJn64eKdF1Txh+xfFHcySLc2Q/esPlYcDnjpX6EpynSjc+IxHOq9mfMfwu+JvxZ+H+qC30rXRqFkGwbe5f5l9q9GGHk4pp6GanPmaZ9TfBn49+HviDZy+HNf05YGlj8q7sZsbZARg/WlGPIrWInB7JHwv/wAFJv8AgllqHgm7vvjf8ALI3ekTu0+oaVAMtCTySoH8q+bzHIfaKValv2PYyvGVL+yrM/Py5SWGZkmQq6sQyMMEHuDXyDdpcr3R78rLYgbBbAHWtbJIlRcme7/sk6S0Phy7viP9fqB5/wB1QP61/U3gtRjS4PlU6zqyf3JI/wA+PpS4tVOPlQT/AIdGC++8v1Pf9HGAFDV+vQWlz+TMRrdnR6WOQT17V001qeLXZ2vhgHy9ytxx8prutaFjw8Q/eRo6mSFb5cDNT0MLXkcrrB3FhgjGeazmelhrLQ5LWR94GuaZ9Bh2lY5PWVXJOK5Jps9/DS0OS1qPBY5riqRZ9BhpbHKaxEBuOK8+qme/hpnIa7ADuGa8+rFn0OGmcXr9soLK1ebVp3PpMJUehwHiOyDFmx0rycTTsfW4Oq0ked+JLd7O9W8txh0fNeTVpezkpH2OBkqtNwlsxfGRTQ9a0P4qwj9xdKLPVABx6An9Pyr5HjjBxxuGjVgfbeHmdvKMbPCN6J3+TPV9L+FF/wCPND8MX3wZN54s1HxALmK90PStNkefTLqGYxmKQgYIZdkgfIGHwelfkU8sxarQhTTlzLp01P3ijnWGq0pTm7Jba7n2v+xZ/wAEKvHfxAv7Txl+1vrR0HSAyyDwrpU4e8uR12yyj5Yge4XLe4r6jAcLTVpYr7jwsbn8qqcaC+Z+qsMXgD9lj4DLoHw58MWejaRpNmLfR9KsowibsYHH8TE8knJJ5NfWxjTowVOmrI+clG6lKTuz+ae0CSAIyAr6MOtfkk07n69VmlNo+i/2M/jVqHhHx1p3hXw7oWi6Tbu4Nze2Xh9ry+m56KeSD9SBXo5XWdOpqr+iuz5/MYOem5+wng/xFoHxZ+GkXg3VLkw3DWwaxXUbpPtWcfeMaklM+lfaUMTCpa2nqeGsPKDUmnb0Pnj4sfDe60+8k8Ka+pjdeLeZlyAV+5knsfu59DXU58+jO26i+Y+SPHvg7Wfgz4v1DxTpWuTozy2EU9hNdPFZmLyyomIQgsSQqhOm4t3HPHO8PQzrzvDlXU+qv2VfiLq+rCyt9W8LapZXUZAe603wsyynPZp7kkAe6itMPVclo7o8+NRwXvRPuv4YeI9INksBV4pXGHFxfCSVvXcBxzXSoyhqg9opvQ4n4weCl8F62/jDSYs6Vf5+3wbDwem7HqKz509/118v6/yOh/vIWe5z9npt/ZzRXdpdQPYyAeVJnAZT6+9aRlbU5pRcDqNL0jRIDDdRXkqsM7dpyrGtE4sh3bPUPhUJSUT7Sq7uTDcqDyfXgilKSitDOybPV5pte0d7aytdLVobgZZ44kEfPryCfoBXDVqcz3saJRcerNeHRmnQTQyQiU8EJHgcf0rhklNtxdzolOMNJIsrplwg3GIlv7ymtIU2lqSqkG7JkV7YRPGNzhXxjIHNFSMbGlOtyu1tAt70woIJOBjBZqmNRpWG6aqPnRg+IYdQikc20ZkUj5FC5zXNVvzanfScHBdznWg123kMJDJYXrskiuoxCx4BBPXntWKm0rPY0lCE1zL4ka/wusbyW5u31wstvZXDKskjf61vfgcD9TSwsZznepsjix9dqKUN2ehQSQTsJIbcMB0baP517y9m1oj56fPHRsr3+iz6ldLNNMiqp+6Rk1nUpubV9jpoYmFOPLa7CHw/o+nzCez0mFZR0lCZb861pUqUXdJIV
WvWmtXoS3WnWOpwi21TT4riLcG2TRhgCOh57061OFSNnqYU8RUpSvF2Zkaj8LPBV6HfyZrUucs1vdun9cVzfV6aPTpZni+S2/yMqw8B+HvBuqf2vY+Ob4AH57a4nSRX9umf1rqpRUdEjP6zWre7KK+Wg3VvHNpGxc3KcScLmtXBJamsaairnLeLviRHdXK2wv8AoAc+/pzwOvWqpRvsaQ5IaRRyvxO+PGgeCvCMvh7wreh725T/AEmdAQXyDlVIBwo6bvyq/q0py55Pboc2JpuVW7Pjn4u/FzRrY3WpX6TiTZiS4t43YKATgEqN2OvQd6cpQpy5ranRRjy2fQ+Jf2nv2pvCNpbzpb/EuCIzK223t/El/AykeqiDg/U1zVKsaiutPU9ONGM1dHx/4bg1z4leLX8R6rreoXUJlP2U6hdyTlEz8zBn56VzRp+0qabG1CmubVaH1P8Ast/CabxX4gi1q/hZNOt/lR2UAKi9z9a9NNYSk5s7rKcrI9Y+L/xO8FaRG3hLUNUudImRStvIIRLEy+6/xL645r4rM8ypqo1NnXToux8v+Mf9E1qY2ep2sqS5PmabI6xOP909PpXx+Inao+WV0z1aEFymIpDeg9CKxgdySS0GqhMgPr3q2Zy1ZeQgQE46iueotTVKyM90/ebj68U4NtWE7NFy0cDAA49PSlIyWhogbowTgnFTF2Zu02iS0kaGVWRipH8Q4xWi1dzCSaZ6H8J/ijF4K1OKN7n7PDI/74WsRkubps8IvqSfUgCvosmzSeErpXsn26nPVw3O00r/AKH3H+zR8aYL6zubL4hTae2laiq28mjMwZYkIxtd8/PNzkhelfsOSZ3UhJSnPfZLp6+Zx4ig017O/Muv9dDznxz+xn4d/Zc/aZHxw8FS2kXh3XITLFePGzCD+IooXIDk4HT6kDJr7+lVhjZe2XxdUvz/AFMYU1i6ntJNqS3R7X4lNl8T9AtWitx5UiNJOrdWUITu56nODn6161GnXhKDVuW/vX7We3ne2/S57mXUVUbb3PL4YPiBb+LrD4T6ZZ6iuh3KMLWLRpBZtJdMMgyyGNjIACMgEHBwCDzXTisM1FVqckuWzbeu39f8Bn1NGqlyeylGLi022m/dvr1Vm+j/AAex9I/tDfCyz0v4WeCfhdrfii7tI7a236m2lSLHdTIf9cGdjkKFzk5yM5NfPZTjK9bF4nEw3eive34HPk2Mq5ljMdiKTceb3Yt/D2VvV9Cv+yJ+zJo3w8u9V8f2nxV1jVNHvJkbSovEF1HNdQWwACwu2TkBQE6DA6YrrzTM5U8LDCqkufW9k0rvqvnqTnmY4nCZfSy2UOaovilra/dfnueYftVftX/Cn4X+LE0rWfD2lWrWkzR2WpW+Ukw5wwBC8A45564r0sPhZrDxqzqO7WzPbwcKuGwanUrSfMleL20/yPCvhB+04moeMNV09PHGry6b4l1KWOwLurNDax8+Z1wrlQR07gg+nVOh9ZoKK0na1159j0oYvD12uWKly7XVvyPWPEX7ZHw38XeHPEVz4bNtYX4lRLSRbkJPcQxEY39wQCTt56n1rGlg5wa5ne25y4eKgoJz5kr6dE3vY83l+LDeKrq68XaZrEssmkrMomkk2s0T+XKSMd2KhTj19jVcuF9o52u43Sfk7P8AGy+42VWbo2tZdvQdo/7Tuo6Hp8+kL4r1EajHobNa6lpwUpbMCWY+WVIAZSBkj+E0VKFOtKyuk+qtf8br70zopuhiI3qQT8u5o6J+07p3xQ+KlxrPjwRX+kaKEf7LJCskfl+Tt2DA6ksMjJ+Y13SoQjQcaPuvucqbVB06Pu+a3Lp/b/8AE/xI8Z6n8KvBulG8ht9RjgXTjEIFsohGoKhtpCgHnLA/e9AAPKw2EwkK0nd8yMsLh8HRm7R/eLd9X6m74S+DPwk8Xa48+s/GHXNL1q7Q/wBqXFpqjT28L7gVh8rykUocZLADGB1zkd9fE42lrCmpQXyf9f11OvEYnF0acnSjdaabXX3/AIF3UP8AgmZ4u8e/EWH4maNf+H9f0W0+e2ksZGeQtk7iY2O4ccYy3JryZ8QZdCajVTjPzR8zis5yyDUKt4T7NaffsL4T+EujePNUvIfjR4d0fRdJ8PTTWmjDUlNjcNOhVjPwu8g9PMYMAMhQMcdmJzOMf4Db5rXtr/X4F4iToUva0JOTetk7ra1vJenqeW/tbfsr/s8/EvVYda0z43atBbQQKt5cRBP7PgmwRFC0oHmfNhmUqATsbPA58yeHePg/b+7Lp3a7ny2YYatiJ83s2l/X9fLc+Cf+ChX7N/wd+BPh2201dQvdT166tllsLy11Bv8ASI34XypGkxIM8FQuRxxXhZhgcNh6Kkk+b0Pjcdl2IpVOfdPpofNf7Of7NmreJ9Yh1fXrF41eZt+8lsYzknrj8+teVSUMuoSxM3rYvBYWPOpSR9aW3hCLwrocNhpenq6RR48u3yJFAAOSOpHuOK/MM+zTFY+pKXNePRH0XNa3KZ02uifO9EY9HicFs+/OSDXxGIqS+0XRkrlKcxyIWXAPb2ry23zHQldkVqMtjbx6U+XqyJXuWo8kEdulS9io7jvL2MM/hxUK1zpWqHTDMLA+lbrUibaizGsYQL89zurXmdjhoa1tTZnOwY9vWsmzuqfCQJKHB3H6HFLpoc0Gr6m74D1a60fxHa6hZ3LRSRygq6Oykc9cqc1WHlUVdWFVlFRZ+wf7B/izWfij+zbrfhDxNcC4n+yl4H+b5l2/7XNfpuAtUopSPkMXCn9ZTaPjL45ad4m8HeNTPomqzWpMzI6Rnqyk8fiK9GnVcdEeXXi+d8pq/s7/ALVl/wCJJ20mXwv9nGmzkXWq3s21lAOMlj1rfnVRXZFGM1J3PtX4KfFzQfiDpx0CbVUu4bhdvmhQy5PGDnqK5nK6aiXOqlG73Pmn9sf/AIJR/D74satqt58PJYfDPjGXNxbQvxZagMZwP7pNeRiuHKWOTqU/dn+ZFLPp4OdqvvRPzd+J/wCz58W/gh4pl8JfFHwRe6ZcxOQGkhJikH95HHDA18LmVDF5fNwqxat1PqMFmeEx0U6Utz179mi3Wz8AWwOQ0s00mCOxfA/QV/YHhVg54XgXBqSs5Jyf/bzbX4H+Z3j/AJlHNPE7MKtN3jGSgv8AtyKi/wAUz2PRwGQFTX6TGNkfz9iNGdJpSscY59q6aVro8au0dt4XSQxHaOcZ6V2Tdoo8WuryVi9qbbo3ZF6tg+1Sk7aGVru5yusBstuNTKDO6hZPQ5TWc/MCORWE4o97DI5bV1JJxXHO1rHvYey3OX1eAkk471xTTZ7mHmjltagI3Y/I1yVKaPdwsr2OR1u1kbOBx6YrgqxSPocLNHH69psmCWGa8qtsfR4OtE4nxJpgAZ1T65rzKtFydz6bCV02kef+ItL8x2UR5J7ivKxVP3T7HLquq1PRf2Wf2XdT/ansde8F6i0tp4e0aGK51bVgP9U7SBYoI/WWRvlA7AMx4U14sqVKtSlTqq6uepzyo5nTq0pJSafzP2C/Y3+D3w4+Bnge08KfD7wf
Z6cscSCeaOIebK+Bl3fqzHuTXHVhhsPFxpwSWy8j9Ty11XSSmz6p8FwidAzHCgZZie1eVOTkz20rR1PJ/i18QbT4rfEVfD2nz50Dw4+biXOFmn9PfFZ0Y+0qp9ATTgz+fG2lKxjBr8lbXNqfq1RJ1WbuieMfGPh60lsfDPiy/wBMjuGBnNjLsLfiOaiNarS0hKyK9jSXvPc+xv2CP2p/BHwGvILO712J9U1N1WZbW3l1bWL9s8LuPyQr7DHvXsYDGWqWTv6as8TMFJ/1ofpNq+naH8evBcerw2xs9WNtvS2uHQzKuOjhc4Pt2r66i3VSb0Z4LqyjKyPln4+/s96h4w0m80PUrCKbVo7YxW1tLDj7XF18ssf4lIDKfXgd6qtytd2U2+W7Pmr4Ya1qvwh+JFxoHjKe2uoFlc2b+ItVvxAi9NpWGUEsp4C4wcDgjNYU6Xs3+JgqbrPVH6F/stfG1bjTbWPUdZktIXIEcUdlHZW7n/pn5jmab8FzXoSnCdNckvkVU5aEWnHY+sF0+18c+GZLC9V5UuIvl8+PGOO2Rn86zjTTfvGUa9ppo8J1vwbdfC7xBJY63cynS5HLWvPywt/gTWrcNkVOr7TU2dG0HWbGRUu50c3EfmxRKSUVex56nFTGLuT6nu/wk+GHiK5sINVv9XS0t5FHlwFV8w/jg4rOpWhB2vuYSVRq8I3a7s9gtPCGnRwwxXlxPdJC26PzyCVPqK5XFNam6xM4RtFWNIRWkcYSGAn6dazl7OK0RzKU3K7ZITGq8KcY7mkqqsaat3MHVVt/tBe3lCMP4S+M1z1ZRvc9CLkoLmRTv762kt0mkVo5EPysVOGH9KTqRUbjw6lN3js/k/x1NR7thoq6pZWnnNEMmPb1HfrV1Jc1LngrmagvrDpzdrnB678XdMug2nXVgXiS4LPE8GCo3f415v1lvWS0R6FHBwpt8rd+56BpDRatYxXrWhhg2ho42OCcjvXs0JxrJStZHi14ewm43ux2pa/NZYttO05rmUjhFwAB7mlVxjhPkhHmZlHDSq+9J2RnreeNLu5Ed5CqRHlorUHeo92z1rkq1MbOVmrLyOylQwVOHMnd93saR0CyihNwLW9dyMmP7W+Sf++v611Qo0ow2f4nLVqOcrXX3EOs69H4W0ZtTvdHvFRB8sa5kbPvtJx9amtiXTp6JhGlCc+VSR5f47/aBntITDcaWlujjMfnRHcy+27FTCdSSUprc7YUFS2PPbr4va5rdz51i10YD952j8qNPXDV30aiUr9DWNJX0L0nxR+HWk6RKPEonubxYi6b7sxBPVl4yR05xzWs5SnJJGFZTlax4J8QfjXFd661ro+q/MpZYoZJCvynHznIGR/9evQw9OKld7l0Vd6nH+KvinNotm+tHxfbYK5uJ5iWP0bA3KvvjFb1ZqLNZRhfU+Tv2pPjxc3Wh3eq+F/iobG7Ct9nk06Rbm3Ydcs4Vin4rivJr1Iyg+WWvp0MOW+x8GX/AIi+MHxm8T3dr488YJe6Zat5jzwW6AMAeSXQAN2xwOtckPaS0vod9CE6kUe8fs7fAfUvF93HJDYvFYIFad3TA8teQn1PU13UJxwv7ySul0fU9VUmoWifSHiHxPoHwk8GRw+DzC8CJtlbZ0bHKuO31rwswzeMrm9LC2kpI+ePiL400rxQZL2zv5o2ZyZNMu18xFP96N+30r4XHVKda7ue5TpRjG5w905YEgfSvMhE66cdCGAMXGelbXshNk0iEMABxU3ZKWpO2RBtHSpaudE/gKOCW49apWRhFlm1LFgR3wM1L2NOW5qKP3RI/IVnezNktBkT4Q5OfYVqjCauSRXcsNyJopWR16SKcEfjW1OTpvmTsTFu1j2D9nLxB4t17xPbWmlXkdtBb4E2p3rqsduvXjPC/RRkmvqMkxeMr11GDt5s58RVhShZJtn6IfDa68M/Fb4fTfC/UNQTVYWUNa3bg/LNjhlzyBniv3bIsSnTjO+255cp1HNVLWaMrwp4LvNIuLnS9VW4e7VjHKz3G4EKGwuCflHr+HoK+9VWnGhHl2PYy6m/bOpBb7726/Lrv169Do/hX8E/GE3iPS/FOhadqU2pWloPs2rS28Sae0hGGmHzBmI7dR83T04MRj8LDDSo4mon/Nb4n8lornvYnMMtw0JwxE1brFX5vTY9D+KnwV+BniDxRY3/AMT9c1fXNaRCPKstRMflErh8KCCQehx2PNeVgM2zeOHlDB0404d2iMlzziNYOcMBShSo93G99dLswte0zwCIJ/CfguS9jv2AaC3v7Y+XGoHyoJV4ByO+SM+mK9OisdGKrV7OPk9fPQ9qNfNElWxKTgt3F6vzs/0Pgn9tb9mv9pX40/FSDwe/wC1g6kzD7JrWm2hktbleeXdcqSMjLHb+GK65YzAvDe7VSj5uzXyMMRisLjUlSqKKXVu33o1fDH/BIf8Aas0nRLLxj488d+GPB0qQyRateXtxsZEPyhoooVCg7MHBPJPOKxee4C6hSbnLlS91K1193zerbu3cwlmODeJUcLUdSb3UI6fojzz4ufsyfss/DXw3e6La/Ebxf4il0a/Zn1QSrbwPdTCMSNGB85BCIBnjcv1rooUlJc9T3ZyW3l/TO9YCdCCqVE4zl57evQ5T9nmX4o2HxJk+H+q+E9Xu/DN/YmDTtTTRpCJFIYjzHVOWGc5OSRxngY87BYKthcXUjOTcJa6vb79vRaddyY1505clV7bHqfww/Ze+OPxD+Kj+AvBmkaxOZPDK3WoWcFt9mLOrusQkMgHGAQM9Qc4r0MVXwuBgqtaraL210uarHYXDUlXryUY6pN7dDu9X/wCCZP7ZfjG4NtpXgVPC9tbWqQQPO8O66ZiAxk2HkKCTuOTkAdOh/b2VTo2dZaLSy/P+n2OWtnuScrSxKv5K52/wc/4If+Jvh1JqdtqnxfkSx1pd2q2sknmSyTlOTHIoQrkjPfsOeteFRz3BYWTdO7bPMhxRkeErudJTlzW3f39upwHjP9iX9l79m3WNR0r40ftJeO0t2IuLxV8qEwgZ2qk7DfkntGRkY3Doa+hhicRLBOsuXlls5PXT8v1PVnWzLMsE6lBJU20029dPPdLXbr8j3r/gn78UPB/xLN/D+zpY3tt4T01Atxr+raz5tzdsDgnr8vA9s9uK8vOMPSpUKdSu1OU1olZ/et187d9jhzONH6opYv35bJW0ubP/AAUS/bF/ZN+FvwzfRPijBp3iG8e0eKDRZjme5l7YYNuBzxn3rz8BgcVQvVqy5IP+tjy8syzFYPmr15ckW9En+Fj4F8F/BzwB+0hZar4h/Z/8Z6pDqM1mG1PwB4g1eSSFDtJiEEiYDbSc7eG4xk4xXvQw8a0nNSdmreX3G0518Vo3aKe9tTzb4o/Az4s/DXwtceDvF/g208Ya9r7lHbWtPMy6AVIw9qpz5Y27huPBxzXHmGDlhKPNF86b69DyMzwFOtP2qvZdupgT+CxpHhGez014YioYXckIESzN/EybQAFBAx27CvybivFyqU3ThpffsedSpKmko3su5y8eo6rYWkdhdTS
PGgDQGfPmxH1VuuPxr8prValNcrPRjGLjoiHVddnv18m7CyEEESyRgy/TfjJH1rza2Jq1VZmfsUqnMiorGUjHA9a5LXZ0qSiOiG1uap7Ca5tSe1JaTjFYSZMVqTSrzkj8MVKudUVYdIhaBjnt1rZSJmrxMmxX/iZEf7XWtvsnDBctQ09QQjnPb0rByTO+VmjMEsqtyPqM1poonFKNpXLWn6hLa3KTwSbXRsq4HSnGXLNSQKMZKx+j3/BIv49apD4iPhXxFNdzQXKeUs1wFC4Ix0Br7DKsd7yi7niZtQhCnzI6H9vH4TXOieNb1Le3CxzObi2lC8Z619RRi1ufP+2hJXR8d+IfCt/fa/bXVlI4SCbzL2wjOBJIOhIyPlJ61c78yOarKpLY+j/2V/inqmhTRHXtBuDqstwFg0/ToyY4kHd+OPqfwrZVIQV2jklGUlZn2t4ts7n4tfCZdbs4PI1fSo/MhY/eKjquauniXCfuo5q+E9rBxZ41r19oHxA8OHw78TNAttUtihVXuIlZ4T0yp6jmvfpYbDYyKVeKafc+Ixrx2DUnh5uMl2PnbxB+yPa6Zqclt8MNUtY8OTDY3cixKwJJAVzwOvfFfteUY7CYXLoQjG0YpJW1SSP4S4pynNXxHXpVXzylKT973Xq77vR/gc/c+GfFHgfXG8M+NNAudMvkUN9nuo8b0PR0PR1PZlJBr38Li8Pi4c1KSa8j8+zrK8bltTkxEHF+ZvaNF+8BFejSTufI4hnofhTSZ1tv7QSQBQMEHvXRWmo+4ctGhVmnWjsnYs61axLG7w9C+QCOlKk5O1zlxNKMZ3hscjqtqZJGVvw4rWpojaleNjmNV0u6Zz5cJb3ArinJN2PXoVYpWuVPBfgGbx74/wBK8Fhmi/tC/ihlkC58tGYBnx7Lk/hXl5liFgMJOu1flTaXc+nyfCzzHHUcNB61JKN97Xdr/Iyf2gPhWPhf8R9a8LaVLNdabZanNb2GoSR4FxGp4bPTO0gnHrWGW4uOY4CnXkrSkk2u1z6LH4CeVZpWwjfMqcnFStZSSej+aPKtV06cyEFCc8jiitbY68NUVtDJTwhqmvSyW+nWTyukRkZUTJCgZJrzqkU3Y9SGK9lYk+Ff7L/xj/aF8YjwJ8KPA9zqd9jdOxAjhto+8ksr4WNR6k142Z1aGX0+evLl/U+xyHA47OKqp4OPM326er2XzPZdc/4I+23hWxutM8efHPRNX8SvYO1l4c8NXqwxC4wNqyX1wvl9TyFU9MZGc15NLMqVeCqexlyd3/lufWV8pxOXVFS9vT9r/LdvT10V/vPkL4ufsK/tR/CaWSLxr8BvEdsgDFbmGyNzA47MssW5GHuDXkYnFUqzagz3qdSrhKlqiaS8nY+lv2YvA8/wM+GvhP4Xyad5V7eTf234oHlHe93KMQxtxyI4sADsXf1ryq8/ZJRtruz6rh+k8xx312/urSKt/W592/BrRZdRu4YYk8tnAkMMgwQD3IPT8a8vFUoykp3tfXT7tf8AJ+T7H7DhIcsOaRq/H79oCx8HaRL8NfAWopJqEq+XqF7GwK26nrz615lSTnpHY6k3XduiPnXV/if4X0LSjoM/ixoIsMzogGZZD1YsWHJrpockIWR0K8absj8fbUgxqSeor8akrzP1Gp/FZpWyKwHH1qGtCE2dP8Ote8Q+HdejXw34wt/D7TNifVJkOY078qC34DrTw1SdKr7rtc4cXS9pFPdn3h+yT+1d4K+E5s9B8N+JL/xFqepMAsl3ITd6pJ3fYTttrdeTvbGfrX0+ExatpO7/ABPMrYSUEnJWPuLPg/4s6JHNrF5bReIprVZYoYJ/mTHKtwPl574r36U4Tkjiqr2afstW+/f9PuPlz9r/APY48QeInjvtO0iBrtVa5nuokx58iHckqYGA4+bcO/BHOaK04yTj1Hz14cqgly2d+9+lvxvsedfsh+K/FHhTxdf3Pj/VjZ39hdmOXULo+deSJgYESn7i9Rxgmpwn7id2ziqN1Z33ufpP+z18SrvxxaxixxFbAANJNP5k8h7ByThSeuxckd66pVOew/YqlC7PSPih4I0jxXocmn3KxyzeX+8AXOyko3Zk530R4t4Y0PxCdYu/CGoM6x25SSG7RiGVFPAx35PT3raTUV6HTStHVn1J8IfD3igQRXd7p1xcQCMAS3swQ/VUHSvOlySdwnUjBtbHp8cUMEQx1785rVcqieZVqSk9CHfulxHZtjuw4rCVJylZIiE2viYy8+yWsYaZmT3BNY1YRoxPQoynUdlqcz4iGk6ihM7ucHCyRygEn8xXnSrQbPVoxqxVkvwM3QfCfim9uStnrUclju/eC9G7A9gDz1ojh8RXfubBiMVh6EFzr3vI7nRtLOkWpgt7nzh/dYbQPYV6dDDTw0bbnhV8ZHEyvazK0ujaTPd/a73w1AZP+erxq1P6vSqSvKBo69WNO0Zlq5Z3hMNpEGJ4HOAPr7VvUpe5ywRxQnLnvIZa2dzp0eY4lllfmRy+PwHtWNOhKlra7OqVWNXS9kS3uqXOmwrJHpM9xlgH8gBtvvjNbzlKEb8tzJQVaXKpJepYW/t0txdXr/ZwennMAaUq9OC97T1M/Yz57LUR9T08R7mu1ZGHDZyMU+elON76FKhUlK1jwH43eK/Dll4zlj8P6Vb38S2zS37SYJVsgYUtz36DvXLBR9o0tj1aVOooKM9znH+Eo+ItmPEHw+1kG7WPcdJvXO0cfw9q9SFGHs9zVVPY+7P7zyb4ht4p8GWF1beMtISHUIFby4XjEQf05Y/N09qqK5OplN9U9z5NuPG/iY+IJ9UfU1vnklZxDeWyRMCTjy+m51xjBJwM+9d9OTSsOmlBtnnnx0/aB0mysyuv3raNeEFYoJHMaMcdNzjaPTBGK561WKvcOZXuz4O+JVze/Er4mL/wjpFreGbfJd6ZI1uUTPDSCM7HJ55B59K8qVBVXodeGoOT5kz3v9mD9lPUvErC+1iKW30YRB2MikG5Ktk59QSAcV1Qn9WptHs06ShDQ9+8YeNNB+GejDTvD1kIoLaIGSGIbXYD+Iep9a+fx2ZKW5cbx1SPnf4m/Fs+KNXGueGNSa2lf5ZQnKXC+jr0zXzWOxEZR5ou56GFXM7rQ4uW7aeQyuACxyyqMAfQV4Mvfk2etFOW4p3SDA/PFaJWRt5CW4Mb5I69vSpkZO1yaQncO/HWs73dgWjFaTMJyB0zQ3Y2lrEz0lYydO/FHNoZRi0zQtVJOT69qhybNuaNzSBJhC47dal7lKQwAqMgdferUiZJESoS+H7/AJVs/eWhg3Z6HZfCTwz8QPHviq38P+DpFjSH57m9up1htLFO8skjfKoHqefTJr2MnwuZYrEpYd2t1btbzOWvyw3Wp98/ss+J/BHwwji0vwPqsniK7YqL3xTOjCGZ+628TclAf425PoBX7pw7haUKfIpcz6silRlUV5Hs3hzwx8cvGfxy1GD4XeEliSaBHl1i4QiFd4wwHTtnI75r7zEYjKsHl0XjJ3S6LfQ9hYrKcuwvtMZOy7LdnsOg/slfFfR7CKDx/w
DF86iFsAkYicwmGfPLqFwPu5Xp36V4P+teV1p/7Ph7a9r3Rwx4vySU+bC4azvu1e6+ep4l8aP2CPHuveLD4r8LftGXui3VsCwhe2Vomc9ywXLc4z6j0r6ClxHQrU0pUW42eisvT8en5bn11Di9V6UVGm4ra0ba+qZ5D8Qvg5+2P8Nkn1Xxd4eh8a6asW6TUfBN0RdbfVomwQcc5GemK9rLMwy6vQnzz5JRXuxkvid0rdtrv5WNqfENG75k1fSzVv6+RheGP2+dc+F+heV4c+Nt5GLcmG78O69ZeVd257HLE5IPGABmjEZVlmLrL21FXet0ehWw2Q5lBSrYdNrr1+djz342/td+O/jx4XdvC/xRtrrXoVdZrC4ukMN0mSURSMbjk+gNaU8FhsNeFGKUfLcbp4fD0fZZZDld3p8u+579+xR+wDqGmeF4/wBpD9oXwlp2veONUdbxrGeFUgs4+qhYV+QEDPRcV8/iMwoRrfV4ya6X3/E+YxGZQoWwk6jUtnLfXtdnulp+0teaPNdeFtN02ySSF/NjNlaKRhRloFBGQ2Bgj8qipkFGo1VnJ7W1f4nHV4bpYicaknJp6av8f1Om8LfG7RbCabxPpdxFJrdzYtPfMLVUVolB2qHHOVJxg881wYrI5V4qlJfu07LVvXroc2IyGpUhHD1F+6T93Vt36trz8jybXP2u/H3ijxjqei2+qzrEXt4bdlGVd3JLomOpAHJ7bvavYw+TZfhYql7LWKu3+n9d0ev/AGHlWDpR5aabX9I9E8BfGLSPHXi6x8Eya0L9tOKfaro4H+kZ+4M9cHg/WvMxWA+r4edbl5W9l5HnYjLo4bCVa6jyt9PI+ef+CgPwe/YN+Jnxk0hv2qvGfis3FwCE8MeH7orBcuvBMwXHQcA+hrKnTzDE4OMaUI22Te9jvwWKzZ5XChRUVFd29fl1PHP2jPHf7Sun/D6P4Df8E2v2S7bwN4TaMJJ4k12SGJ5APuy7clye43DjrXp4XKMdCmpuoue33DeEzHEWhOalLdX+Fei73W+583eB/wDgjv8Ata+P9ZT4m/Hr4qpq2qzXSvFcSXBcKCecevsOlVTyqrF2xFbmZyvLMTRrc1etzWPpz4c/8E+viX8M4biy03xLPHJaSRXLQG7MFvG6kjzZNo/eSAFsA8DOPWvUws6FBpRno/xPVjXw0MPyc2j6Lqz0e21Szk0rUtR+Olql7pCQpbT6kloDM5Hyg7j2HJPbFdNeEZUnGn6s4amHjy8tHffc+OvjF4b8Alpofh7K8Phe3vH+z35s/Il1hgxKQwRj/lmgIXI4PWvxHjHkqT5oR5Ka6d2u3kfOewrxk5VHdtv5a/pt5niHjJ5b/UTbR2eLiNP+PeEcW6DszdzjtX47j8R7So0kXCeljm3RW5I44wfQ15L1Oq11cFIQZI78jNK1jmk9RytuGc/Sm9jWnqiezJzz2PQ1zyV2Nx5WXJAQc4PQZqVozSD0BFBgYNT1uVN6GTaJ/wATIgD+Ku2K904Y6zNi8VWTBGTgda5JJpnXZ2Ma6QRnIUdeK2itNTKduUrxTES4U556VTdonLC6mepfsveNvEfg34taZq+j62LVY51MrSzlExnvXdltWUa3NfY4syp+0p2sfrp8UNI8OftK/Ay18SaBqlte6jZWg89rZw2Tiv0ChifawXLqfHulOjKzR+dnxr8Pa94T1warp0r29yshhm3Icbs/xY7Hn869BWcbvcHSk3sdp8APi/8AErSERtflSK0f5GgaI+bdgHorBevsSOO9VBOWxnKMOdI+/wD9l74gWfi7SktrbTHtkZdsiSDOcjoatxUNR1aaSucJ8Q/AGp+GPG+paVMUWETGS3x3Rua9XL605Rsz5jOaNOU+aGzOQ1XwvaXCSQlC0ipuj74PcV9vk2YVKD9nfRn4H4h8NYTH0fbuCc11sZWpeArb4leFpPCWoEfaYkZ9IunOTazYyACeitjaw6EHPUCvqcNi3h8Qqq+fmfhOZZBHMsG8O1qvh8n29GeN+EEuLi4NrexGOaGUxzxnqrg4I/A1+j0ZxcVJa3P57zPDSwlWUXuj1nwP4W1jWZvJsInkjQZYKOlTi8VRoQvLRnFlOBxmZYjkoptLV2N7V/h9eXELtaIXCjLY7GualmNKHxaHr43h3EVE5UdbHOap8EviLLYtrlr4K1Ga2BH72O0Yg/jil/beWSqezdaPN2ujKjwtxR7D2qwdRx7qLt+R9HfsffBa3+HngfWNW+Lfww0i8m1WHbYRamgMypggg5B2A9c9a/IeOuJKdbHU4YOvKKhvyvRv5H9beB/hZXweTYnFZ9l9Ocq1uRVFeSVvPb8y/on7Onwjl8Y2PjnwVpkWk6tpcLQ3WlSgMLpSTh427kDA9eK+fnxfj8ZhZYbES5oyd0+3qff4Hwf4fyvM6WZYKl7OVJNShunfqvNB4D+Fvwf8RXN/4S+L/gqDU7eS5na0g1GP7krIqByRzjH5EA1zZhnWYYSEZ4Wo4uyvy9kfTZbwNkOZUpUcww6qLmlJKS2bSV9Pl9xwnh39hD4M+A7TUL/xz8OE8Wa7e3brpVnbF0tLODayoWwfmbkH3wM104zjXMsfOPsansqcUuZ6czfU+QyXwYyPIqVT69B4itNvkXNJQiumzu2dP8E/gN8EP2RNA1zVbnwFpuveLbuyka6WaESW1jBgKsPzZySSAfWuXMM8x2czj7zjTjbbRt93Y+l4Y4CyXhSFSUqUZ15J/F7yiv5dfxOT8U+PfGvxc8PeJNVv/h0lsukLEnhyy0CxEEUqsuAXRMb9pJxnp+lelhKOFw1WnD2l+bWTk7/mcWKWY4yhXl7Br2elNQjZfcrXPC/GPwy/aL8daleD/hE723gtbGNYYbqwKLM7EfKrDjOCOuOlfZ08fk2Ew7vVi0+lz8izXIeOc2xUn9XlGMYq3u2u+yfcT4QeGP2+fgv460Tw74U1i/s7K/V7fUNO1y2820hJJwDyflK7TnjBJ4718rj8VkeMc5WW+jW7/LU+w4a4f4yyuthlTlN8ytUjNe6ndqy1d1y2d7J3uraXfuf7EX7MVp4a+LniX44ftCXVpcf8I/PLPcMUDQLLztC54OOcfhXx+ZYuUr8h++5Jkby+leSvbf1PP/2wP+CmPjnxH4o8S+A/hRoel6Rp+oW8ds+sG0U3NrbKT8u7HDvnOOwxXiUVOc9WfQSjKSXZnzhpXxD1bwpZfa7r7POJ/mlvL+TPmE/3uOK9GmlTNvZ8sPdOC+N/xNs7rTZJLnw9FMskR3T6feLtx7jPOK2U09UHJPlsfntaOSiivyN/Gz9SqfxWa9iCVAbr2FY1JWRMbcxfiOTgdRWNubc1tGOp1fw5+IfiL4d30s/g42NpfXpVJNUu4t/kqD97H8WOoXpnHpXXgsRPCNqOzOLFU1WScdz60/Zt/ao0f4ZtDeN4gv8AVJdQmHnS3c6i+164B5Z2Jxa2qenU9OSePco49Q0i9X07v9DkqYFJ3l95+gfwi+OWhfEi0k0fxXHaTXDWKy6jabP3dhE33QcjKsewPOBnAr2KE3OXvv5dj
MVlmIxdLkqfetH8mtUcWMw8cwav02PrS7/bg+AH7UGnw3HjPT7zwdqs5XfpOrIrpEzKQQJV4YbsYPBGa87EYCtTp8qRFPLnCnbc8C+M3wX+DHiMPfab4w0q5jnt55IjHcJlgg3MMZyCFINfHZhRxEXawo5c5Jtx0R4DefAP4TaJd3T6h4ssljR8JiQEncgkXp6rXmUcNiKr0iy6WXUprRnP6z8U/gT8IJE/sfQbnXtQABjjVPLhBIyCzdSM16MMixeIXNN8qPRo5fQoO7R5F8TfjR8TvjLeRi926fZRyFrfTbNNkcQOc7QOM9yx5Ne/gsJSwFpQfvLr1+RjVp03eMVZPp66swtG8HQQlpNVnESJ94sMbffkc11KpKbaW5tQw6ptNlX4mfGvRvCGnt4S8GOkt5OoWTC/8AoRHYHt3rqhh9eaocOZZjTo/u463PMTcXFzexy3ku64lk3ySHue9dEZJz0R5NGM6lRW1PQPEsH2rwTpyGPKpcja3v3/pXpUm022j3qiiqS7nsPw50S0uPBQ0d02iQLJhlx+NflfHuJbqQSN8OnKNmWk8EpFMCpAAPFfncqzkjd0EldGtbeHHSIEEe/NY6J3IjTk9yzH4eLDt0rCU9Tf2dhjaCFc4I4z3qo3krFOkuUlXRV28tg1pZIxcLCHSU3bQR+dCilqXGkmrlmLR4/KP0rGTtIuMbMy7CxC6oUDD73NdNNc0TlqRvUujam01GblueMVnN2OiKcUV5NEi3A7unrSTbGkmIdKjAzxT5SLO4w6ZGOeOadrDlBrUrXOmxZ4bjuKFcIpSITZxrxt5quXqRONiK40q2uF5ORUXlFk8qaMyfwxYrcb3xx04reNSclY5quHg1cc1jbRjYgH5VXKuph7N9Dsvgn4c1zWPGdrbaMSi+cvmyF9oUZ9e1b4apL2qUSZ0ouD5j9qv2NNNvz4L0/RdN1eOWOFV3QWThsnHJZu596/QsDJeyTufI4xqlKyPs7wzcPa+FY7f5lYcKsnXP1roqSe5xQSnJNhp17dTuwHHJyT6UoNtHROMYnJfFb4R6B8RbJ42hEEqrjz4mwc/UVjWhfbcznTvqtz5b+Kn7K/xQ8OpJL4c8R3EiyElYd27P1z7VwVKdSD0ZknJK1SKPBfG2nftFeBo5FeHz2jGUQwHAH17GslPFQ21Omk6Tdle5434m+PvxOsp2t/Emq31oWBMnkwZC/jWMsTWjK8z1vcSSe5lwftBtaahbSwfFHVIJ42Db1vRFtPUEn09qqGOmtbmroa7Xueky/tc6T4xtpLTWdfi1O5sLfM93Bcg7gByzkcV0/wBoOUVpuJ4enZq55t8W9S8K+I7qWOS7QmPaWhkmXbGzYwM+veuKrmEVKxgsts3Jni3i7xX8N/AymXVL2KWXABt4W3FWI/i9ulZ80pu7NJxpUVdLU8v8bfEmXxPfC5uWD2K/8e4iAKRj3WuarTmzL2j6nL3+twMm2xVOTgmLgEV24SlazZLbaucZ4jM95evJOc46Zryc1q3xHKj1MDSU4XMuSwUKSSPyrgpt7nrOEYxsMOnKsWNoB9aKlRnI4JtmdcWQDYHrU+1cUYOlzGyIFEakdaptuoelKym0y5YKSQv5GqlFJXM4wvIvMwVQSPrWd4pilaDuWtNCseDkE0ptNHRTXMrs0EGG6Vhy3M56MnjBGD1FP2asdMV7tyRuBkj061UEkyHoS2yCRsDv6UTbSsNNJHpnwI8cWXg7xXo+lvoFrIt9frDdSW5nhnnViMRyTQxSSpHnGREAxHFenluLrK1OML/mcWNklRer26H60fsBaH+094x0K8+IXxvu4/Bnhqy2x+HPCOlKbeNUPd8jfIzZ5J59SSTX2OBhiGm6rsux8PjvefLDXzZ95fDmyv73Q0leeQRuv3p2yW/D0rsk4vSJ5riqesi3qfw607VZjNJqUiMGDDYgAB9aj6u5bMJYvlVkg+I1ppV/4Sk0TW5Ml48Rz7ejDofatG/ZLUeGlLnclsfM3iiGXQLt7Ka1DuIyiSDJEi56U5O8DpW90YWm3E+m3e+OfZcyj55yThF7YrjaSd0a8kqjVzq/hR8Tta8Iay+rvqaw2ifKySsT9o+ua1p1Ixd2aPDQqrU1vjrongf416XJqumxCO8eHdcWhQFsY4Ycc061SNuVIzlhnCNkrn51/tSfsmWqX01/Y6UssY+aaMRj5l3cjjvXmVLRHSg7aq58v+Mv2aLOfxQ0dpbxpYW8fmoZosMgb1PrXK6q2NfZTb0MST4CXF9rqvBpkg+VRGqRYyR3/SqdVRg7s7aWGlUlax6/4B/Z18P+DbU+JfiJcJaxJ86RsgE0ueRtXvz36V42KxSndJnu4fL4UkpyRd8T/Ejwrc3KoNyQxriCGNRhFH9a81VJVND0qdVU9EjHHxW8MxjybPSrxlz8xENWqcrbm/tEMk+Knh5n2TloBnjzVwB+NVGnO9jRVI23K958RNBjtpb6S4URopLMDw1d1GjOTRnKtCO7POLbV7nxZr0/ia+BXzWAhjP8CA8V6+HgqZ5cZOrV5uh0OkX5b7R5bAKF2klulejOXLA64x7HkXxH1G7i1aaG7tirhyVnRd6uPpXzWLm029zqhDlszjW16W1mN08Y3AHa2MfpXnSfPK5rCooMw/iJeXXxCEcV5ysYGPwohipYe9nuedjMJHGzTZyk/gu5toswXDAj0NbLHOeljCeE9hH3TW8EaLeyyeXJOTg4OTXPXlF6tEYdVZSs2dsvg98gGXHHrXBKrFbI9SGHne4S+BpGXPn4rL293sdLpXRmy+D72CUulyTjoK6PrF42OeeGlT1RZs3utPfbOeM96zdJT1TIjWmtGa0MqzxYJyPap5LG8Jq5Q1CxaB/OhPXuKcddGKtTuuaJNY36uoRmGapU9bmdGpJbmjbleOM+lKSR0crlqfSP/BPfw1oN74w1Xxro6w6j490q1YeBNIupPJtrW78t3OpzyupjCW6rlUcjdIy+mK+s4XwNOdKriotOpHSKb7/a+QqmBdenzuaUVv3fkl5n2H/wTo0HR/BfxW8Q6hffFtfGHim53Tavd2582CB3G5x5xx5jFs8jqDX6LwZleGpyqylW56stZf8ADnbGSrUXSUbRsvzPoDX/AAd4o8UXtxfxfIs0+BfTgr5K9yuBwOvJ7mv1vD4qjg0oxld9tD35YnC0KEYdUtl1LvhPV/DU+qzeCNEjmMcER+03UZGLo45wSefc0q9PEKH1ie76djzsRKvGHtpfLyOb8T6B4d06++03VoUS1mEwmLLtt/fp8zflivTo1qlSmkuv4mvNVlBO+5pfBrXNO/aFsdc8TaHYacul6VMtqur3ESm4u7heCc87VX+7xk9RXkZlP+ycVTpXblJXstkebj50cvcISu5S6LZI5H42eGvhpFr1toqiTXvE19GYtJ0KymJXZnmaXHG7pz0GOK6sJPE14OpUXLTju3+hMKNXE0W5x5YLqzxH4w/sp/EPwei/2T8a77RtUkZAbCzKy29vnsd4Jz68jgU6eHo46LqUZNHi1MuhXd6ex8xfErW/23PBur
S+F/DXizSPFpVGWSSK0ZGBwTyykgV59XL8zhJey944KuBnQVo6nzl8Rv2j/wBqjR7r7d4m8AaVK64S5eO4bc4U9NxXnk8D3rz6lbG0Ye/BGMVilLmjG7PH/En7aPxvgkkl1vwfHareT8CKQiW4YDgdjt9/SvKxOYYulFe4d31vE0oa09WZcv7YfiDXmbUvHo/syONtxggUuwQLhF5YEsxyT2AxXHTzWc9KqsP67Tp0+at7pyOofGbxFqAgfSfEF6iRYkLzb0XLLl844wc498DiufF1sG56tdxVMYqkP3bdjL1L46azDJd6bdajOt2giISTdlSqgDIPbFeVHH4OpL91qjyP7SarOC3XQh0n9oTUEjNrf2C3IIAVXj3Z+mf5V1wxFCvE9WnnKaSaHXPx8kkhEOlaWimPkJ5YXaR7Vyv2d7RWh59XN71W4LU53xB8X/GusqVS4aONydwU/MBXdhpRjryhXzPE1KWisZelPGf9Ku41kaQ8yvyc+9XUquRw0ubEz95G27SXRgeYoCpzHz1H1FXQjeR7MYxppHqltai88E29xNuPkTqWxkgjPpXZVqRp3d9js0qQPY/hxPZ3/heC+06WOS3xtV1Pzhh1DDtX5NxvWp1ZU+Vnbh3FrQ2yhLfL+dfn91E6HPoTRodvX6g1LlzArWuOVvX8DQ1ZFuSsJnc2MVCkjNTfMK/yrk8VpdjqbkaDLbgOQKpN21CCZYDtsOScYqbKTNZbGTYf8hQnuGFdELRjZHDTbdbU2Z3CtjPH1rKSV7ndU+EryThnCk8HvTWhzwbvoBIxjcePeplI0krK41zgHtx0p30KlrAz5JR5vXv0NOLuc0bpjZ22jOO1NtGlX4SETqcAeg5zQ4pmNNu5FcjdkZ/HPWnTsmXV+EqpaXN1dJbWcDSyyMFjjVcliegrSS5lY89zaloer/Cr4Ba7b+O7HTvihdz6FBI6M0LTeW0gODjg08PTTrpSY8RTnGk31P2S/ZT8N+F/h78KLR/AUE08oiAiXzcgnHViDz+NfpODhGlh0kj4rFycp+8fVXgP7fJ4Ct5dS2/aXGZdhzg+laSg47s46MZKprsX4p/IsG2tiQthmP06UlLlOyUE6g+CXAIlUKiruOf4j61XxImaKFwbS/8AM1C9iUomVhiK4z71laz7mUotNI8n+IvgrRdf1FtKj0+ExRwl5ZmBOe+PehyTeiNINQjex83/ABS/Zz8J3sTy32hI1zcKfslqkYAYD+I+g+tcdWkqj1RTnKbuj5E/aR/ZZ8PaTNBJFZh5Lw4htIRksO59cCuWWFUeh1Uq85NI+cfFnwAtWjuLrw2Johbz+TcqhKkP74qYQgjaT0uefa54b8QadBJbyavchi21/wB6xO4HI3c++ayrUIN3COIfLa5yV/pV1eTul7IzTp1aRs71pxcUjOpKUmRWltLaMy25/dMcOhP3TWcveYQi3uQ7QrFVG35u1dVFcqLm0tEc9qDPLcuc/wAVfOY93xLZ72BcY0UVdp3EHjHauZtRR01JXY2ZmEZIFZJ8z1CEboyLonBy3PrVNXdjCrJQ1NnBCjmt9Oc0xDaqNFmxcryOM4pVfhNackWpo5ioUHIrlTSFOKeqLulr5SAMaGnKWhdGa2Lxk2vn+dVsya2jJbebJ5ok7RN4P3SQyEkg+vWpgnuZ6tk8JdO9U5RtqP2dz0n9nLVvBmj+L49X8ZePfG+jtHcIunWXgLSVmvdQl6+X5zsqwDA+98x5+7xXq5VUw1NOc216HJi6cuSyjc/T39hL4gav4xia4k1e6hstPiA0+y1DWVvWslYg5kf/AJb3ZJy5PC5wAACK+rwVf65L3W9D4/Ma8cNC0lY/Qz4LappWlxroN/r0X225XKW812HnkIHJI7fTFdseSFSze54teVSpC+rR38uUOK6lK0jkgk1qUtd0O18RWh0+4bYCPvYzipqwdSOh0UaipM8Y+Lf7OHja8t5bzw7qscyYyFztI/SuGUp0t0dtGtSk9T558YaN8V/Az+VqOhQXAjfKnztpb6+tc7qTknY74yptJHnuq/GXWIZbiHxfo1zp0UQYxyPEzgnoMbeBj34rlqVKi3OuEIdGaPgj4+aWyRXun6+0VzBEPKj89WaaQMCXfPTjt0p+0vG99TePLN8rWhP4y8Y6Z4tmZtVQsJrvfMRwo46AjrXBVxElKxSw1No8i8T+GfAghjkNo7vK8gnBAChAcIo/OvPrY5paI6aWDg3qcdr+saZ4feYaDo8EO9/3UtwoJVWG0Afqfqa43XqTv2PRo4eEXscD40S/8WXUk2pas7zK2YVJ3gqM8H0rBpp3Ouo7wscvJ4ctbWHzbqIAx8ATOB3/AJU6d29CYQcVdHJ+K/H/AIF8MMY7/X4HdVP7mA7uffArsjTk9zCWJhGVmcFrXxXXW1ddJtAIGOFlmHH5V0Rpaoj2/MmkZcUmoahIqz38jQqQWt84T64716MLJWRhN8zO30CLy4cL2HY9q76a5TppuPLoaPh9yljdTMhbO7MYbBIq6tROFjppq8tDy3x9fR3mpSCyuCDzkAAMPqD1r5vEtc53taWsefa7KsUohll+bdgbhzXA+W5w1XyVLMhgxEuSa5KiudVNXV2MulBjYjgEcU4e6Y4jZoPBTkXzjPGfSt6qi4XZhg4xc2egRozAPnjAyMV5kknseyrJFnau3aR+NY21I5kVpbUM/PHcVtFpIJO6sZ2p6R9oTIHI74raNTkMXRjJXMWb7bpcuBkr9KG1J3RyShOm7svWOox3ibJGByO9JSdzeNaLVitqFlJay/aIM7T6VspqS1ZnUhy+8i9pGoxSgRyt83ua55xbdyoYi7se+fsm/sifE/8AaFvLzxraeJU8G+CtGjI17xrqybbMDjMCcgyyEZwi55xnANfZcKcLTzepKtUqSpxitLJNSd0mpO6skru6vqkmrO6iU6jr2grvt1PuD9hLV/2ZvDvxtg+CPwOF9eTWOntcX2sXsrLJfhcfvCq/Kinj5SemK/XsqjlOC/2XCu87atf5n0kKapYOdRJKTVmfTXxY1/V30+40KK7aCC5lAkaHBYL/ACFfcZXhaPtI1ZK7ReEo0IRVVxvJI4/9ne70rxF4m8R3cUjXFhpEC2jsQVQsclkT168txkk1257FuhGlHRz/AK3M8yrKNKCjfml+Bwv7WXizxDNpr+GfCkJFzq7eRpdiC3zuc4yAP17CvUyijSpYdzqSV0nq/Tb5nbgqPJRU6mpc8C+EtQ+DPw0s/g94bvorGOOJ73xFfRhjHDI+Wcgkku2TtVeSc5PQ1596dbEe3mrz2RNX2FWo67jdvRI6X4V6d4L+DdjqXju8Rr7xJqroReXyBpI4xnZGD/BjrjtzU46licfUjSWkFvY4MYsXj0qd7QXQ8A8e+MPiD+1z+0SnwC+GuqPbW0JNz4t18crY2xJzgngSP0Ge3Ndsp0cowqTWvREypLB0vdex1Xxs8BfCD4CeBLv7Vdw6fpCQi3iuZ3/f3LkhWYZI3O5PLEgKKqhUniKXM/n5GdWjGdLmnufLS/Dj4V/tReOfFk/gSP7R4
S8BW0FvqWqpbMy3V/Lt3KvBBEYbk9Op6AmvHr05Ymsk9lf8NTx4VaCs3F72tZt726dO72S1eiufG8PwMj+M/wAUfEXiqOxkbRrQ3cGmOqFUWG3Us7A+pwff0rkjlf1mpKbV10PRjgqlaq520XRniHiH9nC/+IHxt0bwFaCMrczS32oOpCpFaRAs7kngfIMcnqa+D4unQyXB/Wqj1Wy7voj5XO4wnVjCS6i694F8EeNviJLd6JeRP4L8JaAda8VzQzk5lWZ40tMj+J2EKDqcPntX5tVq5lh8HGpWfNVrv3UndpXa1XRqzdn0afU58HGli6jTuo01d+bWyPJxpF94mmu/HF5tN5qF88pwvAJ+YIPbHA+lfa5bltOlgopbhg8MqsJYmS96TJZNAsrxBLGoCzDBC8FH9a744d01cqNPmlsY95p88d0YpeLpDjfj/WL/AIiuazlN9zF0IczdveRLZrbOfs80YDnknPAPr9K6ISnsEZe2lyWsWY4jbsZ47fIBCyKB0PqK1VKTd2dLVPDr3UamnpIs6H5SC3+r7rz+ldVNxVkY+1lJns/hGNJfCAMhAiSRDkckfNzxWGOX7id9rHsUnF0bI9k0y3trXT4orWONUZAwMSAB8jhuK/A8fOU68uboz1YRUaV0TKQWyOPwrznqQtWShtgyR+tJuxrK0UERDnAxwetS5Noz1kKoHmYAoirmkIai3H3cdPrWkSavxDLYBmJAwcdat7GkWrE4QGNiDxjrWaepcl7pjWTY1c/7/WuuK908+m0qpszk7ySc9sVzydtD0J6xKYT9+Sx4NCbascysmWHO1Bx+VZ8rRbdyJ1JGDx70SZp9kzpU2z9OB0NaR1Rzu0WFyjNHwcHHWp2dipNSjqVIVw/zevFU23sY/AxZsEE+lVFuJcrSiVTJLFOskE7RupyroSCD9RWim73OOEffujvfgxceJ9X8fabGjx6hK9ygUalIZE6jqM104SMp11YyxVaKjeZ+1HwM+y/D/wCGumweIdX0+KeaJDb29muFLEdNo6/Qmv0PDVPZ0UpM+Nr04VZ8/Z33/q59b+AhcyeDLJ5SuWjyx2bRyO4pqTmrnPVnH2mhasra3C3G5SwR87W6dO1SnbctzloyKJ2k4m5VvmeNew+vai9ndmskpLQrazcrHA11LBhCpjiUHpnvzTctDNRclY4vV7iyv9WWzECfZrGHfcyBvvsegNZxknKwnCUI33ueceKp7KdrnWntYlnkb7PACM7Yx94/lxVOS3HaySPn3WPBmn+JdY1bxzqVvEy2ytBp8QiwsSgYB/8A1VzSnKpdlfBFKJ4BD8KWtD4ga/4N2xuEOzjcO31xWNODu7lOpOx4j8XPhMhv7u1tLfcWYyJ8mGBAGR/OipD3QifPvjHQIJnnMQMc8DbZFI5UjviuHkb2OuCTZzUdq5+Z1UZ4MgHDH3rWNNJainU7GIY2W8eJlwFfqKvnsrIVP3nqc9eMBcSE/wDPQivmsQ+avK57uGVoJFYtknjB+tctRaHU9GQzk7MZ5qYm0DIu+MkDvWietzhxWzN6VTsBHpxV3bmdWJTVRsfZuwbGBk96c03AKDRpKwkQDpgVyW5Xqayukyxa/IR1zmtbqxFBXkXHIIz696lF1txLdyW2mipG8TSm1YtxIJACKmOkSpRitS3bW89zdxWNpA0s08ixxRIMlmJwAPqalU51JqMepi6krXPXdE/ZJ+N2k+OdC8OePvAfjexjku1k/sbQ9P3zXcjD5MfMAvGfm7CvawmX4ulPllTbXc560nWotRZ+kX7D3wI174T2Qi8d31n4PjNuqxWU1+jXsaEkiOK23sVfH3pW5b2AAH01BU6KTvY+YxeFqVIRvrufen7MVx8OYbuSx8I2KT3IjYz35YzSf9tJSOWPoOB05rqpWqT5or5ng14uEPf0ev5nrkmfO9a7m7HBHW9hkrlTuYgVtBrluy2mkPktUvLcrdOVjPVQcZHvWFSn7VjUnB6bnB+OPhtoXioSRaJpMbuAd1xJ90H+tcNSlraJ2RlOK98+dPjB8EIHmuIZbeK+kVCWiVQEA9yOgrknDl1Z34ecj5q+I/7PvhmS8e+i0a5tTFGS7QsI4wfYjk/nXNUlC1kjqlUlN6M8A+MV18VfhpZ28nh3xBeTT31x5el6bOwczP3YjsoHP4VwVfdkXHETR5b4y/am+L+jmXRb7QLW4nsyDJINwGTkn9QK5KkU4nVTxE1HzPN/Ef7VXxhnt2mGnWaSC33AeWzHIOSOT1rlhS97c9D6zVjC5zOrfGX4v+IbIX8HiyaKO5XdCIFCDI/hNXOmpoiFapVerOb1DUvE/iG3W+uvEl4Sx4d7pv3b90bnoexq6SjGNjrdSUY6MrLfaxFIIdYcy7RtaVkG9D6N6j3rWLsjlUHLVmrZ2ciuGikAdx8uR8knscdDVxm2W24Rsjf0ZyX+zSRHKkbkYfMn+Irtp1OVWZMLykd/4VRJYRGpywQ4I7iu+nUbR3wp2RN4fuGiguCRIhDMBKBnn3HpU1al46HbTUYux5h8SDaXOoy+ZZp5iA7trAFvevAxLtK7OpXkjyjXoJDqCyeZuTJ2t6VyU5x18zy69O1dMtwuDGNp6DrXJKLuz0VJco2+Yrb468VnGPMzKajIXwNHJ9tkcevGa2rRfKc1Jckz0OFsRjjnbzxXDNWPRu3ElQEnk/jWW4opyYMpGTtFaWsaSjYjQgsdw4+lOWo1oVNSsIbhSGQdOmKSbixSipKzOb1DTbiwcz24JAOSK6o8k15nnVaE6bvEfY6zHcx+TcD25ocHHYiFa+jEkgaKQT25yPak530Z0KmovmR6T4R+OXxWuPDWjfDK98a3s3h3RLiaaw0SeU/ZomlOZDt6ZJ7nkV7+W8T5tgKUaFKfubNW3R34fFOnNqKWvl+p97f8EVfCM2oat42+OR8KWum+ENNg+yf8JBdIFL3Yb95EGbBYAEZ7ZIFfoHCud5diMTKjGny1NDkxGZ05YmNCF3Un0/4B9geObW01iSW3sCNsytIzbMZGPve3Ffs2WQjh7yS1k7vXrZL9Omh9Tg/aKmnU6HKeDXtNAt5PBngi2Kx83F/JtOZJCfujnp616eLbqTVSr8vQjE04c/tKnyOd8Ri4s7qXxM7pd6nKHj02QLuW1VeGcY6ntn2rWnBTtHZDUqtamoR+E5nwbqOo654avNU1qSe6jvLl0SLeVKW6HGT6NI3HHRQea3qQpe15Y9F0/wCAdEKXs56JqxzP7RXxYk8MaO1/ZTI+GaVYIzg3UjZijjT/AGd7dT2Q/hvSjONNyW/X+u5hib0oWXUv/wDBOjwzoug/Cjxh471y9t7iC6vJJNT1QA41ObJB2EgExADYvqOcZNeHm2GliJ0qUoXdTdPt5o56icqdOlFXm/wR418SJR+3L8Ydf8beILtz8O/hsC9xCG2QXl4AQkC44IBxn3NfQ+yjgadPDLXm3NKjVJRpLVo4f4r+PtQ+Df7L9t8LvhEzaa/xB1I/2vqdtHlYLd5QjSDPZQTzxzivHzBQc0qa66WMI4ejGTqTVn0RL8c/hd4c
+APwS8LeAvBkhuF1qwaaa7jwcQLAd67gOrN8x9S3oMDqowdXDzlFWUFb1ZrjYuWH5krWPlf9nnwvZ/FMfHDUNNeJtd034Zr/AMI9YzXEUC25edPNcyS/KgUKAc46jmv528aMfi8Ljcmw0k1SqVU5dk+3zstfI+JzClKdTRNng37HHw1uPiP8BPGPw6s7MMbi5XUtXvJDgzGAN5cYbuoJZsdyR6V7WQZPHM8e8VNX5VaK6a9TbIsJRqZRUXVvX5dDh7rwn/Z3hjWLezUmTT5xPFx9wo5BGPpxX1VLBxpUJw6oypKSpSh0RSg8NLqOjya9pik27hWlQZ/dlsH8vQ+2KxVP2qOyOGU6anE57xbpEt/am/to9txbnEh3fxdj+PSuSrhFD3up52LpRUeZbmTaQQ6xpy6nCdkittlQdY27/ga53NfZ3R5ixKrx5oqzW5raJb7ioXJnAKyK/IZf/rdvWh4iVjpoN1H7w6CFEvhb5IdXxu9OehopyfNcxmv3tj3D4cWl1H4NnntbaIzRFZIUnAKmRWyNwP8ACT1rkzfERpYWTPbhScqTPV9HeVNHtorlVEqwASqgwqt3AHYA9B6V+H5nOMsXJx2Z6FP3aCiyaB90nTp1zXmsUdyeQfusH0pSZ0TV4hAgJz196UVciNooczEPg9KuNkVB3ZHdzbY+SOBQpJMira4tlKrgMv48Url0k2ixIQsTtu4xS5rM3a90w9OfzNWOBkb66oytA8uK/fmzJ95ua55yuehJ2RUXe0/PrTi7Iwskyww2pj880m2xppsiYtszUyZcnaOhmyNI8hGO/FaQaSOdx1uOkb93g1nL4h3VykVYScnvW0Niamw9sc1Mr3Jv7lioWUyZzzWsYO2pz+/sjs/g74g8WaN4qtpfCtiZ5PNUMDamRRk98CuihL2U00zmr0VUi+Y/XT9jv4U+M/FGm2Pizxp4rtUlSFXjtim4RjHUK3Q19tgYOcVKTufHY1yb5UtD9APCVv5XhW0jRzJsjwGbjNejKcbaHDGLvqWIPKZmHQytzUR7nVO8V6FHUWgRzED5KAHPHL47VNSRvRT5bszNflt4bFr+5R1xH+6j68+tSn7o4+9Oy2OP8QrLp+iF0QwJdDBJABcnofwpNqK8wuvaaO55/wCP7C006W38M21x5rQW7STSIudgbqSfWlJSclFGbu5czPMUm0S5sb7QhfvHCjKJfNXBILfepx5YRZdTlT0PMfH/AIelXTba38O2wkuYdQkE0XQtEGycf8B70lZmdm5Hlvxs8FfYPF9tqvlRwwXNuWRVOQrf7Xp0qZRu7jipcp8nftGfD4WWvXHifw2oVpCVu7QdMg/yxzWM4a+6PmadjyWaxje0kn8raCvKYxzXPOEky5NI4w7vPdn6hj1qFsdFK1kcpLJ5kj5OMua+dxLSrs96iuSKIOQxBP0rnnqjqtciuGwhI61nHc2ijHu5epJ71o1ocOJtZnSyqWiyB/DWispHfiI3bG28gBHIyKpvQ5Ke9kX7U84B69656ljs5eaOpcjG2Tp3796iLFTXLItM4WLJ9OaHKz0HVQy1cSOAvbnNVJrlCmu5fgz1zg4rJS0NnFNkz7JBskAbI6EVUW73QrJGx4Ij1DS9UXXdFjENysscCam12wa2L5HyLnLNjOAK6qMpqDfM90txScY0nJR2Ptb/AIJ7/DHwx4q8fPrrahcTIZFtL26mu3d5ZFbIjcsx3SnO4wx4xkbm4xXuZbThCvzTk3fufLZniOVJJux+y/wJhsfh14VtdPuXttLsSOGvlSO4nY9PlXAUDoOp+pyT9ZKrSp/Cl8j42NCq48rlKbu9Xa+r20SWmy0vZatu7PUWeOVRJCcg8hh3FaRfNqQoOEmmMfyIv3s54Xpmm2r3ZpFORnXl5LrLmzjk8q3X/WN3Yeg9KwdVzlZbGsaSpLm3Zg6/4kvdVuB4O8FxbQFxc3e35Il+vrSb9p7sTeFDlXtKjOa+IHhXSdA0eOzFqZ5rghLa3ViZLuU929FHWuatSUbJBGq3fseW/Gz4GweGdFjvdfu0kupoy8qL9xB/dA/SonQjTj725th63OtD5fufgoviM6x8XNds8okf2TQYmTHkrzucccFv8K4J024vs327ee/y/wCAd0oRclZnyjrnwli1u61jUp7bCtI8iEDJIEgUZ/EGuL2cYpnY4xjFI831r4RQxXl/o13blZEfz7VynDI1c/sbApOWh5te+CP+EN1i48O6pH5dleSbrKd1z5cv90ntzWU1yG1FuMjE8S6IfC9w08kH+jXgCXcf91+zD2Nc8lK+h3pOSuzNSF5IHguFR5oBiCX/AJ7R+h9xW1KE2veLm4qGhNZw+RCbiCykkgJG9c5MZ9/Qe9dMYqK0OfS2p0WgvDdHJyJE4DMcOvsfUVrB3kXTlY7Xwt5izImQGJ6g8H2r0aXwndF3RWtZriC8vkt5WiZZSybm4B9ff6VniLLY7aKa3PPviHImo6m41bSwjKMtNAMY9G+leFVqc87NG0Xd6nluuzJa3RUjcN2DnvXPFL2iSVzhxTadxum6hFcriMjg8isqkXHc3oSUojdZvhDEVYY470qcLy0OetUVKRpfD6NpN0xPWlVbjGz3OijFNczO/tsAD6V59TU6201oWvLXHTj1qIlwSRHMQAVA4x2olK4VHYrofm3Y70k2ODuhtzyNrcVpZGc20UriFHQqVHTvSjeMrgvejZnOazoTqxntSR64ruVWL+I46lBR95FfTNYFs/2W+cL7vwKwlDmldbGEcQ78rPcPhb+znqtn4Lt/2jvjda3ehfDuO4Q2LNAy3viaUMMWtkmMhGOFe6YCKMHqzYQ+/luS4irSliZxtCGr01ZdHnxeJeFw7vNrfpH1Z95+DNN8T67deFPgDoXwFh+G+i+N/EB8a+M/DuiXrvb2ljAqCxsXcAKZJWXz5AAMhl4GcV9VwNkTr8STx1ROKdpW2W2it0/A+gyDL6WDqutOp7R0YtKTt8T3a7+p9G+P9cstMsLjU7mOWKKNdkiod7MQOEAAr+hcKlNpR3Pew1Kc5csXucJoDeMrzw1dMwfTX1CNoxsTH2O1JyTuHLSH8+fSvRqzpKrG+rRpVwlNVbt81jm/iD4j0PwZ4JvNbuVEawWIit1d/nMQzxn1PU/U1pJyaOGrVknyrY4bwT4u1i5/ZzHjHUJ5li1bzJrt5piJJQAyxQxkcxRhTjj0+mJpUISrt/dbTfd+txUufm5222vu/wCCeIwal4t+OWm+J/i14jVNN0HRCmlae1sxbyYyG824bHIcgOF6YBrulFRqLmdk0Y05TrYl83R9T2D4nfEi8+Ff7E+jeFfAFsbXUNdtA9laqMNFHLhIARzg4O4nrk+wFZ4Kn9ZxrrX0joj0KdKUKsqvyOY/aD8PD9m/9jHwh+zJ4GUvrXijyptZuScyTz3DgFmPr8zNk9K3y6jXxuOnUjq78sf13PKpzqTrTqS2Rw/7RvhnRrj4e+Afh/awJ/aN/IplKNuc2dvIxUtjpGCpYjjcXHYVFGjTqVJxk/hdyqkMTOVnflOu+EHizw9+1X8
MJfCV9NDDqHw/SSy1W3nwZri3MLCKZWB+Undk9eeOK46+IrYbF+zptcrfvKzu+1ndW+5/qd0IUJUJRlqz5e/Z8+H3wfsf2vfE/wAL/F2va5pug+I/BF7b694i0i4iUDT1XMkKwSIQ0jnADlgF3EdSDX4F49xxiy7C4rDwUpwnG0Zd27J6bW369ND4rN8JWlVTjK0Nb2Wrs+/bdPTro0ZX/BMXwD4Xv734ntpXhe5j8NadY6hFZaZeXYeXZHG2P3oUB2J+YcYOcCvueCKOLw2SxqV3+8bV7Lv5f13KyrmWXJRTSvoeA6TaaN8QfFWrXWl25gsdS1W6tTFKuCm9fl4/3h0969qveUpu250YelG0mtbnJfDzSbvwpeXVu1os8dncPDeWhHE8J5PHtyR6EV5WHjaXkgpw5YOJz/iyPQ7DxvJ4dETQidCAGY4lhPKsD6rnpUYitT9vyM8qvVpOuqOz/M4HU9FufC3ix/KUCGZisqkfLkdD+NeFiaTpYi62Z87Uws8PjXJbSNjSLZIXDEbRjKjGeP4l96FTdrs93DUbIa9nFFrzqjBELjtkEf8A1qdKHNM5akLVz3/4dabeS+BJXsITLcRJ5nkjgzIOoU+uOleHxK3DCNJ7nuRX+znbaDqNprujQ6pp8u6Nk2sW6hhwQ3oRX41jY8tQnDVfawt2LED+XNjj6VyJXN07S1LkzZjDDpinKJ2aSiFsSRyPpWbTiZS0ERHklwc9fSlewU20xL+2k8vd/SkpK5clzdA0u1K8MMmnZsm8ouxeuoR9nckfw1L0Zsr2Of0hV/tYj/arsirwOCaftdDelCgHAx71zzjZnXb3blAKRKBz161rFKxloySViy8+lJ2Q4qzI5TtjIOKxk9Rt3KOTuL7a0gu5nPREEsuAR/KqktDLXcg3BjnHPpTgrI1l8JBLK5cqp49a1SVrnPdkRQls5wPepcmxNxgd98BNW18+NbTRfDevanaSXNwokNjdCJHGejGunC0XVqpXOGrU0Z+zv7J1r4y0fwBbi+trpHaFVW6ecSM34kdK/QsJQ9nRSZ81iVBM+zPBTXMvgy0acgt5Qy3U1ckoqxx1FGNVWLdrLHvVlGPmxkmpg7mdROzKviCRLd1umt94RTgleM/WlUjc2wycoNGPqiqY47/U1Vj/AAoGxx61lzWVmVZ7I43VFuPEeuR6t4mnEVhZEtBGGxzz1xWfNd++KSVOFoq7Z5/Y31pr2u6t4omhRrS2TyLGAvkSdRn1P1PpU4eo5VHNjp4b2cIwXRdzznxlBay3FzZ2tuA91bhpwi/NGd3Bz6YrWclLQUo2sc54T8NldfvL3xjeLtRm+zSngEAYrOleN7lSjZHB/G+08N+JNfs9Os5II4hGImZGBLdeGA6deDVynd2RldnyT+0B4UOn67c6NLcu11GGEZBGWQcj2OPQ0Qd2VBa33PA/Eek3NhbTNdRYV+pUYGfWs6sbsc9UeYzALNM3puOc5rmlZROihukcXIX+dh/ePJr5StK9dn0tKPuIYmRyTyaxm2zXm1sR3eCuc96UGawMW/BDZNapnn4rW51dt+/hABHTilPSZ6dRxlNplU28sdxuJOM9MVpfmic8oOnqjUsCFQHNYODHGrJlkOWYHP0qnCy0OiNlqWpiDCV5wetYJ+8VuJZJg56elW1damc/dehdjkIIx1pKKTKjO6sSpxyDVt2Whd1FXOq+EFv4B1Px9pVh431qezQ38T+bHC0iJGuWcsqAs5IG1UA5LdRirwdP2tbWVvxOWrUhKm43aZ+mn/BOWx8D+LfiWvxH+G3guziGnxfYrSXVblN1gAcDybSMlLd2xlnkZ5nIJIUcV9tg6Srq9lY8DF04ezvN6n6S/Drws8mqx6z4tuhNOrH5NQcHYM8MBnAJ6gdh+Ir1JQhFJHgJzdC8otSTf52T07rXv3s9D1ZJEdd8LKyEfKV5FbxcXG6PKbanqUtUu4S4hdWJPXArKo0dVGEmrmVeQ6rqgNpbKtrbfxyk/MwrmUpX93Q0hyQlrqyr4WdJdRlh09Fj02wBMkueZ5P/AK1VRqXm0tka4lOMUn8T/AyfCR/4T/4q3fiK6jP2fQ4/Kt1JypkYdfqB/OtIS9rU5l0MsTH2OGUe55/+1JqF1resxeFbeRRJeyrGoXnavp9TXFXl7ary3t/W3zLw9PlpKRwfx50rTPC3w/n8N2mES0tfmTGBuC8/596mUVCmzppuTfkfI3gXwAniTwfqutWkfmRCKR328gZk4H51yU4RnDmO2pJpWPNPiN4bguTZaxa2uPLVQ8qJ99CdpB+hrnq2S0Lg9dDjfiT8KdL8S6Rf6XqZ25g2q4UZjfqj/wD1655U4zjqdClZXR4RpECatb3ngLxgHe90xvKZwoZtuflkxjlT39K50nTfKdEK03omcze+FoNGmfR9TDKkhP2W57A+me1bJycSk3a7I7KGexLCVwssShWkCbgy/wC2O49xWcXJPUdlYv2mm29wwv4JER8g7ojlc+h9q6Ias0gjsPCyl5kUoNysN4HfnrXp0tYnfBNMqSyXC6jeGzu1ikZzhJlASUenNcmKcYt3PSimo6nB+OJrN3m+02LQzKv8DkxZ9Rgd68KvVSbaJcopXPL9cso74iMRgehziuONWXPchxdXRlTTNPisDkg7h3NXWcqiTuZQg6UrGJ4yvpvtAij4+bFaYd21OTFQfNqd38ObYR6WryAZK8GuKrKUqjud9GcVSsdlaElVJ64rlqPU6FexdjUBOT1FS9jW9iJzuOAOe1JJtjcbogbCP07+laqKRnB2ZDdMTkE/gKbuVUtcqsx6n14oSHoo6Fe6iaRSQOvU4q1YyknI679m7xR+z98NPiFP8Qvj18Hr3x2umWRl8OeGUu1gsLrUAw2G+b77QL94onLHg8Zr08urYbD1earG9tkctXCKtScac+SXe19PLzOtn/be+PHif423vx08XXOj6tq19ZCyh0zVNIjm0ywtlIMMNvat+7jjiKqUUDAKgnJJz6dLOsTSxUq0Hq1ZLojqyuf9lU5U4Run33v3utbn0d/wTM+IvxY+NP7WOr/ED4j+NdQ1y6GkS3OpXl9MzIjsVUFR91eBgKBgAADpiv0Hw2r4vF43EyqO6sr+tz0sNi6kaTpR0hbZbH2N460y18T+JbWCA5hjmMzgF1CydAzkDBYDGFNftWFlRwilaNnLfTfZX/TvZdke7QnUjR5r6mV8QtWvbOD/AIRmS8aO2OQR5hSRlHLOxGME9AOM5rpp0KOJjLzXRtfc1qvVO5UJ2fM92fNX7bXjWXWYrDwX4Xs1NzrNzb6bbWLzYLea6q33cHhSSQOg/Gt5Xo0uR6tnHUppNwV7s6D9qvUtL8AfCyx+Gnh6A2MOmWiWZwVLMdi7mUHPOcgcZyPxr0ctwtVUeZs9PB4epTw2rvc+e/jf4o1X4d/BXTvgD4Ms7y2vPFfjJYdfWefdIITsZ0bgZO3cCSODms8V7T3E9ZPRaaeZxYh8knKDbk9nufQfxQn8M3t3Z6veafLFaWFtAwtpJeF2xpHBCpOAvOWOO5A70qU5YK
g1LVpPbqzqoRrToOE5XZW/au05PG3x7+1zxg2nhbw7FcxxhcxwkRqF9sgv+ZFcuDxMqWCT2u3+JzUKTw2AXeTZ4r438br4p+I/iL4laZcJLp+g2DeHdBtoY8jeIgJJOmDjceemV+lduFwqlKMlO/M7u19O1yoVXG8bdNzifhlqniP4G/trLF4dv47jTNf8LJaXlldr8kz+WTgleAcknvRioQqVtev6HmR9oscnJe6+w/4GXPhTXtX+LH7R3jSztNKsPh14ZvFv4pHF1b6kzrJGLW4iI3ASMUA2FOVXJIyp/AvGPMeapg8vjFSnUkrau6tJO/rb8PPVeNmuIw9aSg5SjyXelveTTVndPS7T92zulra6dX/gj14wuPiP8O/GfhjVbGOC91HTJ7iEW642RgZCIO6qoCgdgMV+q5FJvLKbm7tWWuvSy+4WWv2+CjzX0aPFfDXw+t28S+INImhSC+07WGa4UptzMJSyPjsHU49MkVriadqsonrqhCldWOb8e+FtJ0v4ga3NbI0FpdSpLHI+VMTEcgn+HqTn2rz5UIQi2efVSU3JHkf7R3hA2+k22sW7L9v0iQL8p+/FwcqR95ST26ZweleDmmHU4KrB6o+dzqjJ041orWLv8jiPFs0Gr+E7HxPCGJLBZWB+6ePzrGVKWJoqoKq4YjDRqpDokCaS0+zMvlBkcD+Neen0pVaLVPQ7aSlOh7pAl0b3UBdsgAfa4ArzleD0PKk5KrY+kfhnBNa+ELW+tomLQ/OVU8lOOR7ivlOK60o0Fc+goTTpI6DR9OisdWn1XSo1Wy1TL3ECcCGcfxAdgw6j1r85zGnCVJVV1NKOFVOq5rZkt5uhcPjgV48bJIVaPK9C3bXAuIMZGAKo0oybViSAiM7SaymXNdSWB/m345zwazSuTTSZNcusoCYz6ZoUWtzR+4ri2gVDnA9xV30HG09US3jD7Mw77azteRc1yxOc0pR/apx/eruhpE82Dcqhuz5HK9+5rCpqd0k1AoEsre+eaqKaRzU7X1HkqF46e9RO5pJohnfdGwyOnFZWdyYu7KZcjB/KuhLQzqMrOy5OePWiSbFTV0QklT6GmlZFytYgkkUPnuetUtTkd72Rc8Oz+F7bXrW58Y6feXWmJKDdW9jKEkde4BOcVceVS1F7JdT2rwj8dv2aPCvim2n8NeEr/T9PEq5tGi8yZv8AtoOa6aFenTqJtDq06Lp2R+oX7FvxdufiL4bhurDT5LfTTEpsrJH3yMPV/T6V99hKjr0U0fG42tGMmj728Pfu/CNsXQKRCOi4wcVo42jqeW6jnV0GW88e0TSdQcjIrKLtudEk72Qy+mN3ahrsDaGyq56+laTfu6jp2pSstznNR+33mo7VsmlSM5k+X5R7e9cm8zoSjGK1OV8bNaXaym8k2Rt8hiTjcM9AKyqOLdgXkcR4x1DSbbWdO8LaREiQcMIpIxmQ1UZWmoouMZSvI4fVdMlm8X3ySq32t4iJMLhQoHA47Vry/vLMirZJGHotzo7aTd6fqczSi1dkMTKAynseaqMUFk4nmHivwXp2rarqV9phdZotplTGC6Hr7ZpOMVIxlFo+Y/2ovDunanq4SOZ5THGf3xyHT0zjrVXitgimj518c2dzpWnXFpcysxwMg45HrWUle4SPIriLZBcusYGFbAbqK46ySizegnzo4R3LgljySa+QqfxGz6mm7QRGchcUaWBayILmT5BmktDoiZF4d2c1TRxYlaNHS2shhC57DmrlC9S511vdm2XI0S6XHf1rRLlRUZKroOiH2RsOCMetYzknsZVI8jLFrPHK4IIz2NLm901o3bLkzlY+RXP10NG0mLaHIO3tWsmlEl6lmBsuST6VlFvmIi+VllTn7mOlW7WNFFz3Oz+EviD/AIRppYvDepaboWq3+6C88UandSMILQj54kiVTgsMgsPmOcAjmu/BY9YaLjFJN9fIc8LzK6Z9Z/s5ftn2Pg7UNB+FnwWOrzpbzbLnxbdaUCbZXIBSxsUIhgJI/wBZIxkb7zNX0WHzpV5QoxT5U97fkv6ueRisPThG9R2P1K+A97P4m0W01bUvEk16AoN3JcTnhyOQgH+tfnGQSBzjpXu8jdO8j42tXbumfT2gPB/YlstnbSQxCIbY5AQwHvmt6NlTPKmnz7jdXeJY8ZIJ7qOawxE1ax14W7Of1i31S8g+w2crwRuR5jtnc/0rhd5LQ9SnGlH3nuF/b/8ACO+FWtox5KCMs5b07k+5rZXpwscvNGpX5iv8IbWTRfh9JrO0yTX1xJPjGCcnCj8gK6KLjChcnGN1sQodjymWKbW/iYdd1CJZI9Lk853zwpGSR7npXFGF566rudjUlT5Yo8u/aXn1bxjp97ZRhUS8jErODyAXII/LFZ1oqZ0Yegk1c8svfCMvgLw1JZaJF5VpLZq06YwHyQeAK5uWMNIm9WMWeX+KPDY8LW8N1qsWbRryS2unA4QSAEP+BNTKFNLVhBrY53x74WtrfTTqcjrLFc6e0czRnpIo4YfzrGfKl7pfMj4/+N/g/XbfXrT4geGr82l3EyoLkEmOZOflk9PxrnnRc1zLobckm7ostaJ4z8NHU9RskjuUUrPCTgFh+hB7EVMZJxOtXhCxxrwpp0gkeSY25OAQMvCfT3Ws5XiQtWaOm6RcQzebBsKt8wlRfvD3AroopN3OqCs1c6bw6pE8Y27Srche9epDSJ2RZk3ckc9xexyzxKrZ4lclD9dvIrzsTKMZM74NWvc828aW9rHNJcNb2zD7p8u4Zj9cZ6V4OIcpNuwVYSlqzjLp1kfgZwPSuaEHe7FFqxBIoEZb8qJzdrES1Oe1e0a+1Ddj7rDrW9FtRsznqQ9od54QIhsljH9zoKyqxle5VGNtzqLEMyKW9K45LU9CMdDQRsR5IOfSspblN2IGVgST69atWSNFqtClNOyyHb696pMwafNqMuHOQzDtzSUkaTs4lYyJxz+FO+hCuKAQCuOo4qHK7HN2WhClsWlOfWrbaWhndI6DwP8AD/xj4+1tPDvgTwjqGtahIMpZaXYvcSkeu1ATj3rpwlDE4qfJRjdmc5yfQ/SP9gP9n/xN+zX8JrzUPHHg7U9G8SeJruOCOHW7JYJmTbu+VdxYKvJ5xnHSv6K4Ay2WAyeUqkbS3l+h7eWUabwntHq1q7a26H0BpGteFI9Ih1O01MX9qZ3WGZSCplAO5/fG0j8K+lqYlyrRTdnJ2X3N2+5M9Ne1mrxVj568X/FKPxj+07bfDy5SU6dFE08Vw7IqN82GYrnLN7ZwM/jX12FpyoYKU47pHRyypUuaT1Z5qDYeMP28Dr+rxyz6R4KjU6Tbrbl3nnlfYJiiZ2og6ueFDZJwM1xV66eIpRqac0fxHgqUquKsnryt6tLZX69ey3b0Wpn/ALVvjSz1vxVNqct5DJBbsGkiDD5CZFVVCn7zliORnA/Ovr8HG2F9n2OzEYiFCkk9Dzjx4ttca23xl1S1M8ulXwMcTjd5t27DLDONxCuq89zxXFUk5u9m+XyMqdC0U3szvv2ofEtx4v8AD8HhfQddNs620VzeQ
vGkcdnKiblIO75yi4Oe7NgDjnmhh/aturt6mFWusJC6j8zhvhB+0p8TP2iR4ktLTwjbx+G/DEFtpdx4xik8s6vOAGkjYyAAsAOOx2jkYr5/BY2FfOqtCUrRjsr/AH6ep5UKssdipN35I7b7+SIfiJ4StvB+t33hRdUNquraBcanLbJIdlhAoJhhJ6ec5PmMR13DngAfQYTE0niqkYKW3y0/D9fuR6kVKtRTilZdX1/4HQ8z1TU7jxB4jg8aiWSWXSrTTGjkgyuws7Bt3OScH8q7J06dd31urHE1KVrHB/EHQvizpP7O1/otnFp2maH8c/Hsl1LdCRhcXljp0oUjav8AAZGJJ9RX4ZnGBwvEfiHFpX+rR36Xk/8AJHyGZ4SviMwUY3s9z0n/AIJy32mfs/fHDRtOaAW1vJqZ07UDM+NqzwqynHYZDc/h1r9JwmHjRwsqUFtqe3h6Hs8NOEFsVf2tIW+BH7aGtaTpOnpe6ZrELyXUezBkiQZYgY6qnIrWrKL5aj3a/IbhWqU4zn1OT+Jup/C34reJLnTtHuo7Ca/0pI7y3vZgAMxq0dwjYG5CxZfVc/N3rxq9Xn5lcurSpyptJ7I+QfiBL420LWZPh/4hvXmSwkeKzMxyUGclcnqD6dK+frOqm4PY+TxPtlUdKWzM3wrGn/CMaj4eu0HkyjA5z5TdVP8AStcE3DDuDN6dF08ucGuozTtVS60CWydMmPAbH3tw4P1GKSftYO5WBrQdBxKnh1EZhbMQJI35JGQBXnSouLOWGGlKd33Pqn4VgJ4Ks2H30LAkD2H6Gvzjjqs06SR7lOj7Kmjok2LkxoFzyQBjNfmtWpKe7Gpu9itfxiRCcdO9RF2NJx5omdp9/Jb3Plds81pzdGcdNuEzZXDrvQ9RUT1O63Mh0J+bHasb2MovlYsvmgDjgUcybLklIsWKZALdKlybZVOSTsSX/FuwP93tWsUVWfuHP6Sd2rkA/wAVdUfhPOpfxDcnbAx7VjM9CfwGc0h83nrn1pxOOKdxzthB2rOTNJ6IhdhtPH51C3JgUpSQvBroTRFZa3K+ctx+VUKk+g1lJGAOaynI1exTnQpLu7VUW5aHK7qVxwlxwDxVciW7BNyZr+DLPVLnX7WLSbA3UxmXbH5W7PNXS0qR5dTHENcjP2c/4Jv+CPFNj8MbS6vdAbSpbjYJHlB8x19Pm6V9/gq83RVlY+IxVGUqrZ+gWnMIvDsMS5+WIAhh149a6nKUo6nKqXLXM22uEgR/PBPz8A5rOGj1Ozlu9CPU7meRQkZwDwNxxirqXauVTilJ3Ma4UmCS20S+kyAWuJ2bjHcVzJq+jHJu95I4bxcLq8162tdKiSe6ZcncuBGPX3qJQblZBG7jZ7HI+MoJ9I16ze3iW41IsBI7kYT6VfMoTSS1OmlC1J32OL8Za/qVje6hqVjOJLxWRZncDYy5wVHv1qKlWak0jKcJNIwrbR4zFqEmtxpMtzcAO0I+5wCCf6VpTm1oypJQV0cv440QaRdi5F3OIVgGTDjdKvvjk/zFbSsjmnJWPl/492AufE8+pRahLHbiPaGdCEGf6VDklsZqpOWlj5r+M1mlnamKWNi4Q4kHKsvqD/SpcopGjaSPFL1R/Z92/J/dt83euWouZM6MPrUSPOEORj3618hVSU2fSbWEZsZx61LtyhfUr3LAj5elSjoi9TKvE/vHnNWjgxTbudMihowcduK0bXMelWjzNjrWdoJckjk1V04nHFunM0mjW8g+Xriudtpux3XjUQlhEITtb8yKlxbMtYTLsx3KMHmiMUmJN82o+0+XovXrTnFM1abV0WY1w/y8D1qNETF66lmEqoGeeM/SsW22auVibT9H1LxPq1v4e0aBZbq7kEcKyTrGoJ7s7EKoHUkkACtqFKVWXKkZ1KsuXRH03+zFqf7Pvwc8RaX4ZsviU3izxDa3Xm6p9ihdvD+nSkY+eX/l4ZTgEqACRgFh1+nwVbDYJqkrtvp0ufO4pYzFaT+Fa2P2Z/YzgvPG/gWz8VBpYhMcw3NxbiNivTMSZ+QHnaAMAcnJr6q8alG6bUr7W0+8+blHku+h9LafdJcWarCjgRnyyz9Wx3qqT0aZwVoqLv3H3jlcEQbzng+lTVUX0uKkn3sUtUv1jXyrUp55H3z/AA/SuZtLbc7aUG/j2OW8cWQFgF1OeSQuM+SG+aU9hjsKyqRXVnVQtL4VZB4dvNUtPBU+is6x3iRM+ztbofur7ECqjNRo8oYilGVdSR53Np2naLZ3OloknmamzJb75PmkUnJb6/LUc0Y6LqVTqNzt2OU8QeDtMfT55tRYtEZvKBJ6Iq5yfxHXpUThpub+1adonllxpE3xDsJrKGBkdCbayMeeQOc9uMA81hBQe5Tk4u7OI8feEbLV9J1LQ7aMysrpDIMfK79M/l3rKolU2NITvqkec6NpMUfhC/8ACOq27y/2bKCJnHzKR1B9RjIopUVGLuatRck0fOvxO/se2/tXw7JaJd6ereTcLEmXjDA7JB7gnBFclZ3vGLOuM0uh5/pPw21XQfDbW0cMxV7Usqht22QHh1B6Bh1HrmppUZRTuU5pnFadaLqEc1okUuYZCJV3ZZG78HtUcuti1JSWhc03S57XeI5jtUgqynH5g/dNb0YWeh0013Oi8Oxs8ocZIDdxzXpJNQOuKVzldd021eac3FntZtwDmfYT7g4NeRi5xUnc9KnFRPLfEVjcWN1JLIuEY8YlDfyrwa0ua9hSpycr9DBaMu53KRz+dZKokiJWTI7tcJgHtUKSkzOabRmwwAXHmkDk966E+xhTdpanU+GjmMAHnHBqKsrI6YrU66zXYucc4FcUnc617sS1kn5e3vWO5DdxHIA6U9S4Np2KN5HyZAvHtWsVoFRXVyvIxlTaRgds1ErJkQkVhEFfDevFXbmiU9GTqo2gheOxrJqzF01AIpO0Nz7Vt0Iik2amg614g8M6hHrPhvXr7TLuI/Jd6dcvFIB6blIp0MXicHV56EnF+R1Jxhqj7/8A+CdkPjL4ofDPx14z1XWtQ1X7DpSvoaarr8d5di8jUhvk4eIFWO3KjIJwWwTX7fwXxBjnkGJcp3bv112v/lrazfoR9dxOGwsYTkn7RtNxVla+ml3r67721Q//AIJ5eOb/AFD4Y+OvCnivxNFeT+FvF18LdPKcGCO7AmiiO8Ah08xk4yPc9a+i4DzBZ1go1K69+nJrWzd9VfyutO9n8jvyPEVq/PTqRas2vVLZ6PZ7mDqHhrW7v47XvxIKn7BpGlbIlMZCyOxztJx14/DNfrXMlCyeh72Lw79mpJ7kv7Knxh8O638evi98a9S8JPpHh/wD4Nmsdb1W4didRvLtl8qD52CbIghYBQGJk5J4r8o4pxVetxJg8JTb0d7el/n1/D1Pk8RVr18xp0IqzT31u/6/XU8B8SXj/F3xJpvxEFvJCJFW40ywmwG8sllSWReAXcsSo7DHYV+y4GXOo1G7WSaPr6MXOcfa+nkdD+1VJp3h7wvNo9nGmnw2tgH0+WNgz3FyApaVfQl+A3YfhVzxPtaclJ6v+kaV
Ks6VK6Tev4HC/Ef4qSfE74H6vLoGnyWkvhXQ2ivpJFAkup8/vCxxyeAcdhiuZ0JVYzlzP/hjyqjUoOcr+h3t9470/wCMf7P1r8BPD3g/QH+JOjaOl94U0SJXsrXxbZuoYzbotqi8g5YqTiRRwM1+QZxSxXBvECzOcnVw1bRt/Yfd22sclBYyjiEk3yy1Wv4HkWt+PdX8U/De8WTWzqOs+GbN9N1nVpbWaEz3DgeYNsypIAj4RdyAYBA7Gv2jCVsLUwcpYepGcXtON7PRd7P8D0qMq2KpXcbW3RQ8H6JZ6xoXi3U/tyLZWU0BMglIEkcScH3ySCfbIqacksJUqPt08kehSjCOFUktUupzHwc07WPi2LHxH448yQ+F/Bsn/CLWMc5eC2gSfzS4yCNzsWJx149K+O4ewOFiq2KkveqPdM+SwtOpOrKvNbnYfA+O71TxLq2v67MHuP8AiWpa2oHKyq29jk9wpYk9sj3r1r8s5WTsz0ML7t4ln/goxqqax44tfiRZxtbyf2W1wsvOdkZxu9cMox759jXn4m/1e7duU5MyrKhRt0Wp8ueObnw34x+H1v8AEXwhrQkewmRLq2LbZYoJVO+HjG5AylxnpuI4AArxcTOnUp80Hc8SWJji6anSW255d461G8nvPLvNT+2SJL5YuCcttwCjEjjocV5qblLlOTExmrO9yhaahYyWl0lyPKuAu24jHTrww9u9dM5RpwuU68VQcWZHhK6lmupBBIXeRztQk8ken1FeTTraNnn5TCV5Tlsbmk29uutr5IIBk6HuPQ0pOU02j1Z1VGp7p9S+AkMPgyxYKAhB24+nSvyjjdtYuEX0R6CnzRTNgPnBGeOua/Pp7kJXYrIrRk+3NZ8zN4voYuoWxhm81FGFNbRd0c9eFndGlpt4JYguB0qkh0al1YmjLLKSD161jUSSKkveLT4aMH8qxW5aaJLIEcEZ9eK2shwjqLqLf6OwJ/hq1oXUV4nOaO2dZYf7VdS+A86l/FN+U8Enp3rlm9Tvk9ChtzNn34pK7Rg1ZXHzDjao6dqlprUhtyK0xwuCT7Gqii6asVLxcIT29KE3czra7FSESOQM/pWzehEXYcfl4/PPaspG71RUustJtA+hq6W5zS3EiUZ+Y9a1krmTm+h6Z+zh4O+PXi3xpaW/wh0ebDTqr3ws96xnPqa9DL8FOpNNbGU+VpuXQ/af9lr4MeMPBHhGy1D4s/EW7vL0bCqSTqgLYHAReB6etfa0qMKCtzXPk8TiFKUrK2p9eWMm3w9Ai8fuxtz6Y/WtZSvC5xtr2tzLguN8zuwLBD0rmg/eudNkloQ6rIl0nn3EjKirgIDjdTqy5t9i43iuVGNczokot7DTpHRQMxA5X6k1kleRDi1q2YUPmR6nqep3Vkn2uZQkaKuQiA859OKu1ro1n7sUkjhtUvYtZ+JU89hYuXtYwuWUMoGOo7ZrKMf3zY7yVI5XxnBZal4lm0Bhiz2Ft5ULtkHI/HIqeXmqWE3OnG7MXULmysYb2aGzk8zhZGLfhke9dMIXbMpXmkcR8S9E8WeINK8+1v2msZI1DhV2OvP8LdQfatpwvHVmbhb4jxf4m+CkttE1HS7xpZQkOQtywDgkdR2YGs1ZINIO58UfF2w17R7yezv12QNzDFjjHtnpXNUk3KxnKSnueQa1u+wXYQbR5ZxXNNtRfodeHSVRanmWW5r5Oo7zdz6NO6GsxzzUPYpbleVs5zxzTibwM68wQTnimzkxKVmdRCu6EL7UTdqh6E5WqsbLAT0/HmqTTM6kFNXRNpl60L+S5GP51ryx5djnp1HTnZmqqRyATKOorN3SPQtGaugmYrkJ/KsFJt3ZjKPUmtshee9TKbexakuWxLCziQ5/lUsz1uTmRl+UGqhBNal6S2I7yzS9t/IljDBvlORxRKTi/ddi7RS1PtH/AIJ+/BTwQ3ibSNF8KjxBrtnFdR3N3f8AiKH7Ho1tckgOLW0U5uZh0EjYPByvr9Bl2H9rXi4Kz0u+9jxcfi60abg2+XpbuftX4X13wh8NPBtppBmntL+WPaqSKHkCHpgcgE8YH0zzxX29RRprc+StVrq9rev/AAO+/wDkeo+DWuJ/DcN3PayQ+b86JL97B6ZrKhdtnDiowjPzNG6mjS3PmsQOmR1rao4xptszpXclYx7u/tbCFpLOH95jC8ZJY9h715LmorTc9SNOU2ufYzhot1ZRNrutXCG9k/1Zk6QD14zzTVNqPNPc19rGXuU17q/ExpfD1zdaZPPDNI6S7tzsu0SHnLH0FZVLON0aufvpdTy+e31e5+I2naz5X2gaZZyiFJHwjAbckDuRk8+9cyc/bJotQgqTv1ZzvxR8KG/8RagINcnaBYwr20bkrhumR7dCa1q80noyouKgmkeUaj4L8d+Erye+stVkhOmr+6towfLnj59Oc1yqnUve5TcakdTjtPj8V6r4iutMu2WKBEa5shFkByBkqR1OPSrTqwm7lKKklY8+1S2+LfiCPVNY8KXa2qtYtKmntahopxnDMjDkjI/nxVxdSrB8r1NpxjBKLPNfCHwziuYtY1rxGWs7uYh5nVV6j+Eo2Mj3rCFBpvmNZLkicZ+0NbeDLK5i/sLx7c2xktV863aFovLYdGIwTtz/ABLVzlCPUzg5SlqjxF/B92upPrS6is0x4mkjcBvYn1BHeuSTjJ6HdBK2xp/2U67p54tswOPmH3uPUcGuyjZanVSk9jU8LcyjcMYzwK7ZWcTpi7HKat/ZWp3M+marp7XCBmKlZCrL75AIxXg4mMZTfNsdlNzqaHLX/wAC5dU1jyvD/jzw1aW0i7lk1rxPbwBfrk5/rXnSoKbtTdvUVT2lON2zC+IXwkk+HNrFdzfFLwXrbSMQbfw34hW8kj/3gqgAfjXm1qNSk9Wn6MiFVTlZnG3kqmMKDxjrShGT1NG+ZWRmzysr4TIORz610XcWYOFnqdF4TLtGCx4rGpLmsjohNW0O4tVbyg2ecVjJI6I+9EtpD8ocjisuU0UURXEbAcD6GhWuKyUiC5jwmD6cU3K2w5u6M4ZDEY/MUmZqKirkMp+fnr0+taKVkJNyY/JC7B6VWktSmtLD7cbmG8fjionKyshpcup0XgbwX4q+Ini3TvAvgfSHvtW1S5WCytUIG5j3JPCgDJJPAAJPSrwmEr47ERo0VeTM6k+WNz9If2Sf2XfG/wAAr200nwx4i8OfY7eWK71/W7/VcG+lZSsgjjC7vIiBZF/vklvQV+x5bw/mWTYGOHwicpTd5NrT09Ed8KeH/spwnGcqkr6KOi7anaa34a+G/hTxz4m1nwTp5gsNb1NL3V0ICteTrGI1KDsmFXHc1+lcIcMvJcPJ2fNK7+897KMJVo4aKq/G1/XzOA8d2V/Y+GJ1iaezgvpMy27OGK9W5xwTzz2FfeYefNBaON0rp7+jtpdeV/U9LEOMpWWtjzj4Ox6H4++GfjfwrbWtqNCk8SI+pzxKqrfSop/dFsDKgcEjnAIyBkV41XLKFTOljpaySsjyaUKEsUqzWqZ5V8VItNtzqd74KsWOt6jamPTbWRf
lto0G1rqXA+XPIRQPlGBzzn6eXPVlanpoexCdRNRltqeZ/F/VfiV468I6ba6+8Q1G20aKzkvQmQZXyWwCMfKgUk9z9KzqUf3ai9Gcbk0lCD66k3jDw5rGj/sQ6v4rhsQBfXd3F4h1xmzLNKzArCMDBZsuzHjGFGDk4Uaro0Jwptp228tmZ432bhUcpNzlrr17u5137K/7O2p/E/xT8KNR8YXkgbTo5bosL97d4rWOEvuLqQYwzDHPGK8DiitRp8I1o1qfMnG1mr7/AKnjYvEVIZcpPS2zPQfDX7N4/ac0Lw78fdE/aJ+Dl83iTTry28U6VLqI0nVriJZjGoukllYSvD5eRcAh3AUEEHdX4Hwl4k4fgTFPK5YKo8LC/NO7lu73t2V909NrHdlma4SEI069Kop2T5ormi7ry/U848M/sj+P9E8B+ONM8V6RPp3hvTJylz4lvbiKCzu7Uq2WgcsRLlfulSdxr9d/4ilwZicudHB1ZS9qn7tndX7prT5nbi62DpWowm7y79jkv2bNY0Cb4WSWeiRRlb7wtfQ2106gLBHDLHGpznriTgHruzzg19BlU6ayiCg9E/zufPYatH2Uacry+JrR20a67LfRbvW2zNDS4fD+k+OL6zhvvK+x2dxcCYjm5Kx+Q0gH90SMfruFeisRFX3Wh6MZUY2drO1zx/8A4KG/GLTJNMsPBtlqSXV7d6bZWs8jR/NEvkKzsMdM78/jXzebYi0PZpt3Pns6r86cbaSPj3RbrVPD9ldW1pfY8lDBcQIcCeJjkfXjP6V4MaUqNHTdHztDmw0LQ6FeKRtVluJrfdgkZhkzllAPt1HA/Koouo5czPQoPnTl3K+t3tnO/wBilljeeNPKWTcR5q+9LF10nys8vGVIe09m3qTeFLRFugs0Pl+UeShwFxkgn1rmUbs76C5KWht6BbJe68kLkKPO3Blz83NauUacblUoJzuz6T8Ha3Dpwj8MG6ZQIFZLa4iChzjlom/iPqOtfinGEa+IzSVRfCj0ZV4KSizdWZGbKPnnivjHa5abTJ42BTb6+1Q0dCtoyrfRCRTkd6pS5SasXOOjM6xungudqZHPetFKyOSnHknqbMbEuGHpnIrKep1vVF2NCUGBjjk1mSkSxqYxn86Z0R0RHfrugb6U+Zslyu7HO6KFXVm3H+KuyCbgea7xq6G9PIMkfrWE0zsb90pucPyOp64qorQm9xGYtk57cUpbAokEzZPseopWsiU0mVLx8Lhj25NQtWTNOTK0TMT/ACrZfCZySiwyWYnH1qZmkZXRVuDsbPqOKIbmE1d3EQFznbkntWzny6BCKserfs1eJv2kNU8X2Hw8+C/ie405JrkGWZWCxxLnlia9LLqmKqSUYOyPPzCrFRatqfsD+zZ8FoNLXSLrxz8W9Q8Sa3EyPLGLwtCj4HZflr7alQppXbuz4utVqVN0faiN5WjRx5PyoB+lOo/dsiYJqdjLsJEJmmaPcwPB9a5oas9CUW4qxFdahJP8iWbHHViOM1Uk5dAhDl6lC+WRbaW1F0kW9cssPJY+lS70yuSnF8yW5xV5p6NdFbae8iIB81m/5bE9selZ/FJO5tJ3hexw13ceIpdeu7HSTFbyxQj7SqLyE9SfXFTGM5VGOmoqPM9Tl9dHiOXUJZCI5IPLZbclOVcA4Y+gFNtxmTVSk7FTQra6awmad3luFP8ApMsvKufQYrrpSbRFSUIqyOPez1C5/tK/029uGEUnFvIPlz3B9RVyu27mNRTa1PE/ipBceJ7u+04XsrTlMhPMx5fspPX6Vm+SO5zprqfH/wAZtF8Qae11p+tt5yR/dcj54/qPSudckmTfmex4RrKMthdqV+7GwDAVlWhFRZ30IpTVzzLaxX5hzmvj6ivJ2PorWImII5x7YotoDK02OuPoaR00loZl65AK9/Sm1c48VLRo6i3lGwY7CrnBN3O3EL3myRJMyEMevQUKFlcVKV1YiuUKNvTtyKamloYYinyu6NDR9TMh2Nge1VNJq6NMLV+yzSlZTyq9u1cbi0zraQsRKnJo5EznvaZPF/fI601BRRtON43RIhBbJ/WqREGkvMnRZpSkNpAZJJHCpGoyWJOABSUOaVhqMm7s+5/+CafgSH4Q+NLb4kfG7wNrd9r1gjSeH7HUb3aYTziMQb8JECNxlcqM4Cq2SR9dlqWG5VOWqPGxkHKUkm1G21tPvP0w/Zr+LOp/FA6b8Sdf0Frm9vrlzbQZ3o5DEDZ0+RRjL8jjjrXtRnUxUG4q7PAxNSlShyLorfM+wzK72Uc8qhTsBYDp0rqg1Shdnzc7zdkVdTv4reAIYt7SttRV6msMTWiqaXcdGMnP0KuoRvBHHDawqhxl5Mcp9PeuWUGkkkepSlGd3JmRqttdanMhliZbaPsRy/1705RcrX2OujKFBNJ3bKPjHWIbfSjaySFVSM7IEPLn8qxxEm1YKVPlfM1v1POPB954euvifY6dH5s9ytnN5wlGEQNjPHTsBU4dU51Ei6tKbouXS5x/jjU/EPhf4maZZ6RozznUZJY9RQDiNOofnrWc3OOISiiopOEl0Wxj+O9esr6PUI7oulza3CPbxZ+UIM7gTjJFaN+8xqD5bv8Ar+tDwP4h/FDUtX8R6jpHg/RblvItxPbahEojNnOVwFBON44zj3rlniFKryxO2jShGIeB/BGjab4Uvr7VvEX2TUbmEGWW2nHmB2+8SnbJ9K64qFON3owqO09jzDxR+zvrMet3Hi7StdOsLNEZGsrm9IJXuecbT7c1x1VJu8XdBKrFrlaPHviFomk6lq39k6naalayJgCPUEMixjHVJBghe3WuRtSbTNILS6PM/E3w1g8L3xTTbVgr8qLvLLg9kcHp7U6dKN3qdkWlHUpzWQhtCv2RoWyN8QkyB7r3rsp2NqaaLHheN2Zjg4G7BxjtXTKSUdDshC+55xruuxl77RtcXyVG5ra8VuQe2cdR9K+dxVS02d/tY0lZHl3iC2nimIm1K1vU/hkh6/jxmvJk5Td7mMmpS1MpUWLJUAE+lYztJjjBSegPl1+Y8GtIPQtxUUV51CgSAfTNXFKT1MZy5om34Rm6KfUUpwsiKaakd9aHMYOe3euSWjPUgrRLyNtQcfhWDYmxkilwaRLIJV+XbjJoKSbKU1vtXdj6EVSTFPTQotF++I9+5rW2lgSJBHu6dqS0QPTclVRGeOT6Y61Di5bEc05bH17+x78DfHnwb0e3+KXi+3htNT8VoIPDnh2IJJqF9bEZYyKTmCA8MxGHZVx0Jz+n8K0YcP4OWLqte0qWSXVK61PSyukqcpzrWWlle+h9YWNva/DLwfY2moQR3ereJtUhiE0nyl8vwFU/dVT0UdMZr9/yynChlyk3dWvf1ProSpwvKLaSW3qP+Mvh7UE1Cz0eyZLRpbo/vS5y54wq4GSTjHtzXt5dVUqTm3qZ4STdN1Gmzzr4xeGtQ1nS5tAtNRNqJ4hHcT2pJwSfmAP97/PFbRc5rQ65ypyp6nDeMfEHhX4BfBO08NeEtCma3ScLZaZA+5765kbaGbA5LOxyx9aI4V8spw
V+XVveybS+WrSv3aPJk44X3b6N6epz1t4UHw304z+P4YL7XdUT7TrrvyAx5hs48g4UE4I64z6k11xnywTiz1IwfsLJs8b+MGt31jf6jqOovJPZaPaPLDaQ8RLdPkbhgEM2cDdzjGOgq5OfI2mcelNNxRz9to/xSg/ZKuvhTFLdzWlwIpL6RsyKLicSYJ6847+g9qI04Socq+No5HCdem5T37/kb/7NHxF8Qaj4X1LQfFfiN7bxBo+nvpVzKgUeZEVJLcdjlgfrXlY/D/2tk9TAVVd2Zw1Ye2oPCcuyPkTTvB02i/E7UpvF1rba54g1PxNLp+g6Bb6cWa+mM+IYQgYAoS4yuOfpX5rwvj8uyzKsRUxsopUk1JySdlHWzve589lmM+pYadTFO9nyxjqm/wDgdyL9qTwJ8Wfhx8aJ/wBnL4pPNZeJLCeOTV9Gsr1hbWIaESCFIl+RVUOBtXptrv4e4k4a4+yuGMyzDw/eTl7ygoOyumlFWVm9dF00OTGVvrNWnTi25T1+8n+Fa+PtM8N/2PpfiSSCz1BbuzltoiyLAzxhuMdFYoMEdC1e/hcuq0sPy8zWr7/me3luHxqpcsLaEfg34tfHbxn4r1ldW1GKK4RWYJCmWEIKmRRjkqTGMj1qKMq1XEShUkcuEoYuWJmsQ+uhw/x71zUrv4h32keLLySW9t7kTW0rHdlCgAQgdAAMfSuLHTiqzp72OPGzjPEuh22OJ1W+0jTZ3iurcwwTW/lt8uSDwflP17+hrnhOKXvbMyrqjhVeXUPDVtcalJJE1ssXmW5DvGMEYXIbn1pxpKKbRVGo5rY534gWp07XVslhWRSNs5YdGzgkGvn8dUcaqR4eYRjHFRbW5seEdPlLmKGRvKYclhzwP8/WqoqSOqjVm48tjf8AD0DWmuQqUAxN82Dg49a1nBTVjsox98+lI/D2m614etrS+h5EKtFLFw0Z7Mp7Gvx/iurPD5o7arsepWowqwUWMtWv9On+wahJ5zKMx3Cj/WqOpI7MO/r1r5KrRVdOpSXqjBQnSXLL7zSguBKgdG5x61yKxrCavZjndWXg9etZzTRs3czbpVSfzQuMmrh5nNWaTNLT7gyRgenernHQqk7xNW3PyY7Cua1mdNNEjSqq57jtmmo3Lk+XQpXd0pgdd3JHrWsY2Oebd2YGlBzqjHH8VdcVaJwwd61jcnfDn9a55nfJWgV2IbGDj3NSpK5hHcAQPw70nJHRayKsrEtgHHvmk3c5X8RXuznBqYldSqX42gcd810WM6m46Js8npisqgU2VrpPmCgZ5ogxyQseFGT+taSjfYwu9kej/BvQtDk8VaYo+IV7bzTzr5kGnuUAGejMDXdh6cYTT5rHHWw9Spd2P2e/Yg0H+wdI0218LaTLLBJAv2nUL3kufYnrX2uFpNQVtT5zFU4RmfXV9J/oCxyAnC9TxzXTJNROZr3m0Z2iTIq3G1AWB4IHB9KwpqzudOjSFvLm68lBNhM52gHr79elOc2h2hfQyb2yOozfZdMvApZc3FxjDAc8CsHK7F71rs43X7i2srsX9pbTTMuY1aZ9wCjq1UpRSujVU3JWPM7RPHOq+I5F8LzRQaffMz3c8vMzxggYHoOtc3tKnO+TZnVajThrujG8bWV/FfHSYdXeB2B+WNQC8Y5bPuamcKjnqznU1J3KWy8m0lo9Hu7qyjlIVoZT8zHuRXdQajDQ5525znI/CWtGwuD4c8SXcsSZLqWXer+pBz+XFOcZyegTmno0ePfF3S9cspLiwntEn8+ElpCgR93qMHrWbUmrHI4xTufI/wAXCLjTbyHVTNHfwZVfNGN6896UISkyk47o+ddXnZ7G7D4BCMMCssQ4wjJHbh176seXyq3IJ718Y5XbPeSdiGQkLkDpSAgcB1JB70XszWk9DM1AESZ/OtIvQ48Rd3OhgkCx8ntWk5WkeniE3exDFqI8/Z6nqKevKctKdplwl5gMqRn1rK1zqfLNFcu1lOJAe9bwXNoefUTpT0N/Sbxb2DIxwOlZ1IKJ6NKftIll5ArhP1rlbdzOdrlmEq0fXtxUts2i7xsNjc7uenat9oiirPUuKuU3EgjuMVzylJvQt1EkfRf7EXwu+OniOa+s/BWheM1trpCbm30u1U21+vG2GeQssiRkZY5ZgwXAUnp9Hl2CrVaN6l0eDisWnKVn0P19/Yb0PVNH8MaLpPiKVoXjhjivSq/LCRjFtGSowm7jaAScckdK+kwyVJ6XR8/WhUrxvLqfaWoXSWmnNN5DP8vyoozn2rqxE+Wjfc8WjS5qvLexVikBtlvLm2KNjIUjJWnRjempzRFaChJqLKd1fmfgKVB7lefwrOT9o9Drw9K27K0lxJJdhY7ZmSMfffnb/wDXrH3uZnZyQUNdzk/Fdze6vqT2+kl3fbueY7Rs/PtXLKVTnvHodEYrks18jjIbaxuvH+n6N4VdlSI+brF4V5bH8I+p44q6Eb1El8zepeNB3Wr6Fb4ieIbCPxq7JIrScv8AaJUIWJRxtJ9/61VWpCNT3TKjh6ipuUjhvjHY6Rd6ddHRbRGAtj54RhkhiASD2xXLVqOV2mOHNoeZ6N4Bl0FVtYljuhd2weKCXpIVPQt2P1pUlyyOtT5lqaXjf4Y6LPZ3N7NocduLlI1kuIZ8SRN05PVSP1ror041IkObUjxTxLrnj7wftsI7qK7k02dlS7jRXZ4y3BZWByccHFcCU4mijTk7nkHxP8Y6hD4sW+8UWdkbeVT5MunxZGT2eNgMKe/oafKoy940hfaKPL/HPhe6n2+IPDuojBciXTp3G36hc/rWvLFRujqgrKzOR1WBRbbQzg7vlRsNg+gPpVRWp0xfvaD/AAqGBkBAHyNlcd8VpKzR2wvzI8q8RXT35u7AeWziQ+X9ohzg5PG4cV4eJgnJtnY6Maj8zzXURf2Vy9te26xsOqKBivInTcG2Yzpypu0igy72yBx9axlFPY6KfK1oNuVZV46etNNRCXvaEDlfLw2CKqMrM5KsXFmj4TmUybQM/NxV1Je4aYfc9Gsc/Z1yf4BzXBLVnpRasXVPy8VnJWZnJWYqkEZA/CpHG1yGbIIyKuPKaaIqXDELxx65qnJLYzqWtcoN87jHX2oUlbUiFyXYQvHFF1ctxuSQKRIGBwQeD6UnJrUqLjF3R6L8B4vjP43+Mmh+HPhHrGrN4n1K4+yWU2n3bJOqOpWT5yw2r5e4Mcgbc54rbC4SrmOMhTtzO60euzTX3bmdfE+zpucmfoYPDn/Cxv2g4dSmndvDvw2ixFdTuVhursJhpBjIKp8xJ9cV/TnFPFGG4eyOmqitHRN28uh9TPEToYOnzXTkkdLqOv8Agjx5rVr478HeJbHVtCh07ZoOpWErTRsOUll6Z3ggryM5Nezw7mmBlw7CvRk/YqO7bbsu97tu27d2engq8quEUor3pPVf1oeY/FbUtN8KTPDLMVmdCLSHd8yIfvMfRiO56V9vhbyhfZF1a+iaPO/hlY6F448TT/G7WEsp9O8HYtdDsZLnEK3BGGlIAOSi52j19OtYxpR9qqcHaLXT8FY47
fXK7ld/8E5jVdd1H4k+LvttleRRWqXJkWaZM8Kf3ki9OduQpz1NenOlal7j1R3KSpxSOF+Lx0mz1fUbaxtwun3sLRWkdyolKRZ6sAvEjEjB69CMVi1Jwip76X7GVVQcLtPv/Wxl/D290m+1nxZs86aDTNIthqCKjCJZxkoD/tYx7813YXDx9s6jfl+BlGDqPaxx3hCbUPC3xj03xZPd+XZajp/k3NrcwZEgk+XJGOuDnnHAqKtOEuepdKy21vLVaKy+etlZd7J8+IgqFeNRK729D279h/4ffs+6t/wUQsNc8S6Vrx8TeBtBuvFV14uutTtBpOl2VsIlaUQNCCbhyzDe7FUDgj5lFfyH4+4TiDL6LhRqQp4bFyjTUIp+0cpN3d27Wt5ep8rm1P2eMlWUE+eLjZpuzemmvz+R8r+NfEnhj9sn9uj4j/tKxLdQ6drerXV/pN1qscaTtFGoSMMI12gME6gdGHPev2Hwk4Vp8OcLUMO1rT1vaz1PWy/LMN7KNRqzirJ6/h19DF+A+iR+I/DGs2OiajdwSvquz7UkYlChdzFihB2/KCMjgj0r9Ow0XUpS5l1ZvSdqLpxb3vdeXTVPfb8tTjNBu9A0n47WlsmsRodp+3TRHKAMzMoJ6E9CV759q+dnGNLFu71PDq1b412vbQ4T9p7R9dvvinH44msYLiK4tIjqUNg26OORkVm2MOqhia8XF06tbERqJdNTz8whVjjY1acXKK3MvW/Dmkan4ZS1u5bSQzsjRX3RgCTgMM/wng/XPTiu2VCHs7bnTi1GtRUeXcXwVpkltqTxy2aGSOLPlsRteRc8Z7Ajp9a5p03LyMKFPktzaI4a50S6+JXjy+0eNoopXucWkdxOsQzknZliBk4wBnk4FfJ4qpQp1ajqvSJ5FaVPFYipCenLsW9B0zUdJv77Sdf0650/UtOmVLiwuUKSRlcAgg1rhcTSxVO9N3M8NWpVrqOjRv6UyT+IYVtYjH++DASLxz6Z61rVkoxZ61BWkuY+nbSIWWn2cTPndbKQV6Hivxbi67zK/kevUlFyRFdxM7LLDIySI2UdTgqfXNfJ0qtXD1OaDszCooVI8sjKa9msboiQAbjkgDAP+FJp1JcyOLWE7dC9DdrNHuRs/wBKnR7nYpRtuVr+QbCSMEU/hMqq5loWdAuklwo9amU+boTh076m9G2xApHasbNs77pIr3t1sjbnqKpOzsyJy7GK+oySOy4P1rRnLZylch0R3fUyxGPmq+dRiY07Rq6m7cjBYAj8qwcm9zrnJNFU/Kc9j2qlG6M1ZajkbcuccGpkrFc1yrcL8+D+FKKciOXqVbljg89a2jFIxk2pFfODkiqexo0pIRGZW6/XNZyTZhflYyT94QScH3pxiU5tLQQsoOK2tZGafK9Tpvg/p8+q/EXSNEtPPDXeoRp/ozsGJJ6cEVvhYTqV4xic+Lk3h5La/Z2P3f8A2WdGvfDej6NY+INRkEqQKkFsTgjAHavvcP8AuoJSPkJKcqjbPpPU5fL05ULsEIycDJp1ZvlLcdblDQJ1CTIq84yXbPFZ05aFtO6GyML4mGBhKoUhnlJAWpklN6GrXL0szF1TS5rLzb6KWW6mdCIkhPyj3pezjEL3snocrNpWqW8j32pQGyU2bBQG35z1JrJaPU3m7Q93U5CO+gj1m8afMUVpaLDHJFIAS3Xn0pLk579jKVOXIr9WcVrHi3TLrW7vV5bWKO+sdsUcbMBwep56nvWSqJybaInCUHyoyY/FnhrxGbrTD4kadoyAZUmULFJ1wD6V2UZRbdmKpSkrN9Tl/FXj/QvC93O1t4kgN0luQ9nC5Ys2PvZXrVSlZkzpNI+c9W1jxf8AETxNLqSai8caHYsIbD5z97D1MWk9DGdpaWPFf2ipr+Fp7TVwizhTtn8sDd7HPQ10QlfoCioanzTq1s90JYFVQ7Arg8ZNebi4OzN6LcqqSOF1/wAFanoFkdQ1G7slXft8oXamT/vnOa+SdCo27H0Mn7BpSOfnKFMqegqY031LfvLRFZGyCaU42kXCNkZ96yuxNGqRz146M3FEawjd16VXvOWp3Sk3NkNpa7ZSxHGa31cdTkqQtK5Ze7AJjDAAdMUKC2LhVjERs3UeQozUczhKxVWn7SOg7RryWzudhbHrVtpoyw9T2b5WdESsyeah571yNNM63FSdyzbv8gGfpxiocUJ3ixVKgkn+daLYrmcizZXlzY3cNzAyBo5ldDIu5cgg8juKI1IwqKXYfsYzXK+p9L/BT9pXWU+IGp/Ebx54jv8AXNZW/wArFp/iQ6TpNvYRMojZ4ogGnkZvuxryAOh5r6HDZknzJdfM8vEYSjhLRWq2vv8A18z9Cv2Ef2pvir8Yvi3ZaVBYpY2fngxJdnMyx4PK26D9ypH8chGfxr0MPOriPhex4mNrQpJJRP06S58qzVpPmwvJ9eK9tSUaabPl7SnUdiGO/iuIWnRTgE9RVRxEZU+aw6lGUHZmZJqBkl8yG3I+o9654z55XsdNOk4xs2Vb9Jb/AOWa7WJV+/EmTx3zjilOV3vY7aceVaK5gazY3d/DNa6VYG3jZcNJGuWk+p7CuKq3zXivmdkXGik27sy/h9oV3H4vutPsxCTbWoLyBOInbOMnHLYzWmGhKTfKzLF1qcKak+pxPxeOnN4ij8IRwqxuLtftN0snzOM5K4H06VzVqfv8qLpVpOHMcN8c7ay8MWV9dQ5ZTCDHFG2A2NuNx+tc9e1PQ0oRnU3OUn0zxJ4iuLfxCbe6szpkal4RyEY8bWA/hOevrWtFTm1Jl8vs24sxfjH4lVNA1DVdB1ZEvRCokspCcOwH3XPb2NdFW+5EYJP3j5v0SPx78TUmn1qzsrKWeQiSx0+QlgO7L0IJ9q5KUpzWqsdfs7anK/Fv4W3Gn820mohbcbHGoEjIPBBY4OOeKmrBp+RsqsIaI8ym0vyJ5tHvNWDS+Xut4pXJljI6ADA3qfUVlGVtEdEHFq9jldfjvoISNUVIpN3zxxnr/tbSAQfp1rqpNNm0bc2hB4XY7ny2f3L4YHrxXRKN46HWr3R5V4vt9P1yG6ubaI219AxBaMho5lB6sM/Ka8PE2SfM9T04RVl3PMr4yBizsC4+9jpXhzqXloc9WUpaMr27EtyR7Cs22zSilFaj7lcoU7Csm7CcryKlxAfLK4Ge1VCWoTipRLXhEeXdbT/ereSly3OKnUlGpY9LsHXyE/3fyrjdz1qequWzJngn05rLcTbFjcnofxxSaGlqRzseQBigck9ypOd4YfrU6phuVFjUSFgMc1pZtEy93YkL5Gw+lChrca5nqS2ysTjGea0bQRhd6nqX7MX7QvjH9lzx9P8AErwFYWEuqTaTPYQz39v5n2ZZV2s8fo+MgH3NdmWZriMqxPtqUU3br0HUp0asOSav/wAA9+/Z28beO/EXgDxt8SvGmi3zae+itpvh66tVMVvLezyKzxhm+VmKrye3frXZnfEmbcT5ZHLsQ+aUpJRsvPv6HrVsXiZ4N09W21ZX1/zPZv2Vfg1B+zd+zT4c+EZ11rrUmE+qXSzYK7rm
RpmUEcBEyq9OTk1/TXBeWPKsgp4er8SWp3YChUo4flPGv2ldR8T+PviB/wAK68GyPPqWqQN9sv2OE0+HOGnkY/dVR0HtX2FfEyjh+SOiS36JHpTo4rFNQWt9C3450bwF8EPhHYfDXTFubmxsrZnuNlwM6jMR87nPGWJ6n8K7cHh3Tp3bO2FN0IKMHojiry60e9awuPC0Miz6ZaxzataltsSLnKxZGTtA28dya7rSfNGb32tucXtZat9Tyl7vUfiV8bNVk1SCGz0vQC9zPIzuv264ZRhAMHARVAxjv7VyurOpilGLdoolUK866lK/Kjcs5tL0fwTrGhW+oQ2s963267ZUCo5yAq79vzNjgcDrXsUrxTdjunONOCSje5xvxivrm78NXus21oirpSwpbNFcBPPEeWLgj5j1I5x6dhXPieWNF26ankZhTfsVdn0D+xlp3wW+KmreNdR+MOl6ve+G9c+E15da9aaFOiNc29uELRXMuVZE3uAqqwV2f5zgCv5Y+klWzSg8mq4VfvHVSh11ex5eZSl/Z0Z0muZyS13+R8Nfs/ahZXer6lpWll7SzuYZ4tPtJpt728BJ8pWbocDA9OK/oHhWWIo4SlSxLvPkje3ex24fEu6gnp066GzqVyvwOOs3uiXkRD3htJZEYgoJAwZgDjgjGOnFe3XqwpRly/ca1KH1ebk9UeGaRZ6LqutapZW+qyre2shmlO8neYomKuD35J/WvkcQ1OblLc+YrVoValSlB7a/NJ/8Ef8AA7xNP8RdSutN8RmNNQhuTJb3E0BKlVTaxxjoV4PB9azy6cq0Jcy1TMcgxU8VRn7RNNP7yl8UNFsLfVIxo8/l21ypQW6rtImALLj1U4ADDtiuzEUrRvE7MfBwlbY19O+yT6B/wlH2QNJbtGZ4EfLPFtILEdcq2f5968+tKbV2Ztxq0zyzWtD+36peag8ccwdzLFIqbS4z1r5yth4Sm3JXufOVMCpVZSkty1p8J1N5Yr+5ZrySJRDcu5dgR0BPcdBWEKMKEfcVghQhTldI3/BllqX9tQWt4xWVZMuFA+Y/0rGb0vI6Yc85pM+ltTtmgsLSJWHy2yEd8HFfjXEtb2uaSXSx7k4ONkVYrkMgdj9c8c18vU1dzmnuQahBbXiFCRnHBFEJuJLaqRsYzXNzpU+xySvrWjjTavE43CpTlqXFuor2EsjZ45qJRexp7SVrEekXD2V4VDcE9KpwXLY0py11OshuBNCrKevWsGuU6U2yO5hWT5T071g5NsbbZTktY0QnaOBTi22VCKVzM0qdV1MxkfxV2Rprl1PNl71fQ2bmTLsGIrGUbPQ7WuWJWeRRjJqomcXfQfG/y479qyqbltW1K9wdxyPXkGrg1FDvoUrmTDbSc0+a+xyy1loQh89R9PeqcjaKstSLzX8z8PSrsrXMXFOQ4YY5PH1qOa2xbUYoAEZtq4zinzSULsyestT6A/Yt8R6N8L/F0HiuTw3Z6jqsp/0RtQK+TbD+/wA969nJZOE3N9TjzCS9kkn9x+of7ANp4++JfxFufid468YfbI2O2ztIExDGvqPWvqaNGbm5yeh8ziV7S3Ktj7T1CWJd0UhJz94Z/KnVkm3Yxpw116mfpVrqQhuGdVWF2wgdgAaxipyRvVcIyVtx2oL9mtVt4tr7xhIox8rn1zWlnFWNKbb99mXrML21u/nyhCqZk2Px9Kl6BfmldHEeLdQnubCNtOs3E0bb1jEpyyD19BXPOKvc0pQnOeux5h8UjoU9re3+oSy2UAtmlvfJJO9uqgY/LAokqbjzSWhrKXuqJwmk/BDwprduvxAvIrmO9udnlQiVtydwSpqY0qdX3rGcaj1T1RY8V+DPA3h/TxosnhWztppXDPbQHaZAepPqa6IuFN2SHPnk7szdT03wjoM0UOl+GbaArAfsl4IN3z9djA+uT+NdjleKSexx1ZTqKzPFPjTqC6prY1EaQEaJwJHt4RG8fHoODWFryuYRcYRsz52/ai1PTLrSnsVZrl1jBEzpiSM+jf41102lHUJS5j5W19ZG0W73OVlRTtcHmvNxTXIztwnL7ZXPJZLSR7rz76dppOzOOcV8pOrUta57dTDRlU5iR1BHA4ojK6OmSdONiBiVyFNZTS5h03eNzMupOSG9eBSabVjmxMkkzcVyYgCa15b1Gdk/4jJbiQQ2eVPJFPmbdkY14y5boy9Pmub24ZSTwa2rtU0kc+GjrqbVv/o8fzcGuS3M7nRKpyuxXnBMoniHA9K6IOK3Ma0eb3om7od8s0QViM9wa55Rd7nXQqJxszRGVAI4HtXPLcuauSW7bsFhyPUVMpMIvk3JC5ZtoH0FOKVrlKd9jX8HTRHxHa2B14aaZ5douFsBcsMjGFTB+Y9Ae2a6MJye2V3Y4MZVai7OzP2N/wCCO/wf1X4bX9lp4EMNlcKJ0t5bYx3sqFSfOumZndmYnhSVAHYEYr7HDctlyanzNem5JuV9vkfptq9xFb2RGRyvTPWvUnLlhqeJQi/aXK2m3MM+niRNxHutVTkpQsFdS9rqQWssGx7oQMMttXcOazptb2NJxkrakOqW+lwAS3s5LKMrAnApTjFayN6M69RWitO5yvibV7xLSRoQ9vCxxtjG3dn+dclRpN20OtUoxjeSb9PMyPhbq1rFoXiBrC/DT3N+qCZlJLEIAQP1ooVqUIz5JdvyFisMvaQclseVeKvGekaZ40fVbvRpyNPh/dSyNlZJCcE/y5rBTUpvTRHbDklSUb7nner+PdD+JOvawNRlh8iziW3hgUEZPADAHGcHHT0rmjOnVrSTKdOULKOp1OkaFPcaXNqmkziEwaeqSkHf5jcdMdc9xXp04xa2Mqj5NGec6v8A2j4j8QXl5rFzZWsiIIpbeUgM3uT1GfQ1k4pzdxpxaszzX4iReEfhPq0viTUtLvI9PAJmuYJNpjyPvA9xn0qZKNJ3RuqjkrRPI/GjfEj4ys3iTwR43t9SsFiJSJ3BYDsGUnJrnk5VPejIEoJ2a1PFfG2geJG1GJ9XtzBd2jneXg2LGc9UdTyD6EVCjJas9GmoQgZXiiWUw5nminfbj7QvJz6ZrSLTZpT3KfhncrvuUDZE4YHp0NdP2Tui1zHmeufYreW7kubOKUZbMcsbcfR0/rXiYik23dHfB87PNdbm0CQM+mwTQuHI8op8gHsTzXh1oR5tFYxrckXpuZkbb3G3p3NJ+7EdJuW4+R/l4/HNcsndkS92YMu9eBxTp7my+EZpJa21D5eQT1rudnA4nFe0PRNKul+zIXbBxxzXDVavoelCUVBF/wC0xFcHv2zWKWpcLORNHPFgb8fnWlhzsiOeeN+BjHamoocZXVijd3K7SBS5UmZStGRWWdSTk9vWnKNglqh0cqM27PA6UJCjK6sX7QBSG459aOWNzXlbO0+C5+FKfEbTb341jU38M203nalaaNGGubxV5ECFiAm84BYngZPNXTdKNRc6ujWFP3ZWdpW0v3Pvf9nv4+a5+298Wf8AhV+heF7TwB8O9A8PXCaDpdowa10ZghCXE4CHz5W9cDr1xnPr4XL8zzj
HQq4JOn7NaJK6T7vbW+/daHDKjHBUXUjzVKmjumk27rRX6Wvp8/XptDvl0/wpqOreIb03VxFKdPtJ5I2QXKxfJ5wDKuFbGQAAOeK/q7Kvb4nBUalVWlZc19Ndup+hYei6MKalpdXa9fQ4XU7208JyXms26bLq9K/aZ1hAlfJ4TgcLX0tOEUklubVK3IrLueGfGbxxea343stC0GxW+vJZvNiiktFlSPAwZHU5AVc8Z71bg4w5W7XOOtat+71s+zszM0Pwl4hvptR1iyvra2W1gHmR3IaOS+lYnfN0IEaYwOmSeARkio1/36hrtv0/z/r0N6cKNKShq7fOxxfiHQbJvENnZWGry28c9yTqLO37y5XaSSOgROBnJ9PfGy5Vu7DdepTk77MwPitqqk6fq6S26T3tk8SWMDF1ndflU9BhV657mvQo1bxuRVm5NtHH/FOS28QaT/wjlhZ6pNPe6ekSiC2MnzIv72QBf4M5PoAOvGa8rG1eShNz1uZ5nCjXofu01ovvtr267dlprufUX/BJ/wCGMXxv/Y1+MXg6P4bW3i68k0BNHs9CbU5LH+0pSxnNjNcY+RG8pWYLk4HPv/Hf0oOK1knFHDeGdTk5Zc8ra2jdK7X/AAfmfGY+sp/V6FSXupty8trHwx8IvDVzbfFHU/DeoyJZS6e721zbYaPyJI3JNrzz8rDyvcAc1/TXC+KWIpUq0anMnCLT11urpfp2+R6eEdS3NFX6f16Gv8cfEcOuyroY0xEtb29SLVCyZmTylJyM4x1PPTp1xXs4m9aoqnVHfilVUFC9z59gsr7xB441a/WP7NC83lRRbgBKgO3IPckE181CNatjJvoz5Ghg69bMKlSStrsdh4E0XRY7+XSFuPJv7C7Nqjwj5juXhhjr0Gc8817dKn7OO2x69CKpycEtUQ+NpdR8W31tcvbqJbBzaX6R8bGTkNzjJJz9ex7VlXqSbTaMa8JTndfMy/EutxeA9QtdSgCNdz2aJJZj50uASOVboQQCCDg5ry8XUd3ZGWJqSoKLZyV95GrajM9tbNZl93lo5wI+ckA+me1ecoxnI4nT9vK8djM08T2eqbWgdpd+Ny4GPwPUVlUitjLljzWPQ/h5pjXfi20tN5lJcHLEe3px7V42LUadKTfY68LSfNdo9/1fDkRKMbFCr6HAxX4NmdX22MnLzOyUnKVzHu7K6C/JJt3flXlc13qctWKk9CibS+gk3yyZB9Kc5p6RRgoTpNNsluLaC+t9r9cYBxWcZuMrGytURhXMt3olxkZ255FdMZRlscFeE6cr9C5aalBeFZ4mGe4qW3HRmtCamzptEvVlgEZPPbJrGd3qd10y3O2V5NY21EtGQsQ0bA5HFWlYupK0TG07adWIH96uuCtA4KKvVubF24Vzk9qyem511F7tyn5iu2N/Pakmc8L3sPVwOM8g8ZqKlmdNrIZM+Tk4wTQldCasilcqx579qqNkcstGQAYG3NW7MuMrsay7Gy3GfWnvGxMmlsbvwv8AA9r8UPiJpXgS88daP4Zt9QuRHca7r0/l2tmnUyORzgDt3rKSjT1lsYz9o1dK57J8VfhB/wAE+/gNr9nY2n7Xt38UpYZAdTtfCml/ZYZOP9XHM2/v/F6U6s5upy0Y8y6vY56VScqb9ppLotzsf2FdM/ZT8W/Fe78f/ETw1d2Ghac+7S9FutQMpbGdu8nGTivocihGHNOrrYnFwnOkuVH6yfsR/FPwX8VLe6u/hz4Vh03RbKTy7cRxABse/evoqdd1leOx89WtTly31PcdauDKTGrhDk4OeTSkmyIOyGPLHDpirdyOsQPK7uWNNLlRpBuUmyO/vBZWcconKRFdwUHLt+Hapm7DUXJ6HKapqup6zem4GkOtpF03tteQ+4rOTdtEaRpxg7HHeOptat7We8Hh2eQrGTPEsu3PHC1hUc2r2N4KKdr2PJPGngPxt480xLqbxN/YtvbxebaafaMGdmXnEmeozUVo1Kqsnawc1L4Uru5h+HNF8falpI1T/hPre9uJH2XawxBTHt4yD24qsNGqrvmugmoUXy2LbeDYjAkmqay+qOGDS30lwA9v3Ix9P5V1ShGDV3c5515XtY5T4nXp8PXaSJq9zLpckYZrhrdtqnPBUgcn2Fa0lKpp0OOznd7HkPjfUk1m6up9NuPNmchdkrFTt7fKe9XG19DPk59T5x/aCa/8m4g1GMxXUIwj7MCRfQ1tZWaLcGtD5n1+UHSrt3bO5DggdPavIxkX7FnTRglUieZSoT0bPvXykHfQ+lpW5SIklSPT3reyiiakr7kGA2cHHNYTbvdCpt2MvUB+8yPxqouyOTEJtM6C3haVV5/OtJy5Xc76j/etD9WT/RNiDnFZU5e/qRNtqxV0u2MXse5xW805PcyjBxZcui6x5zzwcik2k9CaqaI7CcyKVZBg053Vma0FeOpaspXtJwRx7VPNdWMU+WrodDazJPAGI7dK5aqaZ6StyKwofB2jr1pQimrmbV2TR5JA9enFOTii4xSPRPgl4v8Ais+uad8MvhfrrabJqGoq093peiwzXwzhdyyFd4Az03KOa7MrjVqYmMVdRvq0rtL8PuujhxKoq8mrux+23/BLLwP/AMKbsY/B3jTXWfxLf/6Tf20t2bmediOZ53JO125OwHC5IGK+yowpUeVbs+fxtWcqTgtmfaeuvLcgwQDdkhTheFrsm+aVjxaMVBJssWFxGjLppPzqgLEDitYyjflRyV0+bm6C3l1bwgF2AIOAKVSpGKsVRhOTMzW57LT7V7tmRZCCd8vb6AdTXFWacbno0E5SSex534i07xX4yaQadLNHBkKZZvlGD3Gelea6VWte7PQjUp0la5yPjqwtfh/oyW1hrjiBSTctCSSznjC8csf61p7OFONkWm5u8kec6/4I8b6pbTX08t1ZWMEG+2tX2+fKBzlsnnJ7VLp1YenY0Sowempy3jzQPDsWpLBrQuN0iqI7qGLy/IlOMZI5696h0eeWpcKihG6Ot8JeEk8JxXSG/vBHcKstwgJkPmNj5165GSPzrvpqFODVznrT9va6OU8UeA9E0jWp5fEkM8JvYmkkuDk7iB8rev4VzVOVSbQS+BI8M1rxB45+LVjqnh7S9Os9Q0yylkjsjv3SyKOCQCP0rOhKvWk+wKmoyT7nkfg3w7YaXfXGlahGILiGYpNBO7WsqoeMqwGCRxWahySs9GehZQV2Y3jj4feN9L1WU2X2m9snjyizTJKCO3OOntVvnivIJTjNnlvjC4RpxZSR+TNu/eRgAKffgDmopy986qSRQ0B8PIoPHlvnHXpXY5Wp3O2nFc9zzfxyz6U8+pCO6jikXBmt3wCf6GvFxNZtM7eaMXoeVaqryO0hnZi/ILPnI968OdWMpXOdRcpe8VIFYfNmlJ8yNFLkehK5Xpx061zWd7F25tSWIgx4PUnrWluUybaYscIE4ZR/GMjFbKXumM22zp7Bpvsq4JHFcc3dnTDmcTRszMQBuJoijppKw+7nngj+UHOKaabsOtfdFa2vbmYnOR7GrlLl0JpaakN5LOXwrHPrWa
lrqKpFylcRlkSMHr61Ld3YbTcbIitxOXwHIz78U5S5dERFcpt6Hp2ranci0sbeadyMhIYi5x64ANClJnTBSlsdLpulLbyiOVW80Dkuu3H5iu2kocusdToitbH6B/sS+D7v4Yfs6/2nHbyDV/GdyGRQRvNqhwF9geSSeMc1/Q3h5lKo5VCcvim7/wCR62XYROXtZLRHW+OJntp4UvoY2hi2sCpym4HgL7D17mv03CTjObp8rSVnd2s/JdbrR6pbqzetvolFShzX1Z4/8S/EUqC9fm3t1BmU7vmKjPJ9K9RwW8XYlqMKdmeVaR4m0Pw34f1TxjNp0F3q+oxgy3jgBLeBeUQDpzjJJ9ac7/E3oY0owjFzbOG+A+qeK/FvhjxH4x8W38F1HrmvyrJMbsGOO2RSEVduQwJGMDg5PPas8NCcKfO+rKwlaNaDrxuu2n+exy/xgv7600p9W0rS1N1FM5txPPgT7QRhlAyeuQOmAc1dWT5XZiqxk5b6GU+i2PjC21C21TxElwtvpXkW8lqG2xqRmQxZwVzg5c468VvhYyqJ819VYhSk5bbE/wAM/EmufDudtX8JXclrqU2kyRQ/a7USxwWjrsZwXJ/hYktjtkU1g6VSm/aa+Ry1f30Wmz7R+Eesfs6/8E7/ANmzwN8UNa+JPhXUvD2lXd14o1HVtP1DGpeI9WubaSAWcNmpG4Rqyx7nPIUnAwc/5seNuXcb+IPizi8FDD1E3GFGDcEqcaakpOSlvq1018z4tt051KU3JTldarS173v5/wCR+ZmleLrPUPEev/Fe+dLF9Sa7164sIbfy0iaW5LJBgZ2DBHHPAFf3pwnlMOHOG8PhJzbdOnFXfdJI9yjz4XCRirt9TUm1zQviDo97rlloal9Qiht5GuDjdKVIZy30ORnj3r6ya9tRc77ndzL2NzzGztRps26K1gEkWmXU0AbBwckbz6MSMjuK8lUaVKVrruedQqKFVpljQ7a7g8Watqeobkh1i2VbqZxnyZQq4PA4zng96JKSm+XW50U8KozlUb0ZFdXdlbwX9jc3z3L3qgWtzCD56yqco8ik4UcnLZI4rlnrJpqxlUai2uU5HUxrfjOzZPEFnPJqFohI2SAuqrkcenrjj+teZiffg0tzw8Qq2Jg01axgWFy0tjHPPJJGVl2yCRd3zA43MDz+PvXjRqu/mYUKziuWwyPzNQ1D7UCkas4AJGeMY47gVpWqRauJ25rs9V+AmlCfxSJZEC+USwUHJHv9K+Vz2vOGBm/I7qFaMdEeuXjBn4J65FfhNV+87mi1M++lYJkngdQKwgoc2py4huL0Etgl5DsdulKV1LTYdN+1jZleaBraTntWE3d6ByOmyDUbCHU7cq2NwHFOnUcZaDko1I2ZzISbRb3aykDdz716Cj7SFlqzzJKVCemx1Gh3yzoHRhzXO4OGh2Yeupmy04ZQwH1rGUbHXHcazYibPpwaSauVNc0TD06X/ibtj+9XbFLkPOov97Y1b52MhwecVzSZ3VPgKagqwLnr0p2ujCm0idGyPf3qHHqaxlzOw2clcn1qk7IU5WKF07scAc0ouxjKHUjQtncepqucUWouwpBbIY5zSc10KskV7uGORTHJGGB4wRTXvEyWhFZW1vbfJFEqD2FdEpSitDNcu6R63+zD8K/ih8W/HCeG/hXoS3NwR+/u7lv3cI/vH3rqy+jXq1XyvQ83HYuNDU/c/wDYI+Fep/CH4K2vh3X0RL9YQLpoVABbHJ4r6ulalT5bHzvL7WTlY9Xv73ZdYBBYdGPG2nfqdDp2gZGl69qfjLUprmUQ6do2nzbPt08w33LjqEXso9e9KNRSfZImKcZpJXbNi1vdL1yOW70q8iuVQ7ftIPCgccVDlGb0OmcZUtGjH1HXgNRNvYoW8tDuaRePrVboU9YnE+MNagttFcO7zLNPi6HmbduTwKym0tBRi5S1PIvi5qGopqlrpnhuwmtr+7j8i3eBt6ond3PasKi/eqMdLnTQVPku3sc94YsLHwLpVxo+q61ctcRSb2mLcuzHkE+hPeuuEY0Y2M6svaVLs57xp4T8P+M5x4ouftunxQH/AEv7FeMpDdiwB6Up8k2n2FGSiuVK5S0rwx4kis0bxN4ivri0EgXToYWVkWPPDNnkn8a9Cg5ez1ZyV5xeiVjkPjV4P0/VruWztGT7QF3Wt5FgPuA6EA8VVoXFF8sV2Plf42ahc634curLUkb7faEh5H43D1rJ1uUznPlZ8u+IZGTSbuMnHXIHevKxla9GRpRUp1U72POps7TuFfMQeqPo6SkmVy+EI6GtKulhVGVg5CkCsWXR2M2+Yhjx9atK6ObEaJnUQSxxKCPwpzu3Y6qzSqMdK3n5wvBHes4plwSeoyHCtj0PArdtqIRSchbxh5ZJH0rFSlLQzqxc3oQabGd+NuATxVNtJCg/ZysXp4TxJtpxkhV4pao0dGn3oEyOBxU1NUb4eXNuXwPmLGs4KxpJqLJkkCLwMk0pQW4lzyPb/hBYxppulWHw++PnhbQpbxGfxHLbXcmk6hZRYPyy3VxGyzKSAoihViWZTwFJH0eD9jRppQqxXl1Z59Si4ylKWp+p3/BH3wPo5+JN34k0Lx1J4gsY7COGG6vJ2ubiTaPvvIQME56BRj1NexhvZSre7qvM+fxkoQotNWdj9Gb66ERMMFud2eSq/er0KktWkjxIwlJJtlLR57w6hLJfw7EY/usnlqKPNGXvCrKm6a5XqT3V5YfaxHKRvzwKVRwc9S6cKqp3Wxk+LmiWPz2t8oo4BX7x7CuOvNXOrDXUdWcnrNtrF3YG71K4eODPyQLwMD19BXJOU7e9sdMHBNuB5kupWviT4l6Wt6PM07SnaW7dBmLf0CnI+Y1nRqfv0+iNeWcqDvo2L8R7u41i81HxNaaqyQ28RS0MkHDNnjj0HtXVUxEKknJFUqUowUTw3xN408QXnj+7K6ZJcpHoLSTz8GKRxjBAHQjHSuWWJ56zjY6PZKNNd7nRfDL4n6j4z0TVNa8LaiZDbW6lJY4iFSZFAdACODnjPT+dbU5uqtERUjCklFnPfETXvE/i3Vry30C4u5Ly705DPa3g/wBUSPvJnj8Kia97lW5m+VpXPLdG8P8AiTwnLPb65qB+3AtNHNbWgjK46nK4GeenerpS5Gdc5csVY8h+JWs6nq/iNdbl1cNDuKvdKNvmAnrkdD7VnUnFy5i6aco6mD4r8R3PhC1OpWCzuFtyYrhI+GGehA4IrOUpO9janyzZ4t4q1vUfEV4urXUMeZskiNsEZ56fw/Sijbm1OqmruyKujSuyzyoknyWzksOv1/Wuyrb2djrinsjgvEvia10bULi3uLPZNsG6OVN0cox1ZeleLibU4u3U640mldnmHiLU7HUJvtNlpMNpk4byM7WP0PSvEcYS1SM1JyZkxSyGQAH8PWlJRihxjHm1JvLccseM/lXM3d3N1a1kTICNwHpSbbMKiaZNasWkAPQMK1hG61IUU9TqtNQvbICB071lKKuddP4TStQEAJH51EttDWLdyW4jEqYxn+lZpu43vqVURYEwP
wIrZQM5aMq3GZJi351LQQd1qOGGix3HXilKALSREgVOPXvRy3HPUu2l5PCd1tMyNjGVYjj6imrpgnKx33wHsNT8d/EXSvB2p6xBaaXLcB9Vvrp1jjtbZfmkkLnHOMgepIr18olHFZlSo1ZWhfVvsd+EblNRm9D9Ffg/45+C/wAY9b1nxGvji10v4cfDrSyNT1K6R40mhVMLBG+COcbjnBbtnNfTcf8AjHS4ejSynIZwjiXbljJSbmrpNRUU1pu+Zx02u9D1cVnn1LCw+rpuTbXTTz7/AHXPK/hn+0/pX7T97Pc+FvDFtp1pdXD23hTS7f7WXCrIYohO1wFUO+BIAmVAYAkHIH7FwLxNmeKyz22bpJpO7V0k/n23Ky3Ma9WjUq4lu0Xfmdldbt6dOmup5R+1JefGg3kPgLRvhFqd3HNNHHf3MkflxzR7vm2S9DnGODX2scwli4Kng5Kdt9Vt30NcVjXiElhmtTlv2iPCGu2Xg3/hALmzi0u4udPWS/s7SIlYS/ypDuJ5IUc168XKdPk1vbXTT79v680ehHBN0E5u+hlL4itvA/w7vfDeleFraA6dDbpbzpDliwyQQPXP867Lqckk7JDUo0YKLbsux5T4a1fxx8QnuZbv7JcxwOTfaqsryGOaZsmIDGNwUgE9s+1ZYZqcnGOyOWnOeKm56pIg1LWJITrWieFdKKXN/wCXZx3KXeCIxgM3+zwDxXVztSko7nXUkqSQ/wAdjT9G0D+yraG7tmsLdmNys3mPcQIMlemRkg5+vanXnJYX3rrl103f6/5nLVlUmnZ6HoWr/wDBN34k/tgfsL+Fvjf8JLDT77xDaeKZ7G3N9eJEyFYhILdkGMR7Ukk8yTgEkZweP5+8QOO8JkXFUaVaLtGKu0tdXoceeYnA43BrCu8a9OPNF2dmr669+lj5M8K6B4u1u2Pg2xuIjb2BNl4kZV3kMsm1o1KE7wCCMqSMc5xX6dldeedYSlOi/caTd9Dy8v58RhoSv0szqvijqNl8L9Mn0ixmWSay05oYLZceU0jFdrf7RGCM+5r6DFt0Ka5HbRq3R7f0vVnr15unhG4rXoeGxXfji2v5tRudQ8y/SQC4jkceXNEx+59B618wsNjVU9rKWvY+Nhh8x9u6nNeSfyseg6L4k1CeyuLLxBaSWVyji5aXO4jYMgKe6kfKR6GvbpTcoNSunufSU6tR3jUWpk6D4U8Sahrs/iG01dF2nDW0ZAXy2ByQCfu4PIrgqU61WrdPQ4nCpKq5X07Gv400QeEtGt/EE0kT3Cf6swhZI5kByCSOhFc+MpxoR5pBUk6cXNbHCeJ9etNduv7esLCCK5lfbcC3H7mRcdfY1484wrPmhuzyZ8tR81MqxOJZ5fIT94p3NkbdhzgkY46VDgoRsZ8rnKx6l8CLq6sNejuLa3iYGPO2a7ESSHByCx6E4r5jiWpSjl0+l0dD5cPTc2erw3NrrHhnT/GGnXkEttqDzRPHHIWa1uIiPMhfIHIDIQRwysD6gfgWIhOjUtLqThcXHExbRn6krsnHcdawhJOaN6kOdFfSGkichuhPOTTqTeyM6LUNGaN3B58ZbuB19awuzduNRGc4aFtp6ClFO5ztOEtSrqmmxanBtZRvA+U11Uq0oMmap1o2MjTLi60a68ifpnvXS7SV9zhcJUJXOostQjnQMhzkc4rCSaPQoVVOJalYfZ2IPY1yy0kbvVGDpBJ1Zs4xurtg24WR58eWFY2bqQBySKya1O6o7wKMkyl9oPGetUn2OON2yxb7iMv+dTJ9DrhFRQy6kIxjvUPY55v3io7DPvUpGqXukZYk8/yrayOSWjFOSnPFZyTT0NoakU2QMqORWtNJvUKmiI4UMhOO/qK0lKysZQSZ6/8Aslw/Ey++Ken+H/APjVdFiluUa/u5rnyo1jBBOfU8VrgpVpV1GDsjhx8KHJqrs/e/9ni6kufhlaf8TD7SBCF+0r0kwPvZr7KnHlgm9zwW41IOO35mt4vg8SfZGk0BIPNHLJP/ABqOorCs6jXuHXSVOXuyM/wFpl34t0n+2fGehxaZbxuRFpUCAhyM/M575rKCqVI3mrIus4YVqNPqdFYtbW2m3ItLBLazT5YYIEABJ71tTXLHTY5ZO8tdWc/rN1fXMjw6ZCFt4o8Ts4ABP1p+/wDI0b5Vc8/8apZ3GsAeT55MXy2pYBQ+OCfWjkUpXZUZy5bI8w8T3Guf2jHqdlPJbXEB2XVzKMxOuQCsY69+tYSvCpc0iqdONmjnviPqGtX+rDTGmtYI7vZEbwphgM9/Srk23qYJqb0Itbt7vSXmmsZPNi8oW95ayuNrn+/mtIJ82goqyszHXwzL4WhKzG6EM5Dxhbvcid+euB2r0YRcI2OWq+Z3OM+JunWWqQyraedb6kmJEHmZDY9CO1Q4t6o5/azsfJ3xk1Swu3vjdS+RfICsqMpAb161M4xBuSV2fMXiyVTY3WAAdxH0rysbD9yzooS/eRZ55OWXI7/SvnqcYn0VOTK8mdmcdaKr1sKbuysMBTmsrGtLYzbw/vtprSOxyV3udIoJjXHBwOabV6h3VYJzZbtIyy/MMAd6bikTBdiVbZfMDAc1lUegKVpWG39qGQcD24rKD1NlG7IrW3IOQMCuhpI5pR94utA7REAZHrWF7SN3BTgN0sPDNjHfvWjvYypv2c7GvIBw4OOPWlHQ6p2tcdDJk+3es5PUqLdrm94DsfAcvi6x1DxrBqsqW82YLbS9PjuWmc8bSJMhc+uD644rfBSw9KupVP0/U83G+0qU2k7H72/8EavDT6F8JbnVo/Ar+HLSVVaHT5pmklZccSSlud5B+nJr7rBVqU6CUdz4zHqrZuT0Z9gx6rdJI6XrL+9P7tEHA+vpXRzSW7B0YezXL0JbzU7GzVW1C7RAnzEk8KPrTdWCkrs4vZyk3yIradc6Nr1z/auj3azgHaXQ5UGnJ05vmhqdC9tQhyVFYq+KtdsbFTcX7RsYh8gI4X6+tcVapG+p0Yem2tDjbuy1H4hsFivlsdPjO6Yxna8nqaxjBYjVvY1p0qeFVoxtdt6d3q38zEv9Gkjg/wCEb8KCFNsh3SJACW54Lfp9ayjFW5b669PP/L79zapPklzHl/xJ+0eF52D2892sEbBoZZtqzykcn0AHT8K56vuaJm0KrqRstDzj4fXHhbTtE1fV9bvZxeyp/pUKpmKzOfuBv4htx6dTU4ZRjFyudco3SuVP2efFes6rpusf8IJJbyWs2rXBaZYGjBhDYyqMPmJ9q6KcZauDv5+Ry4qnFT1ZBOPiKPjZb6pq/hO5s9Iu7MJFfKzI7Sq3WSNsYT6VjJ1XXTlsVRUI0W0P+PkOsxXsWvkWYe2mWN4rKNQsoPRiMDDY/Ouhxad4jUlJanzH+0HpscWmvrS6OqWnm+ZG9vGUVz3JHfntWcqGnM3odNCprynk+pa3qN1aiaGGQqIv3SwE7GB6gr0B96h3tY6bOUrHAeKYktmkubvT3EgciXzPlYc9Djr9fetKUdTthHl0M5bm8h8Ja7qNtLEj/Zkij3HG7c3Y9jgVeKdqOh00
klUWh57rlu2s6MLzXFvTPFGFF3GuYtv91mHp6+lePVcqlP3tDZwqTna+hweraTYW8Ujxa/ZysrYWGJ2JI/EV5Cik9GbTpQpx3MyJQp57GiVzkbvqiZWUndnr1rJtG1F6k0eChz1zmoSuwrJbjrQfvymc/MDXTF+6Yw952Ou0sAWq/T8q5qj1O2KtGxeWUBfm9awWrHTauONyiKdzY/GtNAqSsVXnVshTz603LQiK52VwzF/m71ncpQUWSA7VyQc4/Om5FSkuhCZCx+QU1sZqLLVmis4zwD6Csm3GWho5RtY9M+BnwO8cfHj4naB8G/AulNLq2vXixQxPkKidWmkA5CKuWP0rix+YrKcM8RKPNLaKWrb7Cq1YUIOpU0UVdn394+8E/CD4Z6Jafsc/BTTotSsvDfzeLNRkhMh1bUgAXY4yNqHIAIIHTtk/SeAPA888zPEca51F1MTPmhRg0nGEFu46aa9fnc9nJsJKrhPreL3l8K7Lp82cFqOl2U/jbTtH0aL+z3tiJZZLW0DAlTkrjHGRxx69q/rnERoVYqi3aOl7W+757HuRg1C7Scdj5t8d/E/9pL4C/FDx3Z/D3Vk1nw0upW98PC2tRB7e1eYkGa1kJzDIoBPHr718N7DMOF8/nPCK+Hla/wA2ePUwNeliXXjOyXTui54m8Qa142tdNuLu/k8y5VZnmbBYouWcknOOOMnk9q/Xk1UivZO19T31iadCim9b/qcf8TprHULW4ttOu5rWSQSMGnmJM7IpwQFHA6AD19O01JNxt1CXs5xVtNDzXR9F1b4c+HbceH7n7Mbq1lnvI1mDecc5bcQfkDd2POBgUUqbpRfQ5+dRjy09Sj8Itf0TxFDqGt63o09lNb3/AO8gmLIXPZh0JT/JrpoVYSvK1mZUZTqtzkmmtNSl4p8b2mr6ZqV9BOqv9k8q2jfCLGi7tzYJ43ZP1wKxknOrOXM1dLRuyVru+vV/jZCxFVSh7NJXPo342+N/Gf7L3/BFnwxpWg6YR4j8b29xeXF810UNva6jK0EZCbSN7QxygHPAc888/wAq5pQnxj4q4mClejSSTS2bXd+p4GMrYqWGq1ot2ilFer8z4J+B+o2nh+yktV1iS1jNu0d5Mznagx8xDDnJ5AOK/ofJlSwlFQjoloTlEYLDKMG3b8zc1jVx4zSfUkthPHNOu043GNI+4BOQx7Dv1r1q8va1L3PSlUdV+z6GV4/iXRCrnToR9ssYjcSWwDL5pbg+xwOQaxxSaSt1MsXONJLlXkdNfappWt+G5LNrm1yFjhuHkAEmHQYYewYf+PVpB04QbkxNyqRs1v1OLsLfVPDoksr8FHtXMkkYlPzKc5dGOMcYOOnoK46lWyslYxVF076nK+IL/Tb/AF2bSoPFkyQyyZsWlf8AdD0PXj3HvXzOOrQjUcXLc8XF16Mq0qCqtX27FfS9OutLnbSNVCqH4S4Qgxyrycg9MA1lhYVIq0gw+GqYeny1N+5PF5kdxFJATtKbWcHBc+lPEWSBWjNHo3hfwHrfxF0SLwjoWmxXcl64WOOW6SBUHJLmSRlVQByckYxXx+fzp0sulOeljPMKMsRhHGKPan8CeB/hz4I0jwp4d8SDUdWS5muNeFkimwgdkiVI4Zc5mYbW3uPkJxtLDk/iOaYiliaicGVgsNWw8ORtfIzbt1MR9q8uPxHa7op2jDzSG79DXTL4UjFq8tDRjmYDa4FYtK5a9zUgvrfeu5eBioehUkpxM7e8T7T0z1q4tW2OSzhIg1KxW9h3oPnA4NbU60oPQqpGNeFihpt/cWE3lOeAe9U29zlo81KpZnQxXqS2pZWxleRWTi5M7pVexlaPJu1YqP71dlOKjE5ItzqG1e5DE81yzlqd1RWjYz7eLzJmz0qZPsYwSiXkZY0wQaEuppGV2U7lyxwT9PahS1Odr3xkYBOWGPrTavsavSIyQc+npVJGE73IwXyQVOKt2Kg02OkUNHkjipUrMqsnyEVs+3lTn0rSS7nPFSkz2X9kf/hnzTvH1vr3x48T30MccoFlp1ip/evngufTOK2wssNGpeozPEUYuN29j92f2aLjTZvg/puoaTGwtJola2Ruuzt+lfWqKdJK2h4DqRqS0O1mvYXmjVm3t3wv3RT5dS7OKHXcixRCDzCI8ZYAYJNaXsiObmZVtpIWgnlm3fMP3UAbv2OO1S5LlG03K/Q5jXdJ8Q6dbXKx60hadNzIwBES+gHc1yyu9Ewm4ykjz3xobTw5Ml5MGnmNqRBGTg7yfvNgVpCXK+UI1JP3Ujh/HV3Pda3Z3N5aFV02386JnuAsMx4yNg5OKKsIqd5ChBuMn3Od+JeuW15qP20WkOI5IXk2jO8kjAHv7U1acrE0m4opeIdTt/FsUlnc6PIBCuWeMFADx971rqXLB2E4u1yhrJsdRktLPTgrslvz/pGPwI71q62trGXs/duePfHZkjiln0OWaK5hiBDwzEqCDyCP4abqXWgKKjufMPxB1rTPHmn3sGpQtFqkf8TjG/HWsJOetzOdm7Hzf4sEsIubabIKsRya8rGyfsWjTDRj7ZHCXfzKQp/GvBop6XPok4pFdgwjIPpWlVozdmtCumCp9Kxd0zSm/dM2/Ubi2eh61Sdjlrx0Z0sIzGM9MdaJfxDtrNuq7Fu3Y8A1fQdNMsM2zD7eg6VDSkTL4xmXnOO30qVTUTpjqiSOIqucc5qpMwmrMsRNuBX8qwcWmdEErDHiaKUSqK0Wxz1U1O5oWbfaLbk8445oudEGpR1BR5HLfjWUk27mbqK9j2n4IfBPxlpuoad448f6fa6J4euQktvqOqeIZLPepOFdYbeQTTg9AoGDnkivXwOBxVGpGpOyi+pyVqtOpTfK9UfuX/wS4i8NaR8HZE8I+FrrS9PMuQl3btEZzjmUK7O6qe25ia+rowjGCcdT5jHtyhyt3Z9E+HL861qlzJbKPKifEkjLxx2HrVKTbskccpxjStIta94a1LxHALSEpbWxf940nzM656Adq2VKVTZWRlTr0aLblqzSh0+w0LSBZ2m2OOKPqi1pOmoUrJnJ9YniK92crpfhbU/F1y2oahG0VkkmUW5HMv4dhXnUcJVru728z16mIpYeHLu/IoeNvh34m1RbhNJ8VfY/NXaDa26hYkHb+dW8JJP4rehtTxlJU0lHXzONsPhx4ttfCVzFpfjloIlbaJsqZJpPcgcD+dZRpRjTfLIU6kJVPeieN+MvA+ua/wCOFgXWb/Xri0tiZ4ZLgRRQ4HLYUda8mpTftN7ndTqRjG7VjmdH8UWnw+sPEHgYfDy71mK9T7TZ3M8uWZyQTGXz0z+YqsPVlRco2uGJcqvK1pY6X4R+MfBunX0FhrVgmlatZwug0vygjxgrnepICnOfXtXpYatGq7NWsclSE2rp3OI8NfFLUPjB8V/EvgfT/Flpq0OnQIPJWVGnjl5+Rg3C4x2NS61KeIlFdDtdD2VJTkjhfHHhn4oweIJtJ1jxxd/Z4T8tnb20cjRjPIcNncPSoaknowbpzVoo8u/aH07xdomhXWm6hc2E9ltE1rDc2TRPn1X
HANW3NQaOhRhFaLU+ej4jn1WwWO1sWtiqkeUW278dRnFcSbZvSXVnLeObzdI5RJFjLDa7Nk8/wmtacrPU6ott3RleIHa3+HU5OG36lFkheMAHrV4r3qWh3QahY4rXtAnksZ4rHSrwRzRhleynOxj/ALQPSvMqQkqdkPnbd7Hn2r+FdV0ohrzTZIlxw8mM/nXg1VUhO7ISd9UZnktuBL+wJqXNtFNKKHohPy5+lZu4qbSkTRAiMg/nVRdjWorq4lgxF4YyeNw5rpXwmFNpSO0sBts0B9K5JnbzaFmNGZgT+lZLQVN6iXlsdnXkUKSuXNXRVSMxjJ5rRpMzhLlE3gN92jlRTlzDnXcuDWcrJktWdxqKqe9UtUO7ktC3p9wlldw3jWkc6xSqzQTFtkgBztbaQcH2OaiajZq5pTgk02rn1B+xr+2tdfBT463vi3wL8P8Awzp+o+KNEOjW95LA0UOhlv8AltG7yOxHdix5x6V89jMkxGKqUZYeu4ygpJ82t+ZNN+ttmenUo4XN5RoVo2jdOye9ujPbvgn4I8R+HNLvtV8c380l3A8st1cFDsvix3CZCfmdHzkHvmv6m8L6mGp8K0qdHSNL3Xp1W+m/+Z9RCtTxFNKla23pYi0q71iPxxd+N11dTM0DxWwSDi3yCO4wG54xnH4V+pww9CcLTiVVowpUoxeqer377P8Apq33Hz98d/h9rnizXdQsNJ1G7lFzbQ2lxuG7e4fMkhx/dUd+5rhzDDU8bONKN+ifye5y4lKc9Fa5yfjqaXwlZi8e9eO0tikE08zlfNROigd8kjpXu160cNQvJ2Ud76BKUaVC03ojz6H4g+EPiJPqOtxaytpNC3lx2cRUyQRE8AKxzuYn3ODmsMHj6eMourCSfzOOhjadSnGEXd9TjNSn09ftWm6XYXsdvp9wHvIBd+YLonlImOOOevWvQhXjVdr6K1/M3VRU5czGfFLxtqupxXOnaiscJS1jCJbAYtWVc+SMY5Pf3PtWsp3v2LnX9pTUjzTVIr298fQ6PduY9M1GyUMwkzypzycYzya8bEyn9aqTb932cn80rnkQp1JZmnPWLPp7/gspqOneKfiN4b+GvhzS9Q0rw54V8G6VpelR3d2wN1HbKyeb5HCoN7MUkGd6sSDjFfgHgtl6zCjj8xqy/eTqyb8tTz1g6uOy2UajteTZ8b6V4J1PSri90os0jKqsy5x5it91QO/rX71TwH1aLXMaYLBVMInBMm8N+INN0mTUZ7i3KXUdzmwQNvMbBsKfxGRmnTrppq+prhq8Y1ZJ79i4gujrGoahdSr5ZGZbC4cPlcfeH97HqORXRKuorU3lTc53voYXirXJ9Ov553he4sJowIzGO6jgn2B5ryMbNwk3J3izgx+Lng/etp5GP4v8TeJvF1rbSWVwhggRVmz1wPrzjHavNrYutUivZ7HiY7E4zGUovD7dSr4h8Mqmhw3k3kXMAXdCI2JYnurY5HrXHicPzwu1c2rZanh41Ki5rak/hlZZbY6Y8ksKTIDGlwMqPc56fWu7CpRpqJthm6sOVFq5iXTZEsb8ESH7lzE4YN9Mf0rCtFKXvbETouFRXZ7N8DrO01Tw7JJdWiERkFc9Q3rzX5rx/VjLLYwjpqejyr6ud1zGojUbeOBX4+4cr1OPm5ZWILtx3PWs5pXNHqiGyTMgPHJ70SbsiFZMvv8A6vA7VDZpUS5bojSbzFKNUXZlB2ZRvYOpH4U0n1KqU7oqwylG2Mf1qtHscivGRDqNgl0hliGGropms4Rmroq2V/Nbh4ZTjjjNbSXU55e5uO8OSiTVS+7+OtU/3bOfDybr2Ojum3Ftv/668+Wkj1JoqwLtkJxjNNK7Ja90nfJOAOMUS0QkrIpTKxfg8dzUoLJaiYAGB+taLRGTldkMr7ODn3zS5riauxkLmQn6U76FRikyZ8CHkdulZ3HUehBAoJ24xWnvNXOVSfQ9n/Zb/Z3uPij4o0/xRqWu6Xb6ZFfpE8T3am5d8ghVjzu59a9fK8DCrNVKr0OTHzqxpNRW5+8/wp0hPCHww0fw7bxlBBZooXv0r6WrUUnaOx4lCi4x13No3lvaxlZtq7cs7E5qZTUUbTTtYxrK91zxxrHk2CiHT4QQ94f4j6D8qwbqSafRiVOMfee5sDRdL0+0mslaSS4lOPNkc5Kj0qlGNipVLtHMX6WGmM7QQSyEphpLiQnyz2qVBX0RLlzLU87vNS0/xF47fUNRi3Q21uUlLH5Semc0U03UckLl9xLzPMPi5p3hy6ube6DyRXCXAK3QlJhCKfugdxRJJyvJ6G8qnsoOJw41z7bq896LyFo3v1FkbhCELDGTz0FZwn+9ck9DKEPdWhq+L7XXL6J3tbxoJBzE0K/upsfwj3rvUrszcoxVjmrqwSXSr7xLcR3S3saBZYdhV4j6+9azpqcTOUmny2PNPijqP/CReHbnxDpM/wBnvoEAfdjbKOnNOmo3M25LQ+RfH3iCJ5LmW5hUXAJ3+X/C3rRUXM7GUtzxXxfOZbWe6dssT3714+MtGkzqw0OeukjgpJWMmST+NeSlHkVj3XpoNmfdGa55xdxTi4rUqCQgEUNWWpdJrlM27kJRiTxmlLQwxLtF2OpgYeUoB6ino56ndOyqst2xPfjnvTk1FFxSJZZlAIY/hWakjln8Q61w2OgpTmddJLlLRxjJHXvUc6IqrXQWIAdsZ70pTQ6LHuN0fTpSUtC6seZC2Nw0E23OOKuKuYw0Vi3OGZgeOabcUjRU0tTsvgZ4W0PX/H+mzeJ7fWraKK7QxarpemPdguDxEVzgZOBkAkGunDV37Rb2ucGLdKNOSWj7n9Bn/BNq0lsv2erV7nSbzSzPIxFnqTSNcKCeN5k+bJ64PTOBX2FCopU03pc+UrKd7vU+jLOaO2aPS9LhVELZYbep712JbRRh7JOLqTLmvanJZwBIVO88ACqr1JRSijloUPazcnsQWLvBYG81N97Yzs9KlOFOHNN3NJxh7XlponD3GoafvcNbIeenOK39o6lK+yMuXkra6s5XxpcNBoM95cav9isAhU7SGZz7d8mvOqqpJaOyPVozhz8qV5Hm0ngz4g23g24vNOvxp0U7F7eC4+aVR/eOe5rBYZ+y1dkzaVRPEJSRh/C6+svh1ompav4ruRcandRStLc3KgAjIGSR2rCCo0YWkVjKrrSSW1zzzwZ8QPBHjz4meKWV5DZWcKLAbi3MUVyO/lM4AkxyMjNZYRwq1W9kDjW5I8pl+FNG1Xxd471691HSLPUNPb5LeCVzHNChHBJb72Pbiu2lC05KxrVlTpU0upb8dfDfwb4OEjf8IjaWV/c2ZkW80S3KSFgM5Yr/ADq6mHo3vZXI9tXqJK55dHqGtfE/R/tlncQCOzR4pZjcIkznJ+RmzncMd6wj+Bbi6OvU8d+JNvrdrp7WLy3V4GJESXTLKsvBymV6Hr1q3eKsdUG5/EeA6e1lY6y9vqFv9ljMjFbW8DJg9wDggA1585RhLQ7Wmo6HD+NpbOS9nWzlJUS/KhlztA7H/GqpyvudVLmsUdfszc/DK9mKlT
DewszBc461tVkvZHXSi2zzLxXJqzqNU0WeOW22bZmtZiCD/tL2rysUqk6d47Ft8jOVvr25uF/fSMzDu5NeCubZjjapqzOYMzA5/SttEiXq7CtvQbsc9cVNkyUlzWJojlD+orNw1NKr0IbJyNQO71FdUV7pzq3MdtYMTZpz2rkraM7FpEuQsSw+lYbjhZC3YbZub04xTUWaSqaaFASjJ6+/FaONjGzkxwBb5ie9RKaWxrFKIkqysMKR7VmndkzaYkUbA5brV2layIi5dCdAx4VuKnks9TaLla7NHw5qVxoet2erW9xLA1vcI/nQY8xBnkrnjOM9a0oaVL9jKdSSlofoh8C/jDrvx18I63rl3a6kZoIIYrS81jUmu57qKNNqO7HAQADiNQAoGO1f0D4YUIUMmqKmrJzb+8+jyirGnh0oRSSfTuZ3i26tvC+jQR2zYkjRpZmkf/WuT0A7DtX6tSjUkm5S9D2XOdV3OL8TLbroGp6/rlwtvcXR+WOMbBtPJC06lGnKDirq6tdO33Nar1M6lSSnzI+YfjL8TNZuvB3irxH8NdJvLnxv4bvtHk8ESNFDJp8T/asTmZJARK2NgUEYGST2r8w8T81xVGphMvjf2da6k09dNl8z4Ti/EZi1To4Vaybv6HzP4b+Gmo+OPG3irxb8Q9Qli1RI5LnWZbaPyUFyRubZGmMYPQAdTxX0vC2TUcPl0ad2klrudXD+XOTUZ35ra69RLv4MfEHwvNep4c8ZSQpbW0NzcLcPuLPu4jAPJbByf/rV9T/Z08M3yVH6M9yeBxSi/ZVPvKWi6h49vNXvn17wjL9jgi824urfnzXX+I5/z2rfDvGKcpVIad0Y0J5h7Zwrx91bM7z4Tt4U8UX2h6PcIt7dnVNPSYqQrMWuAjJjqMk9q5M1xNKGRYiSlqoS8uh3yrUPYzcXay/E+lf+Cwn2bxZ+1t46m/sSW1g0J7PRbRZZQwiCwhxGMADC5LA47mvxr6PuGVPg6tVa+Obd+t7nLlapPJovd9/M+Hri5WLUUu7+8dz5p+zzxjGwxk7VI7/Wv2ypiac7dv8AIyniI0+juc14J1u61bxLqeszaRCgeUoytFnykHGVHP6V4mBrRnXlK3U+WyrEVMZiak5q2poeO7zTtDuUsRCsFw1kWgSJshuMhsj19K6MbiqVO6j8Vj3Mbi6WDai9ZPZHKS3fijxbdwzXkywW0YUtBFjanbdjrznmvFg8RjK16m3Y8OnHG5hW5qrtHsXJ9JvPCojsrqOIi4YNDI7bgGOeVx7dq7Xh40ZJdzp9l9RrKHRlq+jgs1j1SaPytqorKoIWQd2APGM5rSUYxTkz0cTPko83Qoarq9kmqf6HcBZ5IQ/kwygIvBJzjjB9K4J4mEZ8qZ4lLHxp1nCMtWuhW8g317DcSSt5mS+xSCiH6fT1xXBiazm7I6KktFJvU+j/AIC2TDwZKXUIpwCvQivznjlKOFhfudVKpKULGtqB1bTbkSWASaHPzwsACfxr8wSpVL3djlrQrxnzR2HyEzjzAuMjoTyPauNu0rHZBxcRLQbDg9+lN7EzVi15wIIYYGKiS1LtzUyFSVc4OOeDTtoYp2Yk3zcDFQ2buWhn3cJU7gSCO9CTbOSokRRzsTjJzW0W4qyHBNFXVrKRojPF1A5xWiquTSZFZRaM/wAKTlNRPmHkPjFdUqcnC62PNoyaraHWzzAuQp4rkaitz1veluLDEWw7Gp54o0ukPk+UbV6etRJ3MnLsV5FJGSKUdx83ukQwCR09aubdjBaMikQSnGPrWabRal0ESNY+nStUu5Ll7wkjkjBPShRdyZST0I41BlygHNbN8kQglE+oP+CVPwZX4q/tYaNdzWDyxaOTcyOM7FI4GfWurK+edR9jlx1dKNj9wY45Yo1iVACBt3HoBX1CVlqeQ5dihdw6bqF+lpNdt9mhOZFReHPoTWEk5ysCbauar6xZWdu1pY26xQKAAgGAPrWySskiKl3a5h+IteS4mFrb3hQ7cmQIR+tXboQoW1ZyPir/AISB7X7VNdyxQs6osIIJYZ61lVUoaJm8I80dEc74vuNC0+0lt4byeBhCTIoXLOe+KOeMFYVvZq9tTxv4rJceI7zSvCWkTyxQEGUxuoLyrjJz6VjUfPNRME/ecjO1qPRLPTrbTNVMTaXNHtdZECskpIA5rqjGKjaxvBcsH3Mvxe954avovD8upRPZ5Q2+6f7vfGTWqXI7HA3GUuZIzPEOqPpVo95dSOLeVSsiLKCw/wARWzlymqT3Z4d4rsNQ8PW2o69oupvLbuzFo2OQN3ZgegNSnbUmpUjI+UfiTcZ1a4u4Y9kcjHeg9aicm2YPXQ8r8ZTm3tHOMruyPSvOxsH7Bs9DARtUOY1i70aXT4o7KItct80so4Vf9nFefGMfZ3Z6dWf71GTLI+zBP41jJI0mnKFyq8hUHFZz1RNHVWM6didwI/Ss5IwxDsrHVWh3IueOKzm7VDuqX9qy/bjpg8VNSbaHCTTsOeHc+N1ZxkXJKRat18tcEfnVON9TOLadkT7FZSAf0rLVM6GrrUIAAQPzpuLZjZxkXFjV0wcVCumbqSK89r5Mm9K6YNtGTdndF+xR7rZBBC0ssjBUiRSzMT2AHU1LpynKyVw5pNH0j+yX+zl8XLXxzaz+N/BGs6HAu27sY9T8WnRYLpgQUEkJHmSqf9gA+/Ne7l+XYiHvTWnm7Hm4icKsWk1c/cf9jC51y6+FVnda7FZrLgCQ2KERLgfdQk5YDpubk45r6CDlGKX3nhV/ZuLSZ7T4buYJL6WYfO68Fh0rohJrU5MRH9xZMt6jd4k82SNRg9D1ArN1HKV2YUqSUeVMdp8raiiyCHZGp6sOtdEIe0V2tDmrr2Umr3ZR13WBfMNJt3wjNgmPkn/CsKmIVaXs47HVhcPKn+8ktTj/ABpo9xqF1H9uvY44LTDKX+ZYx6/7Te1Y1tGuyPTpOnGle2rOE8W3mu+PfFUHg3wXJNFaPMovLmYgPIOuBnucH2Argkq2IqWhsW5xpU3N6s88/aX0q71rxDH8P9C01/Ke2WC4dZSCqGRQxI9+eK5sTzuo6a1SDDRtF1JrqRfEfwO2q6/DpelaWtzHo8lukMEcYVV+XLY29e5rrjTcpqy0R0UFy07LqQeMfiF4W+Hk0mteMWhgivNLCLbxxsrhzjG0jqT2rplWoQfvPyOWnS9tPl7Hn/jT4nS/GCeR/DOna7bwWFr5c9hcXAt5FQjqhOCQRXHVqc8nZPUitRdGXvLfqfL/AMNPAmh/Dn4l654L0831ql9dG9VZb+S4jG5slmO75Wz2rHD0VCbTuelDmqUlN9De+Onhy00jw2/jHwdq01rd2rFb8ycqT67T1z6gV6M4OULxY4z5nZI8U0nxHJ4nlZr6VZfNjYbjCGDt9SPlPpXnSalubwi9jy/xpdefqUkrzhtp2iUxBWI9Gx3FXSg0jvpqwrR/bvAGp6eF3o7xZ3cAjJ5PpXROlHkep2UubmPIPGNtq2lS/wBnTebGiE7A4QnHbDjlh9a8XFVHShyxYVVrscxP8w3n8TXip
+8FNt6MrjIyffmrexE9wbBUnH4elZOVmJbjYZFUnLUOTNp3cSO1k3X+Fxk10QnaOpzKKU9TtrBiLNcjnFclSXM7nfpylqKYLgg81EI31Jih80gkTDHjqKptI1ULIqvGAxrO9zOUrux2v7OPwQ8RftK/HDw58C/CV/bWt/4ivhbxXV2SIoRglnbHOABWVecaFLnl3S7avRHLia31ei5tXsZvxc+G/iD4MfEzXfhV4uhEep+H9VmsbxR0Z42K7h6qcZB9CKqhONSF0bQkqkFJbNXOegUvyR16c1rKXY2iuVXLVvEGwFGSahJyYpTdzSs9KeZ0Vc5J6YrRRey3M3vc+8P2QPD0mmfs8xC3zbGS9/0uXy8F164OfUAgV/RHA+ExdHI4KHuttXdr6X1XzWnlufVZW1GjZrVlvxJ8M7rXbtvib8Tta07wz4M0xsW99rF0bdbmQHgRqAWmI44UHrX0eccW5RkK5MRU959Op3TzTAYX925XkeL/ALVmuaZq+naprtrf+JNM0HTLtbNLu88Mmxe+u2UGOztYJW8yV3HJcqFUHJOSAebCcZPMpww1ChP3rcrSeresbet1Y8jEZsuV0oU3zp2s/XXoeLabZS/Dz4bH4t/EzSVtQ1nLLp+ng5827JBjyB94J3PTdmvRxOVVc8VDEY9fwtYpdzOhhq2Km6lbS2x5r8DdO1YLqnirVoy82t3cjqZYOOQDuPHB9M96+zynDPCYVX3k7noZdgJUaTlJFrxxYnSNVk3yhpb9DJcnJYxxqTtGB0ySMmuuScqtjtrVUkrEuttp2mXzaS8C/Z7q0AncfK052Fjn+6gPf0NdtPkjD3npqaQqRjHm6nQ/sT+HfAl3+1j8PdY174a6brkkfiEXNpb3N3JawXVzGwdN7orFQpUHGGz6HOK/NfFPB1v9QMdicPG0+R2a691/W1zxcXhI4+lKF3FvrFXf3Fv9srXdB8a/FrxH4o8LeOL/AF/w94y1W61jT9Y1Wy8qS9YuYpQig4McbIYwcdu9fn3gZNx4XqYGvFQqUmlKCd7XV1f1NsHh54fAxpS6f1958zeI7ewg+228PleVC7ENOuNzAAlV+g4z6mv2Ks4yjJRT08vy7+qOWtytWbR57od1DbapqF3o05t5rX97AJWzngZ47814OHpS9rNR0aPm6FSn7apGjo0yW20rVPEk03irUXV7rzNx2gERqCBjGOnNbxwbnL2k9zoo4Kripe2r/EX9OtBpupnTIMQ3LBVSdk3pz7HhcgcfU100afIz1KMIQdupzHjyfxBomvRQ69GZbWGUbRGmNvpxXlY2tXhXi5L3T5zNljo42DqxvDyNLxU63PhuTV948mSLFtyMKe6+3ripxmJisO3Fnp4qUHgXZ9DkNBeO9nMsUCyTYyxJ+9gcjP8AWvDpzXLz9T57AQgn7S3vG/osDfacmIlmIKnnLc9BW0XzTuzs5pTkfTvwWVYfA8hXKjcAFA+7x0r854+nzUoLzPUppQomjqGHJyP0r8us2yed3KwLAMuM/Sh2iKMXcRAY+vUdqiTQ5O48OMdeD15rPmLjNKJE7kSY3fjRzXMdG7jt/Gf8iqULq45S1K93NEq/vGHtk0+R9DO6KDOPM+Tn1NappbgovqThmaMoy9RUNxvdF2gt2Yuk2LR627r0z+ddHtn7M4W7VrxR2EFleX8wt7KzeZ8fcjQk1zWlN6anROtGC942PBfgLxL441k+HvD+lSyXSj549hytbUMLVrT5YoyniacVe4urfDvxloniVvCWqeH7mO+DYEJiJJqsRQqUZ8jWptGpCUOa+hT8Q+DPEvhq+Ona5otxbSldyrJCQSKcsPVpL3kVzRnG6Zmz6VqaIZH024CAcsYjj+VT7Go43swtGxVA2/KDz9Olc7VmZtqIxuOnI681ukuXUlRcncjkjeTqMc9RS5ktinCKe4+CAhtoHTuayqNy0B6bn6y/8EO/hZ4d0D4Raj8T4ikt7fXBjMw52Afw19XleHhSwqkeBiG6tZn3ZDJNPKqXMmE6geteluZKLSsW5b2ztLfzRaAZ+6u3Ofes0lfQpeTKc1m1y7XNxlFHzFMYB9qttLclmTrWoedeRubXCiP93GE4yO5pKpzSBNKNmc54i1yK71OO4Aje4Vwqq8Ywo7kZqJxlJ6jTnay2OE+LQvILCW5tlW13YEboQXmckYUCs5xt1Fe+h5zLpDLrs2rXr7r+2tQJrgyYAJ/gA9aUIKVW7Woocqicp8QXvfFeoRaPceHzBCIRM0sT4yy8jg9K7JXggcjB8VaZrJie68QQtJN5A+xxkAgY6moSnNXsZuEYoral5Wr6E8V5C0UixqiqhG0ntz2rWC5tzGU7M8K8Yatf6JLqKgzuwZluI5FBDg+o7H3rVK7sZyTeqPl34iXLS6vO0I2qxPXt7VzVIuLuU1ZHlvjS6IslgPXdjBNeZj5NUbG+DcvanIXKPsJUV5UKslpc9eEeZkMqARYGcjsaJOTZpKpaNio6ksQPWiWiIptWK1xGOp69+KINLc5qzcrnSQSqsK4IqOW87s9Oo7Tdi9bzZT0x61MoRW5lJtO5IkmX5ajlikbQldaFmLkctmocrMTlystRmMDaxH1qGtQ55DhIkbZAHtTs+o0pNk0bu/KDHqcVL5YmkYdyYQmRfnPNRKdti3yxRa0NNes9Vhu/DUtzHfW7iW3ms2YSRMvO4Ecrj17U41ZJ3Rm5pLQ9N+ACeLrn4gJqWv6bq2u6tcBk066kvmuBbzlhiV13Zc/ewNw/TFdeFxE/brmlf1OXFP8Ac8z0P32/YB/4SFP2btKXWzcSXIgCzG4djIzDgltxJz+Nfdw9n7BKPU+LqSlKs10Pe9CWDS7YKIT5jkEnGCSannsrGVWNSfXQdqM/2u4W3sbYu7N85I7etTH36iSRpSi4U3KTLGtyx6dpDRtOIvkwcfyrpxc/Z0eVaHNhY+0xHO1exy1rZBbmDTkuzbtcHdJBndKU9T/dFcFGikktrnp1K7ndpXt9xhfEnUbaOYxWYwifLErnjPdj60q3KtCqCnypyep554Q13xXpuv3WraEII2aUl9QvlyUTB3CPjA47n1qYSlGN46WOiVKk9ZO9+hxVr4k0Tx38V7zxT9qMhiEUOx35kw26Rh07DGa4qc6dfEuTWptXg40FEg17xRZeMrHU7uzjuXjivViP2KNguN2Cdw6/LxW06sXdJGLvTSj1PMPiNq8nxF1qHR/DRuzZWEZghM8SvFLjkDPIDdRmsIv21T3XoKgnBXe7J/APh7RgI3122FvdQKwliu73Mg4/1f8AtIe3pXoxgnY6ZvV31R5Z8TvBniHQPizp/ibSfC0mnabPbMkr2cYdX543+2KyrXhUT6FRrQ9m4oz/ANofRtcs/hsl5rWoGCWXdLbOsZ/ejsGA5PHrWdeo+TQVGabdkfK+orr1gTqs+WilIJnsRuhI9GzyprjWmsjthJWt1OL8WzNNPIUCsC331bO89ifet6U1c76Cb3JtLlhj8KX8k2xV+Tdv6Lz39q2qNuGh1qfI7Hlvjr4feMLCeXV4dNlu9Of5kuba
TzEQe4HSvm8bSqqTa1QSjKTucbI46dPqK8xJ3JvYh4J4X8a1knYS03AIQeP5day5bobXUelqrDp+VQm0zRakMFt5d+NqHrjNdMUuUxqqzOus3JtkXHGOM1jNWN6fw6lhOWyelZx0RcXYtWdlPqN3FY2xHmTSBEJOAM0oUqlWooR3ZUpSloj6H0T4G/s923w9ttK1Rry58QE7rzUI5f3an+6o9K+srZLgMJhleV52OlYSimm5X7k37MXhKP4BftSaR43W9WXTksbr7BentKYztXjvmvyPxAwWLxnD8sJRvec4pW9TCrBRnFrbUj/aW+Cvi340eGW/aK0+eS/8T2928Hi7TgC0k0Wf3V0vrhcKw9ga+gy/Czy3CxwsYaRS89ep2VYSxeFjVjG04qzS7dzyrwV8B/HHjbxBaeGtB0WWa5uJFUR7Dnk17mGy7EYypGFNXueXVquNN9z6n+HX7Angm+u9Q8LeNfBmtNftDHHpt3pTjfHcAfOrxtwwz7g19hDIMEuaNaDja2vnb7jtw2AVelGftEu9zovC/wCwP8J/D2rK2t6zqEnlqG3vCAEYHlHUnr9M19bgeFMmoTg1Fyur3e19NP67HqU8vgk58t16nsPg7wb4Ha6t/h1pBjWzhVp7qQoSlraRgtJM/U5wOB64FfX4vNcHkOT1K0rxkrKK/rrsbYivPBYRzWj2SXU+JP2l/Bnxp/am+IF38bvib4vbSvA+gtcx+A/B8M+0RQQlUimZAAQ7khgcZY5Pavx2nwbxNn1KWPxElH2jveTu+W/wpdN+2p4lPJsVVre2lK6l08/M4LTPhlb+IvHWmeANF86XTPCMASJbqUnzLxjukkZicbixOT9PSv23IOHaVGvThH4aSV+l2e1gcqUq0Vf4dzT+NWgTa2UEjGS0soI7YQJFuRBGSSij+8T+dfpVJUrWPdxFOKppbNGDqeq5sG0G302KGO0RJ5tOJy0H3gZZOwI7VsqsZTsnojKhOT9y+551qPjL7N43v9ZNpE8JtvKtIJx1UjGT/OpqVbT1Qq8VGFjh7f4nHxNrl7Lq2kzjT9OTyFuvOAMwB/1YyOmMdPSscNVnKUk17qPIw+IrVK0oyjZI2fhb4u1DS/iz4V8VLNJbRLrdsQFwpiiNwq7gc5UlWI49K8viOlUx3DuNox+1SnZdE7O3mehT/dVFOGln+Z3H7c37Td5+0X8RtZ0D4R/Dqx0fwT8EFOhiPT7fy2tbVrjyg8pBw2+fv15575/l7wKyiHAE+bNsU54nM3zPmd9Ur2XayPnsPi8M51Uqt3Fu619D5o8aawms28csd2UhE/LseWO3k4HUV/U+Nq0HFcrdl/kbVnelzb3PONEk1WTxRctdQR/Mo3DGCQBjA98V4OCbeIl2Z8xhKNajjpuS3PVfDdzp+jSS3sNomxrPMTt8yluAd3sccjtmvb0jFtn2UORU07GBFdXGuapqM8wWGPelvvdcgICFV/qM5zXj+1nVlJrY8fD1a2InOS72K3iVYr66ma7u/NnS5SJ2ByjFVIL+vYfnWcqUqifMd1eEHRlzu7RX0rS9M8W6BLpd5fLbPZKX+/8AIcAkAjuevWvKxlOFSCp31R5NGnDGxlRbOF0ixu7fUZ7dGT5HwY4xhW57V5dCk1JroeJOjUoSlFdGdXoUnkXsMJB3CT5AwwN3ofau32cWmkdWGhKTPpv4RSCb4eyXLhYy8/3EP3eK/N+PlCFCmvM9v2LjRu2XZg8jkFvpX5TOaWxz2UWRSr5Y5GM+tZpSbBtormYFz8wyKc4uxEndAkqnqevQ1k4szTsMmbDctz2qowBt2K17qCWVs88gwFXPNbwSbsD0jdmPovhv4i+PdPufEfhvRJbmxtT+9kjGdor0I0F7N2R5ssQ+a62JdMe6ZxbywP5gONmMnP0rzakbz0OiniFyXbPQ/AX7PXxe+Is0aeGvBl3Isn3ZHjIX9a66WXYqrG6RjUx1GLPcvhh/wSb+PPiW5F5r8kGnoxBxgk4rsoZJiKj992OCvmcIO0UfUP7OH/BM/RfhNqMureJrtdRuHXH71BhfpXuYTK6OE1epwVcXWxMtT2HwH+yH8MvA/iebxZougQJdTnMjBBXXSoU6dVzRftJuFja1T9nP4e6v4mTxTe+G7Z7uMZWUxjNFahTqTUmtSlXqez5SPxN+zb8N/Gd0mo+I/B9rNMgCqzRDOK2lGEo6oIVqkFYzNZ/Zd+EF/bvpU/gWyVGTbxCM1MIQSs1oU69W+55be/8ABMz4G6jqVzIND2eaDgoOBXn1Mvwrq8ziTUxVZzvc8T+K3/BIrVo7yW6+HOvhQeVhmFebisq9prS0OmnmdWK2PPtc/wCCVHxr0nw1Jrq+INP82MZMM7bB+dcEsoxUYXTRtTzJ1KnLYtfsp/8ABMzxP8a/iVb6N408VRWWg2d1H/a+oxIUgkXq0STNjcxAx8oOM1wYqWAwWGcp1f3vRW923W8r9PR37o9mnhcTVoupKLsui3+4/Tb9mbUPhjoOr+JPgx8JfBNroGmeEHht7e2th/rwUB805Azn17813cK5hUxdKqpWsnpZ/iPPMseBw1Gf86u9LfI9isrxGTEcuCOHY84r6hyTeh8u02mLPqEl7N5sSkhRhnbgfhUq7dwjvqUr/Wrqd5QYiyJHhELdfpQ4ybLkk0Yc3iWKO9lnu7abf5AwjIdnHvUr93LUI03KJzvi++8P3iLLfSpb22zeRbvh2Pp7CpnVTeppTtCFoo8s8c2virUbiO70VQFgUzWltM5cuBzz6VLUnqjJyjL3banF6Na+Jzp934o1fRihdpX+ztLu3SYOOvUhea1w8ZSk2zWpCEbRE8PJd3thDq+qTxuJ8eeXcDaMn5QOeTXRZ7tnJNqMrHM+PvF8enXO+2CJJdQuLe0Z9xAzg/TjNCqJaInl5jgPFOv6ulhejT+FeQbkVsgcc/TBrWNzPl1SaPE/iXq2szefcTXg88RhDIo4IP8AepubiyvdjufO/jqeWFp45Bkq/wAzZ6VlUve7Oe/MzyXxxraG/EafOsUirMy9F3ZxmvJxycqTR3YNWnzGVc3C44JPFeZSpO1meynZFVpS2WY9+lVO0bIxqK5AJF8wg0ptuAqC5nYhmZST8tc0m0h1YRi9S9b3EzIo9q67LnNpXdRstLeXIX92prKTVxTUpO5Yt7i7Iy47ccVm4tuyJVRx2LUEl8wyin6Y61XJTjuO1STuy9DDd8eYp+mKylOP2TaNktS/a24cjIOa5pTZrz6F+G0m84Wyws0jcCMLlj+FRrJ6ImVTlWp2Xhz4KeNfEXg28+INrZxppFjOIbm6kkxtkPRMdcmumGErTg5paI554mCqqHVn0n+z5+zne+DPhVZfEm/8IXCXeqW16oup9PMyiQqghRk/ucuW49K9OjhZU6Clbc4JVPa1nDmPVPhH+xP4Ma4tPip4o8Kr4c1GwuFnuG0m6b7FecElvKPIcknC9MdK1pYOlKXPKPK/IynXqv3Iu6fc/Vz9lxNOHwWsG0u2uI4io2+dCY3YdRkHoP8AGvp8PBexvY8bF0rzs3bZ6eX
9a+R6Na35dfJVhnILEr+lNx1I92WqNuK4s9PsPtMqhTiuqHLSp3Z51T2lStyROe1Sa71e8jkhni3K24M54j9/c1yVG6s+ZnoxpRo0mrf8Ey7S7tbbXJ5DcB1VSbm9l4MhHbPYewrOMouQ17tK1vkcTezTeL9cuNcvrVf7KsTuLhsCZs8getZOCnU53sdMIVI0o3tzaX/U5H4ieOvDMVwV+zFbUcmKInCHjhj2HHapqyi1psbKFSR5RYeL/DepfGSw0jQH895YWWS4Ns0MOzocMwGTXFGvSjXiox9ToeHksO5zKnijXde1HxHdfD7wl4ru9MtJFkhtYdIjRo3lyT+9bB6gYB45qq3PKbUXZGHs41LVLanCeHf2bfEfwT8FXXxW8HXup/2RDfs+s6Bc3DMPOY/vJVLnK5646cdKKGGVD34/M6nKlP3Z7l268F6D48DeKvCusSiTYpt42Ynyw3pj7w6j15r0ZRjKPNEyVRxlyyRj65rHiLQrJtOvtHu50AMcwllMOxhzvj3fe9emK4qs52s2U6UG7nAeJbbV/iTocFsuvXa3UVu/lNeOAMew6H6VnJK24KNpaI+c9Su9a8D3F3Y6zZW8gQMs1xFGTE3X76AfL9a5HUs3c6ormWh5n4vkiur6S5tkhRWOf3J+R+K2pNc2h20+aOjFs0jn8FavAQrg2oOMd8967nKPsz0IU+azZ4xrlxremXTRreTwKw5SKf5SPbBr5rF+4+aL1CT1sjCmkZst1z15rkj3YuTlQRO8XXvUTTlsEbSJFl3Hd69KycZJBJNEgdun9KlRe5PtLbAny3AJ79hXRHRakPmqSN6wnkaBQTwBWNRxW51RhZGjbo7YUn8qxlOKdjRKKNLSwbW5S4HVDmnTquFRSRpFpfCekeDNZOqLHbpetGx6ljXrRxKrRu3qXGavqfS3wG+Fs+ueGYNZvLBL+PTtUilKOPklUHlS3UAjivqMo4anmOXxxUoqXLJOz8j0sJgPrmGlz6du56Vo+nP4d+K0uv8AhzwfcabaSsXjtX+eLYfvIc19BVyKMM8XJh52lG6drwWys+zd9Doo4GrTwlr3e3meia38IPA2sQQeL/hzAND1VwWuEjh+fee6n0r3J8NUKUPcn7F7t6aelzzqOWzhXarxTj6lyy+GvxA8MzRt4n169heQearzjy2/3vU1ll2EyyVZ06WKdZ/Lf5Hrwhls43oJP01En03SLkvG920txI/MzMSWP19a+npYGrHDx5tHF9OxvBy5bWsjzz4mfEf4ceBPEl1+z9brqiajr8Bmu/FkYeO0v9hDy6VHOeCyDa7pkFhxyBXwuGzCHE3GFShj6nuUpXUeazbWt2uup50aftMXF4m6drxi7feeH/EbxCbPRYSl7bP9rvpLiSJBxFbxkmOP8Tj8u1ftawsZU04vS+q8kv8AM9GhQlKb51sed/Dq90WC0n1C/s5Zr27mkuJkEfBfjbnPUDg4r2cHShDBq27PUpUpUo+6jnvinrgjw9pcqrWcUknnZwZnGSSe3HQfQV0WSTZjUlKejPNr670y9tb+e3tJlOqQRRXkso3PIMfdHTcT+gNZ0KT5ua4lNRl7qOF+JAii1K4fSLJbeRr0LYW5cM8shGwE/wB4IOSQPWsMRVkna+pnWm5tIoa54a06y0zTtMtbnz3MTPPLsLLKT/rJfQkdj712wSVBQW5VanGFGxz3iR309rLXJYo4THAzxJHFkosRDRkjsDg+pJNGHw3t67oS2lGSfzWh49ScnL2SbV9bn2z+1d4Gbwt+yJ4r1PwF8MPh/pNn8Z/h0PE2r3WnI/8Aak89pJHJGzbiVCOHlfagUBgpOScj/OfLpynx9TeKxFWc8vxMqME7ez5ZN/itEr367dfKw2V4bE4TG4lRcZxvbs9NdO90fmp4f05NV8FNq1zE7K8G1UOcqD/Fx74r+78PB1MFKpJbjy+UquXKpNboxdK0p76/gC3Iij8wr9o6lJMcE+3FLDUktTlgm5czO20+C21DwncWMKkyxEmW0H3zKOroPwBIrodR1INI9aFX2tJxiYmiywadaXVvMsTSSQEqZGz5gJGcehHpUYWlCMHcyw0JUaVplqXTbW4sLnUIBEwlt0kLq2WGMguR254I/GitVi0+XYK/v07oxNJs7Q39yEdYpo4flSIj95xwV7H6ZrwMTGPM+54+G5I1Zcu5yEGnzrfzfaZXMiynOCMqc9civLw9WEU11PH5pOrJTfU6Tw1YySX6pOxeQAEMP4x2rrbcoaHsYZKDTZ9QeBEg0rwDbRDCqzEjBr8q8QObmpR+Z11a8px5Ue5fD79kPTLTRLb4lftZ/GLT/hX4Zuoln0+zvrN7vX9XiPO6005PnCntLMUTnI3Cvx/E5hHn9nh4ucu61S9WeRicb7D3aUeaX4Hq/wCzZ4b/AGNPil421jwx8EfhR4nkisLJGXWfHeqQXFxfcnc4toYglspHYMx969DLZZhQqN10tVojvwVOtiIylWs7fgbnxY/Y7+Dniq0mfSdGj02+wQr242jP0r3OSNaOqCry2tE+O/jB8HfE3wh1drbUoWktS2IrhRwR7159bDSg7x2OSEpOVpHHxOswBY5HauWUlHRHUkkiHWtIu9ZsjpmmwPJNN8qJGuSSaVDmnWSObFytRaZ9P/sB/sh/H7+zzp2raSbbSrwZkEikFlPqK+wwuCqp+9sz5761GlBxPtD4X/8ABML4LeHtVXxLrPh9bm7kbc29MqDXbDBYSnK6Wpwyq1Zn0J4a+D3hDw1BHZaPocFskYABjiHSulcnQSi27M6ax8OxxSeVEhHocdKWiK5EXl8KBn2zjhupobvoVH3WW/8AhBooVD7CVI604pLc2EtfCsBl4XHsw60pW6EpkyeE4GkwYwvbBqtOUFvZkVz4LspCXMPzDjg9acZJoFK7tYrf8ILBaHz3URovzFpTtGPqa561alCdmylh51JI8/8AF2oWi6zcwaZOkixIcSqMjP1rj9s5y93Y6p4KcKEnGylbS+1z5n/br+Jes6Tq/gvwVFY3y6Rca7bP4imtbR3MlsSSUBUY5wFxnPzV4md5k05YSEXzct792foHDPCtF5as1xFRK8lFLe2j1foe+u0/xCksfHN94bPhvwjo/lt4f8PwqImkCR48yYA8564/Ovm3w/iM2wcpYl8mlor/ADYUMwpZROWHpz9o53Up/wCS8j0r4IeOdK+PVnr/AMTtL8F2WlXNqxsFe1Oz7SsJxub1615eSOvlOdVvaxjG0Uvddk13s+pwZnQoYPD0MNGtKrCevvbxbNfTluXujbh1jQ8uA2S1fq0HGpFTT0Z8pVw3sJOLWxdvri6063ZZkCQq3EQIJI966L6HEtxlvKk98JVKLGI8F/WnexT0WpU1u7tpIvs8MW0hQVUKDkDrWTlKUirpQtE53xJbaD/ZrahJpC+bOoRSRnJz3qJxp321ElUat0OC8Ua3ai/W4s4ljFvAYjOr/KxNJXvoiFHlOKmmvdEWaWeZpY0ciJCMrh1wTVRcqY6k3I8/1DTpNP0/UFhupUEkonhCjA68fhmk+Z
7swau1c5TXdLuvERk1FrJknhgJjkBzkH+KtaS5mXy6HD2+pXXmz2MilHjlw+88Mf8A69a3adhySSPI/jDrUdu11p0EBjldjj6DqKqybuzmUXUZ84fEnX7fR9KudTncDyUYypIevWpupPXoRUlGktNzx3wVZ6zqNle65qyuYtVOZEI+4gPyEfSvFxeJ563u7Hs5bhJey56nUtXelzWe1JY2UFd0bspw49R61ytShq+p3VGmtCrJGUUgdPeuapO7IkouJS8shyVPU03P3bE0VZkV3uRCDWSs2Z4l3TOisYYxEgAHSnVcnN2OmVoyaZdiji+bcgxjpis1GT3Jck3ZEsESO33Bj1rVtQRUYpas6jQNGjurEGCwaeaS4WKNEGcZBP8ASuVynORcqsYLU0/DHgHxJ4kvLSztdHkVLy6MMcxQ4j+YKSw9BkVcMNVqz5UjmliYwpuT2Ppj4df8E/vFHirX20DWNEgWG10wRQahZhlM0zcq7epBOK9fD5JOUvePOrZlGEU4n0R+zH/wS01vwZqll4w8cwWl/qdjMzW8kkA2uhAG1l5BIxwfevTwuT08O7y1Zy4jHus7JaM9++GX/BPfwvoOmap4cv8ASFNpq+o/bZYCvyFwcgmu6OEpxul1OedepdM910H4AeEvDmn2mh3GkQtCEUQ28qfLwKuaUfdZEZSbumbt38DdI13SW0x4UtJFB+yzwIuYiOhGRzULDxqqz0NoVnSndnqXw+0m98N+ErfR7/V5LqSCLBuHG0yNjHQcDiu+K5YctzmruNSfMtDQ0+9FvJ9mZ0VRy3zc/iazT7GVNN6Gpf6vZyWYkW5BjT7zMeB9PU1nVq8y8jSjQcKjutSnqJa00RtQnSQQMSVjjX55T2ArCrJxhzNWRrFxdTkbu/yOMu9D+IPjfybi+ik07SLds/ZGUb5APU55zXOvaPllH5pr/h7/AHGiWGo3V7yIPiVrem6DpMejwWoht4IwdscRyD3J9TWs5qNKzQUXed5M82stNsvENpf67JatHp6EyZuCUa4k7Zz90duPc1zRqTnFytojarKNOo+R3u/69PQ8c/aC8W+D9F0u98Y6tYNFa6VEZ2trZyu5VGSeOQD0A71zV6kYL2rWiKpuT92+55P+z/4T+Lnxp1Ob48aZ4rufDT3tsj6HoltGFWK3ByGkQ53OfWnSofW260JNLTTY6q1TD0YKK17n0F4rtXn+EWox6/4r1S5u57Z/7ajtgZEnjxhwycEHnqK6varD0Wk2zzYc0q/5HjHwK03wfD8KbW+8C+P7tbdwyWFmzsJ0wxGcSDkZHTrxWFKrCtStGVmddec1Vs4mPpPhnxn4s8S3Evjv4i3GoacspS3eSAKYSOOcckiqpU5xlec9C3zRV7WG/FbwMfh34UXxLHqNtqFrGTIqWtwHeJ+zY4OO+DW1WLjG8dUCrRlKyPlbxv4wt/GGuT6ylpFbSyIUla2GFbPQsD0z3Hqa4OWLbuddKLjoeT+KLXyNVc29uIdxIkiQYUkdTjsfWtlT5VZHoU2oq0ixo10X8O6jEsoDGxPTvzW7/h3Z0Nya0PGPFAsZbktDFPHLn5xIMLn1FfO4xwuXCy3MjYFGSa4YzuFST5QLKON3Wqc7GUbkkQUHPHvmoc7lSTHFg3yjpReyHGGt2SQrGkoYtnA55rPn5inaJv6Ja3mobYbK1eVz/DGhJ/SofvaIFPQ7LRPhR8RdUQNZeD9QcHoRbN/hTjhcTVdoU2/kVFt6pHpHw1/ZF+Iniy8SXW9OksrYDLlxhsV9Tk3B+ZZlVUqq5Y+Z2UcLVqu+x9FeA/2IvCkD26abHc3cjAK7SIQu761+mYTgjJMLFSqrmPTo4GFFOVRn1b8EP2X38K+HLnTLTUdNa8+RYtCe62yXRP8Ad45I4q8ZxNw5w/ReHkrU1ukVic8weEceaEuT+ZLRep1mmal4D8En7JrukR6nfgc2UkWFgwT8ue+OnvXymL4l4q4oxH1Xh9KhQtrUqJ3/AO3djhqYrF468cPeMX1K3iL4p33iRDFpHh2x06NMAx2duAwA9/Wva4f4DhhVKpmONqYmct1Jvl87K+hvg8pjR96tUcmzn9Zt/EPi++jm1jVbu+ZlCp5znA9gT/KvtsFlWUZNTtRgoXZ7uGp0MLTtCCiiGz8IXOnGRmeF/KkBdFZcoPfNevCvVVGVO/ut3+7+mKpVp1HZaM8a+L/hbQ/Hvwj8X/sv+LtYa0ebxnNrmk+IcbbrSbl4VltZ4XB5TeCjKR91mr8Sznw94gqcYQz3I6q5ub34z0VrdH5u255eMwE8VivrEG+ZRSWvb1Pn608Oa74j0iyufiJBFa67a2XlatbQSBoXccGVD3Rsbh6A1/Q2SPE4jAwli4ctW2qvdH0OCqOnhoqovf6mfq1/p1vpcNzHBFEgLJFDE43zEdWPcA+tfQxXu2RdSt+8cTxn48eIxaxST2dhFJ54dbW0TOJHx29QPWsJ88YW3ZyYm3LdbmObS60HwTBpl0pN8LHzwY15jldckZ9MCtIQcad5ble9CjZ7nn7W+pX+pf8ACW3kCQz29jusklbCxQ87nz3c4OB1rjdObqc7Oe0k+dso2GoWhu5td1pflSJNsDHDfZiDtXgdGPftXo2koczNKVRufNUenY5rVLbV55Lm7FyEtzpRjX5AVTduIVv6VnTxFSFVTWnQc6LjJTWx3nxm/aM0TRfAHgTwzqHjuLX/ABH4m8J6ZodjpMbOW0ezE0sM28DG3fywwT1z2r+U8z4Soy4yzCUqPsaUKrq8z055tRd0fn2NzbF4XiP6olaFTkt89Hoj52a4hsdNbQdMu2aCO8MLHeQdiEgH9BX9H5fWp1MBTS7I+gpSkqahBe6m19xVGjBLq7ltZAIJSFulV/m9VYfQ1qqSUmo7Mn6o+ZtbPc0fBvhLVNC1F7e1uTM8q70zKQJD6oeMHFcyoSozdupeDorBtqLvc6C807TmZtNubdRNGytMk0eOD0bOPv8AUY712ulGVPU7pyU42sYt/JpyNNoFvbGDaSqlnBaI4yzLjqPXNeVXppRaicV1L3F0OVjuNN0m/wDtWoytDbxqcPu53fSvFnGMJXnokeFO2HrtydonO6fGWvprmJ9rNKW2Oedue4PevNo06U4OpB3TZ4sKVqjlfdnWeFGSbUIzCSQXwHz931roVVQStqe/gqUn8R9QfD/XfEHg+003UfD91FbXEFpujuDbpI8LsTh03AhWA6NjI6gg81+M+ItaWKzGFFbJanbiFaaSJNd1TVvEGqT67rurXV/f3D7rm+vbhpZpW9WdySx+pr4OjCGHjaCsjhlTpxfMehfsifG+6+BPxaj1xW3W2pQm0ugx6Ang/nXXR9+qpSMfrE6Eny7M+7/BUEHj5DqET5Wf5lIPrXsRcWrIV5PVFb4h/staR8TdCuNH1G1WQtGQhYcg1TceRxCcrI+O9Q/4J3/GM/Es+DvD+nlrSST5Llxwi5rxPqFarWtHY5quMVGGu59u/sn/APBLbwL8MYIPEHjO3Go6mwBLTICEPsK+qwOWUcLFNq7PAr4utXlrsfW3h
74c6LoEEdtY6ekaIBwigD6V6bq8uiOdQ5tzo7bQ1yFWMKM1n8UrlN8hcPh4QurJEDkd61toNSVy/Z6DbGPfFGQw68Vk4sHNSWg6bTMOSUz2HHNXzRSEkr67ktvaOf8AR5SQnsKnm6lSY86DlvMSXhfuseKHLQUW0VdYm0awRZLrX7GJycMHuFBP4ZpwvI2jCpPocl8SfijZeEYPsPhgQXt28YJlUbkX8qyrXpp23OqFFxabPHPEfjLxf4mulm1vUrm4DZzDkrGv4CvNmmveep6VKnCO25DpKTRTjbEDHICCDmoi5xltozblclcl134aaJ8V/HPhiy8Tndp/hdpNTu4PKG2Q4KoGPruPHtmtMRRjXqQ/u6s78Nj5YPKK1Pmd5tKK6eb8jY+NGtvceFbp7WMR24hYxqBhVUKcCtWqNSPPB6NXPNpqcY2e5b/4J0z2kn7N76j4sj+yNqE94+mSIv8ArlM5C7gB1Yd6/P8APMso0pV8VVbTlG0Wtfe0smuh6mIrYiv9XjSV3H4l5a6nq3i3wvfWUhntF+x3Cou6LHEpxnIPb6V3ZRncsLJYbGK2isc1SlHHU7wd99f0Zyyx6gLtYdQuZpWCkuHUhfxNfaU5pxvF3R4c6Psb33HXmtXV7cbbeJLe2hjIkmTv/sgVtCV9zJqMjK1zxINFijSSSUyiMskJjJYj1PpSnPlHycu5z+peJzqFxDYx3G9Jk86ZZDgYpJWehN3Z2OY8ZJbR2bxXTxwOF3wRx8KVyCSw9fSrjNO5fJy6s5/xf4rsl0m4lm8pFNuNxbjaAMDt61NS81oZ3d/I4jV9ehstBY/ZAxWwC4BzgkZFOMdLMzSblaxxehR+I7nQJbmWfZO0LfKjfwg5H/6q1p+4XOCi7I808WX9/b3N5NKgWSSMSFF6g+vtVSkrmfs7K7PFfjT4nhnZ76bi4UAnI68daxdRtkVJqMbJanzB49lvPi341h8C6TG2GfzdUljUkRQg8lvTPSuXFYxYei316GWBwk8bjFT+81fiHYn4fQReHrTyy4iUqpT5WiYda8enOTjzyPq8wp1cKlSVrr8jzxpmAzI7FRnaGYkD6VEp1amj2OGKjF3W5BPKfLDA9uKfsl1JdT3iitx83zevpWkqK5TSDSGXb7ozXLazZjX+BnRwOIkCk1ry3dzZy9pK5ailZ2GRgd6iUlFFxilqzZ8MaRqPiXXLXw7olqZ7u7lEcMajJYk1zxjOtPlW5NWvGlG7PqL9m34B+O/DHxWs/DF/4Rna+t7iG42XNtlEIPIPHIINe1g8vqUcQuZXPLxGIhWpXTP0H+En7BPgo+LG8fzeG1tZ7gZawXPkqTgtgdByM19EsNRU+e2p5Uq9WcOR7H034E+CHhrw7dC307SoiQo3CTnBre66IyUbLU9AsPCFnaFoJLeMLjLj1qG+5SSvc2bHw6jZMdqojVeC69Pxp26hJssnQjMyxTRBzDkowXp9KHS5tSFdal620ksA7KE+bBJHWm4KJbvJ3ZYVJmDxxMuehfrTcrqxGmzMyHwzrepav9pOrbIIgdtiItokc93bqR7DFYS5mtDoTpU6e2p0Fp4b06C7S91y93+V/q7NOIgevI7/AI1MaajLmqP5HO8VVceWlHfr1IvFPjm0jQkxL8vywiMbiPcCscRX53tp0LpYZU1ruzibzxNqfibV4rbVdVlsNPhYFl25ZvqO1TTftJLmdkdbpQhSvFalPxnrvhaLUFUX5ljDgfZjGd0nua3nKkndO6MqdKpJe9oef/EvXNRii8q0to0CnzLezcDYo6/MO/0rirTlNcqR0KME9D5a/aLufEnjTVNN+GtpqV3HqvijV48wWFmqgQRkMxdhjYNo4wD1x715uIjaKpXak3pb+tDswsI87qvS2t/M+h/gb4V0nR9W0xLfRDbtboLIObgKQyjH3Txtr16K5GklsclaKmmbmp+IfDHhLxZrdr401uBZ/skxtUvtqwp8pzl1HTpzzWXNSUmp7GM4ycE6avY+T/gd468NfEnSPE9v4ciS+02y8TXUFleQja8XJIKleqhicVzYWEa1OVmdc7qa0szk7qy+NNlqN1oF74vtb+zNyZbVyhimRuylhwW7e9dKpzpxcW7o6lyTs7HOyaY9vqV2dT1C9t7uQfvrSYkoR3O3PI915HpXP7RJtdTWUEo2SPEfix4XtfDer3GoaQpQSoXCKpdH9QTnJHv1HcVzSlJSKoysrSPH9U1SGeRzEHBMmTukyAfStozkjrUW3qXvCMkV7pt1ZmMjfaupGehrupXqU7WOlbWRz938M/Dtzqi/8LM8bQeFrTYDFLFA15JKp7iNW6/lXm5hlslHnlJIPeascL4s0Lw1putTWng7xRLq9gp/c3lxYG2d/wDgBZsfnXzrp8srJ3G1yrUyxYTHnbxmtPZX6kO8dRxtZh+7Xv3pKk4gm5Mki0u727unNZVLs01SPXf2d/2WLn4opJ4w8baodN0C0bBZf9Zct/dT/GvXyvKHil7SppEqlh5V3d6I+l/A1r8MfhfZtpfw/wDClpb5+9dXMQklYY6biK+qpUcBhF+7gvVnoKhRhb3Tq/Dfi7xFq9xFaafL8pUgLEo49BxXr5dUxNatFUkuW2/5HpYWipOyjZH0H8APgX8V/ijLCdD8PPLEoAnu7rEUC/7zHj8q9yvntLLKagnz1ErW0XzZnmeZZflcWqs9ey1f3HpXiH4Z+LvArDQL+9sD5ecvplyrhcepzXh06ud8TOUZVfZQT+y9fwPPwuZUMe+aEXbzVjMstLU3n9oXiiTyVyJTcEuP8K9zDcKZdQoqNWPtH1ctT0qf1l81PlSh08++li3pvhHUvE0k2raWkphg+/I8BYMT6nvXuUo4TA0+RK1uiR0Sq4fCqMajSb6Gnqvwh16ztleG9sbd2gM08klwFG32B71vTzSgpqlyu712Maec4Pmsk3rbYy01a7awGj6PcgRxsWaYQjdu74OK9iOGpzaqTV+up6kI3n7Sf3GNqWgC2g/tOS6SVwjO6ySFcnH8eeMV6N6ThtpY1p1VKra1jxL4zajLrXxHSSylt7eG98PRM8lu29NysRyfxxXJl9Gsqk7aKWwqnLz2jc8b+KFppyBbhZJoDCDmWN8iQj+HHHBr6+m4cqezOqlGy5tzzPx3eWXiPTpNT8N2TRNYRCK6SaUBy+TuyP4R6CpjVcnuYSqOTet+1jxjw/4o1vxjr1/rPiXThbTWUos9LgkzsWPIDSDH8XXmlCU6lZt6JGWFU603OrpbYi8feJdRW8a2swJXgi22ioP+PhkBX8EA7+1b1pNU36GuIasmcheTSpoVi9xe+ZZqrm6df+WmSAVUdcZwM98VnRs0mzCMJpczd0c/42nvXtl0+GeOCWFI1u1SM7VTf8sY+oPT1p4mtNLlRhXklC61NK+s0aKayN2wlnhjS3kGDyQckjGAw7LzRCneDu2bc05wUdj6N/Zc8Q/sC/s6/scah+2D8cPhZpPiL4h6N4ivvDNgdXtvPZY5bYyWrJGflR1LORJ1GDX8i+NWC454i8SqeRYCo6WFnCE3NaP3
ZK+v3aHzeIwuChmbxWKlyqCVpLe99Efn/wCD4oLjT7vU7qRQrMZAgA5Dtyueelf0vk0IU8JGMpXcUl9ysb4CpB4b3dVds2rT7NPrR0e3tmhKwf6U2BluPvDIxx1r2lKKnodcZKUlFC6kZGaMWclxDLY2TOJlPB5+V+Oma5sTXitOprVoqKvExdU8f6r4phudPu7hROoXfdlQN5XuT1PpXEsZKtTcEzghjI1oTpx0a6nGPFNDdvfNdy7A5MoVuT/tAntXiV5Sg27s+eq06lGo5KT8z0P9jr9m7x9+1n+1h4L+CPw7k0651K/1QXTvrk6ra+TD+9kMgz84CqflHLdK/PePM5ocO8O1sbiHLlSe2+uiOWUKbxdKU/ejFpyT6q+2hmfHvTLbQf2jvHmjQ6vFqq23i29Q30Gn/ZVlImbLLD/yzXOQF9BVcEY2eN4XwtRQ5eaEXa/NbRdepVWCo5hVjbS9189STwnFbnUIWTCgyjfGOtfXKg3oejRq1Hsj6atrT/iQ2N0tuVDW4VWxgMBwcH2NfhvGVVSz2a7JI7J883dlaUfLkgg18qmpM5Kidys7SRSB42wykEEdjVpPdMy5Ln3N/wAE9fjbbeJ9GXw7q14q3dmQrLI3LD1r0cNUVuVGkpQVPzPsfw9YXeqXiy6dGCGPJA4r0adGc5XR51XEwpx1PUfBvw0t4ZV1S5tozNgfMV5r1qFBQdzw8VX9o7nZ2WhShiEiAVR2reV2Y3Rq6foc96CkEeNvJIHWoUGy7pK5P9kFqvlyx4ZehPenaxlJqQAySTIqjtyQKpSGl1POP21/jZa/s4/s8ah8Rb/w5qmoW8l7b2NxJpN4IJLJZ3Eazl8HYqsVBOO4rHFVJwoN01dm2E9+ukmk+h5P4c/b/wDiNZxW/h1PgjNdrBAsf29pjdzEgYBcb4wxPBzmuenOr7O9jtngVWfMpak3iX9qX9oDVoftllca1oMLPhlh8A2o49nkv3P47aVSdZK92vkZQwVpWkvx/wCAYB+JXxr8Sws99rvjPWcEBoorq0tck/7KwPj86xhUqT0V2ztoYSlGeyNPRY9ajuI38QeAvGdq+NxnupLm5Vf+/WmP+hralLEKVuU65x5VZWf9eo3x/wCM/EOhXCS6J4uhgiZSNt/4Z1cuPqwsEArSusSmvZzUX56fiTF0qlL+FO/lb/M4rTPjreXk8iTeJfDMhtwC4vY760BJ9DPbKDx2HPtXj1MRi4vWUGr23Oig8Pd80Kmn9256N4Rk+IXieC2m0X4RazrEcmGWfRVV4SOuVMhjyPcZBrqUsylFKULrpZr8NTmr5jlUE0qjT84tfodHrPip/CNubHWfg94/hu7iXddTDwpJMpVRhUJiY5A5+tdUcaqdNwlTd+v9XOWGKpVLNTTXTf8AyPK/jj8bfCV74avLK50fxlpqPaOolv8AwBqcMSZHVpGh2IBj7xOBnrWFb2OIlGTTTXTY7aNVOldPc9v/AGW49K1H9mXwXqmixItjPosM1ui9DGy5B/I5rnjCFai1NXTvob1qlSliG1o0dwPFF94UvbzU9Ss21TT7u1KtAzcxEDG4E9Pwr5XNMhlCpLEYZc11rFvy3XoawxGHxkYUW/ZTi7qSWj8mV9et9A1Cwh1fwdfm7gktlkkhZiXtyTjBHfniscrzb6hJUpNuFtU94s6p4WpjIyhiIqM7vll0kcNr9/eW6siIWkibcARtUHnrX3dHFU8RBSp6o+fq4SeGqclRHKatr15dYiktZ55LpT51xEcFAB90Z6VurSWpLgp7mElzDok9w9wXa4W14SQlljXsMjvSaUfeM5QS1PPr7XfEviXUZsRiaMTYWTBBwP7wPRR+tZ05O7uN6LU534h32q3sjaHp1rLdsrp9qaJcqq7h1NTUrpPlQqVF1G30F8XXkKWX2XywG8pGVFPLHgYIraMmS6aizA1fWV0TbZXMIjW5tyRsGAp961ulqY8jk7nkPxB1a3jmeR7lTIYSJPw6Coi1Udr2FPZW1PDbP4cfED9pj4w23wd+FdgLrVNQI8xmbEdtEPvTSH+FFBzn8K87NszwmT4V168rLou7OnBZVWzCpyrRdX2Mv4qafafsRS+If2eYvDIl1XVkI1zxRPErNeOpHyxHJKRLjp3r5TLMauJJrGQbUVpY++q5dh+E8P7OpDm9rG6nbf0PnLxt4wufEepG+u5dxWNUj3HOFAwBX0NSHRbHxuJxLrzczmnu0YfvZePTNOMNEjKGzbKl74k0mxjPnXIOe2a3p4WrWlaKOLEYmnSepFpmoW+q5ltAdvY0sTSlh/dZvg5qqudkk+FUoTXEo8zHiKis0jobL99iRx1FTUqcqsjrjaOh0vgrwR4r+IOuxeGfBmhz6hfSqTHb265bAGSfpWFKnOtPlgtSK1anSjeTPpb9jT/gn547+Nur2Xifwp4ou9OvtL1QJqkMlo0b25U8gEjk+4r38Bljupt2a3PFxWMVnFq5+x/wm/Z503TrWzu9ZsFutQt7dIpLt1HmPgYyTX0Emlojzop2PZvDnhKzguRbXCBY0Tg55H1qVqJux0+n6JYTwu9kgOzGCFGavlQ7suR6U93bsL6MhgcLIMYIqHqxuyehoWujz2FkUt5VaNhzg9aFexlKpCUrMjismLovmhj3Ut0raCbRpKSjG7G6/NqEVsun6Na7ru4ilFvKyZijdVyC5HQE4pVISlojD2mvoZq61bGIwRMrSo5T5ByGGQxrJ8nLZPU6I05pXkbGm3EdtALgFWG3OH6k5ojKEVdhVUnpF6GTql1qWr3E7aFeW6SKp2NMp2KfWuaq5VPgKpqEUlJXKWk3Vto5RtZuo570tmVVjyCB6VMXGK13NcRTcknHRGdf315rGoyRaPpkQeT5mYwDCD1yeppqcpPQUH7vvHN2miX+tXk2orEvlRSeUt20eGuH5yF9hjrWbkpu+lnojedox5VueWfFm81iPUX0tb4AQsWdtoJbHYtg/lXPUlKm7M0p0owXNI4T4Q/C3X/if4yvPjpqHiddGjs5PsehIqA7mBOXZWBzluOMcCuejD6zU9vKVrbGuJreyh7CMbo6jwD47s7jWdds/F+oWR13wxOI5rixB8mRX5ztP3WOfwrrhWhUlJPeJx1ZLlioJtM+b/jDrT/tPfEOfQ9PiurXTNAMlpdA3BA1GRsck8ZH4815/tVjJOK0SOuivY0td2b3w4+FUnwO1zUdMtYJrHTdW06GTZDDhYLleN3H4V1Yek6E2u50SUZUlJ7m5rNjFe3V/FrNnuExXdKj8iX+Fxn15z9a6ql5XRkqjWiPLNXufDV1NdeG/HsVzCYyUt78sBJbkdCD6Z7HmuCajB+8jW073Pmn9pS1134Y6y+n3ztdwXMfm2V6sm+G6TnDxnOA3rjHvXPUhVir9DqpU6Klzq9356aeXT9TxefVTewbykSF2374024z2IrWlTd9WdkItl/wTdG3v2VNpBBVgfcYNejSkoqx0xklJWOI8Z6ZcaRr1zCHjbLkmOQguo9vavCzCjU9o5N3QVUo1NepjySBlwT1ryYfFoUo8yuyNCc
5AA/Ct21bUjlUmTRqgJdhWE56WKaS0R1nwd+GXjT44fELT/hn8O9GN7qV/LgDOI4Yxy0srHhEUcljwAK0wuHniqqhA5qtaMEe3eFZfFGh6vdfClNSg1G00e7a2ivdNBaGZlOCyccjOcHvX3WApOdNYfp3PYw/NGCitT1nwd8DfFutaottqxmsoCAzG4gKuy+uD2r148OYmeM+O8F5WZ6uEwvtVzzWh9aeGfg38B/hp4D02z8Hpcaj4gaEvqF1cxgRoSPuqO5r28PQnhJOjCKjFI4qeIzKtiZxnFQprbuzWg1/xVBpX9kvqlxFp/3msUmKKMey9aWE4ay+vUlWrx5+Z316Gscvwspqq4Jy77lfw54YttSvrjUdMjy0hLzYlbaeOQBmvbhlmDy13oRST7HrVaqjBRkvwN+28EaFJDcXtvqMsTtGF+zruYs3t2rWvRrVIqML69exz0sXiVUUFFNdwurr4h3WlvoGjSXiaanzRyw2/JI+9zx0rrhDCYeXNVabZcaGAo1fa1UnNmV9nW9tjJPrt1Mkg/drdkqffA9K9KhdK6Ss9jpVZvSMEvQdGWjtgobYF+8okxkZ4rWn7XEQtNW32fn380bypycSh4tj1jxFpkth4b1CC1uZnj8ozJvEiqwLoe3K5H41GaU6iwE1F2bsTTgrS5k36Hj3xk0Dw7pniGe801Bauq7UjTgFP7o9s+tetlrfsYLrY3UXGCvqz5y+K/iS+vpprC3eFVuH/ci5OCADgnOM+vSvd5JShZC9pOEbdDgTZzaTZGHSYQkNxe5upWl3POwGcBeuPcgD8qIqlT0S1DlpwXM92cXr8MaRAxxCR5oZpGCrtRXJPQ9wMZz61LlquXqY1W3rFnlcfijV7+5liutEvRK0jW08xt28uKAd1boNw4rmxEql0pHFByqytJPQpr4s0GWxa8lNsRHPmVTPuWCOP7sf4nnjvU05RUbpo1q1oRpuzMy+vlvbGa4e5W3e7P22UZyUQH5ck9/QVr7aFrtnJTjzq7NJdVEdpHfXkaFLmAiy3HBiIUgyvz1pxrt13bWLX3ef+X6nqudKNNR6nrv/AAS98N+FPjj8U/HXwD+JnwitviBpep+EJNd0rw1cS+Uh1CxIeJkbPysys6nthsEV/OH0msxzLIeHcDmmX4l4eaqxpymle0J6fcfOYtYacpRrP3ZNb7bny9+0xpXw10X9qjx14b+AF1eReD7bxA50eDUbKS3mtUbG6Bo2yQEfcgPOQoI619j4aYjN8dwpha2YTUqrSvJdfM8zC1OXEToU3ov1ONKa4gOoWd7JsT9zcS+Wd6gkg5GOR71+iqpU5XJS8mdjqVY1FaRRmg8T2+q/Z7nVPLAQokwYgOuMhT6Yry6k61SpqzKX1z2zjKWhX0/Q7uOC4nBZEdN027kvk4O39DSow9nFoxo4aVFvle5Slt9SR5rC9H3TkTKBn8fbpxSqOVnFmEoVVJxqbHS/s9w63pP7RPgq88PalBZXyeJ7VLO6lumhTe0gVd8icqpJwSOxr8+4+oUKvDeKWIjzwUW2rXv6HDDF0ctx9PESV4xabS6rsL8X9I8eWfxy8ZJ8SbCax8Qf8JPeDVLO8Vlkjk81ichiWxzkEk8YPNHBbwFPh/DxwUk6fKuW3axtKr/bGZVsXFcsZybSfRE3gmGKTW7a3V2VmnAZxxu596+yrTfs207M9Om6dBrmPffgl4hTUPh7NYyXBlEl7PLiRi32ecSYwuRwGTIIHGVWv594hi6+KrVZb3/A4qWKnVqOK2uat5Kc7V6nrXz1OOly3eT1IAoAyR1olO+gpNRR9I/sIfss/Gfx/wCNrTx14aeWw0+OQeY5U4mX0r18vwdWXv8AQ8TG4lc9on65fCj4bDw9oMUGpqHlWMB2Pc19JRhKCseZVbq6S2O+06yOV8mPIU4xiulPUyatojVih8pyrx43DkGm2RJIsaRcy2TukK4LZAJpK/QlydrDLuC4kctKBg85xScWVFPqSQRGTGyEB8YBNJRRocd8ePhr4u+KXwj8T/Djw7qmnrL4g0S5sfK1K282Ji6ELlfUHBB7HmhUud8re5jT9nOpGTT0fpsfB/wh8Y674m1VrnWYvJumZY7q2Jx5U0Y8mVD7iRGFcbnaVu2h9TClCCue1afICIt1sjADqyZP0zWjcnuYTd3Y6vwhauZWWSABG5HsOOh604Ra1sROLXU7aCWWOIDa/wAi4GWJyDWvPKKvYwvucr8R/PKENNJnbheT6VzYuS5bHVhJS5jz2C9ubdipuH5O0KWNfOVnHm5W9X+J9HCUuXRnT/Dg+F4v7Q1jVdOs7y7EOLSK5thgjIDPkEHIz71WFwuHUZSmteh52YRrVnFJ6LcwfETaRqWqtEmjWyvHGSWUY3HHvVKlSctFqXBcsEkjxP8Aaf1ZtH8H3T28awTnS7hEjViQcxkAYzzkkV38jp0HK1mkTCnLE4uml3sfbX7OvgweBv2a/CHgmSPa2leHbOBlH+zCo/nTw1NQw0U9zDNqsFjppbXt+hsvdRXFq0LBcKCrBq6FqjzJJKVmcXfeGtc8J6u/izwLf+TMxRp4CMpMFbcFIry8ZkeFxq5oq0u/c9ehmtRUfq9dc1Pp3XTQ5DxN+0ELA6pF8T/AU8kl1qKyWc2mjaIojjcCO4B5rwZYHNcrlJ0veV7/AC7Hs4eOX472dGnU5YqNmpa3fe5o+J/CHiC68P2nijw0y3ujXdr9ohl04qzhB1VlHK+9elhOIqNTljWXK3/w2vY8avgKMK0qVGXvRdrNW+57M898W3ss12LPRS0UixEv5wAIOMfN7+1e7GrGUU4u557oVqLvWi0YGjxvbaXcW9td+e4nJ1CVx1P90VLqc+iCcfaapaHJeGPEF/Dca9fRRi3t5rjYjk5LAdetcUKkouUpbI6YxhFKC3OV8dvJHfx60lzjDEGNerjtxXa69krnO4Ru7nGePvFzXdsLmRwjbNzgnuOla3nbm6GSpup7sUYXhb9mL4r/AB8Nx4gupV8M+E7aN5r7xDqY8tpEXlhBG3MjEcA429Oa+WznizBZVCUab5qi6H0WUcM1sVXjCtFq7XTv37I8W/aO/bV+COjeAL/9nT9lL4ZapoPky+Ve+PU1HytR1EjhhIygNtzkbQQB2ryMtyTM87xFPMc1a5Vqobry02Pq8zzbL+EqVbA4SbnVkkm4pKMX5N6tnyRr82oXdu1zfandXcvQzXly0rcnJOWJPWvuo08NhqLVKKivJH5ficwx2YVF7eo5W2u72ObvEmdtqygYrhdVSlqa06M27soS6HNeEobkgN2FbxxMaaukFWlK1kyjdeB9Od900hYg87jV08xrRvYw+o06rvPoaumWdlpVl5UAAAHYVyVJVa8+aTNFGFFWRUZ/tExI+7nrRW/dJpMzjBSep3ng/R4dS1y0t7u1uZbRp1W4FmuZNmedo9a4KUXVaTOqo3COm5+gH/BPz/gmd4k1D4qxfFKfxRfxaHG+7TUUGGV4zziTHWvq8uyuNCXtG9DwMZinV9xLU/Vn4S/BDw74Jt0stB0WK3cyZcoADI
3qSOtevKy0icSVviPUdG0y2tbrf5YiKHJUj71QlqVJ3RvtYLdyGa0iX5sBmK8GiW5DdjRtIbG0iVHXaDw3l9z70nJLQXvdC5E0YBt0QgN90keveqSctBO7d2SppyaSFvmnZxjBXOQKJQ9m7maqOvLksWpbaynh3R4VmHJUc10RcHG6JSqRlZnAftEan4o8K/B+/vPDVvcyymSNJZbeJ3lhiZgHkVE+ZioOQB+PGa8PP6mJWXSVFtN21W6V9WetlMcLVzBe1tono9m7aI5P4V/E/U/iDdajbz3tvq9rpV/b2VvdJaqsjMIFeUkqefmYjBGeK83IsTjsSp+2qc8U0k7Wf4G+Y0aGH5PZxcZSTbV3bfTQ7XULvUL+9FlYwyocFQm3AOfevcqXlLliefC7jds0rHRZ9JsgJ3jWVuS8hB2/h0reNH2Ss2Q6ylPRGVq/hux8VeaBHcNJjBu3fylHsMc4rmrU4yeh3wqOnFc1vTdnJa58MNH8JaLLeRfEHVrcshL7bsshJPQZ/LiuKtRUI35rFqvPn0hoJpPj2e3063tL4LJaW6YW3aIxtIMdc1dKt+75U9jlqxvJ23OB1uCw+JvjmDwnaabBaee7NNDAdwWMnAyeOTWaUa1dU/vN4xnGm53uN8R/D3V/hNplxpngjU4YYIZMfIwLxvz9xc/MenaqqUlQbjB6C9pGu/eWp87+Knn8K+Or3T7LVpLpdeRn1y6uXSO9kyucKg5wMEZwccZxmvOjKdOq4xe+53UlTnFK2q27C/B34UW2g6LeR6zbXQt7x5b6yvZgZGj25Pzeh+tdeDoKLbZriEnNO52tzPDq2rXOq2+pfbbKTQ1eTHO1hjJI7HHP4V2TUnPmicrnNxtY8+uNY8NeKItVsW1Yf2nZjyriESY3oAWV1HsDXP7ZO/cpRdOKkz5q+IXinxB4n1G/sbSybEMzQT6hKDslAA6ZHpjB6157qzqTsdVJcmrPB/H/AIKk1dB5/iK7WKPLQebcNJCjA/MChPAJPUUnzt2ud1OEZz5mcNc6RLZwvG9m6qRzhdwB/vBh1FdNJt7nVLTYm8MzTGfyJNquvAYcZrqp8vMrkxbUjL+MFxE/iQyYQymJd5xk9Pfoa8/NJSpy0NqkW0mcVJgHqeeleHBO9wU3JWHxEA5eqlrohOdtjtPgH8H7n4+/F7RvhXB4rsdDi1GR2u9X1GTEVrBGheRsfxNtU4Uck4FTCnHVyeiJ5KtTSK1Pse0v/wBmX9l3wtqHw88E6Jq0lhqNsYNa1+K68nUb9O58zBCITn5F4wec15dDO8ZRxPNh4pRXfqe1HLsHhqS+sXbZ1f7K/gz4TXljN4s8D6NqD6aWJs21dAXjOfUY3kevvX6RwnTzfiDEJTg4U073Wn4m1KnCdRRpX5T3MwaVrly/9qxzTzOFDzq5BCjoo9BX7Osqapcqk159T1qcJ4dJU7JI6K20f+wIYdWlitViui0drE91ulUqOsg6gV8tiJ1KuZ+xptvu7djm5aOKqyg20476aa9ix4s8LappnhNPFF1rVrKbl/8Aj3hlBIX3HavosFVquu6HLpbcvC4qnVxf1eMHp1Nf4JR6R5iSeJZPs1i8DmYock8dOh4riz6rUw2X2p251bToZ5wqsIv2OskyO/8AEVlpuogWnia42IzG3iWPaFTOBnI+b/PFfOYbPOJc1awlFRptLWTV9PIxi604csYrme5V1TxXqM8EMF1q9zCq5aIqdu/PqOlfSZbw1PC8tbEYiVSSbfZam1GgqdXmqJNmbqJ+z3J+1RqTBHiMxNuMmf4j7/yr6ujCHsUoux6MFzK6e4aOY5dW+3aoBJbogKxA/f8Ar7UVYYp0UqDV+5tXVSVLkpvUj8aanePo0reCfCNzqF4ZFaHT7O6Ebld3zbWIPQc89cGvMz6VWhlUpOeun5mEabw9FynPXzPEPiY9vPNe6tJPMzx5Pzjkeo4r6PAVL0Iy8kaRU5ySPm34i2Fnq2pu1nGsc0gkFv8AONyN+PQetey6jkrRN37z5Tx/xNb/ABE0C/NpfPHfQzRASX0IwwDE9+gAHGatRafM9UcuIpzpLmucudWu9RF7YQz+YLVWRTG5KBQcBQTjOetVeLs0ZUpupqGpX13baVLoMmtTJFPZ+ZOlufvnHC+2P61lOpKSsayqKGq1PPV+G3gu8vri2OmQOJNjFFPBJ+/IxPXH8645QovRxOX2FKoruKOV8Q/Dq2tnt00m8u4mlR1EL3W4FFOQ7ZPT2rmeA52uRtHPiMNFpezbRQ8Sp8QbKVpE1MXkUFv5hLpjMRGP0rolQxOHfNF3Vr6nLUoY+mvaKd0j6B/4IrQ+PoP+ChHhSw02C5t3v9L1S0muIUDM0LWrtuOSMAYHvxwD0r8F+kTgMXmfhBjn7LmnTlTkrK+007/cfPY2piPY89WOkWUv29PhXD8S/D95+2b4av4Jr7StVTSvGsloDJBqDNLLFBeLMW+aQ+Vh1woBIA+7k/M+GXGFTB4vC8P4pNOdNTg3o9Em01/wT3s5yilgaFDM6LtzKPMvlufL6xX98GOnXQEbDcisuTKQMkN61/QdSpUqtypPT8zhtUq+9F6GNfS39/etMbpMK5/cIMjOMZI7DiuOnCvUq8zZinVqTblLQvXsN2IngtXJmtLT5wRkKCc9e4Pb0zXdWThTutzasqsoNweqMiWSVo3llfdceUA5XnjHBPr6GuJzU43k9Tz1Kbj771KN48ryfa7biWLa48tiuCOSOOnrXm4ynTrU5Kyd1Z+aODEUlVk3DW259HT+G7//AIKH6fBrumaxb2PxQ0XQEh0qK9ljjTxnZ2y7WSaZiAt/CoCgtxMmz7pALfz/ABx8/DbGunJN4OpNt9fZOT6L+R/+Su/QirVjGrzUnbT3U+vdP0PF/C2m6s2rCz1K3nsr2xmaOS0kQpJC65DKykcEEdDX7TSx1PHYONalPmi1dNPR6eR14OtVx1NTasfTWmeA7PTvgw/iXwvIf+EjsdRtLq809VAW+00Aq7IcDMsbkMV6srN/dr80z7D4Z1qkXK0t7GM6VWGKjKG3UfIElUTqMBhnB7V8JJq9kerOzV0ekfsx/s9+K/jt8QLLSdL0aaSxEwN1cBDsAB6Zr0suwFSvVUmvdPHxmKVNcqep+zX7Pnwg0v4WeDLLw3oumpH5EShio64FfZKNOnHlijwZOUpXZ65p1rbm1MZyZD1UdBQrWJ5m3Zo09EIt5lheLLYOPrSi3cUotkt3HdS3W2RCRnjFXZt6kaCzWz2+JGP0ANU/dRctEP3yXMf73gY61PM5CjJ31It9xgJE3A9BU3sbbajIlmgnS7dj8jBuTmtF7upL1i0j8+v2iLKP4F/tp+J/DpQQadrlxF4j0kkbUMN1kTqP924SQ/8AAxXPXhCFTTZnsYCnUrYWKk9tD1PQtS0mXTDqa3yNA2CX3DC57fgaj2iijV0nSk02dx4Wu7a4tY5rMKwLAqynjBFKneWpyVZtvQ64anbhAtwyxqihd5BwvPU4Hat1poZOpPkulscb8TdVtp5ZrexvIpljkaITQsSkpGRuU
9wa5MRFvSR2YNtpSta5wO4LFudAW659zXh1uSOslsfQUW9Dfj1fxhrlr/wqDwV8Mbm51HQLU6lqetvF9mijt5l3FFmORK4C524q8Oq2NpunBW5Xv6njYvF0cJXnWlJ+9olvt+RwXwn8Sal8XHvNTj8A654du01qXT9N03xDJGk16ikf6Su04CNg4JxxzWjwjhjrJ3sreWtjSliva4fnkrI80+LOkt47+Mvhb4ZXbB5b7XLeCSNfmDBZg8gz6bEfmuzGyisDJS3ei9b/APDnZh6lq6qLaOp+hehXdrCBpsWBH5SooPQADAFZQVkkeTOTnNyfcwNZjj0LVJLy5Qy2znkZ+6fU1q/dVzKpPmWhTu9YkuofNtrRTCPuNE3Nax5ZxuRF3Vmc94v8M6DrSG41a1RUkQrzyc0pSg9Gbwm07Hgvjb4KePvDd8PFPw08dalpywsSkEF4wVxnJUr0wa8XE5HlmKSc6d+9nZ/f/wAA9vC57jMMuVWmu0kmcJ4o/bS8e+EvCV/4X+IPwL0zWNUkvPMj8T2+5JUTPIIHDY5r5+PD2Y5ff6tV0vdc17ry3selSzHKcZjoVcXzwglZwVnF+euqHf8ADWH7Imk/Cyy8RN8ZHtNWupimoaBeWLJJHIeN5PcZrlhm+dUJOFWk5yTeysrd73OupgcoxuOnGm406P2Zc2r8rFnVPGP7Mfhvw0tvr/7VfhqxW9086jCunW8t3J8xP7lsYCv7E104fiLF1a3s3R5U02tG9eifY1pcPUknKEJNJ2blKEVbutW2vkeOeK/2s/2MLbQ7G/bxv4z1m/jusappUOnRW0TxZxujmJYg47FfxrHFZpxHUoQ9hR9++qeit5P/AIBq8qyCniaka1eCgl7rTcnfzVkrfM43xD/wUr+AXw5v5p/gH+y2NUuvLxbXvj+9+2vbvn7yIoCenBH406eW8V4+o3XrqnBrZav79DF4zhvBUUuaVSS/kXIn6t8z+6x8s/Gj45fGv4/+Mb3xh4v8a6tbC8mLCwgvXWCJW/5Zqi4AXtjHavcyzh7KcrVlTU59ZS1bfc8HOOL81xtZwoSlTpLRRT6ebVrnEHwz9gjLKMhThiT196+lcW1dHydWUpvmb1MzxJZqNLkjgUEKRyK568P3bCh7tXXY5GVNrFWNebBWR6fPcVRtXrxWVW7JV07sq3u6TIAyOxzVU2luaOcUtCtJDM0PlkEe9bSqxT0OflUrsgjh8tcFqlpTd2Yzm7M/Rn/glZ/wTv1D4r6zafF34haUE8OqUl061lZxM8gPUggDafxBr3MqwHLL2klpbQwzjFONeUKZ+wvw7+G+jeHrW3sdP0pbWOEgBE4AAGOlezNJRsjxqaa1Z6TpWjPayhrK23RplhKV61ny2CUlY39KgW7D3U6KwBIbIxRzIhyb0NCP+0fsn/EkSJgvVM4+tQ3K/uktQT9409Pt4mi8x4gZQOV7ZrROPLe2pNVtaLYs21wl4ptpoSjilGrzaMxnCdJ8yehMGgjiMM0GV7ZOauUtLSKtKU04vURIbWJBLAvA7A0U1CLuhynUbtIfNLFLbt50I2kEFX6HinVanHYyVOXNozyxPA+h+E5L0eH9Nit4r3UfP8i3ULGrCNUGMdOFHT1rzqOFhQptRVr6no1alWtUi6jbsrGzpOp3UFs7IUj+b5m3Zc+2fpW0W07msacbLQjGralKyhYk2ryZpiCQfXPT8ql1JSeiFOnFO5Dpvi7TfEl9J4a0S9jv5oji5O7IQ+mBwal1YzfJF3ZtCi6cPaTVjVl8LaHpU7aprub6VVGyFm/dp/wH1qpUKdP3p6sxdetiIezg+WP5nNfEG90HWQbTVfDkTM8fyxBsFR7jsK5JxVTRxHCEqXU8r8LeD73wl4mv/GHhTR7mWJrQrNLFlhGw6AFuv4VFHCOlUdSK0OmpWdWiqb0Zw/jHU/Hty0t3Z6c15qRimmtrdTnymxgMR6jNZS9ok9Ls0pul8MnY8i8XeDn8C/FDTPH3j3XbZby4tE028nupMKskpyowe+eNx9cVmqapVIylu1uddJv2ThTXU+o/hafCHhiCG38aIJmWxleRpkxGFAwe2DyePY17OHlRpP3jzq0atSXus+c9T1n4U3fivXvFOga6+mI8TpbpHI32dGTuyEAYPc46GvKqYilUnKUHZI3qucYqLWx4L8Ov2jPBvjPUddhh8MQ3Ot6ZqskF7IqMkNwOm+J+4I7Vx08TCcWuvkdM4TlQXY81h1DWNNOr6ANSZtPvdRcxQSEFrdnztHPPFZQag20bRpuSSPAvEev6tpt21jrENxHd2d1IgkgTdHImfvYB9OorP2ltWeirJKKMl9SNzcMonSMbc4Riv4gGuuhNS1NLNLUlsY5J7hElm3At1POR9a9OnCLaKgk2cf8AE+7F34mdTNG5iQIGUYJA9R614+cVIuqoLobVbtKKOXLBn5FeQm+UycXFEinB2E59KSlccLbFm3EplSaN2RkbKuhKkH2I6VjKcn73Q3dlGx9C/s3+EfHX7RXiO00zxbr13c6Fp4AnebkED+AGvquFeFa3EWLUpq1NGmHhiMdWUOZuKPvLw14e0bQNFttC0K2SCztkCRWyDrgdTX9E4HLMLlmHjSoqyR9bh8LGhDlSO10Hw15Vl9uEkLzbSQFIKwAd29/avJzLMKlOuqNM5MRikqvskmRTaILC6mluJhMjpv3Acn6Z6CuzBYKlRXOluejh25UuXualh4r0LR7RZtRskuk2/wCpkPGexauvE4epVd07I5atCtJtRfK+5oWHxOsPEelLFPodtFBBE6CPTwqY9CWI5A4r4DMstx2YY1Uack4dWtTz54SopumpNt9WchEmrSXsk9zqlxeSONkUTKuI17YAHJ96+pyrIsPlknU53JtJanp0KFOlDRa9yl4quIHhfT9ddjsjxJhymPYehr6CNONSOmx00qbcroZp9/HZxCPTEl2zj5zcNuY/TNddOhBKzOhpX1FbX1M8UUKlWXgpvAGKIUYUIKMFZG/LHlOq+F1h4317xxa6f4VYQ3ju2yaG6CrEhU7mcsMDjPH5V8P4i8T5DwjwtUxWZySTWi6t9EjycXKlRwtWeNiuRPS13daW6LW/RXXmfP8A8RNOfRta1rw2+sRSfZruaKWWFsqzbzznvz6V7vCOYwzbI8NiqWkZwjJejSH7R1eWpFWTWx8y+MYdR8Pa3OJ4UuQ4dYrmIFiuTzuHavtYRgmmdi51Hscl401K41VhabwkIt1EZd/lAA5Z1A6ZzxU1HZGeJcZU7NnhnhiHxXrvxDutViu7ay0fTyY7WJn2LezE8scjoK5qHPOq5t+6eDhaeKnjJSk2oEPi7xFqFkmoWOoQrDcrMGnVWyzxDODu7Lj+VViK8YppHXXqKndbnNp4+0x57xIEjdUtVVRE4OVxk85rCNSlKL1TDD4mFaHuO9jBt/Hml33iS+mu5w8EVsscBR8DHQ8/U4rejiaTm7M53jac6ji2aet6xDrmqXP2CQJAumiL5emAOTU1qrqzbvpY63Uo1aPLc+6v+CTH7Jtvfa1bftvfHq8vNC8F
2ME2j+Co7AlJ9VvnjaMzNjBEIyQPUn25/PuMMfUxmX4im1fDxp2mkr3t+p81ia2KxWMdLDJWjZtd0eB/tz/tcfBnwR+ynJ/wT2/Z4ksdRefxP9p8VajbaZgxeRNI6wtIQGL72PTIOOtfzn4a8I8R8QcbLijNIunSpQ5aMdNU+tl5d9TTiHNoY+UcOm7pWt0S8130Pjuw8RWNnp1vqcUA82Esoj8w424wxx2PpX9ZYSvhnhE1pK+xzYWpTlho1L7XW/6FCTWbfzbpYUXy7lARJnlJh+PfmuatiKUeaz3LdSnOT9ns/wAyTT/Ect4jbJgk4CpI+R8xHOD7EcVOHqqpS1d2bUqsatPlTs1v/XmUbu9BuZrjToBsYcHbnHPP4Vw1Irnbi9DgqckKrlHVD0itri4EsroI2jOAp7+n51h7Snz26GtKphpSbeiaZ61+xJpF1rnxk0rQ9J+H1jrs3habUPFOpR6neTxWZ061snklhnaA7kR3SP5gCQQMZ6H8U8W40MJkknOvKnOs40o8qi5JzklzRUtG0r6dj5yUVLFRw97ayd0rtK3Q9Q/bJ1bw/wCNf2sn8c6Fp1taya34W0fUNWtLchkhvJrVXZMhVzhSgzyT1JzwI8HMFjcu4MeEqyclCrOMW93FP1fU+gw1L2cuXyR0+nyiDTNJe2kwZLbYCoHBz61rxHDmzG7WtiMQ17eyPVv2fv2Ivih8bfG9rC+mNb6M8ge4u2P3lz0FfP4fKa9Wum1aJwYzHKnDlhufq3+z3+zn4H+Cvhe00Hw3o0MbxIBJMIwGY+tfWxiqMFCCPn0pTd5bntGgwWoVosbSq55pK5NR30JtAvIluZVaTgE1m7h71zb0F47q9YqxAH8ZrSlq7sUYqMGi1rGpQwt5MRXd2I5rZys7GD+IqwW13d4d3OPSk9dzVK6uyymlzuBEJADjkE1OlxO6Y8aPLEvmG4XIAyAetEktzaLUkVb66eaby/KAIXHAxmhu60Glrc+Of+Ct3w7ibSvh18colVH0vWpPD+qTf9Ot4u6Mn2WaNcf79c9anKpFWZ6GCxnspOna9zJ/Zo8L6LYeDdYu9EfxRrGhXlzGJdQ1+1hWC2utih44NjFjGWz8zVpRw0VTctbGdXEVq9dRqWTX5HrngxbayT7LDblUjXEe1ahWUrI1lSVrtna6TIJ2E0UQQl+RjAHvWsJW1OdxR558Rmmm1O5klyzCQ7iR35rmxTc22elhkoJHIwT6dZzLd6vFPNbWqNNdw2o/eSogLFFHdjjA9zXz2KUo03Jq7XTueo3VdNqm7PubPxH+Jvhbw1pGk+H7Px9qtrqviANLb+B4lSecIRlY224Z3C4zkkL6VhmGa0cPhVSTcZbtWPEwOHnVxbUouVuv528irpi6bZ3Mmr6vY6hfpFBGE0/VmCtCcYYEptOOen0Fe1ltp4ZTm7ndilGM2qN1E8t+FtlH47/b08OXEVuixafpV/qaLGMJG+0RooHp+8bFGYxVVU6ae8vyJo1XClNb7H2PZX6yXSqx2SocOh71o42OV6RbLfiCSGOJjcQq8MqhTmk30OX4tEcX4i8Kan4Rtl17wncfbLRsvPaliSnuK2jBKnaJcailLkktTCh8VWXiaPzJpgFR8vEx5BHUYrGUU3ctRnTMnxpq0lzZx2FpKAkr/u4o1wSPc01JOy7mlOWtjhPH3hHQo9FabXNMhMnKQIyj5z9P8a0lScfidy0k3vc+VfjD+yp4f8U6s1yunIjMhJ479q4K+HfNcavKokkfPmqfs0SWNxfWsLyCOObDjqc5I4rOjhly3S3OqrVcU4t/iZMPwOlh1J9DvCGZk3wlhwwodJ8xz86a8iDVPhjYaE8F3JGCj/LuzkKe9ehCnaKOepUcXoZPiex0XTmECKMs5IlUjij2Svc5qs7nDeL5I5Q1tYLwM5kA+9WsbGHtFs0cvIn7nY3OR0P8XNKqvdLw6c66M298OWsymRVKHHXtmvKqwvG0EezKMI6GNqmi31ivmtA/l9n2nFcbVSEfeRzyqRehmlSW5HFTdWuZJNsZKSqFePatYRTV2OScdCnvG7btNKr7q0NPZpn9P37PnwU0f4ZeC7PRdOtlhjs4VSNCecAdB7e1ffvlhoj5rETlKs2z1Wy0UyTB7OEqxh+YtyGHpXNLVmfPdHT6LcXOl2JgEhcSR8lsAg+mDScnYykrmvp+nMg+zvLtMg3Lk+tQvMuLS942bWCOzTzzPGoUYKqBzWl4wRhOaqPlSJsusZmtolO7H3RSXvPRBBJytJkyWkcxWaWMh8ZyDV8iRlUm4XSeg6SNwNjWwZccHNXzJrYUJa3TsJHGxx5cYUjtmoive0KlLe7I9YuQIhFNJ5ZJ4BIw1ayld2YsPF3vHU5K7kO0oYAcPlW29zWM5JKx6HNaVyu1nFdj7HDGrurAydAoOe/vXPd9ClOn9oy9UiDo6XyMY4925XkAVh+HQVjLezOmEpTV1oWvDF7oGkRiLQdGt7fcu+d4QAWOP14p0YQhL3UkZVlVn8crnO+LfiNZG4lHntGOQjMRkc/e/wAKKsoy6msYS5FE5Ntcn8QX5jaU2lhE4N1OZBvmHcZ7muWNROVug6kfZ+9a5znxJ+KxtYzo+mXf2e0UOLOBJtp24+8xzyT/AFpVcUmuRPQunCMpXseUfs5/F7WvFvx18U2onPl2WhRILhAWAmdmBGT3xissvrJ4io49joxODcKMZeZ0Hx78MeF5tTS28b6XHqMaxr9vtLmMMspPTIbg9fwIqsTzwn7yuXTqyjTtHQ47xZZ694S0GXQLfxRd39jBCJ7CK4uN7xwsRmMseWA4GD2rGFOUU+Z3CE3KSbVjyL4veL9J8P6bc6b4os7C1u7+Em3ubeIqFjzwgwcEnAJzXLiIKmrNmsYSnLRXRx3gTxr8OvC3hy803UdPtb21urMkXdrDseF/Uj2+ppQnB0uXoarnqStseOxajZ63qd4NLuhJL5rMrB/llA6ZPY1jBKTZtN8tkeTX1vqOqeI7y11a3k8xpSY9pB3D15PJ9u9TzJTsdFFNRuZOqyWNq0lts3SqcbXi2nH9DXZSlFLQ0b1E09cukittOdxXtmvTptaNGkFJnP8Axt0CHSPGARI5Y5Z7OKeVJYtv31yCPUEYINfO5rKnLE3i9ep0VLxSOKIVTuP6V58btnNNuTHwkudzDjsaJvlLglFHUfDTwLq3xE8W2nhbR4S8lxKA20fdXPWuzJsrr5tjlRh8wk5TahHdn6L/AAY+DVl8K/Ctr4X02AJKoBmwPmdu+a/prJMqpZVgI0aejVrv8z7HK8PDD0NPme0yeCtR8L+HItY12I20Vz9w4IYj2rprY+E1KnTd2azxlNtqm7tCeHteOt6d5Nnpf2e2t90YRm5c9CW9a8/AZX+9datq2cWFoSdd1Zyu90PS6vZr/wAiRgImGUbPXFe/KHLtse1GPLTv1Gz+Fm8UzLY+WQshJlSOQYVcck5xXFjMQqOGkpbPoRUqRhFy6oSRtK0uxXTfDkyyxQJsLbcBj6muXKsLGhRvGNrnLFuc7tWZQn1Mwyh0AWR
1+Zg+M4/lXsOjSfvJa9TopRlezM+62zhXmRGkZi22Rdw+uD3rWMGrWOuNo6FC71+9MhitFRY432tufaR69OTXbCmrXKklzWIdHGra34ktdF8L6LHd3F/OILdY8s0khIA+vWuLH43D5bhKmLxDtTpptv0NHUpYWm61Z2jHVnfWnj6PwR+1/wCFv2Q/CWvyfZdLsjffES/t7bzJbq8kULBaIx6KpJPQ9vev4N4ghmPi9lWb8VY9SlhqMnDD01ppB6yts7/10PFw855rlGKzCpFNpfu03ZKKer+48F+K1hbaJ448Q6fJ5iNaaxcIsF4gV4z5h5YADn8B1r+t/CfHxzDgHLsSla9KKt2srHpzm66hUjazS222+f5ngfxViL7r6CJoRHLvlSF/mkGevPSv1CCbV7lVHKS5VqeY+JbOyupwBazmMwkxhDhnBJyOO3vVWvuczScfeOA8Z61pWk6ra6HY6dHHNcgpbxTw7nnwM8Mf4Qa5q1SKkox3OHFYinTkqSvd7HlXxB8Kaz4uu7q71PWJY1WIwFYMIWc5woA6jg1w18O6y5bnm4jBTxiab0OU0v4MaLp2mI73s0cjJiWLzyGJLY2n3P6Cop5fhqMbI4MHlVPBXjFvzI/Efws0jRrWaCKGIx2ThElWY/vZGPb1x69K2eCoqF0dmJwdP2S5I2NU+H59G0qfSxuDNb7WyDkkgMDk9sGtXT9lBrujSlgpwon6afAb9o7wb+0d/wAEufDnw8ljMC+FNPbSNbXS4mmm0+7V18qZ4kGQrddwr5GnClKNSg/t3T9D1Mgw2G+s+1g/eas07LZHyn+2X/wTakfwvN+038G9Cm0fxHHZC68deCHhaUz5xt1K2TG5YpchiuMqzYr8cwnEGb8D8RPJ8zj+4lrRqNaNPZPpoup8fUy2pi86nUwvdp9nY+Q/jB+zn8dfhnPFP4y+F+q6Vc3EHnLbm2LxzxHGZEK5BUZGfQ8Hmv0LBcQ5XnFT2uFrLmvZpdyc0y/E4fCuvCNrOzS1HaH+z34n8V2Gh2mh2t1qOueJrhf7H8N2MOZ50DhPOdiNsMZJwHbuD6VrxLmeCyDBxxOMqxhB66vXtovPoclHByqqEY806lTVQitdN230R7cf+Cb/AMH7iYP4i/bk+HvgHWfmGoeDrq9udauLTYCXZp7SERk4A+UZ69a/M4+JuKjWaw2ArThpaekU77WvY+lqcKYirVi8K+VyV3FSjK1tXu09Fq9Cuv7Bf7M2nXVvFP8A8FMPDE6XkTG3Om/D/VJgyg4Y8qoAHJPfAq63iDnzg5U8rqad5wRi+FMfKCkqnxXtotbb9RmsfsR/ADwRFaeIvGP7bclz4f1FmGnX2gfDq5Zr5BklYzNIiK52nCsa8p+JHEVebo4fLb1FupVYq33Juxy/6q5jpF4iKctNl/mb9t8Wvgh+xz4Y8U2n7IWp6jrknjaeyabVtejgluzpUGHuLGeIL+43yDlcncjgZ4OPlsZhM445z2hXzumqSoOVqcebl55aRlGTfvWWzezPIzXL4ZKqdLm5pS1bW/p5Hl+rfEjXvix8Qdb+L/imSEap4h1d767jtYBGkZdt2xFXhVUYAUdAK/f8kyLAZHkdPBUG1yW+fVtv7r97nfg6U/YqS7dT6R+GXwu8Y/FbwJplr4K0uSa+W78uIheFyAQSa+Jz6P1nHtU9zDMF7JprqfrJ+wr8FPG3wz+FFlY/EC4jlvlhG7YuMe1ZQhOlTSk9T5WrJzkz6O0SKzgYecAeOF96NQ1Rbt7uH7S7EYXstDkkZTiri6HDJdamVt8KpPzfSoVnIuDvA6q9uLfTbUW1uAJCOStbQ905pvWxnCzubh1mkfr61LvcqKT1NK1jaGPy47gEjrzUtNq5V2QXWj6/dzCa1uxGnUk1i4SbNoum1dofDa3llEDd3ok4z61aTW4o1KdSKcNmC24vZtysM46niqSVhN6HiX/BSfwZF4r/AGHfH8Jh3zaRYQ6vbbeSr2syTZ/JTSk6nK4wFSbjXi13sfGn7PVnqnibW7aDwrqDRXWoQZga71kQW4k2gqGj7g4ODxyawlGTWsrHuqpCl7043+Wp9SfDDU9Yv9Eg1bUIkXZuiuXSQbFkjHzfMfbJ+lOg5T6nHia9NyvE9N8Nw6ZMj3C67YTeTEssiQ3qM+G+6QmcnqDx1rshTTejOCWI/ectjyzxZqcOqfaby0uAQt26MVbPPvXHiPdUme1QhK65jM+GOow2/wAQn1q+tYrq00XS5b25hlg8wO7fIgI785/Kvm8dmP1HFU3JcyfSzb7Lbz+49iVF1KDipWb87HkfgX4hePPilf8Ain4x3PhFfDzQ317p/gi6j0rF0XVW3Xm9gSm48L0BCgd+fNwGVVcTjKuLnNSi1e3Z6af1qebUqxkoxlG1nZaPXfV/0vvO90K51uP4f6fJ4gvpLvU7m2VtYupmzLPNt3FiR3JzX1mHUaeHSsc0m1PdtHKfsfObv9qbxD4ma3ymnaCYUuDyp3XCKVH02H86wxNWjUxFGPLqru/fa33fqdeFUKeHqTb1dkfXHivTGuwNT0jiVED/AC9GBrsq2lHQ4Izv7rQ2x14ajALHVGCsI8OhHfsa5IfFaRzzjJSunoYmsXt94Su2cyu9q4xuU5H0NdXvQ+E3hyT23OT8aeDNG8ZImpaHqraXf5wk0LfIc+o6GnKFOove3Hep8MtjzzxI/jL4d60l54002W8hVSIryzXcgX1IzkGp5IxKioW91nNah8VPC/jHVDcT67CIIjiKKRgCWHsabnKUjOTUHYTwLo+k/EP4nRaBHNDLbxRS3uoyq3yw20S7mLEdBwBn1Ir5zizOKGQ5FVxk371rRXdvY9HLqUqmKg5rS6/M+YtVvY38Vy63ZaXNNZvczNHLG3y+WWOMjvxXo4B1HhKTl8Tim/VmOZRpfW5pbczOR8b6vp2oaqjQRzWzWsuIGKYYqf6V6CgnucbqRirROG8Uy3GryR26IJmY7pFdMA4+lNy5Ymcry1ZxHivwnqFsRNcW7OjNlFY8JWMptqzMJxdzifEsMUEpgwAqnjHQ8VKlZmLg7nIs011PHFaoCzSkYx2rbldSyOrDzjCaOs8M+Boru/jfUweCP3eP6V7GDy2EY3kelK9XY6n4hWHh3TPCckd1Ywqu0hQy81eKwtCVF3iFShGEUfPOqWUdvI9xbL+7Lcewr4SpFLEOC2JilGJh3d2yk4PFdPLy0yZtyIoZg3zN1zWFSLa0FOcYM/rMTTTZIAiZIAeQZ4AFfeVU3Jo+ZrP98/U19Ge502T7XqEWVc74SFJ/CudNo537zOk07zNQm+2CxwD9xWXGaaTepUUtmba215OiXSWeNv8ACRzRKMr6IfPCDcWzRlgsbq3DABnUcqDjJpuCmjni6sJeRZgbZEpYbTjhDVxaitQa1YXKXFxA1tJHtRxgsGwayqc1WNiUqcJcyepFZ2X9m2i2dqzsoOSXck0qcJUopQ1FVrxrTcpaFgRyErtOea6o06m5z+0gyv4pjt10lprtAxUfKAe9XOOl5G2DqNVbR2OK1BnjhBzIm48gGu
aaVtT0eV312MLVdTS0szFaWkmJWyxRt2/n07VyOp9mJrGMVNO+pztzrqzRTCSeWS7CcW5ACoOvJ7//AF6yqTdrLc7lG2j2Mu3v9e0zzrvUL20in8jlpZfL8tT2C9zWdKVSMtRTlCo+WKZh3EOjzI974j1QTqfnEKDJb3zWdbVe8xtyvZI848c+MNQ1WGSLR7PZaoTtRyQuPb1NcVSdSWqWhUILq7s4XWJ9U8RXVsvlfZLe1t2WNMYEnHU+9SoucfQ66fLTjqdH8HrLTPhzb/a9CtTHc61cpH5oXdvfPzEnt7ZrbCJYTXa7IrupX22Rc+MfiywttZvI9Ui+1rNMIXWZ+Ru43e2DjH1rTE14876mdG1OK6s8W8afHDwR4Q1GVPFXiqKws1ke3vLq4fCxOchPXAO3v1wa4/rFOL96VhyjOS91HzTr7+Mfit45f4gXHjaS6t1DR6c1kytaGPOAzLgjJ9a86KnWquTldHdQTVO1hJ7e4EUlre6lJCkP+tiht9sbH1U4/wDrGu3WMLFy0ehgan4g8JeHLC6FtcW6zSxlreZUKkH1Ix+lZR5Neh0U0pbo8gmk1bUpHa7nJLuWBVMbWzxg9s04QTe5ry30RnahBev5kV1MzurBd8gIJPXBrpjGxUaaRY0SRWv0WReduMk9fxruoSfMlY1U+XY2v2ivDw8SfDfQfjHpczSvp7jQfEsO7cYJFBa2lPorx5Ue6e9fNZhCdPHyT2YSlFrR6nisibmyTx9azclFWRn8KJbVHllEUaksxwqjqayhGVWaildsHJJH2f8AsG/AjU/DwXx5r1gYpZcGITLgheuRX7XwPkFTLKP1isrSZ6WV4Zyftam/Q+q/Cdzqk2sS61JACIXxGpH3jX6YqinCz2Z9PUtClyrqdl448Yat8QoLbSNTv/NaCNV8pFAWJR9K8/DYOhSqS5Diw+DpYeblBbkT22leH9F8h5ljt1Qne2fnPevWpJylyrY9BJRvZXZn6bdya55U+mqXTf8AIAh55xjBraoo0ldvQ3ilKDudLIYvBpez1DS0e8voTFNbyWpcopH3s9iBnmvmMfUwuN/dN8qb3PNqzlUmnFuyfRnGzaFcaJcxXNnHdJb3RP2CK4QBZADycemfWu7LuWrUlGlNuKSXl6nVGrGvdLdbjPEGm2qayl7FczSSNEBNBwY1f2r2aMZU99TqoQdNe8UdY1RIUedr6OFE+XezAE57e9dsZO1rG8oc7ujJeSG+nQwQdeBERtAB6sxJqZTlRjz6s6KTUr+R3/7OutT+GfFWsfFe6kht9A8BaNLcTsq/8fN66kRRL64wW/Aetfz19IjiKpg+FqeR4Sb+sYySjZb8p4ueUpYrDRwqu5VZW9IrVs8J/ZO8VeMZ/FPif9pnVWkj13xHrMl1bz3I3sih/lPTpjGB7V9X4ccI4XA8Exy2cbU/ZuNrdWtWCjSq/wCzW/dpctttNi7+1Tbnwv8AFTWLnUdRe9fU/I1Dzpl+d/OjWTJUfdGScDr61XhBThl/Cs8sev1epOHnbmuvwZvg5qWDjGEbKN192h86/EK8ttTv5sN5i+WDKQuFjx3PrX7NGUfZ7nXFOMTzDULy/wBS1CW0tHIsj+5NwDiS4OPuj0H6VtZOmmcsoybOD1LSfN8VSa5bLEZLAiO3Lckdm2k9h3PeubmXNZIweGhGpzyd2jlfGmq6Xpni0yXRKTxwloVjQ7RMAcNXNOqufUwqVXT2R5f4w8YeW0H2u+ljdrkm5h2FftOT1B9Md/euGWISmk9jxsXWqQrR3Vyfxlca34ouLApcsltCIpEtwoARAcAZ7kZrTESlUsovRHdVVWdONn1Oj8ea1BHbtG1y7tHaoGMTkApt2tyOc9K0rSapNXe1vv8AM7qtdQotx3OK8IfFD4jfCnxg/ij4L/EfUNA1E+WLiXTZSqSL12un3WA75FeDiMPSr1LRdpdz5S8qtVujPlke7fBb/grp8cvhp8X7bxl+0XZT+L9FjExurPR5/sMtw+wBDJt4ZFZUYrxnbXw3HPBlXiPLVQ5kpJ/Fa7sjqq5xjcJSVPERTS2lFWfzPUfE/wDwU/0DUf2c9Etfgrp+qz+OxHqttd+J9ctop47exvJYpZLSJj8ycxpk46opzkcfnOW+HWc086+szqqGHXK0oaNuKtdncoPMaMq6fuStb1Xc+W/HvxT+JmrfBG68IeENRFlLYyTSeKbayhjS51Cwd1dMSqocwxOATEDtGd2OtfXYnh6jUzqGKx8nUSSUbu6j8trnk4uniMJR5qWjW7W7Xr+h4NHLpDxOOrSruTLk8/pX13s8LSfKoryPAqSp1JO3XzYhtFtZDcNbFEZcFg5Byf6VlVjRt8KsU8HKEOaz+9ktgiljbySMdwyUEhwfQ0JYNWi4rmt5XOnCr3WqmvzPQfgj4PGq6b498QrHHN/ZfhR52DIWG55FjByOB97vXw/FmLjRzPA0lp7Sol92pwVlGVZot/DOV7nTIbYMpAKY4HY5PPriv0dp1FofSYKsvZKKV9D9Ev8AghMPDc8+veFJ9RkFzea3NJBNf3hkIdCAEUN90bT0r80xDSzivTe62PArRqzcr9Gz9dtB0qfTrdbeWUttxyB1rCc9TypJc2h0nh2TT4rvde491pxd0Q1poReJrq3jnLWoKg9NtN2uYRT59R/gmx1IF79pOAMj6VEYa3OhySjZGgbi9vL7CqeDjGOMU2rHI4K9zTniulgWNX+qqKFvqaR5UX9OjmWNXnLZ+nWrdrETXUs3d9KseFDAY6etQmmiqbdyK1WW5wbhSFxnmk7FpNPUnvoLRtMkghl2SFfvL2pRumO2p538ZdFGu/Anxv4UuAZlvvCOowMH/iLW71cGk7M1ovlqRdup+ff7Hvw01DxT4X0rxDceMbC2sLrw+iS6Vf6Ct2skpQYlVycq/YHnbk461y+xlJ8ylY9LErl11+TPpH4LeCZ/CcQtPEGrrqUjXEjrmDEaBhjbsbtjI+lFDDSpO7dzkqpTaaR7f4ZsdEnjFjb6VDh5Iz5qQKjR7AQmMddoYgDBwCe1dtGlCLukYSjed2eUfEDTtHttV1e3tZBDKsvmNFwd55BcDjqR6VjWw6kmtmenTqVZRjZXRzula9Z/DbS9W1C7t3nuUuLSPVIV+UpCUL7WPYYYE56d6/Ocfi1Uz+U6LbVHS3fuetKKlRUZOz8zL8Kar4asNMu9C8BaLq8GlPfCQnViT5jbchY+zR4b5WXgg8E19hk1f6xQqOEXGMnez6v+rnJiqVSi1zal/wARadLDoUl9Y2YeU2js8US87iSAPYnAH41vWXJF8pxzTUb3Oa/4J+6L4wn1PXY/iFo8Om63HpUa32mwSB1geS6mk8vcOCwXaDjuPavIcZxzGMJ7pGkXH6oprZs+j49Wm0xRYl2MTn91I3b2r1Y1LoyUebYra9ax3rSX+mPmRFG5PeiynIycuWdmYVxr99C66XqtuGt5yW+c44HbmtXLljexclFao53UbKG5nkl8HaudsR3y274OP61hGLnLQTqykuWxyWv/ABJ1Tw5LKmuQGW2lfakcn
Ygge615NamnOM10Z7eFrW5ot7o6/9n+Tw/eN4puNfto5zvt44vNUEgLEuSPzH51+s8EYfly+T6X/U/HeM8Uq2buL7flb/ADM7x74K8EC9GseFlZbtMl1UY/lX21SK0sj46bg1schHruh6jaz2UzrHeEYIbr+dSnfQil7zaOZm13WPBepm4s9QlWPjDB8qBW9NRSJxLnT2Op8O/ESHxmT/AGyI94jK+YD96tKkbx0LpVI1InM3WqQzeIW0q5cmMDKBuSPauFxd7CU3e1hljdNpfiS1lsiVUSlWbpmtOVxjoZ1lzas7Hxl4vd9JKeZjbGQVz1reLfKFNKWhzfgjxZceFtEYI2WuHY7h2zWNJOc9TplOMYqxn3093q94dRvCWw3Hpz6101FpYxbdRamRrUM+j6gNVsJhL8nzRhutKDS0Iq1Ixja5yF7qWp69MWMZRUkIVW9M0TlzOyORLnYqy6tZTFtjJkcYOaKSszolJRhoZ+r3eqXtwkc0hUZ+YUVPeehxqLkynrOpOqhRIRt4GaaV42aG5ODscveXjyo7FuSeBXFiHyvQ6sNSUql2Z7u0ny1l0uerOpZWI5cICDyfWpndwOdVPeIfMV0O8iuWSaLqpW1P6ltUkh+1/wBo21gZ+QMOeHb255xX1Va3O7nnVm/aNeZ2dpNcW3hP7La3KrcXYwWA6euDXHUvy2RlGCc7mn58kGlQ6LaRFYYUAK95D3J9K5pxsrHVSi43kTT6je2Glt5SeXAoxsRcBiffr+NZtS6G0eWpPUd4P0zWDDJPdxRPc3B/dW0Y+RB2J9WqKdNxvfc1xMqfJyW0LuhapfG91O4eOW5isrY/bNkJJjYA45xwR6fhTgr1JO2xzzUY8qT32Oj+HJuvC3hD+37iErf6s5m/eLhkQjC5HrtA61pTUaNPmluy6vPUfK9kcl428f8A2KznvY3IuipCksSxb+6uO/auWpUXxdTWgpSkkny2Ol8WaxqlxoVmdThuYnFgmQ6sFT5Bnr1PX3rfEcsIqTvovMmlGkm7O7bOQ/Zp1+31v4seKNFOnz2ul2+ixSyzSFlFxOXYFfQgAZx71yYatVqYmUfs2KzOm44SnJfFcy/2gZtD8a3DeA9G0qa7muWWOEF8wIQGBCAgDJySx56CnV5a37uJyUlKn78meFfDfxTrXwOvtZ+AvxLt7yG00+Mah4fuLaFpTHbY/eQMuSzBC28egbAwABTqSdGPs7bL+uvz+Z21KsZpSh8zq/gb8QPBl7Fq9/4c1q3v1nkMQntJ/McbjjDAcqfY9KnL5xasmGJpy5E5I574ti3/AOEge3vCkdvERG0jDLb2BBp1oSU3czpRUl72x80eDvh94H+N37RfxB+F3iyyUyL4RtLrRbhothSaKV/MMbjndgoeK5IpSlZnYqUacVLoYOqS/GL9nfWL5/G2n3fiTRWjBh1W1OZoo1Pyh4xw+P7y89Mg1U6M/ivuKpJJ+6Zeg6p4a+MPg6Lxno+rQ3aS3xN3FECWhlY4Mci4yg/3hXHySfu9iOeDaT3OQ1fQdEtWvhd2NvbpHGygoAUx75/zzVRikd8LtJIo+LvAvhOTw3ba9G1uLW7hWVH2jELHhlPtlSR6Vs5NKxEqqU2jCvPDNmLAWSaeJTISdwTO5VH3gw69a55RXY1puLRzN94RgfUFjtQmYYQ6pIRyf7pP9KiN+hopRWxHNJ4ZOYWvIrOTG6Xz5Bt3A98HIPv0ok31KvpdnI+KPEmktdeRot9BecFnRMuF+rKRRTgpsy9qpNy7Hit3Z3vjfxCfF+pXW6WIsmnJz5cMYPYHue5rqm+VcsdEcqh9alzS2Wx2Xh7QNQ8Q2k8mm2zC8tIS9xFEg+ZB1bnrRFux3+7GGhzfj/TLnxP4Xk+xyZuLVhPbllxgrzinBw9prsYukqsbrdE/hG8s/EGgQX6rjzIwWK9j0OatKXMONWLjoFsP7K1FtMuciOXmJhnbV7TuEJPmsQayzJMEdwCOBJ2Ye9ephHdanTdxZxnxmuns9DtNUWWRSkmG8scEV99kWIioOJvOpy0Ls4vwvd3d3cnUb+4l8leYwwHzH8a+lpzlPVHHTqpz02NWXX9Z1S+WC3vmDtxGsZGF+vrWnvM66jlJGzpstn4bw+q6gZrgnLK5zk1v7WNOI6UvZv3mJpniG18Sa4zxAzKj/K4UhRVxra6HYqiluM+NetlLCG0tFdZCgVSGxkmlVU5JSi7MMZiJrD8sOpX8P6kmjeG00TTwoO0G4b/a9K6aSVOCOrDSVLCqKNay1Wz0HRJLKSJZ5LhfNmt5xlXA5ANfN5xhKGcc1Cor21XqjmxLcXFpX1PM/G3iy3tFaa1tEVZyWitbccF2P3QPQVxYX2eBwSo0zgzrHwwzXKrt7JHVfC3T9Q8G+FJtb1ePyL7Us7hnDKnUDrX1WWYWdGgpz+JndlUJ0MNzVfikMh8QXd3qcl8uDtHyvIB/KvUpzSdrnoxq87u+hG1/ceKfENl4QgbiWTfdhRnKDk5zWcsS6uMp0YvZpv5GWOqfWasMPH1foe5eOrTX/jN+xtqtnFcxte/Da7ENgrXTmZbN2M0cQiHyBQfO+Y92A96/AuOJ0OEvGWjjYR5YY2PNskuaNot33u9NDslh1jcoxGGpr3ormXyPnTwd4vD2qXxYFmTBJOQvBzx61+/4bH0a1PmXU8bLqlKvhYt7jj4hdzLGnO4EAhuM96zxNaDgzb6zBOUfuM3wxfPrnjC5uZI1EdrCI/nH3vUV5OVuOIxkqnZHz2XSnjM0qVZbR0Oov/C1xqN4dd0u6a3vYLlJbGeJtrJKmCrD3BGa9OtgqOO541FeMk4td09Drx2UU8U+dO0r3R9ZftX+M/id+15+yVoHxg8QvPr1rLpzXGsybF8rSdXsiIbiNUABzNCVkyCe3FfxHwvhMD4aeKlfLZWpzhUtbVupSqaxbf8Adem3zPqqeGweZ5S41aXvpXv6aP8AU+IdQ+H1o0S3PhjWEXzl3CItnPt7V/ZdaNOqubCTs3rY+TxWUU6bvhJWutjO0LVdU8FazLZ63Y7PPjaMytnDg8H8PavPhCWH93GQvrvueXgq9bLMS1i479TcsNTgQzWoYKJSSu1vujIOfyrtwmIhGbjE96hOEru+hG+pnZuAyJojuIPU56munGYnnjbuTWxMUrIbPqhW8aIEDMQGB2xXBRxCdZxNKNaN2jd+FEz3XibW/EQufs9v4e8K6jfNMHKN5phMEC++Zpoxjvk5r5fjPEU8Rh6WDS/iVIKy7KSk/lZO55lSp7TFPleiTf6HJaOJLG1JVgrIny8deO9fRUKVSjF+TOqnTXs7McHe7C2su0bFPzAevTNdc4e0SuRNKpaMtkdp+y9q1n4a+NekrqNyFstZhudGviXxiO7haHk9uWU/hX5nx9ljxPD9SpBXlTlGovWDT/Q8epTpwvO2x554wtdQ0HWLrwnLE0V/BcyQXUb5zEUYqwOe/FdtPERxWGpypO/Ok9PMyx2MWIao0fil+BDpun/2VGsZCvG3fHf3r2MLhpUIWexpQoSy6j7Pe5cKJMGST7+PvE
da75wcqdos7Fd09Xqb+o26eJ/gks9rcmW40TUit1AwyYIpB8kqY6KWyrA99p718hXisPnC51b2kd+7Q6dR43CSoyW3U4fSWtb7Md0AJY+DXv4a1SPLLdHnUpw5uTaSNezeSXFjdKHAwY5AeQP8K64UlCTZ61OpKUOWW5pWlzBLAba5wyqcdehz1rnrKdzeEIvWW56T8DvA3xBbxNDq9v4oZNJWVXZVk+9joD614OOkuVxkcmIxtZKUFsfS+m/E6bw5qCOZgFOB1wPrXzU5KnojxXBuVz0yHxXH420nziwb9394Go1bNtbWNvwVfaDpFjgGJpEPO89KfsubUp6Iz9ZvLTUNcFxazBQzgsEbg1tbliYtPoYfwuvvsni7xJZJ8iteBmcnIzkV+Y53SccbUkup+j8PcqwqXmctpd2L/VvEeoSSBpBqQ+bGP4gK+LzO8ZJdz6zCLnqshvsm6YAjPmHJrjpyTpne3y1LGZ4tUiWPOcY5rbCyTZz4u/MmjJnA8lgDnsD26V1qVpNCpv3TE1NC5Z93GOgPSuqnNtHLXs3ZlE20jyYDh4mP3n7+1ae0VrI53FQdzorXypJoPKjCZgC4AwMeteXOMnFtnZGq5SSRuaIgGp2saf8ALSCRCD3wprhxE+WhJvo1+Z7OEpXxMU1umO+HviN9Eh8SWzMfmvV3ZHTEaiv2PgufPlumzPyHi9Qhms4+f6FO18UG11A6gl23luSDkc/iK+zmopo+IlNJ2Rx3iPwlda1rs2rWl4wVUySj4yPpQoQehUWoK5jXOuxW2kzWd9mY9Fc84PpUqCgyKtSVRWMHwfq+p20rwQE7C5MYY9s9KJ1YRVkyKUKkNzu/Dvw91vWzJr1zdBJdvyrnFcsJylqd0aa36mPcXGq6bPLDqMLBoZ8q2eDW85KJy1r35WGo+Kf7UR0ySDhRzUyrWixU6UxIb8PdRgkiONQMY4ooTvqjaaSVnudBFrWlTQLbnbhgR8vUV1pcxjBvkOJ1aK807Xp7hblmiZPkUnIFKUVHU5XTk5FPRIZmuJLy7YBQThWrHVy0OiKjBW6jNU1G5DyPbwPIEH30jJC/U1006fLo2tTNwc9kZZv4rmJ7mR1JHT5qxlGSm0jn5rOyOR1vV43u3iTHA4x2reK0uxxpykzKkm+TdIea83Ecsquh62FhyQIUkAyc8/Wpkiakm5aFe8uT91epPWnZcuoWSd2VjJiM7mHSso0+aWpjiK3MtD+n7T7fxLfzRvea9FFbFw625QFtgHr/AIV7lWEpVXcxxFlNnpHh3UIdQkhUyEwKgBQphm/DsKym1AzirmiL0vcGOztXchsjjhvc+1cr11O+K9wTWtdaxtjd3XzT5yofov8Au+9ZynybhSjFyNLwfcnV7M2+ps9sCu5JfNCqp/2iRyT27Z/Okr9dBV7RknFXsd34Xa70Pw2oudVuC8yl5yXAaQdgxAHQYFaOc4Qeu5MoQbTscN8QfirPpdpcy3TswcYiPfA4GPrXnVa0o3v1NlCUtEVvhh4A8fa/qdr448a6ZbaHpYUi1TUJP9IlLdH2fwj0JNa4WjOo1UqKyCrUgqbUNWez+N9S8OaJ4anttVeOQyxACNXG7tjH+NepX9nGm1I83DwnOspLoeOXl1C6alrfhu0lt7O3hIu7lSEMiKMlQSeTgZ4ziuHncbzgtD03Vg5ezvqVP2bNXsviFaXXx9uLQSaTYNLaeF3DSHz3BKyS4bCkAgqCBzzye0QVOdKNWNnu7q91razvppa+nR6u+iyxMIRl7LqcH+1Pdx2nizw58U7fQVRBqiW0rzSonnwzkwsNoAL8uSTz07YrnxdScEp8u4sOuROKd3ueG/FL4IX/AOyhrK/ED4RwxW95Z2yy+JLYDEeoyyfOyPjqy5wrdqwp0vq81JbHdUrOvQvIr/Dn4qWv7VXh/wASeN/Aek3a6V4YjM3iu7vLZ4Y9OlRd3lGSQBXcgnAXJ6V304SxN5x2RxLE0qKUG9X0MGytIPCDN8S7OM/2jMzXiKY/mMHA8okY+8v8645JP3up1zjUmrNG1r/ijRvEnhq3vrXT8afdxK9t56AqYpEyV9ip4/D6VlJy3b0LhR0s0fJ2t/ArUfDPjrxB8RPg74oudHv1uVMNxZKTFcZPAli5V1PfIyBnnvXPKN5e6GJpU4axZkeGvGD/ABYWYfEy0FtqtvcGC8gszshZuSZNuf4sLx2JpRjeV2a4WTitTnviBeeOvh49lp3gbXLafQdUnKXen3UXmxq4JUsueVPYgGrvrYdWlUqSvE5248X/ABS0LTDZHwXbXlom9hHY3LxOhPUISWGOPb09KmUtVFHRR5YR94dY+J7fxfpks8GqnTLyBFLaRqdokmATzk4+bP1q5QfLZmtoSXMc94m1jTdduCraQlrdsm2S4tpN0M6kc5BHH0rncZ3dzP2jktDjvGrjwdop0jRXiW6vi0bCEYMcZ6vgcc1K1djCporGH4Hso4pj4f1VdpKZtpiMK4x0+tdUueT5pf1Y66PLGnZFrxfcXnhfS7mezvJLS8VhDHJDJgyo3BXPetaceY5py1s0VdOgeOzRiNodNrMwz+frUclttDqptJWOa8IGbwj4n1DwtJIBCZTPbA9CjdRz710VKiZ5ybhXkjpfEWnC6sQyEgH5oXzyD1xWNObvoejTt1MO4vXvrARzRYkiGOe5r08JdGrqJHE/Fe6u/wDhEVMX3I5csrDivs8hcXUaZnWU50jzS+1CS8SOK4utid/KOMivsKVaF9zjVRRkuZk+h6/Y2FwzW8pyi485n6ewrWeIhGNjvjWhFXTGpq76pe7bi6Hl7/mYsckVy87bMKdRzneR1NprkdmY7XSAIoyw3Hby34120pKMbs6ZVHKXulnx5PDftCbkGRmQAZXke9KdWpJcqOucoumomF4ZPiLUPEcdrdeJYY7GHLypJGFG0DOCfXsK5ZwrQjzqV/IMBg8VPE3lU93sWrXWtU8S+K7lRAUtxbOWl2nakY4LE9hXJm+PWXZe5qVpy0XncwzDG/VKzi1p0OS0SSC98aS6qIF8uyJSyVjkZ/vc1jkdKrVqKpWXQnBU6eLzKWIkvhWh1Wsa3qC2+17rfI4J5OT+FfV4nERpqyZ6lTESUnZFGxu3NhIqOVZc+Y5fqaxw9f7Tehz0KkXd9t9Sz8IL9m8Q6t4luYTMscfkxHdx708qcp4qpiWrrb+vQ5cjxc8bj69ZvRaI+jv2HtXuPF3jPxd8FbPxENMl8U+H/tNs7Isn2iSxbz2twCD80sXmxj/e6jqPxL6Q2GjHL8Dnfs/aLDzcHrblVRcqk7W+GVn8uux9HlePjgceqs482yt87P8AM+UPHPhK90H4na94R8KXcsFna37tawXsOxxGxJAYZO0jOPwr7bguvmGccP0Z865lFXs7p/M+SxeCzDD5xiMNQkoxvzJeT1MxdR1nRMNqensvlufnVSyk9/wr6arXxGHhy1ov5HNKtiMK060duq1Lvw7lmufMuPlVryYs+RjAq8iUlFy7muTVl7OUusmz0MaysS21yCEUDK5P3iAQa+ndWnF32
PecUkrs+mv+CeM/h74rfDP4n/DPxD4oEMejeRrmmaI8h2XomU21zGqdGYhkbGR931r+P/pGxnl/FeW5rg6N3Wi6c52Xu8jUotv70enkeOpLEyovWL06WV/+CfHCaVqngnxrrfhnVHdX0a/lt1jlXBUKxxx9MV+68DYqeY5ZTxnNdOK/I+boYetRx1aFR/A2vl0F1C5s9aPl3UImj2kMuO/HP5191z0qsbVNjepOhXXLVV0c34i0XU/Dduuo6TL50cj+WlsTk5PpXzuOoQwTVWk9H0PGx8a2XwVTD+8npYksn8UWcS/2ppgthL8olY7lUf3T6Vy1q2JteUbHNRq4yMv38OW/UvWpSJ5fNCSNtwcnrnvXTgqkeWTb1PVoTp8zTd2b97fp4X+CUsMUKfbvGmtJHE6DDDT7L5n+qyXDp+Nua+bqyWY8Sxa1jQV/+3pf5L8zw8e5Uq0ZR+0/wX/B/I5+0vFkjmWRArBQGyPu+1fc05KpTbR71Oq61K4+a7gghkeVVUxQ/ezjms3VjFNs5/rEYN83Qo2FxN5SzQErIPnVlfBzngg+tebjIqthnGS0kmn6PQ5qqjOmvM9E+M/h9fiZ4Wsv2ofD8JkuLmVNM8ewxxfLZ6iq4iuSeyXCLu/66I47ivzbhvmyvHzyqra0bum2947tfL/InD4JRqfWYr1/zPOHmht4ZJJJwy4wPrX31SpSpQbbN8ZOlCDlJ3I9OvLbU3/0Ny7McFQcke9a0a1OrC6Zz4SVPGK6eh0vwajN/fa/4au7aVhNYuspGSNu0kFvoQD+FfG57ioc0JPRwlo/XoduS4im8RVodUcTqWmTWUh1O0UloWKzqVxnBr3VVfIqi3OHMMDUg3WorVbos2WopqCC4to/mUc4ODXXSr+0tYrBVoVFzM1g7lBc7cK42yKvr61vUlGJ6CU5u/Q9U+COtX1vZM7XTbUxiP15r5rMuWs/Myrqmlc77xVc6jcWQvbbftUfLXzVWlY86TW523wU+KsVxpx0eW42SKu1lY8k1jCUr6nJOvZ2Zs65ruuWdwbqyvGUtzgdMVu5uJpFya1E0Xx/eteIZbsiQMMN60m2zPneqOt+BN1FqvjHxBqGqSLHFFG0k0lwcJkDK49ycV8RnsIKs7adz73h+NqJl+CglzZ+Irhf+WupZT3G8V+ZZ3XUa6R+g5dGPzZJcx7rp8KOH5PrXn0qi9m0dM4fvLmX4xjw0YAOSveunBNznyxOfGLlsYzRF7dgeBkYrockrmdP4DI1KAlcFM8HOPSumlNM5Ky94p29qJLhZhGSFHT+EV089oszqJN2NuOJhewrGMZC8Yrgcl7OTZ004XqROs0WwSPVdPlCKVSGXcx6/dOce3rXgYipKdGovNHuwTp4mn6MxIYraz1/xBo11cgPcMksYI7tGpBr9w8PuSeTqfm19x+J8awks7nF+TONutE1a1kuI7u7XC5KKT1r7pqKuz47lUZalWwe/ttSMX2iQKUw6nnrXP7ZqVkbTfNHQd4k8EskFtO/yLcnKHoDTxF6cLswpzcZWOot/wBn5INHt720vIwzpuyHBPNc9HCzqpM3q30aM/UbPxT4SlFrciTAGF2rnP5V0SoPDP3rfeVSlVe6uVtRt9V1qyMcWi3MsjdStsxrCrVhTV2zSVGb95xZj2Pws+Il1NusvBeouucj/RyMfnXn1Mbh+s0aRp15K0abOgtPgd8VJVDf8InLEDyxmODWlPMcHSi/fHSy/GV5tKNvUvv+zV8SdQjUxta2jt0ZnJxVrPcBHudayPFtboLb9kTxbK4k8ReO0ZRw8Vrb8j8T0rgxWfRf8OJU+Hq07NzOksf2f9M8P22zTbSKdwMGa8DOSfp0rzpZzjWrJ2XkdmHyjD0mur8y/p2jeP8Aw9aXGm6bqdtHBOv762XTIypX3yvNZuqq7UpN39T1oQdCHLFK3oedeNP2crXxeJL37Y+nXEpJM9talU+pWvRw+aVcKrbnz+KynD4hupHRnnsn7IWoWlwZLv4ixOnqlkd2PxNbTzqrU2icMMpqp2lPQiuf2bPDMCZuvGN/KO/l2yqD+dTTxleTu0b/AFGEV8TIh8FPhzYYSdtTnIGTumC/yFOri8TLayM1gqKd22T23wx+GJLCDw0zsgGfPuWJrmdbFzVuYt4XDdiyvgTwLalRD4SsDn++hb+Zpr6zfWTJeFw6V1E/fRfEFnZ6+ohtTOXYKIhJk78dT9PSvuK2k2eRWUpVHzaanfeC9Q/tq9kuUUBI48TOy4UKOw9e1ctS7ehXNCKsbLarcQq5W5LFz/AvRff0Fcs5WR0qMWkZGs6tHdazaaYlhJcgNvMcacADnn0FcdWfvIunRsnqejeENJW/0+HVNcV0kEm9bO1ePYFycFhnIAx6Z5rSPNJczJdSPNyr9Sbxx42trS1lRigQKQwRuMdAPelN2u2xcnMmmcP8EdPf4z/EqfWruHfoPhsgySkgpc3RPyw/8B6ke49ajDUfa1Od2cTabVGmu7Pc/iPpf9paQb1w+bRleYoPvqOq49B/SvQqxTin2OClXam0lozh9b1fSPEGswWus3MVnEy7lhkkG6TA4XPQGuOdp1fedjTnqwhdLUreI9C1/wCIqnwh4QgEFgFCaheIgEMEJ+8FPQsRngeuTW8Y+0fKtjl9o3Nye5iSfEX4b+G/A1r8H/h7LHaWXhaP+zGst2DCYwQXIHc43Z75zWPNBw9lTVrG9OM51OefU+Wf2u/i34H8I+ALi88UaTqV5cKQmlPbZlEcySIY2CBd3D7cndwDnB6VxVOSMeWR2KlVnLkps+gdR8Hw/EnQjr/ieFpNJgH2qZOhv7hlyI/91c8/TFdlSHNDma0X4nPK9KPsz4i/ac+GXjbwl4lmvPhZ42vdL0nVNXjmvPCRunOm3twMBGlhVgCw4wfYelcFWq6KcabdmbUKdKM1OSu0QeJfjhqHhLSp/Cvxm8M3Xh3U5I2MN/Cxns5FKj5Qx5jz6EY9KxlUUY8rNq1Z1GrHzh+z38X5vC3xm1fwj4r+K06+E/E95v0lZpc22n33YMSf3aSjjPTI96e8EkV7ScIXb0Ppi+sbDw0lxaWKjelsrXWyXcuwhwHGM8Etwfb8qhBRk4p6lqoqkVY8f134USXfjy8v9JeVI7mYfvUH8YXd27jHb0qKukdAi2noY2o+CNY8Q6Suma0gV9MzKSiH94wY5bHY1hGEmdsZNbFKOex0qKNLC7ie3ETtLGFy6S5HzEehG7NWqbg7i1buzjPGWmm21WPWIFEcb4Pn2wyFBPQjuP5Zp1JvsTKbbscv451PQfB2hSeJdeCiApmBIWDGeXOAgX3/AK0op1GrBUapLU8Q8D+IPFXi7xpfz+OEEU15IZNOjUZWKEcCP8O/1reoqUZLkRy4d1K83zo73VbGK2skWZD5IOQ+3DIc9j2qU77HoaQjY4v4zalrtlpem3U0kd1ZWt6JJpFGXUH1Iq6M7TscOKVXmi+iOp0oQ6noqXtqCyyKGwp6HHWocldo9GHIoXRgeOdMSaxh160wL3T3/eow5eI9aE0cGIjeXMiaw1dz
ZBLvmN13QyHp9DU/CzalJsw9QliS7dhF8so5APQ16WHk0jrVra7nNeOVk1HwZf2ixebtTdgDkV9BlVVxrWZtJp0nE+eZ5Lp7nZ9qfYTgoDyPavqIScal0z5CrQnCtzc912NW2vrK2KWyR7pe46ivQhJX1PYeLo04KEVdksmokXAaRFGOgPc1v7WEVa46NZSlY6jTdbts25LbihA+RflH4/0q6c3LZnqU5U4zSZf8a6s4lR2k2nblBniuxJwhc2xVRxgpWMJNTijAB3hGHzSA4DGkpR5bMrD4hcq1Lei+NJYlv9KScpb3No3mQgZafaMhM54GRn8K+U4rwzxdGk4Ru4yR5WbqWJiuRXaZyXhrUGh1V45oCiO2QlerltdYetyW0M8rnVpYuUZaJmpr2tzRlrqRN2PlWPPU13Y7FRVO63OzMsQqUHrqa8OgXt74bj04asbTzEDzsseTk8/hW+EwVbEUopysmdtLLKmIy9U/act92aGn3Wn+CPDy6TYQb4clpJpCCzsepPoK9eVWjlmFVOCuurN6FPC5JhFRpa9W+5f+GHxAn8JfEfRfHmi6y9lJYanHKbyEZaOMttc47/KTx3r5XizBYbP+E8Zg/Zqp7SDaXeSV1+RpQxdOliIVFqrnf/t2eBvCnw9/aO1DVfBHiR9b8Pa1bpNpWuXUUkb3wGMyYkA+UluMAcY4HSvyvwEzjF4nIZYfGwVOrDeCa922y0b17/mVnmKqzxdPEyp8nPFKz3ujyWG9tWAt5GWSMsS25ck1+/KrSkuVu69DzqNaEpcsnci8E6VDctqElq3lKjkRMOnPavLwtlKbg7K+hngsEnUqThtcXxNqOoaft03ULYxiJf3Mg5D985r0K/Nb3isZVqR9x6eZ6r+wV8WPAHgP9pDSE+JVtbnQfEUEmkX088e4WUsmDbXZ9RFOsb49FNflni5k1XPODak8Jd1aL5ko7yVvej/28ro5MDDD1cRFV4KfvRaT/mi1KL9U1ddmb/8AwUI+EXin4RfG281fxbPa38niSPzX1rT1c2l1MnymSN2VQQ4w2AOOa+M8DOK8Fi8mqYKC5OTaMviiuzV3sfUZnyU6v1mSt7Rarsz5zs9Qk3SlxkSOVUgV+yU8Y6kpXd1c+ReJTqO3oWNKubnXdcjxCTDYLx8uQZDU4eTx2N/uwNcHUeOxt38NP8zbvFi80W7QlTKCLlGwRkdVOfXmvflQp2s9nuepiYRlfmV0znZdG1u41610HwxZmc6tcpbWMW7JWV2CqD7ZNfMZmv7LpyxEXanb7j5t0a+Dq+5rGWi8jT+Jeq/2h4tXSfDQW50nQbFNK0ZmYjzI4ifMmGenmytJL/20x2rx8oo4qhhPayV51HzP9F8lZBi6Vd1FyLmSVkc1/b6WE6wXVq0RUFWEi8N75717scfKklGasbUcbTw9PkqJpjNW1NdUCtczgnA2gYwwHc0qtdVrO55WLq/WPebL9pcBraJ4lwChAP8AerqdWnKkk+zPdoKMqEbne+FfFbeA7u88M3qu+ka1Ypba5YGQhZ0yGBOD95GwynsRXwMcLHM5uu1edJvkl+aNqGIVCo4NaM43xz4R/wCET1AxR3H2uwm+azuR91kPIz6EDrXt4fFOtG1VepGIjSW6umZNlYWmn3AvdNYDoSAa6o0EpqVPRHFy08PK9FWPpT9m39lvxrbpJ8YNP+Juk6VrWveHLxtH8IS2bTS31m8DoXmkBC2wkAbZnLHAOACDX5RxdxBgKePdGdFygpxvK+id1062PErYidLMZV6asvzZ518avg4nw18PeGfit4f1r+2PCPjKCQW+oNHslsNSh2i8066TnZNEzBh2kjkRx1IH2GWZtGrJ0Z6Sj+MejR6GCz6nVryjVVjzOTTI7e4N5pOGjkGXQHpX0dCk1LnjsaVKCp1va0HdPoW7KUKpQEEuTlMda7pNTidqnOtGy0Oz+EfiuGya4sLooWHILnBA715GOUHruwVB3u2e/aDqGk6v4QwpVl2ny27mvj8VNyqNR2MpVYJ2seY6vqN94T106lpZ2kPyvqM1y3lHQ8nFuLnoek+FPi1aeLtOWymbEy8EDvXXKFne+xVCU5R94q6nrbaffhkkPytnB7VPPfQh3uzb+Gfii4udVubl2kJmO1Iyx2ZyOT618hnSi6slY+yyWrOlRSTPSvh1mXR9SkeJd7Xjcj6j/CvxziCnKGMs2fo+UVPavma1X6jpbf8AflmHO45FcVOcIxPZcZSdyl4n0+W8hTAyQnBzXTQrqL1FiaPtIaGTLZPEhBjA4HB9a29opNmUKHLEzLuxeUgMnatoVbHPOjd3IYdMfzR8gGB1I4NbuuuXcxnTvI1bLTCLuGXryO3WvPrV7wkjqpU71I6HbQaVIbyznRMbIHVhgdwa8GNdck4vq0e+qPvxk1sjL139nT4k+PfE914p8NX+m21jJBAm+7udrllQA4UfhX7HwFmlKhkPLL+Zn47x3l+LxWdt0UrcsSdf2MvE11cCbXviZYquOFt4mbHsa+wqZ3T5nyJ2PkVkGKn8ckjbg/ZQ8H6deC71DxLc3UgUDEUIXPvyawedS5rqJ3UeH4KPvTubF38GvhfPbwwappf2tLYfujPcEAH3ApYnPMRUp2bSR2UsowcFrG7NWPRvDunQLaWGkWqKqYRRGG4/GuFY7EP7bOhYDDxd1BCkxyZVbdQy8ASWsY5+uOazliKst5M6lRhBaJCfZLvcFjmCtn5kCqv8hR7bm+LUGrFiDSdfvMrba3MdoywVsYH1rmajzXsS+a2hUvdD1+aPjVrl1LYJEpUjH1BFXzwXQdGMlrcyb3wj46+2FrLxEwhIASO5Yuw9fmUKD9MUoShe8kaVVNxdmVT4M8eyMS2uxDJw6qrZ/nxW8pUHE50q3LYjk8L+K7SUTX2tRPAPvRyK/wAvv8vU/SolUp2skVRpSjdtlJfDVxq08otfEksyx5AKRSoPzIFEKsY6NGs6btuUdT+ES6mFP/CRXKGXIYPMy/iBW0q8bbHLUoc8bHPXXwCvgzvpnjJJCFGUknbp6GlTxMb+8jz54KcXozE1r4H+LLOMRzRSyLksGhmbkD65Fd8MXTaF9SqtbGHqHwo8Zx27XFv4a1CYrJtYTMh47Ywcn8qJYin3MamGqR6GLf8Ag7xlYqHuvDV0m4H5hbNjGe5xg1UK9FrVnOqUm9TOhhurclJraRCv3hKmP503OMvdTLnNQjsft3f3emtqRvSpWGJ8yuhIyfQnr2r7urG83c+crczqNHpPwn126v8ARJ70wBSeEDJwqdse9ctSairEOk0zotEvIL+4neOImJDiRgDgEcc+tcLbk7nXyuMU7iXWtixIgs4zuIPmS4wxH+0TwBUWSd7Fxu48rdzrPBup6dqmhNqcFtJd3CxujtasTGPmOGwBzxxnIHFaRUeW/UTTpzXNotDivEHh3xd8WNZ/4RTwk32K2XAvNS2/u7aMnk8kZbGcAd68+oninKF2u2nW/XVW0vrrrpbquiFOnSSatZdD6M+HPgvwB8LfAdj4M8CQRyWdkmfPLbnmlPLSsf75OSS
ea9fDUaWHoqEDyq9WpVqOUlYh1/XriRhbW8as75CITx7k+tVPXRGMIpO55V8cPDHgm30+PSbbWlsNc1SdIYVik3NKWYblCc7flycjAFcWJpU5RSTs2ddGpXqysk2kO8SeOYvhd4Ug8A/DqUadaWkOJHyDgj78smRySc8VK5sPFQTshTpWquUkfF/7S0PxQOrT/FT4Q3csN20jJ592mU1ORztAkXuMnj07VjbVyp9/vO+jycjvsuh0njjwlrHwQ+Gsmi/EDU5tY17UNM+267ff2jLDEJdu8wpCGKCMAlSuPnwNxOKqcPZxafU0oRc6ilHT1sfQGq/FbTvE/wAO9O1DwxNALVdMhe1iU/KWlQMDx171vVqxdJI5a1P96zwH42adaNcaTYqhnubO+t3uZJG+WRzKrN+QxXBOn7WSSYJ6WtqdZ+1f8MPDXi2Z4bpbZgbcMRJEMDEYbbn3Na4ihGMTOCbjex8Z/wDDNPw/0f47aPZaxpCJp3iBpNJuIpF+Tz/LMkLHt/CV59a4rSjPlNv3koNX0KnxM/Y/i0XUJ9M8GeMdb0m3ug0Qis9SlSNdpztChsL0HT1rqpQ5Lt9TelBRWx5RdeDf2kPg1qXleFvivcXttbXAkSLUIRcKhHAfLfNyOpz3rGvh6N/dZp7JX0N34HeHvij8QfF1/wCOfHfi4yXBzGqE7IEI5K4GcEnj0+lYxjZ2ZXtJU1ypnY+NfhLZNq7apo+oB5I4Fa4tsYKHPp/EPQ06suxoqjktTzH4k+L/AA94C05m1uQsCXBtcfvDL2Vcdc1i02rEVKsYHhLaZ4g8da2niXxShSOAFbGzB+W3Q9Mjux7mtKT0sSqc6s7vYu694LeOwTVrK0IuLF/NQoeoHUfjzVOSXuo7YxjSVy34o1GG+8Jx3tnmQyqpCsOme2alKTJu6iujFTTtPvbFrDUId8MsRSSJ/Q9TRGk27l/FBqRg+Bry58Ma3dfD26nDpbjzLI+ZzLAT/MdK6q1ODSlCNjgpc1KpySZ116lte2rROoYEYJKgOP8A61ZRsjqaU0cdDILSWfQrwEmMloSTw6n0NN8zd2a0Yrl0MnVLmFoHtCxz1jc8YPpXdh5WlY0lFJmLLeRtZXFpcFseW2dvXp+te1hFL2yZUPj1PnfXro2uuT/Yjty5yWHPWvrWvZSufP5jWjRqNQRRW423AaNzuPVu9awrO1jghKWrTG3dyxmV5JCw3etZKKlUu2c1GtKNe8mddpfiGGO0t5L/AJii/wBXGo6V7EJUqMbn08KtJuMpO1zU8RajFqkCahFDhQMYccCtKtWTp3T0PaxMVPDpxeiMfTNbfUZ/Jis3lVRhpZBhV/CsaVdvRRPKwuNfNZRbS0uXNZuILuNNOhS3McDFhLDFtZ8+ppVIznfmOuXtK2sXoZOj3kbatPNJGMQpxkd64sG3PESnfRHNQrv6xOb+ygM5u9ZtoJl3OZN5XsB711TpQq14J+p537zGY+EZPrc6u8124ljBC7MDAjzzj1Ne1HEzUEorl9f+AfbPEumuRHP6prN3qUo0bSn+0SyDD9wv1ryMTip1/wBxSvJnzOY5iq0/YUPek+2yLHgZTBBqPhe/TFzGvm20g9uorfI04e1wmI+Kzt8zy8trV41Z4Wq/eWqPpT9tS+n+PP7Onw4+L8HjSO6vodEg09NISw8tNPMGYpQJB8rlyEfBORzjiv5s8PqNThvjnG5NGlyqVST53K7lzax06W27H2uZYStm3D0KtNWmndNvps/xPlGfUdc8O3baZqaZYLyyZI/H0r+hKlfE5XiJUqz5vQ+KVXFZZVcMRr6Ha+BZY9O8PZSVC8x3OwOe/SvUy/38MpRe+p9FluJh9WVne5r609veRtBNCHhEQOxl65/lXs0aiqS5JbHdOrBx5ZK9zkb3whfwyfb/AAzdkMD8kRPQ56g9ulc+Iy+lL36D7q3TzPJr5biaf73Dy1Wtj7G8I/tCaJ+1N+zRJ+zh8SvCGl3EEMcT2fie5MtxrmnamAVUK5Y4t2IA2AYIftgV/MeYZPT4X4jqYvCrkm5XasknF9+56zjDPqXNKq4ytZxvon39T411t73wlpupaFqtgovra/MLhk5jkRip/Ov2TD5gllbqRXx2a8j5GvXnhMDUUo+/e33E3g1prOwADhZpGMjnH519BlFHlwt38T1Z2ZMp08NdvV6s1LuVLi1F55p3M5Cnuw559zk166SlC9z2VUc43E8N+KR4U1iLUzEvmiF4rZ3UZiaRNhkHuqsxB7HFfMcU01VyqOHvZOS07pHDiq3s3GPVkN9aRR61c2Tw+QokzEhGCo6rXRhlFvl7bGjklWafQr30NjJam2voANr4dHTgc5yD2/8Ar111XTlS9/8AIyrVack+dX+Ryt/oA1bVZU0JFh2r+7QN8rn0rwalJ1pyeH0t+J5E8HHEzbwytb8Ta+HFpceJNesvDrwMsiXAE8ZU/Io5Yn2wDTeJdLLqlWorOC19TDB46VWXsp6OJv8AjacS61cXVuQFZz+7A6DP6Vw8OwnSwSs9ZbndKUnC7Md/GlpFZDw5rZM1nI/RRlkY9xXdjMIqdqylZdfMqOLjTXJVe5mahoGoaNIJrV/tFlLysi+lFGulC6d0Yzpzg7xd0z7A+Bvj6y1jwBo3jbUvE6WN3Yz2Whs0tpJ9nbYhwrygbQdirx359K/IeM8phKnikrt3vZarr1/LuebjvYUm9Xd9EcH+0RL8JtT8B/EKTSo7u+uH1KxvrCbTNQ/0G3vFd4Zy8XRmZdw3DpiubhGhnKxGEcnanFSjK695pq8denfzPOp4etiHzy0a301fY+ePDk8jHEblRjBz0r9lhOMFyo+qy9UqdNXNKWAQyqyyAseoFRKpK77HZKdOnK6NK68F3d7p/wDa+k3XlzKPneOTkj6V506nPUOLFVqs7qOiPYvh5r0Nh4TtdP8AtILLGBISR1r5/Epe0dkedD2kyXUdGj10TXKNwgzzXOqMou7HKnd6oxNLEOg6gJoZgrKcsNwFCvJ2MpT6RLniDxfaufMkuoxxnG4c1u6fJG5DqxhE9F+Cf2XUdNh1AK8g8wkCOIkk5r4rM5J15N9T6TKavNTi77M9g+F0IPhy9lZMM94+ARyOe9fjPFdVrMLI/WuHo3wzky5Lbbp2DDBzycV4SqtI+iikQXFqXUk4JGAeOtaKq27lJJuxW/s2E/MY1bjuKPbyTNFGJC+hW5bPkLz7VbxMu5MqUZdB0egWvAa2AJFS8TN9TF4aF9jS0zQIRKv7gYBGB6GuariHy6s6KVGMXsdXpOk7sBuw649q8irXUXoejpyna+HdEnaxXy7aMoc5LMf5V+tcFu+Rp92z814hlzZlL5Fz/hFb+aTe1xbRxj5n80tvPsCD9K+uvC58/wAk76Esuh6a8uWhQxrgMQxPP064pN8uxUKd9y3oejeChqCt4i0q8ltf4hpU0ayk+3m5FcmJqYrlvRtc1jT10KY0SCK8uZZdKjW0EubNWlDSGPPBkxgA+uKujOq0nU3KqJLYgvtHtfOQXGmQ8rkCIcdO9dLqcxi276jR4c
0+cP5ekknHJXOMf41l7RoyaTdyF/CSRjEULwq3PLYDelVztlOEbalabwzPHiZ4Z1VD8xaTgnPX8qG+4WtEgXw7fRlnkgdNzfJumJIAoukiEhk2iXcSmUXCjP35POPfsatTuPUoTaLMg/1+Bj5185ifXNPmCXvRsV5RHIotGuUJyGUyTSDGB04OKE7PQcX7tmZ1xZAIZLhArEEbWkc5H51uncyejKkqSrF5VtY23JH7x4txzz361EldmU1cz3tdelTa8WcsD5kAIB9Rknj8qcHZWLV2tChdreNvke0WTAICyg5A9OoJ65reKizGSkyhNLrDRrLBbzDYh8qIXDLgenJI7elKUV0MpprYy77xZd3l4umXlvCs6q7GGfT8kqDwS7qqn2wfwqbxg9DnlH3bPU/SzVvE+n63OtmbkQjcu8IxwFHXJBPWv1CrKKm2mfHSvGbbPVvhFr8sXw+uLuOJUjknYREAnKjgYz7VyTjeLbMlUcqnkdXouo3z2CxSeXAijeVJ2hj7+prLVRO614lfVrm1uv8AkItLJ5nDxdFc+lcztzalUZOKvY6TwfqWlzunhrD29nI4Vo7SQhQ3oAFYsxHAAHUjmhxjOVugqsptcyWpe8VeI7TwU7aDplyEgMw2W8Uu7JPXe2BlhnB7cVNWpCl7qFSTa5jU1rxxc6FFBqOn3TIIVUyS7vvnr5YA5Oe/1qpTtqmZSlztqS90h+M/x+8N/Dv4f/8ACW210z3moosenRwxl5AzDnaq5JKjdn0IFOtiIQp8y1bOahQnUrcnY8s+BXhPx/4w1Y/H34nW9xp1ogceF9Hum/fzs2QbuUfw8ZCg88kms6NOok5z27HsaUabgma83h+b4reNX0ae/a10HTMza3dA8zMeViz6k043xVSz2OedRRVt7nKfH7V9BvtT8P6FpsNvY6XBr1nGn2mby42VZlJ3E8DOMZPHNKVSFKSj5hQpctNtkX7ZOnSfEjQbu8l8PSaY9tcNbwXEk243EeDwflGVA5Dc8HA4xW0+WeslsPD80dU7o8X+BvxJ8aQfCSaxh0m1lfwpO1pcWk85XdbqfMgZDzglTtye9c060JKyRdVRjPfVmVq/x40H4tapei10bVNOl09GluItTRY42nAQrGkgbD4OOnp+Fc0HzT1NadKUPeZ0uv8A7VXhjW9ZfTfF2mX+n6lPZR20tjqKj7PKMNGzRSdGY5BxnOK1qxhOV5N2tt0FytX0PIv2pxr3xOsrWb4fSTac2gzQX1ndsSS19EQVP+7uA47jNKPRroZ0qSqbo67wL8R7T42eA5vEt0r22qx3CJq9mzAGyvQoEi467Tjep7g96mFR12dEZwirM888eXGnzWrWbORqCHCHjDj+IZ/EfnUunaWrE5TlHYwvg00Utp4k0A2MCS2199psyxKO0ZVVmjyPRgGHuKykoqVkKnBv4iDxn4hSztTONTkMsbARSD72zP3Tj04rKcfeNrciPALpX+KnjTUda1tGb+yX+zws6bccZLnPUnpmlOhWpVOWomn2YQ5Kr0HBNHkD6fcuiSFgI2Y8Yzgg+nNXyWXunRG0FynM6r8UNN0ue78O2tnDqc6xkARTYVTjpuHfrVezsrsmo7ppHmPhn4m3c4n0O+09omtrwzx2gffvi/iQdOcHI+mKThUlK6ehx0KkuZq2h2+i3FjqNqJ7dxcK4LodnVfT2qlJuWh6Ckkcv8VvDt/ax2njjS023mly5+U43wn7wPtXRFOouQ4sTRlUaqLob+kaxF4m0uK+tpwzSxh0cEDHtXL1NYy5onP+InIn8yeMbo8jIHJHr7Vo2rG1OTUbHL65a/Zl+120izQuOcH7hrpw9Rc1jTnVzKYtKGWRwr4/duRweK9qjJ8ysZyqOMro8H8dQyHXbn7UgRvOOSgxmvp5qpPlbPncXWnVqONjAicQS5xwT3qIVOWWpMX7OOgT38SSgDGQelOVdp3PKqKSq3ZpaFqss14qyxpsToXGQvvit8PinXnboerhsdDm5Fsu52OkaoviGyksltFEEPAlK43GvapuM1ZrQ+my7GSxiacfdXUY/wBmija0hiCIFIO3Hze1dtP2UVpojsxcqUaaULGbdymxiYui+YT8qDqTXkZhiuSLV9Tx8RjPYUXFLU15Pg98WfD3w0h+LOvfDPXLXw7fXv2eDXbnTJI7SaU8iNJGADH6VxYGpRo4dtSTb31OHB4ilCjKHNee7V9TH0qw1HTtauZNZsJ7S7jVdtvdwGN1BGQdrYIyOa1weMWIrSqqSdtEPLKr+szrTeq0RHr9/NFHi3k/eyHaF9SavGYyThyRerKzbMq0o8lN6vQ09L02Dw/pi2aBWuJ13TysPmB9Aa9zLqdLBYVqXxS3Z6mX4Snl+Ba3nLVsj0T7ReeMINQtoCYbZGW7mA42kfrXDThVxOcQq0l7sU+Znk4ecq2dQrQXuR+Jn058C9b1n4gfsV+IfhbF4hvbzTtM1i4kn0KzsY2W1aVMw3s0zLuVFcbNoIGZe/b+b+NqGHyfxKhjo04xnUUXGpKTvKztKEY3s21re3T7/u8hdDHZfKkn7yU0te7utO68vn0PmvR1imgFxexrNJMpDs6A4r+lMJQVaKqTV3Neq2ufJYXlq0256t6Mm0HSbzw7cu9hOs1rIhLwN1QeorSngq2Blam/d7Dy/K8RgqzlGV4PoaN9rvnzCWNsK8JVV9cV6FGrFT0OueKjTqpIjfVFsY1i3H96nDDsSDXWlyUmk3r19TprYqrGKt1P0S/4IkaT8N3/AGevjP8AEzxT4T8C+Ir3SZLKCTSPEUXl3hgl2/v7afPyumxiFxyeMgE1/H30hc+x2UcV0MHRg5RxVCUFOzlySTTUlbaV0le+zas02jxadKX9p8zfxJPeyutz4s/a3tPBGs/tH+MX8JBv7Mn1QyQGQlmBIGc5759OPev27wswmMxnAWFWYK9Tl1fe2x7FXC0a0LT3PKri7OlSuYVzGdwD7cbTX38aEsO79DjlGphb3WlhDrcUdqoeUeUIsls8f55q3iKdGHNUegU6vLTvN+7a5Y06KS+8ISa9J8smpXf2e1Xji3iwzn/gTlOf9k181Cs82xsnvCOiMKUvrGGdbu7L0Qy/1F5reG/kuTLdDKlpG5YDp+QGK9ilRjFJrdFSi/ZqSepk3uo6tr12LaytGywAkY5xXDmGJnWfs6a1OGvVq16ns6a9S/8A2DLocy2upRGI7cghuvHXNZYaMqLSkd1JvCJJnefAbwddaxdeOvizazpHbeEPDMct1IvQyXFxHbovPBJ3t+Rr5Di/MIvEUMGnrXnbTtFOT/I8yNOOMzWUoK+mpw3inXWvJJpo2wSSdxPOOn8q+lwdWGEpJJ7I9LFzoUE7vYzfhxYHxT4pNja2r3Eqo0kaRwmRsKCWOACcAc+gxmvGzjN5zwM6UOrR8zRxMMRiW5/I7O68NeLNPuC+leE9TvLCY7R5NjI4B9sCvKwGZOlh/wB49D2I1ZxcUk2j6r/Zo0zQ2/Zd0D4ZePPAmsLayfEDUNZvBbeH57hpilqsUMU0YTcqZDEY65NfHZnmG
LxOMr0sPdxko7NLZ9G/Jnz+a5TmNbNoVsNTlLl6LRanD/tK/siftOeLLbwtofwy+FsutWMPg+Cylu9NthaIiLcSSpFKJdhaRA+0kg4AUAkAVvw5mlDBOvOvGUHKbdpO/RK6s3ZO3l3tdnsfU81fM40JXlvdnG+F/wDgmd+2lfbI5vhrYafuGSb/AF63XA9wrE19GuLMHCV1d/I76OBzenD+F+J3Ojf8Ek/2ib0LJ4j8deEtLTvtu5bhlOf9lAP1rLFcZUVC1ODZ0wyvM6jvKy+Z6L4Q/wCCVTaZCE8VfHkvlfmTS9Ixn15djXlvi2tJaU7HWsnxMvinb5HXaP8A8ExPgbYvm/8AHHi29zyViukhGfoFryq2fY2c+ZJI7aOQYRK8pNs7HQf2GP2dtD4i0DVbtX4IvdZkYH6gEVnUzvH1I6yOtZRl8X8N/VnQWf7JP7PFpJiL4QaW744eZWkx+JNefPH41u/OzaGW4CCt7JG/pvwD+F+hZudH+EeioEXDSrpcZA9yWFJ43GVo2c2aLBYSEdKa+427HQNEghUQaVbWsP8AD5EESj9BXG4ye7ZVOhQi9IpfI8Y/shNF1DWLERhR/acu3nrnmvyvim/9rNeR9vksfZ4axSdUKiQknkYPr9a8LVOx7lO1yFihztHOfyrVJ2NGhjRdSij39KXMhwsIts5IIUg+uetJyRrGSRYtrAO3XjHbtWU6iSNbNrQ1dMsyjD93yOBxXBWqXRUeVHQWaRW8e58Y9c150pObCVRROz8GSDWtETUIsRqsrog8/htpxX7hwjReHyGlGW+v5n5pmtV1swnI1HtIMEyx/KvcS9T/AIV9E3fY86/cqS6ho8c0VvcXEcc0pYwRySkNLgZOB3xTm2+hKlFEN5qFiUN3LYKU3cylhuX2qNXqNy6lNr1TAJIo3JXpiQtjnvxzQ07EpyZZe8cWlxaxzTxuJ41j8yIbZ1wS0mQcgA4ABwSc+lRFzTsnoyuVct2Vr6/lu7i2u725Dy2sskloYpJIxGzrtYlYyA/HQOCBngVoqEl719yHayRBNq8nyq8spHVj8oHr2NbKCMG7MrzapluUlZSMAvKQAf8ACm4qxfM3EqzzqQX3oN2eTJkjnuB1qLaEIhkuGRQn9oAN1Lxwk55/IVSso6kNtMq6hI4C/ap7lQ5VdyL94t9B0qJzildEyneNiFtCjuyrQu4ypyGcgY+vHb0oV0ydWipd+H7ZQXismcoSPMLOcfn2rdNpDUJPVFGewZkxa2isMn5AS3X6dPxqebqS9dCre6WLVC0+lNGqrl3LlQPrkgfjRHV3TGpcu43QpfDPiC5uvI1u1iFjArzyXd1tXB6LHhSZmP8AdjDEd8VM68qcuU2Si1cnvdO0g3QtVt7ySMx71kgsHKOCNwwxC889OCO4zWyqSa2JqRXKPTTreaDyxoWsybjyIhCvbp+8bIrnfPUla5ySclTeh9maHZSa3qVvJZ2qxWbuA8UTBjIeOSew4FfrMqb5rM/PKkp1Lvuer2etWPh3RRY2ibnjcCOLfje2OgHoK56snayNqNK7Oj0bU7u100XWohJJyMsrfdBPYD2rGU2o2Ou0djOn1a+1TUhDGfkiGZWx90egrjbfNoOMIwW50ei3F/YgandxCPaMxMgA8sD0H94+tVDmbu0bXi1oZEWs6D4r8Z2ugarfMhF7ETGl4sTmLDF2XIJlYEINi4Pz+1Y8sKtW0uhhUlOC02PQPGdiNP0Y6zrbiKIsY0EeMW45+XGTtJAP159K0qxtvsJOLfLE8dt/EOieI/GunfDnwLp6pJe3Bl1O/I3ypbKct8x+7uxjiuejSjOdoouUpR949J+JnxLOj6azW0bJDDbKkUW7BCgfKoHY131qkaUeUzoweIiqquk11/yML4b3eryeEIl1SCaP7Vei4ugsTNmR87AzYPAUEn0AJrJT5o2iiq1qHmfNX7ZfjJfHviDRPhH4W8Qiy1nWNdWwvdOkLCa2VH3STR4GCoRWyeNrLjncueaVOOIpyTkk10e716afPW2z62TKTqSd7aM9Y+Knjawl0bTtC0+RZdPtbOGC1t5nIMjAfMzd+eM10QTUeW5NNtXPG/FvjPw1+zr4N1/x14tu47e21KNo9Rl2k7EVgqMVHoc8dcVqqcVsNt813ujI8DWun+LdAvNV8L6pbarYNeG4eWJiyPC+0Eg44bbvOOoOM4zXFKE0/d1OyNVTSdjZ8d+D/CXivwZfeFNUPmXNrAJrK6Y/Og69c/wtjmtadmrMbc07o3/hTDo/jr4AWGpPBCdUimay1BlcMrSRKwbj/aA3D6GrbpqNhydnseEeMp5/g18SZviJaRlrfVI1h1a2jJRZFVsLLjn5lGRn0NcyrU6Sate+39ehnKmk73OA+P8A471HxBqdp4U+D1tHLr+ou81m7fNBZWvBa4kI/hGcKDyzfTNRKsqjBYiKkoJHmOip8Tvg0iQ6d4zn1aRJnuGuNVG8zyOf3gyOg+UcdAMelZRgnWuzodJqGjKusfG3xz8RpGsrPwy1nfPf+XdzS3GYkYjJYAcnrkCtq75k5dRr95CxyvifUIfg1p8eoWt7JIivIJbUnLag247s+ueeawo3nuKNJUYu+xzup+L/ABJ8Q7E32iWT6ZZXEoMhkbMhP932HStXJ0Z3SujOM51tUQWfhfTPD+hTagbiO3MT7pGY4J9WJrnlN3vc29q3HVHBaek/ir4g3uq6baNDZPEn2SQj/WMv8Y/Q1sm2kcc1ed0tzsNO1rxB4RuEGp2H2i0Ay01lw4PdmXoffFJQcVoaL2ravsdDpHjrwj4qsZba31COWO4Upg5wp6FSDyD7VPtGnytG0MRGqnFHGeHWu/B/iW48FXbjy9xlsTu4dDztFWuWSuiYwdPdmtrmoWl/E8T71P8AEGXlTj+XvQ11R07RZwd5MYriSHzNrfxRj7rL6iuilH3rmNNyk9SlLcbI2Ct8uDhhXuUF70SnBylY8R8WSSXWt3TPeeaqykBmHIr6WviFCPLE8etKNOtKzvYp6V4a1vxHMbXQ9Eubx1XJFvAWwPXgV5FfGU8N/FdjhUpVJaK5Ss9AutQup4obKdhaqXugkZJjAPOfStaNSOJaXQ5ZR+tVOSKem5JZ6lBFKI7KyAiJwzvyTXr061LBNKKudFGpSoVPcjdeZ3Gn3F3e2BTSNJnmZIDJJBZwlyqDq5x0HvXbUx9GlRU6suVPbzPqJZnQw+FUrKK7H058Kv8Agnb4S1P9mIftKfHv4wXWlT6sw/4RjwToFmGurlMZ86eaT5Yk6DABJr82zrxHw9GU6GFa54y5bP8AF6f5n57jeKZ18S6VN7M9x/ZQ/ZK8DfDLwLa67o3gjTLrxnqAYrqeu2wvDaxN0Kq42q+OhAzmvyHOeMc9zbGOEajUNrLS54eNz/FYiuoRlyxXbr8zt/Cn7PHjPxR4lttS8fzya/c2E73FgviEk2GmIhBUiJvkXAHYZya4v7UzOvQdCnJwVtXe3r1PMeYOg7Qdm92t2fL3xe/Yd/bA/a0/aT8UfGHUvEukDS73UhGninX79ILdokAW
NVCknAUYAx2r9MyzivJuG8ppUlNuSjstW2ffLF4ChRpzjXTbitLNu55V+1d+x/4R/ZTn0HU5f2pvBfjjUr+RlvdE8OibztPYD7zl1Clc8dR9K9/hPi6XEePcp4acIx6yVk/Q6MtxtPEY2FWrFqKfVWPIr69k1S/TR9PGWmY5frsXuc1+sRjVxNT2cXv+R9TXq1MZV+r0ftdfI2NQ1Gw0nTotI0uELDD98EfM7HqSe9e/GVDC0uSG3U7pqhhqHsKS0W/n5nu3/BOy++IfiHW/iR8L/Ayxmy17wct5ryTX7QKlpaTxyySAKp8xgDkKcDvkYr+ePHCjktCtl2ZYhe9Co4wtG/vTVknqrJ919zFkFb6tmkVCCnzNbu1k7ptaO7120v3R4OYIdP1jUdJSQGK01KeJGXuA7AGv23h3FxnkdGpPdxX3kUaKo1asX0k/zL2kXA+1uJmyu0Dt8wrujWlXm4neq2iRmeJbf7Nr8ZtISFlJMYHasK0Xh6sXfc8/FYVU8TGQ++bfHFp+cux5IXlV7ms8Ti5yapQb1Lr14tKl3Pbv2UPBOgfFnS/iZ8ErG7ube7vvAUuq+HLyKYxM17YSJNtYDlg0ZkGP9kHtX5P4y4yOTSyjNIJSpQq+yndK/LUur+qbXXbQxzKaw0ISo3cdm35o8u8dJo1j4purbRr53too4UaWUgNJIIl8wkZOMvuPWv1PhyrTp5VT5dI2v231PUiqare5K8bLfTp8zlrieK+V7RD5oPPloCxP5V6+KxlKFF3krHHi69NpweppaD8EPiL4ohii07wFrd7bbWEUVtpkrlzn+LC8DNfA4/G4Wo7VKyUeiujylgatVe+3yrodpH+zB+1L4i0/T9M0L9mjxgRY2IiUDQpY1J3ElssAD161pl2aZNgaFvbK78xxlV5I04U5aeRteHf+CcH7cHia+gnf4FT2ESMSTqmq2tvkdOQ0mf0rmxvGuU0q0Wqidu3UqrhM0qVYNU2kvM9J8P8A/BI/9qZ42m17WPCekRAAuDqjTFQemfLQ/wA68LEcf4VtypU219x6FLAY2V9ErnTQ/wDBIHxTqqJD4m/aJ02EHomnaLLM4HsWK+vpXm1+Oa04/u6f4hDI8bWnapOyPS/Bv/BOv4d+DPgvrPwJT4l65PpfiPU7a+8RX1tYQwXV61vu8mLzXDhI0Ls21Rkk5J4FfKVs0li84p5jWhedNNRV3Zc279Wejh+HsPhouKk7vd7P0JvDP/BL/wDZR8PHzG8D32qMh2mXW9XlmByOpRSq/pWmL4hzWvL4+VeRS4dyty5ppy9Wz074Z/s4+APg1eWurfC7wfo2g39rC6WuqaTpMCXcaSKVceeF8wqykggt0JFcbx+Lqw5ak20XTybL6ErwpJHXWui6hDaCCGZQoO5kWBVI46kbeKiWJk42uehChGP2V9xI+i6vNIjnUGfPeGbGT7jt+VZKTg7p/idLv2JbLwcZS0r2tzhny6GNuvqOamrO6uyYvni32LVv4XjZt5F3tXIUC3JxjtyORSUkZWbdmXrPw/4lUNst1lt1kUkTWYx9CaTqXg2ldIHQqWuZfje78ceHrnRYfCvwJXxJbX4caxe22vJZS2JLAKVidSHAGT94UoVKPs5OcrPp5mFaGJjNOnG8eup1kfwzsp4BMLu6ty5AIlG/GccZXgkc8e3Wub2knudsY+5fYS2+GV5FcSKskE8IwbZoUdJNvferZAIPofyqvapA4Nxuh0nghLKY2U29JCpIDSAA/wD1qHUhawcs0LF4SSXKDT4JgTgiYbh09+DWTqOOxaV9xknhB0fy4bWKEZwVWIYP0o9pKT0YJanhfjbwL8Vr7xF4j1rwt8LdU1jw9b3wjutT0m3817KXAyJFHRSDkGvls94d+v4j29Gf7xLWLOzBZ7DB1XQqLR/ecg3hi6uLMzRarcQDOTHcRqrL9c18VKnUpVHGpFXR9Xh68p01OL0Ma/8ADXiOAbotc4YcHaDS+sYdW5oGspVZL4jIudN8YKfl8QgAf7ArojXwKX8MzTxC+0JBpPi5+nirafeMUSrYL/n1+I1VxKekjR0/wz47MgMXjCPB6ZiFcdXE4C2tH8TT2uPtpNfcdn4X+FPxi1mYfY7l7hUj3t5dmS23+9j0968fE5jk1OPvKz9TKpUxsFzTn+B6V8Dv2dfif8T/AB7YaR4U8UaPNPGRcNHfXdtCi7DuKt5rYPTkd658PVjiq/ssPS9/dczstPN2R52OzGVCg515Plemib/I6Lx5d6kfHWtf8JBe2jXTai7Xj2EUUMBfofLWH5AuR2HPWv2bhzMJ5hlcatS3Ns0rW/A+dnThTaUL2tpe9/xOPufil8MrPxsnw4l8YWkniQ2ZuotHjhlLeUASWZwNq8epzXuN1lH2ij7t7XOGWJw0cQqLl776BfeNYhCYIZAiyZwscucc9+Mgf41u2+W66nSuVoyI9dupm8mxsotpBZpPKJXoSOaTaSuc9ZJPcrS+KYZmY3N3NCyKUxGdqkj8OnvWidlcyjPXQiXxDbSBTHMzHG5jlmDfyzQ3HdGt2lqxZdSkcsTGwbnCsflxjnBNK6J3IReRM4CkA4JHTco6468iqTSdzNq7LNqJrtiLaCZ2zhfLXI/Wpck9h8yjoXk0PXLj5YdHlUg5JckDPbgdO1LmsL4h8/hb4jT2kkmmafYQyyQFLe9N0UaFu0gwCCRwcEEHuKUoue5zTjKZp2yfEKG009dX1Dw/c31jZ/ZjqM9rvadcEB3jPyBsHtxWX1Z05Pl2HCCtZlOLw7rUlzKx8R28judzJDaqFU49AOB7VsoNrc25YtWNXwH4OS98ZWS+JdM1HUdHtbyO58QQWNi0kpsY3VrhlVME4j3dO9RVqclN36diU3ZqO/QwvFXhjQ7XxPc6jIt3PZXd1JLp9q1zPBbxQM58tRDuGMLgfPluOSaVKKdLVv57kVaMou8txsPh7RZLhrjTvCtpAxBDGKJcnHvXRCmrWQLV7EjW9xt8sREbcjoFBGOtUtHexteVtyo9rcHfMbVXPUq2euP1rVTsiJXa0HLYXDMbg6WmckAqw/pWfNzTuZTUnTZ638PPiTc6ZfxwQl4ZYxmZZ24Y46qeOfzr9RnWlKR+eTSVRqJ6h4T8Uy6jdx3uoeUWllHljf8AdGeTj8azqS7GvOkj1K01q2vLApYDakLcnacbvf1rmnGUlcmNSK0FsNUFvPiYlrh2yIgM592NRGMVudc0pQJfE/jK8gQzEqcqdrKflB6cVFWXUmEXeyOV+BYtL/4hXnxS1p43GjxNb6TJJJuO98eYwHIBAGB35NcuHvKq9BVFra5oeNfjn47+IHjnTvBXgnRTqz6fObmS1t18uGMgZWS5k+6AGC5GMsN3UkmuipO8rR3Qo04RbjDS53/hbR/Anw2tdW8Vm1gOsXlhHEkix9HLB5MFe2WkA/2QorppctKm21qN05KyTM/UtX0Txv4q0q38L6ZaSzW1wk+pxSRyFIbXy/3hmZwBu3Z2lckdc8DGE1TrK63TWnl1/rzM3KoouLZhfH34w3FrPqeleDLmK30+bT1
gSOVNpVQoVZODgPlTgjoCa55zfM1HsYxhJr3mfBfgnVviK37Xuv8AxI8b+Ik1C607w40fh9bxyG82Q/vWZjyW2qo9amhTTjK79466PNFNI9S8FeO9RvPN17x1qETSyXXk2FpahsKAGYsWPTp1rane2pU1yy9Sz4Z8I2/7S3xKfSfEYRvDvhKM32qRP/qry8b5oYDnqFILkH0HrROtyzsjWMOWPM0c58Uf2cr3wTrlz8QvhL49utC1CVjmztZD5N3kEhXiOVZeBngHHQ1EZWu2y5Soxhd7ni/jf9tnxZ4EuZoPjNpK6VcmKOFtZ0+Jmt513fMGTqhbIHce9cvPUV2tzOlXjduW3Q9b/ZI+Omn2/guXN0kkeog/a4kJJinZvlYjqCR3x3qYOrUjfYdSqqj90rftJ62NQ0+4tTlpGAWLavV3O0AZ68/zqGp81kVzqMG5I86/Zm0e20PVvH+jXsqTa2JLKCF2AZkthESY1B6fPvOK0VGcXdnLRlGpUbtsJ448MiC7EtyP3PziRDD8zPweM9uv6UndSudzq+7Y8we98M6HqvibWWZfs1pLbTpE6fPIrhlC49yBk+mah1bzaM6dSSkedeJheeMLmXVNVaK4upTmGKMfLbqOiD0681tBJPc0dWpJuPQZpgfwfLNNeNDHaRxEzpcNtRSO9XOnJ+6inJUYO+hw+q+J4PixrFzY6OSmkW7lmBZv9Mbj5R/sDj61zunKD11ZxUK31mpZaJfidhoulWMOiTWnlpFNbL5tq2eBgfMp9sD9K6VFLRHbWaUPQr6TqWmeJImk0zUUkkc5Ko/Q+1JSS0Iw84ybSMK50238Pa619BDHHHcvtvIgmMt2es6nccacKUuZi/EW2a80eHU7fC3NgweCWMnkDqPyrWjT55WN6yVSCcehl3Hie51vRY9TtLhTIqD5SevqDVuioPVmUqmmpympaobllljHltuPykjKnuPoa6aSgOg5X2NPwz8Ovid8QbK71LwB8O9a1uHTlDX8+laZLPFbAnGZGUEIPc4rpnicJhmnWqKL6Xdr/wCZtUqKNl3Nzwb+x14ZW4k8RfEi5kvLmb5jplr8kcZ7bm6k185mHE+Jr1HToK0e5H9kU4TdSpu+h6Npvhm18K2i2PgrTbfTIVTaUtogpPsTjJ/GvJdWWI/iSbOmGEpRXuxseMfFL9jvWtd1G88SfDnxE9lc3rFruwkciOUnk4YdM+hr6PAcSfU4KnNbdUeViMj5G6lCVmzxnxJ8Gfib4EuF07xD4Fv4yZNqTW0RlWQ5wACvrX0mEzXB5hrGe254mIwuKwcL1IO3dan6L/8ABP8A/Zx/4V7+zN4jtvFPg0P4n8b2yLIZYB9osrIMNsKqRkM/JI9x6V+M+JHF0cwzyGDwUueFLa2nvd9H/mfJZ5jKuIrU6UJbaux6J8SvhL8SW8HWWt+JPCOoaBoNs8NpoVrrUItpLjawU7ImwzAdeBg8HPNfJU6FfDwlUxF+Z6/eeLRoJxlVs1vumvLr+fXdaGN8cPihf/DmKGy06T7JcWdvE9sHbDXcnGEQDqcmtckpvMMY4w05evcmlg4YibTkk7X6/wCR5B8YP2mfjXo2nX3h258OX1xPqtrtfZqUZgtd3/PZmI5/2RX2GByPDVcVL28tt7p3v/Xc9DBZZRnW97X+vmfKvjX4pftT+JbeXwfd+NbyPSbRdqxaXI4gb/ZGwAGvu8syjhmlW5+Rc3d7/ifS0ctw1KS5Eubv1OYi/Zx+M+sWn9rXHw48S3b3A3wXMWlTOZPfOOa+2gsooQ5J14xbWlj3lk1fFU3qzd+H37M/7TivLcwfs++MZpGG1Jv7CmA2/UgV7OX8Q5ThIy9rWjzdHc9PKHicBGXPTk5bXsdVZ/sRftheItXWwsf2fPECzTqWjjvEjhLKCMkb3HAJGfqPWli+K8np0XJVk1s2rvf+vwG8TWrYlUIxanJNqL0bSsm0uybSb6XXdHuP7FX7G37UPwU+Nl1rvxX+Gg0fRNR8L6lo9/Nc6lC5ja4gKxhkjdmPzhexxX5P4l4vBcTcOwpYGSlWpVYTS2fuy138j0MswmYYfHRnKm1brfzGW/8AwS01zXvFmpa34h+OMGkrqFzJNDp2neGJZ235+ZAzui5zk9cV3ZVxjHAZbToSb5orVWZtj8ozWtmNStTmuWTudz8MP+CSfwx8WPNZ6p8c/E9xqVmoN7oUOiQWV3ACc7tsjPlSOjLkVvivEPH0IKeFhzXPJxWX8Sxm1Rs7dz0jSP8AgjZ8At1vqOp6d451UqdoSfxHBCOvX93HnOBXj4vxA4sxUOZRgvvMMTkPHeNUWqtOC03u2SaJ/wAEWf2arDUL3VPEXjTxjOs0xa101bmONrWI9IzIUzJjn5sAmoocfZ9Cn7/Lzdz6PAcO1KUU8VU559baI7v4bf8ABN39lb4QeIYPFHg/wXq41S3ikjjvrrxDOzFHUo4wpUYZSQRjvXlZnxBjc9wzw2PUZwunZrqndP7z2/7JwlrON15m7ov7Dn7LmjXRu9M/Z38KeaT80lxp4uDu7kmTNVV4hzWVPkVRpLTTQ6FhMPHXkR2+ifCDwX4Z2w+HPhv4fsgCSDY6Jbpj8QgNck8yx9aNp1G/mw+r0G78prNo8iKy3LTQRbSNyoQqf98Akjp2riu76mjUehTn8DNdzmdbhroBTsaOZirD3DYI69CKvn6DUEtbDE+H8KMc2JxIOCGyo59TUyaeoOTZEPhzAHluxHIJppB5kglYlsH6jFJSS3JVO7A+BL0gFo5WjVcKwkPfPHQ8f57UnOTVjZR5SGb4fSSuo07VLxYwFwrW6uvXJLMq5I7Zq6c7L3gm1JJomtPCV8+3ZAkka4LhUOM5BGM9DxmnKSlqY620L2l+GL9jLLDov2nyAqh0hP3WIz2yvIzxmuapXdNWHCmnLUv2ngqyvrqS18QaPNaRTYFvdwWu9o1XqSM8jrWUq0mrm0Y8pLa+D59C1BriG80qa0kiQQwjRQGLdC5Z2O4HJ4A4rKHNJttm/PDlulqJH8PdNjuTdFAGaMgCO5cIx91BwO3UV0ym+SyZzevU6fQvAPwu1PTLuyufiD/YGuWUCyj+0rO4e2u42D4EUqK4MmVAIIHWuKMsVKpK80kuncTqyo1Yr2LlF9U1p8mUNQ8H+JND8IS+JtO8Max4gRIpvs9lpFsHubyRFyERHKYLZGC20c1nRqYipWjCUXFPr0HjalPD0W0m/wA/8i7o+hXsmh6fqWr+FtQ0Se8t1nbS9btvLntSwyYpApZQynI4JFejJyi3F6mFBqpRUlf0ZqL4bW4Bu4poArsC3lhct7Y54pOXM7mkryb0A+GIZ5MW1tE69XaKIncB7gcfjWdWXK9GOKi4kE2gRxvI0ejuV2ZUyNu/Dp/QCphU10G2tjPu7OK1g+03luDD56oHtbZn3M2dqgICcnHT61nicVSw8F7Vq7dl89hyjJwc1tFXZwXwZ+MngP8AaA8Dt4+8EPNFBDq89jPaXymOWOSNtpDIeRxzg+taVfaUKzpTVmrP5P0OTL8VSxsOeHRnzr8RoPilaf
EbxP4k+GHxd1Xw/bDUmg1KLR45WFzGRgqwBCgdOT6VnWqQda7WrW97HGqdac5zir9Ds/2WP2d2+Kmiaxba9eaVql4Y3MVx4g8bQ2PlYGQ+wckexzya+OzfDSljL0qijpdqy1+bPey6tUo4RcybV7aXZwXjHwf4d8J30mhtb2jyxM0cjxa3LKMqSODtwV44NeN9QxlW1SNWNn09096jiaUFy1Iv53Odgj8JTwGYxpsXhtt7I3I/CuadDHQdr3+SO6hi8BUTvbTzZJC3w1ijBvJpFDL/AM/DY/CocM0vaCX3ImpXyqGrkXNO134L2U6tefaJAq5CtfOoyPoKmeFz+pH3Ul8l/mQsbk0mk7/ez1C4+IH7MGkfDfwbf+HPiB4hbxVql3eTeJIbK7uBDp1puCxRFsYkZsbsDoCK5MVkeaQo+0jKE207wcErNPR3v11v2PPo411sbONeNqK+F3u330PPdL0wad4gujaeLL2W1urxpbSW50+48yRSfX5e3HFb1OevRgp0kpJWdmrDowVBySm3Fu6vudTqOsavo149xpHhu6miuI1EksrEAOB0AfkEjmvteClUp4apSfR7Hl5tU5akXFdDA1jXPEV9Itw2ixwyldjzKYw5XP3SwGcV9xGg7XaPFb53zNakcUetsQryQx/KfvuW5P05q3GSL5kt2Rnw9NPK0114mY72G5YkYg47cnFJQXVEz9nLUfD4c0SBxPLqkrMcg/vEj/xq9loK6juaNtH4atypVvMJGSrXRbp9KjkbM5VOZlqGfTo4w9tocTjcMuynIz25o5L6hC7JJdcu9p8rR7dCFwpRM9fcVtCiupbdi/aav4mYPtnULkCFVtypOBzu9PStHCCWhzTWu5oW1x4iaZ3klBZUYHzEJ5xgHGR0NYSRV2SpZ3cpDzxJkrlwgwpbHJAJ4+lODaKTdhw0eVMbbXcCD94jH5jmrbstCXqWItHVlZJYuucAyEY7YBFRzaFwauRXXhBdUUCa0SVScDczkr9cnAojKxray0GQeD4rGXNkiB3UhlWM5I6EHOcjB/Wrk1IyqRjP3WJa6BHo1mILTSmgiV/ljUEj36jI5rNTsiEkkSMjJCXksFOW5If5T7fpT5rj6AlmZ51t4bKczuPkEdu8gOBk8jI4qJT11HBNiS6Xq0482OILwWcNY8k/XIrWHLzIpxbgztvE2n6Nd3KQoiRGFQ0khYkAjnr3+lfq9aykz80rScZM0vBlzqOqLLe2BKqSUjmljKcf3voK53Z6nOpc0j2Xwn4nXT/DkOjWuoAxxJmRygLu3dif5VhKaasjppws7i6R4jS9uHuRIkIX7+Xwx+v+FZxXLqdjcZaGL428R3muQtDBJMjMhSNgR8i56qv0rkrylU901jKK0RnWt14pFtp3wy8Cxf2bHOSr3GPMlAPLHGPmc8n+6O5rWlHktGJnOKWr6np2nQeGvhR4TbwrZkqJfmuoLaXdJcv3eaXqxPp0HQV2OMIRutDFQU7xlszg/iP8ZrzTIbjUGmt7e2gty0ru2fIUD2HU+g5rlc30N6s4wVkdR4C1q68I/B+3utRili1LXoxfatLM+1grcxRnngBcceprVt04+ZjBvmbseA/H/wCL76QBPJcPLJIwjtLdSMyyscKMeueg9K5JuV7vcKjSMT4gfCXQ/DHgXTb/AMaTyDU5UNzqc0aneWcZ2ZHOBwMVc04pITnUgfMPxZ8aftEL4xs7T4S+JFSXVbmSeS3vrFJY47aMfMyqABGFLABRjrXPzODa7gpy5nKW7PUf2EvjF4h8HaV4g+G/xO8SS3ustqDajLeTxBPtcLAKflz1TGBjoD71VKmrNyM4Yiam4y1R6/q/xFk1m1u5DeJIjzbrPYQcR7QvT65496XxN6nZGN43Z4H+1D4X0vxdpV9p19YwThYgjNgfe3Kf0pOLV2Z1ouUeVl34y/CG88CaLbeJPAF3Lpeq2umRTSAjC/6tThx0ZSMnJ6bqlVabSb0Ip4eST5mcL8DvH/xZ/aS1+fxtrNrBZ6X4ZZ4rWBZCf7TvkADSk/3EPQdz+FNXjK8QhKeIlrokXfCeq6n8Kv2hLafXdXlkm1zT2ikd22hLqJ2ZRuHUkMw59K65tShe2p0qKoyvtctfH74022jaddapr1zkByQ0bHdK+cBVGTuY5xXDKFSozWc/ZQ52eAWPhD4lanq0vxG8TeIJdOW+iCR6OgBRIQcqJBjl8HPtVxoqEbW1MI4epOr7WT+RB4u8QWHhBTrVrY3DBVY/ZIVLs4UfMf8APrURpOdRKJ23hTXNYyfhV8M/F/7Rnw81f9ozxZZXcXgHw7rkdg2mxkqbi7cFxHI3O3Kq3BrjzrNnlmPpZbRX72or3eyR8/iK1XE1o0qafK2/6/rY0dN0PT72Q6lpFskEIO2O3QghVHTp9K2SqprmevU9qnTjQglFFP4ha5DoXh17eGdo5tVmW2jIXoCcMw+i5rojJcyUtjLEKbikupjXmkzaeYr7QpxG9sEWIrwSuO/r/wDXqfcvua0qU6TuaGs6k2ueH/O8ryr+3BLof4vf6VtBPcqvCPIuUoaNrw1OxFvKQ0cqlfn6j1Fbp8quhU6/u2RyLuPC+r3GizEfZ52MkDg8Z9KtpT6HHzzVTUytSV7w77KMtdE+WiAffY9BWlL2cVzT0SO9OcoaI+3f2SfjT8Q/hv8AD/TfAFtfLoOo2FmY5Z9EXy1mDHJFwAB5pOcZbNfmWe4ShjsTOs3d3012KjP2ibe6Wh1HirwhF45uLnxDomkQWd+qh57eE4S967nRSMK3fA4PbFeVgsTVpv2dV3WyZvhcbUnifZ1PhsrPz/qxw8uh2jqXEZBJwy7eVI9a92nUtoj2ZQSWg2HQYxJ5Lbpc5ztx/ShS5J+/dr+vI55Qluej/s6Wvw28F+Jl8f8Ajm9sXurRimmafdLvWFiObhlxglR90Hvz2r5jPcVmM0qGETs92fG8T4vGVaX1bDxbT3Z7x8Fv2i/2fvAfx/0K88I6ve+JbiC9a7ubC40j9zIxOSzyH5QFzwK+cweGxGW5jDGOOkej6nyNPLKuHiq9SNmvM+VP+Cmvxg+M3xo/4KNeGtS17xDJqdqdR86y09XK21vaZGFjUHA24B/GvssuxSzbIcdicUveu0vL0OWUo1cJVqVJO/RG14o/Z6+J/wC0r8fbHSPBnhuG8k0W3EdvNfybLayU/ekZsYLAZPtXk8P4qhluE5VpffueZg6k1gJRjH3v60uc1+2P+z5b+FtZs/hF8CvA2sa/pdm4k8X67aIbgS3pHKeZwBznC+nNfSZXxJl9LFVJVqqSn8MXq/8Ag+tj6XInhYYiPt5q7Wxu/s7/ALCn7T/xv8X6XdW37PE+gWUcRTSINcihsopYYgN02Cct1BLnuwz1Fe1RzDD4qq1R97ZX6LsvXR+p9o82yLCZhClOUfaSTcY6XajZNpbtK6Tfmr7o9P8AGHw41H4J2l9d/FD4kaBBHonyz21hrRlMT9TtC8YxxxxnjrXh4/OcDTrxpSlzTeyWrO+jxpkvO4O6t5Fvwbp/hn4h+FrLxV4a119U0
/UAJLeWOZmVl+ueKhVlJuKVmujWtz6zD4jD4qkqlH4Wa6eAbZtQaGzk2DP+qlm4z67jXRGakrM6YyjGWm4H4Zanrem39hL4Wmu7m4OdP1ZtZe2WxIzhyiKfNGSDg+nWnQqxjJ6nR7GU5Kd0rfiXNA+AHj3UNRszZeLvC9osdtbw3Ok3llPPaTTCIJNcLL5vmxb3BfaGwpbgADFVCdOEm5Xlr1t92ltv67mGLourFRi7PujpLf4KeP7HU4bnV/EPhmS606fGl6jYXlxHPbRHIeMSAN5ikHbhsjFVUxDirwJoUpw5rt9jqNT+G2kHUHvdBuXMYAeGKW4LvESOR5gRNwB77R9K5PaN30Ol3SsX9M0vUdPiEU+pSzDgPDcZkXjGOo/lSctCYrU3oF0fUE8uRjbOOu9S8bfQ4yoqYycZXZq5K1x8Ph4Ah0izGRkmEAofqetaOto9TO/OW4dA0mQFXjZSv/PIZJ745/wqOdj5WtBs2g2EoKJAwZcjJyAfrV8yKVkiGXwwm4TCxYkdTjBJ/wAKUn2J5rif2HaCMM1gFcZbEi47dT6UX0JUW2SDQoZE/wCPJcnOCTnA7/hTT1NrcoSeE3kP+j6e+A37wRIcd/yptu5Ld0R/8I5bSxrNNp00TRn54570RlvfYBzjjv1olLQS1A+FbedCfsKLsOHEcqtk89cnrWd1Fk8liO8+HemaneW98+q3UEsAJi+yaxNbJIM/8tEjYK/0YHFTUipFK1yW68C3DurzLFdFQd7NN83PbknPr2/SjljGOhFSSeiC08JQWQwlhHb7ozsWXhl59en86hNPY1px5Y6kyeHopVDOEnkL7kIYNjg8nApz5bak3TZY/sa4t1W3ZZQACWBxgHPqOaUJPmuaxWhY0/T555UZYpWXeTu2FizDpwDRV13M5y6M05vCWvtajVpYriK2aPc0nzFY1Jx83HAJ6VlCpyuyI9rSvy31K154efRXFtqMd2kr4YCdSjAEbhyBnbj1HNKT5tLiVWL+F3RFe2iatZC2RL+2kgkWS3uLDVZIJAw5z8jDzF/2WBHqKHGcot3JUeeW5Ve48UXnnm4l85pZSSzR7SfQ1VOHKrI6rRVjMsofjZZ602p23xAkhhi1SC8stPs7cRLC0Ksq54O9sM2SeOelZVcBRq14Vajd4u9lp/TCpCklJJbq2p5xr37Pfxjt/FZ8WeFdXsLK3e5mvdTtbPTMvczFeHAQqA3GDnrnrXPhsG8LUk4Tdn3d2edOL57pW9D5d8XaT8SPB/gu/fxt8RZfDmoanrtw0/ho3Drc3CszeXOy7SgXG3jeeSa+hy/BYDEYtuqum7/Q4IVMZh8M4xnJJu7V9HbZ9tDn/hr8PvE2oW1ymm/GHV9OlkhZpJZL2ONGx23EHmvUrZFkeLqXqU07Cw2Pxqi405tW13sF34R+KMQZpfirq924jKlDqaHI9Puk9K4a3B/DkpaUEjR5rmdVW5m0IbWK10+SHUbTX5Lh23JcReJvLQgdQUER4/HNXHhHJrXUEvkCzjFQVtbnP3aeJrQLI9veSx8ITJr0uPocY5qP9UMqb3t8kZzzbHSXNYXT9X1y1vmiv/Dc01vg7d2tXRHGcDh8UpcHZVOTSm0uj5UVSz/EUVZxv82ewfs7+OND8cfEHS/Avxe8RDwfoENncG11ldTuyPNABjR3Zm2KSOwrzqHh1kHt5VK7bi99EjnzHinNakIxp6dDr/hJonxL/aY+Oj+APCS3d9a/PDp/iLX3lgsoo42kJla6n+TbtC4wSSTgDnFfM5nwlChiVhsq1u9L6WXzPfwXEtOOXvEY9uTSS7v5HI/Ez4vRaZ8P/FPwlN1N/wAJXp3j2GOK1gtTNbzQW8VzDNIlwg2MpZ0K4PzDkVrgsHmWR5o4zs4OOtn9roVPE0s0pQxEbrfRpo890rSPitrcgl+zTRg9CUx/+qvajj61So09uhg4NrRHRaV8L/iBdkNfXsq8Z+VuPzxxXQqzdNX3MvYSeqRtw/BXW5IkS5knct1VZcge/UVmq0myoUmknJGhB8C57cJO6KVzgs7579+4rpjiIJalyhGWxp2Pw4s1AKuM4IJGNrYHTNNYiD2Zg6LuXovBtsuMRNkckKpIPHTJHIp+1SKjCSLMfhi1tojiGRHDbWVkYDHr0PIGabru+hPLdlj+y7OR/JSeEScbIzIA5HXPvVe0JcGnqiYadcF9hjB4y24ckiq5ieRix2REZtwq8Zwduc1PNYVnsPNjCowqMzEHeDJggZ5H5UNtkyuiR9NhlAe1tpRjGFlcbhn8uKSbSCFyGTTFR1IsVxySVnbBNPme5tzLlLdo9vIlwtq0imzkiim8yFowXkUsoQsB5uAOSm4LkA4JFZe39/lZmqsXLl6iExAFZLaYy5z5jxnHP0x2q3ZrQTvzCSfZVQFbVMMc/NkgDPTrx/8AXpIfKQTskcZWKziAdcEqzE4PGTjpVdSoqyK+pwves91d3U0TJFtVLedwhIHOfm69+K3pKKkkaN3RcivbC71y3u7398IeIoi3yq2OWbnn9a/VJNTlc/L6ztN+p6BZT3eu6a9jYwhYrZN1w5UKo9uamVNyJhCzKfgvXzDq8y61rU1vaxg+XDCwUlvVua4ZLkluaRq8nQ6O01CyWwY6RPLcxl98kqoRxn35P1NJyurXNXLmVzL1rxrpEU4vZBEogJWNi2SPUkj/ACK56koRlqax0SIPht8c4Lc6h41sUDXc0RjtpGGVjtwcEpz1Y962pVVCPNuVJqasQr8RfEfil21HUbn7JAScAnBI9/U+1TzubbFNqKSRn+CPL+PnxDTw5aQv/wAIj4ZuFn8QXoU4vrhTlLYHvg8t7YFaUqac/IiyXvSOm+PPx+0SzF1brqAhWFfnuJsMoOMLHEgPzN0H0NKVSDm1fRBGLndo+ePgjMfjF8Yj8RPFKrH4c8JnzbaO4nH7+6JIG4f3gASAfWsFGXtubo1f+v8Ag+uxVNc7aZ2fx9+J0fiEybbgSpKSkcrN8qkkA4A6kHC81VV63ewqtotRR59+zJ4ct/FN5rfxE1nXoLWC51T+y9NmuoH2R2UI/ftkZwTIevOdvA9FDmkrp7HJHmleVhPE974N8OfEOPxrDo4uF0268uRQcefA+BKzcfKPm+nFTUldK2htRpSn0NH4n6DpWj6zd+IvhN4hgZYJ1gntJn+XeY1l2cn5TtdeR1zWMXGLdnc75v2dP3jwz4k/tEaM6QaZ4kUWskVysuprL0VEYZcH+IE+nNWqt21Y5YVYu7tsd1q3xM8e/tQ+EpNatLG60nQ7jTYra1guHK3N3DGTghScRqcn3IPPapjRalzSXy/rQ6Pb+2VkrFP9nTVbH4TeILn4V6xHHbLdSSzaJO0eFZjgvH/vZGR61dacYdBtxpxsc3+08t1eaH51nctFdW8sc1pOAVeKRWJySeQDkfnWVOcpta6HLNuR5b8JP+Ej+OPjCT4k+NpIhY6dfNbaHp2/908q/wCsnbtnOcUVPerckTXC+0rycqm3RHX+JNXmuryWEMoeQoqtsHbIwB6Vo5vl1O6bsrdTG8IN
pMWoX3iLVrZpfIkFrFbvGDuwPnPNKDVzCnJ3budJ4K+Jml+DNP8AEHwd0jVhY+BfH99ZnW4guUs7uFjsugf4SAxVsdQfbFeDnuVLEzhmCV61FPl812JkvbTj9mzH/G79l74sfs/eKYox4XvtU0LVlafRdV02Bpo72Ic7025yMY/OpynO8Jj6fvNRmt0+jOqap06vLe7eyPB/F3g34v8AjAxeIL34U+JYrKCULYldEn2EdS+7b9Pzr2ZY3LaMGpVY8ze10cKqqVT3tPI1YVEmh2l/dSAMpEMiMMEMMjv3zgVeHinq9T0KzvDmQ3VY1ZT5abZFU4KDJwfWuj3pOyRxzUpOyRwz3N5omrvBJGY4Z23IWB6/XtScqdN3bHCk4K7HeKYF1qwxvzPCdysBzn3pOtJsqTgle2p1v7Mek+FNe8aSa54ptvtUel6c8y2azBXM+QisOOxOefSvA4ixOJjg1TpP4nr6HBjcbLD0eaKv6HqOrW/xM+Husp4ztVa104tkX6SpIHUnDArnJIH8OM189LE4VQVOsn936nHh8TXjWVTWEX18j2fV/jV+zn4I07VIpPjHfeIVstDhu9LFho0tuX1FiN1u6OAQi8neODivEli69aMaNODUW21qrX+8vMM8yjL6tTlm6iUbppdTwPxp+0p4jkuZdTs9E0S0lvPmjM94HYk9CY1PGfTFevhXXqR5Wnp1UXb73octHjPHYiEYqEYp9b3fzRH8MfiP8UfHWqS2/iPxFDbRW0W57C1tDA5zgjJbn8q668VOF4bd7p/kexSzLF17wlPb5HaNPfSX8Gn2sI+0XbiOD58l2PGTnrXm4nEU6FFyeluphVxEaUHUmfTfwjtPCvwR0M6h4nsLeb7LD9o1GW4XHnYGSueuK+TlUqyqc9W7b2T63PjcTi62LrKUtl0PM/BNt8L/ANoTxB8Qv2s5HFvd6e5g8I6WX3Q+WCA3JGRzzn0rrzCdfC0lgY+5F+9K3meXj8TCo/ZU7RT/AAKfwR/a3/aj/aGnl+Bnwd+Edl4cis73yPEniO1uQY0QHk7l5lOOgPcirzrLsFluX05VsS3dXjBKzfqThaU8TJUqf4H1h8XPjn+zf/wS0+DsN9qNxB4o8WataebY6G53gTkZMku4csSep4Havncvy/F4/EQWGnCo5r3t7U/J3S970bR2VatLBv2claS28z4a8W/8FS/2jvjf4ofxB4o+KV7Dby2cv/Ek0oHybK3I5HHfHftX188hr4WnaLd/h5m7XuraI86pVxNSak5a2fyOU+DWk+Mf28PiVHbRG8i+G3h6UNrUxYr/AGhJ18rceWJ789678LkkeH8PzSSeIns/5V3/AMj67hfJfr9ROavCO/mfbOleG/DnhHSItC8HaOmk6fBGsdrYwYVQOgxjpThHkTe7e77n61Tpwo01CmrLsOtJ4Reva3whFwDtgt5CS59xjrRKpG6SOynVjTdnq2dd4H8LeJL5J/Emm+EtRubKFGF1c3Vufs0OByctwKmrOlGPxWbLnj6NOnaT2Oz8DwaTrNutppd+s8d2oe3lEIAbsQrAE4B9DWf1mndpy+EeHzHC4iPuvRnSN4LsVWMGCKcLIV80OG2kdQT61p7Xnaa1OqhUjVhzQd0Ph8MWby7IYSABkRsw5x+H8qr2ivZGko63HXHhSKbYxtWyG/do/IXPoaNHuQ32K1z4QnJ8pI5Qy5+Vz3p84a7EMGm6tpMm61EqEN0XOP8A69F4sm3U1INdmeIR3lvbsxbcZGwpIzyMj1qXFrYd5LZlxJ45JBFbW0sUjgBRuEinPcY5xVa2stzSKbiXktJYgyNYqGziTc5XPPPFa6X0MGmnuOXQLRQSti6hjtCh8k/Wm3boWpW6jv8AhHLMAP8AZiGxySSB71FhuorEj6GCuyANgcsrPjNaW0CMvIlTR1Y4VEOV4Jwdw980JtMPQBoNrGR5WmQgNguVjGSahq7uO7aJV0BZCfLs0znlcA/jVPVAm3oRvoEsYzJaq/Ygp3qWlYm6uH/CPsgybePBByWjxn2PFRCGpcpNFdvDUFxC2zTosdAsJG7HofT61NSJNN6jhpsOigR3V21vE0Jcn5pd4XJICqpOcduprPmlA25mloF78OfCfi/yNWa1iMkhD29xG81rInXBK5DKfwyKlS59TmnUbkrorx/Bq60/T57bTPGviW3triMx3EEevysrjOcBXJ4pyip6GfsoOV7fgSTeDPFk1wkJ8dapeMiKgS/VJsKowEJK5wB71P1ead0zVRjBWjEY/g/xcl1Ffbba4VGxO1taqrSJjkEHqe/BHf1qY0qyfc0puFzQuLfTIo4GZN0zRb5kNuVWBySNmT1OOcjjmuhR25i7yk3dWHRw2kkW1fnZhyRH+Oc4quaysDXcqyaPpctw1tbWTvO5Pzg4UgAk9Ezn8aiV0tUQ5Qa8/UxfEvw98N+LLZ7HxH4TstRjxt8u8tQ4x1PXJrnlKT2uVfnVmec6n+wr+zhcie7t/h9PpzysN0ml3M0IJ68bD+ldFHF4qikoSY3hcNUjdwRg3v7CPwuu49ln4r8WW8WCyxNe+av5So1d8s3xvLo9TFYDCbctvmY2p/8ABOjwXcoXtvHWrKmMEPpdlnnqP9QKqnnWNUfesZPLMG9k0ZN3/wAEzfC92qxzfE/V9kfKAaVafN/5B9zWX9rY7V3RMsqwSW7K8P8AwS6+FyOJJ/iP4p27ThLWWO3Ujv8A6uMUoZtj+Xc5amTYWcr6mpYf8Evv2dohi8m8S3uOsd7rtxtJ+isBUVc1zSpHldSy9Ap5JgYSvy3Oktv+CfXwOgt1tYfDqzwx4EcF/eTSKMdMB2YcZ9K4lLEc15Tuz26FHCUKfLGCS9DpNF/ZU8CaTCIdJ07ToVjAUxx7iqZ/2VwBVSpRcdTSeJjJWsreRqR/s/eF7CeOCS0gk3H97JGWP0xk4IrFU/e1OV1G37q0Ih8CDFOBJc6S0BPy+XaOjBeeuXxXROELaCinfUhvvgzpMgMdnaWxlGQzsWGBxj+KnGKsUtEZt58EfEttMmoaJbWEtqTiQSztnn0656DFc9X2kfhRmlFy1ZdufhakFvZ3N62mO08Je6htjLFJaODjafMQpJkcgofrinTlUsr2JgrzkpRfkyCP4d6NFAxmurmOckjy2gXGO/JwDgVor33NVGNth118OPDkEhitNbM74XdHcXaQ7SckjB6/nVKTUjju/aWsZsvhrSI5vK+zQ9yC8u9vw9O1buUrG71Ww0eE4pmINnGwx8gVsluvYjPvT55Iz5ebQlsfhlq2oz28ejeGGne9uVht1R4l3uxAAZnICn/eIqJYiFN2ZE4KFJzb0RjjQILhmj/suS3dZXjkiuCokUoSrZ2FgeR1BII71pGXNsYcqkrohk8PWQ3GSwcH+EAkDj69q0TsioxaIX061jAWWxwOuGY4B/HpRuJq+hHcAJGsckSYTICtyB645o5ddSuTQqA2kaF5LWLoSGGOR+Ap2I5dRHWwkUv5O0A8Iq4J9smhpIbS6ELW+jSXESJt8xiFj+YAgk9Onek3bUaTtZDL63t
Y5HWWyjiZFIkDgK2fTHTua1pTbqL1CXNtY860LxJDqusLpFmU88/vWiL8/U1+tygoM/LYqbfvanpWman/AGPaR6d9vBg2lpFDD5m9Tn+VTOorWNEnfQtT+Hn8T6RJdW8qW7bPljeQgynPAYY6VwVINq6GnfQlsdMuvB2hf2l4tEbTvnZaxOyxxJjAI+tc8vhs9zWXvQSijh/GC/25HJZWmnMyyxHcqZXapB3Z5yPr7VzVdUPlbRwvhPxPJ4Nkkt5obfykTZFasWKxKOF3YxvbGDgcc81TlypIS5k7nQeBX+If7SPiGTwf4JlNnp9gwXX9e24isUPJjUngyHHTtWuGjUqyeuhorP32eqeOviT4G/Z4+HyfDf4ZgqRAYwRId0zZJaQ88sxOSep4qq9aMfcW5zSh7XEOor9Fa+ml+n5vr8j5K+InjzXPGWsW+lzXLJcXk6wxJHMd0kjnAxzkHnnHQVxRUp3j3Oh1FSsj0L4j3l18JvBNp8KPhxZww3sMQku7iaIOssxUFnIByQMkc+ldUeem+RK36mNWpJO8Tx74kXfxR8ZWd/BY/Fw2aW9vHbx6XpmmpDI7lAztvOSRk5GMH8aXvyfLcxSdWScmeIfCPxd8TvC+lXvw/t/iFrcB0W7dhC052lX3MJMHqcn862hRcJOSdhUqdaN4p6Gp4v8AAXxl8a+G5NSv/izqklhLOUZUu1VnlwrsHxglcFDzwe3Q1hJ8tRnalKjRTbPafgn8VdT+Lfwj/wCER15YoNX8LzG0AtlJE8DKWVySSWIUYDMSflHPFKNG0bPcxWIVSajJ6vT+vuPOPi94Gs9P8RaDqV9bieBNXgS4SU8qGcDnPXqDQo+zlzG7g6Svc+xR4e0SPw9byWGmW9hF9i8uK3kwJ5kTO5wy8Fc8gejCrhWdXV7m8b6dzxv47f2XqNpMbGVklguVe0vB8rwspJB/2fX6VM4KSZnUhKSucV8QfihB41+D154i1ZY4dU02IwatGGziRVBDD0DAZrOdGdKSi2tQekLo5/8AZ+gW0+D2hag8LRQTWrTKXBC+ZIWO4nsMc5NVOMYTs0dOHVT2epznin4u+EtI1ySPS521a6hyFgtFLJ5n+2/QUTpztoxYmpaOjOV0rxN8QNfmXRdN0ZLFYZWea7uHypkbJY+/Yc0lanT13OehCrze9sbkfhyOxs5LW61GeeRgwnU4COCByBWHPKcrnW3Hpue5/AL/AIKC/Fv4EeApvhJrF4mtaOthPb+Hr+9gSSfRTNtDiNnBO07VGPavmsz4boYrFRrYWfI3ZyXRtHHHCU5YpVpfGk0n1SdrpPs7K/oj1z9iP/gqz4K/ZP8ABMng74kfC3xH4onvEnWN52tru0g3ncXjiEatETxxuPTBrxMx4azWderUw/spKcWveTum1a6d91v/AMA0rYTEayXvWOA/Zu8ffsJfGD9qTxRqP7TOmPovhbxNcNPYQzziJrRmPPoEbOTjPfrxUV6fEGT5XhoU3KfJpNxs218zmc8XCnGnO+r6FbUvg5+wh8Qf2/vD3wP+HHjbXk+F91dLb6x4gsbxGfzXGFVXJYKuc8+nSuvDcRZlhcnlisZzRd+q95R72RhKtWSum1bqHjj/AIJVQa/+1T4g/Zu+Ffxq0aCOwhuL+wuNf1WJnnsogzbkK8ElRxnv1xXHLj+lTwX1hU3Ujzct0mvQupj1TShOV2zK8KfsBfBW0An8R+MNb8QTxgCeCCRbSDcOo3DLEV3/AOsmPxUYukuW+p7dHLvaJSk3qd3pnwO+DHgOOWXwl4E0/RYjEfMu7iEkle4ad+tcVXOIyrclSb11S3/FKx2xwODp071Eku7POfjJr/wPv/CTeAb6407UL7UpyNLn09mK21yvzIdxAGciuXF4/GVbfV1pDWX+HZnz3EWeZSst+qU2pSbtddPmeHeAvC3xf/aU8Ua/4Y8JtAde0PTJJLhZWCtcQQrkjngt16dTXXWWU8P4ajWrp+yqP7m/0PhMmyzH5vjKlOjTvZXOh+AXj74J/AjxFYS6/wDBew8e+JY7ac6+viy6e3t7EspCGHZz5iN827nkDHqOjELGYqt7Wq7YXaMIN3mvOS1VxYSustqqcqSnLVWeyOx8BeN9a/aM1a9+IXxB+Lltf3VjYtG15fRxQvHDEMLESgAYgALzzxXzlVYfhWKoYbDOMZPRJt3b663PYwuOre15pa36En7L6aH8XfiTqHjvWfEFrbaDou620mWaUqksw+83GTx0rHP61TCqhg5+7Op70m7+6umye5xZtmcKuK+rw7HrvxwitPiJ8P7vTPDuszokVk8e/wC1fLcEd07/AJ1z+1cq1Ke/Jbftc8uVZy5VHRo82/Zp8fT/AAv+GY8FaxpaSed5sclvJbHv1z9cZzXVmWMUMxqVeXm5lZeXoebKlOpWlJq56B+wN4b8Ga38ZvEfxm0LTrbR/B/gS2kv9VltHIi1HU8ZjtyQcM2eT1rzOIauaUMspe0fNUfw83SP/BPbyXC81ZypxsoavzPh79r79oHxd+0p+0HrHizWrpo7eXUZFtoXkYJbxBuAAegr9I4XybD5LksWknOSu7dWz5/F4ipiq8q0u+hf/ZW+FPxH+OXxLutP8KyXGn+DtKtgvi/WrZhGRbk/NGjMOXboMc81rnOOy3LMJBYlKVabvTi+/d+SPUyrL3mNaMJ37vyR+kHwn8JeDPAPhCHwr8KfDsmhaFbx7rXTpphJLKOpklfAyT1NeK5SnWlUnJuUu7vby6H7blmFpYTDqlSVkjs4NSuLoxRXwQqw2pGLc5H4+lTJTT3TR6lNx6bml4d+Aem/HjV38Lpqs1k+nxm71HW7NgjWSLyFZu2fSvGz/MY5ZgVUpyTm+nXQ+dzjMHTnyQ3R2+ry+INT+CmpfC74b6/ql8qrNPqGoTTCC1giWLYi5481yQzbRkkkegrxctxsMTh6c5ytUu5Wb3Xz/p9NTzMPT9rhF7z9pJttNpLlSW347/I5f9mVfDfwi+DWn3njDxElmNB0+RYDqkxR57lsDLBjnbkk/hWWJq0JTqVp1rufb8jLDV8tw9CCm2kk7Wbd3brqVv2TPiJoU+rXXwn+GguNXtYb+4vtV8S6ldskd5eTyFvItgclyM9Bxg9a6/7ZnhFTjL3+ayUYrVLuysh4j+o4j6hSpymu+lte1306+ul3c9313U7bwfqT6N4nnh07UFiL/ZLuVfMx1yBnpivoYVoVG1s1vfofexzXC15+zvaS3XUXw9478Ja3JLp+m61DNOkPnMEdSygcnj0xRKquVPmOmhjMNWk4wabXmWfCvjXwF48tp7rwn4ls9Sit7o2t1JayqxSUdFOD15H51bkouzOnD1qGITdOSlbe3Q2ZtLQQZ+yEqR3GSD+NXBt7l6zRSm0e3OD/AGejkfxheffIockLljFamH4o0G3nsybDw/CNSjIW0v47hk2jnKyJyJAfbBHrWU4VLc0ZWGoSavfQpa/qGsXdnYafqV5Gk9i7i1ntlZd0br80LBmPyhsEHrnvWNClWo4nnlO6OeOH5Zt3uOsJ/ENpiK3vrhiCAy
kZ5/wr1faKS902jBTdkjRstc1/azNOmxQWlkYAKp7liegx/KnGV9AUY3sbXgnxFofjbQY/EPhzVrPULOSRkS8tJRJG5QkMAwODggjijnu2hOacbpm+bMMmEhHXDbc5U+lUmrXMnJj104j5RbsM/ex9e9LfYfM+gqaLDIpYQBSRx89DRfO0TPpc9vERFZCcg9DKAfzNErpaExabuxz2aKzbgRlflEuDgYHHFC0QTdxPsFpcAs1tHvwP3oXawP1qJKT2GtEK+gzKBgo8bcjY6huD1OMGh3QNqWhQuvD1ow81ki3NnO5NrenUc1KjzFLzG22iXtrys7/eG0KSf/105RjHVFpq5O0moWgUSW6sN/KBMfrR0HpYhlvZCJGa1hGc7x0PtnH8xRB2M+R3IXa2u3El18owfl83IP4Grlqim5IieCwRmdpHXggbTyPb2FZclhpyluVZLm/kgWysdTvfs8bGQwmQ7EOMZPpWnLUqJqKulq/LzIjTp81+pRuLhXZpQ7M54LM5BY47H/8AXXE2nsdNox0KjSvBmWIgYblTyM/Tv+NUr2uEW2itcNM6CRrdMs3JjYjOcZPFE5aD5rMiMd0ckXEmc/MW6H2qVa5XMpDHDqGIRpNow2JWXA46e1OVrCmlazIP7QglQSWieYpbgG5Zsjoe9OE9NDOMZN2GPqt1IpIhkACnByeOB703Zm3LYryXeoF8qWLEbQRwe3p0qeW7uJ3sMNxqD8i8kC8ZIIB9x71onoZODfUfDcy7QVu2x1JyeePzoTV9AUEtyVbuRyQZGyWwTyB/9endMCMzz7toDFjjAJIHvzRd9CJXHHUri2hZfKmAUD7uAWOemT0pStLUyepLMReKFuZGHykgFuB0I5H8qqysVzNlRNK02aX7RPczQk7trxTscggjgE4oajY0jPTVHL+IfhZqE+sPqsHxj1toTIsjWL2sJRSM8A7c9yOvesI0ZqbfMc0qd23Yvw+HZnjkIne5yuDNMMnJ+nSuq035lpvlsMg02ewS7eS3gaWS4VrS68x1EMIUAxmPo2Wyd2c4OKyeGrSrqaqadi+aKjYqT6XcrEqJewINx3+XCeSfUZwK2dNN6mMm7aGbNp1zGFEmpMRkDCLjv7DvWitFEXdypd6eVbJug5xht0h/XFNMq3MjPujYQKGuLhE/i37srjj16f8A16bmkYyjy7mZca54eiUn7arFjtXy2DF+ODxT5k1cuKctinJrekthl8+Rh1KR4z3Izjmo532M5KSZXuNZtZyETRZc7dw33RVj+A9OMUm5SJcZplVtSf7K1jJ4asbgyRkSJdgybx33A/55pOLfU0p3UipJPNaxyvYaFptqWYu3kWyjccdTx1rppRtNFybsz5+ufiKkVxHD4ciH2+6ZY4RFzLO5Iwi9etfrc047n5K3GjUsex+G/h78WdN0Ea3411uyF8QCumRREvbKRkB27t9Kx5E9WzN13J6noXwdC3F//a3iq+DRIoMVtCpAZwepJ6inzwStcpy5ranTfEHU4dfinvnjVYmVWOAAHI6KPyFctSlOo9EdtKyhfoeZ69ftFBPdxskUs0a+YxAUk+nuMcVj7Cb6Gl4vY8h8cW+p+M9TXwx4d1Q2UtwMNNBGu6NTjLD3/rWbwspPYJQlJbHq3h/xBpfwm+HEXw48O3EENtZhZrq0FzvmuJiSTPO2Ms7HJ/8A1VtOcqUNXr1+f+Zm4xjBQkeMfEbx5careTahrF6qxbiyBcAge5zx0rz9ZO9732GpwjHVnnvwW8SW/if4pT/EuSBZtK8MkpZMh+SW6fAz6YQc59TXpYbDyg+ZmkFzrmWptwfGKLUPiDqFzr80b77craSNNvIPIyevJ9D2repRlOXMc9R+8efeK9T1K21tPElhqUn2cNkbFI2tg4BH+P8ASuZ0505XsTHmpPmOXk0m7l8TQfETRptqTxNb6ujcAox4Y/Q/oTWFSTqND9pKXvHSeGtSk8RacZJrySCS3lka4WGL5ZHGQePfgZ9hVqHJG/UlVVN2Om/Z51238JfHlrS4jjEXiHSZIlhbjmP5lyMfewTzXNJudRJHXRUYassfHSa3udIupZmMZs7qOYFhkrh1JHv0HNaP2luU6Y8tW6Wp6brPxGu9Qi3tdlFS2TaS+BjaMj8a1hSm1ypGllBnBfEPxX9sae2YosNzCHYD+8FI/Pk/nUKLp3uRWrRirHlfwxh0n4kfFDxJpviEEeENC0qPUPGDxkjzFRtsdsG7PM7LGCOcEntXDjadeqouPUjA0/azk5bI0/F2t2WtaMljdqsNsuDb6XbsUgt06BNo+9gYHPpXZSjONPllc6K0lFW2OE1iCDRJ430mOJXaaNbWDywBuz97j0GTzWFeck9DmpxdSWhuNqEJEnmyhpGk3SNgZZjySaajOWr6nVVbS1IpdTgaUJcFWbBxtOMjtU+zcFexnBKTKV7MzROWlLZPK9+OlSpyhFpdToUdLWF8I+I0t5rpLiXPlyKYznJAx0x3rGakoNo0oVIxbitzqVvtFu4990sL7+gaIcf4VkoVVI7ORw1LWmW3hhIjCNMsJS+N37sDp0P1rOdKtLSSvfyM4Uot35Uz0H9mL9nTRPj78e9L8HaILTTJWVrrVtfuLt1FjYQgvM7PnIULnjoSa+e4kzajw1kNWtOnzX0jG28nt/w5xY2ng6VGUpQVz0r9pP8AaQ8Mz+IJvC//AAT++E0mr6LpQNpceO/FUhMdxIgwzQRNgEdcE9ewr85ytYilRVTP6/JKWqpw3Se12j5TMeLa+GpRjTX4XPnDxn4U+N+q2954z/aM+Kt2unpGsk1rczCOOMHlVSMcLnHGOSK+pw+dYKrbDZZQTb0va7+97eqt26nyOYZnmOOi+eo7W1OF+DvgyL9obxvf/EzVtW/sD4ceANhl1WQlUMzgiNBx8zsecele1m81w9lkMHTh7TF4jp5Lf5FZVl31hqKdox1bZueKPAWlNpV1rvwj8aalcTWu+VtW0+0Nv5as2NzlBuwSQMucciubAV8a6ns8VQTgkuZaySWi66LV9t2j6SthqWCw3Nhar5n1Wn3Hn2qaPJr2iyahd3MkmvaagW+ljywuIsdWPtmvTli/q2JUIpKlLZdmfPwrqjBRb5mt2+pofB3RvhZqs4sdft9Xj0RFafV4NLmZPMX+MsOOM55PYivMzavmdHWm4uo9IuSvbtb5HLXxNSXvQsmz2v4S/Cb4ZeLdSuNY+DCa/pngTTrgHUI4rYkTyuDhHk5Ck7T7/KfQ18zjs4xeCUIZvGNStLrezSW9tPx2VzzqVNvF805LnaPQPj/+0P8AA79mbw//AGYLvTNc8STWbW+l+H4DvitS4wGlc/xc98VOU5VmWe4jnoR5aOt29dP1Z6eGoKrLmm9j5f8AhT4O8YfEq38S+DviH4i1rSfELaj9osmSVlWFe6ArwV7DBr67NswweW1aGJwtOFSly2fdvuXWxtKmlGk1qj7N+FXi/wCDHwN+BWo/AJdJu5YtM0KS/j0qzjO/WdWkXajycZZQSehP4dK/PswxWIzTEutXT5ajtzXsoJbfc
j67C5xluAyv97C75Xou9up+ePjv9mP4+2FnffEj4m2iaDpkkwlkNw4EjBySqqgOTX63l/FHD85QwWCftJpW8tPM/OqWNw0JKEabbfdaHTfs4fCn4ja74bvvGC/GG68LeFoJRvZJSiXMg6fLwCeB1rm4izTLcJiIUPqqq13+C9T0JZlHCy5KafN1PdvCOs/GrwlqmkS+KPip4ql0XUtPlu7HULaJYLSdI2MaMJJEJdPMVgSoIJjcEgivmq2OUoyjRoxi00mm25a+S/z66I+my/iWrSpPnk3y6WT1vbS+j8nbqu257v8ABj9tBvhZ8Er7/hbeoJ4i8UG5xoz3VuUMqE/IQcA4IK84xiuavinWmoUItW3fRW3NY8byVF02rvoz1PS/2jdSutB0L9jX4Z+IoND8e/EV/tvjfxA0asukWp+ZQCwwWA6D8TXy+WYDFZ7iXi8XZYeMrK/V38uhGAnUxyVKc7Sm7tvojzDXvgDYfBf4zaje/Ez9qLW/G3w+imiC6va+JPs8Nvcg9HeH5fvZxjvxXrZy402sLltOHtLtNxje68r3OfNZ4TC4qK9u5RXmZn7SvwL/AGd9V8daRF8OPjX4q1TUdWjWay8Pr4ninjmDjGJh5jlT35APeuHBVM4wWDtKjFxevM4K61t02fk+lns0d+LWWw9hKhq2rpX39V0/p9TrPDH7K2l/ss6bbfGH4jftS/2dN4fmTUrTwQNe3+a4BKowwCM4445reWNqYijbD0I+0lpzcu3ma1qGFwkFiZ1bNaqKepL4Qn0X9sD4lP8AtW/tWeI76W8vQf8AhGvCOk3jQPDAD0dFwW3YGc8EGvDzjNcyw2J/s7AR91/xJ21fo3seDh6v9qZp9YrtqL7bne+JtT/Zd8Z+JNQbVrdvD2p6jY/Zrn+wfE32a9ECjoQpG3gfXjA9KMFhMyXJSw7+FOXv2t7qb3lo3ZaK929Em2kfRyxvD9Cna0k2raN3+Z2X7CHwv/Z2+D1nqGmfs4+K5JbSUS3Umga3qBklvbzoGWRjy33R/wABFRjuK87y2ssXmVLnTstFZJfI9Lh/H4XLas6mEd1KOsZPd9DpP2bPi58X9W17xx8Sv2mNLm8NQy6mNP0Hw9fTBYo0TgGM4wzM3OfoK63xZgJZhCjRnzQcU27Pd9D0uG88xM8ZXr4u8YvaLvZeh6xH8UdKsUmS+04CUxJNhG2lo275HXFfRYbHYWq3yb+h9dTzjCV0+Um0bxf4b8fMt74fsAEt4zHMsvLmXJBOOoAxiut1G1d6I76VejUhoxms6JP5UiXkQO5NyhVAOOx5z7U6cufS935HRFNQuloU/Dlzp+kpcy+JdPm1EWVld3QH2+G3Fw6JuSJ5nwIlP8TnOBzU18TLARUpK669Dgx1XEU6V6PxX6nyd8Ufg3/wUS/az8R3em+P9LtvhZ4HtZ0Wc3F2BYWyeZ1CRlptUkwRgPsjyeRiuihjMG5JRd2+i3fz6fK79D5NzzbGV5Uqit530tY+zv2dfhb8Nvg18LNE+Cng+0nstG067ka61262vdX1xM5eW4eCMKsKsxJEUYCoCAB0rRV0oOpJKKTS1evlu7vbV6+bu1f0suw9bLsO6cG5W2u/1Oqmla2u57FFdTFK0ZZxt3gE4bBHQ9a3w9eGIXus+hp05zpKbW6I0urgLsZiVAxhmGDXX7KfYyvHoyRHtJR8yqpYdQ3QU/Yz7BzjhFH94sCpHTfkfWj2M+zBSGyJBMeCAcYGGzQ6M30HzEZjkBy0YcEdG7Co9jJdw5kKsbcmRQB2w1HsZ9ilIa7wjIdUYkcBj0o9jLsPmuNV4t2Y/lyOqvjpUOjLsx30HbbqRSDLwecFsih0pbal80SKRH4YxpwPugDGalUpxe34Fb6laWz3ks0KnIxhl6f40pz5dzTklFXex4l8fPGH7buk/E2Dwl+zX+y1b+KtCGji6vvEN1clVjl3OGgC5GSAFOO+6sHTqVqTlSl719rXPJxeNq0qyhBKz63R852PhH/gvN8evH/lada2nw+0y43tbJqFtBaWsKr821wyvLM21T8oAI4JPWvQw9DL7ezqtuXXW33f0zz44vMqbk9l0as/vPq34M2Px6Hw10y0+PP9mah4sgjYatdeHbNltG5O3aCOuMDPc815LdCnUbpP3fM9zCOvLDr27Tl5HSnQNY4zpbqWGT5p6+3NZe1jJaM74030RUm8O62JQzXMEGQdxJY/oOKlSctg9lJ6pMbHoi3AZ28Tc5+aNI8Ac+9dCoVN9TJzcXtYdJ4YtGhZ5dRuHLZbCHHH1xWU2lKzZUanN0K7ab4dtNzW/nDLN8s9wcgg+g7VpCE5arYJuUWPNpYu+UtVLN1bkntyfWlJcj94lVU3uRvb2EQHmW+1AMcYBoi1L4WaKM5apEMt1YWwf7LsdjIYwSgwg/vE9z9K2VCpfVEO6ZV+2WO4uiRFuhYgDPT862VCXZkN6jjPPISYrMH5chQB8o/ClKm4LUS1ZEJLxoCIbYYUYBZxgkdiT+VEKc5r3RzhOC1RXtb/AF0xtHe3dtD5iEMkQDDHbBIFV9XqdUzntrdhDFaxo8Zuz0wTv+8etDoztsXFpvQhupNFteLq8Ve/zSYx9KXspPZFNyXQyb/xT4asic6hG2B8x35yf61aw872aG7qOxly/EbS0RmjunJOCdq+3bNW6E+lzmc+xQvvHUlxI0lhYMzbcbjgZGf/ANdSsPNO9tfQXMZ0mteKbuNvLtII1bliRknIo9jNuzJ51czpm8V3HEusNEScERKoI/OtFQl1TK8yF9KaYSvc69dyNn5w9wcZ+i+1J03HoV7VRWpTudF02H5ZYlfKnJcE8dO/tWXNG4tKivEgj/sOzRg1uY1TGNigDp2/GrUZS2RPPyuxTuta0a3yjw7yQSRIafsalrWE3cqXPi+1CHZbxnOSORkCrVGpbYlszLnxvKzkKI1GM70weeuP6U/Y1OwJ2ZQuPGRkZxHKoZz820YzVU6coSvYcp8qbP/Z",
+              "text/plain": [
+                ""
+              ]
+            },
+            "execution_count": 15,
+            "metadata": {
+              "image/jpeg": {
+                "height": 256,
+                "width": 256
+              }
+            },
+            "output_type": "execute_result"
+          }
+        ],
+        "source": [
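+          "# Download the example image used in this section and preview it inline\n",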
+          "!curl -O https://raw.githubusercontent.com/meta-llama/llama-models/refs/heads/main/Llama_Repo.jpeg\n",
+          "\n",
+          "from IPython.display import Image\n",
+          "Image(\"Llama_Repo.jpeg\", width=256, height=256)"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 16,
+        "id": "e1450ecc",
+        "metadata": {},
+        "outputs": [],
+        "source": [
+          "import base64\n",
+          "def encode_image(image_path):\n",
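+          "    # Read the image bytes, base64-encode them, and return them as a data: URL string\n",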
+          "    with open(image_path, \"rb\") as image_file:\n",
+          "        base64_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
+          "        base64_url = f\"data:image/jpeg;base64,{base64_string}\"\n",
+          "        return base64_url"
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 18,
+        "id": "d7914894",
+        "metadata": {},
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "The image features three llamas, each with a distinct color. The llama on the left is white, the middle one is purple, and the one on the right is also white but wears a blue party hat.\n",
+              "\n",
+              "To determine the number of different colors present, we can count the unique hues:\n",
+              "\n",
+              "1. White (two llamas)\n",
+              "2. Purple (one llama)\n",
+              "3. Blue (party hat)\n",
+              "\n",
+              "Therefore, there are 3 different colors visible in the image: white, purple, and blue.\n"
+            ]
+          }
+        ],
+        "source": [
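+          "# Send one user message that combines the image (as a base64 data URL) with a text question\n",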
+          "response = client.inference.chat_completion(\n",
+          "    messages=[\n",
+          "        {\n",
+          "            \"role\": \"user\",\n",
+          "            \"content\": [\n",
+          "                {\n",
+          "                    \"type\": \"image\",\n",
+          "                    \"image\": {\n",
+          "                        \"url\": {\n",
+          "                            \"uri\": encode_image(\"Llama_Repo.jpeg\")\n",
+          "                        }\n",
+          "                    }\n",
+          "                },\n",
+          "                {\n",
+          "                    \"type\": \"text\",\n",
+          "                    \"text\": \"How many different colors are those llamas? What are those colors?\",\n",
+          "                }\n",
+          "            ]\n",
+          "        }\n",
+          "    ],\n",
+          "    model_id=model_id,\n",
+          "    stream=False,\n",
+          ")\n",
+          "\n",
+          "print(response.completion_message.content)"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "8cf0d555",
+        "metadata": {
+          "id": "8cf0d555"
+        },
+        "source": [
+          "### 2.4 Have a conversation\n",
+          "\n",
+          "Maintaining a conversation history allows the model to retain context from previous interactions. Use a list to accumulate messages, enabling continuity throughout the chat session."
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 19,
+        "id": "3fdf9df6",
+        "metadata": {
+          "id": "3fdf9df6"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "\u001b[36m> Response: The most famous Prime Minister of England during World War 2 was Winston Churchill. He served as the Prime Minister of the United Kingdom from 1940 to 1945, and again from 1951 to 1955. Churchill is widely regarded as one of the greatest wartime leaders in history, known for his leadership, oratory skills, and unwavering resolve during the war.\n",
+              "\n",
+              "Churchill played a crucial role in rallying the British people during the war, and his speeches, such as the \"We shall fight on the beaches\" and \"Their finest hour\" speeches, are still remembered and celebrated today. He worked closely with other Allied leaders, including US President Franklin D. Roosevelt and Soviet leader Joseph Stalin, to coordinate the war effort and ultimately secure the defeat of Nazi Germany.\n",
+              "\n",
+              "Churchill's leadership and legacy have endured long after the war, and he remains one of the most iconic and influential figures in British history.\u001b[0m\n",
+              "\u001b[36m> Response: Winston Churchill was known for his many memorable quotes, but one of his most famous is:\n",
+              "\n",
+              "**\"We shall fight on the beaches, we shall fight on the landing grounds, we shall fight in the fields and in the streets, we shall fight in the hills; we shall never surrender.\"**\n",
+              "\n",
+              "This quote is from his speech to the House of Commons on June 4, 1940, during the early stages of World War II, when Nazi Germany was threatening to invade Britain. The speech is known as the \"We Shall Fight on the Beaches\" speech, and it's considered one of the greatest speeches of the 20th century.\n",
+              "\n",
+              "However, if I had to pick a single, even more concise quote, it would be:\n",
+              "\n",
+              "**\"Blood, toil, tears, and sweat.\"**\n",
+              "\n",
+              "This was the opening phrase of his first speech as Prime Minister to the House of Commons on May 13, 1940, in which he said:\n",
+              "\n",
+              "\"I say to the House as I said to those who have joined this Government, I have nothing to offer but blood, toil, tears, and sweat. We have before us an ordeal of the most grievous kind.\"\n",
+              "\n",
+              "This quote has become synonymous with Churchill's leadership and resolve during the war.\u001b[0m\n"
+            ]
+          }
+        ],
+        "source": [
+          "from termcolor import cprint\n",
+          "\n",
+          "questions = [\n",
+          "    \"Who was the most famous PM of England during World War 2?\",\n",
+          "    \"What was his most famous quote?\"\n",
+          "]\n",
+          "\n",
+          "\n",
+          "def chat_loop():\n",
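+          "    # Accumulate the dialogue in a list so each request carries all prior turns\n",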
+          "    conversation_history = []\n",
+          "    while len(questions) > 0:\n",
+          "        user_input = questions.pop(0)\n",
+          "        if user_input.lower() in [\"exit\", \"quit\", \"bye\"]:\n",
+          "            cprint(\"Ending conversation. Goodbye!\", \"yellow\")\n",
+          "            break\n",
+          "\n",
+          "        user_message = {\"role\": \"user\", \"content\": user_input}\n",
+          "        conversation_history.append(user_message)\n",
+          "\n",
+          "        response = client.inference.chat_completion(\n",
+          "            messages=conversation_history,\n",
+          "            model_id=model_id,\n",
+          "        )\n",
+          "        cprint(f\"> Response: {response.completion_message.content}\", \"cyan\")\n",
+          "\n",
+          "        assistant_message = {\n",
+          "            \"role\": \"assistant\",  # append the reply as an assistant turn\n",
+          "            \"content\": response.completion_message.content,\n",
+          "            \"stop_reason\": response.completion_message.stop_reason,\n",
+          "        }\n",
+          "        conversation_history.append(assistant_message)\n",
+          "\n",
+          "\n",
+          "chat_loop()\n"
+        ]
+      },
+      {
+        "cell_type": "markdown",
+        "id": "72e5111e",
+        "metadata": {
+          "id": "72e5111e"
+        },
+        "source": [
+          "Here is an example you can run to have a conversation yourself.\n",
+          "Remember to type `quit` or `exit` when you are done chatting."
+        ]
+      },
+      {
+        "cell_type": "code",
+        "execution_count": 35,
+        "id": "9496f75c",
+        "metadata": {
+          "colab": {
+            "base_uri": "https://localhost:8080/"
+          },
+          "id": "9496f75c",
+          "outputId": "7d93a4cf-a5d4-4741-b6eb-6bce3a27ff66"
+        },
+        "outputs": [
+          {
+            "name": "stdout",
+            "output_type": "stream",
+            "text": [
+              "\u001b[36m> Response: Hello! How are you today? Is there something I can help you with or would you like to chat?\u001b[0m\n",
+              "\u001b[33mEnding conversation. Goodbye!\u001b[0m\n"
+            ]
+          }
+        ],
+        "source": [
+          "# NBVAL_SKIP\n",
+          "from termcolor import cprint\n",
+          "\n",
+          "def chat_loop():\n",
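+          "    # Same pattern as above, but reads input interactively until the user types exit, quit, or bye\n",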
+          "    conversation_history = []\n",
+          "    while True:\n",
+          "        user_input = input(\"User> \")\n",
+          "        if user_input.lower() in [\"exit\", \"quit\", \"bye\"]:\n",
+          "            cprint(\"Ending conversation. Goodbye!\", \"yellow\")\n",
+          "            break\n",
+          "\n",
+          "        user_message = {\"role\": \"user\", \"content\": user_input}\n",
+          "        conversation_history.append(user_message)\n",
+          "\n",
+          "        response = client.inference.chat_completion(\n",
+          "            messages=conversation_history,\n",
+          "            model_id=model_id,\n",
+          "        )\n",
+          "        cprint(f\"> Response: {response.completion_message.content}\", \"cyan\")\n",
+          "\n",
+          "        assistant_message = {\n",
+          "            \"role\": \"assistant\",  # append the reply as an assistant turn\n",
+          "            \"content\": response.completion_message.content,\n",
+          "            \"stop_reason\": response.completion_message.stop_reason,\n",
+          "        }\n",
+          "        conversation_history.append(assistant_message)\n",
+          "\n",
+          "\n",
+          "chat_loop()\n"
+        ]
+      }
+    ],
+    "metadata": {
+      "accelerator": "GPU",
+      "colab": {
+        "gpuType": "T4",
+        "provenance": []
+      },
+      "kernelspec": {
+        "display_name": "l4",
+        "language": "python",
+        "name": "python3"
+      },
+      "language_info": {
+        "codemirror_mode": {
+          "name": "ipython",
+          "version": 3
+        },
+        "file_extension": ".py",
+        "mimetype": "text/x-python",
+        "name": "python",
+        "nbconvert_exporter": "python",
+        "pygments_lexer": "ipython3",
+        "version": "3.10.16"
+      }
+    },
+    "nbformat": 4,
+    "nbformat_minor": 5
+  }
diff --git a/docs/make.bat b/docs/make.bat
index 32bb24529..954237b9b 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -1,35 +1,35 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=.
-set BUILDDIR=_build
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.https://www.sphinx-doc.org/
-	exit /b 1
-)
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
-
-:end
-popd
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=.
+set BUILDDIR=_build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
index 5de7f715e..93f78d268 100644
--- a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
+++ b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
@@ -38,12 +38,8 @@
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
         "collapsed": true,
-        "id": "O9pGVlPIjpix",
-        "outputId": "e1fbe723-ae31-4630-eb80-4c4f6476d56f"
+        "id": "O9pGVlPIjpix"
       },
       "outputs": [],
       "source": [
@@ -55,12 +51,8 @@
       "cell_type": "code",
       "execution_count": null,
       "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
         "collapsed": true,
-        "id": "JQpLUSNjlGAM",
-        "outputId": "2f7fec97-5511-4cae-d51e-6d262fbca19c"
+        "id": "JQpLUSNjlGAM"
       },
       "outputs": [],
       "source": [
@@ -70,7 +62,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 1,
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/"
@@ -337,9 +329,6 @@
               "    provider_id: tavily-search\n",
               "    provider_type: remote::tavily-search\n",
               "  - config: {}\n",
-              "    provider_id: code-interpreter\n",
-              "    provider_type: inline::code-interpreter\n",
-              "  - config: {}\n",
               "    provider_id: rag-runtime\n",
               "    provider_type: inline::rag-runtime\n",
               "  - config: {}\n",
@@ -378,10 +367,6 @@
               "  toolgroup_id: builtin::rag\n",
               "- args: null\n",
               "  mcp_endpoint: null\n",
-              "  provider_id: code-interpreter\n",
-              "  toolgroup_id: builtin::code_interpreter\n",
-              "- args: null\n",
-              "  mcp_endpoint: null\n",
               "  provider_id: wolfram-alpha\n",
               "  toolgroup_id: builtin::wolfram_alpha\n",
               "vector_dbs: []\n",
@@ -617,9 +602,6 @@
               "    provider_id: tavily-search\n",
               "    provider_type: remote::tavily-search\n",
               "  - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
-              "    provider_id: code-interpreter\n",
-              "    provider_type: inlin\u001b[1;92me::c\u001b[0mode-interpreter\n",
-              "  - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
               "    provider_id: rag-runtime\n",
               "    provider_type: inline::rag-runtime\n",
               "  - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
@@ -658,10 +640,6 @@
               "  toolgroup_id: builtin::rag\n",
               "- args: null\n",
               "  mcp_endpoint: null\n",
-              "  provider_id: code-interpreter\n",
-              "  toolgroup_id: builtin::code_interpreter\n",
-              "- args: null\n",
-              "  mcp_endpoint: null\n",
               "  provider_id: wolfram-alpha\n",
               "  toolgroup_id: builtin::wolfram_alpha\n",
               "vector_dbs: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
@@ -715,7 +693,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 2,
+      "execution_count": null,
       "metadata": {
         "id": "TC_IwIAQo4q-"
       },
@@ -728,116 +706,10 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 3,
+      "execution_count": null,
       "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/",
-          "height": 305,
-          "referenced_widgets": [
-            "feb82e061ee44283b4a46be858ef4cd7",
-            "78a2d2d4ee3f42f3be42ef4baa298561",
-            "ba5e6ca09f174ef3a348453cf5cfc24a",
-            "74b58e4647644c9daf9af488942fdaf4",
-            "d56e218958a041e286e80f24e400ab0b",
-            "cab80632b7564a9eb59583e09573c1ee",
-            "10c0d50d7c204de0b4c8e8f4d3ec0af5",
-            "626ef2f811ae4e119a0e85cebe92b91d",
-            "aef4172d916f40b0ab4ed09104e10f24",
-            "25529e7fd57049d2816d31f696eab1fd",
-            "093bdcb608cf4b4fa37b0032a3915187",
-            "c788d4e9e1e24dca9b6503689df9b631",
-            "d1587e2144bf46299c1bdec3ea96e4e7",
-            "500a072c09da41759cb2c942a16d8429",
-            "9785009392934e3bbb229e8781667cbc",
-            "84570fe2c2a54a068fb9b8cbc8b041a1",
-            "f9e579c58e3f4ae0bbb721dffa33bf0a",
-            "737116977f474ec0b68d88a40fd1086c",
-            "e6d6e516cd03452297d80c36376855dd",
-            "6ae0fadb3aeb4be18a9ab3279fb23145",
-            "fa4800a506ac480984d58933580df086",
-            "117468099dbc42fdaafc08207eaac7ab",
-            "44f585990aa244d8ba61f892dc1ccc1c",
-            "4fc59928a0544f95a4438b37d19ca437",
-            "fb644d47049f495397d0e60597c86ea3",
-            "78632694ff694442bc3fefc2cac2cbf5",
-            "083fd2549abd4b03bd41d8b92ec28f42",
-            "611d6472a58d419583acc416767a4c90",
-            "98c5ce434cff454eaaa3f0fd3498183a",
-            "3d0344a9cc744e369da1b6b7ea1b3be8",
-            "c452ccbf47a44073aee710175f707a7d",
-            "0218397c573e4b28bfb4ffa66464d50f",
-            "9b01bcd6e5174be2af19f457047017c8",
-            "4fed5720f30b4b3cbbc606a4f25e223b",
-            "6fa866b9971542739b0ed26d90ceac80",
-            "fe7553b513954cc68c427b5d9d260b33",
-            "4bc266d49a6741a88350e029d101425b",
-            "da57445f98e7427589962836c2b4287e",
-            "ad1fb86cc1f94fd9911eda03cf4a3783",
-            "fdefb51ad4c4418b98c5826126558011",
-            "179d41b80dc841e8a440482516b8bca5",
-            "22b1ecd2eff14770bcfb0c62d3d4213f",
-            "47f876cf41484d55b645e1e99337423a",
-            "340fbbb4982c460992c88885e79b47db",
-            "9659140487ca4d3ea799196d2c1ecf61",
-            "52150fd494d24eea89b5232077509355",
-            "04acde771d0a46699e1de07d9733d1a3",
-            "7b98103300814f3caea84266263b95a2",
-            "75f06408071c494f934bb909b84110d1",
-            "b09b2690894749339a9172e5ad0a9b75",
-            "cbed38801163438d891879b756f5baab",
-            "399a6417b23e4593bb244ec3abb6b46d",
-            "53a321f36b0d4e08a74a5bcfbd04434b",
-            "b8c0c8aaac0d4032bf5c673a43d084ab",
-            "d1f32499fa3f4795b92361637e23a9bb",
-            "c06f9a090fb54c74b947634bf6d11fa8",
-            "82991dcc80f14af9bd2e95f705980676",
-            "cd832e3842b945aabbb327856053f261",
-            "93ee645d54f34acdb0d15092d4a6f0d1",
-            "b77fe05bbcf84cdc8ef85b264ccd35f6",
-            "e17d286a965a49cfb8d5bf885865cb1e",
-            "ca015c1a0c1449e68edb282462435a3f",
-            "2932b06afde9468a976eb6bfb072b80e",
-            "d027c807ddc04f89bec41dc05fde7718",
-            "4ff3a6aaf706460bbba01b248b93000e",
-            "bfd75a39f0154c30adbaad1e2ca0f1e2",
-            "4f788a7920c346f3b42900825bd6711a",
-            "8e9358ec7d474808bb96c13e13489c67",
-            "f0dfeee2a8d64dedbc8ef55ad4e69932",
-            "9437b707bf1a4847a50aafeb4252dab5",
-            "f255707788704a76bd1651f26a22402d",
-            "3b70fa4e43ef4951862e119378c3c501",
-            "6c0a6a7fa8ca4e1c961a36305f0e7638",
-            "201bd914f9884e46b8e6df9d9900a6e8",
-            "f53b7ada01084e73bba6e14a95e2a534",
-            "d2029292327b488db02fd123ee2b75af",
-            "3e26bc24a3e44b4582f57913bdf98de4",
-            "9d2b6eabf7e14436b72bbf374b4a2a0a",
-            "b5d7cb5a6157449a850ef0e12e3d3eb7",
-            "c245d316bf9e44dabe5bfd1e47fc8d2e",
-            "963cf422ca894d82b0dd94c6165d41bf",
-            "78d0e2aa93674bbeb42bff87a23cce9b",
-            "12c6f1180eeb4e9eb9037ea5dd24ec8e",
-            "017a81d7160240a398947545963856f5",
-            "1cf8eeb8d81c4e8a8e95dd43296a78b9",
-            "5b0b5a3f79e94c51aae48fe0dd34ba0e",
-            "f5b34a743ce54fb591f25b04a2651d65",
-            "dec6399e2c5341aead66e1674d3e6c72",
-            "24e48376a72940679989a39a40bbe7f6",
-            "484df732051540859bc7ac9cecadc83c",
-            "4b33b1db50c34a2fa957d81a71a2a47f",
-            "e51d501e2f994baba40345ad632eabee",
-            "631a85e420b64e8cb6915af59c5ce08a",
-            "70af9cb2838c4a92bd67f8cb5c98d97f",
-            "158115266c284c4f8dbce3586151cbf1",
-            "ce5019b36cde44c58c5f596dbb59a2f8",
-            "b90d660ca8584ba1815a3c66b420c079",
-            "7c4d1de626784a59a7e0a33c24086186",
-            "21cf0e35ecd845a8b5e7c5ce241cf177"
-          ]
-        },
         "collapsed": true,
-        "id": "DJkmoG2kq1_P",
-        "outputId": "8493ee59-c6ff-4bb6-d787-f295944db1cf"
+        "id": "DJkmoG2kq1_P"
       },
       "outputs": [],
       "source": [
@@ -862,7 +734,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 4,
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/",
@@ -963,7 +835,7 @@
         "\n",
         "client.benchmarks.register(\n",
         "    benchmark_id=\"meta-reference::mmmu\",\n",
-        "    # Note: we can use any value as `dataset_id` because we'll be using the `evaluate_rows` API which accepts the \n",
+        "    # Note: we can use any value as `dataset_id` because we'll be using the `evaluate_rows` API which accepts the\n",
         "    # `input_rows` argument and does not fetch data from the dataset.\n",
         "    dataset_id=f\"mmmu-{subset}-{split}\",\n",
         "    # Note: for the same reason as above, we can use any value as `scoring_functions`.\n",
@@ -1008,7 +880,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 5,
+      "execution_count": null,
       "metadata": {
         "id": "HXmZf3Ymw-aX"
       },
@@ -1028,7 +900,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 6,
+      "execution_count": null,
       "metadata": {
         "id": "Gc8azb4Rxr5J"
       },
@@ -1042,7 +914,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 7,
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/",
@@ -1182,7 +1054,7 @@
     },
     {
       "cell_type": "code",
-      "execution_count": 27,
+      "execution_count": null,
       "metadata": {
         "colab": {
           "base_uri": "https://localhost:8080/",
@@ -1307,7 +1179,9 @@
     {
       "cell_type": "code",
       "execution_count": null,
-      "metadata": {},
+      "metadata": {
+        "id": "lxc9-eXYK5Av"
+      },
       "outputs": [],
       "source": []
     }
@@ -1336,3088 +1210,6 @@
       "nbconvert_exporter": "python",
       "pygments_lexer": "ipython3",
       "version": "3.10.16"
-    },
-    "widgets": {
-      "application/vnd.jupyter.widget-state+json": {
-        "017a81d7160240a398947545963856f5": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "0218397c573e4b28bfb4ffa66464d50f": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "04acde771d0a46699e1de07d9733d1a3": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_399a6417b23e4593bb244ec3abb6b46d",
-            "max": 453677660,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_53a321f36b0d4e08a74a5bcfbd04434b",
-            "value": 453677660
-          }
-        },
-        "083fd2549abd4b03bd41d8b92ec28f42": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "093bdcb608cf4b4fa37b0032a3915187": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "10c0d50d7c204de0b4c8e8f4d3ec0af5": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "117468099dbc42fdaafc08207eaac7ab": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "12c6f1180eeb4e9eb9037ea5dd24ec8e": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "158115266c284c4f8dbce3586151cbf1": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "179d41b80dc841e8a440482516b8bca5": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "1cf8eeb8d81c4e8a8e95dd43296a78b9": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "201bd914f9884e46b8e6df9d9900a6e8": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "21cf0e35ecd845a8b5e7c5ce241cf177": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "22b1ecd2eff14770bcfb0c62d3d4213f": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "24e48376a72940679989a39a40bbe7f6": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_484df732051540859bc7ac9cecadc83c",
-              "IPY_MODEL_4b33b1db50c34a2fa957d81a71a2a47f",
-              "IPY_MODEL_e51d501e2f994baba40345ad632eabee"
-            ],
-            "layout": "IPY_MODEL_631a85e420b64e8cb6915af59c5ce08a"
-          }
-        },
-        "25529e7fd57049d2816d31f696eab1fd": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "2932b06afde9468a976eb6bfb072b80e": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "340fbbb4982c460992c88885e79b47db": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "399a6417b23e4593bb244ec3abb6b46d": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "3b70fa4e43ef4951862e119378c3c501": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "3d0344a9cc744e369da1b6b7ea1b3be8": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "3e26bc24a3e44b4582f57913bdf98de4": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "44f585990aa244d8ba61f892dc1ccc1c": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_4fc59928a0544f95a4438b37d19ca437",
-              "IPY_MODEL_fb644d47049f495397d0e60597c86ea3",
-              "IPY_MODEL_78632694ff694442bc3fefc2cac2cbf5"
-            ],
-            "layout": "IPY_MODEL_083fd2549abd4b03bd41d8b92ec28f42"
-          }
-        },
-        "47f876cf41484d55b645e1e99337423a": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "484df732051540859bc7ac9cecadc83c": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_70af9cb2838c4a92bd67f8cb5c98d97f",
-            "placeholder": "​",
-            "style": "IPY_MODEL_158115266c284c4f8dbce3586151cbf1",
-            "value": "Generating test split: 100%"
-          }
-        },
-        "4b33b1db50c34a2fa957d81a71a2a47f": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_ce5019b36cde44c58c5f596dbb59a2f8",
-            "max": 287,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_b90d660ca8584ba1815a3c66b420c079",
-            "value": 287
-          }
-        },
-        "4bc266d49a6741a88350e029d101425b": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_47f876cf41484d55b645e1e99337423a",
-            "placeholder": "​",
-            "style": "IPY_MODEL_340fbbb4982c460992c88885e79b47db",
-            "value": " 461M/461M [00:11<00:00, 31.2MB/s]"
-          }
-        },
-        "4f788a7920c346f3b42900825bd6711a": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_8e9358ec7d474808bb96c13e13489c67",
-              "IPY_MODEL_f0dfeee2a8d64dedbc8ef55ad4e69932",
-              "IPY_MODEL_9437b707bf1a4847a50aafeb4252dab5"
-            ],
-            "layout": "IPY_MODEL_f255707788704a76bd1651f26a22402d"
-          }
-        },
-        "4fc59928a0544f95a4438b37d19ca437": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_611d6472a58d419583acc416767a4c90",
-            "placeholder": "​",
-            "style": "IPY_MODEL_98c5ce434cff454eaaa3f0fd3498183a",
-            "value": "validation-00000-of-00001.parquet: 100%"
-          }
-        },
-        "4fed5720f30b4b3cbbc606a4f25e223b": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_6fa866b9971542739b0ed26d90ceac80",
-              "IPY_MODEL_fe7553b513954cc68c427b5d9d260b33",
-              "IPY_MODEL_4bc266d49a6741a88350e029d101425b"
-            ],
-            "layout": "IPY_MODEL_da57445f98e7427589962836c2b4287e"
-          }
-        },
-        "4ff3a6aaf706460bbba01b248b93000e": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "500a072c09da41759cb2c942a16d8429": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_e6d6e516cd03452297d80c36376855dd",
-            "max": 29453850,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_6ae0fadb3aeb4be18a9ab3279fb23145",
-            "value": 29453850
-          }
-        },
-        "52150fd494d24eea89b5232077509355": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_b09b2690894749339a9172e5ad0a9b75",
-            "placeholder": "​",
-            "style": "IPY_MODEL_cbed38801163438d891879b756f5baab",
-            "value": "test-00001-of-00003.parquet: 100%"
-          }
-        },
-        "53a321f36b0d4e08a74a5bcfbd04434b": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "5b0b5a3f79e94c51aae48fe0dd34ba0e": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "611d6472a58d419583acc416767a4c90": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "626ef2f811ae4e119a0e85cebe92b91d": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "631a85e420b64e8cb6915af59c5ce08a": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "6ae0fadb3aeb4be18a9ab3279fb23145": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "6c0a6a7fa8ca4e1c961a36305f0e7638": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "6fa866b9971542739b0ed26d90ceac80": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_ad1fb86cc1f94fd9911eda03cf4a3783",
-            "placeholder": "​",
-            "style": "IPY_MODEL_fdefb51ad4c4418b98c5826126558011",
-            "value": "test-00000-of-00003.parquet: 100%"
-          }
-        },
-        "70af9cb2838c4a92bd67f8cb5c98d97f": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "737116977f474ec0b68d88a40fd1086c": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "74b58e4647644c9daf9af488942fdaf4": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_25529e7fd57049d2816d31f696eab1fd",
-            "placeholder": "​",
-            "style": "IPY_MODEL_093bdcb608cf4b4fa37b0032a3915187",
-            "value": " 36.0k/36.0k [00:00<00:00, 1.29MB/s]"
-          }
-        },
-        "75f06408071c494f934bb909b84110d1": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "78632694ff694442bc3fefc2cac2cbf5": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_0218397c573e4b28bfb4ffa66464d50f",
-            "placeholder": "​",
-            "style": "IPY_MODEL_9b01bcd6e5174be2af19f457047017c8",
-            "value": " 165M/165M [00:03<00:00, 42.9MB/s]"
-          }
-        },
-        "78a2d2d4ee3f42f3be42ef4baa298561": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_cab80632b7564a9eb59583e09573c1ee",
-            "placeholder": "​",
-            "style": "IPY_MODEL_10c0d50d7c204de0b4c8e8f4d3ec0af5",
-            "value": "README.md: 100%"
-          }
-        },
-        "78d0e2aa93674bbeb42bff87a23cce9b": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "7b98103300814f3caea84266263b95a2": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_b8c0c8aaac0d4032bf5c673a43d084ab",
-            "placeholder": "​",
-            "style": "IPY_MODEL_d1f32499fa3f4795b92361637e23a9bb",
-            "value": " 454M/454M [00:11<00:00, 40.4MB/s]"
-          }
-        },
-        "7c4d1de626784a59a7e0a33c24086186": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "82991dcc80f14af9bd2e95f705980676": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_e17d286a965a49cfb8d5bf885865cb1e",
-            "placeholder": "​",
-            "style": "IPY_MODEL_ca015c1a0c1449e68edb282462435a3f",
-            "value": "test-00002-of-00003.parquet: 100%"
-          }
-        },
-        "84570fe2c2a54a068fb9b8cbc8b041a1": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "8e9358ec7d474808bb96c13e13489c67": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_3b70fa4e43ef4951862e119378c3c501",
-            "placeholder": "​",
-            "style": "IPY_MODEL_6c0a6a7fa8ca4e1c961a36305f0e7638",
-            "value": "Generating dev split: 100%"
-          }
-        },
-        "93ee645d54f34acdb0d15092d4a6f0d1": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_4ff3a6aaf706460bbba01b248b93000e",
-            "placeholder": "​",
-            "style": "IPY_MODEL_bfd75a39f0154c30adbaad1e2ca0f1e2",
-            "value": " 471M/471M [00:11<00:00, 41.5MB/s]"
-          }
-        },
-        "9437b707bf1a4847a50aafeb4252dab5": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_d2029292327b488db02fd123ee2b75af",
-            "placeholder": "​",
-            "style": "IPY_MODEL_3e26bc24a3e44b4582f57913bdf98de4",
-            "value": " 5/5 [00:00<00:00,  8.03 examples/s]"
-          }
-        },
-        "963cf422ca894d82b0dd94c6165d41bf": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_f5b34a743ce54fb591f25b04a2651d65",
-            "placeholder": "​",
-            "style": "IPY_MODEL_dec6399e2c5341aead66e1674d3e6c72",
-            "value": " 30/30 [00:03<00:00,  8.23 examples/s]"
-          }
-        },
-        "9659140487ca4d3ea799196d2c1ecf61": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_52150fd494d24eea89b5232077509355",
-              "IPY_MODEL_04acde771d0a46699e1de07d9733d1a3",
-              "IPY_MODEL_7b98103300814f3caea84266263b95a2"
-            ],
-            "layout": "IPY_MODEL_75f06408071c494f934bb909b84110d1"
-          }
-        },
-        "9785009392934e3bbb229e8781667cbc": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_fa4800a506ac480984d58933580df086",
-            "placeholder": "​",
-            "style": "IPY_MODEL_117468099dbc42fdaafc08207eaac7ab",
-            "value": " 29.5M/29.5M [00:00<00:00, 36.5MB/s]"
-          }
-        },
-        "98c5ce434cff454eaaa3f0fd3498183a": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "9b01bcd6e5174be2af19f457047017c8": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "9d2b6eabf7e14436b72bbf374b4a2a0a": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_b5d7cb5a6157449a850ef0e12e3d3eb7",
-              "IPY_MODEL_c245d316bf9e44dabe5bfd1e47fc8d2e",
-              "IPY_MODEL_963cf422ca894d82b0dd94c6165d41bf"
-            ],
-            "layout": "IPY_MODEL_78d0e2aa93674bbeb42bff87a23cce9b"
-          }
-        },
-        "ad1fb86cc1f94fd9911eda03cf4a3783": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "aef4172d916f40b0ab4ed09104e10f24": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "b09b2690894749339a9172e5ad0a9b75": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "b5d7cb5a6157449a850ef0e12e3d3eb7": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_12c6f1180eeb4e9eb9037ea5dd24ec8e",
-            "placeholder": "​",
-            "style": "IPY_MODEL_017a81d7160240a398947545963856f5",
-            "value": "Generating validation split: 100%"
-          }
-        },
-        "b77fe05bbcf84cdc8ef85b264ccd35f6": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "b8c0c8aaac0d4032bf5c673a43d084ab": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "b90d660ca8584ba1815a3c66b420c079": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "ba5e6ca09f174ef3a348453cf5cfc24a": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_626ef2f811ae4e119a0e85cebe92b91d",
-            "max": 36030,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_aef4172d916f40b0ab4ed09104e10f24",
-            "value": 36030
-          }
-        },
-        "bfd75a39f0154c30adbaad1e2ca0f1e2": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "c06f9a090fb54c74b947634bf6d11fa8": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_82991dcc80f14af9bd2e95f705980676",
-              "IPY_MODEL_cd832e3842b945aabbb327856053f261",
-              "IPY_MODEL_93ee645d54f34acdb0d15092d4a6f0d1"
-            ],
-            "layout": "IPY_MODEL_b77fe05bbcf84cdc8ef85b264ccd35f6"
-          }
-        },
-        "c245d316bf9e44dabe5bfd1e47fc8d2e": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_1cf8eeb8d81c4e8a8e95dd43296a78b9",
-            "max": 30,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_5b0b5a3f79e94c51aae48fe0dd34ba0e",
-            "value": 30
-          }
-        },
-        "c452ccbf47a44073aee710175f707a7d": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "c788d4e9e1e24dca9b6503689df9b631": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_d1587e2144bf46299c1bdec3ea96e4e7",
-              "IPY_MODEL_500a072c09da41759cb2c942a16d8429",
-              "IPY_MODEL_9785009392934e3bbb229e8781667cbc"
-            ],
-            "layout": "IPY_MODEL_84570fe2c2a54a068fb9b8cbc8b041a1"
-          }
-        },
-        "ca015c1a0c1449e68edb282462435a3f": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "cab80632b7564a9eb59583e09573c1ee": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "cbed38801163438d891879b756f5baab": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "cd832e3842b945aabbb327856053f261": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_2932b06afde9468a976eb6bfb072b80e",
-            "max": 470745176,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_d027c807ddc04f89bec41dc05fde7718",
-            "value": 470745176
-          }
-        },
-        "ce5019b36cde44c58c5f596dbb59a2f8": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "d027c807ddc04f89bec41dc05fde7718": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "d1587e2144bf46299c1bdec3ea96e4e7": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_f9e579c58e3f4ae0bbb721dffa33bf0a",
-            "placeholder": "​",
-            "style": "IPY_MODEL_737116977f474ec0b68d88a40fd1086c",
-            "value": "dev-00000-of-00001.parquet: 100%"
-          }
-        },
-        "d1f32499fa3f4795b92361637e23a9bb": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "d2029292327b488db02fd123ee2b75af": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "d56e218958a041e286e80f24e400ab0b": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "da57445f98e7427589962836c2b4287e": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "dec6399e2c5341aead66e1674d3e6c72": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "e17d286a965a49cfb8d5bf885865cb1e": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "e51d501e2f994baba40345ad632eabee": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HTMLModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HTMLModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HTMLView",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_7c4d1de626784a59a7e0a33c24086186",
-            "placeholder": "​",
-            "style": "IPY_MODEL_21cf0e35ecd845a8b5e7c5ce241cf177",
-            "value": " 287/287 [00:23<00:00, 12.48 examples/s]"
-          }
-        },
-        "e6d6e516cd03452297d80c36376855dd": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "f0dfeee2a8d64dedbc8ef55ad4e69932": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_201bd914f9884e46b8e6df9d9900a6e8",
-            "max": 5,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_f53b7ada01084e73bba6e14a95e2a534",
-            "value": 5
-          }
-        },
-        "f255707788704a76bd1651f26a22402d": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "f53b7ada01084e73bba6e14a95e2a534": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "ProgressStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "ProgressStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "bar_color": null,
-            "description_width": ""
-          }
-        },
-        "f5b34a743ce54fb591f25b04a2651d65": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "f9e579c58e3f4ae0bbb721dffa33bf0a": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "fa4800a506ac480984d58933580df086": {
-          "model_module": "@jupyter-widgets/base",
-          "model_module_version": "1.2.0",
-          "model_name": "LayoutModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/base",
-            "_model_module_version": "1.2.0",
-            "_model_name": "LayoutModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "LayoutView",
-            "align_content": null,
-            "align_items": null,
-            "align_self": null,
-            "border": null,
-            "bottom": null,
-            "display": null,
-            "flex": null,
-            "flex_flow": null,
-            "grid_area": null,
-            "grid_auto_columns": null,
-            "grid_auto_flow": null,
-            "grid_auto_rows": null,
-            "grid_column": null,
-            "grid_gap": null,
-            "grid_row": null,
-            "grid_template_areas": null,
-            "grid_template_columns": null,
-            "grid_template_rows": null,
-            "height": null,
-            "justify_content": null,
-            "justify_items": null,
-            "left": null,
-            "margin": null,
-            "max_height": null,
-            "max_width": null,
-            "min_height": null,
-            "min_width": null,
-            "object_fit": null,
-            "object_position": null,
-            "order": null,
-            "overflow": null,
-            "overflow_x": null,
-            "overflow_y": null,
-            "padding": null,
-            "right": null,
-            "top": null,
-            "visibility": null,
-            "width": null
-          }
-        },
-        "fb644d47049f495397d0e60597c86ea3": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_3d0344a9cc744e369da1b6b7ea1b3be8",
-            "max": 165333397,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_c452ccbf47a44073aee710175f707a7d",
-            "value": 165333397
-          }
-        },
-        "fdefb51ad4c4418b98c5826126558011": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "DescriptionStyleModel",
-          "state": {
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "DescriptionStyleModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/base",
-            "_view_module_version": "1.2.0",
-            "_view_name": "StyleView",
-            "description_width": ""
-          }
-        },
-        "fe7553b513954cc68c427b5d9d260b33": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "FloatProgressModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "FloatProgressModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "ProgressView",
-            "bar_style": "success",
-            "description": "",
-            "description_tooltip": null,
-            "layout": "IPY_MODEL_179d41b80dc841e8a440482516b8bca5",
-            "max": 461411018,
-            "min": 0,
-            "orientation": "horizontal",
-            "style": "IPY_MODEL_22b1ecd2eff14770bcfb0c62d3d4213f",
-            "value": 461411018
-          }
-        },
-        "feb82e061ee44283b4a46be858ef4cd7": {
-          "model_module": "@jupyter-widgets/controls",
-          "model_module_version": "1.5.0",
-          "model_name": "HBoxModel",
-          "state": {
-            "_dom_classes": [],
-            "_model_module": "@jupyter-widgets/controls",
-            "_model_module_version": "1.5.0",
-            "_model_name": "HBoxModel",
-            "_view_count": null,
-            "_view_module": "@jupyter-widgets/controls",
-            "_view_module_version": "1.5.0",
-            "_view_name": "HBoxView",
-            "box_style": "",
-            "children": [
-              "IPY_MODEL_78a2d2d4ee3f42f3be42ef4baa298561",
-              "IPY_MODEL_ba5e6ca09f174ef3a348453cf5cfc24a",
-              "IPY_MODEL_74b58e4647644c9daf9af488942fdaf4"
-            ],
-            "layout": "IPY_MODEL_d56e218958a041e286e80f24e400ab0b"
-          }
-        }
-      }
     }
   },
   "nbformat": 4,
diff --git a/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb b/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb
index 399a3bff1..e70cc3bbe 100644
--- a/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb
+++ b/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb
@@ -840,7 +840,6 @@
     "    \"memory_optimizations.rst\",\n",
     "    \"chat.rst\",\n",
     "    \"llama3.rst\",\n",
-    "    \"datasets.rst\",\n",
     "    \"qat_finetune.rst\",\n",
     "    \"lora_finetune.rst\",\n",
     "]\n",
@@ -1586,7 +1585,6 @@
     "    \"memory_optimizations.rst\",\n",
     "    \"chat.rst\",\n",
     "    \"llama3.rst\",\n",
-    "    \"datasets.rst\",\n",
     "    \"qat_finetune.rst\",\n",
     "    \"lora_finetune.rst\",\n",
     "]\n",
diff --git a/docs/openapi_generator/generate.py b/docs/openapi_generator/generate.py
index caa4f17ff..9fc375175 100644
--- a/docs/openapi_generator/generate.py
+++ b/docs/openapi_generator/generate.py
@@ -44,7 +44,7 @@ def main(output_dir: str):
     if return_type_errors:
         print("\nAPI Method Return Type Validation Errors:\n")
         for error in return_type_errors:
-            print(error)
+            print(error, file=sys.stderr)
         sys.exit(1)
     now = str(datetime.now())
     print(
diff --git a/docs/openapi_generator/pyopenapi/generator.py b/docs/openapi_generator/pyopenapi/generator.py
index 3936bb3c4..5b7a685c1 100644
--- a/docs/openapi_generator/pyopenapi/generator.py
+++ b/docs/openapi_generator/pyopenapi/generator.py
@@ -6,6 +6,7 @@
 
 import hashlib
 import ipaddress
+import types
 import typing
 from dataclasses import make_dataclass
 from typing import Any, Dict, Set, Union
@@ -179,7 +180,7 @@ class ContentBuilder:
         "Creates the content subtree for a request or response."
 
         def is_iterator_type(t):
-            return "StreamChunk" in str(t)
+            return "StreamChunk" in str(t) or "OpenAIResponseObjectStream" in str(t)
 
         def get_media_type(t):
             if is_generic_list(t):
@@ -189,7 +190,7 @@ class ContentBuilder:
             else:
                 return "application/json"
 
-        if typing.get_origin(payload_type) is typing.Union:
+        if typing.get_origin(payload_type) in (typing.Union, types.UnionType):
             media_types = []
             item_types = []
             for x in typing.get_args(payload_type):
@@ -758,7 +759,7 @@ class Generator:
         )
 
         return Operation(
-            tags=[op.defining_class.__name__],
+            tags=[getattr(op.defining_class, "API_NAMESPACE", op.defining_class.__name__)],
             summary=None,
             # summary=doc_string.short_description,
             description=description,
@@ -804,6 +805,8 @@ class Generator:
         operation_tags: List[Tag] = []
         for cls in endpoint_classes:
             doc_string = parse_type(cls)
+            if hasattr(cls, "API_NAMESPACE") and cls.API_NAMESPACE != cls.__name__:
+                continue
             operation_tags.append(
                 Tag(
                     name=cls.__name__,
diff --git a/docs/openapi_generator/pyopenapi/utility.py b/docs/openapi_generator/pyopenapi/utility.py
index db18e8430..12a69050c 100644
--- a/docs/openapi_generator/pyopenapi/utility.py
+++ b/docs/openapi_generator/pyopenapi/utility.py
@@ -174,14 +174,64 @@ def _validate_list_parameters_contain_data(method) -> str | None:
         return "does not have a mandatory data attribute containing the list of objects"
 
 
+def _validate_has_ellipsis(method) -> str | None:
+    source = inspect.getsource(method)
+    if "..." not in source and not "NotImplementedError" in source:
+        return "does not contain ellipsis (...) in its implementation"
+
+def _validate_has_return_in_docstring(method) -> str | None:
+    source = inspect.getsource(method)
+    return_type = method.__annotations__.get('return')
+    if return_type is not None and return_type != type(None) and ":returns:" not in source:
+        return "does not have a ':returns:' in its docstring"
+
+def _validate_has_params_in_docstring(method) -> str | None:
+    source = inspect.getsource(method)
+    sig = inspect.signature(method)
+    # Only check if the method has more than one parameter
+    if len(sig.parameters) > 1 and ":param" not in source:
+        return "does not have a ':param' in its docstring"
+
+def _validate_has_no_return_none_in_docstring(method) -> str | None:
+    source = inspect.getsource(method)
+    return_type = method.__annotations__.get('return')
+    if return_type is None and ":returns: None" in source:
+        return "has a ':returns: None' in its docstring which is redundant for None-returning functions"
+
+def _validate_docstring_lines_end_with_dot(method) -> str | None:
+    docstring = inspect.getdoc(method)
+    if docstring is None:
+        return None
+
+    lines = docstring.split('\n')
+    for line in lines:
+        line = line.strip()
+        if line and not any(line.endswith(char) for char in '.:{}[]()",'):
+            return f"docstring line '{line}' does not end with a valid character: . : {{ }} [ ] ( ) , \""
+
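+# For illustration only: a protocol method shaped like the sketch below would pass the
+# ellipsis and docstring checks above (the method name and types are hypothetical):
+#
+#     async def get_model(self, model_id: str) -> Model:
+#         """Get a model by its identifier.
+#
+#         :param model_id: The identifier of the model to look up.
+#         :returns: The matching model.
+#         """
+#         ...
+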
 _VALIDATORS = {
     "GET": [
         _validate_api_method_return_type,
         _validate_list_parameters_contain_data,
         _validate_api_method_doesnt_return_list,
+        _validate_has_ellipsis,
+        _validate_has_return_in_docstring,
+        _validate_has_params_in_docstring,
+        _validate_docstring_lines_end_with_dot,
     ],
     "DELETE": [
         _validate_api_delete_method_returns_none,
+        _validate_has_ellipsis,
+        _validate_has_return_in_docstring,
+        _validate_has_params_in_docstring,
+        _validate_has_no_return_none_in_docstring
+    ],
+    "POST": [
+        _validate_has_ellipsis,
+        _validate_has_return_in_docstring,
+        _validate_has_params_in_docstring,
+        _validate_has_no_return_none_in_docstring,
+        _validate_docstring_lines_end_with_dot,
     ],
 }
 
diff --git a/docs/readme.md b/docs/readme.md
index b88a4738d..c238c4720 100644
--- a/docs/readme.md
+++ b/docs/readme.md
@@ -3,10 +3,10 @@
 Here's a collection of comprehensive guides, examples, and resources for building AI applications with Llama Stack. For the complete documentation, visit our [ReadTheDocs page](https://llama-stack.readthedocs.io/en/latest/index.html).
 
 ## Render locally
+
+From the llama-stack root directory, run the following command to render the docs locally:
 ```bash
-pip install -r requirements.txt
-cd docs
-python -m sphinx_autobuild source _build
+uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all
 ```
 You can open up the docs in your browser at http://localhost:8000
 
diff --git a/docs/requirements.txt b/docs/requirements.txt
deleted file mode 100644
index e31d08ff1..000000000
--- a/docs/requirements.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-sphinx==8.1.3
-myst-parser
-linkify
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
-sphinx-rtd-theme>=1.0.0
-sphinx_autobuild
-sphinx-copybutton
-sphinx-design
-sphinx-pdj-theme
-sphinx_rtd_dark_mode
-sphinx-tabs
-sphinxcontrib-openapi
-sphinxcontrib-redoc
-sphinxcontrib-mermaid
-sphinxcontrib-video
-tomli
diff --git a/docs/source/building_applications/rag.md b/docs/source/building_applications/rag.md
index db6303209..289c38991 100644
--- a/docs/source/building_applications/rag.md
+++ b/docs/source/building_applications/rag.md
@@ -51,11 +51,37 @@ chunks = [
         "mime_type": "text/plain",
         "metadata": {
             "document_id": "doc1",
+            "author": "Jane Doe",
         },
     },
 ]
 client.vector_io.insert(vector_db_id=vector_db_id, chunks=chunks)
 ```
+
+#### Using Precomputed Embeddings
+If you decide to precompute embeddings for your documents, you can insert them directly into the vector database by
+including the embedding vectors in the chunk data. This is useful if you have a separate embedding service or if you
+want to customize the ingestion process.
+```python
+chunks_with_embeddings = [
+    {
+        "content": "First chunk of text",
+        "mime_type": "text/plain",
+        "embedding": [0.1, 0.2, 0.3, ...],  # Your precomputed embedding vector
+        "metadata": {"document_id": "doc1", "section": "introduction"},
+    },
+    {
+        "content": "Second chunk of text",
+        "mime_type": "text/plain",
+        "embedding": [0.2, 0.3, 0.4, ...],  # Your precomputed embedding vector
+        "metadata": {"document_id": "doc1", "section": "methodology"},
+    },
+]
+client.vector_io.insert(vector_db_id=vector_db_id, chunks=chunks_with_embeddings)
+```
+When providing precomputed embeddings, ensure the embedding dimension matches the `embedding_dimension` specified when
+registering the vector database.
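+
+For example, if the vector database was registered for 384-dimensional embeddings, only insert
+384-dimensional vectors into it. A hypothetical registration (adjust the embedding model,
+dimension, and provider to your setup) might look like:
+```python
+client.vector_dbs.register(
+    vector_db_id=vector_db_id,
+    embedding_model="all-MiniLM-L6-v2",  # must produce vectors of size embedding_dimension
+    embedding_dimension=384,
+    provider_id="faiss",
+)
+```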
+
 ### Retrieval
 You can query the vector database to retrieve documents based on their embeddings.
 ```python
@@ -98,6 +124,17 @@ results = client.tool_runtime.rag_tool.query(
 )
 ```
 
+You can configure how the RAG tool adds metadata to the context if you find it useful for your application. Simply add:
+```python
+# Query documents
+results = client.tool_runtime.rag_tool.query(
+    vector_db_ids=[vector_db_id],
+    content="What do you know about...",
+    query_config={
+        "chunk_template": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n",
+    },
+)
+```
 ### Building RAG-Enhanced Agents
 
 One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
@@ -115,6 +152,12 @@ agent = Agent(
             "name": "builtin::rag/knowledge_search",
             "args": {
                 "vector_db_ids": [vector_db_id],
+                # Defaults
+                "query_config": {
+                    "chunk_size_in_tokens": 512,
+                    "chunk_overlap_in_tokens": 0,
+                    "chunk_template": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n",
+                },
             },
         }
     ],
diff --git a/docs/source/building_applications/tools.md b/docs/source/building_applications/tools.md
index 6da1c5a6a..c7af17bfa 100644
--- a/docs/source/building_applications/tools.md
+++ b/docs/source/building_applications/tools.md
@@ -43,27 +43,6 @@ The tool requires an API key which can be provided either in the configuration o
 
 > **NOTE:** When using Tavily Search and Bing Search, the inference output will still display "Brave Search." This is because Llama models have been trained with Brave Search as a built-in tool. Tavily and bing is just being used in lieu of Brave search.
 
-#### Code Interpreter
-
-The Code Interpreter allows execution of Python code within a controlled environment.
-
-```python
-# Register Code Interpreter tool group
-client.toolgroups.register(
-    toolgroup_id="builtin::code_interpreter", provider_id="code_interpreter"
-)
-```
-
-Features:
-- Secure execution environment using `bwrap` sandboxing
-- Matplotlib support for generating plots
-- Disabled dangerous system operations
-- Configurable execution timeouts
-
-> ⚠️ Important: The code interpreter tool can operate in a controlled environment locally or on Podman containers. To ensure proper functionality in containerized environments:
-> - The container requires privileged access (e.g., --privileged).
-> - Users without sufficient permissions may encounter permission errors. (`bwrap: Can't mount devpts on /newroot/dev/pts: Permission denied`)
-> - 🔒 Security Warning: Privileged mode grants elevated access and bypasses security restrictions. Use only in local, isolated, or controlled environments.
 
 #### WolframAlpha
 
@@ -102,7 +81,7 @@ Features:
 - Context retrieval with token limits
 
 
-> **Note:** By default, llama stack run.yaml defines toolgroups for web search, code interpreter and rag, that are provided by tavily-search, code-interpreter and rag providers.
+> **Note:** By default, the llama stack `run.yaml` defines toolgroups for web search, WolframAlpha, and RAG, provided by the tavily-search, wolfram-alpha, and rag providers respectively.
 
 ## Model Context Protocol (MCP) Tools
 
@@ -186,34 +165,6 @@ all_tools = client.tools.list_tools()
 group_tools = client.tools.list_tools(toolgroup_id="search_tools")
 ```
 
-## Simple Example: Using an Agent with the Code-Interpreter Tool
-
-```python
-from llama_stack_client import Agent
-
-# Instantiate the AI agent with the given configuration
-agent = Agent(
-    client,
-    name="code-interpreter",
-    description="A code interpreter agent for executing Python code snippets",
-    instructions="""
-    You are a highly reliable, concise, and precise assistant.
-    Always show the generated code, never generate your own code, and never anticipate results.
-    """,
-    model="meta-llama/Llama-3.2-3B-Instruct",
-    tools=["builtin::code_interpreter"],
-    max_infer_iters=5,
-)
-
-# Start a session
-session_id = agent.create_session("tool_session")
-
-# Send a query to the AI agent for code execution
-response = agent.create_turn(
-    messages=[{"role": "user", "content": "Run this code: print(3 ** 4 - 5 * 2)"}],
-    session_id=session_id,
-)
-```
 ## Simple Example 2: Using an Agent with the Web Search Tool
 1. Start by registering a Tavily API key at [Tavily](https://tavily.com/).
 2. [Optional] Provide the API key directly to the Llama Stack server
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 55c6383b2..6e59dbdfb 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -22,7 +22,11 @@ from docutils import nodes
 # Read version from pyproject.toml
 with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") as f:
     pypi_url = "https://pypi.org/pypi/llama-stack/json"
-    version_tag = json.loads(requests.get(pypi_url).text)["info"]["version"]
+    headers = {
+        'User-Agent': 'pip/23.0.1 (python 3.11)',  # Mimic pip's user agent
+        'Accept': 'application/json'
+    }
+    version_tag = json.loads(requests.get(pypi_url, headers=headers).text)["info"]["version"]
     print(f"{version_tag=}")
 
     # generate the full link including text and url here
@@ -53,14 +57,6 @@ myst_enable_extensions = ["colon_fence"]
 
 html_theme = "sphinx_rtd_theme"
 html_use_relative_paths = True
-
-# html_theme = "sphinx_pdj_theme"
-# html_theme_path = [sphinx_pdj_theme.get_html_theme_path()]
-
-# html_theme = "pytorch_sphinx_theme"
-# html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
-
-
 templates_path = ["_templates"]
 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
@@ -110,6 +106,8 @@ html_theme_options = {
     "canonical_url": "https://github.com/meta-llama/llama-stack",
     "collapse_navigation": False,
     # "style_nav_header_background": "#c3c9d4",
+    'display_version': True,
+    'version_selector': True,
 }
 
 default_dark_mode = False
diff --git a/docs/source/contributing/new_api_provider.md b/docs/source/contributing/new_api_provider.md
index c412a350b..83058896a 100644
--- a/docs/source/contributing/new_api_provider.md
+++ b/docs/source/contributing/new_api_provider.md
@@ -6,7 +6,7 @@ This guide will walk you through the process of adding a new API provider to Lla
 - Begin by reviewing the [core concepts](../concepts/index.md) of Llama Stack and choose the API your provider belongs to (Inference, Safety, VectorIO, etc.)
 - Determine the provider type ({repopath}`Remote::llama_stack/providers/remote` or {repopath}`Inline::llama_stack/providers/inline`). Remote providers make requests to external services, while inline providers execute implementation locally.
 - Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`. Specify pip dependencies necessary.
-- Update any distribution {repopath}`Templates::llama_stack/templates/` build.yaml and run.yaml files if they should include your provider by default. Run {repopath}`./scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation.
+- Update any distribution {repopath}`Templates::llama_stack/templates/` `build.yaml` and `run.yaml` files if they should include your provider by default. Run {repopath}`./scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation.
 
 
 Here are some example PRs to help you get started:
@@ -33,6 +33,7 @@ Note that each provider's `sample_run_config()` method (in the configuration cla
 
 Unit tests are located in {repopath}`tests/unit`. Provider-specific unit tests are located in {repopath}`tests/unit/providers`. These tests are all run automatically as part of the CI process.
 
+Consult {repopath}`tests/unit/README.md` for more details on how to run the tests manually.
 
 ### 3. Additional end-to-end testing
 
diff --git a/docs/source/distributions/building_distro.md b/docs/source/distributions/building_distro.md
index 56b8d30a8..0dbabf8aa 100644
--- a/docs/source/distributions/building_distro.md
+++ b/docs/source/distributions/building_distro.md
@@ -178,7 +178,7 @@ image_name: ollama
 image_type: conda
 
 # If some providers are external, you can specify the path to the implementation
-external_providers_dir: /etc/llama-stack/providers.d
+external_providers_dir: ~/.llama/providers.d
 ```
 
 ```
@@ -206,7 +206,7 @@ distribution_spec:
 image_type: container
 image_name: ci-test
 # Path to external provider implementations
-external_providers_dir: /etc/llama-stack/providers.d
+external_providers_dir: ~/.llama/providers.d
 ```
 
 Here's an example for a custom Ollama provider:
@@ -271,7 +271,7 @@ Now, let's start the Llama Stack Distribution Server. You will need the YAML con
 
 ```
 llama stack run -h
-usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-ipv6] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE] [--tls-certfile TLS_CERTFILE]
+usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--env KEY=VALUE] [--tls-keyfile TLS_KEYFILE] [--tls-certfile TLS_CERTFILE]
                        [--image-type {conda,container,venv}]
                        config
 
@@ -285,7 +285,6 @@ options:
   --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
   --image-name IMAGE_NAME
                         Name of the image to run. Defaults to the current environment (default: None)
-  --disable-ipv6        Disable IPv6 support (default: False)
   --env KEY=VALUE       Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times. (default: [])
   --tls-keyfile TLS_KEYFILE
                         Path to TLS key file for HTTPS (default: None)
@@ -339,6 +338,48 @@ INFO:     Application startup complete.
 INFO:     Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit)
 INFO:     2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK
 ```
+### Listing Distributions
+Using the list command, you can view all existing Llama Stack distributions, including stacks built from templates, from scratch, or using custom configuration files.
+
+```
+llama stack list -h
+usage: llama stack list [-h]
+
+list the build stacks
+
+options:
+  -h, --help  show this help message and exit
+```
+
+Example Usage
+
+```
+llama stack list
+```
+
+### Removing a Distribution
+Use the remove command to delete a distribution you've previously built.
+
+```
+llama stack rm -h
+usage: llama stack rm [-h] [--all] [name]
+
+Remove the build stack
+
+positional arguments:
+  name        Name of the stack to delete (default: None)
+
+options:
+  -h, --help  show this help message and exit
+  --all, -a   Delete all stacks (use with caution) (default: False)
+```
+
+Example
+```
+llama stack rm llamastack-test
+```
+
+To keep your environment organized and avoid clutter, consider using `llama stack list` to review old or unused distributions and `llama stack rm <name>` to delete them when they're no longer needed.
 
 ### Troubleshooting
 
diff --git a/docs/source/distributions/configuration.md b/docs/source/distributions/configuration.md
index c06632991..de99b6576 100644
--- a/docs/source/distributions/configuration.md
+++ b/docs/source/distributions/configuration.md
@@ -53,6 +53,13 @@ models:
   provider_id: ollama
   provider_model_id: null
 shields: []
+server:
+  port: 8321
+  auth:
+    provider_type: "kubernetes"
+    config:
+      api_server_url: "https://kubernetes.default.svc"
+      ca_cert_path: "/path/to/ca.crt"
 ```
 
 Let's break this down into the different sections. The first section specifies the set of APIs that the stack server will serve:
@@ -102,6 +109,227 @@ A Model is an instance of a "Resource" (see [Concepts](../concepts/index)) and i
 
 What's with the `provider_model_id` field? This is an identifier for the model inside the provider's model catalog. Contrast it with `model_id` which is the identifier for the same model for Llama Stack's purposes. For example, you may want to name "llama3.2:vision-11b" as "image_captioning_model" when you use it in your Stack interactions. When omitted, the server will set `provider_model_id` to be the same as `model_id`.
 
+## Server Configuration
+
+The `server` section configures the HTTP server that serves the Llama Stack APIs:
+
+```yaml
+server:
+  port: 8321  # Port to listen on (default: 8321)
+  tls_certfile: "/path/to/cert.pem"  # Optional: Path to TLS certificate for HTTPS
+  tls_keyfile: "/path/to/key.pem"    # Optional: Path to TLS key for HTTPS
+```
+
+### Authentication Configuration
+
+The `auth` section configures authentication for the server. When configured, all API requests must include a valid Bearer token in the Authorization header:
+
+```
+Authorization: Bearer <token>
+```
+
+The server supports multiple authentication providers:
+
+#### OAuth 2.0/OpenID Connect Provider with Kubernetes
+
+The Kubernetes cluster must be configured to use a service account for authentication.
+
+```bash
+kubectl create namespace llama-stack
+kubectl create serviceaccount llama-stack-auth -n llama-stack
+kubectl create rolebinding llama-stack-auth-rolebinding --clusterrole=admin --serviceaccount=llama-stack:llama-stack-auth -n llama-stack
+kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token
+```
+
+Make sure the `kube-apiserver` runs with `--anonymous-auth=true` to allow unauthenticated requests
+and that the correct RoleBinding is created to allow access to the necessary resources (here,
+anonymous read access to the cluster's JWKS endpoint). If that is not the case, you can create one as follows:
+
+```yaml
+# allow-anonymous-openid.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-anonymous-openid
+rules:
+- nonResourceURLs: ["/openid/v1/jwks"]
+  verbs: ["get"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-anonymous-openid
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-anonymous-openid
+subjects:
+- kind: User
+  name: system:anonymous
+  apiGroup: rbac.authorization.k8s.io
+```
+
+And then apply the configuration:
+```bash
+kubectl apply -f allow-anonymous-openid.yaml
+```
+
+The `oauth2_token` provider validates tokens against the Kubernetes API server through the OIDC provider:
+```yaml
+server:
+  auth:
+    provider_type: "oauth2_token"
+    config:
+      jwks:
+        uri: "https://kubernetes.default.svc"
+        key_recheck_period: 3600
+      tls_cafile: "/path/to/ca.crt"
+      issuer: "https://kubernetes.default.svc"
+      audience: "https://kubernetes.default.svc"
+```
+
+To find your cluster's audience, run:
+```bash
+kubectl create token default --duration=1h | cut -d. -f2 | base64 -d | jq .aud
+```
+
+For the issuer, you can use the OIDC provider's URL:
+```bash
+kubectl get --raw /.well-known/openid-configuration | jq .issuer
+```
+
+For the `tls_cafile`, you can use the CA certificate of the OIDC provider:
+```bash
+kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}'
+```
+
+The provider extracts user information from the JWT token:
+- Username from the `sub` claim becomes a role
+- Kubernetes groups become teams
+
+You can easily validate a request by running:
+
+```bash
+curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers
+```
+
+#### Custom Provider
+The `custom` provider validates tokens against an external authentication endpoint:
+```yaml
+server:
+  auth:
+    provider_type: "custom"
+    config:
+      endpoint: "https://auth.example.com/validate"  # URL of the auth endpoint
+```
+
+The custom endpoint receives a POST request with:
+```json
+{
+  "api_key": "",
+  "request": {
+    "path": "/api/v1/endpoint",
+    "headers": {
+      "content-type": "application/json",
+      "user-agent": "curl/7.64.1"
+    },
+    "params": {
+      "key": ["value"]
+    }
+  }
+}
+```
+
+And must respond with:
+```json
+{
+  "access_attributes": {
+    "roles": ["admin", "user"],
+    "teams": ["ml-team", "nlp-team"],
+    "projects": ["llama-3", "project-x"],
+    "namespaces": ["research"]
+  },
+  "message": "Authentication successful"
+}
+```
+
+If no access attributes are returned, the token is used as a namespace.
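+
+As a rough sketch, a compatible endpoint could be implemented with any web framework; the FastAPI
+example below is illustrative only (the token check and attribute values are placeholders):
+```python
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+
+app = FastAPI()
+
+
+class AuthRequest(BaseModel):
+    api_key: str
+    request: dict
+
+
+@app.post("/validate")
+def validate(body: AuthRequest):
+    # Replace with a real token check against your identity provider.
+    if body.api_key != "expected-token":
+        raise HTTPException(status_code=401, detail="Invalid token")
+    return {
+        "access_attributes": {"roles": ["user"], "teams": ["ml-team"]},
+        "message": "Authentication successful",
+    }
+```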
+
+### Quota Configuration
+
+The `quota` section allows you to enable server-side request throttling for both
+authenticated and anonymous clients. This is useful for preventing abuse, enforcing
+fairness across tenants, and controlling infrastructure costs without requiring
+client-side rate limiting or external proxies.
+
+Quotas are disabled by default. When enabled, each client is tracked using either:
+
+* Their authenticated `client_id` (derived from the Bearer token), or
+* Their IP address (fallback for anonymous requests)
+
+Quota state is stored in a SQLite-backed key-value store, and rate limits are applied
+within a configurable time window (currently only `day` is supported).
+
+#### Example
+
+```yaml
+server:
+  quota:
+    kvstore:
+      type: sqlite
+      db_path: ./quotas.db
+    anonymous_max_requests: 100
+    authenticated_max_requests: 1000
+    period: day
+```
+
+#### Configuration Options
+
+| Field                        | Description                                                                |
+| ---------------------------- | -------------------------------------------------------------------------- |
+| `kvstore`                    | Required. Backend storage config for tracking request counts.              |
+| `kvstore.type`               | Must be `"sqlite"` for now. Other backends may be supported in the future. |
+| `kvstore.db_path`            | File path to the SQLite database.                                          |
+| `anonymous_max_requests`     | Max requests per period for unauthenticated clients.                       |
+| `authenticated_max_requests` | Max requests per period for authenticated clients.                         |
+| `period`                     | Time window for quota enforcement. Only `"day"` is supported.              |
+
+> Note: if `authenticated_max_requests` is set but no authentication provider is
+configured, the server will fall back to applying `anonymous_max_requests` to all
+clients.
+
+#### Example with Authentication Enabled
+
+```yaml
+server:
+  port: 8321
+  auth:
+    provider_type: custom
+    config:
+      endpoint: https://auth.example.com/validate
+  quota:
+    kvstore:
+      type: sqlite
+      db_path: ./quotas.db
+    anonymous_max_requests: 100
+    authenticated_max_requests: 1000
+    period: day
+```
+
+If a client exceeds their limit, the server responds with:
+
+```http
+HTTP/1.1 429 Too Many Requests
+Content-Type: application/json
+
+{
+  "error": {
+    "message": "Quota exceeded"
+  }
+}
+```
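+
+Clients should treat this response as a signal to back off. A minimal sketch (the wait time and
+endpoint are illustrative, not a prescribed retry policy):
+```python
+import time
+
+import requests
+
+headers = {"Authorization": "Bearer <token>"}
+resp = requests.get("http://127.0.0.1:8321/v1/providers", headers=headers)
+if resp.status_code == 429:
+    time.sleep(60)  # wait for the quota window before retrying
+    resp = requests.get("http://127.0.0.1:8321/v1/providers", headers=headers)
+```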
+
 ## Extending to handle Safety
 
 Configuring Safety can be a little involved so it is instructive to go through an example.
diff --git a/docs/source/distributions/kubernetes_deployment.md b/docs/source/distributions/kubernetes_deployment.md
index 21ec02012..f43039824 100644
--- a/docs/source/distributions/kubernetes_deployment.md
+++ b/docs/source/distributions/kubernetes_deployment.md
@@ -172,7 +172,7 @@ spec:
       - name: llama-stack
         image: localhost/llama-stack-run-k8s:latest
         imagePullPolicy: IfNotPresent
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--yaml-config", "/app/config.yaml"]
+        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/app/config.yaml"]
         ports:
           - containerPort: 5000
         volumeMounts:
diff --git a/docs/source/distributions/remote_hosted_distro/watsonx.md b/docs/source/distributions/remote_hosted_distro/watsonx.md
index 018dc2a3c..ec1b98059 100644
--- a/docs/source/distributions/remote_hosted_distro/watsonx.md
+++ b/docs/source/distributions/remote_hosted_distro/watsonx.md
@@ -18,11 +18,11 @@ The `llamastack/distribution-watsonx` distribution consists of the following pro
 | agents | `inline::meta-reference` |
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
-| inference | `remote::watsonx` |
+| inference | `remote::watsonx`, `inline::sentence-transformers` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss` |
 
 
@@ -70,7 +70,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-watsonx \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \
diff --git a/docs/source/distributions/self_hosted_distro/bedrock.md b/docs/source/distributions/self_hosted_distro/bedrock.md
index 302d6932b..d7aedbfb2 100644
--- a/docs/source/distributions/self_hosted_distro/bedrock.md
+++ b/docs/source/distributions/self_hosted_distro/bedrock.md
@@ -19,7 +19,7 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
 | safety | `remote::bedrock` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
diff --git a/docs/source/distributions/self_hosted_distro/cerebras.md b/docs/source/distributions/self_hosted_distro/cerebras.md
index 8f441823a..3c4db1b75 100644
--- a/docs/source/distributions/self_hosted_distro/cerebras.md
+++ b/docs/source/distributions/self_hosted_distro/cerebras.md
@@ -12,7 +12,7 @@ The `llamastack/distribution-cerebras` distribution consists of the following pr
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -52,7 +52,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-cerebras \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```
diff --git a/docs/source/distributions/self_hosted_distro/dell.md b/docs/source/distributions/self_hosted_distro/dell.md
index 96b0ef478..eded3bdc4 100644
--- a/docs/source/distributions/self_hosted_distro/dell.md
+++ b/docs/source/distributions/self_hosted_distro/dell.md
@@ -23,7 +23,7 @@ The `llamastack/distribution-dell` distribution consists of the following provid
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -155,7 +155,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-dell \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \
diff --git a/docs/source/distributions/self_hosted_distro/fireworks.md b/docs/source/distributions/self_hosted_distro/fireworks.md
index ee9ddc818..d36e94748 100644
--- a/docs/source/distributions/self_hosted_distro/fireworks.md
+++ b/docs/source/distributions/self_hosted_distro/fireworks.md
@@ -22,7 +22,7 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
diff --git a/docs/source/distributions/self_hosted_distro/groq.md b/docs/source/distributions/self_hosted_distro/groq.md
index b18be1b2f..1b2194ad8 100644
--- a/docs/source/distributions/self_hosted_distro/groq.md
+++ b/docs/source/distributions/self_hosted_distro/groq.md
@@ -22,7 +22,7 @@ The `llamastack/distribution-groq` distribution consists of the following provid
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime` |
 | vector_io | `inline::faiss` |
 
 
diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
index f58d7bbee..8b9dcec55 100644
--- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
+++ b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md
@@ -22,7 +22,7 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
diff --git a/docs/source/distributions/self_hosted_distro/nvidia.md b/docs/source/distributions/self_hosted_distro/nvidia.md
index 4407de779..e84b5c525 100644
--- a/docs/source/distributions/self_hosted_distro/nvidia.md
+++ b/docs/source/distributions/self_hosted_distro/nvidia.md
@@ -6,7 +6,7 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 | API | Provider(s) |
 |-----|-------------|
 | agents | `inline::meta-reference` |
-| datasetio | `inline::localfs` |
+| datasetio | `inline::localfs`, `remote::nvidia` |
 | eval | `remote::nvidia` |
 | inference | `remote::nvidia` |
 | post_training | `remote::nvidia` |
@@ -143,7 +143,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-nvidia \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```
diff --git a/docs/source/distributions/self_hosted_distro/ollama.md b/docs/source/distributions/self_hosted_distro/ollama.md
index 2358a52a7..4d148feda 100644
--- a/docs/source/distributions/self_hosted_distro/ollama.md
+++ b/docs/source/distributions/self_hosted_distro/ollama.md
@@ -19,10 +19,11 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::ollama` |
+| post_training | `inline::huggingface` |
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -97,7 +98,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-ollama \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \
diff --git a/docs/source/distributions/self_hosted_distro/passthrough.md b/docs/source/distributions/self_hosted_distro/passthrough.md
index 04fc9d927..39f076be4 100644
--- a/docs/source/distributions/self_hosted_distro/passthrough.md
+++ b/docs/source/distributions/self_hosted_distro/passthrough.md
@@ -22,7 +22,7 @@ The `llamastack/distribution-passthrough` distribution consists of the following
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
diff --git a/docs/source/distributions/self_hosted_distro/remote-vllm.md b/docs/source/distributions/self_hosted_distro/remote-vllm.md
index 46df56008..6e7cf410d 100644
--- a/docs/source/distributions/self_hosted_distro/remote-vllm.md
+++ b/docs/source/distributions/self_hosted_distro/remote-vllm.md
@@ -21,7 +21,7 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -233,7 +233,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -255,7 +255,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-remote-vllm \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \
diff --git a/docs/source/distributions/self_hosted_distro/sambanova.md b/docs/source/distributions/self_hosted_distro/sambanova.md
index 76b976d78..bb4842362 100644
--- a/docs/source/distributions/self_hosted_distro/sambanova.md
+++ b/docs/source/distributions/self_hosted_distro/sambanova.md
@@ -16,10 +16,10 @@ The `llamastack/distribution-sambanova` distribution consists of the following p
 | API | Provider(s) |
 |-----|-------------|
 | agents | `inline::meta-reference` |
-| inference | `remote::sambanova` |
-| safety | `inline::llama-guard` |
+| inference | `remote::sambanova`, `inline::sentence-transformers` |
+| safety | `remote::sambanova` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -28,53 +28,64 @@ The `llamastack/distribution-sambanova` distribution consists of the following p
 The following environment variables can be configured:
 
 - `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `8321`)
-- `SAMBANOVA_API_KEY`: SambaNova.AI API Key (default: ``)
+- `SAMBANOVA_API_KEY`: SambaNova API Key (default: ``)
 
 ### Models
 
 The following models are available by default:
 
-- `Meta-Llama-3.1-8B-Instruct (aliases: meta-llama/Llama-3.1-8B-Instruct)`
-- `Meta-Llama-3.1-70B-Instruct (aliases: meta-llama/Llama-3.1-70B-Instruct)`
-- `Meta-Llama-3.1-405B-Instruct (aliases: meta-llama/Llama-3.1-405B-Instruct-FP8)`
-- `Meta-Llama-3.2-1B-Instruct (aliases: meta-llama/Llama-3.2-1B-Instruct)`
-- `Meta-Llama-3.2-3B-Instruct (aliases: meta-llama/Llama-3.2-3B-Instruct)`
-- `Meta-Llama-3.3-70B-Instruct (aliases: meta-llama/Llama-3.3-70B-Instruct)`
-- `Llama-3.2-11B-Vision-Instruct (aliases: meta-llama/Llama-3.2-11B-Vision-Instruct)`
-- `Llama-3.2-90B-Vision-Instruct (aliases: meta-llama/Llama-3.2-90B-Vision-Instruct)`
-- `Meta-Llama-Guard-3-8B (aliases: meta-llama/Llama-Guard-3-8B)`
-- `Llama-4-Scout-17B-16E-Instruct (aliases: meta-llama/Llama-4-Scout-17B-16E-Instruct)`
+- `sambanova/Meta-Llama-3.1-8B-Instruct (aliases: meta-llama/Llama-3.1-8B-Instruct)`
+- `sambanova/Meta-Llama-3.1-405B-Instruct (aliases: meta-llama/Llama-3.1-405B-Instruct-FP8)`
+- `sambanova/Meta-Llama-3.2-1B-Instruct (aliases: meta-llama/Llama-3.2-1B-Instruct)`
+- `sambanova/Meta-Llama-3.2-3B-Instruct (aliases: meta-llama/Llama-3.2-3B-Instruct)`
+- `sambanova/Meta-Llama-3.3-70B-Instruct (aliases: meta-llama/Llama-3.3-70B-Instruct)`
+- `sambanova/Llama-3.2-11B-Vision-Instruct (aliases: meta-llama/Llama-3.2-11B-Vision-Instruct)`
+- `sambanova/Llama-3.2-90B-Vision-Instruct (aliases: meta-llama/Llama-3.2-90B-Vision-Instruct)`
+- `sambanova/Llama-4-Scout-17B-16E-Instruct (aliases: meta-llama/Llama-4-Scout-17B-16E-Instruct)`
+- `sambanova/Llama-4-Maverick-17B-128E-Instruct (aliases: meta-llama/Llama-4-Maverick-17B-128E-Instruct)`
+- `sambanova/Meta-Llama-Guard-3-8B (aliases: meta-llama/Llama-Guard-3-8B)`
 
 
 ### Prerequisite: API Keys
 
-Make sure you have access to a SambaNova API Key. You can get one by visiting [SambaNova.ai](https://sambanova.ai/).
+Make sure you have access to a SambaNova API Key. You can get one by visiting [SambaNova.ai](http://cloud.sambanova.ai?utm_source=llamastack&utm_medium=external&utm_campaign=cloud_signup).
 
 
 ## Running Llama Stack with SambaNova
 
 You can do this via Conda (build code) or Docker which has a pre-built image.
 
-### Via Docker
 
-This method allows you to get started quickly without having to build the distribution code.
+### Via Docker
 
 ```bash
 LLAMA_STACK_PORT=8321
+llama stack build --template sambanova --image-type container
 docker run \
   -it \
-  --pull always \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-  llamastack/distribution-sambanova \
+  -v ~/.llama:/root/.llama \
+  distribution-sambanova \
   --port $LLAMA_STACK_PORT \
   --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
 ```
 
+
+### Via Venv
+
+```bash
+llama stack build --template sambanova --image-type venv
+llama stack run --image-type venv ~/.llama/distributions/sambanova/sambanova-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
+```
+
+
 ### Via Conda
 
 ```bash
 llama stack build --template sambanova --image-type conda
-llama stack run ./run.yaml \
+llama stack run --image-type conda ~/.llama/distributions/sambanova/sambanova-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
 ```
diff --git a/docs/source/distributions/self_hosted_distro/tgi.md b/docs/source/distributions/self_hosted_distro/tgi.md
index f6b14b064..24f9d03ec 100644
--- a/docs/source/distributions/self_hosted_distro/tgi.md
+++ b/docs/source/distributions/self_hosted_distro/tgi.md
@@ -23,7 +23,7 @@ The `llamastack/distribution-tgi` distribution consists of the following provide
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
@@ -117,7 +117,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-tgi \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
diff --git a/docs/source/distributions/self_hosted_distro/together.md b/docs/source/distributions/self_hosted_distro/together.md
index 3ebb1f59e..adfc2c472 100644
--- a/docs/source/distributions/self_hosted_distro/together.md
+++ b/docs/source/distributions/self_hosted_distro/together.md
@@ -22,7 +22,7 @@ The `llamastack/distribution-together` distribution consists of the following pr
 | safety | `inline::llama-guard` |
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
+| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
 | vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
 
 
diff --git a/docs/source/getting_started/detailed_tutorial.md b/docs/source/getting_started/detailed_tutorial.md
index a1504f249..e40a4903a 100644
--- a/docs/source/getting_started/detailed_tutorial.md
+++ b/docs/source/getting_started/detailed_tutorial.md
@@ -42,7 +42,7 @@ powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | ie
 Setup your virtual environment.
 
 ```bash
-uv venv --python 3.10
+uv sync --python 3.10
 source .venv/bin/activate
 ```
 ## Step 2:  Run Llama Stack
@@ -445,7 +445,6 @@ from llama_stack_client import LlamaStackClient
 from llama_stack_client import Agent, AgentEventLogger
 from llama_stack_client.types import Document
 import uuid
-from termcolor import cprint
 
 client = LlamaStackClient(base_url="http://localhost:8321")
 
@@ -463,7 +462,6 @@ urls = [
     "memory_optimizations.rst",
     "chat.rst",
     "llama3.rst",
-    "datasets.rst",
     "qat_finetune.rst",
     "lora_finetune.rst",
 ]
diff --git a/docs/source/providers/external.md b/docs/source/providers/external.md
index 5aab5ee0f..55211ac5f 100644
--- a/docs/source/providers/external.md
+++ b/docs/source/providers/external.md
@@ -10,7 +10,7 @@ Llama Stack supports external providers that live outside of the main codebase.
 To enable external providers, you need to configure the `external_providers_dir` in your Llama Stack configuration. This directory should contain your external provider specifications:
 
 ```yaml
-external_providers_dir: /etc/llama-stack/providers.d/
+external_providers_dir: ~/.llama/providers.d/
 ```
 
 ## Directory Structure
@@ -53,7 +53,9 @@ Here's a list of known external providers that you can use with Llama Stack:
 | Name | Description | API | Type | Repository |
 |------|-------------|-----|------|------------|
 | KubeFlow Training | Train models with KubeFlow | Post Training | Remote | [llama-stack-provider-kft](https://github.com/opendatahub-io/llama-stack-provider-kft) |
+| KubeFlow Pipelines | Train models with KubeFlow Pipelines | Post Training | Inline **and** Remote | [llama-stack-provider-kfp-trainer](https://github.com/opendatahub-io/llama-stack-provider-kfp-trainer) |
 | RamaLama | Inference models with RamaLama | Inference | Remote | [ramalama-stack](https://github.com/containers/ramalama-stack) |
+| TrustyAI LM-Eval | Evaluate models with TrustyAI LM-Eval | Eval | Remote | [llama-stack-provider-lmeval](https://github.com/trustyai-explainability/llama-stack-provider-lmeval) |
 
 ### Remote Provider Specification
 
@@ -180,7 +182,7 @@ dependencies = ["llama-stack", "pydantic", "ollama", "aiohttp"]
 3. Create the provider specification:
 
 ```yaml
-# /etc/llama-stack/providers.d/remote/inference/custom_ollama.yaml
+# ~/.llama/providers.d/remote/inference/custom_ollama.yaml
 adapter:
   adapter_type: custom_ollama
   pip_packages: ["ollama", "aiohttp"]
@@ -199,7 +201,7 @@ uv pip install -e .
 5. Configure Llama Stack to use external providers:
 
 ```yaml
-external_providers_dir: /etc/llama-stack/providers.d/
+external_providers_dir: ~/.llama/providers.d/
 ```
 
 The provider will now be available in Llama Stack with the type `remote::custom_ollama`.
diff --git a/docs/source/providers/index.md b/docs/source/providers/index.md
index 1d1a6e081..1f5026479 100644
--- a/docs/source/providers/index.md
+++ b/docs/source/providers/index.md
@@ -30,6 +30,18 @@ Runs inference with an LLM.
 ## Post Training
 Fine-tunes a model.
 
+#### Post Training Providers
+The following providers are available for Post Training:
+
+```{toctree}
+:maxdepth: 1
+
+external
+post_training/huggingface
+post_training/torchtune
+post_training/nvidia_nemo
+```
+
 ## Safety
 Applies safety policies to the output at a Systems (not only model) level.
 
diff --git a/docs/source/providers/post_training/huggingface.md b/docs/source/providers/post_training/huggingface.md
new file mode 100644
index 000000000..c342203a8
--- /dev/null
+++ b/docs/source/providers/post_training/huggingface.md
@@ -0,0 +1,122 @@
+---
+orphan: true
+---
+# HuggingFace SFTTrainer
+
+[HuggingFace SFTTrainer](https://huggingface.co/docs/trl/en/sft_trainer) is an inline post training provider for Llama Stack. It allows you to run supervised fine-tuning on a variety of models using many datasets.
+
+## Features
+
+- Simple access through the post_training API
+- Fully integrated with Llama Stack
+- GPU support, CPU support, and MPS support (macOS Metal Performance Shaders)
+
+## Usage
+
+To use the HF SFTTrainer in your Llama Stack project, follow these steps:
+
+1. Configure your Llama Stack project to use this provider.
+2. Kick off an SFT job using the Llama Stack post_training API.
+
+## Setup
+
+You can access the HuggingFace trainer via the `ollama` distribution:
+
+```bash
+llama stack build --template ollama --image-type venv
+llama stack run --image-type venv ~/.llama/distributions/ollama/ollama-run.yaml
+```
+
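+To confirm the post_training provider is wired into the running stack, you can list the active
+providers from the client CLI. This is a quick sanity check, not part of the training flow; it
+assumes the `llama-stack-client` CLI is installed, and the exact output format may vary by version:
+
+```bash
+llama-stack-client providers list
+```
+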
+## Run Training
+
+You can access the provider and the `supervised_fine_tune` method via the post_training API:
+
+```python
+import time
+import uuid
+
+
+from llama_stack_client.types import (
+    post_training_supervised_fine_tune_params,
+    algorithm_config_param,
+)
+
+
+def create_http_client():
+    from llama_stack_client import LlamaStackClient
+
+    return LlamaStackClient(base_url="http://localhost:8321")
+
+
+client = create_http_client()
+
+# Example Dataset
+client.datasets.register(
+    purpose="post-training/messages",
+    source={
+        "type": "uri",
+        "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
+    },
+    dataset_id="simpleqa",
+)
+
+training_config = post_training_supervised_fine_tune_params.TrainingConfig(
+    data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig(
+        batch_size=32,
+        data_format="instruct",
+        dataset_id="simpleqa",
+        shuffle=True,
+    ),
+    gradient_accumulation_steps=1,
+    max_steps_per_epoch=0,
+    max_validation_steps=1,
+    n_epochs=4,
+)
+
+algorithm_config = algorithm_config_param.LoraFinetuningConfig(  # this config is also currently mandatory but should not be
+    alpha=1,
+    apply_lora_to_mlp=True,
+    apply_lora_to_output=False,
+    lora_attn_modules=["q_proj"],
+    rank=1,
+    type="LoRA",
+)
+
+job_uuid = f"test-job{uuid.uuid4()}"
+
+# Example Model
+training_model = "ibm-granite/granite-3.3-8b-instruct"
+
+start_time = time.time()
+response = client.post_training.supervised_fine_tune(
+    job_uuid=job_uuid,
+    logger_config={},
+    model=training_model,
+    hyperparam_search_config={},
+    training_config=training_config,
+    algorithm_config=algorithm_config,
+    checkpoint_dir="output",
+)
+print("Job: ", job_uuid)
+
+
+# Wait for the job to complete!
+while True:
+    status = client.post_training.job.status(job_uuid=job_uuid)
+    if not status:
+        print("Job not found")
+        break
+
+    print(status)
+    if status.status == "completed":
+        break
+
+    print("Waiting for job to complete...")
+    time.sleep(5)
+
+end_time = time.time()
+print("Job completed in", end_time - start_time, "seconds!")
+
+print("Artifacts:")
+print(client.post_training.job.artifacts(job_uuid=job_uuid))
+```
diff --git a/docs/source/providers/post_training/nvidia_nemo.md b/docs/source/providers/post_training/nvidia_nemo.md
new file mode 100644
index 000000000..1a7adbe16
--- /dev/null
+++ b/docs/source/providers/post_training/nvidia_nemo.md
@@ -0,0 +1,163 @@
+---
+orphan: true
+---
+# NVIDIA NEMO
+
+[NVIDIA NEMO](https://developer.nvidia.com/nemo-framework) is a remote post training provider for Llama Stack. It provides enterprise-grade fine-tuning capabilities through NVIDIA's NeMo Customizer service.
+
+## Features
+
+- Enterprise-grade fine-tuning capabilities
+- Support for LoRA and SFT fine-tuning
+- Integration with NVIDIA's NeMo Customizer service
+- Support for various NVIDIA-optimized models
+- Efficient training with NVIDIA hardware acceleration
+
+## Usage
+
+To use NVIDIA NEMO in your Llama Stack project, follow these steps:
+
+1. Configure your Llama Stack project to use this provider.
+2. Set up your NVIDIA API credentials.
+3. Kick off a fine-tuning job using the Llama Stack post_training API.
+
+## Setup
+
+You'll need to set the following environment variables:
+
+```bash
+export NVIDIA_API_KEY="your-api-key"
+export NVIDIA_DATASET_NAMESPACE="default"
+export NVIDIA_CUSTOMIZER_URL="your-customizer-url"
+export NVIDIA_PROJECT_ID="your-project-id"
+export NVIDIA_OUTPUT_MODEL_DIR="your-output-model-dir"
+```
+
+## Run Training
+
+You can access the provider and the `supervised_fine_tune` method via the post_training API:
+
+```python
+import time
+import uuid
+
+from llama_stack_client.types import (
+    post_training_supervised_fine_tune_params,
+    algorithm_config_param,
+)
+
+
+def create_http_client():
+    from llama_stack_client import LlamaStackClient
+
+    return LlamaStackClient(base_url="http://localhost:8321")
+
+
+client = create_http_client()
+
+# Example Dataset
+client.datasets.register(
+    purpose="post-training/messages",
+    source={
+        "type": "uri",
+        "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
+    },
+    dataset_id="simpleqa",
+)
+
+training_config = post_training_supervised_fine_tune_params.TrainingConfig(
+    data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig(
+        batch_size=8,  # Default batch size for NEMO
+        data_format="instruct",
+        dataset_id="simpleqa",
+        shuffle=True,
+    ),
+    n_epochs=50,  # Default epochs for NEMO
+    optimizer_config=post_training_supervised_fine_tune_params.TrainingConfigOptimizerConfig(
+        lr=0.0001,  # Default learning rate
+        weight_decay=0.01,  # NEMO-specific parameter
+    ),
+    # NEMO-specific parameters
+    log_every_n_steps=None,
+    val_check_interval=0.25,
+    sequence_packing_enabled=False,
+    hidden_dropout=None,
+    attention_dropout=None,
+    ffn_dropout=None,
+)
+
+algorithm_config = algorithm_config_param.LoraFinetuningConfig(
+    alpha=16,  # Default alpha for NEMO
+    type="LoRA",
+)
+
+job_uuid = f"test-job{uuid.uuid4()}"
+
+# Example Model - must be a supported NEMO model
+training_model = "meta/llama-3.1-8b-instruct"
+
+start_time = time.time()
+response = client.post_training.supervised_fine_tune(
+    job_uuid=job_uuid,
+    logger_config={},
+    model=training_model,
+    hyperparam_search_config={},
+    training_config=training_config,
+    algorithm_config=algorithm_config,
+    checkpoint_dir="output",
+)
+print("Job: ", job_uuid)
+
+# Wait for the job to complete!
+while True:
+    status = client.post_training.job.status(job_uuid=job_uuid)
+    if not status:
+        print("Job not found")
+        break
+
+    print(status)
+    if status.status == "completed":
+        break
+
+    print("Waiting for job to complete...")
+    time.sleep(5)
+
+end_time = time.time()
+print("Job completed in", end_time - start_time, "seconds!")
+
+print("Artifacts:")
+print(client.post_training.job.artifacts(job_uuid=job_uuid))
+```
+
+## Supported Models
+
+Currently supports the following models:
+- meta/llama-3.1-8b-instruct
+- meta/llama-3.2-1b-instruct
+
+## Supported Parameters
+
+### TrainingConfig
+- n_epochs (default: 50)
+- data_config
+- optimizer_config
+- log_every_n_steps
+- val_check_interval (default: 0.25)
+- sequence_packing_enabled (default: False)
+- hidden_dropout (0.0-1.0)
+- attention_dropout (0.0-1.0)
+- ffn_dropout (0.0-1.0)
+
+### DataConfig
+- dataset_id
+- batch_size (default: 8)
+
+### OptimizerConfig
+- lr (default: 0.0001)
+- weight_decay (default: 0.01)
+
+### LoRA Config
+- alpha (default: 16)
+- type (must be "LoRA")
+
+Note: Some parameters from the standard Llama Stack API are not supported and will be ignored with a warning.
diff --git a/docs/source/providers/post_training/torchtune.md b/docs/source/providers/post_training/torchtune.md
new file mode 100644
index 000000000..ef72505b1
--- /dev/null
+++ b/docs/source/providers/post_training/torchtune.md
@@ -0,0 +1,125 @@
+---
+orphan: true
+---
+# TorchTune
+
+[TorchTune](https://github.com/pytorch/torchtune) is an inline post training provider for Llama Stack. It provides a simple and efficient way to fine-tune language models using PyTorch.
+
+## Features
+
+- Simple access through the post_training API
+- Fully integrated with Llama Stack
+- GPU support and single-device capabilities
+- Support for LoRA
+
+## Usage
+
+To use TorchTune in your Llama Stack project, follow these steps:
+
+1. Configure your Llama Stack project to use this provider.
+2. Kick off a fine-tuning job using the Llama Stack post_training API.
+
+## Setup
+
+You can access the TorchTune trainer by writing your own YAML configuration that points to the provider:
+
+```yaml
+post_training:
+  - provider_id: torchtune
+    provider_type: inline::torchtune
+    config: {}
+```
+
+You can then build and run your own stack with this provider, as sketched below.
+
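+A minimal run sketch, assuming you have already built a distribution whose run configuration
+includes the `post_training` block above (the path below is illustrative, not a real template):
+
+```bash
+llama stack run --image-type venv ~/.llama/distributions/my-torchtune-stack/run.yaml
+```
+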
+## Run Training
+
+You can access the provider and the `supervised_fine_tune` method via the post_training API:
+
+```python
+import time
+import uuid
+
+from llama_stack_client.types import (
+    post_training_supervised_fine_tune_params,
+    algorithm_config_param,
+)
+
+
+def create_http_client():
+    from llama_stack_client import LlamaStackClient
+
+    return LlamaStackClient(base_url="http://localhost:8321")
+
+
+client = create_http_client()
+
+# Example Dataset
+client.datasets.register(
+    purpose="post-training/messages",
+    source={
+        "type": "uri",
+        "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
+    },
+    dataset_id="simpleqa",
+)
+
+training_config = post_training_supervised_fine_tune_params.TrainingConfig(
+    data_config=post_training_supervised_fine_tune_params.TrainingConfigDataConfig(
+        batch_size=32,
+        data_format="instruct",
+        dataset_id="simpleqa",
+        shuffle=True,
+    ),
+    gradient_accumulation_steps=1,
+    max_steps_per_epoch=0,
+    max_validation_steps=1,
+    n_epochs=4,
+)
+
+algorithm_config = algorithm_config_param.LoraFinetuningConfig(
+    alpha=1,
+    apply_lora_to_mlp=True,
+    apply_lora_to_output=False,
+    lora_attn_modules=["q_proj"],
+    rank=1,
+    type="LoRA",
+)
+
+job_uuid = f"test-job{uuid.uuid4()}"
+
+# Example Model
+training_model = "meta-llama/Llama-2-7b-hf"
+
+start_time = time.time()
+response = client.post_training.supervised_fine_tune(
+    job_uuid=job_uuid,
+    logger_config={},
+    model=training_model,
+    hyperparam_search_config={},
+    training_config=training_config,
+    algorithm_config=algorithm_config,
+    checkpoint_dir="output",
+)
+print("Job: ", job_uuid)
+
+# Wait for the job to complete!
+while True:
+    status = client.post_training.job.status(job_uuid=job_uuid)
+    if not status:
+        print("Job not found")
+        break
+
+    print(status)
+    if status.status == "completed":
+        break
+
+    print("Waiting for job to complete...")
+    time.sleep(5)
+
+end_time = time.time()
+print("Job completed in", end_time - start_time, "seconds!")
+
+print("Artifacts:")
+print(client.post_training.job.artifacts(job_uuid=job_uuid))
+```
diff --git a/docs/source/providers/vector_io/milvus.md b/docs/source/providers/vector_io/milvus.md
new file mode 100644
index 000000000..e030c85f8
--- /dev/null
+++ b/docs/source/providers/vector_io/milvus.md
@@ -0,0 +1,107 @@
+---
+orphan: true
+---
+# Milvus
+
+[Milvus](https://milvus.io/) is an inline and remote vector database provider for Llama Stack. It
+allows you to store and query vectors directly within a Milvus database.
+That means you're not limited to storing vectors in memory or in a separate service.
+
+## Features
+
+- Easy to use
+- Fully integrated with Llama Stack
+
+## Usage
+
+To use Milvus in your Llama Stack project, follow these steps:
+
+1. Install the necessary dependencies.
+2. Configure your Llama Stack project to use Milvus.
+3. Start storing and querying vectors.
+
+## Installation
+
+You can install Milvus using pymilvus:
+
+```bash
+pip install pymilvus
+```
+
+## Configuration
+
+In Llama Stack, Milvus can be configured in two ways:
+- **Inline (Local) Configuration** - Uses Milvus-Lite for local storage
+- **Remote Configuration** - Connects to a remote Milvus server
+
+### Inline (Local) Configuration
+
+The simplest method is local configuration, which requires setting `db_path`, a path for locally storing Milvus-Lite files:
+
+```yaml
+vector_io:
+  - provider_id: milvus
+    provider_type: inline::milvus
+    config:
+      db_path: ~/.llama/distributions/together/milvus_store.db
+```
+
+### Remote Configuration
+
+Remote configuration is suitable for larger data storage requirements:
+
+#### Standard Remote Connection
+
+```yaml
+vector_io:
+  - provider_id: milvus
+    provider_type: remote::milvus
+    config:
+      uri: "http://:"
+      token: ":"
+```
+
+#### TLS-Enabled Remote Connection (One-way TLS)
+
+For connections to Milvus instances with one-way TLS enabled:
+
+```yaml
+vector_io:
+  - provider_id: milvus
+    provider_type: remote::milvus
+    config:
+      uri: "https://:"
+      token: ":"
+      secure: True
+      server_pem_path: "/path/to/server.pem"
+```
+
+#### Mutual TLS (mTLS) Remote Connection
+
+For connections to Milvus instances with mutual TLS (mTLS) enabled:
+
+```yaml
+vector_io:
+  - provider_id: milvus
+    provider_type: remote::milvus
+    config:
+      uri: "https://:"
+      token: ":"
+      secure: True
+      ca_pem_path: "/path/to/ca.pem"
+      client_pem_path: "/path/to/client.pem"
+      client_key_path: "/path/to/client.key"
+```
+
+#### Key Parameters for TLS Configuration
+
+- **`secure`**: Enables TLS encryption when set to `true`. Defaults to `false`.
+- **`server_pem_path`**: Path to the **server certificate** for verifying the server’s identity (used in one-way TLS).
+- **`ca_pem_path`**: Path to the **Certificate Authority (CA) certificate** for validating the server certificate (required in mTLS).
+- **`client_pem_path`**: Path to the **client certificate** file (required for mTLS).
+- **`client_key_path`**: Path to the **client private key** file (required for mTLS).
+
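+## Usage Example
+
+Once a Milvus provider is configured, you can register a vector database against it and insert
+documents through the Llama Stack client. The following is a minimal sketch; the database id,
+embedding model, and document content are illustrative rather than prescriptive:
+
+```python
+from llama_stack_client import LlamaStackClient
+from llama_stack_client.types import Document
+
+client = LlamaStackClient(base_url="http://localhost:8321")
+
+# Register a vector database backed by the Milvus provider configured above
+client.vector_dbs.register(
+    vector_db_id="my_documents",
+    embedding_model="all-MiniLM-L6-v2",
+    embedding_dimension=384,
+    provider_id="milvus",
+)
+
+# Insert a document so it can later be retrieved via the RAG tool
+client.tool_runtime.rag_tool.insert(
+    documents=[
+        Document(
+            document_id="doc-1",
+            content="Milvus stores and searches vector embeddings.",
+            mime_type="text/plain",
+            metadata={},
+        )
+    ],
+    vector_db_id="my_documents",
+    chunk_size_in_tokens=256,
+)
+```
+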
+## Documentation
+See the [Milvus documentation](https://milvus.io/docs/install-overview.md) for more details about Milvus in general.
+
+For more details on TLS configuration, refer to the [TLS setup guide](https://milvus.io/docs/tls.md).
diff --git a/docs/source/providers/vector_io/mivus.md b/docs/source/providers/vector_io/mivus.md
deleted file mode 100644
index 8d2f043d5..000000000
--- a/docs/source/providers/vector_io/mivus.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-orphan: true
----
-# Milvus
-
-[Milvus](https://milvus.io/) is an inline and remote vector database provider for Llama Stack. It
-allows you to store and query vectors directly within a Milvus database.
-That means you're not limited to storing vectors in memory or in a separate service.
-
-## Features
-
-- Easy to use
-- Fully integrated with Llama Stack
-
-## Usage
-
-To use Milvus in your Llama Stack project, follow these steps:
-
-1. Install the necessary dependencies.
-2. Configure your Llama Stack project to use Milvus.
-3. Start storing and querying vectors.
-
-## Installation
-
-You can install Milvus using pymilvus:
-
-```bash
-pip install pymilvus
-```
-## Documentation
-See the [Milvus documentation](https://milvus.io/docs/install-overview.md) for more details about Milvus in general.
diff --git a/docs/source/providers/vector_io/sqlite-vec.md b/docs/source/providers/vector_io/sqlite-vec.md
index 43d10c751..49ba659f7 100644
--- a/docs/source/providers/vector_io/sqlite-vec.md
+++ b/docs/source/providers/vector_io/sqlite-vec.md
@@ -66,6 +66,25 @@ To use sqlite-vec in your Llama Stack project, follow these steps:
 2. Configure your Llama Stack project to use SQLite-Vec.
 3. Start storing and querying vectors.
 
+## Supported Search Modes
+
+The sqlite-vec provider supports both vector-based and keyword-based (full-text) search modes.
+
+When using the RAGTool interface, you can specify the desired search behavior via the `mode` parameter in
+`RAGQueryConfig`. For example:
+
+```python
+from llama_stack.apis.tool_runtime.rag import RAGQueryConfig
+
+query_config = RAGQueryConfig(max_chunks=6, mode="vector")
+
+results = client.tool_runtime.rag_tool.query(
+    vector_db_ids=[vector_db_id],
+    content="what is torchtune",
+    query_config=query_config,
+)
+```
+
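+For the full-text path, the same call can be issued with the mode switched to keyword search. This
+is a sketch that reuses the client and `vector_db_id` from above and assumes the keyword mode
+identifier is `"keyword"`:
+
+```python
+keyword_config = RAGQueryConfig(max_chunks=6, mode="keyword")
+
+keyword_results = client.tool_runtime.rag_tool.query(
+    vector_db_ids=[vector_db_id],
+    content="what is torchtune",
+    query_config=keyword_config,
+)
+```
+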
 ## Installation
 
 You can install SQLite-Vec using pip:
diff --git a/docs/source/references/llama_stack_client_cli_reference.md b/docs/source/references/llama_stack_client_cli_reference.md
index 0b84027f0..cd4dd4cd7 100644
--- a/docs/source/references/llama_stack_client_cli_reference.md
+++ b/docs/source/references/llama_stack_client_cli_reference.md
@@ -253,8 +253,6 @@ llama-stack-client toolgroups list
 +---------------------------+------------------+------+---------------+
 | identifier                | provider_id      | args | mcp_endpoint  |
 +===========================+==================+======+===============+
-| builtin::code_interpreter | code-interpreter | None | None          |
-+---------------------------+------------------+------+---------------+
 | builtin::rag              | rag-runtime      | None | None          |
 +---------------------------+------------------+------+---------------+
 | builtin::websearch        | tavily-search    | None | None          |
diff --git a/docs/zero_to_hero_guide/README.md b/docs/zero_to_hero_guide/README.md
index 9f756de26..96f9768de 100644
--- a/docs/zero_to_hero_guide/README.md
+++ b/docs/zero_to_hero_guide/README.md
@@ -86,11 +86,11 @@ If you're looking for more specific topics, we have a [Zero to Hero Guide](#next
    llama stack build --template ollama --image-type conda
    ```
    **Expected Output:**
-   ```
+   ```bash
    ...
-   Build Successful! Next steps:
-   1. Set the environment variables: LLAMA_STACK_PORT, OLLAMA_URL, INFERENCE_MODEL, SAFETY_MODEL
-   2. `llama stack run /Users//.llama/distributions/llamastack-ollama/ollama-run.yaml
+   Build Successful!
+   You can find the newly-built template here: ~/.llama/distributions/ollama/ollama-run.yaml
+   You can run the new Llama Stack Distro via: llama stack run ~/.llama/distributions/ollama/ollama-run.yaml --image-type conda
    ```
 
 3. **Set the ENV variables by exporting them to the terminal**:
diff --git a/install.sh b/install.sh
new file mode 100755
index 000000000..e424925a6
--- /dev/null
+++ b/install.sh
@@ -0,0 +1,206 @@
+#!/usr/bin/env bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+set -Eeuo pipefail
+
+PORT=8321
+OLLAMA_PORT=11434
+MODEL_ALIAS="llama3.2:3b"
+SERVER_IMAGE="llamastack/distribution-ollama:0.2.2"
+WAIT_TIMEOUT=300
+
+log(){ printf "\e[1;32m%s\e[0m\n" "$*"; }
+die(){ printf "\e[1;31m❌ %s\e[0m\n" "$*" >&2; exit 1; }
+
+wait_for_service() {
+  local url="$1"
+  local pattern="$2"
+  local timeout="$3"
+  local name="$4"
+  local start ts
+  log "⏳  Waiting for ${name}…"
+  start=$(date +%s)
+  while true; do
+    if curl --retry 5 --retry-delay 1 --retry-max-time "$timeout" --retry-all-errors --silent --fail "$url" 2>/dev/null | grep -q "$pattern"; then
+      break
+    fi
+    ts=$(date +%s)
+    if (( ts - start >= timeout )); then
+      return 1
+    fi
+    printf '.'
+    sleep 1
+  done
+  return 0
+}
+
+usage() {
+    cat << EOF
+📚 Llama-Stack Deployment Script
+
+Description:
+    This script sets up and deploys Llama-Stack with Ollama integration in containers.
+    It handles both Docker and Podman runtimes and includes automatic platform detection.
+
+Usage:
+    $(basename "$0") [OPTIONS]
+
+Options:
+    -p, --port PORT            Server port for Llama-Stack (default: ${PORT})
+    -o, --ollama-port PORT     Ollama service port (default: ${OLLAMA_PORT})
+    -m, --model MODEL          Model alias to use (default: ${MODEL_ALIAS})
+    -i, --image IMAGE          Server image (default: ${SERVER_IMAGE})
+    -t, --timeout SECONDS      Service wait timeout in seconds (default: ${WAIT_TIMEOUT})
+    -h, --help                 Show this help message
+
+For more information:
+    Documentation: https://llama-stack.readthedocs.io/
+    GitHub: https://github.com/meta-llama/llama-stack
+
+Report issues:
+    https://github.com/meta-llama/llama-stack/issues
+EOF
+}
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        -h|--help)
+            usage
+            exit 0
+            ;;
+        -p|--port)
+            PORT="$2"
+            shift 2
+            ;;
+        -o|--ollama-port)
+            OLLAMA_PORT="$2"
+            shift 2
+            ;;
+        -m|--model)
+            MODEL_ALIAS="$2"
+            shift 2
+            ;;
+        -i|--image)
+            SERVER_IMAGE="$2"
+            shift 2
+            ;;
+        -t|--timeout)
+            WAIT_TIMEOUT="$2"
+            shift 2
+            ;;
+        *)
+            die "Unknown option: $1"
+            ;;
+    esac
+done
+
+if command -v docker &> /dev/null; then
+  ENGINE="docker"
+elif command -v podman &> /dev/null; then
+  ENGINE="podman"
+else
+  die "Docker or Podman is required. Install Docker: https://docs.docker.com/get-docker/ or Podman: https://podman.io/getting-started/installation"
+fi
+
+# Explicitly set the platform for the host architecture
+HOST_ARCH="$(uname -m)"
+if [ "$HOST_ARCH" = "arm64" ]; then
+  if [ "$ENGINE" = "docker" ]; then
+    PLATFORM_OPTS=( --platform linux/amd64 )
+  else
+    PLATFORM_OPTS=( --os linux --arch amd64 )
+  fi
+else
+  PLATFORM_OPTS=()
+fi
+
+# macOS + Podman: ensure VM is running before we try to launch containers
+# If you need GPU passthrough under Podman on macOS, init the VM with libkrun:
+#   CONTAINERS_MACHINE_PROVIDER=libkrun podman machine init
+if [ "$ENGINE" = "podman" ] && [ "$(uname -s)" = "Darwin" ]; then
+  if ! podman info &>/dev/null; then
+    log "⌛️ Initializing Podman VM…"
+    podman machine init &>/dev/null || true
+    podman machine start &>/dev/null || true
+
+    log "⌛️  Waiting for Podman API…"
+    until podman info &>/dev/null; do
+      sleep 1
+    done
+    log "✅  Podman VM is up"
+  fi
+fi
+
+# Clean up any leftovers from earlier runs
+for name in ollama-server llama-stack; do
+  ids=$($ENGINE ps -aq --filter "name=^${name}$")
+  if [ -n "$ids" ]; then
+    log "⚠️   Found existing container(s) for '${name}', removing…"
+    $ENGINE rm -f "$ids" > /dev/null 2>&1
+  fi
+done
+
+###############################################################################
+# 0. Create a shared network
+###############################################################################
+if ! $ENGINE network inspect llama-net >/dev/null 2>&1; then
+  log "🌐  Creating network…"
+  $ENGINE network create llama-net >/dev/null 2>&1
+fi
+
+###############################################################################
+# 1. Ollama
+###############################################################################
+log "🦙  Starting Ollama…"
+$ENGINE run -d "${PLATFORM_OPTS[@]}" --name ollama-server \
+  --network llama-net \
+  -p "${OLLAMA_PORT}:${OLLAMA_PORT}" \
+  ollama/ollama > /dev/null 2>&1
+
+if ! wait_for_service "http://localhost:${OLLAMA_PORT}/" "Ollama" "$WAIT_TIMEOUT" "Ollama daemon"; then
+  log "❌  Ollama daemon did not become ready in ${WAIT_TIMEOUT}s; dumping container logs:"
+  $ENGINE logs --tail 200 ollama-server
+  die "Ollama startup failed"
+fi
+
+log "📦  Ensuring model is pulled: ${MODEL_ALIAS}…"
+if ! $ENGINE exec ollama-server ollama pull "${MODEL_ALIAS}" > /dev/null 2>&1; then
+  log "❌  Failed to pull model ${MODEL_ALIAS}; dumping container logs:"
+  $ENGINE logs --tail 200 ollama-server
+  die "Model pull failed"
+fi
+
+###############################################################################
+# 2. Llama‑Stack
+###############################################################################
+cmd=( run -d "${PLATFORM_OPTS[@]}" --name llama-stack \
+      --network llama-net \
+      -p "${PORT}:${PORT}" \
+      "${SERVER_IMAGE}" --port "${PORT}" \
+      --env INFERENCE_MODEL="${MODEL_ALIAS}" \
+      --env OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" )
+
+log "🦙  Starting Llama‑Stack…"
+$ENGINE "${cmd[@]}" > /dev/null 2>&1
+
+if ! wait_for_service "http://127.0.0.1:${PORT}/v1/health" "OK" "$WAIT_TIMEOUT" "Llama-Stack API"; then
+  log "❌  Llama-Stack did not become ready in ${WAIT_TIMEOUT}s; dumping container logs:"
+  $ENGINE logs --tail 200 llama-stack
+  die "Llama-Stack startup failed"
+fi
+
+###############################################################################
+# Done
+###############################################################################
+log ""
+log "🎉  Llama‑Stack is ready!"
+log "👉  API endpoint: http://localhost:${PORT}"
+log "📖 Documentation: https://llama-stack.readthedocs.io/en/latest/references/index.html"
+log "💻 To access the llama‑stack CLI, exec into the container:"
+log "   $ENGINE exec -ti llama-stack bash"
+log ""
diff --git a/kvant_build_local.sh b/kvant_build_local.sh
new file mode 100755
index 000000000..9701c57dc
--- /dev/null
+++ b/kvant_build_local.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+export USE_COPY_NOT_MOUNT=true
+export LLAMA_STACK_DIR=.
+
+uvx --from . llama stack build --template kvant --image-type container  --image-name kvant
diff --git a/kvant_start_local.sh b/kvant_start_local.sh
new file mode 100755
index 000000000..db5bff84a
--- /dev/null
+++ b/kvant_start_local.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+export LLAMA_STACK_PORT=8321
+# VLLM_API_TOKEN= env file
+# KEYCLOAK_CLIENT_SECRET= env file
+
+
+docker run -it \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v $(pwd)/data:/root/.llama \
+  --mount type=bind,source="$(pwd)"/llama_stack/templates/kvant/run.yaml,target=/root/.llama/config.yaml,readonly \
+  --entrypoint python \
+  --env-file ./.env \
+  distribution-kvant:dev \
+  -m llama_stack.distribution.server.server  --config /root/.llama/config.yaml \
+  --port $LLAMA_STACK_PORT \
+
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index dec43280b..b79c512b8 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -4,24 +4,16 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import sys
+from collections.abc import AsyncIterator
 from datetime import datetime
 from enum import Enum
-from typing import (
-    Annotated,
-    Any,
-    AsyncIterator,
-    Dict,
-    List,
-    Literal,
-    Optional,
-    Protocol,
-    Union,
-    runtime_checkable,
-)
+from typing import Annotated, Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, ConfigDict, Field
 
 from llama_stack.apis.common.content_types import URL, ContentDelta, InterleavedContent
+from llama_stack.apis.common.responses import Order, PaginatedResponse
 from llama_stack.apis.inference import (
     CompletionMessage,
     ResponseFormat,
@@ -38,6 +30,23 @@ from llama_stack.apis.safety import SafetyViolation
 from llama_stack.apis.tools import ToolDef
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
 
+from .openai_responses import (
+    ListOpenAIResponseInputItem,
+    ListOpenAIResponseObject,
+    OpenAIResponseInput,
+    OpenAIResponseInputTool,
+    OpenAIResponseObject,
+    OpenAIResponseObjectStream,
+)
+
+# TODO: use enum.StrEnum when we drop support for python 3.10
+if sys.version_info >= (3, 11):
+    from enum import StrEnum
+else:
+
+    class StrEnum(str, Enum):
+        """Backport of StrEnum for Python 3.10 and below."""
+
 
 class Attachment(BaseModel):
     """An attachment to an agent turn.
@@ -72,11 +81,11 @@ class StepCommon(BaseModel):
 
     turn_id: str
     step_id: str
-    started_at: Optional[datetime] = None
-    completed_at: Optional[datetime] = None
+    started_at: datetime | None = None
+    completed_at: datetime | None = None
 
 
-class StepType(Enum):
+class StepType(StrEnum):
     """Type of the step in an agent turn.
 
     :cvar inference: The step is an inference step that calls an LLM.
@@ -100,7 +109,7 @@ class InferenceStep(StepCommon):
 
     model_config = ConfigDict(protected_namespaces=())
 
-    step_type: Literal[StepType.inference.value] = StepType.inference.value
+    step_type: Literal[StepType.inference] = StepType.inference
     model_response: CompletionMessage
 
 
@@ -112,9 +121,9 @@ class ToolExecutionStep(StepCommon):
     :param tool_responses: The tool responses from the tool calls.
     """
 
-    step_type: Literal[StepType.tool_execution.value] = StepType.tool_execution.value
-    tool_calls: List[ToolCall]
-    tool_responses: List[ToolResponse]
+    step_type: Literal[StepType.tool_execution] = StepType.tool_execution
+    tool_calls: list[ToolCall]
+    tool_responses: list[ToolResponse]
 
 
 @json_schema_type
@@ -124,8 +133,8 @@ class ShieldCallStep(StepCommon):
     :param violation: The violation from the shield call.
     """
 
-    step_type: Literal[StepType.shield_call.value] = StepType.shield_call.value
-    violation: Optional[SafetyViolation]
+    step_type: Literal[StepType.shield_call] = StepType.shield_call
+    violation: SafetyViolation | None
 
 
 @json_schema_type
@@ -136,19 +145,14 @@ class MemoryRetrievalStep(StepCommon):
     :param inserted_context: The context retrieved from the vector databases.
     """
 
-    step_type: Literal[StepType.memory_retrieval.value] = StepType.memory_retrieval.value
+    step_type: Literal[StepType.memory_retrieval] = StepType.memory_retrieval
     # TODO: should this be List[str]?
     vector_db_ids: str
     inserted_context: InterleavedContent
 
 
 Step = Annotated[
-    Union[
-        InferenceStep,
-        ToolExecutionStep,
-        ShieldCallStep,
-        MemoryRetrievalStep,
-    ],
+    InferenceStep | ToolExecutionStep | ShieldCallStep | MemoryRetrievalStep,
     Field(discriminator="step_type"),
 ]
 
@@ -159,18 +163,13 @@ class Turn(BaseModel):
 
     turn_id: str
     session_id: str
-    input_messages: List[
-        Union[
-            UserMessage,
-            ToolResponseMessage,
-        ]
-    ]
-    steps: List[Step]
+    input_messages: list[UserMessage | ToolResponseMessage]
+    steps: list[Step]
     output_message: CompletionMessage
-    output_attachments: Optional[List[Attachment]] = Field(default_factory=list)
+    output_attachments: list[Attachment] | None = Field(default_factory=lambda: [])
 
     started_at: datetime
-    completed_at: Optional[datetime] = None
+    completed_at: datetime | None = None
 
 
 @json_schema_type
@@ -179,34 +178,31 @@ class Session(BaseModel):
 
     session_id: str
     session_name: str
-    turns: List[Turn]
+    turns: list[Turn]
     started_at: datetime
 
 
 class AgentToolGroupWithArgs(BaseModel):
     name: str
-    args: Dict[str, Any]
+    args: dict[str, Any]
 
 
-AgentToolGroup = Union[
-    str,
-    AgentToolGroupWithArgs,
-]
+AgentToolGroup = str | AgentToolGroupWithArgs
 register_schema(AgentToolGroup, name="AgentTool")
 
 
 class AgentConfigCommon(BaseModel):
-    sampling_params: Optional[SamplingParams] = Field(default_factory=SamplingParams)
+    sampling_params: SamplingParams | None = Field(default_factory=SamplingParams)
 
-    input_shields: Optional[List[str]] = Field(default_factory=list)
-    output_shields: Optional[List[str]] = Field(default_factory=list)
-    toolgroups: Optional[List[AgentToolGroup]] = Field(default_factory=list)
-    client_tools: Optional[List[ToolDef]] = Field(default_factory=list)
-    tool_choice: Optional[ToolChoice] = Field(default=None, deprecated="use tool_config instead")
-    tool_prompt_format: Optional[ToolPromptFormat] = Field(default=None, deprecated="use tool_config instead")
-    tool_config: Optional[ToolConfig] = Field(default=None)
+    input_shields: list[str] | None = Field(default_factory=lambda: [])
+    output_shields: list[str] | None = Field(default_factory=lambda: [])
+    toolgroups: list[AgentToolGroup] | None = Field(default_factory=lambda: [])
+    client_tools: list[ToolDef] | None = Field(default_factory=lambda: [])
+    tool_choice: ToolChoice | None = Field(default=None, deprecated="use tool_config instead")
+    tool_prompt_format: ToolPromptFormat | None = Field(default=None, deprecated="use tool_config instead")
+    tool_config: ToolConfig | None = Field(default=None)
 
-    max_infer_iters: Optional[int] = 10
+    max_infer_iters: int | None = 10
 
     def model_post_init(self, __context):
         if self.tool_config:
@@ -236,9 +232,9 @@ class AgentConfig(AgentConfigCommon):
 
     model: str
     instructions: str
-    name: Optional[str] = None
-    enable_session_persistence: Optional[bool] = False
-    response_format: Optional[ResponseFormat] = None
+    name: str | None = None
+    enable_session_persistence: bool | None = False
+    response_format: ResponseFormat | None = None
 
 
 @json_schema_type
@@ -248,21 +244,11 @@ class Agent(BaseModel):
     created_at: datetime
 
 
-@json_schema_type
-class ListAgentsResponse(BaseModel):
-    data: List[Agent]
-
-
-@json_schema_type
-class ListAgentSessionsResponse(BaseModel):
-    data: List[Session]
-
-
 class AgentConfigOverridablePerTurn(AgentConfigCommon):
-    instructions: Optional[str] = None
+    instructions: str | None = None
 
 
-class AgentTurnResponseEventType(Enum):
+class AgentTurnResponseEventType(StrEnum):
     step_start = "step_start"
     step_complete = "step_complete"
     step_progress = "step_progress"
@@ -274,15 +260,15 @@ class AgentTurnResponseEventType(Enum):
 
 @json_schema_type
 class AgentTurnResponseStepStartPayload(BaseModel):
-    event_type: Literal[AgentTurnResponseEventType.step_start.value] = AgentTurnResponseEventType.step_start.value
+    event_type: Literal[AgentTurnResponseEventType.step_start] = AgentTurnResponseEventType.step_start
     step_type: StepType
     step_id: str
-    metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
+    metadata: dict[str, Any] | None = Field(default_factory=lambda: {})
 
 
 @json_schema_type
 class AgentTurnResponseStepCompletePayload(BaseModel):
-    event_type: Literal[AgentTurnResponseEventType.step_complete.value] = AgentTurnResponseEventType.step_complete.value
+    event_type: Literal[AgentTurnResponseEventType.step_complete] = AgentTurnResponseEventType.step_complete
     step_type: StepType
     step_id: str
     step_details: Step
@@ -292,7 +278,7 @@ class AgentTurnResponseStepCompletePayload(BaseModel):
 class AgentTurnResponseStepProgressPayload(BaseModel):
     model_config = ConfigDict(protected_namespaces=())
 
-    event_type: Literal[AgentTurnResponseEventType.step_progress.value] = AgentTurnResponseEventType.step_progress.value
+    event_type: Literal[AgentTurnResponseEventType.step_progress] = AgentTurnResponseEventType.step_progress
     step_type: StepType
     step_id: str
 
@@ -301,33 +287,29 @@ class AgentTurnResponseStepProgressPayload(BaseModel):
 
 @json_schema_type
 class AgentTurnResponseTurnStartPayload(BaseModel):
-    event_type: Literal[AgentTurnResponseEventType.turn_start.value] = AgentTurnResponseEventType.turn_start.value
+    event_type: Literal[AgentTurnResponseEventType.turn_start] = AgentTurnResponseEventType.turn_start
     turn_id: str
 
 
 @json_schema_type
 class AgentTurnResponseTurnCompletePayload(BaseModel):
-    event_type: Literal[AgentTurnResponseEventType.turn_complete.value] = AgentTurnResponseEventType.turn_complete.value
+    event_type: Literal[AgentTurnResponseEventType.turn_complete] = AgentTurnResponseEventType.turn_complete
     turn: Turn
 
 
 @json_schema_type
 class AgentTurnResponseTurnAwaitingInputPayload(BaseModel):
-    event_type: Literal[AgentTurnResponseEventType.turn_awaiting_input.value] = (
-        AgentTurnResponseEventType.turn_awaiting_input.value
-    )
+    event_type: Literal[AgentTurnResponseEventType.turn_awaiting_input] = AgentTurnResponseEventType.turn_awaiting_input
     turn: Turn
 
 
 AgentTurnResponseEventPayload = Annotated[
-    Union[
-        AgentTurnResponseStepStartPayload,
-        AgentTurnResponseStepProgressPayload,
-        AgentTurnResponseStepCompletePayload,
-        AgentTurnResponseTurnStartPayload,
-        AgentTurnResponseTurnCompletePayload,
-        AgentTurnResponseTurnAwaitingInputPayload,
-    ],
+    AgentTurnResponseStepStartPayload
+    | AgentTurnResponseStepProgressPayload
+    | AgentTurnResponseStepCompletePayload
+    | AgentTurnResponseTurnStartPayload
+    | AgentTurnResponseTurnCompletePayload
+    | AgentTurnResponseTurnAwaitingInputPayload,
     Field(discriminator="event_type"),
 ]
 register_schema(AgentTurnResponseEventPayload, name="AgentTurnResponseEventPayload")
@@ -356,18 +338,13 @@ class AgentTurnCreateRequest(AgentConfigOverridablePerTurn):
     # TODO: figure out how we can simplify this and make why
     # ToolResponseMessage needs to be here (it is function call
     # execution from outside the system)
-    messages: List[
-        Union[
-            UserMessage,
-            ToolResponseMessage,
-        ]
-    ]
+    messages: list[UserMessage | ToolResponseMessage]
 
-    documents: Optional[List[Document]] = None
-    toolgroups: Optional[List[AgentToolGroup]] = None
+    documents: list[Document] | None = None
+    toolgroups: list[AgentToolGroup] | None = Field(default_factory=lambda: [])
 
-    stream: Optional[bool] = False
-    tool_config: Optional[ToolConfig] = None
+    stream: bool | None = False
+    tool_config: ToolConfig | None = None
 
 
 @json_schema_type
@@ -375,8 +352,8 @@ class AgentTurnResumeRequest(BaseModel):
     agent_id: str
     session_id: str
     turn_id: str
-    tool_responses: List[ToolResponse]
-    stream: Optional[bool] = False
+    tool_responses: list[ToolResponse]
+    stream: bool | None = False
 
 
 @json_schema_type
@@ -422,17 +399,12 @@ class Agents(Protocol):
         self,
         agent_id: str,
         session_id: str,
-        messages: List[
-            Union[
-                UserMessage,
-                ToolResponseMessage,
-            ]
-        ],
-        stream: Optional[bool] = False,
-        documents: Optional[List[Document]] = None,
-        toolgroups: Optional[List[AgentToolGroup]] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]:
+        messages: list[UserMessage | ToolResponseMessage],
+        stream: bool | None = False,
+        documents: list[Document] | None = None,
+        toolgroups: list[AgentToolGroup] | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> Turn | AsyncIterator[AgentTurnResponseStreamChunk]:
         """Create a new turn for an agent.
 
         :param agent_id: The ID of the agent to create the turn for.
@@ -443,8 +415,9 @@ class Agents(Protocol):
         :param toolgroups: (Optional) List of toolgroups to create the turn with, will be used in addition to the agent's config toolgroups for the request.
         :param tool_config: (Optional) The tool configuration to create the turn with, will be used to override the agent's tool_config.
         :returns: If stream=False, returns a Turn object.
-                  If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk
+                  If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk.
         """
+        ...
 
     @webmethod(
         route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume",
@@ -456,9 +429,9 @@ class Agents(Protocol):
         agent_id: str,
         session_id: str,
         turn_id: str,
-        tool_responses: List[ToolResponse],
-        stream: Optional[bool] = False,
-    ) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]:
+        tool_responses: list[ToolResponse],
+        stream: bool | None = False,
+    ) -> Turn | AsyncIterator[AgentTurnResponseStreamChunk]:
         """Resume an agent turn with executed tool call responses.
 
         When a Turn has the status `awaiting_input` due to pending input from client side tool calls, this endpoint can be used to submit the outputs from the tool calls once they are ready.
@@ -531,13 +504,14 @@ class Agents(Protocol):
         self,
         session_id: str,
         agent_id: str,
-        turn_ids: Optional[List[str]] = None,
+        turn_ids: list[str] | None = None,
     ) -> Session:
         """Retrieve an agent session by its ID.
 
         :param session_id: The ID of the session to get.
         :param agent_id: The ID of the agent to get the session for.
         :param turn_ids: (Optional) List of turn IDs to filter the session by.
+        :returns: A Session.
         """
         ...
 
@@ -547,7 +521,7 @@ class Agents(Protocol):
         session_id: str,
         agent_id: str,
     ) -> None:
-        """Delete an agent session by its ID.
+        """Delete an agent session by its ID and its associated turns.
 
         :param session_id: The ID of the session to delete.
         :param agent_id: The ID of the agent to delete the session for.
@@ -559,17 +533,19 @@ class Agents(Protocol):
         self,
         agent_id: str,
     ) -> None:
-        """Delete an agent by its ID.
+        """Delete an agent by its ID and its associated sessions and turns.
 
         :param agent_id: The ID of the agent to delete.
         """
         ...
 
     @webmethod(route="/agents", method="GET")
-    async def list_agents(self) -> ListAgentsResponse:
+    async def list_agents(self, start_index: int | None = None, limit: int | None = None) -> PaginatedResponse:
         """List all agents.
 
-        :returns: A ListAgentsResponse.
+        :param start_index: The index to start the pagination from.
+        :param limit: The number of agents to return.
+        :returns: A PaginatedResponse.
         """
         ...
 
@@ -586,10 +562,94 @@ class Agents(Protocol):
     async def list_agent_sessions(
         self,
         agent_id: str,
-    ) -> ListAgentSessionsResponse:
+        start_index: int | None = None,
+        limit: int | None = None,
+    ) -> PaginatedResponse:
         """List all session(s) of a given agent.
 
         :param agent_id: The ID of the agent to list sessions for.
-        :returns: A ListAgentSessionsResponse.
+        :param start_index: The index to start the pagination from.
+        :param limit: The number of sessions to return.
+        :returns: A PaginatedResponse.
+        """
+        ...
+
+    # We situate the OpenAI Responses API in the Agents API just as we did
+    # for Inference. The Responses API, in its intent, serves the same purpose as
+    # the Agents API above -- it is essentially a lightweight "agentic loop" with
+    # integrated tool calling.
+    #
+    # Both of these APIs are inherently stateful.
+
+    @webmethod(route="/openai/v1/responses/{response_id}", method="GET")
+    async def get_openai_response(
+        self,
+        response_id: str,
+    ) -> OpenAIResponseObject:
+        """Retrieve an OpenAI response by its ID.
+
+        :param response_id: The ID of the OpenAI response to retrieve.
+        :returns: An OpenAIResponseObject.
+        """
+        ...
+
+    @webmethod(route="/openai/v1/responses", method="POST")
+    async def create_openai_response(
+        self,
+        input: str | list[OpenAIResponseInput],
+        model: str,
+        instructions: str | None = None,
+        previous_response_id: str | None = None,
+        store: bool | None = True,
+        stream: bool | None = False,
+        temperature: float | None = None,
+        tools: list[OpenAIResponseInputTool] | None = None,
+    ) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
+        """Create a new OpenAI response.
+
+        :param input: Input message(s) to create the response.
+        :param model: The underlying LLM used for completions.
+    :param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork off new responses from existing responses.
+        :returns: An OpenAIResponseObject.
+        """
+        ...
+
+    @webmethod(route="/openai/v1/responses", method="GET")
+    async def list_openai_responses(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseObject:
+        """List all OpenAI responses.
+
+        :param after: The ID of the last response to return.
+        :param limit: The number of responses to return.
+        :param model: The model to filter responses by.
+        :param order: The order to sort responses by when sorted by created_at ('asc' or 'desc').
+        :returns: A ListOpenAIResponseObject.
+        """
+        ...
+
+    @webmethod(route="/openai/v1/responses/{response_id}/input_items", method="GET")
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """List input items for a given OpenAI response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+        :param order: The order to return the input items in. Default is desc.
+    :returns: A ListOpenAIResponseInputItem.
         """
         ...
diff --git a/llama_stack/apis/agents/openai_responses.py b/llama_stack/apis/agents/openai_responses.py
new file mode 100644
index 000000000..6806e1d3f
--- /dev/null
+++ b/llama_stack/apis/agents/openai_responses.py
@@ -0,0 +1,279 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Annotated, Any, Literal
+
+from pydantic import BaseModel, Field
+
+from llama_stack.schema_utils import json_schema_type, register_schema
+
+# NOTE(ashwin): this file is literally a copy of the OpenAI responses API schema. We should probably
+# take their YAML and generate this file automatically. Their YAML is available.
+
+
+@json_schema_type
+class OpenAIResponseError(BaseModel):
+    code: str
+    message: str
+
+
+@json_schema_type
+class OpenAIResponseInputMessageContentText(BaseModel):
+    text: str
+    type: Literal["input_text"] = "input_text"
+
+
+@json_schema_type
+class OpenAIResponseInputMessageContentImage(BaseModel):
+    detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
+    type: Literal["input_image"] = "input_image"
+    # TODO: handle file_id
+    image_url: str | None = None
+
+
+# TODO: handle file content types
+OpenAIResponseInputMessageContent = Annotated[
+    OpenAIResponseInputMessageContentText | OpenAIResponseInputMessageContentImage,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageContentOutputText(BaseModel):
+    text: str
+    type: Literal["output_text"] = "output_text"
+
+
+OpenAIResponseOutputMessageContent = Annotated[
+    OpenAIResponseOutputMessageContentOutputText,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseOutputMessageContent, name="OpenAIResponseOutputMessageContent")
+
+
+@json_schema_type
+class OpenAIResponseMessage(BaseModel):
+    """
+    Corresponds to the various Message types in the Responses API.
+    They are all under one type because the Responses API gives them all
+    the same "type" value, and there is no way to tell them apart in certain
+    scenarios.
+    """
+
+    content: str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent]
+    role: Literal["system"] | Literal["developer"] | Literal["user"] | Literal["assistant"]
+    type: Literal["message"] = "message"
+
+    # The fields below are not used in all scenarios, but are required in others.
+    id: str | None = None
+    status: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
+    id: str
+    status: str
+    type: Literal["web_search_call"] = "web_search_call"
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
+    call_id: str
+    name: str
+    arguments: str
+    type: Literal["function_call"] = "function_call"
+    id: str | None = None
+    status: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageMCPCall(BaseModel):
+    id: str
+    type: Literal["mcp_call"] = "mcp_call"
+    arguments: str
+    name: str
+    server_label: str
+    error: str | None = None
+    output: str | None = None
+
+
+class MCPListToolsTool(BaseModel):
+    input_schema: dict[str, Any]
+    name: str
+    description: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseOutputMessageMCPListTools(BaseModel):
+    id: str
+    type: Literal["mcp_list_tools"] = "mcp_list_tools"
+    server_label: str
+    tools: list[MCPListToolsTool]
+
+
+OpenAIResponseOutput = Annotated[
+    OpenAIResponseMessage
+    | OpenAIResponseOutputMessageWebSearchToolCall
+    | OpenAIResponseOutputMessageFunctionToolCall
+    | OpenAIResponseOutputMessageMCPCall
+    | OpenAIResponseOutputMessageMCPListTools,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseOutput, name="OpenAIResponseOutput")
+
+
+@json_schema_type
+class OpenAIResponseObject(BaseModel):
+    created_at: int
+    error: OpenAIResponseError | None = None
+    id: str
+    model: str
+    object: Literal["response"] = "response"
+    output: list[OpenAIResponseOutput]
+    parallel_tool_calls: bool = False
+    previous_response_id: str | None = None
+    status: str
+    temperature: float | None = None
+    top_p: float | None = None
+    truncation: str | None = None
+    user: str | None = None
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseCreated(BaseModel):
+    response: OpenAIResponseObject
+    type: Literal["response.created"] = "response.created"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel):
+    content_index: int
+    delta: str
+    item_id: str
+    output_index: int
+    sequence_number: int
+    type: Literal["response.output_text.delta"] = "response.output_text.delta"
+
+
+@json_schema_type
+class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
+    response: OpenAIResponseObject
+    type: Literal["response.completed"] = "response.completed"
+
+
+OpenAIResponseObjectStream = Annotated[
+    OpenAIResponseObjectStreamResponseCreated
+    | OpenAIResponseObjectStreamResponseOutputTextDelta
+    | OpenAIResponseObjectStreamResponseCompleted,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseObjectStream, name="OpenAIResponseObjectStream")
+
+
+@json_schema_type
+class OpenAIResponseInputFunctionToolCallOutput(BaseModel):
+    """
+    This represents the output of a function call that gets passed back to the model.
+    """
+
+    call_id: str
+    output: str
+    type: Literal["function_call_output"] = "function_call_output"
+    id: str | None = None
+    status: str | None = None
+
+
+OpenAIResponseInput = Annotated[
+    # Responses API allows output messages to be passed in as input
+    OpenAIResponseOutputMessageWebSearchToolCall
+    | OpenAIResponseOutputMessageFunctionToolCall
+    | OpenAIResponseInputFunctionToolCallOutput
+    |
+    # Fallback to the generic message type as a last resort
+    OpenAIResponseMessage,
+    Field(union_mode="left_to_right"),
+]
+register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
+
+
+@json_schema_type
+class OpenAIResponseInputToolWebSearch(BaseModel):
+    type: Literal["web_search"] | Literal["web_search_preview_2025_03_11"] = "web_search"
+    # TODO: actually use search_context_size somewhere...
+    search_context_size: str | None = Field(default="medium", pattern="^(low|medium|high)$")
+    # TODO: add user_location
+
+
+@json_schema_type
+class OpenAIResponseInputToolFunction(BaseModel):
+    type: Literal["function"] = "function"
+    name: str
+    description: str | None = None
+    parameters: dict[str, Any] | None
+    strict: bool | None = None
+
+
+class FileSearchRankingOptions(BaseModel):
+    ranker: str | None = None
+    score_threshold: float | None = Field(default=0.0, ge=0.0, le=1.0)
+
+
+@json_schema_type
+class OpenAIResponseInputToolFileSearch(BaseModel):
+    type: Literal["file_search"] = "file_search"
+    vector_store_id: list[str]
+    ranking_options: FileSearchRankingOptions | None = None
+    # TODO: add filters
+
+
+class ApprovalFilter(BaseModel):
+    always: list[str] | None = None
+    never: list[str] | None = None
+
+
+class AllowedToolsFilter(BaseModel):
+    tool_names: list[str] | None = None
+
+
+@json_schema_type
+class OpenAIResponseInputToolMCP(BaseModel):
+    type: Literal["mcp"] = "mcp"
+    server_label: str
+    server_url: str
+    headers: dict[str, Any] | None = None
+
+    require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
+    allowed_tools: list[str] | AllowedToolsFilter | None = None
+
+
+OpenAIResponseInputTool = Annotated[
+    OpenAIResponseInputToolWebSearch
+    | OpenAIResponseInputToolFileSearch
+    | OpenAIResponseInputToolFunction
+    | OpenAIResponseInputToolMCP,
+    Field(discriminator="type"),
+]
+register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
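+
+# Illustrative sketch only (comments, nothing below is executed): a request could
+# combine the hosted web-search tool with a custom function tool using just the
+# models defined above. The function name and JSON schema are hypothetical.
+#
+#   tools: list[OpenAIResponseInputTool] = [
+#       OpenAIResponseInputToolWebSearch(search_context_size="low"),
+#       OpenAIResponseInputToolFunction(
+#           name="get_weather",
+#           description="Look up the current weather for a city",
+#           parameters={"type": "object", "properties": {"city": {"type": "string"}}},
+#       ),
+#   ]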
+
+
+class ListOpenAIResponseInputItem(BaseModel):
+    data: list[OpenAIResponseInput]
+    object: Literal["list"] = "list"
+
+
+@json_schema_type
+class OpenAIResponseObjectWithInput(OpenAIResponseObject):
+    input: list[OpenAIResponseInput]
+
+
+@json_schema_type
+class ListOpenAIResponseObject(BaseModel):
+    data: list[OpenAIResponseObjectWithInput]
+    has_more: bool
+    first_id: str
+    last_id: str
+    object: Literal["list"] = "list"
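+
+
+# Illustrative sketch only (comments, not executed): a minimal completed,
+# non-streaming response would validate against the models above roughly like
+# this; the id, model name and text are hypothetical placeholders.
+#
+#   OpenAIResponseObject(
+#       id="resp_123",
+#       created_at=1710000000,
+#       model="llama3.2:3b-instruct-fp16",
+#       status="completed",
+#       output=[
+#           OpenAIResponseMessage(
+#               role="assistant",
+#               content=[OpenAIResponseOutputMessageContentOutputText(text="Hello!")],
+#           )
+#       ],
+#   )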
diff --git a/llama_stack/apis/batch_inference/batch_inference.py b/llama_stack/apis/batch_inference/batch_inference.py
index 7a324128d..b2aa637e2 100644
--- a/llama_stack/apis/batch_inference/batch_inference.py
+++ b/llama_stack/apis/batch_inference/batch_inference.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Optional, Protocol, runtime_checkable
+from typing import Protocol, runtime_checkable
 
 from llama_stack.apis.common.job_types import Job
 from llama_stack.apis.inference import (
@@ -34,22 +34,45 @@ class BatchInference(Protocol):
     async def completion(
         self,
         model: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Job: ...
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
+    ) -> Job:
+        """Generate completions for a batch of content.
+
+        :param model: The model to use for the completion.
+        :param content_batch: The content to complete.
+        :param sampling_params: The sampling parameters to use for the completion.
+        :param response_format: The response format to use for the completion.
+        :param logprobs: The logprobs to use for the completion.
+        :returns: A job for the completion.
+        """
+        ...
 
     @webmethod(route="/batch-inference/chat-completion", method="POST")
     async def chat_completion(
         self,
         model: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
         # zero-shot tool definitions as input to the model
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Job: ...
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
+    ) -> Job:
+        """Generate chat completions for a batch of messages.
+
+        :param model: The model to use for the chat completion.
+        :param messages_batch: The messages to complete.
+        :param sampling_params: The sampling parameters to use for the completion.
+        :param tools: The tools to use for the chat completion.
+        :param tool_choice: The tool choice to use for the chat completion.
+        :param tool_prompt_format: The tool prompt format to use for the chat completion.
+        :param response_format: The response format to use for the chat completion.
+        :param logprobs: The logprobs to use for the chat completion.
+        :returns: A job for the chat completion.
+        """
+        ...
diff --git a/llama_stack/apis/benchmarks/benchmarks.py b/llama_stack/apis/benchmarks/benchmarks.py
index 809af8868..d80c767f8 100644
--- a/llama_stack/apis/benchmarks/benchmarks.py
+++ b/llama_stack/apis/benchmarks/benchmarks.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
+from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
@@ -13,8 +13,8 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 
 class CommonBenchmarkFields(BaseModel):
     dataset_id: str
-    scoring_functions: List[str]
-    metadata: Dict[str, Any] = Field(
+    scoring_functions: list[str]
+    metadata: dict[str, Any] = Field(
         default_factory=dict,
         description="Metadata for this evaluation task",
     )
@@ -22,45 +22,66 @@ class CommonBenchmarkFields(BaseModel):
 
 @json_schema_type
 class Benchmark(CommonBenchmarkFields, Resource):
-    type: Literal[ResourceType.benchmark.value] = ResourceType.benchmark.value
+    type: Literal[ResourceType.benchmark] = ResourceType.benchmark
 
     @property
     def benchmark_id(self) -> str:
         return self.identifier
 
     @property
-    def provider_benchmark_id(self) -> str:
+    def provider_benchmark_id(self) -> str | None:
         return self.provider_resource_id
 
 
 class BenchmarkInput(CommonBenchmarkFields, BaseModel):
     benchmark_id: str
-    provider_id: Optional[str] = None
-    provider_benchmark_id: Optional[str] = None
+    provider_id: str | None = None
+    provider_benchmark_id: str | None = None
 
 
 class ListBenchmarksResponse(BaseModel):
-    data: List[Benchmark]
+    data: list[Benchmark]
 
 
 @runtime_checkable
 class Benchmarks(Protocol):
     @webmethod(route="/eval/benchmarks", method="GET")
-    async def list_benchmarks(self) -> ListBenchmarksResponse: ...
+    async def list_benchmarks(self) -> ListBenchmarksResponse:
+        """List all benchmarks.
+
+        :returns: A ListBenchmarksResponse.
+        """
+        ...
 
     @webmethod(route="/eval/benchmarks/{benchmark_id}", method="GET")
     async def get_benchmark(
         self,
         benchmark_id: str,
-    ) -> Benchmark: ...
+    ) -> Benchmark:
+        """Get a benchmark by its ID.
+
+        :param benchmark_id: The ID of the benchmark to get.
+        :returns: A Benchmark.
+        """
+        ...
 
     @webmethod(route="/eval/benchmarks", method="POST")
     async def register_benchmark(
         self,
         benchmark_id: str,
         dataset_id: str,
-        scoring_functions: List[str],
-        provider_benchmark_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-    ) -> None: ...
+        scoring_functions: list[str],
+        provider_benchmark_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+    ) -> None:
+        """Register a benchmark.
+
+        :param benchmark_id: The ID of the benchmark to register.
+        :param dataset_id: The ID of the dataset to use for the benchmark.
+        :param scoring_functions: The scoring functions to use for the benchmark.
+        :param provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
+        :param provider_id: The ID of the provider to use for the benchmark.
+        :param metadata: The metadata to use for the benchmark.
+        """
+        ...
diff --git a/llama_stack/apis/common/content_types.py b/llama_stack/apis/common/content_types.py
index 9d4e21308..8bcb781f7 100644
--- a/llama_stack/apis/common/content_types.py
+++ b/llama_stack/apis/common/content_types.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Annotated, List, Literal, Optional, Union
+from typing import Annotated, Literal
 
 from pydantic import BaseModel, Field, model_validator
 
@@ -26,9 +26,9 @@ class _URLOrData(BaseModel):
     :param data: base64 encoded image data as string
     """
 
-    url: Optional[URL] = None
+    url: URL | None = None
     # data is a base64 encoded string, hint with contentEncoding=base64
-    data: Optional[str] = Field(contentEncoding="base64", default=None)
+    data: str | None = Field(default=None, json_schema_extra={"contentEncoding": "base64"})
 
     @model_validator(mode="before")
     @classmethod
@@ -64,13 +64,13 @@ class TextContentItem(BaseModel):
 
 # other modalities can be added here
 InterleavedContentItem = Annotated[
-    Union[ImageContentItem, TextContentItem],
+    ImageContentItem | TextContentItem,
     Field(discriminator="type"),
 ]
 register_schema(InterleavedContentItem, name="InterleavedContentItem")
 
 # accept a single "str" as a special case since it is common
-InterleavedContent = Union[str, InterleavedContentItem, List[InterleavedContentItem]]
+InterleavedContent = str | InterleavedContentItem | list[InterleavedContentItem]
 register_schema(InterleavedContent, name="InterleavedContent")
 
 
@@ -100,13 +100,13 @@ class ToolCallDelta(BaseModel):
     # you either send an in-progress tool call so the client can stream a long
     # code generation or you send the final parsed tool call at the end of the
     # stream
-    tool_call: Union[str, ToolCall]
+    tool_call: str | ToolCall
     parse_status: ToolCallParseStatus
 
 
 # streaming completions send a stream of ContentDeltas
 ContentDelta = Annotated[
-    Union[TextDelta, ImageDelta, ToolCallDelta],
+    TextDelta | ImageDelta | ToolCallDelta,
     Field(discriminator="type"),
 ]
 register_schema(ContentDelta, name="ContentDelta")
diff --git a/llama_stack/apis/common/deployment_types.py b/llama_stack/apis/common/deployment_types.py
deleted file mode 100644
index 83eea28a2..000000000
--- a/llama_stack/apis/common/deployment_types.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from enum import Enum
-from typing import Any, Dict, Optional
-
-from pydantic import BaseModel
-
-from llama_stack.apis.common.content_types import URL
-from llama_stack.schema_utils import json_schema_type
-
-
-@json_schema_type
-class RestAPIMethod(Enum):
-    GET = "GET"
-    POST = "POST"
-    PUT = "PUT"
-    DELETE = "DELETE"
-
-
-@json_schema_type
-class RestAPIExecutionConfig(BaseModel):
-    url: URL
-    method: RestAPIMethod
-    params: Optional[Dict[str, Any]] = None
-    headers: Optional[Dict[str, Any]] = None
-    body: Optional[Dict[str, Any]] = None
diff --git a/llama_stack/apis/common/responses.py b/llama_stack/apis/common/responses.py
index f9e9a4c31..5cb41e23d 100644
--- a/llama_stack/apis/common/responses.py
+++ b/llama_stack/apis/common/responses.py
@@ -4,13 +4,19 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List
+from enum import Enum
+from typing import Any
 
 from pydantic import BaseModel
 
 from llama_stack.schema_utils import json_schema_type
 
 
+class Order(Enum):
+    asc = "asc"
+    desc = "desc"
+
+
 @json_schema_type
 class PaginatedResponse(BaseModel):
     """A generic paginated response that follows a simple format.
@@ -19,5 +25,5 @@ class PaginatedResponse(BaseModel):
     :param has_more: Whether there are more items available after this set
     """
 
-    data: List[Dict[str, Any]]
+    data: list[dict[str, Any]]
     has_more: bool
diff --git a/llama_stack/apis/common/training_types.py b/llama_stack/apis/common/training_types.py
index d6c6c6919..46cd101af 100644
--- a/llama_stack/apis/common/training_types.py
+++ b/llama_stack/apis/common/training_types.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from datetime import datetime
-from typing import Optional
 
 from pydantic import BaseModel
 
@@ -27,4 +26,4 @@ class Checkpoint(BaseModel):
     epoch: int
     post_training_job_id: str
     path: str
-    training_metrics: Optional[PostTrainingMetric] = None
+    training_metrics: PostTrainingMetric | None = None
diff --git a/llama_stack/apis/common/type_system.py b/llama_stack/apis/common/type_system.py
index 5d9f000be..db4aab4c5 100644
--- a/llama_stack/apis/common/type_system.py
+++ b/llama_stack/apis/common/type_system.py
@@ -4,10 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Literal, Union
+from typing import Annotated, Literal
 
 from pydantic import BaseModel, Field
-from typing_extensions import Annotated
 
 from llama_stack.schema_utils import json_schema_type, register_schema
 
@@ -73,18 +72,16 @@ class DialogType(BaseModel):
 
 
 ParamType = Annotated[
-    Union[
-        StringType,
-        NumberType,
-        BooleanType,
-        ArrayType,
-        ObjectType,
-        JsonType,
-        UnionType,
-        ChatCompletionInputType,
-        CompletionInputType,
-        AgentTurnInputType,
-    ],
+    StringType
+    | NumberType
+    | BooleanType
+    | ArrayType
+    | ObjectType
+    | JsonType
+    | UnionType
+    | ChatCompletionInputType
+    | CompletionInputType
+    | AgentTurnInputType,
     Field(discriminator="type"),
 ]
 register_schema(ParamType, name="ParamType")
diff --git a/llama_stack/apis/datasetio/datasetio.py b/llama_stack/apis/datasetio/datasetio.py
index 6331882fb..1183983cc 100644
--- a/llama_stack/apis/datasetio/datasetio.py
+++ b/llama_stack/apis/datasetio/datasetio.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.apis.datasets import Dataset
@@ -24,8 +24,8 @@ class DatasetIO(Protocol):
     async def iterrows(
         self,
         dataset_id: str,
-        start_index: Optional[int] = None,
-        limit: Optional[int] = None,
+        start_index: int | None = None,
+        limit: int | None = None,
     ) -> PaginatedResponse:
         """Get a paginated list of rows from a dataset.
 
@@ -34,14 +34,21 @@ class DatasetIO(Protocol):
         - limit: Number of items to return. If None or -1, returns all items.
 
         The response includes:
-        - data: List of items for the current page
-        - has_more: Whether there are more items available after this set
+        - data: List of items for the current page.
+        - has_more: Whether there are more items available after this set.
 
         :param dataset_id: The ID of the dataset to get the rows from.
         :param start_index: Index into dataset for the first row to get. Get all rows if None.
         :param limit: The number of rows to get.
+        :returns: A PaginatedResponse.
         """
         ...
 
     @webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST")
-    async def append_rows(self, dataset_id: str, rows: List[Dict[str, Any]]) -> None: ...
+    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
+        """Append rows to a dataset.
+
+        :param dataset_id: The ID of the dataset to append the rows to.
+        :param rows: The rows to append to the dataset.
+        """
+        ...
diff --git a/llama_stack/apis/datasets/datasets.py b/llama_stack/apis/datasets/datasets.py
index 32ccde144..e3de3d5cb 100644
--- a/llama_stack/apis/datasets/datasets.py
+++ b/llama_stack/apis/datasets/datasets.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Annotated, Any, Dict, List, Literal, Optional, Protocol, Union
+from typing import Annotated, Any, Literal, Protocol
 
 from pydantic import BaseModel, Field
 
@@ -81,11 +81,11 @@ class RowsDataSource(BaseModel):
     """
 
     type: Literal["rows"] = "rows"
-    rows: List[Dict[str, Any]]
+    rows: list[dict[str, Any]]
 
 
 DataSource = Annotated[
-    Union[URIDataSource, RowsDataSource],
+    URIDataSource | RowsDataSource,
     Field(discriminator="type"),
 ]
 register_schema(DataSource, name="DataSource")
@@ -98,7 +98,7 @@ class CommonDatasetFields(BaseModel):
 
     purpose: DatasetPurpose
     source: DataSource
-    metadata: Dict[str, Any] = Field(
+    metadata: dict[str, Any] = Field(
         default_factory=dict,
         description="Any additional metadata for this dataset",
     )
@@ -106,14 +106,14 @@ class CommonDatasetFields(BaseModel):
 
 @json_schema_type
 class Dataset(CommonDatasetFields, Resource):
-    type: Literal[ResourceType.dataset.value] = ResourceType.dataset.value
+    type: Literal[ResourceType.dataset] = ResourceType.dataset
 
     @property
     def dataset_id(self) -> str:
         return self.identifier
 
     @property
-    def provider_dataset_id(self) -> str:
+    def provider_dataset_id(self) -> str | None:
         return self.provider_resource_id
 
 
@@ -122,7 +122,7 @@ class DatasetInput(CommonDatasetFields, BaseModel):
 
 
 class ListDatasetsResponse(BaseModel):
-    data: List[Dataset]
+    data: list[Dataset]
 
 
 class Datasets(Protocol):
@@ -131,13 +131,14 @@ class Datasets(Protocol):
         self,
         purpose: DatasetPurpose,
         source: DataSource,
-        metadata: Optional[Dict[str, Any]] = None,
-        dataset_id: Optional[str] = None,
+        metadata: dict[str, Any] | None = None,
+        dataset_id: str | None = None,
     ) -> Dataset:
         """
         Register a new dataset.
 
-        :param purpose: The purpose of the dataset. One of
+        :param purpose: The purpose of the dataset.
+        One of:
             - "post-training/messages": The dataset contains a messages column with list of messages for post-training.
                 {
                     "messages": [
@@ -188,8 +189,9 @@ class Datasets(Protocol):
                ]
            }
         :param metadata: The metadata for the dataset.
-           - E.g. {"description": "My dataset"}
+           - E.g. {"description": "My dataset"}.
         :param dataset_id: The ID of the dataset. If not provided, an ID will be generated.
+        :returns: A Dataset.
         """
         ...
 
@@ -197,13 +199,29 @@ class Datasets(Protocol):
     async def get_dataset(
         self,
         dataset_id: str,
-    ) -> Dataset: ...
+    ) -> Dataset:
+        """Get a dataset by its ID.
+
+        :param dataset_id: The ID of the dataset to get.
+        :returns: A Dataset.
+        """
+        ...
 
     @webmethod(route="/datasets", method="GET")
-    async def list_datasets(self) -> ListDatasetsResponse: ...
+    async def list_datasets(self) -> ListDatasetsResponse:
+        """List all datasets.
+
+        :returns: A ListDatasetsResponse.
+        """
+        ...
 
     @webmethod(route="/datasets/{dataset_id:path}", method="DELETE")
     async def unregister_dataset(
         self,
         dataset_id: str,
-    ) -> None: ...
+    ) -> None:
+        """Unregister a dataset by its ID.
+
+        :param dataset_id: The ID of the dataset to unregister.
+        """
+        ...
diff --git a/llama_stack/apis/datatypes.py b/llama_stack/apis/datatypes.py
index 25f3ab1ab..63a764725 100644
--- a/llama_stack/apis/datatypes.py
+++ b/llama_stack/apis/datatypes.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Optional
 
 from pydantic import BaseModel
 
@@ -54,4 +53,4 @@ class Error(BaseModel):
     status: int
     title: str
     detail: str
-    instance: Optional[str] = None
+    instance: str | None = None
diff --git a/llama_stack/apis/eval/eval.py b/llama_stack/apis/eval/eval.py
index 0e5959c37..83a0a8e56 100644
--- a/llama_stack/apis/eval/eval.py
+++ b/llama_stack/apis/eval/eval.py
@@ -4,10 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Literal, Optional, Protocol, Union
+from typing import Annotated, Any, Literal, Protocol
 
 from pydantic import BaseModel, Field
-from typing_extensions import Annotated
 
 from llama_stack.apis.agents import AgentConfig
 from llama_stack.apis.common.job_types import Job
@@ -29,7 +28,7 @@ class ModelCandidate(BaseModel):
     type: Literal["model"] = "model"
     model: str
     sampling_params: SamplingParams
-    system_message: Optional[SystemMessage] = None
+    system_message: SystemMessage | None = None
 
 
 @json_schema_type
@@ -43,7 +42,7 @@ class AgentCandidate(BaseModel):
     config: AgentConfig
 
 
-EvalCandidate = Annotated[Union[ModelCandidate, AgentCandidate], Field(discriminator="type")]
+EvalCandidate = Annotated[ModelCandidate | AgentCandidate, Field(discriminator="type")]
 register_schema(EvalCandidate, name="EvalCandidate")
 
 
@@ -57,11 +56,11 @@ class BenchmarkConfig(BaseModel):
     """
 
     eval_candidate: EvalCandidate
-    scoring_params: Dict[str, ScoringFnParams] = Field(
+    scoring_params: dict[str, ScoringFnParams] = Field(
         description="Map between scoring function id and parameters for each scoring function you want to run",
         default_factory=dict,
     )
-    num_examples: Optional[int] = Field(
+    num_examples: int | None = Field(
         description="Number of examples to evaluate (useful for testing), if not provided, all examples in the dataset will be evaluated",
         default=None,
     )
@@ -76,9 +75,9 @@ class EvaluateResponse(BaseModel):
     :param scores: The scores from the evaluation.
     """
 
-    generations: List[Dict[str, Any]]
+    generations: list[dict[str, Any]]
     # each key in the dict is a scoring function name
-    scores: Dict[str, ScoringResult]
+    scores: dict[str, ScoringResult]
 
 
 class Eval(Protocol):
@@ -94,15 +93,16 @@ class Eval(Protocol):
 
         :param benchmark_id: The ID of the benchmark to run the evaluation on.
         :param benchmark_config: The configuration for the benchmark.
-        :return: The job that was created to run the evaluation.
+        :returns: The job that was created to run the evaluation.
         """
+        ...
 
     @webmethod(route="/eval/benchmarks/{benchmark_id}/evaluations", method="POST")
     async def evaluate_rows(
         self,
         benchmark_id: str,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: List[str],
+        input_rows: list[dict[str, Any]],
+        scoring_functions: list[str],
         benchmark_config: BenchmarkConfig,
     ) -> EvaluateResponse:
         """Evaluate a list of rows on a benchmark.
@@ -111,8 +111,9 @@ class Eval(Protocol):
         :param input_rows: The rows to evaluate.
         :param scoring_functions: The scoring functions to use for the evaluation.
         :param benchmark_config: The configuration for the benchmark.
-        :return: EvaluateResponse object containing generations and scores
+        :returns: EvaluateResponse object containing generations and scores.
         """
+        ...
 
     @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="GET")
     async def job_status(self, benchmark_id: str, job_id: str) -> Job:
@@ -120,7 +121,7 @@ class Eval(Protocol):
 
         :param benchmark_id: The ID of the benchmark to run the evaluation on.
         :param job_id: The ID of the job to get the status of.
-        :return: The status of the evaluationjob.
+        :returns: The status of the evaluation job.
         """
         ...
 
@@ -139,5 +140,6 @@ class Eval(Protocol):
 
         :param benchmark_id: The ID of the benchmark to run the evaluation on.
         :param job_id: The ID of the job to get the result of.
-        :return: The result of the job.
+        :returns: The result of the job.
         """
+        ...
diff --git a/llama_stack/apis/files/files.py b/llama_stack/apis/files/files.py
index ef8b65829..1d762a68a 100644
--- a/llama_stack/apis/files/files.py
+++ b/llama_stack/apis/files/files.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Optional, Protocol, runtime_checkable
+from typing import Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -42,7 +42,7 @@ class ListBucketResponse(BaseModel):
     :param data: List of FileResponse entries
     """
 
-    data: List[BucketResponse]
+    data: list[BucketResponse]
 
 
 @json_schema_type
@@ -74,7 +74,7 @@ class ListFileResponse(BaseModel):
     :param data: List of FileResponse entries
     """
 
-    data: List[FileResponse]
+    data: list[FileResponse]
 
 
 @runtime_checkable
@@ -91,10 +91,11 @@ class Files(Protocol):
         """
         Create a new upload session for a file identified by a bucket and key.
 
-        :param bucket: Bucket under which the file is stored (valid chars: a-zA-Z0-9_-)
-        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)
-        :param mime_type: MIME type of the file
-        :param size: File size in bytes
+        :param bucket: Bucket under which the file is stored (valid chars: a-zA-Z0-9_-).
+        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.).
+        :param mime_type: MIME type of the file.
+        :param size: File size in bytes.
+        :returns: A FileUploadResponse.
         """
         ...
 
@@ -102,12 +103,13 @@ class Files(Protocol):
     async def upload_content_to_session(
         self,
         upload_id: str,
-    ) -> Optional[FileResponse]:
+    ) -> FileResponse | None:
         """
         Upload file content to an existing upload session.
         On the server, request body will have the raw bytes that are uploaded.
 
-        :param upload_id: ID of the upload session
+        :param upload_id: ID of the upload session.
+        :returns: A FileResponse or None if the upload is not complete.
         """
         ...
 
@@ -117,9 +119,10 @@ class Files(Protocol):
         upload_id: str,
     ) -> FileUploadResponse:
         """
-        Returns information about an existsing upload session
+        Returns information about an existing upload session.
 
-        :param upload_id: ID of the upload session
+        :param upload_id: ID of the upload session.
+        :returns: A FileUploadResponse.
         """
         ...
 
@@ -130,6 +133,9 @@ class Files(Protocol):
     ) -> ListBucketResponse:
         """
         List all buckets.
+
+        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-).
+        :returns: A ListBucketResponse.
         """
         ...
 
@@ -141,7 +147,8 @@ class Files(Protocol):
         """
         List all files in a bucket.
 
-        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-)
+        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-).
+        :returns: A ListFileResponse.
         """
         ...
 
@@ -154,8 +161,9 @@ class Files(Protocol):
         """
         Get a file info identified by a bucket and key.
 
-        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-)
-        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)
+        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-).
+        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.).
+        :returns: A FileResponse.
         """
         ...
 
@@ -168,7 +176,7 @@ class Files(Protocol):
         """
         Delete a file identified by a bucket and key.
 
-        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-)
-        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.)
+        :param bucket: Bucket name (valid chars: a-zA-Z0-9_-).
+        :param key: Key under which the file is stored (valid chars: a-zA-Z0-9_-/.).
         """
         ...
diff --git a/llama_stack/apis/inference/inference.py b/llama_stack/apis/inference/inference.py
index 309171f20..74697dd18 100644
--- a/llama_stack/apis/inference/inference.py
+++ b/llama_stack/apis/inference/inference.py
@@ -4,23 +4,22 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import sys
+from collections.abc import AsyncIterator
 from enum import Enum
 from typing import (
+    Annotated,
     Any,
-    AsyncIterator,
-    Dict,
-    List,
     Literal,
-    Optional,
     Protocol,
-    Union,
     runtime_checkable,
 )
 
 from pydantic import BaseModel, Field, field_validator
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import TypedDict
 
 from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent, InterleavedContentItem
+from llama_stack.apis.common.responses import Order
 from llama_stack.apis.models import Model
 from llama_stack.apis.telemetry.telemetry import MetricResponseMixin
 from llama_stack.models.llama.datatypes import (
@@ -38,6 +37,16 @@ register_schema(ToolCall)
 register_schema(ToolParamDefinition)
 register_schema(ToolDefinition)
 
+# TODO: use enum.StrEnum when we drop support for python 3.10
+if sys.version_info >= (3, 11):
+    from enum import StrEnum
+else:
+
+    class StrEnum(str, Enum):
+        """Backport of StrEnum for Python 3.10 and below."""
+
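+        # Members of this backport still compare equal to their raw string
+        # values (e.g. ResponseFormatType.json_schema == "json_schema"),
+        # matching the behaviour of enum.StrEnum on Python 3.11+.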
+        pass
+
 
 @json_schema_type
 class GreedySamplingStrategy(BaseModel):
@@ -47,8 +56,8 @@ class GreedySamplingStrategy(BaseModel):
 @json_schema_type
 class TopPSamplingStrategy(BaseModel):
     type: Literal["top_p"] = "top_p"
-    temperature: Optional[float] = Field(..., gt=0.0)
-    top_p: Optional[float] = 0.95
+    temperature: float | None = Field(..., gt=0.0)
+    top_p: float | None = 0.95
 
 
 @json_schema_type
@@ -58,7 +67,7 @@ class TopKSamplingStrategy(BaseModel):
 
 
 SamplingStrategy = Annotated[
-    Union[GreedySamplingStrategy, TopPSamplingStrategy, TopKSamplingStrategy],
+    GreedySamplingStrategy | TopPSamplingStrategy | TopKSamplingStrategy,
     Field(discriminator="type"),
 ]
 register_schema(SamplingStrategy, name="SamplingStrategy")
@@ -79,9 +88,9 @@ class SamplingParams(BaseModel):
 
     strategy: SamplingStrategy = Field(default_factory=GreedySamplingStrategy)
 
-    max_tokens: Optional[int] = 0
-    repetition_penalty: Optional[float] = 1.0
-    stop: Optional[List[str]] = None
+    max_tokens: int | None = 0
+    repetition_penalty: float | None = 1.0
+    stop: list[str] | None = None
 
 
 class LogProbConfig(BaseModel):
@@ -90,7 +99,7 @@ class LogProbConfig(BaseModel):
     :param top_k: How many tokens (for each position) to return log probabilities for.
     """
 
-    top_k: Optional[int] = 0
+    top_k: int | None = 0
 
 
 class QuantizationType(Enum):
@@ -125,11 +134,11 @@ class Int4QuantizationConfig(BaseModel):
     """
 
     type: Literal["int4_mixed"] = "int4_mixed"
-    scheme: Optional[str] = "int4_weight_int8_dynamic_activation"
+    scheme: str | None = "int4_weight_int8_dynamic_activation"
 
 
 QuantizationConfig = Annotated[
-    Union[Bf16QuantizationConfig, Fp8QuantizationConfig, Int4QuantizationConfig],
+    Bf16QuantizationConfig | Fp8QuantizationConfig | Int4QuantizationConfig,
     Field(discriminator="type"),
 ]
 
@@ -145,7 +154,7 @@ class UserMessage(BaseModel):
 
     role: Literal["user"] = "user"
     content: InterleavedContent
-    context: Optional[InterleavedContent] = None
+    context: InterleavedContent | None = None
 
 
 @json_schema_type
@@ -190,16 +199,11 @@ class CompletionMessage(BaseModel):
     role: Literal["assistant"] = "assistant"
     content: InterleavedContent
     stop_reason: StopReason
-    tool_calls: Optional[List[ToolCall]] = Field(default_factory=list)
+    tool_calls: list[ToolCall] | None = Field(default_factory=lambda: [])
 
 
 Message = Annotated[
-    Union[
-        UserMessage,
-        SystemMessage,
-        ToolResponseMessage,
-        CompletionMessage,
-    ],
+    UserMessage | SystemMessage | ToolResponseMessage | CompletionMessage,
     Field(discriminator="role"),
 ]
 register_schema(Message, name="Message")
@@ -208,9 +212,9 @@ register_schema(Message, name="Message")
 @json_schema_type
 class ToolResponse(BaseModel):
     call_id: str
-    tool_name: Union[BuiltinTool, str]
+    tool_name: BuiltinTool | str
     content: InterleavedContent
-    metadata: Optional[Dict[str, Any]] = None
+    metadata: dict[str, Any] | None = None
 
     @field_validator("tool_name", mode="before")
     @classmethod
@@ -243,7 +247,7 @@ class TokenLogProbs(BaseModel):
     :param logprobs_by_token: Dictionary mapping tokens to their log probabilities
     """
 
-    logprobs_by_token: Dict[str, float]
+    logprobs_by_token: dict[str, float]
 
 
 class ChatCompletionResponseEventType(Enum):
@@ -271,11 +275,11 @@ class ChatCompletionResponseEvent(BaseModel):
 
     event_type: ChatCompletionResponseEventType
     delta: ContentDelta
-    logprobs: Optional[List[TokenLogProbs]] = None
-    stop_reason: Optional[StopReason] = None
+    logprobs: list[TokenLogProbs] | None = None
+    stop_reason: StopReason | None = None
 
 
-class ResponseFormatType(Enum):
+class ResponseFormatType(StrEnum):
     """Types of formats for structured (guided) decoding.
 
     :cvar json_schema: Response should conform to a JSON schema. In a Python SDK, this is often a `pydantic` model.
@@ -294,8 +298,8 @@ class JsonSchemaResponseFormat(BaseModel):
     :param json_schema: The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model.
     """
 
-    type: Literal[ResponseFormatType.json_schema.value] = ResponseFormatType.json_schema.value
-    json_schema: Dict[str, Any]
+    type: Literal[ResponseFormatType.json_schema] = ResponseFormatType.json_schema
+    json_schema: dict[str, Any]
 
 
 @json_schema_type
@@ -306,12 +310,12 @@ class GrammarResponseFormat(BaseModel):
     :param bnf: The BNF grammar specification the response should conform to
     """
 
-    type: Literal[ResponseFormatType.grammar.value] = ResponseFormatType.grammar.value
-    bnf: Dict[str, Any]
+    type: Literal[ResponseFormatType.grammar] = ResponseFormatType.grammar
+    bnf: dict[str, Any]
 
 
 ResponseFormat = Annotated[
-    Union[JsonSchemaResponseFormat, GrammarResponseFormat],
+    JsonSchemaResponseFormat | GrammarResponseFormat,
     Field(discriminator="type"),
 ]
 register_schema(ResponseFormat, name="ResponseFormat")
@@ -321,10 +325,10 @@ register_schema(ResponseFormat, name="ResponseFormat")
 class CompletionRequest(BaseModel):
     model: str
     content: InterleavedContent
-    sampling_params: Optional[SamplingParams] = Field(default_factory=SamplingParams)
-    response_format: Optional[ResponseFormat] = None
-    stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    sampling_params: SamplingParams | None = Field(default_factory=SamplingParams)
+    response_format: ResponseFormat | None = None
+    stream: bool | None = False
+    logprobs: LogProbConfig | None = None
 
 
 @json_schema_type
@@ -338,7 +342,7 @@ class CompletionResponse(MetricResponseMixin):
 
     content: str
     stop_reason: StopReason
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: list[TokenLogProbs] | None = None
 
 
 @json_schema_type
@@ -351,8 +355,8 @@ class CompletionResponseStreamChunk(MetricResponseMixin):
     """
 
     delta: str
-    stop_reason: Optional[StopReason] = None
-    logprobs: Optional[List[TokenLogProbs]] = None
+    stop_reason: StopReason | None = None
+    logprobs: list[TokenLogProbs] | None = None
 
 
 class SystemMessageBehavior(Enum):
@@ -383,9 +387,9 @@ class ToolConfig(BaseModel):
             '{{function_definitions}}' to indicate where the function definitions should be inserted.
     """
 
-    tool_choice: Optional[ToolChoice | str] = Field(default=ToolChoice.auto)
-    tool_prompt_format: Optional[ToolPromptFormat] = Field(default=None)
-    system_message_behavior: Optional[SystemMessageBehavior] = Field(default=SystemMessageBehavior.append)
+    tool_choice: ToolChoice | str | None = Field(default=ToolChoice.auto)
+    tool_prompt_format: ToolPromptFormat | None = Field(default=None)
+    system_message_behavior: SystemMessageBehavior | None = Field(default=SystemMessageBehavior.append)
 
     def model_post_init(self, __context: Any) -> None:
         if isinstance(self.tool_choice, str):
@@ -399,15 +403,15 @@ class ToolConfig(BaseModel):
 @json_schema_type
 class ChatCompletionRequest(BaseModel):
     model: str
-    messages: List[Message]
-    sampling_params: Optional[SamplingParams] = Field(default_factory=SamplingParams)
+    messages: list[Message]
+    sampling_params: SamplingParams | None = Field(default_factory=SamplingParams)
 
-    tools: Optional[List[ToolDefinition]] = Field(default_factory=list)
-    tool_config: Optional[ToolConfig] = Field(default_factory=ToolConfig)
+    tools: list[ToolDefinition] | None = Field(default_factory=lambda: [])
+    tool_config: ToolConfig | None = Field(default_factory=ToolConfig)
 
-    response_format: Optional[ResponseFormat] = None
-    stream: Optional[bool] = False
-    logprobs: Optional[LogProbConfig] = None
+    response_format: ResponseFormat | None = None
+    stream: bool | None = False
+    logprobs: LogProbConfig | None = None
 
 
 @json_schema_type
@@ -429,7 +433,7 @@ class ChatCompletionResponse(MetricResponseMixin):
     """
 
     completion_message: CompletionMessage
-    logprobs: Optional[List[TokenLogProbs]] = None
+    logprobs: list[TokenLogProbs] | None = None
 
 
 @json_schema_type
@@ -439,7 +443,7 @@ class EmbeddingsResponse(BaseModel):
     :param embeddings: List of embedding vectors, one per input content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}
     """
 
-    embeddings: List[List[float]]
+    embeddings: list[list[float]]
 
 
 @json_schema_type
@@ -451,7 +455,7 @@ class OpenAIChatCompletionContentPartTextParam(BaseModel):
 @json_schema_type
 class OpenAIImageURL(BaseModel):
     url: str
-    detail: Optional[str] = None
+    detail: str | None = None
 
 
 @json_schema_type
@@ -461,16 +465,13 @@ class OpenAIChatCompletionContentPartImageParam(BaseModel):
 
 
 OpenAIChatCompletionContentPartParam = Annotated[
-    Union[
-        OpenAIChatCompletionContentPartTextParam,
-        OpenAIChatCompletionContentPartImageParam,
-    ],
+    OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
     Field(discriminator="type"),
 ]
 register_schema(OpenAIChatCompletionContentPartParam, name="OpenAIChatCompletionContentPartParam")
 
 
-OpenAIChatCompletionMessageContent = Union[str, List[OpenAIChatCompletionContentPartParam]]
+OpenAIChatCompletionMessageContent = str | list[OpenAIChatCompletionContentPartParam]
 
 
 @json_schema_type
@@ -484,7 +485,7 @@ class OpenAIUserMessageParam(BaseModel):
 
     role: Literal["user"] = "user"
     content: OpenAIChatCompletionMessageContent
-    name: Optional[str] = None
+    name: str | None = None
 
 
 @json_schema_type
@@ -498,21 +499,21 @@ class OpenAISystemMessageParam(BaseModel):
 
     role: Literal["system"] = "system"
     content: OpenAIChatCompletionMessageContent
-    name: Optional[str] = None
+    name: str | None = None
 
 
 @json_schema_type
 class OpenAIChatCompletionToolCallFunction(BaseModel):
-    name: Optional[str] = None
-    arguments: Optional[str] = None
+    name: str | None = None
+    arguments: str | None = None
 
 
 @json_schema_type
 class OpenAIChatCompletionToolCall(BaseModel):
-    index: Optional[int] = None
-    id: Optional[str] = None
+    index: int | None = None
+    id: str | None = None
     type: Literal["function"] = "function"
-    function: Optional[OpenAIChatCompletionToolCallFunction] = None
+    function: OpenAIChatCompletionToolCallFunction | None = None
 
 
 @json_schema_type
@@ -526,9 +527,9 @@ class OpenAIAssistantMessageParam(BaseModel):
     """
 
     role: Literal["assistant"] = "assistant"
-    content: Optional[OpenAIChatCompletionMessageContent] = None
-    name: Optional[str] = None
-    tool_calls: Optional[List[OpenAIChatCompletionToolCall]] = None
+    content: OpenAIChatCompletionMessageContent | None = None
+    name: str | None = None
+    tool_calls: list[OpenAIChatCompletionToolCall] | None = None
 
 
 @json_schema_type
@@ -556,17 +557,15 @@ class OpenAIDeveloperMessageParam(BaseModel):
 
     role: Literal["developer"] = "developer"
     content: OpenAIChatCompletionMessageContent
-    name: Optional[str] = None
+    name: str | None = None
 
 
 OpenAIMessageParam = Annotated[
-    Union[
-        OpenAIUserMessageParam,
-        OpenAISystemMessageParam,
-        OpenAIAssistantMessageParam,
-        OpenAIToolMessageParam,
-        OpenAIDeveloperMessageParam,
-    ],
+    OpenAIUserMessageParam
+    | OpenAISystemMessageParam
+    | OpenAIAssistantMessageParam
+    | OpenAIToolMessageParam
+    | OpenAIDeveloperMessageParam,
     Field(discriminator="role"),
 ]
 register_schema(OpenAIMessageParam, name="OpenAIMessageParam")
@@ -580,14 +579,14 @@ class OpenAIResponseFormatText(BaseModel):
 @json_schema_type
 class OpenAIJSONSchema(TypedDict, total=False):
     name: str
-    description: Optional[str] = None
-    strict: Optional[bool] = None
+    description: str | None
+    strict: bool | None
 
     # Pydantic BaseModel cannot be used with a schema param, since it already
     # has one. And, we don't want to alias here because then have to handle
     # that alias when converting to OpenAI params. So, to support schema,
     # we use a TypedDict.
-    schema: Optional[Dict[str, Any]] = None
+    schema: dict[str, Any] | None
 
 
 @json_schema_type
@@ -602,11 +601,7 @@ class OpenAIResponseFormatJSONObject(BaseModel):
 
 
 OpenAIResponseFormatParam = Annotated[
-    Union[
-        OpenAIResponseFormatText,
-        OpenAIResponseFormatJSONSchema,
-        OpenAIResponseFormatJSONObject,
-    ],
+    OpenAIResponseFormatText | OpenAIResponseFormatJSONSchema | OpenAIResponseFormatJSONObject,
     Field(discriminator="type"),
 ]
 register_schema(OpenAIResponseFormatParam, name="OpenAIResponseFormatParam")
@@ -622,7 +617,7 @@ class OpenAITopLogProb(BaseModel):
     """
 
     token: str
-    bytes: Optional[List[int]] = None
+    bytes: list[int] | None = None
     logprob: float
 
 
@@ -637,9 +632,9 @@ class OpenAITokenLogProb(BaseModel):
     """
 
     token: str
-    bytes: Optional[List[int]] = None
+    bytes: list[int] | None = None
     logprob: float
-    top_logprobs: List[OpenAITopLogProb]
+    top_logprobs: list[OpenAITopLogProb]
 
 
 @json_schema_type
@@ -650,8 +645,8 @@ class OpenAIChoiceLogprobs(BaseModel):
     :param refusal: (Optional) The log probabilities for the tokens in the message
     """
 
-    content: Optional[List[OpenAITokenLogProb]] = None
-    refusal: Optional[List[OpenAITokenLogProb]] = None
+    content: list[OpenAITokenLogProb] | None = None
+    refusal: list[OpenAITokenLogProb] | None = None
 
 
 @json_schema_type
@@ -664,10 +659,10 @@ class OpenAIChoiceDelta(BaseModel):
     :param tool_calls: (Optional) The tool calls of the delta
     """
 
-    content: Optional[str] = None
-    refusal: Optional[str] = None
-    role: Optional[str] = None
-    tool_calls: Optional[List[OpenAIChatCompletionToolCall]] = None
+    content: str | None = None
+    refusal: str | None = None
+    role: str | None = None
+    tool_calls: list[OpenAIChatCompletionToolCall] | None = None
 
 
 @json_schema_type
@@ -683,7 +678,7 @@ class OpenAIChunkChoice(BaseModel):
     delta: OpenAIChoiceDelta
     finish_reason: str
     index: int
-    logprobs: Optional[OpenAIChoiceLogprobs] = None
+    logprobs: OpenAIChoiceLogprobs | None = None
 
 
 @json_schema_type
@@ -699,7 +694,7 @@ class OpenAIChoice(BaseModel):
     message: OpenAIMessageParam
     finish_reason: str
     index: int
-    logprobs: Optional[OpenAIChoiceLogprobs] = None
+    logprobs: OpenAIChoiceLogprobs | None = None
 
 
 @json_schema_type
@@ -714,7 +709,7 @@ class OpenAIChatCompletion(BaseModel):
     """
 
     id: str
-    choices: List[OpenAIChoice]
+    choices: list[OpenAIChoice]
     object: Literal["chat.completion"] = "chat.completion"
     created: int
     model: str
@@ -732,7 +727,7 @@ class OpenAIChatCompletionChunk(BaseModel):
     """
 
     id: str
-    choices: List[OpenAIChunkChoice]
+    choices: list[OpenAIChunkChoice]
     object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
     created: int
     model: str
@@ -748,10 +743,10 @@ class OpenAICompletionLogprobs(BaseModel):
     :top_logprobs: (Optional) The top log probabilities for the tokens
     """
 
-    text_offset: Optional[List[int]] = None
-    token_logprobs: Optional[List[float]] = None
-    tokens: Optional[List[str]] = None
-    top_logprobs: Optional[List[Dict[str, float]]] = None
+    text_offset: list[int] | None = None
+    token_logprobs: list[float] | None = None
+    tokens: list[str] | None = None
+    top_logprobs: list[dict[str, float]] | None = None
 
 
 @json_schema_type
@@ -767,7 +762,7 @@ class OpenAICompletionChoice(BaseModel):
     finish_reason: str
     text: str
     index: int
-    logprobs: Optional[OpenAIChoiceLogprobs] = None
+    logprobs: OpenAIChoiceLogprobs | None = None
 
 
 @json_schema_type
@@ -782,12 +777,54 @@ class OpenAICompletion(BaseModel):
     """
 
     id: str
-    choices: List[OpenAICompletionChoice]
+    choices: list[OpenAICompletionChoice]
     created: int
     model: str
     object: Literal["text_completion"] = "text_completion"
 
 
+@json_schema_type
+class OpenAIEmbeddingData(BaseModel):
+    """A single embedding data object from an OpenAI-compatible embeddings response.
+
+    :param object: The object type, which will be "embedding"
+    :param embedding: The embedding vector as a list of floats (when encoding_format="float") or as a base64-encoded string (when encoding_format="base64")
+    :param index: The index of the embedding in the input list
+    """
+
+    object: Literal["embedding"] = "embedding"
+    embedding: list[float] | str
+    index: int
+
+
+@json_schema_type
+class OpenAIEmbeddingUsage(BaseModel):
+    """Usage information for an OpenAI-compatible embeddings response.
+
+    :param prompt_tokens: The number of tokens in the input
+    :param total_tokens: The total number of tokens used
+    """
+
+    prompt_tokens: int
+    total_tokens: int
+
+
+@json_schema_type
+class OpenAIEmbeddingsResponse(BaseModel):
+    """Response from an OpenAI-compatible embeddings request.
+
+    :param object: The object type, which will be "list"
+    :param data: List of embedding data objects
+    :param model: The model that was used to generate the embeddings
+    :param usage: Usage information
+    """
+
+    object: Literal["list"] = "list"
+    data: list[OpenAIEmbeddingData]
+    model: str
+    usage: OpenAIEmbeddingUsage
+
+
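+# A minimal sketch of how the OpenAI-compatible embeddings response models above compose.
+# All field values here are illustrative assumptions, not defaults shipped by any provider.
+def _example_openai_embeddings_response() -> OpenAIEmbeddingsResponse:
+    return OpenAIEmbeddingsResponse(
+        data=[OpenAIEmbeddingData(embedding=[0.1, 0.2, 0.3], index=0)],
+        model="all-minilm:latest",
+        usage=OpenAIEmbeddingUsage(prompt_tokens=4, total_tokens=4),
+    )
+
+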
 class ModelStore(Protocol):
     async def get_model(self, identifier: str) -> Model: ...
 
@@ -818,23 +855,35 @@ class EmbeddingTaskType(Enum):
 
 @json_schema_type
 class BatchCompletionResponse(BaseModel):
-    batch: List[CompletionResponse]
+    batch: list[CompletionResponse]
 
 
 @json_schema_type
 class BatchChatCompletionResponse(BaseModel):
-    batch: List[ChatCompletionResponse]
+    batch: list[ChatCompletionResponse]
+
+
+class OpenAICompletionWithInputMessages(OpenAIChatCompletion):
+    input_messages: list[OpenAIMessageParam]
+
+
+@json_schema_type
+class ListOpenAIChatCompletionResponse(BaseModel):
+    data: list[OpenAICompletionWithInputMessages]
+    has_more: bool
+    first_id: str
+    last_id: str
+    object: Literal["list"] = "list"
 
 
 @runtime_checkable
 @trace_protocol
-class Inference(Protocol):
-    """Llama Stack Inference API for generating completions, chat completions, and embeddings.
-
-    This API provides the raw interface to the underlying models. Two kinds of models are supported:
-    - LLM models: these models generate "raw" and "chat" (conversational) completions.
-    - Embedding models: these models generate embeddings to be used for semantic search.
+class InferenceProvider(Protocol):
     """
+    This protocol defines the interface that should be implemented by all inference providers.
+    """
+
+    API_NAMESPACE: str = "Inference"
 
     model_store: ModelStore | None = None
 
@@ -843,21 +892,21 @@ class Inference(Protocol):
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]:
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+    ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
         """Generate a completion for the given content using the specified model.
 
         :param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
-        :param content: The content to generate a completion for
-        :param sampling_params: (Optional) Parameters to control the sampling strategy
-        :param response_format: (Optional) Grammar specification for guided (structured) decoding
+        :param content: The content to generate a completion for.
+        :param sampling_params: (Optional) Parameters to control the sampling strategy.
+        :param response_format: (Optional) Grammar specification for guided (structured) decoding.
         :param stream: (Optional) If True, generate an SSE event stream of the response. Defaults to False.
         :param logprobs: (Optional) If specified, log probabilities for each token position will be returned.
         :returns: If stream=False, returns a CompletionResponse with the full completion.
-                 If stream=True, returns an SSE event stream of CompletionResponseStreamChunk
+                 If stream=True, returns an SSE event stream of CompletionResponseStreamChunk.
         """
         ...
 
@@ -865,33 +914,42 @@ class Inference(Protocol):
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ) -> BatchCompletionResponse:
+        """Generate completions for a batch of content using the specified model.
+
+        :param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
+        :param content_batch: The content to generate completions for.
+        :param sampling_params: (Optional) Parameters to control the sampling strategy.
+        :param response_format: (Optional) Grammar specification for guided (structured) decoding.
+        :param logprobs: (Optional) If specified, log probabilities for each token position will be returned.
+        :returns: A BatchCompletionResponse with the full completions.
+        """
         raise NotImplementedError("Batch completion is not implemented")
 
     @webmethod(route="/inference/chat-completion", method="POST")
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         """Generate a chat completion for the given messages using the specified model.
 
         :param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
-        :param messages: List of messages in the conversation
-        :param sampling_params: Parameters to control the sampling strategy
-        :param tools: (Optional) List of tool definitions available to the model
+        :param messages: List of messages in the conversation.
+        :param sampling_params: Parameters to control the sampling strategy.
+        :param tools: (Optional) List of tool definitions available to the model.
         :param tool_choice: (Optional) Whether tool use is required or automatic. Defaults to ToolChoice.auto.
             .. deprecated::
                Use tool_config instead.
@@ -908,7 +966,7 @@ class Inference(Protocol):
         :param logprobs: (Optional) If specified, log probabilities for each token position will be returned.
         :param tool_config: (Optional) Configuration for tool use.
         :returns: If stream=False, returns a ChatCompletionResponse with the full completion.
-                 If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk
+                 If stream=True, returns an SSE event stream of ChatCompletionResponseStreamChunk.
         """
         ...
 
@@ -916,23 +974,34 @@ class Inference(Protocol):
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ) -> BatchChatCompletionResponse:
+        """Generate chat completions for a batch of messages using the specified model.
+
+        :param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
+        :param messages_batch: The messages to generate completions for.
+        :param sampling_params: (Optional) Parameters to control the sampling strategy.
+        :param tools: (Optional) List of tool definitions available to the model.
+        :param tool_config: (Optional) Configuration for tool use.
+        :param response_format: (Optional) Grammar specification for guided (structured) decoding.
+        :param logprobs: (Optional) If specified, log probabilities for each token position will be returned.
+        :returns: A BatchChatCompletionResponse with the full completions.
+        """
         raise NotImplementedError("Batch chat completion is not implemented")
 
     @webmethod(route="/inference/embeddings", method="POST")
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         """Generate embeddings for content pieces using the specified model.
 
@@ -941,7 +1010,7 @@ class Inference(Protocol):
         :param output_dimension: (Optional) Output dimensionality for the embeddings. Only supported by Matryoshka models.
         :param text_truncation: (Optional) Config for how to truncate text for embedding when text is longer than the model's max sequence length.
         :param task_type: (Optional) How is the embedding being used? This is only supported by asymmetric embedding models.
-        :returns: An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}
+        :returns: An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}.
         """
         ...
 
@@ -950,45 +1019,46 @@ class Inference(Protocol):
         self,
         # Standard OpenAI completion parameters
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
         # vLLM-specific parameters
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         """Generate an OpenAI-compatible completion for the given prompt using the specified model.
 
         :param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
-        :param prompt: The prompt to generate a completion for
-        :param best_of: (Optional) The number of completions to generate
-        :param echo: (Optional) Whether to echo the prompt
-        :param frequency_penalty: (Optional) The penalty for repeated tokens
-        :param logit_bias: (Optional) The logit bias to use
-        :param logprobs: (Optional) The log probabilities to use
-        :param max_tokens: (Optional) The maximum number of tokens to generate
-        :param n: (Optional) The number of completions to generate
-        :param presence_penalty: (Optional) The penalty for repeated tokens
-        :param seed: (Optional) The seed to use
-        :param stop: (Optional) The stop tokens to use
-        :param stream: (Optional) Whether to stream the response
-        :param stream_options: (Optional) The stream options to use
-        :param temperature: (Optional) The temperature to use
-        :param top_p: (Optional) The top p to use
-        :param user: (Optional) The user to use
+        :param prompt: The prompt to generate a completion for.
+        :param best_of: (Optional) The number of candidate completions generated server-side; the best one is returned.
+        :param echo: (Optional) Whether to echo the prompt in addition to the completion.
+        :param frequency_penalty: (Optional) Penalty applied to tokens based on how often they have appeared in the text so far.
+        :param logit_bias: (Optional) A map of token IDs to bias values that adjust the likelihood of those tokens appearing.
+        :param logprobs: (Optional) Whether to include log probabilities for the generated tokens.
+        :param max_tokens: (Optional) The maximum number of tokens to generate.
+        :param n: (Optional) How many completions to return for the prompt.
+        :param presence_penalty: (Optional) Penalty applied to tokens based on whether they have already appeared in the text so far.
+        :param seed: (Optional) A seed to make sampling more deterministic.
+        :param stop: (Optional) One or more sequences at which to stop generating further tokens.
+        :param stream: (Optional) Whether to stream the response as server-sent events.
+        :param stream_options: (Optional) Additional options controlling streaming behavior.
+        :param temperature: (Optional) The sampling temperature to use.
+        :param top_p: (Optional) The nucleus sampling (top-p) value to use.
+        :param user: (Optional) A unique identifier for the end user, used for tracking and abuse detection.
+        :returns: An OpenAICompletion.
         """
         ...
 
@@ -996,53 +1066,110 @@ class Inference(Protocol):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         """Generate an OpenAI-compatible chat completion for the given messages using the specified model.
 
         :param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
-        :param messages: List of messages in the conversation
-        :param frequency_penalty: (Optional) The penalty for repeated tokens
-        :param function_call: (Optional) The function call to use
-        :param functions: (Optional) List of functions to use
-        :param logit_bias: (Optional) The logit bias to use
-        :param logprobs: (Optional) The log probabilities to use
-        :param max_completion_tokens: (Optional) The maximum number of tokens to generate
-        :param max_tokens: (Optional) The maximum number of tokens to generate
-        :param n: (Optional) The number of completions to generate
-        :param parallel_tool_calls: (Optional) Whether to parallelize tool calls
-        :param presence_penalty: (Optional) The penalty for repeated tokens
-        :param response_format: (Optional) The response format to use
-        :param seed: (Optional) The seed to use
-        :param stop: (Optional) The stop tokens to use
-        :param stream: (Optional) Whether to stream the response
-        :param stream_options: (Optional) The stream options to use
-        :param temperature: (Optional) The temperature to use
-        :param tool_choice: (Optional) The tool choice to use
-        :param tools: (Optional) The tools to use
-        :param top_logprobs: (Optional) The top log probabilities to use
-        :param top_p: (Optional) The top p to use
-        :param user: (Optional) The user to use
+        :param messages: List of messages in the conversation.
+        :param frequency_penalty: (Optional) Penalty applied to tokens based on how often they have appeared in the text so far.
+        :param function_call: (Optional) Controls which function, if any, the model should call (legacy alternative to tool_choice).
+        :param functions: (Optional) List of function definitions the model may call (legacy alternative to tools).
+        :param logit_bias: (Optional) A map of token IDs to bias values that adjust the likelihood of those tokens appearing.
+        :param logprobs: (Optional) Whether to return log probabilities of the output tokens.
+        :param max_completion_tokens: (Optional) An upper bound for the number of tokens that can be generated for the completion.
+        :param max_tokens: (Optional) The maximum number of tokens to generate (legacy; prefer max_completion_tokens).
+        :param n: (Optional) How many chat completion choices to generate for the input messages.
+        :param parallel_tool_calls: (Optional) Whether the model may call multiple tools in parallel.
+        :param presence_penalty: (Optional) Penalty applied to tokens based on whether they have already appeared in the text so far.
+        :param response_format: (Optional) The format the model must output: text, a JSON object, or a JSON schema.
+        :param seed: (Optional) A seed to make sampling more deterministic.
+        :param stop: (Optional) One or more sequences at which to stop generating further tokens.
+        :param stream: (Optional) Whether to stream the response as server-sent events.
+        :param stream_options: (Optional) Additional options controlling streaming behavior.
+        :param temperature: (Optional) The sampling temperature to use.
+        :param tool_choice: (Optional) Controls which, if any, tool the model should call.
+        :param tools: (Optional) List of tool definitions the model may call.
+        :param top_logprobs: (Optional) The number of most likely tokens to return at each position, with their log probabilities.
+        :param top_p: (Optional) The nucleus sampling (top-p) value to use.
+        :param user: (Optional) A unique identifier for the end user, used for tracking and abuse detection.
+        :returns: An OpenAIChatCompletion.
         """
         ...
+
+    @webmethod(route="/openai/v1/embeddings", method="POST")
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        """Generate OpenAI-compatible embeddings for the given input using the specified model.
+
+        :param model: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.
+        :param input: Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings.
+        :param encoding_format: (Optional) The format to return the embeddings in. Can be either "float" or "base64". Defaults to "float".
+        :param dimensions: (Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.
+        :param user: (Optional) A unique identifier representing your end-user, which can help providers monitor and detect abuse.
+        :returns: An OpenAIEmbeddingsResponse containing the embeddings.
+        """
+        ...
+
+
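+# A hedged usage sketch for the OpenAI-compatible embeddings endpoint defined above; the
+# model name and inputs are illustrative assumptions.
+async def _example_openai_embeddings_call(provider: InferenceProvider) -> OpenAIEmbeddingsResponse:
+    return await provider.openai_embeddings(
+        model="all-minilm:latest",
+        input=["The quick brown fox", "jumps over the lazy dog"],
+        encoding_format="float",
+    )
+
+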
+class Inference(InferenceProvider):
+    """Llama Stack Inference API for generating completions, chat completions, and embeddings.
+
+    This API provides the raw interface to the underlying models. Two kinds of models are supported:
+    - LLM models: these models generate "raw" and "chat" (conversational) completions.
+    - Embedding models: these models generate embeddings to be used for semantic search.
+    """
+
+    @webmethod(route="/openai/v1/chat/completions", method="GET")
+    async def list_chat_completions(
+        self,
+        after: str | None = None,
+        limit: int | None = 20,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIChatCompletionResponse:
+        """List all chat completions.
+
+        :param after: The ID of the last chat completion from the previous page; only completions after this ID are returned.
+        :param limit: The maximum number of chat completions to return.
+        :param model: The model to filter by.
+        :param order: The order to sort the chat completions by: "asc" or "desc". Defaults to "desc".
+        :returns: A ListOpenAIChatCompletionResponse.
+        """
+        raise NotImplementedError("List chat completions is not implemented")
+
+    @webmethod(route="/openai/v1/chat/completions/{completion_id}", method="GET")
+    async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
+        """Describe a chat completion by its ID.
+
+        :param completion_id: ID of the chat completion.
+        :returns: An OpenAICompletionWithInputMessages.
+        """
+        raise NotImplementedError("Get chat completion is not implemented")
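+
+
+# A hedged sketch of paging through stored chat completions with the endpoints above; the
+# model filter and page size are illustrative assumptions.
+async def _example_list_chat_completions(inference: Inference) -> None:
+    page = await inference.list_chat_completions(limit=5, model="llama3.2:3b-instruct-fp16", order=Order.desc)
+    for summary in page.data:
+        # Each stored completion also carries the input messages that produced it.
+        detail = await inference.get_chat_completion(summary.id)
+        print(detail.id, detail.model, len(detail.input_messages))
+    if page.has_more:
+        # Continue from the last returned ID on the next request.
+        await inference.list_chat_completions(after=page.last_id, limit=5)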
diff --git a/llama_stack/apis/inspect/inspect.py b/llama_stack/apis/inspect/inspect.py
index 863f90e14..44a5e95b2 100644
--- a/llama_stack/apis/inspect/inspect.py
+++ b/llama_stack/apis/inspect/inspect.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Protocol, runtime_checkable
+from typing import Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -16,7 +16,7 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 class RouteInfo(BaseModel):
     route: str
     method: str
-    provider_types: List[str]
+    provider_types: list[str]
 
 
 @json_schema_type
@@ -30,16 +30,31 @@ class VersionInfo(BaseModel):
 
 
 class ListRoutesResponse(BaseModel):
-    data: List[RouteInfo]
+    data: list[RouteInfo]
 
 
 @runtime_checkable
 class Inspect(Protocol):
     @webmethod(route="/inspect/routes", method="GET")
-    async def list_routes(self) -> ListRoutesResponse: ...
+    async def list_routes(self) -> ListRoutesResponse:
+        """List all routes.
+
+        :returns: A ListRoutesResponse.
+        """
+        ...
 
     @webmethod(route="/health", method="GET")
-    async def health(self) -> HealthInfo: ...
+    async def health(self) -> HealthInfo:
+        """Get the health of the service.
+
+        :returns: A HealthInfo.
+        """
+        ...
 
     @webmethod(route="/version", method="GET")
-    async def version(self) -> VersionInfo: ...
+    async def version(self) -> VersionInfo:
+        """Get the version of the service.
+
+        :returns: A VersionInfo.
+        """
+        ...
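+
+
+# A hedged sketch of exercising the Inspect protocol above; output is purely illustrative.
+async def _example_inspect(inspect_api: Inspect) -> None:
+    routes = await inspect_api.list_routes()
+    for route in routes.data:
+        print(route.method, route.route, route.provider_types)
+    print(await inspect_api.health())
+    print(await inspect_api.version())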
diff --git a/llama_stack/apis/models/models.py b/llama_stack/apis/models/models.py
index 97398ce75..3d90a92a0 100644
--- a/llama_stack/apis/models/models.py
+++ b/llama_stack/apis/models/models.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
+from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -15,7 +15,7 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 
 
 class CommonModelFields(BaseModel):
-    metadata: Dict[str, Any] = Field(
+    metadata: dict[str, Any] = Field(
         default_factory=dict,
         description="Any additional metadata for this model",
     )
@@ -29,14 +29,14 @@ class ModelType(str, Enum):
 
 @json_schema_type
 class Model(CommonModelFields, Resource):
-    type: Literal[ResourceType.model.value] = ResourceType.model.value
+    type: Literal[ResourceType.model] = ResourceType.model
 
     @property
     def model_id(self) -> str:
         return self.identifier
 
     @property
-    def provider_model_id(self) -> str:
+    def provider_model_id(self) -> str | None:
         return self.provider_resource_id
 
     model_config = ConfigDict(protected_namespaces=())
@@ -46,14 +46,14 @@ class Model(CommonModelFields, Resource):
 
 class ModelInput(CommonModelFields):
     model_id: str
-    provider_id: Optional[str] = None
-    provider_model_id: Optional[str] = None
-    model_type: Optional[ModelType] = ModelType.llm
+    provider_id: str | None = None
+    provider_model_id: str | None = None
+    model_type: ModelType | None = ModelType.llm
     model_config = ConfigDict(protected_namespaces=())
 
 
 class ListModelsResponse(BaseModel):
-    data: List[Model]
+    data: list[Model]
 
 
 @json_schema_type
@@ -73,36 +73,67 @@ class OpenAIModel(BaseModel):
 
 
 class OpenAIListModelsResponse(BaseModel):
-    data: List[OpenAIModel]
+    data: list[OpenAIModel]
 
 
 @runtime_checkable
 @trace_protocol
 class Models(Protocol):
     @webmethod(route="/models", method="GET")
-    async def list_models(self) -> ListModelsResponse: ...
+    async def list_models(self) -> ListModelsResponse:
+        """List all models.
+
+        :returns: A ListModelsResponse.
+        """
+        ...
 
     @webmethod(route="/openai/v1/models", method="GET")
-    async def openai_list_models(self) -> OpenAIListModelsResponse: ...
+    async def openai_list_models(self) -> OpenAIListModelsResponse:
+        """List models using the OpenAI API.
+
+        :returns: An OpenAIListModelsResponse.
+        """
+        ...
 
     @webmethod(route="/models/{model_id:path}", method="GET")
     async def get_model(
         self,
         model_id: str,
-    ) -> Model: ...
+    ) -> Model:
+        """Get a model by its identifier.
+
+        :param model_id: The identifier of the model to get.
+        :returns: A Model.
+        """
+        ...
 
     @webmethod(route="/models", method="POST")
     async def register_model(
         self,
         model_id: str,
-        provider_model_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        model_type: Optional[ModelType] = None,
-    ) -> Model: ...
+        provider_model_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+        model_type: ModelType | None = None,
+    ) -> Model:
+        """Register a model.
+
+        :param model_id: The identifier of the model to register.
+        :param provider_model_id: The identifier of the model in the provider.
+        :param provider_id: The identifier of the provider.
+        :param metadata: Any additional metadata for this model.
+        :param model_type: The type of model to register.
+        :returns: A Model.
+        """
+        ...
 
     @webmethod(route="/models/{model_id:path}", method="DELETE")
     async def unregister_model(
         self,
         model_id: str,
-    ) -> None: ...
+    ) -> None:
+        """Unregister a model.
+
+        :param model_id: The identifier of the model to unregister.
+        """
+        ...
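+
+
+# A hedged usage sketch for the Models protocol above; the model and provider IDs are
+# illustrative assumptions, not values shipped with any distribution.
+async def _example_register_and_get_model(models: Models) -> Model:
+    registered = await models.register_model(
+        model_id="llama3.2:3b-instruct-fp16",
+        provider_id="ollama",
+        model_type=ModelType.llm,
+    )
+    return await models.get_model(registered.model_id)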
diff --git a/llama_stack/apis/post_training/post_training.py b/llama_stack/apis/post_training/post_training.py
index e5f1bcb65..b196c8a17 100644
--- a/llama_stack/apis/post_training/post_training.py
+++ b/llama_stack/apis/post_training/post_training.py
@@ -6,10 +6,9 @@
 
 from datetime import datetime
 from enum import Enum
-from typing import Any, Dict, List, Literal, Optional, Protocol, Union
+from typing import Annotated, Any, Literal, Protocol
 
 from pydantic import BaseModel, Field
-from typing_extensions import Annotated
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.common.job_types import JobStatus
@@ -36,9 +35,9 @@ class DataConfig(BaseModel):
     batch_size: int
     shuffle: bool
     data_format: DatasetFormat
-    validation_dataset_id: Optional[str] = None
-    packed: Optional[bool] = False
-    train_on_input: Optional[bool] = False
+    validation_dataset_id: str | None = None
+    packed: bool | None = False
+    train_on_input: bool | None = False
 
 
 @json_schema_type
@@ -51,10 +50,10 @@ class OptimizerConfig(BaseModel):
 
 @json_schema_type
 class EfficiencyConfig(BaseModel):
-    enable_activation_checkpointing: Optional[bool] = False
-    enable_activation_offloading: Optional[bool] = False
-    memory_efficient_fsdp_wrap: Optional[bool] = False
-    fsdp_cpu_offload: Optional[bool] = False
+    enable_activation_checkpointing: bool | None = False
+    enable_activation_offloading: bool | None = False
+    memory_efficient_fsdp_wrap: bool | None = False
+    fsdp_cpu_offload: bool | None = False
 
 
 @json_schema_type
@@ -62,23 +61,23 @@ class TrainingConfig(BaseModel):
     n_epochs: int
     max_steps_per_epoch: int = 1
     gradient_accumulation_steps: int = 1
-    max_validation_steps: Optional[int] = 1
-    data_config: Optional[DataConfig] = None
-    optimizer_config: Optional[OptimizerConfig] = None
-    efficiency_config: Optional[EfficiencyConfig] = None
-    dtype: Optional[str] = "bf16"
+    max_validation_steps: int | None = 1
+    data_config: DataConfig | None = None
+    optimizer_config: OptimizerConfig | None = None
+    efficiency_config: EfficiencyConfig | None = None
+    dtype: str | None = "bf16"
 
 
 @json_schema_type
 class LoraFinetuningConfig(BaseModel):
     type: Literal["LoRA"] = "LoRA"
-    lora_attn_modules: List[str]
+    lora_attn_modules: list[str]
     apply_lora_to_mlp: bool
     apply_lora_to_output: bool
     rank: int
     alpha: int
-    use_dora: Optional[bool] = False
-    quantize_base: Optional[bool] = False
+    use_dora: bool | None = False
+    quantize_base: bool | None = False
 
 
 @json_schema_type
@@ -88,7 +87,7 @@ class QATFinetuningConfig(BaseModel):
     group_size: int
 
 
-AlgorithmConfig = Annotated[Union[LoraFinetuningConfig, QATFinetuningConfig], Field(discriminator="type")]
+AlgorithmConfig = Annotated[LoraFinetuningConfig | QATFinetuningConfig, Field(discriminator="type")]
 register_schema(AlgorithmConfig, name="AlgorithmConfig")
 
 
@@ -97,7 +96,7 @@ class PostTrainingJobLogStream(BaseModel):
     """Stream of logs from a finetuning job."""
 
     job_uuid: str
-    log_lines: List[str]
+    log_lines: list[str]
 
 
 @json_schema_type
@@ -131,8 +130,8 @@ class PostTrainingRLHFRequest(BaseModel):
     training_config: TrainingConfig
 
     # TODO: define these
-    hyperparam_search_config: Dict[str, Any]
-    logger_config: Dict[str, Any]
+    hyperparam_search_config: dict[str, Any]
+    logger_config: dict[str, Any]
 
 
 class PostTrainingJob(BaseModel):
@@ -146,17 +145,17 @@ class PostTrainingJobStatusResponse(BaseModel):
     job_uuid: str
     status: JobStatus
 
-    scheduled_at: Optional[datetime] = None
-    started_at: Optional[datetime] = None
-    completed_at: Optional[datetime] = None
+    scheduled_at: datetime | None = None
+    started_at: datetime | None = None
+    completed_at: datetime | None = None
 
-    resources_allocated: Optional[Dict[str, Any]] = None
+    resources_allocated: dict[str, Any] | None = None
 
-    checkpoints: List[Checkpoint] = Field(default_factory=list)
+    checkpoints: list[Checkpoint] = Field(default_factory=list)
 
 
 class ListPostTrainingJobsResponse(BaseModel):
-    data: List[PostTrainingJob]
+    data: list[PostTrainingJob]
 
 
 @json_schema_type
@@ -164,7 +163,7 @@ class PostTrainingJobArtifactsResponse(BaseModel):
     """Artifacts of a finetuning job."""
 
     job_uuid: str
-    checkpoints: List[Checkpoint] = Field(default_factory=list)
+    checkpoints: list[Checkpoint] = Field(default_factory=list)
 
     # TODO(ashwin): metrics, evals
 
@@ -175,15 +174,27 @@ class PostTraining(Protocol):
         self,
         job_uuid: str,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
-        model: Optional[str] = Field(
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
+        model: str | None = Field(
             default=None,
             description="Model descriptor for training if not in provider config`",
         ),
-        checkpoint_dir: Optional[str] = None,
-        algorithm_config: Optional[AlgorithmConfig] = None,
-    ) -> PostTrainingJob: ...
+        checkpoint_dir: str | None = None,
+        algorithm_config: AlgorithmConfig | None = None,
+    ) -> PostTrainingJob:
+        """Run supervised fine-tuning of a model.
+
+        :param job_uuid: The UUID of the job to create.
+        :param training_config: The training configuration.
+        :param hyperparam_search_config: The hyperparam search configuration.
+        :param logger_config: The logger configuration.
+        :param model: The model to fine-tune.
+        :param checkpoint_dir: The directory to save checkpoint(s) to.
+        :param algorithm_config: The algorithm configuration.
+        :returns: A PostTrainingJob.
+        """
+        ...
 
     @webmethod(route="/post-training/preference-optimize", method="POST")
     async def preference_optimize(
@@ -192,18 +203,51 @@ class PostTraining(Protocol):
         finetuned_model: str,
         algorithm_config: DPOAlignmentConfig,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
-    ) -> PostTrainingJob: ...
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
+    ) -> PostTrainingJob:
+        """Run preference optimization of a model.
+
+        :param job_uuid: The UUID of the job to create.
+        :param finetuned_model: The model to fine-tune.
+        :param algorithm_config: The algorithm configuration.
+        :param training_config: The training configuration.
+        :param hyperparam_search_config: The hyperparam search configuration.
+        :param logger_config: The logger configuration.
+        :returns: A PostTrainingJob.
+        """
+        ...
 
     @webmethod(route="/post-training/jobs", method="GET")
-    async def get_training_jobs(self) -> ListPostTrainingJobsResponse: ...
+    async def get_training_jobs(self) -> ListPostTrainingJobsResponse:
+        """Get all training jobs.
+
+        :returns: A ListPostTrainingJobsResponse.
+        """
+        ...
 
     @webmethod(route="/post-training/job/status", method="GET")
-    async def get_training_job_status(self, job_uuid: str) -> PostTrainingJobStatusResponse: ...
+    async def get_training_job_status(self, job_uuid: str) -> PostTrainingJobStatusResponse:
+        """Get the status of a training job.
+
+        :param job_uuid: The UUID of the job to get the status of.
+        :returns: A PostTrainingJobStatusResponse.
+        """
+        ...
 
     @webmethod(route="/post-training/job/cancel", method="POST")
-    async def cancel_training_job(self, job_uuid: str) -> None: ...
+    async def cancel_training_job(self, job_uuid: str) -> None:
+        """Cancel a training job.
+
+        :param job_uuid: The UUID of the job to cancel.
+        """
+        ...
 
     @webmethod(route="/post-training/job/artifacts", method="GET")
-    async def get_training_job_artifacts(self, job_uuid: str) -> PostTrainingJobArtifactsResponse: ...
+    async def get_training_job_artifacts(self, job_uuid: str) -> PostTrainingJobArtifactsResponse:
+        """Get the artifacts of a training job.
+
+        :param job_uuid: The UUID of the job to get the artifacts of.
+        :returns: A PostTrainingJobArtifactsResponse.
+        """
+        ...
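+
+
+# A hedged sketch of launching a LoRA fine-tuning job via the protocol above; the model,
+# dataset ID, and hyperparameters are illustrative assumptions only.
+async def _example_supervised_fine_tune(post_training: PostTraining) -> PostTrainingJob:
+    return await post_training.supervised_fine_tune(
+        job_uuid="job-1234",
+        model="meta-llama/Llama-3.2-3B-Instruct",
+        training_config=TrainingConfig(
+            n_epochs=1,
+            data_config=DataConfig(
+                dataset_id="my-sft-dataset",
+                batch_size=4,
+                shuffle=True,
+                data_format=DatasetFormat.instruct,
+            ),
+        ),
+        algorithm_config=LoraFinetuningConfig(
+            lora_attn_modules=["q_proj", "v_proj"],
+            apply_lora_to_mlp=False,
+            apply_lora_to_output=False,
+            rank=8,
+            alpha=16,
+        ),
+        hyperparam_search_config={},
+        logger_config={},
+    )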
diff --git a/llama_stack/apis/providers/providers.py b/llama_stack/apis/providers/providers.py
index ea5f968ec..4bc977bf1 100644
--- a/llama_stack/apis/providers/providers.py
+++ b/llama_stack/apis/providers/providers.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -17,12 +17,12 @@ class ProviderInfo(BaseModel):
     api: str
     provider_id: str
     provider_type: str
-    config: Dict[str, Any]
+    config: dict[str, Any]
     health: HealthResponse
 
 
 class ListProvidersResponse(BaseModel):
-    data: List[ProviderInfo]
+    data: list[ProviderInfo]
 
 
 @runtime_checkable
@@ -32,7 +32,18 @@ class Providers(Protocol):
     """
 
     @webmethod(route="/providers", method="GET")
-    async def list_providers(self) -> ListProvidersResponse: ...
+    async def list_providers(self) -> ListProvidersResponse:
+        """List all available providers.
+
+        :returns: A ListProvidersResponse containing information about all providers.
+        """
+        ...
 
     @webmethod(route="/providers/{provider_id}", method="GET")
-    async def inspect_provider(self, provider_id: str) -> ProviderInfo: ...
+    async def inspect_provider(self, provider_id: str) -> ProviderInfo:
+        """Get detailed information about a specific provider.
+
+        :param provider_id: The ID of the provider to inspect.
+        :returns: A ProviderInfo object containing the provider's details.
+        """
+        ...
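+
+
+# A hedged sketch of listing and inspecting providers; the provider ID is an illustrative
+# assumption.
+async def _example_inspect_provider(providers: Providers) -> ProviderInfo:
+    listing = await providers.list_providers()
+    for info in listing.data:
+        print(info.api, info.provider_id, info.provider_type, info.health)
+    return await providers.inspect_provider("ollama")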
diff --git a/llama_stack/apis/resource.py b/llama_stack/apis/resource.py
index 70ec63c55..175baa7b9 100644
--- a/llama_stack/apis/resource.py
+++ b/llama_stack/apis/resource.py
@@ -4,12 +4,23 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import sys
 from enum import Enum
 
 from pydantic import BaseModel, Field
 
+# TODO: use enum.StrEnum when we drop support for python 3.10
+if sys.version_info >= (3, 11):
+    from enum import StrEnum
+else:
 
-class ResourceType(Enum):
+    class StrEnum(str, Enum):
+        """Backport of StrEnum for Python 3.10 and below."""
+
+        pass
+
+
+class ResourceType(StrEnum):
     model = "model"
     shield = "shield"
     vector_db = "vector_db"
@@ -25,9 +36,9 @@ class Resource(BaseModel):
 
     identifier: str = Field(description="Unique identifier for this resource in llama stack")
 
-    provider_resource_id: str = Field(
-        description="Unique identifier for this resource in the provider",
+    provider_resource_id: str | None = Field(
         default=None,
+        description="Unique identifier for this resource in the provider",
     )
 
     provider_id: str = Field(description="ID of the provider that owns this resource")
diff --git a/llama_stack/apis/safety/safety.py b/llama_stack/apis/safety/safety.py
index fd2f0292c..3aee52b7e 100644
--- a/llama_stack/apis/safety/safety.py
+++ b/llama_stack/apis/safety/safety.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
@@ -27,16 +27,16 @@ class SafetyViolation(BaseModel):
     violation_level: ViolationLevel
 
     # what message should you convey to the user
-    user_message: Optional[str] = None
+    user_message: str | None = None
 
     # additional metadata (including specific violation codes) more for
     # debugging, telemetry
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    metadata: dict[str, Any] = Field(default_factory=dict)
 
 
 @json_schema_type
 class RunShieldResponse(BaseModel):
-    violation: Optional[SafetyViolation] = None
+    violation: SafetyViolation | None = None
 
 
 class ShieldStore(Protocol):
@@ -52,6 +52,14 @@ class Safety(Protocol):
     async def run_shield(
         self,
         shield_id: str,
-        messages: List[Message],
-        params: Dict[str, Any] = None,
-    ) -> RunShieldResponse: ...
+        messages: list[Message],
+        params: dict[str, Any],
+    ) -> RunShieldResponse:
+        """Run a shield.
+
+        :param shield_id: The identifier of the shield to run.
+        :param messages: The messages to run the shield on.
+        :param params: The parameters of the shield.
+        :returns: A RunShieldResponse.
+        """
+        ...
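+
+
+# A hedged sketch of running a shield over a conversation; the shield ID is an
+# illustrative assumption.
+async def _example_run_shield(safety: Safety, messages: list[Message]) -> None:
+    response = await safety.run_shield(shield_id="llama-guard", messages=messages, params={})
+    if response.violation is not None:
+        print(response.violation.violation_level, response.violation.user_message)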
diff --git a/llama_stack/apis/scoring/scoring.py b/llama_stack/apis/scoring/scoring.py
index 54a9ac2aa..732e80e79 100644
--- a/llama_stack/apis/scoring/scoring.py
+++ b/llama_stack/apis/scoring/scoring.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -12,7 +12,7 @@ from llama_stack.apis.scoring_functions import ScoringFn, ScoringFnParams
 from llama_stack.schema_utils import json_schema_type, webmethod
 
 # mapping of metric to value
-ScoringResultRow = Dict[str, Any]
+ScoringResultRow = dict[str, Any]
 
 
 @json_schema_type
@@ -24,15 +24,15 @@ class ScoringResult(BaseModel):
     :param aggregated_results: Map of metric name to aggregated value
     """
 
-    score_rows: List[ScoringResultRow]
+    score_rows: list[ScoringResultRow]
     # aggregated metrics to value
-    aggregated_results: Dict[str, Any]
+    aggregated_results: dict[str, Any]
 
 
 @json_schema_type
 class ScoreBatchResponse(BaseModel):
-    dataset_id: Optional[str] = None
-    results: Dict[str, ScoringResult]
+    dataset_id: str | None = None
+    results: dict[str, ScoringResult]
 
 
 @json_schema_type
@@ -44,7 +44,7 @@ class ScoreResponse(BaseModel):
     """
 
     # each key in the dict is a scoring function name
-    results: Dict[str, ScoringResult]
+    results: dict[str, ScoringResult]
 
 
 class ScoringFunctionStore(Protocol):
@@ -59,20 +59,28 @@ class Scoring(Protocol):
     async def score_batch(
         self,
         dataset_id: str,
-        scoring_functions: Dict[str, Optional[ScoringFnParams]],
+        scoring_functions: dict[str, ScoringFnParams | None],
         save_results_dataset: bool = False,
-    ) -> ScoreBatchResponse: ...
+    ) -> ScoreBatchResponse:
+        """Score a batch of rows.
+
+        :param dataset_id: The ID of the dataset to score.
+        :param scoring_functions: The scoring functions to use for the scoring.
+        :param save_results_dataset: Whether to save the results to a dataset.
+        :returns: A ScoreBatchResponse.
+        """
+        ...
 
     @webmethod(route="/scoring/score", method="POST")
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: Dict[str, Optional[ScoringFnParams]],
+        input_rows: list[dict[str, Any]],
+        scoring_functions: dict[str, ScoringFnParams | None],
     ) -> ScoreResponse:
         """Score a list of rows.
 
         :param input_rows: The rows to score.
         :param scoring_functions: The scoring functions to use for the scoring.
-        :return: ScoreResponse object containing rows and aggregated results
+        :returns: A ScoreResponse object containing rows and aggregated results.
         """
         ...
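+
+
+# A hedged sketch of scoring rows with the protocol above; the scoring function ID and row
+# fields are illustrative assumptions.
+async def _example_score(scoring: Scoring) -> ScoreResponse:
+    rows = [{"input_query": "What is 2 + 2?", "generated_answer": "4", "expected_answer": "4"}]
+    response = await scoring.score(
+        input_rows=rows,
+        scoring_functions={"basic::equality": None},
+    )
+    for name, result in response.results.items():
+        print(name, result.aggregated_results)
+    return response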
diff --git a/llama_stack/apis/scoring_functions/scoring_functions.py b/llama_stack/apis/scoring_functions/scoring_functions.py
index 4f85947dd..9cd21b7d1 100644
--- a/llama_stack/apis/scoring_functions/scoring_functions.py
+++ b/llama_stack/apis/scoring_functions/scoring_functions.py
@@ -4,37 +4,44 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+# TODO: use enum.StrEnum when we drop support for python 3.10
+import sys
 from enum import Enum
 from typing import (
+    Annotated,
     Any,
-    Dict,
-    List,
     Literal,
-    Optional,
     Protocol,
-    Union,
     runtime_checkable,
 )
 
 from pydantic import BaseModel, Field
-from typing_extensions import Annotated
 
 from llama_stack.apis.common.type_system import ParamType
 from llama_stack.apis.resource import Resource, ResourceType
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
 
+if sys.version_info >= (3, 11):
+    from enum import StrEnum
+else:
+
+    class StrEnum(str, Enum):
+        """Backport of StrEnum for Python 3.10 and below."""
+
+        pass
+
 
 # Perhaps more structure can be imposed on these functions. Maybe they could be associated
 # with standard metrics so they can be rolled up?
 @json_schema_type
-class ScoringFnParamsType(Enum):
+class ScoringFnParamsType(StrEnum):
     llm_as_judge = "llm_as_judge"
     regex_parser = "regex_parser"
     basic = "basic"
 
 
 @json_schema_type
-class AggregationFunctionType(Enum):
+class AggregationFunctionType(StrEnum):
     average = "average"
     weighted_average = "weighted_average"
     median = "median"
@@ -44,62 +51,58 @@ class AggregationFunctionType(Enum):
 
 @json_schema_type
 class LLMAsJudgeScoringFnParams(BaseModel):
-    type: Literal[ScoringFnParamsType.llm_as_judge.value] = ScoringFnParamsType.llm_as_judge.value
+    type: Literal[ScoringFnParamsType.llm_as_judge] = ScoringFnParamsType.llm_as_judge
     judge_model: str
-    prompt_template: Optional[str] = None
-    judge_score_regexes: Optional[List[str]] = Field(
+    prompt_template: str | None = None
+    judge_score_regexes: list[str] = Field(
         description="Regexes to extract the answer from generated response",
-        default_factory=list,
+        default_factory=lambda: [],
     )
-    aggregation_functions: Optional[List[AggregationFunctionType]] = Field(
+    aggregation_functions: list[AggregationFunctionType] = Field(
         description="Aggregation functions to apply to the scores of each row",
-        default_factory=list,
+        default_factory=lambda: [],
     )
 
 
 @json_schema_type
 class RegexParserScoringFnParams(BaseModel):
-    type: Literal[ScoringFnParamsType.regex_parser.value] = ScoringFnParamsType.regex_parser.value
-    parsing_regexes: Optional[List[str]] = Field(
+    type: Literal[ScoringFnParamsType.regex_parser] = ScoringFnParamsType.regex_parser
+    parsing_regexes: list[str] = Field(
         description="Regex to extract the answer from generated response",
-        default_factory=list,
+        default_factory=lambda: [],
     )
-    aggregation_functions: Optional[List[AggregationFunctionType]] = Field(
+    aggregation_functions: list[AggregationFunctionType] = Field(
         description="Aggregation functions to apply to the scores of each row",
-        default_factory=list,
+        default_factory=lambda: [],
     )
 
 
 @json_schema_type
 class BasicScoringFnParams(BaseModel):
-    type: Literal[ScoringFnParamsType.basic.value] = ScoringFnParamsType.basic.value
-    aggregation_functions: Optional[List[AggregationFunctionType]] = Field(
+    type: Literal[ScoringFnParamsType.basic] = ScoringFnParamsType.basic
+    aggregation_functions: list[AggregationFunctionType] = Field(
         description="Aggregation functions to apply to the scores of each row",
         default_factory=list,
     )
 
 
 ScoringFnParams = Annotated[
-    Union[
-        LLMAsJudgeScoringFnParams,
-        RegexParserScoringFnParams,
-        BasicScoringFnParams,
-    ],
+    LLMAsJudgeScoringFnParams | RegexParserScoringFnParams | BasicScoringFnParams,
     Field(discriminator="type"),
 ]
 register_schema(ScoringFnParams, name="ScoringFnParams")
 
 
 class CommonScoringFnFields(BaseModel):
-    description: Optional[str] = None
-    metadata: Dict[str, Any] = Field(
+    description: str | None = None
+    metadata: dict[str, Any] = Field(
         default_factory=dict,
         description="Any additional metadata for this definition",
     )
     return_type: ParamType = Field(
         description="The return type of the deterministic function",
     )
-    params: Optional[ScoringFnParams] = Field(
+    params: ScoringFnParams | None = Field(
         description="The parameters for the scoring function for benchmark eval, these can be overridden for app eval",
         default=None,
     )
@@ -107,34 +110,45 @@ class CommonScoringFnFields(BaseModel):
 
 @json_schema_type
 class ScoringFn(CommonScoringFnFields, Resource):
-    type: Literal[ResourceType.scoring_function.value] = ResourceType.scoring_function.value
+    type: Literal[ResourceType.scoring_function] = ResourceType.scoring_function
 
     @property
     def scoring_fn_id(self) -> str:
         return self.identifier
 
     @property
-    def provider_scoring_fn_id(self) -> str:
+    def provider_scoring_fn_id(self) -> str | None:
         return self.provider_resource_id
 
 
 class ScoringFnInput(CommonScoringFnFields, BaseModel):
     scoring_fn_id: str
-    provider_id: Optional[str] = None
-    provider_scoring_fn_id: Optional[str] = None
+    provider_id: str | None = None
+    provider_scoring_fn_id: str | None = None
 
 
 class ListScoringFunctionsResponse(BaseModel):
-    data: List[ScoringFn]
+    data: list[ScoringFn]
 
 
 @runtime_checkable
 class ScoringFunctions(Protocol):
     @webmethod(route="/scoring-functions", method="GET")
-    async def list_scoring_functions(self) -> ListScoringFunctionsResponse: ...
+    async def list_scoring_functions(self) -> ListScoringFunctionsResponse:
+        """List all scoring functions.
+
+        :returns: A ListScoringFunctionsResponse.
+        """
+        ...
 
     @webmethod(route="/scoring-functions/{scoring_fn_id:path}", method="GET")
-    async def get_scoring_function(self, scoring_fn_id: str, /) -> ScoringFn: ...
+    async def get_scoring_function(self, scoring_fn_id: str, /) -> ScoringFn:
+        """Get a scoring function by its ID.
+
+        :param scoring_fn_id: The ID of the scoring function to get.
+        :returns: A ScoringFn.
+        """
+        ...
 
     @webmethod(route="/scoring-functions", method="POST")
     async def register_scoring_function(
@@ -142,7 +156,17 @@ class ScoringFunctions(Protocol):
         scoring_fn_id: str,
         description: str,
         return_type: ParamType,
-        provider_scoring_fn_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        params: Optional[ScoringFnParams] = None,
-    ) -> None: ...
+        provider_scoring_fn_id: str | None = None,
+        provider_id: str | None = None,
+        params: ScoringFnParams | None = None,
+    ) -> None:
+        """Register a scoring function.
+
+        :param scoring_fn_id: The ID of the scoring function to register.
+        :param description: The description of the scoring function.
+        :param return_type: The return type of the scoring function.
+        :param provider_scoring_fn_id: The ID of the provider scoring function to use for the scoring function.
+        :param provider_id: The ID of the provider to use for the scoring function.
+        :param params: The parameters for the scoring function for benchmark eval; these can be overridden for app eval.
+        """
+        ...
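
A minimal sketch of what the StrEnum switch above buys for the ScoringFnParams union: enum members now compare equal to their string values, so the Literal discriminators no longer need `.value`. The judge model id below is illustrative, and the import path is assumed to match this file.

from pydantic import TypeAdapter

from llama_stack.apis.scoring_functions.scoring_functions import (
    AggregationFunctionType,
    LLMAsJudgeScoringFnParams,
    ScoringFnParams,
)

params = LLMAsJudgeScoringFnParams(
    judge_model="meta-llama/Llama-3.3-70B-Instruct",  # illustrative judge model id
    judge_score_regexes=[r"Score:\s*(\d+)"],
    aggregation_functions=[AggregationFunctionType.average],
)
assert params.type == "llm_as_judge"  # StrEnum members equal their string value

# The Annotated union dispatches on the "type" field when validating raw dicts.
adapter = TypeAdapter(ScoringFnParams)
restored = adapter.validate_python(params.model_dump(mode="json"))
assert isinstance(restored, LLMAsJudgeScoringFnParams)
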
diff --git a/llama_stack/apis/shields/shields.py b/llama_stack/apis/shields/shields.py
index 67f3bd27b..ce1f73d8e 100644
--- a/llama_stack/apis/shields/shields.py
+++ b/llama_stack/apis/shields/shields.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
+from typing import Any, Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -14,48 +14,68 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 
 
 class CommonShieldFields(BaseModel):
-    params: Optional[Dict[str, Any]] = None
+    params: dict[str, Any] | None = None
 
 
 @json_schema_type
 class Shield(CommonShieldFields, Resource):
     """A safety shield resource that can be used to check content"""
 
-    type: Literal[ResourceType.shield.value] = ResourceType.shield.value
+    type: Literal[ResourceType.shield] = ResourceType.shield
 
     @property
     def shield_id(self) -> str:
         return self.identifier
 
     @property
-    def provider_shield_id(self) -> str:
+    def provider_shield_id(self) -> str | None:
         return self.provider_resource_id
 
 
 class ShieldInput(CommonShieldFields):
     shield_id: str
-    provider_id: Optional[str] = None
-    provider_shield_id: Optional[str] = None
+    provider_id: str | None = None
+    provider_shield_id: str | None = None
 
 
 class ListShieldsResponse(BaseModel):
-    data: List[Shield]
+    data: list[Shield]
 
 
 @runtime_checkable
 @trace_protocol
 class Shields(Protocol):
     @webmethod(route="/shields", method="GET")
-    async def list_shields(self) -> ListShieldsResponse: ...
+    async def list_shields(self) -> ListShieldsResponse:
+        """List all shields.
+
+        :returns: A ListShieldsResponse.
+        """
+        ...
 
     @webmethod(route="/shields/{identifier:path}", method="GET")
-    async def get_shield(self, identifier: str) -> Shield: ...
+    async def get_shield(self, identifier: str) -> Shield:
+        """Get a shield by its identifier.
+
+        :param identifier: The identifier of the shield to get.
+        :returns: A Shield.
+        """
+        ...
 
     @webmethod(route="/shields", method="POST")
     async def register_shield(
         self,
         shield_id: str,
-        provider_shield_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        params: Optional[Dict[str, Any]] = None,
-    ) -> Shield: ...
+        provider_shield_id: str | None = None,
+        provider_id: str | None = None,
+        params: dict[str, Any] | None = None,
+    ) -> Shield:
+        """Register a shield.
+
+        :param shield_id: The identifier of the shield to register.
+        :param provider_shield_id: The identifier of the shield in the provider.
+        :param provider_id: The identifier of the provider.
+        :param params: The parameters of the shield.
+        :returns: A Shield.
+        """
+        ...
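
For reference, a hedged sketch of driving the register_shield call documented above; the shield id, provider id, and params are illustrative, not values this change defines.

async def register_llama_guard(shields_impl) -> None:
    # shields_impl is any object implementing the Shields protocol above.
    shield = await shields_impl.register_shield(
        shield_id="llama-guard",              # illustrative shield id
        provider_id="inline::llama-guard",    # illustrative provider id
        params={"excluded_categories": []},
    )
    print(shield.identifier, shield.provider_shield_id)
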
diff --git a/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py b/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py
index 7b41192af..91e550da9 100644
--- a/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py
+++ b/llama_stack/apis/synthetic_data_generation/synthetic_data_generation.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List, Optional, Protocol, Union
+from typing import Any, Protocol
 
 from pydantic import BaseModel
 
@@ -28,24 +28,24 @@ class FilteringFunction(Enum):
 class SyntheticDataGenerationRequest(BaseModel):
     """Request to generate synthetic data. A small batch of prompts and a filtering function"""
 
-    dialogs: List[Message]
+    dialogs: list[Message]
     filtering_function: FilteringFunction = FilteringFunction.none
-    model: Optional[str] = None
+    model: str | None = None
 
 
 @json_schema_type
 class SyntheticDataGenerationResponse(BaseModel):
     """Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold."""
 
-    synthetic_data: List[Dict[str, Any]]
-    statistics: Optional[Dict[str, Any]] = None
+    synthetic_data: list[dict[str, Any]]
+    statistics: dict[str, Any] | None = None
 
 
 class SyntheticDataGeneration(Protocol):
     @webmethod(route="/synthetic-data-generation/generate")
     def synthetic_data_generate(
         self,
-        dialogs: List[Message],
+        dialogs: list[Message],
         filtering_function: FilteringFunction = FilteringFunction.none,
-        model: Optional[str] = None,
-    ) -> Union[SyntheticDataGenerationResponse]: ...
+        model: str | None = None,
+    ) -> SyntheticDataGenerationResponse: ...
diff --git a/llama_stack/apis/telemetry/telemetry.py b/llama_stack/apis/telemetry/telemetry.py
index d57c311b2..0eb53f397 100644
--- a/llama_stack/apis/telemetry/telemetry.py
+++ b/llama_stack/apis/telemetry/telemetry.py
@@ -7,18 +7,14 @@
 from datetime import datetime
 from enum import Enum
 from typing import (
+    Annotated,
     Any,
-    Dict,
-    List,
     Literal,
-    Optional,
     Protocol,
-    Union,
     runtime_checkable,
 )
 
 from pydantic import BaseModel, Field
-from typing_extensions import Annotated
 
 from llama_stack.models.llama.datatypes import Primitive
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
@@ -37,11 +33,11 @@ class SpanStatus(Enum):
 class Span(BaseModel):
     span_id: str
     trace_id: str
-    parent_span_id: Optional[str] = None
+    parent_span_id: str | None = None
     name: str
     start_time: datetime
-    end_time: Optional[datetime] = None
-    attributes: Optional[Dict[str, Any]] = Field(default_factory=dict)
+    end_time: datetime | None = None
+    attributes: dict[str, Any] | None = Field(default_factory=lambda: {})
 
     def set_attribute(self, key: str, value: Any):
         if self.attributes is None:
@@ -54,7 +50,7 @@ class Trace(BaseModel):
     trace_id: str
     root_span_id: str
     start_time: datetime
-    end_time: Optional[datetime] = None
+    end_time: datetime | None = None
 
 
 @json_schema_type
@@ -78,29 +74,29 @@ class EventCommon(BaseModel):
     trace_id: str
     span_id: str
     timestamp: datetime
-    attributes: Optional[Dict[str, Primitive]] = Field(default_factory=dict)
+    attributes: dict[str, Primitive] | None = Field(default_factory=lambda: {})
 
 
 @json_schema_type
 class UnstructuredLogEvent(EventCommon):
-    type: Literal[EventType.UNSTRUCTURED_LOG.value] = EventType.UNSTRUCTURED_LOG.value
+    type: Literal[EventType.UNSTRUCTURED_LOG] = EventType.UNSTRUCTURED_LOG
     message: str
     severity: LogSeverity
 
 
 @json_schema_type
 class MetricEvent(EventCommon):
-    type: Literal[EventType.METRIC.value] = EventType.METRIC.value
+    type: Literal[EventType.METRIC] = EventType.METRIC
     metric: str  # this would be an enum
-    value: Union[int, float]
+    value: int | float
     unit: str
 
 
 @json_schema_type
 class MetricInResponse(BaseModel):
     metric: str
-    value: Union[int, float]
-    unit: Optional[str] = None
+    value: int | float
+    unit: str | None = None
 
 
 # This is a short term solution to allow inference API to return metrics
@@ -124,7 +120,7 @@ class MetricInResponse(BaseModel):
 
 
 class MetricResponseMixin(BaseModel):
-    metrics: Optional[List[MetricInResponse]] = None
+    metrics: list[MetricInResponse] | None = None
 
 
 @json_schema_type
@@ -135,22 +131,19 @@ class StructuredLogType(Enum):
 
 @json_schema_type
 class SpanStartPayload(BaseModel):
-    type: Literal[StructuredLogType.SPAN_START.value] = StructuredLogType.SPAN_START.value
+    type: Literal[StructuredLogType.SPAN_START] = StructuredLogType.SPAN_START
     name: str
-    parent_span_id: Optional[str] = None
+    parent_span_id: str | None = None
 
 
 @json_schema_type
 class SpanEndPayload(BaseModel):
-    type: Literal[StructuredLogType.SPAN_END.value] = StructuredLogType.SPAN_END.value
+    type: Literal[StructuredLogType.SPAN_END] = StructuredLogType.SPAN_END
     status: SpanStatus
 
 
 StructuredLogPayload = Annotated[
-    Union[
-        SpanStartPayload,
-        SpanEndPayload,
-    ],
+    SpanStartPayload | SpanEndPayload,
     Field(discriminator="type"),
 ]
 register_schema(StructuredLogPayload, name="StructuredLogPayload")
@@ -158,16 +151,12 @@ register_schema(StructuredLogPayload, name="StructuredLogPayload")
 
 @json_schema_type
 class StructuredLogEvent(EventCommon):
-    type: Literal[EventType.STRUCTURED_LOG.value] = EventType.STRUCTURED_LOG.value
+    type: Literal[EventType.STRUCTURED_LOG] = EventType.STRUCTURED_LOG
     payload: StructuredLogPayload
 
 
 Event = Annotated[
-    Union[
-        UnstructuredLogEvent,
-        MetricEvent,
-        StructuredLogEvent,
-    ],
+    UnstructuredLogEvent | MetricEvent | StructuredLogEvent,
     Field(discriminator="type"),
 ]
 register_schema(Event, name="Event")
@@ -184,7 +173,7 @@ class EvalTrace(BaseModel):
 
 @json_schema_type
 class SpanWithStatus(Span):
-    status: Optional[SpanStatus] = None
+    status: SpanStatus | None = None
 
 
 @json_schema_type
@@ -203,58 +192,177 @@ class QueryCondition(BaseModel):
 
 
 class QueryTracesResponse(BaseModel):
-    data: List[Trace]
+    data: list[Trace]
 
 
 class QuerySpansResponse(BaseModel):
-    data: List[Span]
+    data: list[Span]
 
 
 class QuerySpanTreeResponse(BaseModel):
-    data: Dict[str, SpanWithStatus]
+    data: dict[str, SpanWithStatus]
+
+
+class MetricQueryType(Enum):
+    RANGE = "range"
+    INSTANT = "instant"
+
+
+class MetricLabelOperator(Enum):
+    EQUALS = "="
+    NOT_EQUALS = "!="
+    REGEX_MATCH = "=~"
+    REGEX_NOT_MATCH = "!~"
+
+
+class MetricLabelMatcher(BaseModel):
+    name: str
+    value: str
+    operator: MetricLabelOperator = MetricLabelOperator.EQUALS
+
+
+@json_schema_type
+class MetricLabel(BaseModel):
+    name: str
+    value: str
+
+
+@json_schema_type
+class MetricDataPoint(BaseModel):
+    timestamp: int
+    value: float
+
+
+@json_schema_type
+class MetricSeries(BaseModel):
+    metric: str
+    labels: list[MetricLabel]
+    values: list[MetricDataPoint]
+
+
+class QueryMetricsResponse(BaseModel):
+    data: list[MetricSeries]
 
 
 @runtime_checkable
 class Telemetry(Protocol):
     @webmethod(route="/telemetry/events", method="POST")
-    async def log_event(self, event: Event, ttl_seconds: int = DEFAULT_TTL_DAYS * 86400) -> None: ...
+    async def log_event(
+        self,
+        event: Event,
+        ttl_seconds: int = DEFAULT_TTL_DAYS * 86400,
+    ) -> None:
+        """Log an event.
+
+        :param event: The event to log.
+        :param ttl_seconds: The time to live of the event.
+        """
+        ...
 
     @webmethod(route="/telemetry/traces", method="POST")
     async def query_traces(
         self,
-        attribute_filters: Optional[List[QueryCondition]] = None,
-        limit: Optional[int] = 100,
-        offset: Optional[int] = 0,
-        order_by: Optional[List[str]] = None,
-    ) -> QueryTracesResponse: ...
+        attribute_filters: list[QueryCondition] | None = None,
+        limit: int | None = 100,
+        offset: int | None = 0,
+        order_by: list[str] | None = None,
+    ) -> QueryTracesResponse:
+        """Query traces.
+
+        :param attribute_filters: The attribute filters to apply to the traces.
+        :param limit: The limit of traces to return.
+        :param offset: The offset of the traces to return.
+        :param order_by: The order by of the traces to return.
+        :returns: A QueryTracesResponse.
+        """
+        ...
 
     @webmethod(route="/telemetry/traces/{trace_id:path}", method="GET")
-    async def get_trace(self, trace_id: str) -> Trace: ...
+    async def get_trace(self, trace_id: str) -> Trace:
+        """Get a trace by its ID.
+
+        :param trace_id: The ID of the trace to get.
+        :returns: A Trace.
+        """
+        ...
 
     @webmethod(route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}", method="GET")
-    async def get_span(self, trace_id: str, span_id: str) -> Span: ...
+    async def get_span(self, trace_id: str, span_id: str) -> Span:
+        """Get a span by its ID.
+
+        :param trace_id: The ID of the trace to get the span from.
+        :param span_id: The ID of the span to get.
+        :returns: A Span.
+        """
+        ...
 
     @webmethod(route="/telemetry/spans/{span_id:path}/tree", method="POST")
     async def get_span_tree(
         self,
         span_id: str,
-        attributes_to_return: Optional[List[str]] = None,
-        max_depth: Optional[int] = None,
-    ) -> QuerySpanTreeResponse: ...
+        attributes_to_return: list[str] | None = None,
+        max_depth: int | None = None,
+    ) -> QuerySpanTreeResponse:
+        """Get a span tree by its ID.
+
+        :param span_id: The ID of the span to get the tree from.
+        :param attributes_to_return: The attributes to return in the tree.
+        :param max_depth: The maximum depth of the tree.
+        :returns: A QuerySpanTreeResponse.
+        """
+        ...
 
     @webmethod(route="/telemetry/spans", method="POST")
     async def query_spans(
         self,
-        attribute_filters: List[QueryCondition],
-        attributes_to_return: List[str],
-        max_depth: Optional[int] = None,
-    ) -> QuerySpansResponse: ...
+        attribute_filters: list[QueryCondition],
+        attributes_to_return: list[str],
+        max_depth: int | None = None,
+    ) -> QuerySpansResponse:
+        """Query spans.
+
+        :param attribute_filters: The attribute filters to apply to the spans.
+        :param attributes_to_return: The attributes to return in the spans.
+        :param max_depth: The maximum depth of the tree.
+        :returns: A QuerySpansResponse.
+        """
+        ...
 
     @webmethod(route="/telemetry/spans/export", method="POST")
     async def save_spans_to_dataset(
         self,
-        attribute_filters: List[QueryCondition],
-        attributes_to_save: List[str],
+        attribute_filters: list[QueryCondition],
+        attributes_to_save: list[str],
         dataset_id: str,
-        max_depth: Optional[int] = None,
-    ) -> None: ...
+        max_depth: int | None = None,
+    ) -> None:
+        """Save spans to a dataset.
+
+        :param attribute_filters: The attribute filters to apply to the spans.
+        :param attributes_to_save: The attributes to save to the dataset.
+        :param dataset_id: The ID of the dataset to save the spans to.
+        :param max_depth: The maximum depth of the tree.
+        """
+        ...
+
+    @webmethod(route="/telemetry/metrics/{metric_name}", method="POST")
+    async def query_metrics(
+        self,
+        metric_name: str,
+        start_time: int,
+        end_time: int | None = None,
+        granularity: str | None = "1d",
+        query_type: MetricQueryType = MetricQueryType.RANGE,
+        label_matchers: list[MetricLabelMatcher] | None = None,
+    ) -> QueryMetricsResponse:
+        """Query metrics.
+
+        :param metric_name: The name of the metric to query.
+        :param start_time: The start time of the metric to query.
+        :param end_time: The end time of the metric to query.
+        :param granularity: The granularity of the metric to query.
+        :param query_type: The type of query to perform.
+        :param label_matchers: The label matchers to apply to the metric.
+        :returns: A QueryMetricsResponse.
+        """
+        ...
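
A hedged sketch of the new query_metrics surface defined above. The metric name and label values are illustrative; telemetry_impl stands in for whatever provider implements the Telemetry protocol.

import time

from llama_stack.apis.telemetry.telemetry import (
    MetricLabelMatcher,
    MetricLabelOperator,
    MetricQueryType,
)

async def dump_token_metrics(telemetry_impl) -> None:
    response = await telemetry_impl.query_metrics(
        metric_name="prompt_tokens",              # illustrative metric name
        start_time=int(time.time()) - 3600,       # last hour
        query_type=MetricQueryType.RANGE,
        label_matchers=[
            MetricLabelMatcher(
                name="model_id",
                value="llama3.*",
                operator=MetricLabelOperator.REGEX_MATCH,
            )
        ],
    )
    for series in response.data:
        labels = {label.name: label.value for label in series.labels}
        points = [(p.timestamp, p.value) for p in series.values]
        print(series.metric, labels, points)
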
diff --git a/llama_stack/apis/tools/rag_tool.py b/llama_stack/apis/tools/rag_tool.py
index 73b36e050..1e3542f74 100644
--- a/llama_stack/apis/tools/rag_tool.py
+++ b/llama_stack/apis/tools/rag_tool.py
@@ -5,10 +5,10 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Annotated, Any, Literal
 
-from pydantic import BaseModel, Field
-from typing_extensions import Annotated, Protocol, runtime_checkable
+from pydantic import BaseModel, Field, field_validator
+from typing_extensions import Protocol, runtime_checkable
 
 from llama_stack.apis.common.content_types import URL, InterleavedContent
 from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@@ -29,13 +29,13 @@ class RAGDocument(BaseModel):
     document_id: str
     content: InterleavedContent | URL
     mime_type: str | None = None
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    metadata: dict[str, Any] = Field(default_factory=dict)
 
 
 @json_schema_type
 class RAGQueryResult(BaseModel):
-    content: Optional[InterleavedContent] = None
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    content: InterleavedContent | None = None
+    metadata: dict[str, Any] = Field(default_factory=dict)
 
 
 @json_schema_type
@@ -59,10 +59,7 @@ class LLMRAGQueryGeneratorConfig(BaseModel):
 
 
 RAGQueryGeneratorConfig = Annotated[
-    Union[
-        DefaultRAGQueryGeneratorConfig,
-        LLMRAGQueryGeneratorConfig,
-    ],
+    DefaultRAGQueryGeneratorConfig | LLMRAGQueryGeneratorConfig,
     Field(discriminator="type"),
 ]
 register_schema(RAGQueryGeneratorConfig, name="RAGQueryGeneratorConfig")
@@ -70,11 +67,35 @@ register_schema(RAGQueryGeneratorConfig, name="RAGQueryGeneratorConfig")
 
 @json_schema_type
 class RAGQueryConfig(BaseModel):
+    """
+    Configuration for the RAG query generation.
+
+    :param query_generator_config: Configuration for the query generator.
+    :param max_tokens_in_context: Maximum number of tokens in the context.
+    :param max_chunks: Maximum number of chunks to retrieve.
+    :param chunk_template: Template for formatting each retrieved chunk in the context.
+        Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict).
+        Default: "Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n"
+    :param mode: Search mode for retrieval—either "vector" or "keyword". Default "vector".
+    """
+
     # This config defines how a query is generated using the messages
     # for memory bank retrieval.
     query_generator_config: RAGQueryGeneratorConfig = Field(default=DefaultRAGQueryGeneratorConfig())
     max_tokens_in_context: int = 4096
     max_chunks: int = 5
+    chunk_template: str = "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n"
+    mode: str | None = None
+
+    @field_validator("chunk_template")
+    def validate_chunk_template(cls, v: str) -> str:
+        if "{chunk.content}" not in v:
+            raise ValueError("chunk_template must contain {chunk.content}")
+        if "{index}" not in v:
+            raise ValueError("chunk_template must contain {index}")
+        if len(v) == 0:
+            raise ValueError("chunk_template must not be empty")
+        return v
 
 
 @runtime_checkable
@@ -83,7 +104,7 @@ class RAGToolRuntime(Protocol):
     @webmethod(route="/tool-runtime/rag-tool/insert", method="POST")
     async def insert(
         self,
-        documents: List[RAGDocument],
+        documents: list[RAGDocument],
         vector_db_id: str,
         chunk_size_in_tokens: int = 512,
     ) -> None:
@@ -94,8 +115,8 @@ class RAGToolRuntime(Protocol):
     async def query(
         self,
         content: InterleavedContent,
-        vector_db_ids: List[str],
-        query_config: Optional[RAGQueryConfig] = None,
+        vector_db_ids: list[str],
+        query_config: RAGQueryConfig | None = None,
     ) -> RAGQueryResult:
         """Query the RAG system for context; typically invoked by the agent"""
         ...
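
A small sketch of the new chunk_template knob and its validator, under the assumption that the template is rendered with str.format using the placeholders documented above; the chunk object here is a stand-in.

from pydantic import ValidationError

from llama_stack.apis.tools.rag_tool import RAGQueryConfig

config = RAGQueryConfig(
    chunk_template="[{index}] {chunk.content}\n(tags: {metadata})\n",
    mode="keyword",
)

class FakeChunk:
    content = "Llama Stack composes inference, safety, and memory APIs."

# str.format resolves attribute access, so {chunk.content} reads the stand-in's field.
print(config.chunk_template.format(index=1, chunk=FakeChunk(), metadata={"doc": "intro"}))

# Templates missing a required placeholder are rejected by the field validator.
try:
    RAGQueryConfig(chunk_template="no placeholders here")
except ValidationError as exc:
    print(exc.errors()[0]["msg"])
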
diff --git a/llama_stack/apis/tools/tools.py b/llama_stack/apis/tools/tools.py
index 4ca72f71d..0c8d47edf 100644
--- a/llama_stack/apis/tools/tools.py
+++ b/llama_stack/apis/tools/tools.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List, Literal, Optional
+from typing import Any, Literal
 
 from pydantic import BaseModel, Field
 from typing_extensions import Protocol, runtime_checkable
@@ -24,68 +24,60 @@ class ToolParameter(BaseModel):
     parameter_type: str
     description: str
     required: bool = Field(default=True)
-    default: Optional[Any] = None
-
-
-@json_schema_type
-class ToolHost(Enum):
-    distribution = "distribution"
-    client = "client"
-    model_context_protocol = "model_context_protocol"
+    default: Any | None = None
 
 
 @json_schema_type
 class Tool(Resource):
-    type: Literal[ResourceType.tool.value] = ResourceType.tool.value
+    type: Literal[ResourceType.tool] = ResourceType.tool
     toolgroup_id: str
-    tool_host: ToolHost
     description: str
-    parameters: List[ToolParameter]
-    metadata: Optional[Dict[str, Any]] = None
+    parameters: list[ToolParameter]
+    metadata: dict[str, Any] | None = None
 
 
 @json_schema_type
 class ToolDef(BaseModel):
     name: str
-    description: Optional[str] = None
-    parameters: Optional[List[ToolParameter]] = None
-    metadata: Optional[Dict[str, Any]] = None
+    description: str | None = None
+    parameters: list[ToolParameter] | None = None
+    metadata: dict[str, Any] | None = None
 
 
 @json_schema_type
 class ToolGroupInput(BaseModel):
     toolgroup_id: str
     provider_id: str
-    args: Optional[Dict[str, Any]] = None
-    mcp_endpoint: Optional[URL] = None
+    args: dict[str, Any] | None = None
+    mcp_endpoint: URL | None = None
 
 
 @json_schema_type
 class ToolGroup(Resource):
-    type: Literal[ResourceType.tool_group.value] = ResourceType.tool_group.value
-    mcp_endpoint: Optional[URL] = None
-    args: Optional[Dict[str, Any]] = None
+    type: Literal[ResourceType.tool_group] = ResourceType.tool_group
+    mcp_endpoint: URL | None = None
+    args: dict[str, Any] | None = None
 
 
 @json_schema_type
 class ToolInvocationResult(BaseModel):
-    content: Optional[InterleavedContent] = None
-    error_message: Optional[str] = None
-    error_code: Optional[int] = None
-    metadata: Optional[Dict[str, Any]] = None
+    content: InterleavedContent | None = None
+    error_message: str | None = None
+    error_code: int | None = None
+    metadata: dict[str, Any] | None = None
 
 
 class ToolStore(Protocol):
-    def get_tool(self, tool_name: str) -> Tool: ...
-    def get_tool_group(self, toolgroup_id: str) -> ToolGroup: ...
+    async def get_tool(self, tool_name: str) -> Tool: ...
+    async def get_tool_group(self, toolgroup_id: str) -> ToolGroup: ...
 
 
 class ListToolGroupsResponse(BaseModel):
-    data: List[ToolGroup]
+    data: list[ToolGroup]
 
 
 class ListToolsResponse(BaseModel):
-    data: List[Tool]
+    data: list[Tool]
 
 
 class ListToolDefsResponse(BaseModel):
@@ -100,40 +92,68 @@ class ToolGroups(Protocol):
         self,
         toolgroup_id: str,
         provider_id: str,
-        mcp_endpoint: Optional[URL] = None,
-        args: Optional[Dict[str, Any]] = None,
+        mcp_endpoint: URL | None = None,
+        args: dict[str, Any] | None = None,
     ) -> None:
-        """Register a tool group"""
+        """Register a tool group.
+
+        :param toolgroup_id: The ID of the tool group to register.
+        :param provider_id: The ID of the provider to use for the tool group.
+        :param mcp_endpoint: The MCP endpoint to use for the tool group.
+        :param args: A dictionary of arguments to pass to the tool group.
+        """
         ...
 
     @webmethod(route="/toolgroups/{toolgroup_id:path}", method="GET")
     async def get_tool_group(
         self,
         toolgroup_id: str,
-    ) -> ToolGroup: ...
+    ) -> ToolGroup:
+        """Get a tool group by its ID.
+
+        :param toolgroup_id: The ID of the tool group to get.
+        :returns: A ToolGroup.
+        """
+        ...
 
     @webmethod(route="/toolgroups", method="GET")
     async def list_tool_groups(self) -> ListToolGroupsResponse:
-        """List tool groups with optional provider"""
+        """List tool groups with optional provider.
+
+        :returns: A ListToolGroupsResponse.
+        """
         ...
 
     @webmethod(route="/tools", method="GET")
-    async def list_tools(self, toolgroup_id: Optional[str] = None) -> ListToolsResponse:
-        """List tools with optional tool group"""
+    async def list_tools(self, toolgroup_id: str | None = None) -> ListToolsResponse:
+        """List tools with optional tool group.
+
+        :param toolgroup_id: The ID of the tool group to list tools for.
+        :returns: A ListToolsResponse.
+        """
         ...
 
     @webmethod(route="/tools/{tool_name:path}", method="GET")
     async def get_tool(
         self,
         tool_name: str,
-    ) -> Tool: ...
+    ) -> Tool:
+        """Get a tool by its name.
+
+        :param tool_name: The name of the tool to get.
+        :returns: A Tool.
+        """
+        ...
 
     @webmethod(route="/toolgroups/{toolgroup_id:path}", method="DELETE")
     async def unregister_toolgroup(
         self,
         toolgroup_id: str,
     ) -> None:
-        """Unregister a tool group"""
+        """Unregister a tool group.
+
+        :param toolgroup_id: The ID of the tool group to unregister.
+        """
         ...
 
 
@@ -151,10 +171,22 @@ class ToolRuntime(Protocol):
     # TODO: This needs to be renamed once OPEN API generator name conflict issue is fixed.
     @webmethod(route="/tool-runtime/list-tools", method="GET")
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
-    ) -> ListToolDefsResponse: ...
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
+    ) -> ListToolDefsResponse:
+        """List all tools in the runtime.
+
+        :param tool_group_id: The ID of the tool group to list tools for.
+        :param mcp_endpoint: The MCP endpoint to use for the tool group.
+        :returns: A ListToolDefsResponse.
+        """
+        ...
 
     @webmethod(route="/tool-runtime/invoke", method="POST")
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
-        """Run a tool with the given arguments"""
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
+        """Run a tool with the given arguments.
+
+        :param tool_name: The name of the tool to invoke.
+        :param kwargs: A dictionary of arguments to pass to the tool.
+        :returns: A ToolInvocationResult.
+        """
         ...
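
A hedged sketch of calling the ToolRuntime surface documented above; the tool name and arguments are illustrative and depend on which tool groups are registered.

async def run_tool(tool_runtime_impl) -> None:
    # tool_runtime_impl is any object implementing the ToolRuntime protocol above.
    result = await tool_runtime_impl.invoke_tool(
        tool_name="web_search",                   # illustrative tool name
        kwargs={"query": "latest Llama Stack release"},
    )
    if result.error_message:
        raise RuntimeError(f"tool failed ({result.error_code}): {result.error_message}")
    print(result.content, result.metadata)
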
diff --git a/llama_stack/apis/vector_dbs/vector_dbs.py b/llama_stack/apis/vector_dbs/vector_dbs.py
index fe6c33919..405852476 100644
--- a/llama_stack/apis/vector_dbs/vector_dbs.py
+++ b/llama_stack/apis/vector_dbs/vector_dbs.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Literal, Optional, Protocol, runtime_checkable
+from typing import Literal, Protocol, runtime_checkable
 
 from pydantic import BaseModel
 
@@ -15,7 +15,7 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 
 @json_schema_type
 class VectorDB(Resource):
-    type: Literal[ResourceType.vector_db.value] = ResourceType.vector_db.value
+    type: Literal[ResourceType.vector_db] = ResourceType.vector_db
 
     embedding_model: str
     embedding_dimension: int
@@ -25,7 +25,7 @@ class VectorDB(Resource):
         return self.identifier
 
     @property
-    def provider_vector_db_id(self) -> str:
+    def provider_vector_db_id(self) -> str | None:
         return self.provider_resource_id
 
 
@@ -33,34 +33,60 @@ class VectorDBInput(BaseModel):
     vector_db_id: str
     embedding_model: str
     embedding_dimension: int
-    provider_vector_db_id: Optional[str] = None
+    provider_vector_db_id: str | None = None
 
 
 class ListVectorDBsResponse(BaseModel):
-    data: List[VectorDB]
+    data: list[VectorDB]
 
 
 @runtime_checkable
 @trace_protocol
 class VectorDBs(Protocol):
     @webmethod(route="/vector-dbs", method="GET")
-    async def list_vector_dbs(self) -> ListVectorDBsResponse: ...
+    async def list_vector_dbs(self) -> ListVectorDBsResponse:
+        """List all vector databases.
+
+        :returns: A ListVectorDBsResponse.
+        """
+        ...
 
     @webmethod(route="/vector-dbs/{vector_db_id:path}", method="GET")
     async def get_vector_db(
         self,
         vector_db_id: str,
-    ) -> VectorDB: ...
+    ) -> VectorDB:
+        """Get a vector database by its identifier.
+
+        :param vector_db_id: The identifier of the vector database to get.
+        :returns: A VectorDB.
+        """
+        ...
 
     @webmethod(route="/vector-dbs", method="POST")
     async def register_vector_db(
         self,
         vector_db_id: str,
         embedding_model: str,
-        embedding_dimension: Optional[int] = 384,
-        provider_id: Optional[str] = None,
-        provider_vector_db_id: Optional[str] = None,
-    ) -> VectorDB: ...
+        embedding_dimension: int | None = 384,
+        provider_id: str | None = None,
+        provider_vector_db_id: str | None = None,
+    ) -> VectorDB:
+        """Register a vector database.
+
+        :param vector_db_id: The identifier of the vector database to register.
+        :param embedding_model: The embedding model to use.
+        :param embedding_dimension: The dimension of the embedding model.
+        :param provider_id: The identifier of the provider.
+        :param provider_vector_db_id: The identifier of the vector database in the provider.
+        :returns: A VectorDB.
+        """
+        ...
 
     @webmethod(route="/vector-dbs/{vector_db_id:path}", method="DELETE")
-    async def unregister_vector_db(self, vector_db_id: str) -> None: ...
+    async def unregister_vector_db(self, vector_db_id: str) -> None:
+        """Unregister a vector database.
+
+        :param vector_db_id: The identifier of the vector database to unregister.
+        """
+        ...
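
A brief sketch of registering a vector database through the VectorDBs protocol above; the ids and embedding model are illustrative.

async def register_docs_db(vector_dbs_impl) -> None:
    vector_db = await vector_dbs_impl.register_vector_db(
        vector_db_id="my-docs",
        embedding_model="all-MiniLM-L6-v2",   # illustrative embedding model id
        embedding_dimension=384,
        provider_id="inline::faiss",          # illustrative provider id
    )
    print(vector_db.identifier, vector_db.provider_vector_db_id)
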
diff --git a/llama_stack/apis/vector_io/vector_io.py b/llama_stack/apis/vector_io/vector_io.py
index ab0a4a20a..44cc8f904 100644
--- a/llama_stack/apis/vector_io/vector_io.py
+++ b/llama_stack/apis/vector_io/vector_io.py
@@ -8,7 +8,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
@@ -19,18 +19,26 @@ from llama_stack.schema_utils import json_schema_type, webmethod
 
 
 class Chunk(BaseModel):
+    """
+    A chunk of content that can be inserted into a vector database.
+    :param content: The content of the chunk, which can be interleaved text, images, or other types.
+    :param embedding: Optional embedding for the chunk. If not provided, it will be computed later.
+    :param metadata: Metadata associated with the chunk, such as document ID, source, or other relevant information.
+    """
+
     content: InterleavedContent
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    metadata: dict[str, Any] = Field(default_factory=dict)
+    embedding: list[float] | None = None
 
 
 @json_schema_type
 class QueryChunksResponse(BaseModel):
-    chunks: List[Chunk]
-    scores: List[float]
+    chunks: list[Chunk]
+    scores: list[float]
 
 
 class VectorDBStore(Protocol):
-    def get_vector_db(self, vector_db_id: str) -> Optional[VectorDB]: ...
+    def get_vector_db(self, vector_db_id: str) -> VectorDB | None: ...
 
 
 @runtime_checkable
@@ -44,14 +52,32 @@ class VectorIO(Protocol):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
-    ) -> None: ...
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
+    ) -> None:
+        """Insert chunks into a vector database.
+
+        :param vector_db_id: The identifier of the vector database to insert the chunks into.
+        :param chunks: The chunks to insert. Each `Chunk` should contain content which can be interleaved text, images, or other types.
+            `metadata`: `dict[str, Any]` and `embedding`: `list[float]` are optional.
+            If `metadata` is provided, Llama Stack uses it when formatting the chunk during generation.
+            If `embedding` is not provided, it will be computed later.
+        :param ttl_seconds: The time to live of the chunks.
+        """
+        ...
 
     @webmethod(route="/vector-io/query", method="POST")
     async def query_chunks(
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
-    ) -> QueryChunksResponse: ...
+        params: dict[str, Any] | None = None,
+    ) -> QueryChunksResponse:
+        """Query chunks from a vector database.
+
+        :param vector_db_id: The identifier of the vector database to query.
+        :param query: The query to search for.
+        :param params: The parameters of the query.
+        :returns: A QueryChunksResponse.
+        """
+        ...
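
A short sketch of the new optional Chunk.embedding field: one chunk leaves the embedding for the provider to compute, the other supplies a precomputed vector. The dummy embedding values and vector_db_id are illustrative.

from llama_stack.apis.vector_io.vector_io import Chunk

chunks = [
    # No embedding: the provider is expected to compute it later.
    Chunk(
        content="Llama Stack exposes a VectorIO API.",
        metadata={"document_id": "doc-1"},
    ),
    # Precomputed embedding supplied up front (dummy values; the real dimension
    # must match the target vector database).
    Chunk(
        content="Chunks may carry precomputed embeddings.",
        metadata={"document_id": "doc-1"},
        embedding=[0.12, -0.08, 0.33],
    ),
]

async def insert(vector_io_impl) -> None:
    await vector_io_impl.insert_chunks(vector_db_id="my-docs", chunks=chunks)
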
diff --git a/llama_stack/cli/download.py b/llama_stack/cli/download.py
index 9694bf22d..b96842119 100644
--- a/llama_stack/cli/download.py
+++ b/llama_stack/cli/download.py
@@ -9,11 +9,11 @@ import asyncio
 import json
 import os
 import shutil
+import sys
 from dataclasses import dataclass
 from datetime import datetime, timezone
 from functools import partial
 from pathlib import Path
-from typing import Dict, List, Optional
 
 import httpx
 from pydantic import BaseModel, ConfigDict
@@ -102,7 +102,7 @@ class DownloadTask:
     output_file: str
     total_size: int = 0
     downloaded_size: int = 0
-    task_id: Optional[int] = None
+    task_id: int | None = None
     retries: int = 0
     max_retries: int = 3
 
@@ -262,7 +262,7 @@ class ParallelDownloader:
             self.progress.update(task.task_id, description=f"[red]Failed: {task.output_file}[/red]")
             raise DownloadError(f"Download failed for {task.output_file}: {str(e)}") from e
 
-    def has_disk_space(self, tasks: List[DownloadTask]) -> bool:
+    def has_disk_space(self, tasks: list[DownloadTask]) -> bool:
         try:
             total_remaining_size = sum(task.total_size - task.downloaded_size for task in tasks)
             dir_path = os.path.dirname(os.path.abspath(tasks[0].output_file))
@@ -282,7 +282,7 @@ class ParallelDownloader:
         except Exception as e:
             raise DownloadError(f"Failed to check disk space: {str(e)}") from e
 
-    async def download_all(self, tasks: List[DownloadTask]) -> None:
+    async def download_all(self, tasks: list[DownloadTask]) -> None:
         if not tasks:
             raise ValueError("No download tasks provided")
 
@@ -378,33 +378,34 @@ def _meta_download(
     downloader = ParallelDownloader(max_concurrent_downloads=max_concurrent_downloads)
     asyncio.run(downloader.download_all(tasks))
 
-    cprint(f"\nSuccessfully downloaded model to {output_dir}", "green")
+    cprint(f"\nSuccessfully downloaded model to {output_dir}", color="green", file=sys.stderr)
     cprint(
         f"\nView MD5 checksum files at: {output_dir / 'checklist.chk'}",
-        "white",
+        file=sys.stderr,
     )
     cprint(
         f"\n[Optionally] To run MD5 checksums, use the following command: llama model verify-download --model-id {model_id}",
-        "yellow",
+        color="yellow",
+        file=sys.stderr,
     )
 
 
 class ModelEntry(BaseModel):
     model_id: str
-    files: Dict[str, str]
+    files: dict[str, str]
 
     model_config = ConfigDict(protected_namespaces=())
 
 
 class Manifest(BaseModel):
-    models: List[ModelEntry]
+    models: list[ModelEntry]
     expires_on: datetime
 
 
 def _download_from_manifest(manifest_file: str, max_concurrent_downloads: int):
     from llama_stack.distribution.utils.model_utils import model_local_dir
 
-    with open(manifest_file, "r") as f:
+    with open(manifest_file) as f:
         d = json.load(f)
         manifest = Manifest(**d)
 
@@ -460,15 +461,17 @@ def run_download_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
         from llama_stack.models.llama.sku_list import llama_meta_net_info, resolve_model
 
         from .model.safety_models import (
-            prompt_guard_download_info,
-            prompt_guard_model_sku,
+            prompt_guard_download_info_map,
+            prompt_guard_model_sku_map,
         )
 
-        prompt_guard = prompt_guard_model_sku()
+        prompt_guard_model_sku_map = prompt_guard_model_sku_map()
+        prompt_guard_download_info_map = prompt_guard_download_info_map()
+
         for model_id in model_ids:
-            if model_id == prompt_guard.model_id:
-                model = prompt_guard
-                info = prompt_guard_download_info()
+            if model_id in prompt_guard_model_sku_map.keys():
+                model = prompt_guard_model_sku_map[model_id]
+                info = prompt_guard_download_info_map[model_id]
             else:
                 model = resolve_model(model_id)
                 if model is None:
diff --git a/llama_stack/cli/llama.py b/llama_stack/cli/llama.py
index 8ff580029..433b311e7 100644
--- a/llama_stack/cli/llama.py
+++ b/llama_stack/cli/llama.py
@@ -38,7 +38,10 @@ class LlamaCLIParser:
         print_subcommand_description(self.parser, subparsers)
 
     def parse_args(self) -> argparse.Namespace:
-        return self.parser.parse_args()
+        args = self.parser.parse_args()
+        if not isinstance(args, argparse.Namespace):
+            raise TypeError(f"Expected argparse.Namespace, got {type(args)}")
+        return args
 
     def run(self, args: argparse.Namespace) -> None:
         args.func(args)
diff --git a/llama_stack/cli/model/describe.py b/llama_stack/cli/model/describe.py
index 62dde36e8..26b0da686 100644
--- a/llama_stack/cli/model/describe.py
+++ b/llama_stack/cli/model/describe.py
@@ -36,11 +36,11 @@ class ModelDescribe(Subcommand):
         )
 
     def _run_model_describe_cmd(self, args: argparse.Namespace) -> None:
-        from .safety_models import prompt_guard_model_sku
+        from .safety_models import prompt_guard_model_sku_map
 
-        prompt_guard = prompt_guard_model_sku()
-        if args.model_id == prompt_guard.model_id:
-            model = prompt_guard
+        prompt_guard_model_map = prompt_guard_model_sku_map()
+        if args.model_id in prompt_guard_model_map.keys():
+            model = prompt_guard_model_map[args.model_id]
         else:
             model = resolve_model(args.model_id)
 
diff --git a/llama_stack/cli/model/list.py b/llama_stack/cli/model/list.py
index b9499f06d..cf84dd526 100644
--- a/llama_stack/cli/model/list.py
+++ b/llama_stack/cli/model/list.py
@@ -84,7 +84,7 @@ class ModelList(Subcommand):
         )
 
     def _run_model_list_cmd(self, args: argparse.Namespace) -> None:
-        from .safety_models import prompt_guard_model_sku
+        from .safety_models import prompt_guard_model_skus
 
         if args.downloaded:
             return _run_model_list_downloaded_cmd()
@@ -96,7 +96,7 @@ class ModelList(Subcommand):
         ]
 
         rows = []
-        for model in all_registered_models() + [prompt_guard_model_sku()]:
+        for model in all_registered_models() + prompt_guard_model_skus():
             if not args.show_all and not model.is_featured:
                 continue
 
diff --git a/llama_stack/cli/model/remove.py b/llama_stack/cli/model/remove.py
index ee8d6299d..98710d82b 100644
--- a/llama_stack/cli/model/remove.py
+++ b/llama_stack/cli/model/remove.py
@@ -42,11 +42,12 @@ class ModelRemove(Subcommand):
         )
 
     def _run_model_remove_cmd(self, args: argparse.Namespace) -> None:
-        from .safety_models import prompt_guard_model_sku
+        from .safety_models import prompt_guard_model_sku_map
 
-        prompt_guard = prompt_guard_model_sku()
-        if args.model == prompt_guard.model_id:
-            model = prompt_guard
+        prompt_guard_model_map = prompt_guard_model_sku_map()
+
+        if args.model in prompt_guard_model_map.keys():
+            model = prompt_guard_model_map[args.model]
         else:
             model = resolve_model(args.model)
 
diff --git a/llama_stack/cli/model/safety_models.py b/llama_stack/cli/model/safety_models.py
index 131d055aa..e31767f13 100644
--- a/llama_stack/cli/model/safety_models.py
+++ b/llama_stack/cli/model/safety_models.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -15,14 +15,14 @@ from llama_stack.models.llama.sku_types import CheckpointQuantizationFormat
 class PromptGuardModel(BaseModel):
     """Make a 'fake' Model-like object for Prompt Guard. Eventually this will be removed."""
 
-    model_id: str = "Prompt-Guard-86M"
+    model_id: str
+    huggingface_repo: str
     description: str = "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
     is_featured: bool = False
-    huggingface_repo: str = "meta-llama/Prompt-Guard-86M"
-    max_seq_length: int = 2048
+    max_seq_length: int = 512
     is_instruct_model: bool = False
     quantization_format: CheckpointQuantizationFormat = CheckpointQuantizationFormat.bf16
-    arch_args: Dict[str, Any] = Field(default_factory=dict)
+    arch_args: dict[str, Any] = Field(default_factory=dict)
 
     def descriptor(self) -> str:
         return self.model_id
@@ -30,18 +30,35 @@ class PromptGuardModel(BaseModel):
     model_config = ConfigDict(protected_namespaces=())
 
 
-def prompt_guard_model_sku():
-    return PromptGuardModel()
+def prompt_guard_model_skus():
+    return [
+        PromptGuardModel(model_id="Prompt-Guard-86M", huggingface_repo="meta-llama/Prompt-Guard-86M"),
+        PromptGuardModel(
+            model_id="Llama-Prompt-Guard-2-86M",
+            huggingface_repo="meta-llama/Llama-Prompt-Guard-2-86M",
+        ),
+        PromptGuardModel(
+            model_id="Llama-Prompt-Guard-2-22M",
+            huggingface_repo="meta-llama/Llama-Prompt-Guard-2-22M",
+        ),
+    ]
 
 
-def prompt_guard_download_info():
-    return LlamaDownloadInfo(
-        folder="Prompt-Guard",
-        files=[
-            "model.safetensors",
-            "special_tokens_map.json",
-            "tokenizer.json",
-            "tokenizer_config.json",
-        ],
-        pth_size=1,
-    )
+def prompt_guard_model_sku_map() -> dict[str, Any]:
+    return {model.model_id: model for model in prompt_guard_model_skus()}
+
+
+def prompt_guard_download_info_map() -> dict[str, LlamaDownloadInfo]:
+    return {
+        model.model_id: LlamaDownloadInfo(
+            folder="Prompt-Guard" if model.model_id == "Prompt-Guard-86M" else model.model_id,
+            files=[
+                "model.safetensors",
+                "special_tokens_map.json",
+                "tokenizer.json",
+                "tokenizer_config.json",
+            ],
+            pth_size=1,
+        )
+        for model in prompt_guard_model_skus()
+    }
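
A hedged sketch of the map-based lookup that the CLI changes above switch to: a requested model id either resolves to one of the Prompt Guard variants or falls through to the regular model resolver.

from llama_stack.cli.model.safety_models import (
    prompt_guard_download_info_map,
    prompt_guard_model_sku_map,
)

sku_map = prompt_guard_model_sku_map()
info_map = prompt_guard_download_info_map()

model_id = "Llama-Prompt-Guard-2-22M"
if model_id in sku_map:
    model = sku_map[model_id]
    info = info_map[model_id]
    print(model.huggingface_repo, info.folder)
else:
    # would fall back to resolve_model(model_id), as in download.py above
    pass
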
diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py
index 2787a93d5..f6f72946a 100644
--- a/llama_stack/cli/stack/_build.py
+++ b/llama_stack/cli/stack/_build.py
@@ -12,14 +12,14 @@ import shutil
 import sys
 import textwrap
 from functools import lru_cache
+from importlib.abc import Traversable
 from pathlib import Path
-from typing import Dict, Optional
 
 import yaml
 from prompt_toolkit import prompt
 from prompt_toolkit.completion import WordCompleter
 from prompt_toolkit.validation import Validator
-from termcolor import cprint
+from termcolor import colored, cprint
 
 from llama_stack.cli.stack.utils import ImageType
 from llama_stack.cli.table import print_table
@@ -37,7 +37,8 @@ from llama_stack.distribution.datatypes import (
 )
 from llama_stack.distribution.distribution import get_provider_registry
 from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
+from llama_stack.distribution.stack import replace_env_vars
+from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR
 from llama_stack.distribution.utils.dynamic import instantiate_class_type
 from llama_stack.distribution.utils.exec import formulate_run_args, run_command
 from llama_stack.distribution.utils.image_types import LlamaStackImageType
@@ -46,14 +47,14 @@ from llama_stack.providers.datatypes import Api
 TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
 
 
-@lru_cache()
-def available_templates_specs() -> Dict[str, BuildConfig]:
+@lru_cache
+def available_templates_specs() -> dict[str, BuildConfig]:
     import yaml
 
     template_specs = {}
     for p in TEMPLATES_PATH.rglob("*build.yaml"):
         template_name = p.parent.name
-        with open(p, "r") as f:
+        with open(p) as f:
             build_config = BuildConfig(**yaml.safe_load(f))
             template_specs[template_name] = build_config
     return template_specs
@@ -78,6 +79,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             cprint(
                 f"Could not find template {args.template}. Please run `llama stack build --list-templates` to check out the available templates",
                 color="red",
+                file=sys.stderr,
             )
             sys.exit(1)
         build_config = available_templates[args.template]
@@ -87,6 +89,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             cprint(
                 f"Please specify a image-type ({' | '.join(e.value for e in ImageType)}) for {args.template}",
                 color="red",
+                file=sys.stderr,
             )
             sys.exit(1)
     elif args.providers:
@@ -96,6 +99,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
                 cprint(
                     "Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2",
                     color="red",
+                    file=sys.stderr,
                 )
                 sys.exit(1)
             api, provider = api_provider.split("=")
@@ -104,6 +108,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
                 cprint(
                     f"{api} is not a valid API.",
                     color="red",
+                    file=sys.stderr,
                 )
                 sys.exit(1)
             if provider in providers_for_api:
@@ -112,6 +117,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
                 cprint(
                     f"{provider} is not a valid provider for the {api} API.",
                     color="red",
+                    file=sys.stderr,
                 )
                 sys.exit(1)
         distribution_spec = DistributionSpec(
@@ -122,6 +128,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             cprint(
                 f"Please specify a image-type (container | conda | venv) for {args.template}",
                 color="red",
+                file=sys.stderr,
             )
             sys.exit(1)
 
@@ -150,12 +157,14 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
                 cprint(
                     f"No current conda environment detected or specified, will create a new conda environment with the name `llamastack-{name}`",
                     color="yellow",
+                    file=sys.stderr,
                 )
                 image_name = f"llamastack-{name}"
             else:
                 cprint(
                     f"Using conda environment {image_name}",
                     color="green",
+                    file=sys.stderr,
                 )
         else:
             image_name = f"llamastack-{name}"
@@ -168,9 +177,10 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             """,
             ),
             color="green",
+            file=sys.stderr,
         )
 
-        print("Tip: use <TAB> to see options for the providers.\n")
+        cprint("Tip: use <TAB> to see options for the providers.\n", color="green", file=sys.stderr)
 
         providers = dict()
         for api, providers_for_api in get_provider_registry().items():
@@ -178,7 +188,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
             if not available_providers:
                 continue
             api_provider = prompt(
-                "> Enter provider for API {}: ".format(api.value),
+                f"> Enter provider for API {api.value}: ",
                 completer=WordCompleter(available_providers),
                 complete_while_typing=True,
                 validator=Validator.from_callable(
@@ -201,13 +211,18 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
 
         build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec)
     else:
-        with open(args.config, "r") as f:
+        with open(args.config) as f:
             try:
-                build_config = BuildConfig(**yaml.safe_load(f))
+                contents = yaml.safe_load(f)
+                contents = replace_env_vars(contents)
+                build_config = BuildConfig(**contents)
+                if args.image_type:
+                    build_config.image_type = args.image_type
             except Exception as e:
                 cprint(
                     f"Could not parse config file {args.config}: {e}",
                     color="red",
+                    file=sys.stderr,
                 )
                 sys.exit(1)
 
@@ -234,23 +249,27 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
         cprint(
             f"Error building stack: {exc}",
             color="red",
+            file=sys.stderr,
         )
-        cprint("Stack trace:", color="red")
+        cprint("Stack trace:", color="red", file=sys.stderr)
         traceback.print_exc()
         sys.exit(1)
+
     if run_config is None:
         cprint(
             "Run config path is empty",
             color="red",
+            file=sys.stderr,
         )
         sys.exit(1)
 
     if args.run:
-        run_config = Path(run_config)
         config_dict = yaml.safe_load(run_config.read_text())
         config = parse_and_maybe_upgrade_config(config_dict)
+        if config.external_providers_dir and not config.external_providers_dir.exists():
+            config.external_providers_dir.mkdir(exist_ok=True)
         run_args = formulate_run_args(args.image_type, args.image_name, config, args.template)
-        run_args.extend([run_config, str(os.getenv("LLAMA_STACK_PORT", 8321))])
+        run_args.extend([str(os.getenv("LLAMA_STACK_PORT", 8321)), "--config", run_config])
         run_command(run_args)
 
 
@@ -258,7 +277,7 @@ def _generate_run_config(
     build_config: BuildConfig,
     build_dir: Path,
     image_name: str,
-) -> str:
+) -> Path:
     """
     Generate a run.yaml template file for user to edit from a build.yaml file
     """
@@ -268,7 +287,9 @@ def _generate_run_config(
         image_name=image_name,
         apis=apis,
         providers={},
-        external_providers_dir=build_config.external_providers_dir if build_config.external_providers_dir else None,
+        external_providers_dir=build_config.external_providers_dir
+        if build_config.external_providers_dir
+        else EXTERNAL_PROVIDERS_DIR,
     )
     # build providers dict
     provider_registry = get_provider_registry(build_config)
@@ -296,6 +317,7 @@ def _generate_run_config(
                 cprint(
                     f"Failed to import provider {provider_type} for API {api} - assuming it's external, skipping",
                     color="yellow",
+                    file=sys.stderr,
                 )
                 # Set config_type to None to avoid UnboundLocalError
                 config_type = None
@@ -323,19 +345,16 @@ def _generate_run_config(
     # For non-container builds, the run.yaml is generated at the very end of the build process so it
     # makes sense to display this message
     if build_config.image_type != LlamaStackImageType.CONTAINER.value:
-        cprint(
-            f"You can now run your stack with `llama stack run {run_config_file}`",
-            color="green",
-        )
+        cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr)
     return run_config_file
 
 
 def _run_stack_build_command_from_build_config(
     build_config: BuildConfig,
-    image_name: Optional[str] = None,
-    template_name: Optional[str] = None,
-    config_path: Optional[str] = None,
-) -> str:
+    image_name: str | None = None,
+    template_name: str | None = None,
+    config_path: str | None = None,
+) -> Path | Traversable:
     image_name = image_name or build_config.image_name
     if build_config.image_type == LlamaStackImageType.CONTAINER.value:
         if template_name:
@@ -364,7 +383,7 @@ def _run_stack_build_command_from_build_config(
     # Generate the run.yaml so it can be included in the container image with the proper entrypoint
     # Only do this if we're building a container image and we're not using a template
     if build_config.image_type == LlamaStackImageType.CONTAINER.value and not template_name and config_path:
-        cprint("Generating run.yaml file", color="green")
+        cprint("Generating run.yaml file", color="yellow", file=sys.stderr)
         run_config_file = _generate_run_config(build_config, build_dir, image_name)
 
     with open(build_file_path, "w") as f:
@@ -388,7 +407,14 @@ def _run_stack_build_command_from_build_config(
             run_config_file = build_dir / f"{template_name}-run.yaml"
             shutil.copy(path, run_config_file)
 
-        cprint("Build Successful!", color="green")
+        cprint("Build Successful!", color="green", file=sys.stderr)
+        cprint(f"You can find the newly-built template here: {template_path}", color="light_blue", file=sys.stderr)
+        cprint(
+            "You can run the new Llama Stack distro via: "
+            + colored(f"llama stack run {template_path} --image-type {build_config.image_type}", "light_blue"),
+            color="green",
+            file=sys.stderr,
+        )
         return template_path
     else:
         return _generate_run_config(build_config, build_dir, image_name)
diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index 93e7d9b22..2c402beeb 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -49,7 +49,7 @@ class StackBuild(Subcommand):
             type=str,
             help="Image Type to use for the build. If not specified, will use the image type from the template config.",
             choices=[e.value for e in ImageType],
-            default=ImageType.CONDA.value,
+            default=None,  # no default so we can detect whether the user specified --image-type and override the config's image_type
         )
 
         self.parser.add_argument(
diff --git a/llama_stack/cli/stack/list_providers.py b/llama_stack/cli/stack/list_providers.py
index bfe11aa2c..deebd937b 100644
--- a/llama_stack/cli/stack/list_providers.py
+++ b/llama_stack/cli/stack/list_providers.py
@@ -46,7 +46,7 @@ class StackListProviders(Subcommand):
         else:
             providers = [(k.value, prov) for k, prov in all_providers.items()]
 
-        providers = [p for api, p in providers if api in self.providable_apis]
+        providers = [(api, p) for api, p in providers if api in self.providable_apis]
 
         # eventually, this should query a registry at llama.meta.com/llamastack/distributions
         headers = [
@@ -57,7 +57,7 @@ class StackListProviders(Subcommand):
 
         rows = []
 
-        specs = [spec for p in providers for spec in p.values()]
+        specs = [spec for api, p in providers for spec in p.values()]
         for spec in specs:
             if spec.is_sample:
                 continue
@@ -65,7 +65,7 @@ class StackListProviders(Subcommand):
                 [
                     spec.api.value,
                     spec.provider_type,
-                    ",".join(spec.pip_packages),
+                    ",".join(spec.pip_packages) if hasattr(spec, "pip_packages") else "",
                 ]
             )
         print_table(
diff --git a/llama_stack/cli/stack/list_stacks.py b/llama_stack/cli/stack/list_stacks.py
new file mode 100644
index 000000000..2ea0fdeea
--- /dev/null
+++ b/llama_stack/cli/stack/list_stacks.py
@@ -0,0 +1,56 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import argparse
+from pathlib import Path
+
+from llama_stack.cli.subcommand import Subcommand
+from llama_stack.cli.table import print_table
+
+
+class StackListBuilds(Subcommand):
+    """List built stacks in .llama/distributions directory"""
+
+    def __init__(self, subparsers: argparse._SubParsersAction):
+        super().__init__()
+        self.parser = subparsers.add_parser(
+            "list",
+            prog="llama stack list",
+            description="list the build stacks",
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        )
+        self._add_arguments()
+        self.parser.set_defaults(func=self._list_stack_command)
+
+    def _get_distribution_dirs(self) -> dict[str, Path]:
+        """Return a dictionary of distribution names and their paths"""
+        distributions = {}
+        dist_dir = Path.home() / ".llama" / "distributions"
+
+        if dist_dir.exists():
+            for stack_dir in dist_dir.iterdir():
+                if stack_dir.is_dir():
+                    distributions[stack_dir.name] = stack_dir
+        return distributions
+
+    def _list_stack_command(self, args: argparse.Namespace) -> None:
+        distributions = self._get_distribution_dirs()
+
+        if not distributions:
+            print("No stacks found in ~/.llama/distributions")
+            return
+
+        headers = ["Stack Name", "Path"]
+        headers.extend(["Build Config", "Run Config"])
+        rows = []
+        for name, path in distributions.items():
+            row = [name, str(path)]
+            # Check for build and run config files
+            build_config = "Yes" if (path / f"{name}-build.yaml").exists() else "No"
+            run_config = "Yes" if (path / f"{name}-run.yaml").exists() else "No"
+            row.extend([build_config, run_config])
+            rows.append(row)
+        print_table(rows, headers, separate_rows=True)
diff --git a/llama_stack/cli/stack/remove.py b/llama_stack/cli/stack/remove.py
new file mode 100644
index 000000000..a1796941e
--- /dev/null
+++ b/llama_stack/cli/stack/remove.py
@@ -0,0 +1,115 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import argparse
+import shutil
+import sys
+from pathlib import Path
+
+from termcolor import cprint
+
+from llama_stack.cli.subcommand import Subcommand
+from llama_stack.cli.table import print_table
+
+
+class StackRemove(Subcommand):
+    """Remove the build stack"""
+
+    def __init__(self, subparsers: argparse._SubParsersAction):
+        super().__init__()
+        self.parser = subparsers.add_parser(
+            "rm",
+            prog="llama stack rm",
+            description="Remove the build stack",
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        )
+        self._add_arguments()
+        self.parser.set_defaults(func=self._remove_stack_build_command)
+
+    def _add_arguments(self) -> None:
+        self.parser.add_argument(
+            "name",
+            type=str,
+            nargs="?",
+            help="Name of the stack to delete",
+        )
+        self.parser.add_argument(
+            "--all",
+            "-a",
+            action="store_true",
+            help="Delete all stacks (use with caution)",
+        )
+
+    def _get_distribution_dirs(self) -> dict[str, Path]:
+        """Return a dictionary of distribution names and their paths"""
+        distributions = {}
+        dist_dir = Path.home() / ".llama" / "distributions"
+
+        if dist_dir.exists():
+            for stack_dir in dist_dir.iterdir():
+                if stack_dir.is_dir():
+                    distributions[stack_dir.name] = stack_dir
+        return distributions
+
+    def _list_stacks(self) -> None:
+        """Display available stacks in a table"""
+        distributions = self._get_distribution_dirs()
+        if not distributions:
+            cprint("No stacks found in ~/.llama/distributions", color="red", file=sys.stderr)
+            sys.exit(1)
+
+        headers = ["Stack Name", "Path"]
+        rows = [[name, str(path)] for name, path in distributions.items()]
+        print_table(rows, headers, separate_rows=True)
+
+    def _remove_stack_build_command(self, args: argparse.Namespace) -> None:
+        distributions = self._get_distribution_dirs()
+
+        if args.all:
+            confirm = input("Are you sure you want to delete ALL stacks? [yes-i-really-want/N] ").lower()
+            if confirm != "yes-i-really-want":
+                cprint("Deletion cancelled.", color="green", file=sys.stderr)
+                return
+
+            for name, path in distributions.items():
+                try:
+                    shutil.rmtree(path)
+                    cprint(f"Deleted stack: {name}", color="green", file=sys.stderr)
+                except Exception as e:
+                    cprint(
+                        f"Failed to delete stack {name}: {e}",
+                        color="red",
+                        file=sys.stderr,
+                    )
+                    sys.exit(1)
+            return
+
+        if not args.name:
+            self._list_stacks()
+            return
+
+        if args.name not in distributions:
+            self._list_stacks()
+            cprint(
+                f"Stack not found: {args.name}",
+                color="red",
+                file=sys.stderr,
+            )
+            sys.exit(1)
+
+        stack_path = distributions[args.name]
+
+        confirm = input(f"Are you sure you want to delete stack '{args.name}'? [y/N] ").lower()
+        if confirm != "y":
+            cprint("Deletion cancelled.", color="green", file=sys.stderr)
+            return
+
+        try:
+            shutil.rmtree(stack_path)
+            cprint(f"Successfully deleted stack: {args.name}", color="green", file=sys.stderr)
+        except Exception as e:
+            cprint(f"Failed to delete stack {args.name}: {e}", color="red", file=sys.stderr)
+            sys.exit(1)
diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py
index d8234bb46..27745edac 100644
--- a/llama_stack/cli/stack/run.py
+++ b/llama_stack/cli/stack/run.py
@@ -6,6 +6,7 @@
 
 import argparse
 import os
+import subprocess
 from pathlib import Path
 
 from llama_stack.cli.stack.utils import ImageType
@@ -33,7 +34,8 @@ class StackRun(Subcommand):
         self.parser.add_argument(
             "config",
             type=str,
-            help="Path to config file to use for the run",
+            nargs="?",  # Make it optional
+            help="Path to config file to use for the run. Required for venv and conda environments.",
         )
         self.parser.add_argument(
             "--port",
@@ -47,34 +49,23 @@ class StackRun(Subcommand):
             default=os.environ.get("CONDA_DEFAULT_ENV"),
             help="Name of the image to run. Defaults to the current environment",
         )
-        self.parser.add_argument(
-            "--disable-ipv6",
-            action="store_true",
-            help="Disable IPv6 support",
-            default=False,
-        )
         self.parser.add_argument(
             "--env",
             action="append",
             help="Environment variables to pass to the server in KEY=VALUE format. Can be specified multiple times.",
             metavar="KEY=VALUE",
         )
-        self.parser.add_argument(
-            "--tls-keyfile",
-            type=str,
-            help="Path to TLS key file for HTTPS",
-        )
-        self.parser.add_argument(
-            "--tls-certfile",
-            type=str,
-            help="Path to TLS certificate file for HTTPS",
-        )
         self.parser.add_argument(
             "--image-type",
             type=str,
             help="Image Type used during the build. This can be either conda or container or venv.",
             choices=[e.value for e in ImageType],
         )
+        self.parser.add_argument(
+            "--enable-ui",
+            action="store_true",
+            help="Start the UI server",
+        )
 
     # If neither image type nor image name is provided, but at the same time
     # the current environment has conda breadcrumbs, then assume what the user
@@ -98,44 +89,57 @@ class StackRun(Subcommand):
         from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
         from llama_stack.distribution.utils.exec import formulate_run_args, run_command
 
-        config_file = Path(args.config)
-        has_yaml_suffix = args.config.endswith(".yaml")
-        template_name = None
-
-        if not config_file.exists() and not has_yaml_suffix:
-            # check if this is a template
-            config_file = Path(REPO_ROOT) / "llama_stack" / "templates" / args.config / "run.yaml"
-            if config_file.exists():
-                template_name = args.config
-
-        if not config_file.exists() and not has_yaml_suffix:
-            # check if it's a build config saved to ~/.llama dir
-            config_file = Path(DISTRIBS_BASE_DIR / f"llamastack-{args.config}" / f"{args.config}-run.yaml")
-
-        if not config_file.exists():
-            self.parser.error(
-                f"File {str(config_file)} does not exist.\n\nPlease run `llama stack build` to generate (and optionally edit) a run.yaml file"
-            )
-
-        if not config_file.is_file():
-            self.parser.error(
-                f"Config file must be a valid file path, '{config_file}’ is not a file: type={type(config_file)}"
-            )
-
-        logger.info(f"Using run configuration: {config_file}")
-
-        try:
-            config_dict = yaml.safe_load(config_file.read_text())
-        except yaml.parser.ParserError as e:
-            self.parser.error(f"failed to load config file '{config_file}':\n {e}")
-
-        try:
-            config = parse_and_maybe_upgrade_config(config_dict)
-        except AttributeError as e:
-            self.parser.error(f"failed to parse config file '{config_file}':\n {e}")
-
+        if args.enable_ui:
+            self._start_ui_development_server(args.port)
         image_type, image_name = self._get_image_type_and_name(args)
 
+        # Check if config is required based on image type
+        if (image_type in [ImageType.CONDA.value, ImageType.VENV.value]) and not args.config:
+            self.parser.error("Config file is required for venv and conda environments")
+
+        if args.config:
+            config_file = Path(args.config)
+            has_yaml_suffix = args.config.endswith(".yaml")
+            template_name = None
+
+            if not config_file.exists() and not has_yaml_suffix:
+                # check if this is a template
+                config_file = Path(REPO_ROOT) / "llama_stack" / "templates" / args.config / "run.yaml"
+                if config_file.exists():
+                    template_name = args.config
+
+            if not config_file.exists() and not has_yaml_suffix:
+                # check if it's a build config saved to ~/.llama dir
+                config_file = Path(DISTRIBS_BASE_DIR / f"llamastack-{args.config}" / f"{args.config}-run.yaml")
+
+            if not config_file.exists():
+                self.parser.error(
+                    f"File {str(config_file)} does not exist.\n\nPlease run `llama stack build` to generate (and optionally edit) a run.yaml file"
+                )
+
+            if not config_file.is_file():
+                self.parser.error(
+                    f"Config file must be a valid file path, '{config_file}' is not a file: type={type(config_file)}"
+                )
+
+            logger.info(f"Using run configuration: {config_file}")
+
+            try:
+                config_dict = yaml.safe_load(config_file.read_text())
+            except yaml.parser.ParserError as e:
+                self.parser.error(f"failed to load config file '{config_file}':\n {e}")
+
+            try:
+                config = parse_and_maybe_upgrade_config(config_dict)
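+                # Make sure the external providers dir exists up front; get_provider_registry
+                # raises FileNotFoundError when the configured directory is missing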
+                if not os.path.exists(str(config.external_providers_dir)):
+                    os.makedirs(str(config.external_providers_dir), exist_ok=True)
+            except AttributeError as e:
+                self.parser.error(f"failed to parse config file '{config_file}':\n {e}")
+        else:
+            config = None
+            config_file = None
+            template_name = None
+
         # If neither image type nor image name is provided, assume the server should be run directly
         # using the current environment packages.
         if not image_type and not image_name:
@@ -157,9 +161,10 @@ class StackRun(Subcommand):
         else:
             run_args = formulate_run_args(image_type, image_name, config, template_name)
 
-            run_args.extend([str(config_file), str(args.port)])
-            if args.disable_ipv6:
-                run_args.append("--disable-ipv6")
+            run_args.extend([str(args.port)])
+
+            if config_file:
+                run_args.extend(["--config", str(config_file)])
 
             if args.env:
                 for env_var in args.env:
@@ -172,6 +177,45 @@ class StackRun(Subcommand):
                         return
                     run_args.extend(["--env", f"{key}={value}"])
 
-            if args.tls_keyfile and args.tls_certfile:
-                run_args.extend(["--tls-keyfile", args.tls_keyfile, "--tls-certfile", args.tls_certfile])
             run_command(run_args)
+
+    def _start_ui_development_server(self, stack_server_port: int):
+        logger.info("Attempting to start UI development server...")
+        # Check if npm is available
+        npm_check = subprocess.run(["npm", "--version"], capture_output=True, text=True, check=False)
+        if npm_check.returncode != 0:
+            logger.warning(
+                f"'npm' command not found or not executable. UI development server will not be started. Error: {npm_check.stderr}"
+            )
+            return
+
+        ui_dir = REPO_ROOT / "llama_stack" / "ui"
+        logs_dir = Path("~/.llama/ui/logs").expanduser()
+        try:
+            # Create logs directory if it doesn't exist
+            logs_dir.mkdir(parents=True, exist_ok=True)
+
+            ui_stdout_log_path = logs_dir / "stdout.log"
+            ui_stderr_log_path = logs_dir / "stderr.log"
+
+            # Open log files in append mode
+            stdout_log_file = open(ui_stdout_log_path, "a")
+            stderr_log_file = open(ui_stderr_log_path, "a")
+
+            process = subprocess.Popen(
+                ["npm", "run", "dev"],
+                cwd=str(ui_dir),
+                stdout=stdout_log_file,
+                stderr=stderr_log_file,
+                env={**os.environ, "NEXT_PUBLIC_LLAMA_STACK_BASE_URL": f"http://localhost:{stack_server_port}"},
+            )
+            logger.info(f"UI development server process started in {ui_dir} with PID {process.pid}.")
+            logger.info(f"Logs: stdout -> {ui_stdout_log_path}, stderr -> {ui_stderr_log_path}")
+            logger.info(f"UI will be available at http://localhost:{os.getenv('LLAMA_STACK_UI_PORT', 8322)}")
+
+        except FileNotFoundError:
+            logger.error(
+                "Failed to start UI development server: 'npm' command not found. Make sure npm is installed and in your PATH."
+            )
+        except Exception as e:
+            logger.error(f"Failed to start UI development server in {ui_dir}: {e}")
diff --git a/llama_stack/cli/stack/stack.py b/llama_stack/cli/stack/stack.py
index ccf1a5ffc..3aff78e23 100644
--- a/llama_stack/cli/stack/stack.py
+++ b/llama_stack/cli/stack/stack.py
@@ -7,12 +7,14 @@
 import argparse
 from importlib.metadata import version
 
+from llama_stack.cli.stack.list_stacks import StackListBuilds
 from llama_stack.cli.stack.utils import print_subcommand_description
 from llama_stack.cli.subcommand import Subcommand
 
 from .build import StackBuild
 from .list_apis import StackListApis
 from .list_providers import StackListProviders
+from .remove import StackRemove
 from .run import StackRun
 
 
@@ -41,5 +43,6 @@ class StackParser(Subcommand):
         StackListApis.create(subparsers)
         StackListProviders.create(subparsers)
         StackRun.create(subparsers)
-
+        StackRemove.create(subparsers)
+        StackListBuilds.create(subparsers)
         print_subcommand_description(self.parser, subparsers)
diff --git a/llama_stack/cli/table.py b/llama_stack/cli/table.py
index bf59e6103..86c3adff2 100644
--- a/llama_stack/cli/table.py
+++ b/llama_stack/cli/table.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Iterable
+from collections.abc import Iterable
 
 from rich.console import Console
 from rich.table import Table
diff --git a/llama_stack/cli/verify_download.py b/llama_stack/cli/verify_download.py
index 1229e8601..3a1af3cbc 100644
--- a/llama_stack/cli/verify_download.py
+++ b/llama_stack/cli/verify_download.py
@@ -9,7 +9,6 @@ import hashlib
 from dataclasses import dataclass
 from functools import partial
 from pathlib import Path
-from typing import Dict, List, Optional
 
 from rich.console import Console
 from rich.progress import Progress, SpinnerColumn, TextColumn
@@ -21,7 +20,7 @@ from llama_stack.cli.subcommand import Subcommand
 class VerificationResult:
     filename: str
     expected_hash: str
-    actual_hash: Optional[str]
+    actual_hash: str | None
     exists: bool
     matches: bool
 
@@ -60,9 +59,9 @@ def calculate_md5(filepath: Path, chunk_size: int = 8192) -> str:
     return md5_hash.hexdigest()
 
 
-def load_checksums(checklist_path: Path) -> Dict[str, str]:
+def load_checksums(checklist_path: Path) -> dict[str, str]:
     checksums = {}
-    with open(checklist_path, "r") as f:
+    with open(checklist_path) as f:
         for line in f:
             if line.strip():
                 md5sum, filepath = line.strip().split("  ", 1)
@@ -72,7 +71,7 @@ def load_checksums(checklist_path: Path) -> Dict[str, str]:
     return checksums
 
 
-def verify_files(model_dir: Path, checksums: Dict[str, str], console: Console) -> List[VerificationResult]:
+def verify_files(model_dir: Path, checksums: dict[str, str], console: Console) -> list[VerificationResult]:
     results = []
 
     with Progress(
diff --git a/llama_stack/distribution/access_control.py b/llama_stack/distribution/access_control.py
index 0651ab6eb..d560ec80f 100644
--- a/llama_stack/distribution/access_control.py
+++ b/llama_stack/distribution/access_control.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.distribution.datatypes import AccessAttributes
 from llama_stack.log import get_logger
@@ -14,8 +14,8 @@ logger = get_logger(__name__, category="core")
 
 def check_access(
     obj_identifier: str,
-    obj_attributes: Optional[AccessAttributes],
-    user_attributes: Optional[Dict[str, Any]] = None,
+    obj_attributes: AccessAttributes | None,
+    user_attributes: dict[str, Any] | None = None,
 ) -> bool:
     """Check if the current user has access to the given object, based on access attributes.
 
diff --git a/llama_stack/distribution/build.py b/llama_stack/distribution/build.py
index 9664449f3..072f9c425 100644
--- a/llama_stack/distribution/build.py
+++ b/llama_stack/distribution/build.py
@@ -6,6 +6,7 @@
 
 import importlib.resources
 import logging
+import sys
 from pathlib import Path
 
 from pydantic import BaseModel
@@ -43,18 +44,29 @@ def get_provider_dependencies(
     # Extract providers based on config type
     if isinstance(config, DistributionTemplate):
         providers = config.providers
+
+        # TODO: This is a hack to get the dependencies for internal APIs into build
+        # We should have a better way to do this by formalizing the concept of "internal" APIs
+        # and providers, with a way to specify dependencies for them.
+        run_configs = config.run_configs
+        additional_pip_packages: list[str] = []
+        if run_configs:
+            for run_config in run_configs.values():
+                run_config_ = run_config.run_config(name="", providers={}, container_image=None)
+                if run_config_.inference_store:
+                    additional_pip_packages.extend(run_config_.inference_store.pip_packages)
     elif isinstance(config, BuildConfig):
         providers = config.distribution_spec.providers
+        additional_pip_packages = config.additional_pip_packages
     deps = []
     registry = get_provider_registry(config)
-
     for api_str, provider_or_providers in providers.items():
         providers_for_api = registry[Api(api_str)]
 
         providers = provider_or_providers if isinstance(provider_or_providers, list) else [provider_or_providers]
 
         for provider in providers:
-            # Providers from BuildConfig and RunConfig are subtly different – not great
+            # Providers from BuildConfig and RunConfig are subtly different - not great
             provider_type = provider if isinstance(provider, str) else provider.provider_type
 
             if provider_type not in providers_for_api:
@@ -73,6 +85,9 @@ def get_provider_dependencies(
         else:
             normal_deps.append(package)
 
+    if additional_pip_packages:
+        normal_deps.extend(additional_pip_packages)
+
     return list(set(normal_deps)), list(set(special_deps))
 
 
@@ -81,10 +96,11 @@ def print_pip_install_help(config: BuildConfig):
 
     cprint(
         f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
-        "yellow",
+        color="yellow",
+        file=sys.stderr,
     )
     for special_dep in special_deps:
-        cprint(f"uv pip install {special_dep}", "yellow")
+        cprint(f"uv pip install {special_dep}", color="yellow", file=sys.stderr)
     print()
 
 
diff --git a/llama_stack/distribution/build_container.sh b/llama_stack/distribution/build_container.sh
index ad316d45e..c128729e1 100755
--- a/llama_stack/distribution/build_container.sh
+++ b/llama_stack/distribution/build_container.sh
@@ -154,6 +154,12 @@ get_python_cmd() {
     fi
 }
 
+# Add other required commands generic to all containers
+add_to_container << EOF
+# Allows running as non-root user
+RUN mkdir -p /.llama/providers.d /.cache
+EOF
+
 if [ -n "$run_config" ]; then
   # Copy the run config to the build context since it's an absolute path
   cp "$run_config" "$BUILD_CONTEXT_DIR/run.yaml"
@@ -166,17 +172,19 @@ EOF
   # and update the configuration to reference the new container path
   python_cmd=$(get_python_cmd)
   external_providers_dir=$($python_cmd -c "import yaml; config = yaml.safe_load(open('$run_config')); print(config.get('external_providers_dir') or '')")
-  if [ -n "$external_providers_dir" ]; then
+  external_providers_dir=$(eval echo "$external_providers_dir")
+  if [ -n "$external_providers_dir" ] && [ -d "$external_providers_dir" ]; then
     echo "Copying external providers directory: $external_providers_dir"
+    cp -r "$external_providers_dir" "$BUILD_CONTEXT_DIR/providers.d"
     add_to_container << EOF
-COPY $external_providers_dir /app/providers.d
+COPY providers.d /.llama/providers.d
 EOF
-    # Edit the run.yaml file to change the external_providers_dir to /app/providers.d
+    # Edit the run.yaml file to change the external_providers_dir to /.llama/providers.d
     if [ "$(uname)" = "Darwin" ]; then
-      sed -i.bak -e 's|external_providers_dir:.*|external_providers_dir: /app/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
+      sed -i.bak -e 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
       rm -f "$BUILD_CONTEXT_DIR/run.yaml.bak"
     else
-      sed -i 's|external_providers_dir:.*|external_providers_dir: /app/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
+      sed -i 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
     fi
   fi
 fi
@@ -255,9 +263,6 @@ fi
 # Add other require item commands genearic to all containers
 add_to_container << EOF
 
-# Allows running as non-root user
-RUN mkdir -p /.llama /.cache
-
 RUN chmod -R g+rw /app /.llama /.cache
 EOF
 
diff --git a/llama_stack/distribution/client.py b/llama_stack/distribution/client.py
index 1925b864f..03e4fb051 100644
--- a/llama_stack/distribution/client.py
+++ b/llama_stack/distribution/client.py
@@ -6,9 +6,10 @@
 
 import inspect
 import json
+import sys
 from collections.abc import AsyncIterator
 from enum import Enum
-from typing import Any, Type, Union, get_args, get_origin
+from typing import Any, Union, get_args, get_origin
 
 import httpx
 from pydantic import BaseModel, parse_obj_as
@@ -27,7 +28,7 @@ async def get_client_impl(protocol, config: RemoteProviderConfig, _deps: Any):
     return impl
 
 
-def create_api_client_class(protocol) -> Type:
+def create_api_client_class(protocol) -> type:
     if protocol in _CLIENT_CLASSES:
         return _CLIENT_CLASSES[protocol]
 
@@ -96,13 +97,13 @@ def create_api_client_class(protocol) -> Type:
                             try:
                                 data = json.loads(data)
                                 if "error" in data:
-                                    cprint(data, "red")
+                                    cprint(data, color="red", file=sys.stderr)
                                     continue
 
                                 yield parse_obj_as(return_type, data)
                             except Exception as e:
-                                print(f"Error with parsing or validation: {e}")
-                                print(data)
+                                cprint(f"Error with parsing or validation: {e}", color="red", file=sys.stderr)
+                                cprint(data, color="red", file=sys.stderr)
 
         def httpx_request_params(self, method_name: str, *args, **kwargs) -> dict:
             webmethod, sig = self.routes[method_name]
diff --git a/llama_stack/distribution/common.sh b/llama_stack/distribution/common.sh
index 15220048b..5f764bcca 100755
--- a/llama_stack/distribution/common.sh
+++ b/llama_stack/distribution/common.sh
@@ -1,3 +1,5 @@
+#!/usr/bin/env bash
+
 # Copyright (c) Meta Platforms, Inc. and affiliates.
 # All rights reserved.
 #
diff --git a/llama_stack/distribution/configure.py b/llama_stack/distribution/configure.py
index 2a3bf7053..e58ea0338 100644
--- a/llama_stack/distribution/configure.py
+++ b/llama_stack/distribution/configure.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 import logging
 import textwrap
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import (
     LLAMA_STACK_RUN_CONFIG_VERSION,
@@ -17,6 +17,7 @@ from llama_stack.distribution.distribution import (
     builtin_automatically_routed_apis,
     get_provider_registry,
 )
+from llama_stack.distribution.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
 from llama_stack.distribution.utils.dynamic import instantiate_class_type
 from llama_stack.distribution.utils.prompt_for_config import prompt_for_config
 from llama_stack.providers.datatypes import Api, ProviderSpec
@@ -24,7 +25,7 @@ from llama_stack.providers.datatypes import Api, ProviderSpec
 logger = logging.getLogger(__name__)
 
 
-def configure_single_provider(registry: Dict[str, ProviderSpec], provider: Provider) -> Provider:
+def configure_single_provider(registry: dict[str, ProviderSpec], provider: Provider) -> Provider:
     provider_spec = registry[provider.provider_type]
     config_type = instantiate_class_type(provider_spec.config_class)
     try:
@@ -73,11 +74,7 @@ def configure_api_providers(config: StackRunConfig, build_spec: DistributionSpec
 
         existing_providers = config.providers.get(api_str, [])
         if existing_providers:
-            logger.info(
-                f"Re-configuring existing providers for API `{api_str}`...",
-                "green",
-                attrs=["bold"],
-            )
+            logger.info(f"Re-configuring existing providers for API `{api_str}`...")
             updated_providers = []
             for p in existing_providers:
                 logger.info(f"> Configuring provider `({p.provider_type})`")
@@ -91,7 +88,7 @@ def configure_api_providers(config: StackRunConfig, build_spec: DistributionSpec
             if not plist:
                 raise ValueError(f"No provider configured for API {api_str}?")
 
-            logger.info(f"Configuring API `{api_str}`...", "green", attrs=["bold"])
+            logger.info(f"Configuring API `{api_str}`...")
             updated_providers = []
             for i, provider_type in enumerate(plist):
                 if i >= 1:
@@ -120,8 +117,8 @@ def configure_api_providers(config: StackRunConfig, build_spec: DistributionSpec
 
 
 def upgrade_from_routing_table(
-    config_dict: Dict[str, Any],
-) -> Dict[str, Any]:
+    config_dict: dict[str, Any],
+) -> dict[str, Any]:
     def get_providers(entries):
         return [
             Provider(
@@ -163,7 +160,7 @@ def upgrade_from_routing_table(
     return config_dict
 
 
-def parse_and_maybe_upgrade_config(config_dict: Dict[str, Any]) -> StackRunConfig:
+def parse_and_maybe_upgrade_config(config_dict: dict[str, Any]) -> StackRunConfig:
     version = config_dict.get("version", None)
     if version == LLAMA_STACK_RUN_CONFIG_VERSION:
         return StackRunConfig(**config_dict)
@@ -174,4 +171,7 @@ def parse_and_maybe_upgrade_config(config_dict: Dict[str, Any]) -> StackRunConfi
 
     config_dict["version"] = LLAMA_STACK_RUN_CONFIG_VERSION
 
+    if not config_dict.get("external_providers_dir", None):
+        config_dict["external_providers_dir"] = EXTERNAL_PROVIDERS_DIR
+
     return StackRunConfig(**config_dict)
diff --git a/llama_stack/distribution/datatypes.py b/llama_stack/distribution/datatypes.py
index 38353c1ff..def7048c0 100644
--- a/llama_stack/distribution/datatypes.py
+++ b/llama_stack/distribution/datatypes.py
@@ -4,9 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Annotated, Any, Dict, List, Optional, Union
+from enum import Enum
+from pathlib import Path
+from typing import Annotated, Any
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 from llama_stack.apis.benchmarks import Benchmark, BenchmarkInput
 from llama_stack.apis.datasetio import DatasetIO
@@ -23,13 +25,14 @@ from llama_stack.apis.tools import Tool, ToolGroup, ToolGroupInput, ToolRuntime
 from llama_stack.apis.vector_dbs import VectorDB, VectorDBInput
 from llama_stack.apis.vector_io import VectorIO
 from llama_stack.providers.datatypes import Api, ProviderSpec
-from llama_stack.providers.utils.kvstore.config import KVStoreConfig
+from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
+from llama_stack.providers.utils.sqlstore.sqlstore import SqlStoreConfig
 
 LLAMA_STACK_BUILD_CONFIG_VERSION = "2"
 LLAMA_STACK_RUN_CONFIG_VERSION = "2"
 
 
-RoutingKey = Union[str, List[str]]
+RoutingKey = str | list[str]
 
 
 class AccessAttributes(BaseModel):
@@ -46,17 +49,17 @@ class AccessAttributes(BaseModel):
     """
 
     # Standard attribute categories - the minimal set we need now
-    roles: Optional[List[str]] = Field(
+    roles: list[str] | None = Field(
         default=None, description="Role-based attributes (e.g., 'admin', 'data-scientist', 'user')"
     )
 
-    teams: Optional[List[str]] = Field(default=None, description="Team-based attributes (e.g., 'ml-team', 'nlp-team')")
+    teams: list[str] | None = Field(default=None, description="Team-based attributes (e.g., 'ml-team', 'nlp-team')")
 
-    projects: Optional[List[str]] = Field(
+    projects: list[str] | None = Field(
         default=None, description="Project-based access attributes (e.g., 'llama-3', 'customer-insights')"
     )
 
-    namespaces: Optional[List[str]] = Field(
+    namespaces: list[str] | None = Field(
         default=None, description="Namespace-based access control for resource isolation"
     )
 
@@ -105,7 +108,7 @@ class ResourceWithACL(Resource):
         # ^ User must have access to the customer-insights project AND have confidential namespace
     """
 
-    access_attributes: Optional[AccessAttributes] = None
+    access_attributes: AccessAttributes | None = None
 
 
 # Use the extended Resource for all routable objects
@@ -141,41 +144,21 @@ class ToolGroupWithACL(ToolGroup, ResourceWithACL):
     pass
 
 
-RoutableObject = Union[
-    Model,
-    Shield,
-    VectorDB,
-    Dataset,
-    ScoringFn,
-    Benchmark,
-    Tool,
-    ToolGroup,
-]
-
+RoutableObject = Model | Shield | VectorDB | Dataset | ScoringFn | Benchmark | Tool | ToolGroup
 
 RoutableObjectWithProvider = Annotated[
-    Union[
-        ModelWithACL,
-        ShieldWithACL,
-        VectorDBWithACL,
-        DatasetWithACL,
-        ScoringFnWithACL,
-        BenchmarkWithACL,
-        ToolWithACL,
-        ToolGroupWithACL,
-    ],
+    ModelWithACL
+    | ShieldWithACL
+    | VectorDBWithACL
+    | DatasetWithACL
+    | ScoringFnWithACL
+    | BenchmarkWithACL
+    | ToolWithACL
+    | ToolGroupWithACL,
     Field(discriminator="type"),
 ]
 
-RoutedProtocol = Union[
-    Inference,
-    Safety,
-    VectorIO,
-    DatasetIO,
-    Scoring,
-    Eval,
-    ToolRuntime,
-]
+RoutedProtocol = Inference | Safety | VectorIO | DatasetIO | Scoring | Eval | ToolRuntime
 
 
 # Example: /inference, /safety
@@ -183,15 +166,15 @@ class AutoRoutedProviderSpec(ProviderSpec):
     provider_type: str = "router"
     config_class: str = ""
 
-    container_image: Optional[str] = None
+    container_image: str | None = None
     routing_table_api: Api
     module: str
-    provider_data_validator: Optional[str] = Field(
+    provider_data_validator: str | None = Field(
         default=None,
     )
 
     @property
-    def pip_packages(self) -> List[str]:
+    def pip_packages(self) -> list[str]:
         raise AssertionError("Should not be called on AutoRoutedProviderSpec")
 
 
@@ -199,20 +182,20 @@ class AutoRoutedProviderSpec(ProviderSpec):
 class RoutingTableProviderSpec(ProviderSpec):
     provider_type: str = "routing_table"
     config_class: str = ""
-    container_image: Optional[str] = None
+    container_image: str | None = None
 
     router_api: Api
     module: str
-    pip_packages: List[str] = Field(default_factory=list)
+    pip_packages: list[str] = Field(default_factory=list)
 
 
 class DistributionSpec(BaseModel):
-    description: Optional[str] = Field(
+    description: str | None = Field(
         default="",
         description="Description of the distribution",
     )
-    container_image: Optional[str] = None
-    providers: Dict[str, Union[str, List[str]]] = Field(
+    container_image: str | None = None
+    providers: dict[str, str | list[str]] = Field(
         default_factory=dict,
         description="""
 Provider Types for each of the APIs provided by this distribution. If you
@@ -224,22 +207,50 @@ in the runtime configuration to help route to the correct provider.""",
 class Provider(BaseModel):
     provider_id: str
     provider_type: str
-    config: Dict[str, Any]
+    config: dict[str, Any]
 
 
 class LoggingConfig(BaseModel):
-    category_levels: Dict[str, str] = Field(
-        default_factory=Dict,
+    category_levels: dict[str, str] = Field(
+        default_factory=dict,
         description="""
  Dictionary of different logging configurations for different portions (ex: core, server) of llama stack""",
     )
 
 
+class AuthProviderType(str, Enum):
+    """Supported authentication provider types."""
+
+    OAUTH2_TOKEN = "oauth2_token"
+    CUSTOM = "custom"
+
+
 class AuthenticationConfig(BaseModel):
-    endpoint: str = Field(
+    provider_type: AuthProviderType = Field(
         ...,
-        description="Endpoint URL to validate authentication tokens",
+        description="Type of authentication provider",
     )
+    config: dict[str, Any] = Field(
+        ...,
+        description="Provider-specific configuration",
+    )
+
+
+class AuthenticationRequiredError(Exception):
+    pass
+
+
+class QuotaPeriod(str, Enum):
+    DAY = "day"
+
+
+class QuotaConfig(BaseModel):
+    kvstore: SqliteKVStoreConfig = Field(description="Config for KV store backend (SQLite only for now)")
+    anonymous_max_requests: int = Field(default=100, description="Max requests for unauthenticated clients per period")
+    authenticated_max_requests: int = Field(
+        default=1000, description="Max requests for authenticated clients per period"
+    )
+    period: QuotaPeriod = Field(default=QuotaPeriod.DAY, description="Quota period to set")
 
 
 class ServerConfig(BaseModel):
@@ -249,18 +260,30 @@ class ServerConfig(BaseModel):
         ge=1024,
         le=65535,
     )
-    tls_certfile: Optional[str] = Field(
+    tls_certfile: str | None = Field(
         default=None,
         description="Path to TLS certificate file for HTTPS",
     )
-    tls_keyfile: Optional[str] = Field(
+    tls_keyfile: str | None = Field(
         default=None,
         description="Path to TLS key file for HTTPS",
     )
-    auth: Optional[AuthenticationConfig] = Field(
+    tls_cafile: str | None = Field(
+        default=None,
+        description="Path to TLS CA file for HTTPS with mutual TLS authentication",
+    )
+    auth: AuthenticationConfig | None = Field(
         default=None,
         description="Authentication configuration for the server",
     )
+    host: str | None = Field(
+        default=None,
+        description="The host the server should listen on",
+    )
+    quota: QuotaConfig | None = Field(
+        default=None,
+        description="Per client quota request configuration",
+    )
 
 
 class StackRunConfig(BaseModel):
@@ -273,50 +296,66 @@ Reference to the distribution this package refers to. For unregistered (adhoc) p
 this could be just a hash
 """,
     )
-    container_image: Optional[str] = Field(
+    container_image: str | None = Field(
         default=None,
         description="Reference to the container image if this package refers to a container",
     )
-    apis: List[str] = Field(
+    apis: list[str] = Field(
         default_factory=list,
         description="""
 The list of APIs to serve. If not specified, all APIs specified in the provider_map will be served""",
     )
 
-    providers: Dict[str, List[Provider]] = Field(
+    providers: dict[str, list[Provider]] = Field(
         description="""
 One or more providers to use for each API. The same provider_type (e.g., meta-reference)
 can be instantiated multiple times (with different configs) if necessary.
 """,
     )
-    metadata_store: Optional[KVStoreConfig] = Field(
+    metadata_store: KVStoreConfig | None = Field(
         default=None,
         description="""
 Configuration for the persistence store used by the distribution registry. If not specified,
 a default SQLite store will be used.""",
     )
 
-    # registry of "resources" in the distribution
-    models: List[ModelInput] = Field(default_factory=list)
-    shields: List[ShieldInput] = Field(default_factory=list)
-    vector_dbs: List[VectorDBInput] = Field(default_factory=list)
-    datasets: List[DatasetInput] = Field(default_factory=list)
-    scoring_fns: List[ScoringFnInput] = Field(default_factory=list)
-    benchmarks: List[BenchmarkInput] = Field(default_factory=list)
-    tool_groups: List[ToolGroupInput] = Field(default_factory=list)
+    inference_store: SqlStoreConfig | None = Field(
+        default=None,
+        description="""
+Configuration for the persistence store used by the inference API. If not specified,
+a default SQLite store will be used.""",
+    )
 
-    logging: Optional[LoggingConfig] = Field(default=None, description="Configuration for Llama Stack Logging")
+    # registry of "resources" in the distribution
+    models: list[ModelInput] = Field(default_factory=list)
+    shields: list[ShieldInput] = Field(default_factory=list)
+    vector_dbs: list[VectorDBInput] = Field(default_factory=list)
+    datasets: list[DatasetInput] = Field(default_factory=list)
+    scoring_fns: list[ScoringFnInput] = Field(default_factory=list)
+    benchmarks: list[BenchmarkInput] = Field(default_factory=list)
+    tool_groups: list[ToolGroupInput] = Field(default_factory=list)
+
+    logging: LoggingConfig | None = Field(default=None, description="Configuration for Llama Stack Logging")
 
     server: ServerConfig = Field(
         default_factory=ServerConfig,
         description="Configuration for the HTTP(S) server",
     )
 
-    external_providers_dir: Optional[str] = Field(
+    external_providers_dir: Path | None = Field(
         default=None,
         description="Path to directory containing external provider implementations. The providers code and dependencies must be installed on the system.",
     )
 
+    @field_validator("external_providers_dir")
+    @classmethod
+    def validate_external_providers_dir(cls, v):
+        if v is None:
+            return None
+        if isinstance(v, str):
+            return Path(v)
+        return v
+
 
 class BuildConfig(BaseModel):
     version: str = LLAMA_STACK_BUILD_CONFIG_VERSION
@@ -326,12 +365,25 @@ class BuildConfig(BaseModel):
         default="conda",
         description="Type of package to build (conda | container | venv)",
     )
-    image_name: Optional[str] = Field(
+    image_name: str | None = Field(
         default=None,
         description="Name of the distribution to build",
     )
-    external_providers_dir: Optional[str] = Field(
+    external_providers_dir: Path | None = Field(
         default=None,
         description="Path to directory containing external provider implementations. The providers packages will be resolved from this directory. "
         "pip_packages MUST contain the provider package name.",
     )
+    additional_pip_packages: list[str] = Field(
+        default_factory=list,
+        description="Additional pip packages to install in the distribution. These packages will be installed in the distribution environment.",
+    )
+
+    @field_validator("external_providers_dir")
+    @classmethod
+    def validate_external_providers_dir(cls, v):
+        if v is None:
+            return None
+        if isinstance(v, str):
+            return Path(v)
+        return v
diff --git a/llama_stack/distribution/distribution.py b/llama_stack/distribution/distribution.py
index f948ddf1c..b860d15ab 100644
--- a/llama_stack/distribution/distribution.py
+++ b/llama_stack/distribution/distribution.py
@@ -7,7 +7,7 @@
 import glob
 import importlib
 import os
-from typing import Any, Dict, List
+from typing import Any
 
 import yaml
 from pydantic import BaseModel
@@ -24,7 +24,7 @@ from llama_stack.providers.datatypes import (
 logger = get_logger(name=__name__, category="core")
 
 
-def stack_apis() -> List[Api]:
+def stack_apis() -> list[Api]:
     return list(Api)
 
 
@@ -33,7 +33,7 @@ class AutoRoutedApiInfo(BaseModel):
     router_api: Api
 
 
-def builtin_automatically_routed_apis() -> List[AutoRoutedApiInfo]:
+def builtin_automatically_routed_apis() -> list[AutoRoutedApiInfo]:
     return [
         AutoRoutedApiInfo(
             routing_table_api=Api.models,
@@ -66,12 +66,12 @@ def builtin_automatically_routed_apis() -> List[AutoRoutedApiInfo]:
     ]
 
 
-def providable_apis() -> List[Api]:
+def providable_apis() -> list[Api]:
     routing_table_apis = {x.routing_table_api for x in builtin_automatically_routed_apis()}
     return [api for api in Api if api not in routing_table_apis and api != Api.inspect and api != Api.providers]
 
 
-def _load_remote_provider_spec(spec_data: Dict[str, Any], api: Api) -> ProviderSpec:
+def _load_remote_provider_spec(spec_data: dict[str, Any], api: Api) -> ProviderSpec:
     adapter = AdapterSpec(**spec_data["adapter"])
     spec = remote_provider_spec(
         api=api,
@@ -81,7 +81,7 @@ def _load_remote_provider_spec(spec_data: Dict[str, Any], api: Api) -> ProviderS
     return spec
 
 
-def _load_inline_provider_spec(spec_data: Dict[str, Any], api: Api, provider_name: str) -> ProviderSpec:
+def _load_inline_provider_spec(spec_data: dict[str, Any], api: Api, provider_name: str) -> ProviderSpec:
     spec = InlineProviderSpec(
         api=api,
         provider_type=f"inline::{provider_name}",
@@ -98,7 +98,7 @@ def _load_inline_provider_spec(spec_data: Dict[str, Any], api: Api, provider_nam
 
 def get_provider_registry(
     config=None,
-) -> Dict[Api, Dict[str, ProviderSpec]]:
+) -> dict[Api, dict[str, ProviderSpec]]:
     """Get the provider registry, optionally including external providers.
 
     This function loads both built-in providers and external providers from YAML files.
@@ -133,7 +133,7 @@ def get_provider_registry(
         ValueError: If any provider spec is invalid
     """
 
-    ret: Dict[Api, Dict[str, ProviderSpec]] = {}
+    ret: dict[Api, dict[str, ProviderSpec]] = {}
     for api in providable_apis():
         name = api.name.lower()
         logger.debug(f"Importing module {name}")
@@ -145,7 +145,7 @@ def get_provider_registry(
 
     # Check if config has the external_providers_dir attribute
     if config and hasattr(config, "external_providers_dir") and config.external_providers_dir:
-        external_providers_dir = os.path.abspath(config.external_providers_dir)
+        external_providers_dir = os.path.abspath(os.path.expanduser(config.external_providers_dir))
         if not os.path.exists(external_providers_dir):
             raise FileNotFoundError(f"External providers directory not found: {external_providers_dir}")
         logger.info(f"Loading external providers from {external_providers_dir}")
diff --git a/llama_stack/distribution/inspect.py b/llama_stack/distribution/inspect.py
index 23f644ec6..5822070ad 100644
--- a/llama_stack/distribution/inspect.py
+++ b/llama_stack/distribution/inspect.py
@@ -16,7 +16,7 @@ from llama_stack.apis.inspect import (
     VersionInfo,
 )
 from llama_stack.distribution.datatypes import StackRunConfig
-from llama_stack.distribution.server.endpoints import get_all_api_endpoints
+from llama_stack.distribution.server.routes import get_all_api_routes
 from llama_stack.providers.datatypes import HealthStatus
 
 
@@ -31,7 +31,7 @@ async def get_provider_impl(config, deps):
 
 
 class DistributionInspectImpl(Inspect):
-    def __init__(self, config, deps):
+    def __init__(self, config: DistributionInspectConfig, deps):
         self.config = config
         self.deps = deps
 
@@ -39,22 +39,36 @@ class DistributionInspectImpl(Inspect):
         pass
 
     async def list_routes(self) -> ListRoutesResponse:
-        run_config = self.config.run_config
+        run_config: StackRunConfig = self.config.run_config
 
         ret = []
-        all_endpoints = get_all_api_endpoints()
+        all_endpoints = get_all_api_routes()
         for api, endpoints in all_endpoints.items():
-            providers = run_config.providers.get(api.value, [])
-            ret.extend(
-                [
-                    RouteInfo(
-                        route=e.route,
-                        method=e.method,
-                        provider_types=[p.provider_type for p in providers],
+            # Always include provider and inspect APIs, filter others based on run config
+            if api.value in ["providers", "inspect"]:
+                ret.extend(
+                    [
+                        RouteInfo(
+                            route=e.path,
+                            method=next(iter([m for m in e.methods if m != "HEAD"])),
+                            provider_types=[],  # These APIs don't have "real" providers - they're internal to the stack
+                        )
+                        for e in endpoints
+                    ]
+                )
+            else:
+                providers = run_config.providers.get(api.value, [])
+                if providers:  # Only process if there are providers for this API
+                    ret.extend(
+                        [
+                            RouteInfo(
+                                route=e.path,
+                                method=next(iter([m for m in e.methods if m != "HEAD"])),
+                                provider_types=[p.provider_type for p in providers],
+                            )
+                            for e in endpoints
+                        ]
                     )
-                    for e in endpoints
-                ]
-            )
 
         return ListRoutesResponse(data=ret)
 
diff --git a/llama_stack/distribution/library_client.py b/llama_stack/distribution/library_client.py
index f426bcafe..f32130cf9 100644
--- a/llama_stack/distribution/library_client.py
+++ b/llama_stack/distribution/library_client.py
@@ -9,10 +9,11 @@ import inspect
 import json
 import logging
 import os
+import sys
 from concurrent.futures import ThreadPoolExecutor
 from enum import Enum
 from pathlib import Path
-from typing import Any, Optional, TypeVar, Union, get_args, get_origin
+from typing import Any, TypeVar, Union, get_args, get_origin
 
 import httpx
 import yaml
@@ -30,16 +31,13 @@ from termcolor import cprint
 
 from llama_stack.distribution.build import print_pip_install_help
 from llama_stack.distribution.configure import parse_and_maybe_upgrade_config
-from llama_stack.distribution.datatypes import Api
+from llama_stack.distribution.datatypes import Api, BuildConfig, DistributionSpec
 from llama_stack.distribution.request_headers import (
     PROVIDER_DATA_VAR,
     request_provider_data_context,
 )
 from llama_stack.distribution.resolver import ProviderRegistry
-from llama_stack.distribution.server.endpoints import (
-    find_matching_endpoint,
-    initialize_endpoint_impls,
-)
+from llama_stack.distribution.server.routes import find_matching_route, initialize_route_impls
 from llama_stack.distribution.stack import (
     construct_stack,
     get_stack_run_config_from_template,
@@ -119,8 +117,8 @@ class LlamaStackAsLibraryClient(LlamaStackClient):
         self,
         config_path_or_template_name: str,
         skip_logger_removal: bool = False,
-        custom_provider_registry: Optional[ProviderRegistry] = None,
-        provider_data: Optional[dict[str, Any]] = None,
+        custom_provider_registry: ProviderRegistry | None = None,
+        provider_data: dict[str, Any] | None = None,
     ):
         super().__init__()
         self.async_client = AsyncLlamaStackAsLibraryClient(
@@ -181,8 +179,8 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
     def __init__(
         self,
         config_path_or_template_name: str,
-        custom_provider_registry: Optional[ProviderRegistry] = None,
-        provider_data: Optional[dict[str, Any]] = None,
+        custom_provider_registry: ProviderRegistry | None = None,
+        provider_data: dict[str, Any] | None = None,
     ):
         super().__init__()
         # when using the library client, we should not log to console since many
@@ -207,22 +205,41 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
 
     async def initialize(self) -> bool:
         try:
-            self.endpoint_impls = None
+            self.route_impls = None
             self.impls = await construct_stack(self.config, self.custom_provider_registry)
         except ModuleNotFoundError as _e:
-            cprint(_e.msg, "red")
+            cprint(_e.msg, color="red", file=sys.stderr)
             cprint(
                 "Using llama-stack as a library requires installing dependencies depending on the template (providers) you choose.\n",
-                "yellow",
+                color="yellow",
+                file=sys.stderr,
             )
             if self.config_path_or_template_name.endswith(".yaml"):
-                print_pip_install_help(self.config.providers)
+                # Convert Provider objects to their types
+                provider_types: dict[str, str | list[str]] = {}
+                for api, providers in self.config.providers.items():
+                    types = [p.provider_type for p in providers]
+                    # Convert single-item lists to strings
+                    provider_types[api] = types[0] if len(types) == 1 else types
+                build_config = BuildConfig(
+                    distribution_spec=DistributionSpec(
+                        providers=provider_types,
+                    ),
+                    external_providers_dir=self.config.external_providers_dir,
+                )
+                print_pip_install_help(build_config)
             else:
                 prefix = "!" if in_notebook() else ""
                 cprint(
                     f"Please run:\n\n{prefix}llama stack build --template {self.config_path_or_template_name} --image-type venv\n\n",
                     "yellow",
+                    file=sys.stderr,
                 )
+            cprint(
+                "Please check your internet connection and try again.",
+                "red",
+                file=sys.stderr,
+            )
             raise _e
 
         if Api.telemetry in self.impls:
@@ -234,7 +251,7 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
             safe_config = redact_sensitive_fields(self.config.model_dump())
             console.print(yaml.dump(safe_config, indent=2))
 
-        self.endpoint_impls = initialize_endpoint_impls(self.impls)
+        self.route_impls = initialize_route_impls(self.impls)
         return True
 
     async def request(
@@ -245,13 +262,15 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         stream=False,
         stream_cls=None,
     ):
-        if not self.endpoint_impls:
+        if not self.route_impls:
             raise ValueError("Client not initialized")
 
         # Create headers with provider data if available
-        headers = {}
+        headers = options.headers or {}
         if self.provider_data:
-            headers["X-LlamaStack-Provider-Data"] = json.dumps(self.provider_data)
+            keys = ["X-LlamaStack-Provider-Data", "x-llamastack-provider-data"]
+            if all(key not in headers for key in keys):
+                headers["X-LlamaStack-Provider-Data"] = json.dumps(self.provider_data)
 
         # Use context manager for provider data
         with request_provider_data_context(headers):
@@ -274,11 +293,14 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         cast_to: Any,
         options: Any,
     ):
+        if self.route_impls is None:
+            raise ValueError("Client not initialized")
+
         path = options.url
         body = options.params or {}
         body |= options.json_data or {}
 
-        matched_func, path_params, route = find_matching_endpoint(options.method, path, self.endpoint_impls)
+        matched_func, path_params, route = find_matching_route(options.method, path, self.route_impls)
         body |= path_params
         body = self._convert_body(path, options.method, body)
         await start_trace(route, {"__location__": "library_client"})
@@ -320,10 +342,13 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         options: Any,
         stream_cls: Any,
     ):
+        if self.route_impls is None:
+            raise ValueError("Client not initialized")
+
         path = options.url
         body = options.params or {}
         body |= options.json_data or {}
-        func, path_params, route = find_matching_endpoint(options.method, path, self.endpoint_impls)
+        func, path_params, route = find_matching_route(options.method, path, self.route_impls)
         body |= path_params
 
         body = self._convert_body(path, options.method, body)
@@ -371,11 +396,14 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         )
         return await response.parse()
 
-    def _convert_body(self, path: str, method: str, body: Optional[dict] = None) -> dict:
+    def _convert_body(self, path: str, method: str, body: dict | None = None) -> dict:
         if not body:
             return {}
 
-        func, _, _ = find_matching_endpoint(method, path, self.endpoint_impls)
+        if self.route_impls is None:
+            raise ValueError("Client not initialized")
+
+        func, _, _ = find_matching_route(method, path, self.route_impls)
         sig = inspect.signature(func)
 
         # Strip NOT_GIVENs to use the defaults in signature
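Side note on the header handling introduced above: the library client now leaves a caller-supplied provider-data header untouched and only injects its own value when neither spelling of the header is present. A minimal standalone sketch of that merge rule, assuming a hypothetical merge_provider_data_header helper (not part of the library):

    import json

    PROVIDER_DATA_KEYS = ("X-LlamaStack-Provider-Data", "x-llamastack-provider-data")

    def merge_provider_data_header(headers: dict[str, str] | None, provider_data: dict | None) -> dict[str, str]:
        # Start from the caller's headers, if any, and only inject the
        # provider-data header when neither spelling is already present.
        merged = dict(headers or {})
        if provider_data and all(key not in merged for key in PROVIDER_DATA_KEYS):
            merged["X-LlamaStack-Provider-Data"] = json.dumps(provider_data)
        return merged

    # A caller-supplied header wins over the client-level provider_data.
    print(merge_provider_data_header({"x-llamastack-provider-data": "{}"}, {"api_key": "secret"}))
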
diff --git a/llama_stack/distribution/providers.py b/llama_stack/distribution/providers.py
index 1c00ce264..29b7109dd 100644
--- a/llama_stack/distribution/providers.py
+++ b/llama_stack/distribution/providers.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import asyncio
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -73,14 +73,14 @@ class ProviderImpl(Providers):
 
         raise ValueError(f"Provider {provider_id} not found")
 
-    async def get_providers_health(self) -> Dict[str, Dict[str, HealthResponse]]:
+    async def get_providers_health(self) -> dict[str, dict[str, HealthResponse]]:
         """Get health status for all providers.
 
         Returns:
             Dict[str, Dict[str, HealthResponse]]: A dictionary mapping API names to provider health statuses.
                 Each API maps to a dictionary of provider IDs to their health responses.
         """
-        providers_health: Dict[str, Dict[str, HealthResponse]] = {}
+        providers_health: dict[str, dict[str, HealthResponse]] = {}
         timeout = 1.0
 
         async def check_provider_health(impl: Any) -> tuple[str, HealthResponse] | None:
@@ -99,7 +99,7 @@ class ProviderImpl(Providers):
             try:
                 health = await asyncio.wait_for(impl.health(), timeout=timeout)
                 return api_name, health
-            except asyncio.TimeoutError:
+            except (asyncio.TimeoutError, TimeoutError):
                 return (
                     api_name,
                     HealthResponse(
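A short note on why both exception types are caught above: on Python 3.11+ asyncio.TimeoutError is an alias of the builtin TimeoutError, while on 3.10 they are distinct classes, so catching the tuple keeps the bounded health check portable across versions. A minimal runnable sketch of the pattern, with an intentionally slow coroutine standing in for a provider health call:

    import asyncio

    async def slow_health_check() -> str:
        await asyncio.sleep(5)  # pretend the provider is unresponsive
        return "OK"

    async def main() -> None:
        try:
            # Bound the probe to one second, as the providers health code does.
            print(await asyncio.wait_for(slow_health_check(), timeout=1.0))
        except (asyncio.TimeoutError, TimeoutError):
            # Same handler on Python 3.10 (distinct types) and 3.11+ (aliases).
            print("health check timed out")

    asyncio.run(main())
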
diff --git a/llama_stack/distribution/request_headers.py b/llama_stack/distribution/request_headers.py
index f9cde2cdf..b03d2dee8 100644
--- a/llama_stack/distribution/request_headers.py
+++ b/llama_stack/distribution/request_headers.py
@@ -7,7 +7,8 @@
 import contextvars
 import json
 import logging
-from typing import Any, ContextManager, Dict, List, Optional
+from contextlib import AbstractContextManager
+from typing import Any
 
 from .utils.dynamic import instantiate_class_type
 
@@ -17,11 +18,11 @@ log = logging.getLogger(__name__)
 PROVIDER_DATA_VAR = contextvars.ContextVar("provider_data", default=None)
 
 
-class RequestProviderDataContext(ContextManager):
+class RequestProviderDataContext(AbstractContextManager):
     """Context manager for request provider data"""
 
     def __init__(
-        self, provider_data: Optional[Dict[str, Any]] = None, auth_attributes: Optional[Dict[str, List[str]]] = None
+        self, provider_data: dict[str, Any] | None = None, auth_attributes: dict[str, list[str]] | None = None
     ):
         self.provider_data = provider_data or {}
         if auth_attributes:
@@ -43,7 +44,8 @@ class RequestProviderDataContext(ContextManager):
 class NeedsRequestProviderData:
     def get_request_provider_data(self) -> Any:
         spec = self.__provider_spec__
-        assert spec, f"Provider spec not set on {self.__class__}"
+        if not spec:
+            raise ValueError(f"Provider spec not set on {self.__class__}")
 
         provider_type = spec.provider_type
         validator_class = spec.provider_data_validator
@@ -63,7 +65,7 @@ class NeedsRequestProviderData:
             return None
 
 
-def parse_request_provider_data(headers: Dict[str, str]) -> Optional[Dict[str, Any]]:
+def parse_request_provider_data(headers: dict[str, str]) -> dict[str, Any] | None:
     """Parse provider data from request headers"""
     keys = [
         "X-LlamaStack-Provider-Data",
@@ -86,14 +88,14 @@ def parse_request_provider_data(headers: Dict[str, str]) -> Optional[Dict[str, A
 
 
 def request_provider_data_context(
-    headers: Dict[str, str], auth_attributes: Optional[Dict[str, List[str]]] = None
-) -> ContextManager:
+    headers: dict[str, str], auth_attributes: dict[str, list[str]] | None = None
+) -> AbstractContextManager:
     """Context manager that sets request provider data from headers and auth attributes for the duration of the context"""
     provider_data = parse_request_provider_data(headers)
     return RequestProviderDataContext(provider_data, auth_attributes)
 
 
-def get_auth_attributes() -> Optional[Dict[str, List[str]]]:
+def get_auth_attributes() -> dict[str, list[str]] | None:
     """Helper to retrieve auth attributes from the provider data context"""
     provider_data = PROVIDER_DATA_VAR.get()
     if not provider_data:
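The request_headers changes keep the contextvar-based design while switching to contextlib.AbstractContextManager and builtin generics. For readers new to that pattern, here is a small self-contained sketch of setting per-request data in a ContextVar and restoring it on exit; the names are illustrative, not the module's API:

    import contextvars
    from contextlib import AbstractContextManager

    REQUEST_DATA: contextvars.ContextVar[dict | None] = contextvars.ContextVar("request_data", default=None)

    class RequestDataContext(AbstractContextManager):
        def __init__(self, data: dict | None = None):
            self.data = data or {}
            self._token = None

        def __enter__(self):
            # Bind the data for the duration of the request.
            self._token = REQUEST_DATA.set(self.data)
            return self

        def __exit__(self, exc_type, exc, tb):
            # Restore whatever was bound before this request.
            REQUEST_DATA.reset(self._token)

    with RequestDataContext({"api_key": "abc"}):
        print(REQUEST_DATA.get())  # {'api_key': 'abc'}
    print(REQUEST_DATA.get())      # None
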
diff --git a/llama_stack/distribution/resolver.py b/llama_stack/distribution/resolver.py
index e9a594eba..b7c7cb87f 100644
--- a/llama_stack/distribution/resolver.py
+++ b/llama_stack/distribution/resolver.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 import importlib
 import inspect
-from typing import Any, Dict, List, Set, Tuple
+from typing import Any
 
 from llama_stack.apis.agents import Agents
 from llama_stack.apis.benchmarks import Benchmarks
@@ -13,7 +13,7 @@ from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.eval import Eval
 from llama_stack.apis.files import Files
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import Inference, InferenceProvider
 from llama_stack.apis.inspect import Inspect
 from llama_stack.apis.models import Models
 from llama_stack.apis.post_training import PostTraining
@@ -47,7 +47,7 @@ from llama_stack.providers.datatypes import (
     RemoteProviderSpec,
     ScoringFunctionsProtocolPrivate,
     ShieldsProtocolPrivate,
-    ToolsProtocolPrivate,
+    ToolGroupsProtocolPrivate,
     VectorDBsProtocolPrivate,
 )
 
@@ -58,7 +58,7 @@ class InvalidProviderError(Exception):
     pass
 
 
-def api_protocol_map() -> Dict[Api, Any]:
+def api_protocol_map() -> dict[Api, Any]:
     return {
         Api.providers: ProvidersAPI,
         Api.agents: Agents,
@@ -83,10 +83,17 @@ def api_protocol_map() -> Dict[Api, Any]:
     }
 
 
-def additional_protocols_map() -> Dict[Api, Any]:
+def api_protocol_map_for_compliance_check() -> dict[Api, Any]:
+    return {
+        **api_protocol_map(),
+        Api.inference: InferenceProvider,
+    }
+
+
+def additional_protocols_map() -> dict[Api, Any]:
     return {
         Api.inference: (ModelsProtocolPrivate, Models, Api.models),
-        Api.tool_groups: (ToolsProtocolPrivate, ToolGroups, Api.tool_groups),
+        Api.tool_groups: (ToolGroupsProtocolPrivate, ToolGroups, Api.tool_groups),
         Api.vector_io: (VectorDBsProtocolPrivate, VectorDBs, Api.vector_dbs),
         Api.safety: (ShieldsProtocolPrivate, Shields, Api.shields),
         Api.datasetio: (DatasetsProtocolPrivate, Datasets, Api.datasets),
@@ -104,14 +111,14 @@ class ProviderWithSpec(Provider):
     spec: ProviderSpec
 
 
-ProviderRegistry = Dict[Api, Dict[str, ProviderSpec]]
+ProviderRegistry = dict[Api, dict[str, ProviderSpec]]
 
 
 async def resolve_impls(
     run_config: StackRunConfig,
     provider_registry: ProviderRegistry,
     dist_registry: DistributionRegistry,
-) -> Dict[Api, Any]:
+) -> dict[Api, Any]:
     """
     Resolves provider implementations by:
     1. Validating and organizing providers.
@@ -133,10 +140,10 @@ async def resolve_impls(
 
     sorted_providers = sort_providers_by_deps(providers_with_specs, run_config)
 
-    return await instantiate_providers(sorted_providers, router_apis, dist_registry)
+    return await instantiate_providers(sorted_providers, router_apis, dist_registry, run_config)
 
 
-def specs_for_autorouted_apis(apis_to_serve: List[str] | Set[str]) -> Dict[str, Dict[str, ProviderWithSpec]]:
+def specs_for_autorouted_apis(apis_to_serve: list[str] | set[str]) -> dict[str, dict[str, ProviderWithSpec]]:
     """Generates specifications for automatically routed APIs."""
     specs = {}
     for info in builtin_automatically_routed_apis():
@@ -178,10 +185,10 @@ def specs_for_autorouted_apis(apis_to_serve: List[str] | Set[str]) -> Dict[str,
 
 
 def validate_and_prepare_providers(
-    run_config: StackRunConfig, provider_registry: ProviderRegistry, routing_table_apis: Set[Api], router_apis: Set[Api]
-) -> Dict[str, Dict[str, ProviderWithSpec]]:
+    run_config: StackRunConfig, provider_registry: ProviderRegistry, routing_table_apis: set[Api], router_apis: set[Api]
+) -> dict[str, dict[str, ProviderWithSpec]]:
     """Validates providers, handles deprecations, and organizes them into a spec dictionary."""
-    providers_with_specs: Dict[str, Dict[str, ProviderWithSpec]] = {}
+    providers_with_specs: dict[str, dict[str, ProviderWithSpec]] = {}
 
     for api_str, providers in run_config.providers.items():
         api = Api(api_str)
@@ -222,10 +229,10 @@ def validate_provider(provider: Provider, api: Api, provider_registry: ProviderR
 
 
 def sort_providers_by_deps(
-    providers_with_specs: Dict[str, Dict[str, ProviderWithSpec]], run_config: StackRunConfig
-) -> List[Tuple[str, ProviderWithSpec]]:
+    providers_with_specs: dict[str, dict[str, ProviderWithSpec]], run_config: StackRunConfig
+) -> list[tuple[str, ProviderWithSpec]]:
     """Sorts providers based on their dependencies."""
-    sorted_providers: List[Tuple[str, ProviderWithSpec]] = topological_sort(
+    sorted_providers: list[tuple[str, ProviderWithSpec]] = topological_sort(
         {k: list(v.values()) for k, v in providers_with_specs.items()}
     )
 
@@ -236,11 +243,14 @@ def sort_providers_by_deps(
 
 
 async def instantiate_providers(
-    sorted_providers: List[Tuple[str, ProviderWithSpec]], router_apis: Set[Api], dist_registry: DistributionRegistry
-) -> Dict:
+    sorted_providers: list[tuple[str, ProviderWithSpec]],
+    router_apis: set[Api],
+    dist_registry: DistributionRegistry,
+    run_config: StackRunConfig,
+) -> dict:
     """Instantiates providers asynchronously while managing dependencies."""
-    impls: Dict[Api, Any] = {}
-    inner_impls_by_provider_id: Dict[str, Dict[str, Any]] = {f"inner-{x.value}": {} for x in router_apis}
+    impls: dict[Api, Any] = {}
+    inner_impls_by_provider_id: dict[str, dict[str, Any]] = {f"inner-{x.value}": {} for x in router_apis}
     for api_str, provider in sorted_providers:
         deps = {a: impls[a] for a in provider.spec.api_dependencies}
         for a in provider.spec.optional_api_dependencies:
@@ -251,7 +261,7 @@ async def instantiate_providers(
         if isinstance(provider.spec, RoutingTableProviderSpec):
             inner_impls = inner_impls_by_provider_id[f"inner-{provider.spec.router_api.value}"]
 
-        impl = await instantiate_provider(provider, deps, inner_impls, dist_registry)
+        impl = await instantiate_provider(provider, deps, inner_impls, dist_registry, run_config)
 
         if api_str.startswith("inner-"):
             inner_impls_by_provider_id[api_str][provider.provider_id] = impl
@@ -263,9 +273,9 @@ async def instantiate_providers(
 
 
 def topological_sort(
-    providers_with_specs: Dict[str, List[ProviderWithSpec]],
-) -> List[Tuple[str, ProviderWithSpec]]:
-    def dfs(kv, visited: Set[str], stack: List[str]):
+    providers_with_specs: dict[str, list[ProviderWithSpec]],
+) -> list[tuple[str, ProviderWithSpec]]:
+    def dfs(kv, visited: set[str], stack: list[str]):
         api_str, providers = kv
         visited.add(api_str)
 
@@ -280,8 +290,8 @@ def topological_sort(
 
         stack.append(api_str)
 
-    visited: Set[str] = set()
-    stack: List[str] = []
+    visited: set[str] = set()
+    stack: list[str] = []
 
     for api_str, providers in providers_with_specs.items():
         if api_str not in visited:
@@ -298,13 +308,11 @@ def topological_sort(
 # returns a class implementing the protocol corresponding to the Api
 async def instantiate_provider(
     provider: ProviderWithSpec,
-    deps: Dict[Api, Any],
-    inner_impls: Dict[str, Any],
+    deps: dict[Api, Any],
+    inner_impls: dict[str, Any],
     dist_registry: DistributionRegistry,
+    run_config: StackRunConfig,
 ):
-    protocols = api_protocol_map()
-    additional_protocols = additional_protocols_map()
-
     provider_spec = provider.spec
     if not hasattr(provider_spec, "module"):
         raise AttributeError(f"ProviderSpec of type {type(provider_spec)} does not have a 'module' attribute")
@@ -323,7 +331,7 @@ async def instantiate_provider(
         method = "get_auto_router_impl"
 
         config = None
-        args = [provider_spec.api, deps[provider_spec.routing_table_api], deps]
+        args = [provider_spec.api, deps[provider_spec.routing_table_api], deps, run_config]
     elif isinstance(provider_spec, RoutingTableProviderSpec):
         method = "get_routing_table_impl"
 
@@ -342,6 +350,8 @@ async def instantiate_provider(
     impl.__provider_spec__ = provider_spec
     impl.__provider_config__ = config
 
+    protocols = api_protocol_map_for_compliance_check()
+    additional_protocols = additional_protocols_map()
     # TODO: check compliance for special tool groups
     # the impl should be for Api.tool_runtime, the name should be the special tool group, the protocol should be the special tool group protocol
     check_protocol_compliance(impl, protocols[provider_spec.api])
@@ -391,8 +401,8 @@ def check_protocol_compliance(obj: Any, protocol: Any) -> None:
 
 async def resolve_remote_stack_impls(
     config: RemoteProviderConfig,
-    apis: List[str],
-) -> Dict[Api, Any]:
+    apis: list[str],
+) -> dict[Api, Any]:
     protocols = api_protocol_map()
     additional_protocols = additional_protocols_map()
 
diff --git a/llama_stack/distribution/routers/__init__.py b/llama_stack/distribution/routers/__init__.py
index d0fca8771..1358d5812 100644
--- a/llama_stack/distribution/routers/__init__.py
+++ b/llama_stack/distribution/routers/__init__.py
@@ -4,29 +4,29 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import RoutedProtocol
+from llama_stack.distribution.stack import StackRunConfig
 from llama_stack.distribution.store import DistributionRegistry
 from llama_stack.providers.datatypes import Api, RoutingTable
-
-from .routing_tables import (
-    BenchmarksRoutingTable,
-    DatasetsRoutingTable,
-    ModelsRoutingTable,
-    ScoringFunctionsRoutingTable,
-    ShieldsRoutingTable,
-    ToolGroupsRoutingTable,
-    VectorDBsRoutingTable,
-)
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
 
 
 async def get_routing_table_impl(
     api: Api,
-    impls_by_provider_id: Dict[str, RoutedProtocol],
+    impls_by_provider_id: dict[str, RoutedProtocol],
     _deps,
     dist_registry: DistributionRegistry,
 ) -> Any:
+    from ..routing_tables.benchmarks import BenchmarksRoutingTable
+    from ..routing_tables.datasets import DatasetsRoutingTable
+    from ..routing_tables.models import ModelsRoutingTable
+    from ..routing_tables.scoring_functions import ScoringFunctionsRoutingTable
+    from ..routing_tables.shields import ShieldsRoutingTable
+    from ..routing_tables.toolgroups import ToolGroupsRoutingTable
+    from ..routing_tables.vector_dbs import VectorDBsRoutingTable
+
     api_to_tables = {
         "vector_dbs": VectorDBsRoutingTable,
         "models": ModelsRoutingTable,
@@ -45,16 +45,15 @@ async def get_routing_table_impl(
     return impl
 
 
-async def get_auto_router_impl(api: Api, routing_table: RoutingTable, deps: Dict[str, Any]) -> Any:
-    from .routers import (
-        DatasetIORouter,
-        EvalRouter,
-        InferenceRouter,
-        SafetyRouter,
-        ScoringRouter,
-        ToolRuntimeRouter,
-        VectorIORouter,
-    )
+async def get_auto_router_impl(
+    api: Api, routing_table: RoutingTable, deps: dict[str, Any], run_config: StackRunConfig
+) -> Any:
+    from .datasets import DatasetIORouter
+    from .eval_scoring import EvalRouter, ScoringRouter
+    from .inference import InferenceRouter
+    from .safety import SafetyRouter
+    from .tool_runtime import ToolRuntimeRouter
+    from .vector_io import VectorIORouter
 
     api_to_routers = {
         "vector_io": VectorIORouter,
@@ -76,6 +75,12 @@ async def get_auto_router_impl(api: Api, routing_table: RoutingTable, deps: Dict
         if dep_api in deps:
             api_to_dep_impl[dep_name] = deps[dep_api]
 
+    # TODO: pass configs to routers instead of constructing the store here
+    if api == Api.inference and run_config.inference_store:
+        inference_store = InferenceStore(run_config.inference_store)
+        await inference_store.initialize()
+        api_to_dep_impl["store"] = inference_store
+
     impl = api_to_routers[api.value](routing_table, **api_to_dep_impl)
     await impl.initialize()
     return impl
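Two things are worth calling out in the router factory above: the routing-table and router imports move inside the functions, deferring them until they are needed, and an InferenceStore is wired into the inference router only when the run config declares one. A minimal sketch of that conditional-dependency pattern, using illustrative StoreSketch and build_inference_router names rather than the real classes:

    import asyncio

    class StoreSketch:
        """Stand-in for an inference store; records completions in memory."""
        def __init__(self, config: dict):
            self.config = config
            self.rows: list[dict] = []

        async def initialize(self) -> None:
            pass  # a real store would open its database here

    class InferenceRouterSketch:
        def __init__(self, routing_table, store: StoreSketch | None = None):
            self.routing_table = routing_table
            self.store = store

    async def build_inference_router(routing_table, store_config: dict | None = None) -> InferenceRouterSketch:
        # Only construct and initialize the store when the config asks for one.
        kwargs = {}
        if store_config is not None:
            store = StoreSketch(store_config)
            await store.initialize()
            kwargs["store"] = store
        return InferenceRouterSketch(routing_table, **kwargs)

    router = asyncio.run(build_inference_router(routing_table={}, store_config={"type": "sqlite"}))
    print(router.store is not None)  # True
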
diff --git a/llama_stack/distribution/routers/datasets.py b/llama_stack/distribution/routers/datasets.py
new file mode 100644
index 000000000..6f28756c9
--- /dev/null
+++ b/llama_stack/distribution/routers/datasets.py
@@ -0,0 +1,71 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.common.responses import PaginatedResponse
+from llama_stack.apis.datasetio import DatasetIO
+from llama_stack.apis.datasets import DatasetPurpose, DataSource
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import RoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+class DatasetIORouter(DatasetIO):
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+    ) -> None:
+        logger.debug("Initializing DatasetIORouter")
+        self.routing_table = routing_table
+
+    async def initialize(self) -> None:
+        logger.debug("DatasetIORouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("DatasetIORouter.shutdown")
+        pass
+
+    async def register_dataset(
+        self,
+        purpose: DatasetPurpose,
+        source: DataSource,
+        metadata: dict[str, Any] | None = None,
+        dataset_id: str | None = None,
+    ) -> None:
+        logger.debug(
+            f"DatasetIORouter.register_dataset: {purpose=} {source=} {metadata=} {dataset_id=}",
+        )
+        await self.routing_table.register_dataset(
+            purpose=purpose,
+            source=source,
+            metadata=metadata,
+            dataset_id=dataset_id,
+        )
+
+    async def iterrows(
+        self,
+        dataset_id: str,
+        start_index: int | None = None,
+        limit: int | None = None,
+    ) -> PaginatedResponse:
+        logger.debug(
+            f"DatasetIORouter.iterrows: {dataset_id}, {start_index=} {limit=}",
+        )
+        return await self.routing_table.get_provider_impl(dataset_id).iterrows(
+            dataset_id=dataset_id,
+            start_index=start_index,
+            limit=limit,
+        )
+
+    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
+        logger.debug(f"DatasetIORouter.append_rows: {dataset_id}, {len(rows)} rows")
+        return await self.routing_table.get_provider_impl(dataset_id).append_rows(
+            dataset_id=dataset_id,
+            rows=rows,
+        )
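The new datasets router is a thin delegator: each call resolves the provider for the given dataset_id through the routing table and forwards the arguments unchanged. A tiny runnable sketch of that delegation, with fake provider and routing-table classes standing in for the real ones:

    import asyncio

    class FakeProvider:
        async def iterrows(self, dataset_id: str, start_index=None, limit=None):
            return {"dataset": dataset_id, "rows": [], "start_index": start_index, "limit": limit}

    class FakeRoutingTable:
        def __init__(self, mapping: dict[str, FakeProvider]):
            self.mapping = mapping

        def get_provider_impl(self, dataset_id: str) -> FakeProvider:
            return self.mapping[dataset_id]

    class DatasetRouterSketch:
        def __init__(self, routing_table: FakeRoutingTable):
            self.routing_table = routing_table

        async def iterrows(self, dataset_id: str, start_index=None, limit=None):
            # Resolve the provider for this dataset and forward the call unchanged.
            impl = self.routing_table.get_provider_impl(dataset_id)
            return await impl.iterrows(dataset_id, start_index=start_index, limit=limit)

    table = FakeRoutingTable({"my-dataset": FakeProvider()})
    print(asyncio.run(DatasetRouterSketch(table).iterrows("my-dataset", limit=10)))
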
diff --git a/llama_stack/distribution/routers/eval_scoring.py b/llama_stack/distribution/routers/eval_scoring.py
new file mode 100644
index 000000000..fd0bb90a7
--- /dev/null
+++ b/llama_stack/distribution/routers/eval_scoring.py
@@ -0,0 +1,148 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.eval import BenchmarkConfig, Eval, EvaluateResponse, Job
+from llama_stack.apis.scoring import (
+    ScoreBatchResponse,
+    ScoreResponse,
+    Scoring,
+    ScoringFnParams,
+)
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import RoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+class ScoringRouter(Scoring):
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+    ) -> None:
+        logger.debug("Initializing ScoringRouter")
+        self.routing_table = routing_table
+
+    async def initialize(self) -> None:
+        logger.debug("ScoringRouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("ScoringRouter.shutdown")
+        pass
+
+    async def score_batch(
+        self,
+        dataset_id: str,
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
+        save_results_dataset: bool = False,
+    ) -> ScoreBatchResponse:
+        logger.debug(f"ScoringRouter.score_batch: {dataset_id}")
+        res = {}
+        for fn_identifier in scoring_functions.keys():
+            score_response = await self.routing_table.get_provider_impl(fn_identifier).score_batch(
+                dataset_id=dataset_id,
+                scoring_functions={fn_identifier: scoring_functions[fn_identifier]},
+            )
+            res.update(score_response.results)
+
+        if save_results_dataset:
+            raise NotImplementedError("Save results dataset not implemented yet")
+
+        return ScoreBatchResponse(
+            results=res,
+        )
+
+    async def score(
+        self,
+        input_rows: list[dict[str, Any]],
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
+    ) -> ScoreResponse:
+        logger.debug(f"ScoringRouter.score: {len(input_rows)} rows, {len(scoring_functions)} functions")
+        res = {}
+        # look up and map each scoring function to its provider impl
+        for fn_identifier in scoring_functions.keys():
+            score_response = await self.routing_table.get_provider_impl(fn_identifier).score(
+                input_rows=input_rows,
+                scoring_functions={fn_identifier: scoring_functions[fn_identifier]},
+            )
+            res.update(score_response.results)
+
+        return ScoreResponse(results=res)
+
+
+class EvalRouter(Eval):
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+    ) -> None:
+        logger.debug("Initializing EvalRouter")
+        self.routing_table = routing_table
+
+    async def initialize(self) -> None:
+        logger.debug("EvalRouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("EvalRouter.shutdown")
+        pass
+
+    async def run_eval(
+        self,
+        benchmark_id: str,
+        benchmark_config: BenchmarkConfig,
+    ) -> Job:
+        logger.debug(f"EvalRouter.run_eval: {benchmark_id}")
+        return await self.routing_table.get_provider_impl(benchmark_id).run_eval(
+            benchmark_id=benchmark_id,
+            benchmark_config=benchmark_config,
+        )
+
+    async def evaluate_rows(
+        self,
+        benchmark_id: str,
+        input_rows: list[dict[str, Any]],
+        scoring_functions: list[str],
+        benchmark_config: BenchmarkConfig,
+    ) -> EvaluateResponse:
+        logger.debug(f"EvalRouter.evaluate_rows: {benchmark_id}, {len(input_rows)} rows")
+        return await self.routing_table.get_provider_impl(benchmark_id).evaluate_rows(
+            benchmark_id=benchmark_id,
+            input_rows=input_rows,
+            scoring_functions=scoring_functions,
+            benchmark_config=benchmark_config,
+        )
+
+    async def job_status(
+        self,
+        benchmark_id: str,
+        job_id: str,
+    ) -> Job:
+        logger.debug(f"EvalRouter.job_status: {benchmark_id}, {job_id}")
+        return await self.routing_table.get_provider_impl(benchmark_id).job_status(benchmark_id, job_id)
+
+    async def job_cancel(
+        self,
+        benchmark_id: str,
+        job_id: str,
+    ) -> None:
+        logger.debug(f"EvalRouter.job_cancel: {benchmark_id}, {job_id}")
+        await self.routing_table.get_provider_impl(benchmark_id).job_cancel(
+            benchmark_id,
+            job_id,
+        )
+
+    async def job_result(
+        self,
+        benchmark_id: str,
+        job_id: str,
+    ) -> EvaluateResponse:
+        logger.debug(f"EvalRouter.job_result: {benchmark_id}, {job_id}")
+        return await self.routing_table.get_provider_impl(benchmark_id).job_result(
+            benchmark_id,
+            job_id,
+        )
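ScoringRouter and EvalRouter fan work out per identifier: score and score_batch call the provider registered for each scoring function separately and merge the per-function results into one response. A standalone sketch of that fan-out-and-merge shape, with fabricated provider names:

    import asyncio

    class FakeScoringProvider:
        def __init__(self, name: str):
            self.name = name

        async def score(self, input_rows, scoring_functions):
            # A real provider returns per-function results; fake a constant score.
            return {fn: {"score": 1.0, "provider": self.name} for fn in scoring_functions}

    async def score(routing, input_rows, scoring_functions):
        results = {}
        for fn in scoring_functions:
            # One provider call per scoring function, merged into a single dict.
            provider = routing[fn]
            results.update(await provider.score(input_rows, [fn]))
        return results

    routing = {"basic::equality": FakeScoringProvider("basic"), "llm::judge": FakeScoringProvider("llm")}
    print(asyncio.run(score(routing, [{"input": "x"}], ["basic::equality", "llm::judge"])))
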
diff --git a/llama_stack/distribution/routers/routers.py b/llama_stack/distribution/routers/inference.py
similarity index 52%
rename from llama_stack/distribution/routers/routers.py
rename to llama_stack/distribution/routers/inference.py
index d88df00bd..763bd9105 100644
--- a/llama_stack/distribution/routers/routers.py
+++ b/llama_stack/distribution/routers/inference.py
@@ -6,22 +6,17 @@
 
 import asyncio
 import time
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Annotated, Any
 
 from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam
 from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam
 from pydantic import Field, TypeAdapter
-from typing_extensions import Annotated
 
 from llama_stack.apis.common.content_types import (
-    URL,
     InterleavedContent,
     InterleavedContentItem,
 )
-from llama_stack.apis.common.responses import PaginatedResponse
-from llama_stack.apis.datasetio import DatasetIO
-from llama_stack.apis.datasets import DatasetPurpose, DataSource
-from llama_stack.apis.eval import BenchmarkConfig, Eval, EvaluateResponse, Job
 from llama_stack.apis.inference import (
     BatchChatCompletionResponse,
     BatchCompletionResponse,
@@ -32,8 +27,11 @@ from llama_stack.apis.inference import (
     EmbeddingsResponse,
     EmbeddingTaskType,
     Inference,
+    ListOpenAIChatCompletionResponse,
     LogProbConfig,
     Message,
+    OpenAICompletionWithInputMessages,
+    Order,
     ResponseFormat,
     SamplingParams,
     StopReason,
@@ -47,104 +45,36 @@ from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAICompletion,
+    OpenAIEmbeddingsResponse,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
 )
 from llama_stack.apis.models import Model, ModelType
-from llama_stack.apis.safety import RunShieldResponse, Safety
-from llama_stack.apis.scoring import (
-    ScoreBatchResponse,
-    ScoreResponse,
-    Scoring,
-    ScoringFnParams,
-)
-from llama_stack.apis.shields import Shield
 from llama_stack.apis.telemetry import MetricEvent, MetricInResponse, Telemetry
-from llama_stack.apis.tools import (
-    ListToolDefsResponse,
-    RAGDocument,
-    RAGQueryConfig,
-    RAGQueryResult,
-    RAGToolRuntime,
-    ToolRuntime,
-)
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
 from llama_stack.log import get_logger
 from llama_stack.models.llama.llama3.chat_format import ChatFormat
 from llama_stack.models.llama.llama3.tokenizer import Tokenizer
 from llama_stack.providers.datatypes import HealthResponse, HealthStatus, RoutingTable
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
+from llama_stack.providers.utils.inference.stream_utils import stream_and_store_openai_completion
 from llama_stack.providers.utils.telemetry.tracing import get_current_span
 
 logger = get_logger(name=__name__, category="core")
 
 
-class VectorIORouter(VectorIO):
-    """Routes to an provider based on the vector db identifier"""
-
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing VectorIORouter")
-        self.routing_table = routing_table
-
-    async def initialize(self) -> None:
-        logger.debug("VectorIORouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("VectorIORouter.shutdown")
-        pass
-
-    async def register_vector_db(
-        self,
-        vector_db_id: str,
-        embedding_model: str,
-        embedding_dimension: Optional[int] = 384,
-        provider_id: Optional[str] = None,
-        provider_vector_db_id: Optional[str] = None,
-    ) -> None:
-        logger.debug(f"VectorIORouter.register_vector_db: {vector_db_id}, {embedding_model}")
-        await self.routing_table.register_vector_db(
-            vector_db_id,
-            embedding_model,
-            embedding_dimension,
-            provider_id,
-            provider_vector_db_id,
-        )
-
-    async def insert_chunks(
-        self,
-        vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
-    ) -> None:
-        logger.debug(
-            f"VectorIORouter.insert_chunks: {vector_db_id}, {len(chunks)} chunks, ttl_seconds={ttl_seconds}, chunk_ids={[chunk.metadata['document_id'] for chunk in chunks[:3]]}{' and more...' if len(chunks) > 3 else ''}",
-        )
-        return await self.routing_table.get_provider_impl(vector_db_id).insert_chunks(vector_db_id, chunks, ttl_seconds)
-
-    async def query_chunks(
-        self,
-        vector_db_id: str,
-        query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
-    ) -> QueryChunksResponse:
-        logger.debug(f"VectorIORouter.query_chunks: {vector_db_id}")
-        return await self.routing_table.get_provider_impl(vector_db_id).query_chunks(vector_db_id, query, params)
-
-
 class InferenceRouter(Inference):
     """Routes to an provider based on the model"""
 
     def __init__(
         self,
         routing_table: RoutingTable,
-        telemetry: Optional[Telemetry] = None,
+        telemetry: Telemetry | None = None,
+        store: InferenceStore | None = None,
     ) -> None:
         logger.debug("Initializing InferenceRouter")
         self.routing_table = routing_table
         self.telemetry = telemetry
+        self.store = store
         if self.telemetry:
             self.tokenizer = Tokenizer.get_instance()
             self.formatter = ChatFormat(self.tokenizer)
@@ -160,10 +90,10 @@ class InferenceRouter(Inference):
     async def register_model(
         self,
         model_id: str,
-        provider_model_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        model_type: Optional[ModelType] = None,
+        provider_model_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+        model_type: ModelType | None = None,
     ) -> None:
         logger.debug(
             f"InferenceRouter.register_model: {model_id=} {provider_model_id=} {provider_id=} {metadata=} {model_type=}",
@@ -176,7 +106,7 @@ class InferenceRouter(Inference):
         completion_tokens: int,
         total_tokens: int,
         model: Model,
-    ) -> List[MetricEvent]:
+    ) -> list[MetricEvent]:
         """Constructs a list of MetricEvent objects containing token usage metrics.
 
         Args:
@@ -221,7 +151,7 @@ class InferenceRouter(Inference):
         completion_tokens: int,
         total_tokens: int,
         model: Model,
-    ) -> List[MetricInResponse]:
+    ) -> list[MetricInResponse]:
         metrics = self._construct_metrics(prompt_tokens, completion_tokens, total_tokens, model)
         if self.telemetry:
             for metric in metrics:
@@ -230,9 +160,9 @@ class InferenceRouter(Inference):
 
     async def _count_tokens(
         self,
-        messages: List[Message] | InterleavedContent,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-    ) -> Optional[int]:
+        messages: list[Message] | InterleavedContent,
+        tool_prompt_format: ToolPromptFormat | None = None,
+    ) -> int | None:
         if isinstance(messages, list):
             encoded = self.formatter.encode_dialog_prompt(messages, tool_prompt_format)
         else:
@@ -242,16 +172,16 @@ class InferenceRouter(Inference):
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = None,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = None,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         logger.debug(
             f"InferenceRouter.chat_completion: {model_id=}, {stream=}, {messages=}, {tools=}, {tool_config=}, {response_format=}",
         )
@@ -351,12 +281,12 @@ class InferenceRouter(Inference):
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ) -> BatchChatCompletionResponse:
         logger.debug(
             f"InferenceRouter.batch_chat_completion: {model_id=}, {len(messages_batch)=}, {sampling_params=}, {response_format=}, {logprobs=}",
@@ -376,10 +306,10 @@ class InferenceRouter(Inference):
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -439,10 +369,10 @@ class InferenceRouter(Inference):
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ) -> BatchCompletionResponse:
         logger.debug(
             f"InferenceRouter.batch_completion: {model_id=}, {len(content_batch)=}, {sampling_params=}, {response_format=}, {logprobs=}",
@@ -453,10 +383,10 @@ class InferenceRouter(Inference):
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         logger.debug(f"InferenceRouter.embeddings: {model_id}")
         model = await self.routing_table.get_model(model_id)
@@ -475,24 +405,24 @@ class InferenceRouter(Inference):
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         logger.debug(
             f"InferenceRouter.openai_completion: {model=}, {stream=}, {prompt=}",
@@ -531,29 +461,29 @@ class InferenceRouter(Inference):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: Annotated[List[OpenAIMessageParam], Field(..., min_length=1)],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: Annotated[list[OpenAIMessageParam], Field(..., min_length=1)],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         logger.debug(
             f"InferenceRouter.openai_chat_completion: {model=}, {stream=}, {messages=}",
         )
@@ -573,6 +503,12 @@ class InferenceRouter(Inference):
             for tool in tools:
                 TypeAdapter(OpenAIChatCompletionToolParam).validate_python(tool)
 
+        # Some providers make tool calls even when tool_choice is "none"
+        # so just clear them both out to avoid unexpected tool calls
+        if tool_choice == "none" and tools is not None:
+            tool_choice = None
+            tools = None
+
         params = dict(
             model=model_obj.identifier,
             messages=messages,
@@ -600,9 +536,71 @@ class InferenceRouter(Inference):
         )
 
         provider = self.routing_table.get_provider_impl(model_obj.identifier)
-        return await provider.openai_chat_completion(**params)
+        if stream:
+            response_stream = await provider.openai_chat_completion(**params)
+            if self.store:
+                return stream_and_store_openai_completion(response_stream, model, self.store, messages)
+            return response_stream
+        else:
+            response = await self._nonstream_openai_chat_completion(provider, params)
+            if self.store:
+                await self.store.store_chat_completion(response, messages)
+            return response
 
-    async def health(self) -> Dict[str, HealthResponse]:
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        logger.debug(
+            f"InferenceRouter.openai_embeddings: {model=}, input_type={type(input)}, {encoding_format=}, {dimensions=}",
+        )
+        model_obj = await self.routing_table.get_model(model)
+        if model_obj is None:
+            raise ValueError(f"Model '{model}' not found")
+        if model_obj.model_type != ModelType.embedding:
+            raise ValueError(f"Model '{model}' is not an embedding model")
+
+        params = dict(
+            model=model_obj.identifier,
+            input=input,
+            encoding_format=encoding_format,
+            dimensions=dimensions,
+            user=user,
+        )
+
+        provider = self.routing_table.get_provider_impl(model_obj.identifier)
+        return await provider.openai_embeddings(**params)
+
+    async def list_chat_completions(
+        self,
+        after: str | None = None,
+        limit: int | None = 20,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIChatCompletionResponse:
+        if self.store:
+            return await self.store.list_chat_completions(after, limit, model, order)
+        raise NotImplementedError("List chat completions is not supported: inference store is not configured.")
+
+    async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
+        if self.store:
+            return await self.store.get_chat_completion(completion_id)
+        raise NotImplementedError("Get chat completion is not supported: inference store is not configured.")
+
+    async def _nonstream_openai_chat_completion(self, provider: Inference, params: dict) -> OpenAIChatCompletion:
+        response = await provider.openai_chat_completion(**params)
+        for choice in response.choices:
+            # some providers return an empty list for no tool calls in non-streaming responses
+            # but the OpenAI API returns None. So, set tool_calls to None if it's empty
+            if choice.message and choice.message.tool_calls is not None and len(choice.message.tool_calls) == 0:
+                choice.message.tool_calls = None
+        return response
+
+    async def health(self) -> dict[str, HealthResponse]:
         health_statuses = {}
         timeout = 0.5
         for provider_id, impl in self.routing_table.impls_by_provider_id.items():
@@ -612,7 +610,7 @@ class InferenceRouter(Inference):
                     continue
                 health = await asyncio.wait_for(impl.health(), timeout=timeout)
                 health_statuses[provider_id] = health
-            except asyncio.TimeoutError:
+            except (asyncio.TimeoutError, TimeoutError):
                 health_statuses[provider_id] = HealthResponse(
                     status=HealthStatus.ERROR,
                     message=f"Health check timed out after {timeout} seconds",
@@ -624,295 +622,3 @@ class InferenceRouter(Inference):
                     status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}"
                 )
         return health_statuses
-
-
-class SafetyRouter(Safety):
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing SafetyRouter")
-        self.routing_table = routing_table
-
-    async def initialize(self) -> None:
-        logger.debug("SafetyRouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("SafetyRouter.shutdown")
-        pass
-
-    async def register_shield(
-        self,
-        shield_id: str,
-        provider_shield_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        params: Optional[Dict[str, Any]] = None,
-    ) -> Shield:
-        logger.debug(f"SafetyRouter.register_shield: {shield_id}")
-        return await self.routing_table.register_shield(shield_id, provider_shield_id, provider_id, params)
-
-    async def run_shield(
-        self,
-        shield_id: str,
-        messages: List[Message],
-        params: Dict[str, Any] = None,
-    ) -> RunShieldResponse:
-        logger.debug(f"SafetyRouter.run_shield: {shield_id}")
-        return await self.routing_table.get_provider_impl(shield_id).run_shield(
-            shield_id=shield_id,
-            messages=messages,
-            params=params,
-        )
-
-
-class DatasetIORouter(DatasetIO):
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing DatasetIORouter")
-        self.routing_table = routing_table
-
-    async def initialize(self) -> None:
-        logger.debug("DatasetIORouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("DatasetIORouter.shutdown")
-        pass
-
-    async def register_dataset(
-        self,
-        purpose: DatasetPurpose,
-        source: DataSource,
-        metadata: Optional[Dict[str, Any]] = None,
-        dataset_id: Optional[str] = None,
-    ) -> None:
-        logger.debug(
-            f"DatasetIORouter.register_dataset: {purpose=} {source=} {metadata=} {dataset_id=}",
-        )
-        await self.routing_table.register_dataset(
-            purpose=purpose,
-            source=source,
-            metadata=metadata,
-            dataset_id=dataset_id,
-        )
-
-    async def iterrows(
-        self,
-        dataset_id: str,
-        start_index: Optional[int] = None,
-        limit: Optional[int] = None,
-    ) -> PaginatedResponse:
-        logger.debug(
-            f"DatasetIORouter.iterrows: {dataset_id}, {start_index=} {limit=}",
-        )
-        return await self.routing_table.get_provider_impl(dataset_id).iterrows(
-            dataset_id=dataset_id,
-            start_index=start_index,
-            limit=limit,
-        )
-
-    async def append_rows(self, dataset_id: str, rows: List[Dict[str, Any]]) -> None:
-        logger.debug(f"DatasetIORouter.append_rows: {dataset_id}, {len(rows)} rows")
-        return await self.routing_table.get_provider_impl(dataset_id).append_rows(
-            dataset_id=dataset_id,
-            rows=rows,
-        )
-
-
-class ScoringRouter(Scoring):
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing ScoringRouter")
-        self.routing_table = routing_table
-
-    async def initialize(self) -> None:
-        logger.debug("ScoringRouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("ScoringRouter.shutdown")
-        pass
-
-    async def score_batch(
-        self,
-        dataset_id: str,
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
-        save_results_dataset: bool = False,
-    ) -> ScoreBatchResponse:
-        logger.debug(f"ScoringRouter.score_batch: {dataset_id}")
-        res = {}
-        for fn_identifier in scoring_functions.keys():
-            score_response = await self.routing_table.get_provider_impl(fn_identifier).score_batch(
-                dataset_id=dataset_id,
-                scoring_functions={fn_identifier: scoring_functions[fn_identifier]},
-            )
-            res.update(score_response.results)
-
-        if save_results_dataset:
-            raise NotImplementedError("Save results dataset not implemented yet")
-
-        return ScoreBatchResponse(
-            results=res,
-        )
-
-    async def score(
-        self,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
-    ) -> ScoreResponse:
-        logger.debug(f"ScoringRouter.score: {len(input_rows)} rows, {len(scoring_functions)} functions")
-        res = {}
-        # look up and map each scoring function to its provider impl
-        for fn_identifier in scoring_functions.keys():
-            score_response = await self.routing_table.get_provider_impl(fn_identifier).score(
-                input_rows=input_rows,
-                scoring_functions={fn_identifier: scoring_functions[fn_identifier]},
-            )
-            res.update(score_response.results)
-
-        return ScoreResponse(results=res)
-
-
-class EvalRouter(Eval):
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing EvalRouter")
-        self.routing_table = routing_table
-
-    async def initialize(self) -> None:
-        logger.debug("EvalRouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("EvalRouter.shutdown")
-        pass
-
-    async def run_eval(
-        self,
-        benchmark_id: str,
-        benchmark_config: BenchmarkConfig,
-    ) -> Job:
-        logger.debug(f"EvalRouter.run_eval: {benchmark_id}")
-        return await self.routing_table.get_provider_impl(benchmark_id).run_eval(
-            benchmark_id=benchmark_id,
-            benchmark_config=benchmark_config,
-        )
-
-    async def evaluate_rows(
-        self,
-        benchmark_id: str,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: List[str],
-        benchmark_config: BenchmarkConfig,
-    ) -> EvaluateResponse:
-        logger.debug(f"EvalRouter.evaluate_rows: {benchmark_id}, {len(input_rows)} rows")
-        return await self.routing_table.get_provider_impl(benchmark_id).evaluate_rows(
-            benchmark_id=benchmark_id,
-            input_rows=input_rows,
-            scoring_functions=scoring_functions,
-            benchmark_config=benchmark_config,
-        )
-
-    async def job_status(
-        self,
-        benchmark_id: str,
-        job_id: str,
-    ) -> Job:
-        logger.debug(f"EvalRouter.job_status: {benchmark_id}, {job_id}")
-        return await self.routing_table.get_provider_impl(benchmark_id).job_status(benchmark_id, job_id)
-
-    async def job_cancel(
-        self,
-        benchmark_id: str,
-        job_id: str,
-    ) -> None:
-        logger.debug(f"EvalRouter.job_cancel: {benchmark_id}, {job_id}")
-        await self.routing_table.get_provider_impl(benchmark_id).job_cancel(
-            benchmark_id,
-            job_id,
-        )
-
-    async def job_result(
-        self,
-        benchmark_id: str,
-        job_id: str,
-    ) -> EvaluateResponse:
-        logger.debug(f"EvalRouter.job_result: {benchmark_id}, {job_id}")
-        return await self.routing_table.get_provider_impl(benchmark_id).job_result(
-            benchmark_id,
-            job_id,
-        )
-
-
-class ToolRuntimeRouter(ToolRuntime):
-    class RagToolImpl(RAGToolRuntime):
-        def __init__(
-            self,
-            routing_table: RoutingTable,
-        ) -> None:
-            logger.debug("Initializing ToolRuntimeRouter.RagToolImpl")
-            self.routing_table = routing_table
-
-        async def query(
-            self,
-            content: InterleavedContent,
-            vector_db_ids: List[str],
-            query_config: Optional[RAGQueryConfig] = None,
-        ) -> RAGQueryResult:
-            logger.debug(f"ToolRuntimeRouter.RagToolImpl.query: {vector_db_ids}")
-            return await self.routing_table.get_provider_impl("knowledge_search").query(
-                content, vector_db_ids, query_config
-            )
-
-        async def insert(
-            self,
-            documents: List[RAGDocument],
-            vector_db_id: str,
-            chunk_size_in_tokens: int = 512,
-        ) -> None:
-            logger.debug(
-                f"ToolRuntimeRouter.RagToolImpl.insert: {vector_db_id}, {len(documents)} documents, chunk_size={chunk_size_in_tokens}"
-            )
-            return await self.routing_table.get_provider_impl("insert_into_memory").insert(
-                documents, vector_db_id, chunk_size_in_tokens
-            )
-
-    def __init__(
-        self,
-        routing_table: RoutingTable,
-    ) -> None:
-        logger.debug("Initializing ToolRuntimeRouter")
-        self.routing_table = routing_table
-
-        # HACK ALERT this should be in sync with "get_all_api_endpoints()"
-        self.rag_tool = self.RagToolImpl(routing_table)
-        for method in ("query", "insert"):
-            setattr(self, f"rag_tool.{method}", getattr(self.rag_tool, method))
-
-    async def initialize(self) -> None:
-        logger.debug("ToolRuntimeRouter.initialize")
-        pass
-
-    async def shutdown(self) -> None:
-        logger.debug("ToolRuntimeRouter.shutdown")
-        pass
-
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> Any:
-        logger.debug(f"ToolRuntimeRouter.invoke_tool: {tool_name}")
-        return await self.routing_table.get_provider_impl(tool_name).invoke_tool(
-            tool_name=tool_name,
-            kwargs=kwargs,
-        )
-
-    async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
-    ) -> ListToolDefsResponse:
-        logger.debug(f"ToolRuntimeRouter.list_runtime_tools: {tool_group_id}")
-        return await self.routing_table.get_provider_impl(tool_group_id).list_tools(tool_group_id, mcp_endpoint)
diff --git a/llama_stack/distribution/routers/routing_tables.py b/llama_stack/distribution/routers/routing_tables.py
deleted file mode 100644
index 18b0c891f..000000000
--- a/llama_stack/distribution/routers/routing_tables.py
+++ /dev/null
@@ -1,631 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import logging
-import time
-import uuid
-from typing import Any, Dict, List, Optional
-
-from pydantic import TypeAdapter
-
-from llama_stack.apis.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
-from llama_stack.apis.common.content_types import URL
-from llama_stack.apis.common.type_system import ParamType
-from llama_stack.apis.datasets import (
-    Dataset,
-    DatasetPurpose,
-    Datasets,
-    DatasetType,
-    DataSource,
-    ListDatasetsResponse,
-    RowsDataSource,
-    URIDataSource,
-)
-from llama_stack.apis.models import ListModelsResponse, Model, Models, ModelType, OpenAIListModelsResponse, OpenAIModel
-from llama_stack.apis.resource import ResourceType
-from llama_stack.apis.scoring_functions import (
-    ListScoringFunctionsResponse,
-    ScoringFn,
-    ScoringFnParams,
-    ScoringFunctions,
-)
-from llama_stack.apis.shields import ListShieldsResponse, Shield, Shields
-from llama_stack.apis.tools import (
-    ListToolGroupsResponse,
-    ListToolsResponse,
-    Tool,
-    ToolGroup,
-    ToolGroups,
-    ToolHost,
-)
-from llama_stack.apis.vector_dbs import ListVectorDBsResponse, VectorDB, VectorDBs
-from llama_stack.distribution.access_control import check_access
-from llama_stack.distribution.datatypes import (
-    AccessAttributes,
-    BenchmarkWithACL,
-    DatasetWithACL,
-    ModelWithACL,
-    RoutableObject,
-    RoutableObjectWithProvider,
-    RoutedProtocol,
-    ScoringFnWithACL,
-    ShieldWithACL,
-    ToolGroupWithACL,
-    ToolWithACL,
-    VectorDBWithACL,
-)
-from llama_stack.distribution.request_headers import get_auth_attributes
-from llama_stack.distribution.store import DistributionRegistry
-from llama_stack.providers.datatypes import Api, RoutingTable
-
-logger = logging.getLogger(__name__)
-
-
-def get_impl_api(p: Any) -> Api:
-    return p.__provider_spec__.api
-
-
-# TODO: this should return the registered object for all APIs
-async def register_object_with_provider(obj: RoutableObject, p: Any) -> RoutableObject:
-    api = get_impl_api(p)
-
-    assert obj.provider_id != "remote", "Remote provider should not be registered"
-
-    if api == Api.inference:
-        return await p.register_model(obj)
-    elif api == Api.safety:
-        return await p.register_shield(obj)
-    elif api == Api.vector_io:
-        return await p.register_vector_db(obj)
-    elif api == Api.datasetio:
-        return await p.register_dataset(obj)
-    elif api == Api.scoring:
-        return await p.register_scoring_function(obj)
-    elif api == Api.eval:
-        return await p.register_benchmark(obj)
-    elif api == Api.tool_runtime:
-        return await p.register_tool(obj)
-    else:
-        raise ValueError(f"Unknown API {api} for registering object with provider")
-
-
-async def unregister_object_from_provider(obj: RoutableObject, p: Any) -> None:
-    api = get_impl_api(p)
-    if api == Api.vector_io:
-        return await p.unregister_vector_db(obj.identifier)
-    elif api == Api.inference:
-        return await p.unregister_model(obj.identifier)
-    elif api == Api.datasetio:
-        return await p.unregister_dataset(obj.identifier)
-    elif api == Api.tool_runtime:
-        return await p.unregister_tool(obj.identifier)
-    else:
-        raise ValueError(f"Unregister not supported for {api}")
-
-
-Registry = Dict[str, List[RoutableObjectWithProvider]]
-
-
-class CommonRoutingTableImpl(RoutingTable):
-    def __init__(
-        self,
-        impls_by_provider_id: Dict[str, RoutedProtocol],
-        dist_registry: DistributionRegistry,
-    ) -> None:
-        self.impls_by_provider_id = impls_by_provider_id
-        self.dist_registry = dist_registry
-
-    async def initialize(self) -> None:
-        async def add_objects(objs: List[RoutableObjectWithProvider], provider_id: str, cls) -> None:
-            for obj in objs:
-                if cls is None:
-                    obj.provider_id = provider_id
-                else:
-                    # Create a copy of the model data and explicitly set provider_id
-                    model_data = obj.model_dump()
-                    model_data["provider_id"] = provider_id
-                    obj = cls(**model_data)
-                await self.dist_registry.register(obj)
-
-        # Register all objects from providers
-        for pid, p in self.impls_by_provider_id.items():
-            api = get_impl_api(p)
-            if api == Api.inference:
-                p.model_store = self
-            elif api == Api.safety:
-                p.shield_store = self
-            elif api == Api.vector_io:
-                p.vector_db_store = self
-            elif api == Api.datasetio:
-                p.dataset_store = self
-            elif api == Api.scoring:
-                p.scoring_function_store = self
-                scoring_functions = await p.list_scoring_functions()
-                await add_objects(scoring_functions, pid, ScoringFn)
-            elif api == Api.eval:
-                p.benchmark_store = self
-            elif api == Api.tool_runtime:
-                p.tool_store = self
-
-    async def shutdown(self) -> None:
-        for p in self.impls_by_provider_id.values():
-            await p.shutdown()
-
-    def get_provider_impl(self, routing_key: str, provider_id: Optional[str] = None) -> Any:
-        def apiname_object():
-            if isinstance(self, ModelsRoutingTable):
-                return ("Inference", "model")
-            elif isinstance(self, ShieldsRoutingTable):
-                return ("Safety", "shield")
-            elif isinstance(self, VectorDBsRoutingTable):
-                return ("VectorIO", "vector_db")
-            elif isinstance(self, DatasetsRoutingTable):
-                return ("DatasetIO", "dataset")
-            elif isinstance(self, ScoringFunctionsRoutingTable):
-                return ("Scoring", "scoring_function")
-            elif isinstance(self, BenchmarksRoutingTable):
-                return ("Eval", "benchmark")
-            elif isinstance(self, ToolGroupsRoutingTable):
-                return ("Tools", "tool")
-            else:
-                raise ValueError("Unknown routing table type")
-
-        apiname, objtype = apiname_object()
-
-        # Get objects from disk registry
-        obj = self.dist_registry.get_cached(objtype, routing_key)
-        if not obj:
-            provider_ids = list(self.impls_by_provider_id.keys())
-            if len(provider_ids) > 1:
-                provider_ids_str = f"any of the providers: {', '.join(provider_ids)}"
-            else:
-                provider_ids_str = f"provider: `{provider_ids[0]}`"
-            raise ValueError(
-                f"{objtype.capitalize()} `{routing_key}` not served by {provider_ids_str}. Make sure there is an {apiname} provider serving this {objtype}."
-            )
-
-        if not provider_id or provider_id == obj.provider_id:
-            return self.impls_by_provider_id[obj.provider_id]
-
-        raise ValueError(f"Provider not found for `{routing_key}`")
-
-    async def get_object_by_identifier(self, type: str, identifier: str) -> Optional[RoutableObjectWithProvider]:
-        # Get from disk registry
-        obj = await self.dist_registry.get(type, identifier)
-        if not obj:
-            return None
-
-        # Check if user has permission to access this object
-        if not check_access(obj.identifier, getattr(obj, "access_attributes", None), get_auth_attributes()):
-            logger.debug(f"Access denied to {type} '{identifier}' based on attribute mismatch")
-            return None
-
-        return obj
-
-    async def unregister_object(self, obj: RoutableObjectWithProvider) -> None:
-        await self.dist_registry.delete(obj.type, obj.identifier)
-        await unregister_object_from_provider(obj, self.impls_by_provider_id[obj.provider_id])
-
-    async def register_object(self, obj: RoutableObjectWithProvider) -> RoutableObjectWithProvider:
-        # if provider_id is not specified, pick an arbitrary one from existing entries
-        if not obj.provider_id and len(self.impls_by_provider_id) > 0:
-            obj.provider_id = list(self.impls_by_provider_id.keys())[0]
-
-        if obj.provider_id not in self.impls_by_provider_id:
-            raise ValueError(f"Provider `{obj.provider_id}` not found")
-
-        p = self.impls_by_provider_id[obj.provider_id]
-
-        # If object supports access control but no attributes set, use creator's attributes
-        if not obj.access_attributes:
-            creator_attributes = get_auth_attributes()
-            if creator_attributes:
-                obj.access_attributes = AccessAttributes(**creator_attributes)
-                logger.info(f"Setting access attributes for {obj.type} '{obj.identifier}' based on creator's identity")
-
-        registered_obj = await register_object_with_provider(obj, p)
-        # TODO: This needs to be fixed for all APIs once they return the registered object
-        if obj.type == ResourceType.model.value:
-            await self.dist_registry.register(registered_obj)
-            return registered_obj
-
-        else:
-            await self.dist_registry.register(obj)
-            return obj
-
-    async def get_all_with_type(self, type: str) -> List[RoutableObjectWithProvider]:
-        objs = await self.dist_registry.get_all()
-        filtered_objs = [obj for obj in objs if obj.type == type]
-
-        # Apply attribute-based access control filtering
-        if filtered_objs:
-            filtered_objs = [
-                obj
-                for obj in filtered_objs
-                if check_access(obj.identifier, getattr(obj, "access_attributes", None), get_auth_attributes())
-            ]
-
-        return filtered_objs
-
-
-class ModelsRoutingTable(CommonRoutingTableImpl, Models):
-    async def list_models(self) -> ListModelsResponse:
-        return ListModelsResponse(data=await self.get_all_with_type("model"))
-
-    async def openai_list_models(self) -> OpenAIListModelsResponse:
-        models = await self.get_all_with_type("model")
-        openai_models = [
-            OpenAIModel(
-                id=model.identifier,
-                object="model",
-                created=int(time.time()),
-                owned_by="llama_stack",
-            )
-            for model in models
-        ]
-        return OpenAIListModelsResponse(data=openai_models)
-
-    async def get_model(self, model_id: str) -> Model:
-        model = await self.get_object_by_identifier("model", model_id)
-        if model is None:
-            raise ValueError(f"Model '{model_id}' not found")
-        return model
-
-    async def register_model(
-        self,
-        model_id: str,
-        provider_model_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        metadata: Optional[Dict[str, Any]] = None,
-        model_type: Optional[ModelType] = None,
-    ) -> Model:
-        if provider_model_id is None:
-            provider_model_id = model_id
-        if provider_id is None:
-            # If provider_id not specified, use the only provider if it supports this model
-            if len(self.impls_by_provider_id) == 1:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-            else:
-                raise ValueError(
-                    f"No provider specified and multiple providers available. Please specify a provider_id. Available providers: {self.impls_by_provider_id.keys()}"
-                )
-        if metadata is None:
-            metadata = {}
-        if model_type is None:
-            model_type = ModelType.llm
-        if "embedding_dimension" not in metadata and model_type == ModelType.embedding:
-            raise ValueError("Embedding model must have an embedding dimension in its metadata")
-        model = ModelWithACL(
-            identifier=model_id,
-            provider_resource_id=provider_model_id,
-            provider_id=provider_id,
-            metadata=metadata,
-            model_type=model_type,
-        )
-        registered_model = await self.register_object(model)
-        return registered_model
-
-    async def unregister_model(self, model_id: str) -> None:
-        existing_model = await self.get_model(model_id)
-        if existing_model is None:
-            raise ValueError(f"Model {model_id} not found")
-        await self.unregister_object(existing_model)
-
-
-class ShieldsRoutingTable(CommonRoutingTableImpl, Shields):
-    async def list_shields(self) -> ListShieldsResponse:
-        return ListShieldsResponse(data=await self.get_all_with_type(ResourceType.shield.value))
-
-    async def get_shield(self, identifier: str) -> Shield:
-        shield = await self.get_object_by_identifier("shield", identifier)
-        if shield is None:
-            raise ValueError(f"Shield '{identifier}' not found")
-        return shield
-
-    async def register_shield(
-        self,
-        shield_id: str,
-        provider_shield_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        params: Optional[Dict[str, Any]] = None,
-    ) -> Shield:
-        if provider_shield_id is None:
-            provider_shield_id = shield_id
-        if provider_id is None:
-            # If provider_id not specified, use the only provider if it supports this shield type
-            if len(self.impls_by_provider_id) == 1:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-            else:
-                raise ValueError(
-                    "No provider specified and multiple providers available. Please specify a provider_id."
-                )
-        if params is None:
-            params = {}
-        shield = ShieldWithACL(
-            identifier=shield_id,
-            provider_resource_id=provider_shield_id,
-            provider_id=provider_id,
-            params=params,
-        )
-        await self.register_object(shield)
-        return shield
-
-
-class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
-    async def list_vector_dbs(self) -> ListVectorDBsResponse:
-        return ListVectorDBsResponse(data=await self.get_all_with_type("vector_db"))
-
-    async def get_vector_db(self, vector_db_id: str) -> VectorDB:
-        vector_db = await self.get_object_by_identifier("vector_db", vector_db_id)
-        if vector_db is None:
-            raise ValueError(f"Vector DB '{vector_db_id}' not found")
-        return vector_db
-
-    async def register_vector_db(
-        self,
-        vector_db_id: str,
-        embedding_model: str,
-        embedding_dimension: Optional[int] = 384,
-        provider_id: Optional[str] = None,
-        provider_vector_db_id: Optional[str] = None,
-    ) -> VectorDB:
-        if provider_vector_db_id is None:
-            provider_vector_db_id = vector_db_id
-        if provider_id is None:
-            if len(self.impls_by_provider_id) > 0:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-                if len(self.impls_by_provider_id) > 1:
-                    logger.warning(
-                        f"No provider specified and multiple providers available. Arbitrarily selected the first provider {provider_id}."
-                    )
-            else:
-                raise ValueError("No provider available. Please configure a vector_io provider.")
-        model = await self.get_object_by_identifier("model", embedding_model)
-        if model is None:
-            raise ValueError(f"Model {embedding_model} not found")
-        if model.model_type != ModelType.embedding:
-            raise ValueError(f"Model {embedding_model} is not an embedding model")
-        if "embedding_dimension" not in model.metadata:
-            raise ValueError(f"Model {embedding_model} does not have an embedding dimension")
-        vector_db_data = {
-            "identifier": vector_db_id,
-            "type": ResourceType.vector_db.value,
-            "provider_id": provider_id,
-            "provider_resource_id": provider_vector_db_id,
-            "embedding_model": embedding_model,
-            "embedding_dimension": model.metadata["embedding_dimension"],
-        }
-        vector_db = TypeAdapter(VectorDBWithACL).validate_python(vector_db_data)
-        await self.register_object(vector_db)
-        return vector_db
-
-    async def unregister_vector_db(self, vector_db_id: str) -> None:
-        existing_vector_db = await self.get_vector_db(vector_db_id)
-        if existing_vector_db is None:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
-        await self.unregister_object(existing_vector_db)
-
-
-class DatasetsRoutingTable(CommonRoutingTableImpl, Datasets):
-    async def list_datasets(self) -> ListDatasetsResponse:
-        return ListDatasetsResponse(data=await self.get_all_with_type(ResourceType.dataset.value))
-
-    async def get_dataset(self, dataset_id: str) -> Dataset:
-        dataset = await self.get_object_by_identifier("dataset", dataset_id)
-        if dataset is None:
-            raise ValueError(f"Dataset '{dataset_id}' not found")
-        return dataset
-
-    async def register_dataset(
-        self,
-        purpose: DatasetPurpose,
-        source: DataSource,
-        metadata: Optional[Dict[str, Any]] = None,
-        dataset_id: Optional[str] = None,
-    ) -> Dataset:
-        if isinstance(source, dict):
-            if source["type"] == "uri":
-                source = URIDataSource.parse_obj(source)
-            elif source["type"] == "rows":
-                source = RowsDataSource.parse_obj(source)
-
-        if not dataset_id:
-            dataset_id = f"dataset-{str(uuid.uuid4())}"
-
-        provider_dataset_id = dataset_id
-
-        # infer provider from source
-        if source.type == DatasetType.rows.value:
-            provider_id = "localfs"
-        elif source.type == DatasetType.uri.value:
-            # infer provider from uri
-            if source.uri.startswith("huggingface"):
-                provider_id = "huggingface"
-            else:
-                provider_id = "localfs"
-        else:
-            raise ValueError(f"Unknown data source type: {source.type}")
-
-        if metadata is None:
-            metadata = {}
-
-        dataset = DatasetWithACL(
-            identifier=dataset_id,
-            provider_resource_id=provider_dataset_id,
-            provider_id=provider_id,
-            purpose=purpose,
-            source=source,
-            metadata=metadata,
-        )
-
-        await self.register_object(dataset)
-        return dataset
-
-    async def unregister_dataset(self, dataset_id: str) -> None:
-        dataset = await self.get_dataset(dataset_id)
-        if dataset is None:
-            raise ValueError(f"Dataset {dataset_id} not found")
-        await self.unregister_object(dataset)
-
-
-class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, ScoringFunctions):
-    async def list_scoring_functions(self) -> ListScoringFunctionsResponse:
-        return ListScoringFunctionsResponse(data=await self.get_all_with_type(ResourceType.scoring_function.value))
-
-    async def get_scoring_function(self, scoring_fn_id: str) -> ScoringFn:
-        scoring_fn = await self.get_object_by_identifier("scoring_function", scoring_fn_id)
-        if scoring_fn is None:
-            raise ValueError(f"Scoring function '{scoring_fn_id}' not found")
-        return scoring_fn
-
-    async def register_scoring_function(
-        self,
-        scoring_fn_id: str,
-        description: str,
-        return_type: ParamType,
-        provider_scoring_fn_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-        params: Optional[ScoringFnParams] = None,
-    ) -> None:
-        if provider_scoring_fn_id is None:
-            provider_scoring_fn_id = scoring_fn_id
-        if provider_id is None:
-            if len(self.impls_by_provider_id) == 1:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-            else:
-                raise ValueError(
-                    "No provider specified and multiple providers available. Please specify a provider_id."
-                )
-        scoring_fn = ScoringFnWithACL(
-            identifier=scoring_fn_id,
-            description=description,
-            return_type=return_type,
-            provider_resource_id=provider_scoring_fn_id,
-            provider_id=provider_id,
-            params=params,
-        )
-        scoring_fn.provider_id = provider_id
-        await self.register_object(scoring_fn)
-
-
-class BenchmarksRoutingTable(CommonRoutingTableImpl, Benchmarks):
-    async def list_benchmarks(self) -> ListBenchmarksResponse:
-        return ListBenchmarksResponse(data=await self.get_all_with_type("benchmark"))
-
-    async def get_benchmark(self, benchmark_id: str) -> Benchmark:
-        benchmark = await self.get_object_by_identifier("benchmark", benchmark_id)
-        if benchmark is None:
-            raise ValueError(f"Benchmark '{benchmark_id}' not found")
-        return benchmark
-
-    async def register_benchmark(
-        self,
-        benchmark_id: str,
-        dataset_id: str,
-        scoring_functions: List[str],
-        metadata: Optional[Dict[str, Any]] = None,
-        provider_benchmark_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-    ) -> None:
-        if metadata is None:
-            metadata = {}
-        if provider_id is None:
-            if len(self.impls_by_provider_id) == 1:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-            else:
-                raise ValueError(
-                    "No provider specified and multiple providers available. Please specify a provider_id."
-                )
-        if provider_benchmark_id is None:
-            provider_benchmark_id = benchmark_id
-        benchmark = BenchmarkWithACL(
-            identifier=benchmark_id,
-            dataset_id=dataset_id,
-            scoring_functions=scoring_functions,
-            metadata=metadata,
-            provider_id=provider_id,
-            provider_resource_id=provider_benchmark_id,
-        )
-        await self.register_object(benchmark)
-
-
-class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
-    async def list_tools(self, toolgroup_id: Optional[str] = None) -> ListToolsResponse:
-        tools = await self.get_all_with_type("tool")
-        if toolgroup_id:
-            tools = [tool for tool in tools if tool.toolgroup_id == toolgroup_id]
-        return ListToolsResponse(data=tools)
-
-    async def list_tool_groups(self) -> ListToolGroupsResponse:
-        return ListToolGroupsResponse(data=await self.get_all_with_type("tool_group"))
-
-    async def get_tool_group(self, toolgroup_id: str) -> ToolGroup:
-        tool_group = await self.get_object_by_identifier("tool_group", toolgroup_id)
-        if tool_group is None:
-            raise ValueError(f"Tool group '{toolgroup_id}' not found")
-        return tool_group
-
-    async def get_tool(self, tool_name: str) -> Tool:
-        return await self.get_object_by_identifier("tool", tool_name)
-
-    async def register_tool_group(
-        self,
-        toolgroup_id: str,
-        provider_id: str,
-        mcp_endpoint: Optional[URL] = None,
-        args: Optional[Dict[str, Any]] = None,
-    ) -> None:
-        tools = []
-        tool_defs = await self.impls_by_provider_id[provider_id].list_runtime_tools(toolgroup_id, mcp_endpoint)
-        tool_host = ToolHost.model_context_protocol if mcp_endpoint else ToolHost.distribution
-
-        for tool_def in tool_defs.data:
-            tools.append(
-                ToolWithACL(
-                    identifier=tool_def.name,
-                    toolgroup_id=toolgroup_id,
-                    description=tool_def.description or "",
-                    parameters=tool_def.parameters or [],
-                    provider_id=provider_id,
-                    provider_resource_id=tool_def.name,
-                    metadata=tool_def.metadata,
-                    tool_host=tool_host,
-                )
-            )
-        for tool in tools:
-            existing_tool = await self.get_tool(tool.identifier)
-            # Compare existing and new object if one exists
-            if existing_tool:
-                existing_dict = existing_tool.model_dump()
-                new_dict = tool.model_dump()
-
-                if existing_dict != new_dict:
-                    raise ValueError(
-                        f"Object {tool.identifier} already exists in registry. Please use a different identifier."
-                    )
-            await self.register_object(tool)
-
-        await self.dist_registry.register(
-            ToolGroupWithACL(
-                identifier=toolgroup_id,
-                provider_id=provider_id,
-                provider_resource_id=toolgroup_id,
-                mcp_endpoint=mcp_endpoint,
-                args=args,
-            )
-        )
-
-    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
-        tool_group = await self.get_tool_group(toolgroup_id)
-        if tool_group is None:
-            raise ValueError(f"Tool group {toolgroup_id} not found")
-        tools = await self.list_tools(toolgroup_id)
-        for tool in getattr(tools, "data", []):
-            await self.unregister_object(tool)
-        await self.unregister_object(tool_group)
-
-    async def shutdown(self) -> None:
-        pass
diff --git a/llama_stack/distribution/routers/safety.py b/llama_stack/distribution/routers/safety.py
new file mode 100644
index 000000000..9761d2db0
--- /dev/null
+++ b/llama_stack/distribution/routers/safety.py
@@ -0,0 +1,57 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.inference import (
+    Message,
+)
+from llama_stack.apis.safety import RunShieldResponse, Safety
+from llama_stack.apis.shields import Shield
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import RoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+class SafetyRouter(Safety):
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+    ) -> None:
+        logger.debug("Initializing SafetyRouter")
+        self.routing_table = routing_table
+
+    async def initialize(self) -> None:
+        logger.debug("SafetyRouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("SafetyRouter.shutdown")
+        pass
+
+    async def register_shield(
+        self,
+        shield_id: str,
+        provider_shield_id: str | None = None,
+        provider_id: str | None = None,
+        params: dict[str, Any] | None = None,
+    ) -> Shield:
+        logger.debug(f"SafetyRouter.register_shield: {shield_id}")
+        return await self.routing_table.register_shield(shield_id, provider_shield_id, provider_id, params)
+
+    async def run_shield(
+        self,
+        shield_id: str,
+        messages: list[Message],
+        params: dict[str, Any] | None = None,
+    ) -> RunShieldResponse:
+        logger.debug(f"SafetyRouter.run_shield: {shield_id}")
+        return await self.routing_table.get_provider_impl(shield_id).run_shield(
+            shield_id=shield_id,
+            messages=messages,
+            params=params,
+        )
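The new SafetyRouter holds no safety logic of its own: it resolves a provider implementation from the routing table by `shield_id` and forwards the call. The following is a minimal, self-contained sketch of that delegation pattern; all classes are stand-ins for illustration, not the real llama_stack types.

```python
# Illustrative sketch of the router delegation pattern used by SafetyRouter.
# Every class here is a stub, not a real llama_stack type.
import asyncio


class StubShieldProvider:
    async def run_shield(self, shield_id, messages, params):
        # A real provider would run its moderation model here.
        return {"shield_id": shield_id, "violation": None, "checked": len(messages)}


class StubRoutingTable:
    def __init__(self, impls):
        self._impls = impls  # routing_key -> provider impl

    def get_provider_impl(self, routing_key):
        return self._impls[routing_key]


class MiniSafetyRouter:
    def __init__(self, routing_table):
        self.routing_table = routing_table

    async def run_shield(self, shield_id, messages, params=None):
        # Same shape as SafetyRouter.run_shield: look up the provider, then forward.
        impl = self.routing_table.get_provider_impl(shield_id)
        return await impl.run_shield(shield_id=shield_id, messages=messages, params=params)


async def main():
    table = StubRoutingTable({"llama-guard": StubShieldProvider()})
    router = MiniSafetyRouter(table)
    print(await router.run_shield("llama-guard", messages=["hello"]))


if __name__ == "__main__":
    asyncio.run(main())
```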
diff --git a/llama_stack/distribution/routers/tool_runtime.py b/llama_stack/distribution/routers/tool_runtime.py
new file mode 100644
index 000000000..285843dbc
--- /dev/null
+++ b/llama_stack/distribution/routers/tool_runtime.py
@@ -0,0 +1,92 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.common.content_types import (
+    URL,
+    InterleavedContent,
+)
+from llama_stack.apis.tools import (
+    ListToolsResponse,
+    RAGDocument,
+    RAGQueryConfig,
+    RAGQueryResult,
+    RAGToolRuntime,
+    ToolRuntime,
+)
+from llama_stack.log import get_logger
+
+from ..routing_tables.toolgroups import ToolGroupsRoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+class ToolRuntimeRouter(ToolRuntime):
+    class RagToolImpl(RAGToolRuntime):
+        def __init__(
+            self,
+            routing_table: ToolGroupsRoutingTable,
+        ) -> None:
+            logger.debug("Initializing ToolRuntimeRouter.RagToolImpl")
+            self.routing_table = routing_table
+
+        async def query(
+            self,
+            content: InterleavedContent,
+            vector_db_ids: list[str],
+            query_config: RAGQueryConfig | None = None,
+        ) -> RAGQueryResult:
+            logger.debug(f"ToolRuntimeRouter.RagToolImpl.query: {vector_db_ids}")
+            return await self.routing_table.get_provider_impl("knowledge_search").query(
+                content, vector_db_ids, query_config
+            )
+
+        async def insert(
+            self,
+            documents: list[RAGDocument],
+            vector_db_id: str,
+            chunk_size_in_tokens: int = 512,
+        ) -> None:
+            logger.debug(
+                f"ToolRuntimeRouter.RagToolImpl.insert: {vector_db_id}, {len(documents)} documents, chunk_size={chunk_size_in_tokens}"
+            )
+            return await self.routing_table.get_provider_impl("insert_into_memory").insert(
+                documents, vector_db_id, chunk_size_in_tokens
+            )
+
+    def __init__(
+        self,
+        routing_table: ToolGroupsRoutingTable,
+    ) -> None:
+        logger.debug("Initializing ToolRuntimeRouter")
+        self.routing_table = routing_table
+
+        # HACK ALERT this should be in sync with "get_all_api_endpoints()"
+        self.rag_tool = self.RagToolImpl(routing_table)
+        for method in ("query", "insert"):
+            setattr(self, f"rag_tool.{method}", getattr(self.rag_tool, method))
+
+    async def initialize(self) -> None:
+        logger.debug("ToolRuntimeRouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("ToolRuntimeRouter.shutdown")
+        pass
+
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> Any:
+        logger.debug(f"ToolRuntimeRouter.invoke_tool: {tool_name}")
+        return await self.routing_table.get_provider_impl(tool_name).invoke_tool(
+            tool_name=tool_name,
+            kwargs=kwargs,
+        )
+
+    async def list_runtime_tools(
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
+    ) -> ListToolsResponse:
+        logger.debug(f"ToolRuntimeRouter.list_runtime_tools: {tool_group_id}")
+        return await self.routing_table.list_tools(tool_group_id)
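The "HACK ALERT" block in `ToolRuntimeRouter.__init__` registers the nested RAG tool methods under the literal attribute names `rag_tool.query` and `rag_tool.insert`, so a generic dispatcher that resolves endpoints with `getattr` can reach them. A tiny self-contained demonstration of that dotted-attribute trick (stand-in classes, not llama_stack code):

```python
# Demonstrates exposing a nested object's methods under dotted attribute names,
# mirroring the setattr loop in ToolRuntimeRouter.__init__.
import asyncio


class RagToolImpl:
    async def query(self, content):
        return f"results for {content!r}"


class Router:
    def __init__(self):
        self.rag_tool = RagToolImpl()
        # Register the nested method under the literal name "rag_tool.query" so a
        # generic dispatcher can resolve it with getattr(router, endpoint_name).
        for method in ("query",):
            setattr(self, f"rag_tool.{method}", getattr(self.rag_tool, method))


router = Router()
handler = getattr(router, "rag_tool.query")  # dotted name, unreachable via normal attribute syntax
print(asyncio.run(handler("llama stack")))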
diff --git a/llama_stack/distribution/routers/vector_io.py b/llama_stack/distribution/routers/vector_io.py
new file mode 100644
index 000000000..8c17aa890
--- /dev/null
+++ b/llama_stack/distribution/routers/vector_io.py
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.common.content_types import (
+    InterleavedContent,
+)
+from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import RoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+class VectorIORouter(VectorIO):
+    """Routes to an provider based on the vector db identifier"""
+
+    def __init__(
+        self,
+        routing_table: RoutingTable,
+    ) -> None:
+        logger.debug("Initializing VectorIORouter")
+        self.routing_table = routing_table
+
+    async def initialize(self) -> None:
+        logger.debug("VectorIORouter.initialize")
+        pass
+
+    async def shutdown(self) -> None:
+        logger.debug("VectorIORouter.shutdown")
+        pass
+
+    async def register_vector_db(
+        self,
+        vector_db_id: str,
+        embedding_model: str,
+        embedding_dimension: int | None = 384,
+        provider_id: str | None = None,
+        provider_vector_db_id: str | None = None,
+    ) -> None:
+        logger.debug(f"VectorIORouter.register_vector_db: {vector_db_id}, {embedding_model}")
+        await self.routing_table.register_vector_db(
+            vector_db_id,
+            embedding_model,
+            embedding_dimension,
+            provider_id,
+            provider_vector_db_id,
+        )
+
+    async def insert_chunks(
+        self,
+        vector_db_id: str,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
+    ) -> None:
+        logger.debug(
+            f"VectorIORouter.insert_chunks: {vector_db_id}, {len(chunks)} chunks, ttl_seconds={ttl_seconds}, chunk_ids={[chunk.metadata['document_id'] for chunk in chunks[:3]]}{' and more...' if len(chunks) > 3 else ''}",
+        )
+        return await self.routing_table.get_provider_impl(vector_db_id).insert_chunks(vector_db_id, chunks, ttl_seconds)
+
+    async def query_chunks(
+        self,
+        vector_db_id: str,
+        query: InterleavedContent,
+        params: dict[str, Any] | None = None,
+    ) -> QueryChunksResponse:
+        logger.debug(f"VectorIORouter.query_chunks: {vector_db_id}")
+        return await self.routing_table.get_provider_impl(vector_db_id).query_chunks(vector_db_id, query, params)
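Every VectorIORouter call is keyed by `vector_db_id`, so vector DBs backed by different providers can sit behind the same API. A hedged, stub-only sketch of that keying (the provider names and chunk payloads below are made up for illustration):

```python
# Stub sketch of routing vector IO calls by vector_db_id; not real llama_stack types.
import asyncio


class StubVectorProvider:
    def __init__(self, name):
        self.name = name
        self.store = {}

    async def insert_chunks(self, vector_db_id, chunks, ttl_seconds=None):
        self.store.setdefault(vector_db_id, []).extend(chunks)

    async def query_chunks(self, vector_db_id, query, params=None):
        return {"provider": self.name, "chunks": self.store.get(vector_db_id, [])}


class StubRoutingTable:
    def __init__(self, db_to_impl):
        self.db_to_impl = db_to_impl  # vector_db_id -> provider impl

    def get_provider_impl(self, vector_db_id):
        return self.db_to_impl[vector_db_id]


async def main():
    faiss, chroma = StubVectorProvider("faiss"), StubVectorProvider("chroma")
    table = StubRoutingTable({"docs": faiss, "tickets": chroma})

    async def insert_chunks(vector_db_id, chunks):
        # Mirrors VectorIORouter.insert_chunks: resolve by db id, then forward.
        await table.get_provider_impl(vector_db_id).insert_chunks(vector_db_id, chunks)

    await insert_chunks("docs", ["chunk-1", "chunk-2"])
    await insert_chunks("tickets", ["chunk-3"])
    print(await faiss.query_chunks("docs", query="anything"))


if __name__ == "__main__":
    asyncio.run(main())
```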
diff --git a/llama_stack/providers/tests/__init__.py b/llama_stack/distribution/routing_tables/__init__.py
similarity index 100%
rename from llama_stack/providers/tests/__init__.py
rename to llama_stack/distribution/routing_tables/__init__.py
diff --git a/llama_stack/distribution/routing_tables/benchmarks.py b/llama_stack/distribution/routing_tables/benchmarks.py
new file mode 100644
index 000000000..589a00c02
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/benchmarks.py
@@ -0,0 +1,58 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
+from llama_stack.distribution.datatypes import (
+    BenchmarkWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class BenchmarksRoutingTable(CommonRoutingTableImpl, Benchmarks):
+    async def list_benchmarks(self) -> ListBenchmarksResponse:
+        return ListBenchmarksResponse(data=await self.get_all_with_type("benchmark"))
+
+    async def get_benchmark(self, benchmark_id: str) -> Benchmark:
+        benchmark = await self.get_object_by_identifier("benchmark", benchmark_id)
+        if benchmark is None:
+            raise ValueError(f"Benchmark '{benchmark_id}' not found")
+        return benchmark
+
+    async def register_benchmark(
+        self,
+        benchmark_id: str,
+        dataset_id: str,
+        scoring_functions: list[str],
+        metadata: dict[str, Any] | None = None,
+        provider_benchmark_id: str | None = None,
+        provider_id: str | None = None,
+    ) -> None:
+        if metadata is None:
+            metadata = {}
+        if provider_id is None:
+            if len(self.impls_by_provider_id) == 1:
+                provider_id = list(self.impls_by_provider_id.keys())[0]
+            else:
+                raise ValueError(
+                    "No provider specified and multiple providers available. Please specify a provider_id."
+                )
+        if provider_benchmark_id is None:
+            provider_benchmark_id = benchmark_id
+        benchmark = BenchmarkWithACL(
+            identifier=benchmark_id,
+            dataset_id=dataset_id,
+            scoring_functions=scoring_functions,
+            metadata=metadata,
+            provider_id=provider_id,
+            provider_resource_id=provider_benchmark_id,
+        )
+        await self.register_object(benchmark)
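`register_benchmark` (like `register_scoring_function` below) only infers a provider when exactly one is configured; otherwise the caller must pass `provider_id` explicitly. A small illustrative helper capturing that rule, with hypothetical provider names:

```python
# Sketch of the "default to the only provider" rule shared by register_benchmark
# and register_scoring_function; provider names here are hypothetical.
def pick_provider_id(provider_id: str | None, impls_by_provider_id: dict) -> str:
    if provider_id is not None:
        return provider_id
    if len(impls_by_provider_id) == 1:
        return next(iter(impls_by_provider_id))
    raise ValueError(
        "No provider specified and multiple providers available. Please specify a provider_id."
    )


assert pick_provider_id(None, {"basic": object()}) == "basic"
assert pick_provider_id("braintrust", {"basic": object(), "braintrust": object()}) == "braintrust"
```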
diff --git a/llama_stack/distribution/routing_tables/common.py b/llama_stack/distribution/routing_tables/common.py
new file mode 100644
index 000000000..8ec87ca50
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/common.py
@@ -0,0 +1,218 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.resource import ResourceType
+from llama_stack.apis.scoring_functions import ScoringFn
+from llama_stack.distribution.access_control import check_access
+from llama_stack.distribution.datatypes import (
+    AccessAttributes,
+    RoutableObject,
+    RoutableObjectWithProvider,
+    RoutedProtocol,
+)
+from llama_stack.distribution.request_headers import get_auth_attributes
+from llama_stack.distribution.store import DistributionRegistry
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import Api, RoutingTable
+
+logger = get_logger(name=__name__, category="core")
+
+
+def get_impl_api(p: Any) -> Api:
+    return p.__provider_spec__.api
+
+
+# TODO: this should return the registered object for all APIs
+async def register_object_with_provider(obj: RoutableObject, p: Any) -> RoutableObject:
+    api = get_impl_api(p)
+
+    assert obj.provider_id != "remote", "Remote provider should not be registered"
+
+    if api == Api.inference:
+        return await p.register_model(obj)
+    elif api == Api.safety:
+        return await p.register_shield(obj)
+    elif api == Api.vector_io:
+        return await p.register_vector_db(obj)
+    elif api == Api.datasetio:
+        return await p.register_dataset(obj)
+    elif api == Api.scoring:
+        return await p.register_scoring_function(obj)
+    elif api == Api.eval:
+        return await p.register_benchmark(obj)
+    elif api == Api.tool_runtime:
+        return await p.register_toolgroup(obj)
+    else:
+        raise ValueError(f"Unknown API {api} for registering object with provider")
+
+
+async def unregister_object_from_provider(obj: RoutableObject, p: Any) -> None:
+    api = get_impl_api(p)
+    if api == Api.vector_io:
+        return await p.unregister_vector_db(obj.identifier)
+    elif api == Api.inference:
+        return await p.unregister_model(obj.identifier)
+    elif api == Api.datasetio:
+        return await p.unregister_dataset(obj.identifier)
+    elif api == Api.tool_runtime:
+        return await p.unregister_toolgroup(obj.identifier)
+    else:
+        raise ValueError(f"Unregister not supported for {api}")
+
+
+Registry = dict[str, list[RoutableObjectWithProvider]]
+
+
+class CommonRoutingTableImpl(RoutingTable):
+    def __init__(
+        self,
+        impls_by_provider_id: dict[str, RoutedProtocol],
+        dist_registry: DistributionRegistry,
+    ) -> None:
+        self.impls_by_provider_id = impls_by_provider_id
+        self.dist_registry = dist_registry
+
+    async def initialize(self) -> None:
+        async def add_objects(objs: list[RoutableObjectWithProvider], provider_id: str, cls) -> None:
+            for obj in objs:
+                if cls is None:
+                    obj.provider_id = provider_id
+                else:
+                    # Create a copy of the model data and explicitly set provider_id
+                    model_data = obj.model_dump()
+                    model_data["provider_id"] = provider_id
+                    obj = cls(**model_data)
+                await self.dist_registry.register(obj)
+
+        # Register all objects from providers
+        for pid, p in self.impls_by_provider_id.items():
+            api = get_impl_api(p)
+            if api == Api.inference:
+                p.model_store = self
+            elif api == Api.safety:
+                p.shield_store = self
+            elif api == Api.vector_io:
+                p.vector_db_store = self
+            elif api == Api.datasetio:
+                p.dataset_store = self
+            elif api == Api.scoring:
+                p.scoring_function_store = self
+                scoring_functions = await p.list_scoring_functions()
+                await add_objects(scoring_functions, pid, ScoringFn)
+            elif api == Api.eval:
+                p.benchmark_store = self
+            elif api == Api.tool_runtime:
+                p.tool_store = self
+
+    async def shutdown(self) -> None:
+        for p in self.impls_by_provider_id.values():
+            await p.shutdown()
+
+    def get_provider_impl(self, routing_key: str, provider_id: str | None = None) -> Any:
+        from .benchmarks import BenchmarksRoutingTable
+        from .datasets import DatasetsRoutingTable
+        from .models import ModelsRoutingTable
+        from .scoring_functions import ScoringFunctionsRoutingTable
+        from .shields import ShieldsRoutingTable
+        from .toolgroups import ToolGroupsRoutingTable
+        from .vector_dbs import VectorDBsRoutingTable
+
+        def apiname_object():
+            if isinstance(self, ModelsRoutingTable):
+                return ("Inference", "model")
+            elif isinstance(self, ShieldsRoutingTable):
+                return ("Safety", "shield")
+            elif isinstance(self, VectorDBsRoutingTable):
+                return ("VectorIO", "vector_db")
+            elif isinstance(self, DatasetsRoutingTable):
+                return ("DatasetIO", "dataset")
+            elif isinstance(self, ScoringFunctionsRoutingTable):
+                return ("Scoring", "scoring_function")
+            elif isinstance(self, BenchmarksRoutingTable):
+                return ("Eval", "benchmark")
+            elif isinstance(self, ToolGroupsRoutingTable):
+                return ("ToolGroups", "tool_group")
+            else:
+                raise ValueError("Unknown routing table type")
+
+        apiname, objtype = apiname_object()
+
+        # Get objects from disk registry
+        obj = self.dist_registry.get_cached(objtype, routing_key)
+        if not obj:
+            provider_ids = list(self.impls_by_provider_id.keys())
+            if len(provider_ids) > 1:
+                provider_ids_str = f"any of the providers: {', '.join(provider_ids)}"
+            else:
+                provider_ids_str = f"provider: `{provider_ids[0]}`"
+            raise ValueError(
+                f"{objtype.capitalize()} `{routing_key}` not served by {provider_ids_str}. Make sure there is an {apiname} provider serving this {objtype}."
+            )
+
+        if not provider_id or provider_id == obj.provider_id:
+            return self.impls_by_provider_id[obj.provider_id]
+
+        raise ValueError(f"Provider not found for `{routing_key}`")
+
+    async def get_object_by_identifier(self, type: str, identifier: str) -> RoutableObjectWithProvider | None:
+        # Get from disk registry
+        obj = await self.dist_registry.get(type, identifier)
+        if not obj:
+            return None
+
+        # Check if user has permission to access this object
+        if not check_access(obj.identifier, getattr(obj, "access_attributes", None), get_auth_attributes()):
+            logger.debug(f"Access denied to {type} '{identifier}' based on attribute mismatch")
+            return None
+
+        return obj
+
+    async def unregister_object(self, obj: RoutableObjectWithProvider) -> None:
+        await self.dist_registry.delete(obj.type, obj.identifier)
+        await unregister_object_from_provider(obj, self.impls_by_provider_id[obj.provider_id])
+
+    async def register_object(self, obj: RoutableObjectWithProvider) -> RoutableObjectWithProvider:
+        # if provider_id is not specified, pick an arbitrary one from existing entries
+        if not obj.provider_id and len(self.impls_by_provider_id) > 0:
+            obj.provider_id = list(self.impls_by_provider_id.keys())[0]
+
+        if obj.provider_id not in self.impls_by_provider_id:
+            raise ValueError(f"Provider `{obj.provider_id}` not found")
+
+        p = self.impls_by_provider_id[obj.provider_id]
+
+        # If object supports access control but no attributes set, use creator's attributes
+        if not obj.access_attributes:
+            creator_attributes = get_auth_attributes()
+            if creator_attributes:
+                obj.access_attributes = AccessAttributes(**creator_attributes)
+                logger.info(f"Setting access attributes for {obj.type} '{obj.identifier}' based on creator's identity")
+
+        registered_obj = await register_object_with_provider(obj, p)
+        # TODO: This needs to be fixed for all APIs once they return the registered object
+        if obj.type == ResourceType.model.value:
+            await self.dist_registry.register(registered_obj)
+            return registered_obj
+
+        else:
+            await self.dist_registry.register(obj)
+            return obj
+
+    async def get_all_with_type(self, type: str) -> list[RoutableObjectWithProvider]:
+        objs = await self.dist_registry.get_all()
+        filtered_objs = [obj for obj in objs if obj.type == type]
+
+        # Apply attribute-based access control filtering
+        if filtered_objs:
+            filtered_objs = [
+                obj
+                for obj in filtered_objs
+                if check_access(obj.identifier, getattr(obj, "access_attributes", None), get_auth_attributes())
+            ]
+
+        return filtered_objs
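`get_provider_impl` resolves a routing key through the registry's in-memory cache and, on a miss, raises an error naming the API and the configured providers. The following simplified model of that lookup uses stand-in registry and provider objects, not the real `DistributionRegistry`:

```python
# Simplified model of CommonRoutingTableImpl.get_provider_impl's cache lookup
# and error message; all objects below are stand-ins.
class StubRegistry:
    def __init__(self, objects):
        # (objtype, routing_key) -> object carrying a provider_id attribute
        self._cache = objects

    def get_cached(self, objtype, routing_key):
        return self._cache.get((objtype, routing_key))


class Obj:
    def __init__(self, provider_id):
        self.provider_id = provider_id


def get_provider_impl(registry, impls_by_provider_id, objtype, apiname, routing_key):
    obj = registry.get_cached(objtype, routing_key)
    if not obj:
        provider_ids = list(impls_by_provider_id)
        where = (
            f"any of the providers: {', '.join(provider_ids)}"
            if len(provider_ids) > 1
            else f"provider: `{provider_ids[0]}`"
        )
        raise ValueError(
            f"{objtype.capitalize()} `{routing_key}` not served by {where}. "
            f"Make sure there is an {apiname} provider serving this {objtype}."
        )
    return impls_by_provider_id[obj.provider_id]


registry = StubRegistry({("model", "llama3"): Obj(provider_id="ollama")})
impls = {"ollama": object()}
print("resolved provider:", get_provider_impl(registry, impls, "model", "Inference", "llama3"))
try:
    get_provider_impl(registry, impls, "model", "Inference", "missing-model")
except ValueError as e:
    print(e)
```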
diff --git a/llama_stack/distribution/routing_tables/datasets.py b/llama_stack/distribution/routing_tables/datasets.py
new file mode 100644
index 000000000..4401ad47e
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/datasets.py
@@ -0,0 +1,93 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import uuid
+from typing import Any
+
+from llama_stack.apis.datasets import (
+    Dataset,
+    DatasetPurpose,
+    Datasets,
+    DatasetType,
+    DataSource,
+    ListDatasetsResponse,
+    RowsDataSource,
+    URIDataSource,
+)
+from llama_stack.apis.resource import ResourceType
+from llama_stack.distribution.datatypes import (
+    DatasetWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class DatasetsRoutingTable(CommonRoutingTableImpl, Datasets):
+    async def list_datasets(self) -> ListDatasetsResponse:
+        return ListDatasetsResponse(data=await self.get_all_with_type(ResourceType.dataset.value))
+
+    async def get_dataset(self, dataset_id: str) -> Dataset:
+        dataset = await self.get_object_by_identifier("dataset", dataset_id)
+        if dataset is None:
+            raise ValueError(f"Dataset '{dataset_id}' not found")
+        return dataset
+
+    async def register_dataset(
+        self,
+        purpose: DatasetPurpose,
+        source: DataSource,
+        metadata: dict[str, Any] | None = None,
+        dataset_id: str | None = None,
+    ) -> Dataset:
+        if isinstance(source, dict):
+            if source["type"] == "uri":
+                source = URIDataSource.parse_obj(source)
+            elif source["type"] == "rows":
+                source = RowsDataSource.parse_obj(source)
+
+        if not dataset_id:
+            dataset_id = f"dataset-{str(uuid.uuid4())}"
+
+        provider_dataset_id = dataset_id
+
+        # infer provider from source
+        if metadata and metadata.get("provider_id"):
+            # pass through from nvidia datasetio
+            provider_id = metadata["provider_id"]
+        elif source.type == DatasetType.rows.value:
+            provider_id = "localfs"
+        elif source.type == DatasetType.uri.value:
+            # infer provider from uri
+            if source.uri.startswith("huggingface"):
+                provider_id = "huggingface"
+            else:
+                provider_id = "localfs"
+        else:
+            raise ValueError(f"Unknown data source type: {source.type}")
+
+        if metadata is None:
+            metadata = {}
+
+        dataset = DatasetWithACL(
+            identifier=dataset_id,
+            provider_resource_id=provider_dataset_id,
+            provider_id=provider_id,
+            purpose=purpose,
+            source=source,
+            metadata=metadata,
+        )
+
+        await self.register_object(dataset)
+        return dataset
+
+    async def unregister_dataset(self, dataset_id: str) -> None:
+        dataset = await self.get_dataset(dataset_id)
+        if dataset is None:
+            raise ValueError(f"Dataset {dataset_id} not found")
+        await self.unregister_object(dataset)
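The provider inference in `register_dataset` boils down to a few rules: an explicit `provider_id` in metadata wins, inline rows go to `localfs`, and URI sources go to `huggingface` only when the URI starts with `huggingface`. A pure-function sketch of those rules (the sample URIs are illustrative only):

```python
# Pure-function sketch of the provider inference rules in
# DatasetsRoutingTable.register_dataset; for illustration, not the real code path.
def infer_dataset_provider(source_type: str, uri: str | None = None,
                           metadata: dict | None = None) -> str:
    if metadata and metadata.get("provider_id"):
        return metadata["provider_id"]          # explicit pass-through wins
    if source_type == "rows":
        return "localfs"                        # inline rows are stored locally
    if source_type == "uri":
        return "huggingface" if (uri or "").startswith("huggingface") else "localfs"
    raise ValueError(f"Unknown data source type: {source_type}")


assert infer_dataset_provider("rows") == "localfs"
assert infer_dataset_provider("uri", uri="huggingface://datasets/squad") == "huggingface"
assert infer_dataset_provider("uri", uri="https://example.com/data.jsonl") == "localfs"
assert infer_dataset_provider("rows", metadata={"provider_id": "nvidia"}) == "nvidia"
```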
diff --git a/llama_stack/distribution/routing_tables/models.py b/llama_stack/distribution/routing_tables/models.py
new file mode 100644
index 000000000..7216d9935
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/models.py
@@ -0,0 +1,82 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import time
+from typing import Any
+
+from llama_stack.apis.models import ListModelsResponse, Model, Models, ModelType, OpenAIListModelsResponse, OpenAIModel
+from llama_stack.distribution.datatypes import (
+    ModelWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class ModelsRoutingTable(CommonRoutingTableImpl, Models):
+    async def list_models(self) -> ListModelsResponse:
+        return ListModelsResponse(data=await self.get_all_with_type("model"))
+
+    async def openai_list_models(self) -> OpenAIListModelsResponse:
+        models = await self.get_all_with_type("model")
+        openai_models = [
+            OpenAIModel(
+                id=model.identifier,
+                object="model",
+                created=int(time.time()),
+                owned_by="llama_stack",
+            )
+            for model in models
+        ]
+        return OpenAIListModelsResponse(data=openai_models)
+
+    async def get_model(self, model_id: str) -> Model:
+        model = await self.get_object_by_identifier("model", model_id)
+        if model is None:
+            raise ValueError(f"Model '{model_id}' not found")
+        return model
+
+    async def register_model(
+        self,
+        model_id: str,
+        provider_model_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+        model_type: ModelType | None = None,
+    ) -> Model:
+        if provider_model_id is None:
+            provider_model_id = model_id
+        if provider_id is None:
+            # If provider_id not specified, use the only provider if it supports this model
+            if len(self.impls_by_provider_id) == 1:
+                provider_id = list(self.impls_by_provider_id.keys())[0]
+            else:
+                raise ValueError(
+                    f"No provider specified and multiple providers available. Please specify a provider_id. Available providers: {self.impls_by_provider_id.keys()}"
+                )
+        if metadata is None:
+            metadata = {}
+        if model_type is None:
+            model_type = ModelType.llm
+        if "embedding_dimension" not in metadata and model_type == ModelType.embedding:
+            raise ValueError("Embedding model must have an embedding dimension in its metadata")
+        model = ModelWithACL(
+            identifier=model_id,
+            provider_resource_id=provider_model_id,
+            provider_id=provider_id,
+            metadata=metadata,
+            model_type=model_type,
+        )
+        registered_model = await self.register_object(model)
+        return registered_model
+
+    async def unregister_model(self, model_id: str) -> None:
+        existing_model = await self.get_model(model_id)
+        if existing_model is None:
+            raise ValueError(f"Model {model_id} not found")
+        await self.unregister_object(existing_model)
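`register_model` fills in defaults (the provider model id falls back to `model_id`, the type falls back to `llm`) and rejects embedding models that lack an `embedding_dimension` in their metadata. A stand-alone sketch mirroring that normalization and validation, using plain dicts rather than the real `Model` types:

```python
# Stand-alone sketch of the default-filling and validation in
# ModelsRoutingTable.register_model; returns a plain dict for illustration.
def normalize_model_registration(model_id: str,
                                 provider_model_id: str | None = None,
                                 metadata: dict | None = None,
                                 model_type: str = "llm") -> dict:
    provider_model_id = provider_model_id or model_id
    metadata = metadata or {}
    if model_type == "embedding" and "embedding_dimension" not in metadata:
        raise ValueError("Embedding model must have an embedding dimension in its metadata")
    return {
        "identifier": model_id,
        "provider_resource_id": provider_model_id,
        "metadata": metadata,
        "model_type": model_type,
    }


print(normalize_model_registration("llama3.2:3b"))
print(normalize_model_registration("all-minilm", metadata={"embedding_dimension": 384},
                                    model_type="embedding"))
```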
diff --git a/llama_stack/distribution/routing_tables/scoring_functions.py b/llama_stack/distribution/routing_tables/scoring_functions.py
new file mode 100644
index 000000000..d85f64b57
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/scoring_functions.py
@@ -0,0 +1,62 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.apis.common.type_system import ParamType
+from llama_stack.apis.resource import ResourceType
+from llama_stack.apis.scoring_functions import (
+    ListScoringFunctionsResponse,
+    ScoringFn,
+    ScoringFnParams,
+    ScoringFunctions,
+)
+from llama_stack.distribution.datatypes import (
+    ScoringFnWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, ScoringFunctions):
+    async def list_scoring_functions(self) -> ListScoringFunctionsResponse:
+        return ListScoringFunctionsResponse(data=await self.get_all_with_type(ResourceType.scoring_function.value))
+
+    async def get_scoring_function(self, scoring_fn_id: str) -> ScoringFn:
+        scoring_fn = await self.get_object_by_identifier("scoring_function", scoring_fn_id)
+        if scoring_fn is None:
+            raise ValueError(f"Scoring function '{scoring_fn_id}' not found")
+        return scoring_fn
+
+    async def register_scoring_function(
+        self,
+        scoring_fn_id: str,
+        description: str,
+        return_type: ParamType,
+        provider_scoring_fn_id: str | None = None,
+        provider_id: str | None = None,
+        params: ScoringFnParams | None = None,
+    ) -> None:
+        if provider_scoring_fn_id is None:
+            provider_scoring_fn_id = scoring_fn_id
+        if provider_id is None:
+            if len(self.impls_by_provider_id) == 1:
+                provider_id = list(self.impls_by_provider_id.keys())[0]
+            else:
+                raise ValueError(
+                    "No provider specified and multiple providers available. Please specify a provider_id."
+                )
+        scoring_fn = ScoringFnWithACL(
+            identifier=scoring_fn_id,
+            description=description,
+            return_type=return_type,
+            provider_resource_id=provider_scoring_fn_id,
+            provider_id=provider_id,
+            params=params,
+        )
+        scoring_fn.provider_id = provider_id
+        await self.register_object(scoring_fn)
diff --git a/llama_stack/distribution/routing_tables/shields.py b/llama_stack/distribution/routing_tables/shields.py
new file mode 100644
index 000000000..7f62596c9
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/shields.py
@@ -0,0 +1,57 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.resource import ResourceType
+from llama_stack.apis.shields import ListShieldsResponse, Shield, Shields
+from llama_stack.distribution.datatypes import (
+    ShieldWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class ShieldsRoutingTable(CommonRoutingTableImpl, Shields):
+    async def list_shields(self) -> ListShieldsResponse:
+        return ListShieldsResponse(data=await self.get_all_with_type(ResourceType.shield.value))
+
+    async def get_shield(self, identifier: str) -> Shield:
+        shield = await self.get_object_by_identifier("shield", identifier)
+        if shield is None:
+            raise ValueError(f"Shield '{identifier}' not found")
+        return shield
+
+    async def register_shield(
+        self,
+        shield_id: str,
+        provider_shield_id: str | None = None,
+        provider_id: str | None = None,
+        params: dict[str, Any] | None = None,
+    ) -> Shield:
+        if provider_shield_id is None:
+            provider_shield_id = shield_id
+        if provider_id is None:
+            # If provider_id not specified, use the only provider if it supports this shield type
+            if len(self.impls_by_provider_id) == 1:
+                provider_id = list(self.impls_by_provider_id.keys())[0]
+            else:
+                raise ValueError(
+                    "No provider specified and multiple providers available. Please specify a provider_id."
+                )
+        if params is None:
+            params = {}
+        shield = ShieldWithACL(
+            identifier=shield_id,
+            provider_resource_id=provider_shield_id,
+            provider_id=provider_id,
+            params=params,
+        )
+        await self.register_object(shield)
+        return shield
diff --git a/llama_stack/distribution/routing_tables/toolgroups.py b/llama_stack/distribution/routing_tables/toolgroups.py
new file mode 100644
index 000000000..2f7dc3e06
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/toolgroups.py
@@ -0,0 +1,132 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.apis.common.content_types import URL
+from llama_stack.apis.tools import ListToolGroupsResponse, ListToolsResponse, Tool, ToolGroup, ToolGroups
+from llama_stack.distribution.datatypes import ToolGroupWithACL
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+def parse_toolgroup_from_toolgroup_name_pair(toolgroup_name_with_maybe_tool_name: str) -> str | None:
+    # handle the funny case like "builtin::rag/knowledge_search"
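+    # e.g. "builtin::rag/knowledge_search" -> "builtin::rag"; a bare toolgroup name (no "/") returns None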
+    parts = toolgroup_name_with_maybe_tool_name.split("/")
+    if len(parts) == 2:
+        return parts[0]
+    else:
+        return None
+
+
+class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
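+    # In-memory caches: toolgroup identifier -> its tools, and tool identifier -> owning toolgroup identifier.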
+    toolgroups_to_tools: dict[str, list[Tool]] = {}
+    tool_to_toolgroup: dict[str, str] = {}
+
+    # overridden
+    def get_provider_impl(self, routing_key: str, provider_id: str | None = None) -> Any:
+        # we don't index tools in the registry anymore, but only keep a cache of them by toolgroup_id
+        # TODO: we may want to invalidate the cache (for a given toolgroup_id) every once in a while?
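+        # e.g. both "builtin::rag/knowledge_search" and a bare, previously indexed tool name like
+        # "knowledge_search" resolve to the "builtin::rag" toolgroup before the provider lookup.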
+
+        toolgroup_id = parse_toolgroup_from_toolgroup_name_pair(routing_key)
+        if toolgroup_id:
+            routing_key = toolgroup_id
+
+        if routing_key in self.tool_to_toolgroup:
+            routing_key = self.tool_to_toolgroup[routing_key]
+        return super().get_provider_impl(routing_key, provider_id)
+
+    async def list_tools(self, toolgroup_id: str | None = None) -> ListToolsResponse:
+        if toolgroup_id:
+            if group_id := parse_toolgroup_from_toolgroup_name_pair(toolgroup_id):
+                toolgroup_id = group_id
+            toolgroups = [await self.get_tool_group(toolgroup_id)]
+        else:
+            toolgroups = await self.get_all_with_type("tool_group")
+
+        all_tools = []
+        for toolgroup in toolgroups:
+            if toolgroup.identifier not in self.toolgroups_to_tools:
+                await self._index_tools(toolgroup)
+            all_tools.extend(self.toolgroups_to_tools[toolgroup.identifier])
+
+        return ListToolsResponse(data=all_tools)
+
+    async def _index_tools(self, toolgroup: ToolGroup):
+        provider_impl = super().get_provider_impl(toolgroup.identifier, toolgroup.provider_id)
+        tooldefs_response = await provider_impl.list_runtime_tools(toolgroup.identifier, toolgroup.mcp_endpoint)
+
+        # TODO: kill this Tool vs ToolDef distinction
+        tooldefs = tooldefs_response.data
+        tools = []
+        for t in tooldefs:
+            tools.append(
+                Tool(
+                    identifier=t.name,
+                    toolgroup_id=toolgroup.identifier,
+                    description=t.description or "",
+                    parameters=t.parameters or [],
+                    metadata=t.metadata,
+                    provider_id=toolgroup.provider_id,
+                )
+            )
+
+        self.toolgroups_to_tools[toolgroup.identifier] = tools
+        for tool in tools:
+            self.tool_to_toolgroup[tool.identifier] = toolgroup.identifier
+
+    async def list_tool_groups(self) -> ListToolGroupsResponse:
+        return ListToolGroupsResponse(data=await self.get_all_with_type("tool_group"))
+
+    async def get_tool_group(self, toolgroup_id: str) -> ToolGroup:
+        tool_group = await self.get_object_by_identifier("tool_group", toolgroup_id)
+        if tool_group is None:
+            raise ValueError(f"Tool group '{toolgroup_id}' not found")
+        return tool_group
+
+    async def get_tool(self, tool_name: str) -> Tool:
+        if tool_name in self.tool_to_toolgroup:
+            toolgroup_id = self.tool_to_toolgroup[tool_name]
+            tools = self.toolgroups_to_tools[toolgroup_id]
+            for tool in tools:
+                if tool.identifier == tool_name:
+                    return tool
+        raise ValueError(f"Tool '{tool_name}' not found")
+
+    async def register_tool_group(
+        self,
+        toolgroup_id: str,
+        provider_id: str,
+        mcp_endpoint: URL | None = None,
+        args: dict[str, Any] | None = None,
+    ) -> None:
+        toolgroup = ToolGroupWithACL(
+            identifier=toolgroup_id,
+            provider_id=provider_id,
+            provider_resource_id=toolgroup_id,
+            mcp_endpoint=mcp_endpoint,
+            args=args,
+        )
+        await self.register_object(toolgroup)
+
+        # ideally, indexing of the tools should not be necessary because anyone using
+        # the tools should first list the tools and then use them. but there are assumptions
+        # baked in some of the code and tests right now.
+        if not toolgroup.mcp_endpoint:
+            await self._index_tools(toolgroup)
+        return toolgroup
+
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
+        tool_group = await self.get_tool_group(toolgroup_id)
+        if tool_group is None:
+            raise ValueError(f"Tool group {toolgroup_id} not found")
+        await self.unregister_object(tool_group)
+
+    async def shutdown(self) -> None:
+        pass
diff --git a/llama_stack/distribution/routing_tables/vector_dbs.py b/llama_stack/distribution/routing_tables/vector_dbs.py
new file mode 100644
index 000000000..dc6c0d0ef
--- /dev/null
+++ b/llama_stack/distribution/routing_tables/vector_dbs.py
@@ -0,0 +1,74 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pydantic import TypeAdapter
+
+from llama_stack.apis.models import ModelType
+from llama_stack.apis.resource import ResourceType
+from llama_stack.apis.vector_dbs import ListVectorDBsResponse, VectorDB, VectorDBs
+from llama_stack.distribution.datatypes import (
+    VectorDBWithACL,
+)
+from llama_stack.log import get_logger
+
+from .common import CommonRoutingTableImpl
+
+logger = get_logger(name=__name__, category="core")
+
+
+class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
+    async def list_vector_dbs(self) -> ListVectorDBsResponse:
+        return ListVectorDBsResponse(data=await self.get_all_with_type("vector_db"))
+
+    async def get_vector_db(self, vector_db_id: str) -> VectorDB:
+        vector_db = await self.get_object_by_identifier("vector_db", vector_db_id)
+        if vector_db is None:
+            raise ValueError(f"Vector DB '{vector_db_id}' not found")
+        return vector_db
+
+    async def register_vector_db(
+        self,
+        vector_db_id: str,
+        embedding_model: str,
+        embedding_dimension: int | None = 384,
+        provider_id: str | None = None,
+        provider_vector_db_id: str | None = None,
+    ) -> VectorDB:
+        if provider_vector_db_id is None:
+            provider_vector_db_id = vector_db_id
+        if provider_id is None:
+            if len(self.impls_by_provider_id) > 0:
+                provider_id = list(self.impls_by_provider_id.keys())[0]
+                if len(self.impls_by_provider_id) > 1:
+                    logger.warning(
+                        f"No provider specified and multiple providers available. Arbitrarily selected the first provider {provider_id}."
+                    )
+            else:
+                raise ValueError("No provider available. Please configure a vector_io provider.")
+        model = await self.get_object_by_identifier("model", embedding_model)
+        if model is None:
+            raise ValueError(f"Model {embedding_model} not found")
+        if model.model_type != ModelType.embedding:
+            raise ValueError(f"Model {embedding_model} is not an embedding model")
+        if "embedding_dimension" not in model.metadata:
+            raise ValueError(f"Model {embedding_model} does not have an embedding dimension")
+        vector_db_data = {
+            "identifier": vector_db_id,
+            "type": ResourceType.vector_db.value,
+            "provider_id": provider_id,
+            "provider_resource_id": provider_vector_db_id,
+            "embedding_model": embedding_model,
+            "embedding_dimension": model.metadata["embedding_dimension"],
+        }
+        vector_db = TypeAdapter(VectorDBWithACL).validate_python(vector_db_data)
+        await self.register_object(vector_db)
+        return vector_db
+
+    async def unregister_vector_db(self, vector_db_id: str) -> None:
+        existing_vector_db = await self.get_vector_db(vector_db_id)
+        if existing_vector_db is None:
+            raise ValueError(f"Vector DB {vector_db_id} not found")
+        await self.unregister_object(existing_vector_db)
diff --git a/llama_stack/distribution/server/auth.py b/llama_stack/distribution/server/auth.py
index 52e6a013c..fb26b49a7 100644
--- a/llama_stack/distribution/server/auth.py
+++ b/llama_stack/distribution/server/auth.py
@@ -5,74 +5,30 @@
 # the root directory of this source tree.
 
 import json
-from typing import Dict, List, Optional
-from urllib.parse import parse_qs
 
 import httpx
-from pydantic import BaseModel, Field
 
-from llama_stack.distribution.datatypes import AccessAttributes
+from llama_stack.distribution.datatypes import AuthenticationConfig
+from llama_stack.distribution.server.auth_providers import create_auth_provider
 from llama_stack.log import get_logger
 
 logger = get_logger(name=__name__, category="auth")
 
 
-class AuthRequestContext(BaseModel):
-    path: str = Field(description="The path of the request being authenticated")
-
-    headers: Dict[str, str] = Field(description="HTTP headers from the original request (excluding Authorization)")
-
-    params: Dict[str, List[str]] = Field(
-        description="Query parameters from the original request, parsed as dictionary of lists"
-    )
-
-
-class AuthRequest(BaseModel):
-    api_key: str = Field(description="The API key extracted from the Authorization header")
-
-    request: AuthRequestContext = Field(description="Context information about the request being authenticated")
-
-
-class AuthResponse(BaseModel):
-    """The format of the authentication response from the auth endpoint."""
-
-    access_attributes: Optional[AccessAttributes] = Field(
-        default=None,
-        description="""
-        Structured user attributes for attribute-based access control.
-
-        These attributes determine which resources the user can access.
-        The model provides standard categories like "roles", "teams", "projects", and "namespaces".
-        Each attribute category contains a list of values that the user has for that category.
-        During access control checks, these values are compared against resource requirements.
-
-        Example with standard categories:
-        ```json
-        {
-            "roles": ["admin", "data-scientist"],
-            "teams": ["ml-team"],
-            "projects": ["llama-3"],
-            "namespaces": ["research"]
-        }
-        ```
-        """,
-    )
-
-    message: Optional[str] = Field(
-        default=None, description="Optional message providing additional context about the authentication result."
-    )
-
-
 class AuthenticationMiddleware:
-    """Middleware that authenticates requests using an external auth endpoint.
+    """Middleware that authenticates requests using configured authentication provider.
 
     This middleware:
     1. Extracts the Bearer token from the Authorization header
-    2. Sends it to the configured auth endpoint along with request details
-    3. Validates the response and extracts user attributes
+    2. Uses the configured auth provider to validate the token
+    3. Extracts user attributes from the provider's response
     4. Makes these attributes available to the route handlers for access control
 
-    Authentication Request Format:
+    The middleware supports multiple authentication providers through the AuthProvider interface:
+    - OAuth2 token: Validates JWTs against a JWKS endpoint (e.g. a Kubernetes cluster's OIDC keys)
+      or via RFC 7662 token introspection
+    - Custom: Validates tokens against a custom authentication endpoint
+
+    Authentication Request Format for Custom Auth Provider:
     ```json
     {
         "api_key": "the-api-key-extracted-from-auth-header",
@@ -105,21 +61,26 @@ class AuthenticationMiddleware:
     }
     ```
 
+    Token Validation:
+    Each provider implements its own token validation logic:
+    - OAuth2 token: Verifies the JWT signature against cached JWKS keys, or introspects the token
+    - Custom: Sends the token to the configured endpoint for validation
+
     Attribute-Based Access Control:
-    The attributes returned by the auth endpoint are used to determine which
+    The attributes returned by the auth provider are used to determine which
     resources the user can access. Resources can specify required attributes
     using the access_attributes field. For a user to access a resource:
 
     1. All attribute categories specified in the resource must be present in the user's attributes
     2. For each category, the user must have at least one matching value
 
-    If the auth endpoint doesn't return any attributes, the user will only be able to
+    If the auth provider doesn't return any attributes, the user will only be able to
     access resources that don't have access_attributes defined.
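+
+    For example (illustrative), a resource with access_attributes {"roles": ["admin"], "teams": ["ml-team"]}
+    is only accessible to a user whose attributes include the "admin" role and the "ml-team" team.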
     """
 
-    def __init__(self, app, auth_endpoint):
+    def __init__(self, app, auth_config: AuthenticationConfig):
         self.app = app
-        self.auth_endpoint = auth_endpoint
+        self.auth_provider = create_auth_provider(auth_config)
 
     async def __call__(self, scope, receive, send):
         if scope["type"] == "http":
@@ -129,66 +90,41 @@ class AuthenticationMiddleware:
             if not auth_header or not auth_header.startswith("Bearer "):
                 return await self._send_auth_error(send, "Missing or invalid Authorization header")
 
-            api_key = auth_header.split("Bearer ", 1)[1]
+            token = auth_header.split("Bearer ", 1)[1]
 
-            path = scope.get("path", "")
-            request_headers = {k.decode(): v.decode() for k, v in headers.items()}
-
-            # Remove sensitive headers
-            if "authorization" in request_headers:
-                del request_headers["authorization"]
-
-            query_string = scope.get("query_string", b"").decode()
-            params = parse_qs(query_string)
-
-            # Build the auth request model
-            auth_request = AuthRequest(
-                api_key=api_key,
-                request=AuthRequestContext(
-                    path=path,
-                    headers=request_headers,
-                    params=params,
-                ),
-            )
-
-            # Validate with authentication endpoint
+            # Validate token and get access attributes
             try:
-                async with httpx.AsyncClient() as client:
-                    response = await client.post(
-                        self.auth_endpoint,
-                        json=auth_request.model_dump(),
-                        timeout=10.0,  # Add a reasonable timeout
-                    )
-                    if response.status_code != 200:
-                        logger.warning(f"Authentication failed: {response.status_code}")
-                        return await self._send_auth_error(send, "Authentication failed")
-
-                    # Parse and validate the auth response
-                    try:
-                        response_data = response.json()
-                        auth_response = AuthResponse(**response_data)
-
-                        # Store attributes in request scope for access control
-                        if auth_response.access_attributes:
-                            user_attributes = auth_response.access_attributes.model_dump(exclude_none=True)
-                        else:
-                            logger.warning("No access attributes, setting namespace to api_key by default")
-                            user_attributes = {
-                                "namespaces": [api_key],
-                            }
-
-                        scope["user_attributes"] = user_attributes
-                        logger.debug(f"Authentication successful: {len(user_attributes)} attributes")
-                    except Exception:
-                        logger.exception("Error parsing authentication response")
-                        return await self._send_auth_error(send, "Invalid authentication response format")
+                validation_result = await self.auth_provider.validate_token(token, scope)
             except httpx.TimeoutException:
                 logger.exception("Authentication request timed out")
                 return await self._send_auth_error(send, "Authentication service timeout")
+            except ValueError as e:
+                logger.exception("Error during authentication")
+                return await self._send_auth_error(send, str(e))
             except Exception:
                 logger.exception("Error during authentication")
                 return await self._send_auth_error(send, "Authentication service error")
 
+            # Store attributes in request scope for access control
+            if validation_result.access_attributes:
+                user_attributes = validation_result.access_attributes.model_dump(exclude_none=True)
+            else:
+                logger.warning("No access attributes, setting namespace to token by default")
+                user_attributes = {
+                    "roles": [token],
+                }
+
+            # Store the client ID in the request scope so that downstream middleware (like QuotaMiddleware)
+            # can identify the requester and enforce per-client rate limits.
+            scope["authenticated_client_id"] = token
+
+            # Store attributes in request scope
+            scope["user_attributes"] = user_attributes
+            scope["principal"] = validation_result.principal
+            logger.debug(
+                f"Authentication successful: {validation_result.principal} with {len(scope['user_attributes'])} attributes"
+            )
+
         return await self.app(scope, receive, send)
 
     async def _send_auth_error(self, send, message):
diff --git a/llama_stack/distribution/server/auth_providers.py b/llama_stack/distribution/server/auth_providers.py
new file mode 100644
index 000000000..723a65b77
--- /dev/null
+++ b/llama_stack/distribution/server/auth_providers.py
@@ -0,0 +1,376 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import ssl
+import time
+from abc import ABC, abstractmethod
+from asyncio import Lock
+from pathlib import Path
+from urllib.parse import parse_qs
+
+import httpx
+from jose import jwt
+from pydantic import BaseModel, Field, field_validator, model_validator
+from typing_extensions import Self
+
+from llama_stack.distribution.datatypes import AccessAttributes, AuthenticationConfig, AuthProviderType
+from llama_stack.log import get_logger
+
+logger = get_logger(name=__name__, category="auth")
+
+
+class TokenValidationResult(BaseModel):
+    principal: str | None = Field(
+        default=None,
+        description="The principal (username or persistent identifier) of the authenticated user",
+    )
+    access_attributes: AccessAttributes | None = Field(
+        default=None,
+        description="""
+        Structured user attributes for attribute-based access control.
+
+        These attributes determine which resources the user can access.
+        The model provides standard categories like "roles", "teams", "projects", and "namespaces".
+        Each attribute category contains a list of values that the user has for that category.
+        During access control checks, these values are compared against resource requirements.
+
+        Example with standard categories:
+        ```json
+        {
+            "roles": ["admin", "data-scientist"],
+            "teams": ["ml-team"],
+            "projects": ["llama-3"],
+            "namespaces": ["research"]
+        }
+        ```
+        """,
+    )
+
+
+class AuthResponse(TokenValidationResult):
+    """The format of the authentication response from the auth endpoint."""
+
+    message: str | None = Field(
+        default=None, description="Optional message providing additional context about the authentication result."
+    )
+
+
+class AuthRequestContext(BaseModel):
+    path: str = Field(description="The path of the request being authenticated")
+
+    headers: dict[str, str] = Field(description="HTTP headers from the original request (excluding Authorization)")
+
+    params: dict[str, list[str]] = Field(
+        description="Query parameters from the original request, parsed as dictionary of lists"
+    )
+
+
+class AuthRequest(BaseModel):
+    api_key: str = Field(description="The API key extracted from the Authorization header")
+
+    request: AuthRequestContext = Field(description="Context information about the request being authenticated")
+
+
+class AuthProvider(ABC):
+    """Abstract base class for authentication providers."""
+
+    @abstractmethod
+    async def validate_token(self, token: str, scope: dict | None = None) -> TokenValidationResult:
+        """Validate a token and return access attributes."""
+        pass
+
+    @abstractmethod
+    async def close(self):
+        """Clean up any resources."""
+        pass
+
+
+def get_attributes_from_claims(claims: dict[str, str], mapping: dict[str, str]) -> AccessAttributes:
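+    # Example (illustrative): claims {"sub": "alice", "groups": ["ml-team"]} with mapping
+    # {"sub": "roles", "groups": "teams"} yield AccessAttributes(roles=["alice"], teams=["ml-team"]).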
+    attributes = AccessAttributes()
+    for claim_key, attribute_key in mapping.items():
+        if claim_key not in claims or not hasattr(attributes, attribute_key):
+            continue
+        claim = claims[claim_key]
+        if isinstance(claim, list):
+            values = claim
+        else:
+            values = claim.split()
+
+        current = getattr(attributes, attribute_key)
+        if current:
+            current.extend(values)
+        else:
+            setattr(attributes, attribute_key, values)
+    return attributes
+
+
+class OAuth2JWKSConfig(BaseModel):
+    # The JWKS URI for collecting public keys
+    uri: str
+    key_recheck_period: int = Field(default=3600, description="The period to recheck the JWKS URI for key updates")
+
+
+class OAuth2IntrospectionConfig(BaseModel):
+    url: str
+    client_id: str
+    client_secret: str
+    send_secret_in_body: bool = False
+
+
+class OAuth2TokenAuthProviderConfig(BaseModel):
+    audience: str = "llama-stack"
+    verify_tls: bool = True
+    tls_cafile: Path | None = None
+    issuer: str | None = Field(default=None, description="The OIDC issuer URL.")
+    claims_mapping: dict[str, str] = Field(
+        default_factory=lambda: {
+            "sub": "roles",
+            "username": "roles",
+            "groups": "teams",
+            "team": "teams",
+            "project": "projects",
+            "tenant": "namespaces",
+            "namespace": "namespaces",
+        },
+    )
+    jwks: OAuth2JWKSConfig | None = None
+    introspection: OAuth2IntrospectionConfig | None = None
+
+    @field_validator("claims_mapping")
+    @classmethod
+    def validate_claims_mapping(cls, v):
+        for key, value in v.items():
+            if not value:
+                raise ValueError(f"claims_mapping value cannot be empty: {key}")
+            if value not in AccessAttributes.model_fields:
+                raise ValueError(f"claims_mapping value is not a valid attribute: {value}")
+        return v
+
+    @model_validator(mode="after")
+    def validate_mode(self) -> Self:
+        if not self.jwks and not self.introspection:
+            raise ValueError("One of jwks or introspection must be configured")
+        if self.jwks and self.introspection:
+            raise ValueError("At present only one of jwks or introspection should be configured")
+        return self
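+
+    # Example server config snippet for this provider (illustrative values; JWKS mode shown):
+    #   server:
+    #     auth:
+    #       provider_type: oauth2_token
+    #       config:
+    #         issuer: https://oidc.example.com
+    #         audience: llama-stack
+    #         jwks:
+    #           uri: https://oidc.example.com/.well-known/jwks.json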
+
+
+class OAuth2TokenAuthProvider(AuthProvider):
+    """
+    JWT token authentication provider that validates a JWT token and extracts access attributes.
+
+    This should be the standard authentication provider for most use cases.
+    """
+
+    def __init__(self, config: OAuth2TokenAuthProviderConfig):
+        self.config = config
+        self._jwks_at: float = 0.0
+        self._jwks: dict[str, dict] = {}
+        self._jwks_lock = Lock()
+
+    async def validate_token(self, token: str, scope: dict | None = None) -> TokenValidationResult:
+        if self.config.jwks:
+            return await self.validate_jwt_token(token, scope)
+        if self.config.introspection:
+            return await self.introspect_token(token, scope)
+        raise ValueError("One of jwks or introspection must be configured")
+
+    async def validate_jwt_token(self, token: str, scope: dict | None = None) -> TokenValidationResult:
+        """Validate a token using the JWT token."""
+        await self._refresh_jwks()
+
+        try:
+            header = jwt.get_unverified_header(token)
+            kid = header["kid"]
+            if kid not in self._jwks:
+                raise ValueError(f"Unknown key ID: {kid}")
+            key_data = self._jwks[kid]
+            algorithm = header.get("alg", "RS256")
+            claims = jwt.decode(
+                token,
+                key_data,
+                algorithms=[algorithm],
+                audience=self.config.audience,
+                issuer=self.config.issuer,
+            )
+        except Exception as exc:
+            raise ValueError(f"Invalid JWT token: {token}") from exc
+
+        # There are other standard claims, the most relevant of which is `scope`.
+        # We should incorporate these into the access attributes.
+        principal = claims["sub"]
+        access_attributes = get_attributes_from_claims(claims, self.config.claims_mapping)
+        return TokenValidationResult(
+            principal=principal,
+            access_attributes=access_attributes,
+        )
+
+    async def introspect_token(self, token: str, scope: dict | None = None) -> TokenValidationResult:
+        """Validate a token using token introspection as defined by RFC 7662."""
+        form = {
+            "token": token,
+        }
+        if self.config.introspection is None:
+            raise ValueError("Introspection is not configured")
+
+        if self.config.introspection.send_secret_in_body:
+            form["client_id"] = self.config.introspection.client_id
+            form["client_secret"] = self.config.introspection.client_secret
+            auth = None
+        else:
+            auth = (self.config.introspection.client_id, self.config.introspection.client_secret)
+        ssl_ctxt = None
+        if self.config.tls_cafile:
+            ssl_ctxt = ssl.create_default_context(cafile=self.config.tls_cafile.as_posix())
+        try:
+            async with httpx.AsyncClient(verify=ssl_ctxt) as client:
+                response = await client.post(
+                    self.config.introspection.url,
+                    data=form,
+                    auth=auth,
+                    timeout=10.0,  # Add a reasonable timeout
+                )
+                if response.status_code != 200:
+                    logger.warning(f"Token introspection failed with status code: {response.status_code}")
+                    raise ValueError(f"Token introspection failed: {response.status_code}")
+
+                fields = response.json()
+                if not fields["active"]:
+                    raise ValueError("Token not active")
+                principal = fields["sub"] or fields["username"]
+                access_attributes = get_attributes_from_claims(fields, self.config.claims_mapping)
+                return TokenValidationResult(
+                    principal=principal,
+                    access_attributes=access_attributes,
+                )
+        except httpx.TimeoutException:
+            logger.exception("Token introspection request timed out")
+            raise
+        except ValueError:
+            # Re-raise ValueError exceptions to preserve their message
+            raise
+        except Exception as e:
+            logger.exception("Error during token introspection")
+            raise ValueError("Token introspection error") from e
+
+    async def close(self):
+        pass
+
+    async def _refresh_jwks(self) -> None:
+        """
+        Refresh the JWKS cache.
+
+        This is a simple cache that expires after a certain amount of time (defined by `key_recheck_period`).
+        If the cache is expired, we refresh the JWKS from the JWKS URI.
+
+        Note: Kubernetes does not fully implement the OIDC protocol; in particular:
+            * it has no user authentication flows
+            * it has no refresh tokens
+        """
+        async with self._jwks_lock:
+            if self.config.jwks is None:
+                raise ValueError("JWKS is not configured")
+            if time.time() - self._jwks_at > self.config.jwks.key_recheck_period:
+                verify = self.config.tls_cafile.as_posix() if self.config.tls_cafile else self.config.verify_tls
+                async with httpx.AsyncClient(verify=verify) as client:
+                    res = await client.get(self.config.jwks.uri, timeout=5)
+                    res.raise_for_status()
+                    jwks_data = res.json()["keys"]
+                    updated = {}
+                    for k in jwks_data:
+                        kid = k["kid"]
+                        # Store the entire key object as it may be needed for different algorithms
+                        updated[kid] = k
+                    self._jwks = updated
+                    self._jwks_at = time.time()
+
+
+class CustomAuthProviderConfig(BaseModel):
+    endpoint: str
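+
+    # Example server config snippet for this provider (illustrative endpoint):
+    #   server:
+    #     auth:
+    #       provider_type: custom
+    #       config:
+    #         endpoint: https://auth.example.com/validate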
+
+
+class CustomAuthProvider(AuthProvider):
+    """Custom authentication provider that uses an external endpoint."""
+
+    def __init__(self, config: CustomAuthProviderConfig):
+        self.config = config
+        self._client = None
+
+    async def validate_token(self, token: str, scope: dict | None = None) -> TokenValidationResult:
+        """Validate a token using the custom authentication endpoint."""
+        if scope is None:
+            scope = {}
+
+        headers = dict(scope.get("headers", []))
+        path = scope.get("path", "")
+        request_headers = {k.decode(): v.decode() for k, v in headers.items()}
+
+        # Remove sensitive headers
+        if "authorization" in request_headers:
+            del request_headers["authorization"]
+
+        query_string = scope.get("query_string", b"").decode()
+        params = parse_qs(query_string)
+
+        # Build the auth request model
+        auth_request = AuthRequest(
+            api_key=token,
+            request=AuthRequestContext(
+                path=path,
+                headers=request_headers,
+                params=params,
+            ),
+        )
+
+        # Validate with authentication endpoint
+        try:
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    self.config.endpoint,
+                    json=auth_request.model_dump(),
+                    timeout=10.0,  # Add a reasonable timeout
+                )
+                if response.status_code != 200:
+                    logger.warning(f"Authentication failed with status code: {response.status_code}")
+                    raise ValueError(f"Authentication failed: {response.status_code}")
+
+                # Parse and validate the auth response
+                try:
+                    response_data = response.json()
+                    auth_response = AuthResponse(**response_data)
+                    return auth_response
+                except Exception as e:
+                    logger.exception("Error parsing authentication response")
+                    raise ValueError("Invalid authentication response format") from e
+
+        except httpx.TimeoutException:
+            logger.exception("Authentication request timed out")
+            raise
+        except ValueError:
+            # Re-raise ValueError exceptions to preserve their message
+            raise
+        except Exception as e:
+            logger.exception("Error during authentication")
+            raise ValueError("Authentication service error") from e
+
+    async def close(self):
+        """Close the HTTP client."""
+        if self._client:
+            await self._client.aclose()
+            self._client = None
+
+
+def create_auth_provider(config: AuthenticationConfig) -> AuthProvider:
+    """Factory function to create the appropriate auth provider."""
+    provider_type = config.provider_type.lower()
+
+    if provider_type == "custom":
+        return CustomAuthProvider(CustomAuthProviderConfig.model_validate(config.config))
+    elif provider_type == "oauth2_token":
+        return OAuth2TokenAuthProvider(OAuth2TokenAuthProviderConfig.model_validate(config.config))
+    else:
+        supported_providers = ", ".join([t.value for t in AuthProviderType])
+        raise ValueError(f"Unsupported auth provider type: {provider_type}. Supported types are: {supported_providers}")
diff --git a/llama_stack/distribution/server/quota.py b/llama_stack/distribution/server/quota.py
new file mode 100644
index 000000000..ddbffae64
--- /dev/null
+++ b/llama_stack/distribution/server/quota.py
@@ -0,0 +1,110 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import json
+import time
+from datetime import datetime, timedelta, timezone
+
+from starlette.types import ASGIApp, Receive, Scope, Send
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.kvstore.api import KVStore
+from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
+from llama_stack.providers.utils.kvstore.kvstore import kvstore_impl
+
+logger = get_logger(name=__name__, category="quota")
+
+
+class QuotaMiddleware:
+    """
+    ASGI middleware that enforces separate quotas for authenticated and anonymous clients
+    within a configurable time window.
+
+    - For authenticated requests, it uses the client ID stored in the request scope by the
+      authentication middleware (the bearer token from the `Authorization` header).
+    - For anonymous requests, it falls back to the IP address of the client.
+
+    Requests are counted in a KV store (e.g., SQLite), and HTTP 429 is returned
+    once a client exceeds its quota.
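+
+    Counters are keyed per client and time window as "quota:<client_id>:<window_index>",
+    where the window index is the current UNIX time divided by window_seconds.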
+    """
+
+    def __init__(
+        self,
+        app: ASGIApp,
+        kv_config: KVStoreConfig,
+        anonymous_max_requests: int,
+        authenticated_max_requests: int,
+        window_seconds: int = 86400,
+    ):
+        self.app = app
+        self.kv_config = kv_config
+        self.kv: KVStore | None = None
+        self.anonymous_max_requests = anonymous_max_requests
+        self.authenticated_max_requests = authenticated_max_requests
+        self.window_seconds = window_seconds
+
+        if isinstance(self.kv_config, SqliteKVStoreConfig):
+            logger.warning(
+                "QuotaMiddleware: Using SQLite backend. Expiry/TTL is not enforced; cleanup is manual. "
+                f"window_seconds={self.window_seconds}"
+            )
+
+    async def _get_kv(self) -> KVStore:
+        if self.kv is None:
+            self.kv = await kvstore_impl(self.kv_config)
+        return self.kv
+
+    async def __call__(self, scope: Scope, receive: Receive, send: Send):
+        if scope["type"] == "http":
+            # pick key & limit based on auth
+            auth_id = scope.get("authenticated_client_id")
+            if auth_id:
+                key_id = auth_id
+                limit = self.authenticated_max_requests
+            else:
+                # fallback to IP
+                client = scope.get("client")
+                key_id = client[0] if client else "anonymous"
+                limit = self.anonymous_max_requests
+
+            current_window = int(time.time() // self.window_seconds)
+            key = f"quota:{key_id}:{current_window}"
+
+            try:
+                kv = await self._get_kv()
+                prev = await kv.get(key) or "0"
+                count = int(prev) + 1
+
+                if int(prev) == 0:
+                    # Set with expiration datetime when it is the first request in the window.
+                    expiration = datetime.now(timezone.utc) + timedelta(seconds=self.window_seconds)
+                    await kv.set(key, str(count), expiration=expiration)
+                else:
+                    await kv.set(key, str(count))
+            except Exception:
+                logger.exception("Failed to access KV store for quota")
+                return await self._send_error(send, 500, "Quota service error")
+
+            if count > limit:
+                logger.warning(
+                    "Quota exceeded for client %s: %d/%d",
+                    key_id,
+                    count,
+                    limit,
+                )
+                return await self._send_error(send, 429, "Quota exceeded")
+
+        return await self.app(scope, receive, send)
+
+    async def _send_error(self, send: Send, status: int, message: str):
+        await send(
+            {
+                "type": "http.response.start",
+                "status": status,
+                "headers": [[b"content-type", b"application/json"]],
+            }
+        )
+        body = json.dumps({"error": {"message": message}}).encode()
+        await send({"type": "http.response.body", "body": body})
diff --git a/llama_stack/distribution/server/endpoints.py b/llama_stack/distribution/server/routes.py
similarity index 55%
rename from llama_stack/distribution/server/endpoints.py
rename to llama_stack/distribution/server/routes.py
index 98f01c067..ea66fec5a 100644
--- a/llama_stack/distribution/server/endpoints.py
+++ b/llama_stack/distribution/server/routes.py
@@ -6,21 +6,23 @@
 
 import inspect
 import re
-from typing import Dict, List
+from collections.abc import Callable
+from typing import Any
 
-from pydantic import BaseModel
+from aiohttp import hdrs
+from starlette.routing import Route
 
 from llama_stack.apis.tools import RAGToolRuntime, SpecialToolGroup
 from llama_stack.apis.version import LLAMA_STACK_API_VERSION
 from llama_stack.distribution.resolver import api_protocol_map
 from llama_stack.providers.datatypes import Api
 
-
-class ApiEndpoint(BaseModel):
-    route: str
-    method: str
-    name: str
-    descriptive_name: str | None = None
+EndpointFunc = Callable[..., Any]
+PathParams = dict[str, str]
+RouteInfo = tuple[EndpointFunc, str]
+PathImpl = dict[str, RouteInfo]
+RouteImpls = dict[str, PathImpl]
+RouteMatch = tuple[EndpointFunc, PathParams, str]
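+# RouteImpls nests lowercase HTTP method -> compiled path regex -> (endpoint function, route path),
+# and find_matching_route returns the matched function, its extracted path params, and that path.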
 
 
 def toolgroup_protocol_map():
@@ -29,13 +31,13 @@ def toolgroup_protocol_map():
     }
 
 
-def get_all_api_endpoints() -> Dict[Api, List[ApiEndpoint]]:
+def get_all_api_routes() -> dict[Api, list[Route]]:
     apis = {}
 
     protocols = api_protocol_map()
     toolgroup_protocols = toolgroup_protocol_map()
     for api, protocol in protocols.items():
-        endpoints = []
+        routes = []
         protocol_methods = inspect.getmembers(protocol, predicate=inspect.isfunction)
 
         # HACK ALERT
@@ -52,26 +54,28 @@ def get_all_api_endpoints() -> Dict[Api, List[ApiEndpoint]]:
             if not hasattr(method, "__webmethod__"):
                 continue
 
-            webmethod = method.__webmethod__
-            route = f"/{LLAMA_STACK_API_VERSION}/{webmethod.route.lstrip('/')}"
-            if webmethod.method == "GET":
-                method = "get"
-            elif webmethod.method == "DELETE":
-                method = "delete"
+            # The __webmethod__ attribute is dynamically added by the @webmethod decorator
+            # mypy doesn't know about this dynamic attribute, so we ignore the attr-defined error
+            webmethod = method.__webmethod__  # type: ignore[attr-defined]
+            path = f"/{LLAMA_STACK_API_VERSION}/{webmethod.route.lstrip('/')}"
+            if webmethod.method == hdrs.METH_GET:
+                http_method = hdrs.METH_GET
+            elif webmethod.method == hdrs.METH_DELETE:
+                http_method = hdrs.METH_DELETE
             else:
-                method = "post"
-            endpoints.append(
-                ApiEndpoint(route=route, method=method, name=name, descriptive_name=webmethod.descriptive_name)
-            )
+                http_method = hdrs.METH_POST
+            routes.append(
+                Route(path=path, methods=[http_method], name=name, endpoint=None)
+            )  # setting endpoint to None since we don't use a Router object
 
-        apis[api] = endpoints
+        apis[api] = routes
 
     return apis
 
 
-def initialize_endpoint_impls(impls):
-    endpoints = get_all_api_endpoints()
-    endpoint_impls = {}
+def initialize_route_impls(impls: dict[Api, Any]) -> RouteImpls:
+    routes = get_all_api_routes()
+    route_impls: RouteImpls = {}
 
     def _convert_path_to_regex(path: str) -> str:
         # Convert {param} to named capture groups
@@ -84,29 +88,34 @@ def initialize_endpoint_impls(impls):
 
         return f"^{pattern}$"
 
-    for api, api_endpoints in endpoints.items():
+    for api, api_routes in routes.items():
         if api not in impls:
             continue
-        for endpoint in api_endpoints:
+        for route in api_routes:
             impl = impls[api]
-            func = getattr(impl, endpoint.name)
-            if endpoint.method not in endpoint_impls:
-                endpoint_impls[endpoint.method] = {}
-            endpoint_impls[endpoint.method][_convert_path_to_regex(endpoint.route)] = (
+            func = getattr(impl, route.name)
+            # Get the first (and typically only) method from the set, filtering out HEAD
+            available_methods = [m for m in route.methods if m != "HEAD"]
+            if not available_methods:
+                continue  # Skip if only HEAD method is available
+            method = available_methods[0].lower()
+            if method not in route_impls:
+                route_impls[method] = {}
+            route_impls[method][_convert_path_to_regex(route.path)] = (
                 func,
-                endpoint.descriptive_name or endpoint.route,
+                route.path,
             )
 
-    return endpoint_impls
+    return route_impls
 
 
-def find_matching_endpoint(method, path, endpoint_impls):
+def find_matching_route(method: str, path: str, route_impls: RouteImpls) -> RouteMatch:
     """Find the matching endpoint implementation for a given method and path.
 
     Args:
         method: HTTP method (GET, POST, etc.)
         path: URL path to match against
-        endpoint_impls: A dictionary of endpoint implementations
+        route_impls: A dictionary of endpoint implementations
 
     Returns:
         A tuple of (endpoint_function, path_params, descriptive_name)
@@ -114,7 +123,7 @@ def find_matching_endpoint(method, path, endpoint_impls):
     Raises:
         ValueError: If no matching endpoint is found
     """
-    impls = endpoint_impls.get(method.lower())
+    impls = route_impls.get(method.lower())
     if not impls:
         raise ValueError(f"No endpoint found for {path}")
 
diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py
index 6e9941d1c..6c88bbfe9 100644
--- a/llama_stack/distribution/server/server.py
+++ b/llama_stack/distribution/server/server.py
@@ -6,36 +6,42 @@
 
 import argparse
 import asyncio
+import functools
 import inspect
 import json
 import os
+import ssl
 import sys
 import traceback
 import warnings
+from collections.abc import Callable
 from contextlib import asynccontextmanager
 from importlib.metadata import version as parse_version
 from pathlib import Path
-from typing import Any, List, Optional, Union
+from typing import Annotated, Any
 
+import rich.pretty
 import yaml
+from aiohttp import hdrs
 from fastapi import Body, FastAPI, HTTPException, Request
 from fastapi import Path as FastapiPath
 from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, StreamingResponse
 from openai import BadRequestError
 from pydantic import BaseModel, ValidationError
-from typing_extensions import Annotated
 
-from llama_stack.distribution.datatypes import LoggingConfig, StackRunConfig
+from llama_stack.distribution.datatypes import AuthenticationRequiredError, LoggingConfig, StackRunConfig
 from llama_stack.distribution.distribution import builtin_automatically_routed_apis
 from llama_stack.distribution.request_headers import (
     PROVIDER_DATA_VAR,
     request_provider_data_context,
 )
 from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.server.endpoints import (
-    find_matching_endpoint,
-    initialize_endpoint_impls,
+from llama_stack.distribution.server.routes import (
+    find_matching_route,
+    get_all_api_routes,
+    initialize_route_impls,
 )
 from llama_stack.distribution.stack import (
     construct_stack,
@@ -58,7 +64,7 @@ from llama_stack.providers.utils.telemetry.tracing import (
 )
 
 from .auth import AuthenticationMiddleware
-from .endpoints import get_all_api_endpoints
+from .quota import QuotaMiddleware
 
 REPO_ROOT = Path(__file__).parent.parent.parent.parent
 
@@ -91,7 +97,7 @@ async def global_exception_handler(request: Request, exc: Exception):
     return JSONResponse(status_code=http_exc.status_code, content={"error": {"detail": http_exc.detail}})
 
 
-def translate_exception(exc: Exception) -> Union[HTTPException, RequestValidationError]:
+def translate_exception(exc: Exception) -> HTTPException | RequestValidationError:
     if isinstance(exc, ValidationError):
         exc = RequestValidationError(exc.errors())
 
@@ -115,10 +121,12 @@ def translate_exception(exc: Exception) -> Union[HTTPException, RequestValidatio
         return HTTPException(status_code=400, detail=str(exc))
     elif isinstance(exc, PermissionError):
         return HTTPException(status_code=403, detail=f"Permission denied: {str(exc)}")
-    elif isinstance(exc, TimeoutError):
+    elif isinstance(exc, asyncio.TimeoutError | TimeoutError):
         return HTTPException(status_code=504, detail=f"Operation timed out: {str(exc)}")
     elif isinstance(exc, NotImplementedError):
         return HTTPException(status_code=501, detail=f"Not implemented: {str(exc)}")
+    elif isinstance(exc, AuthenticationRequiredError):
+        return HTTPException(status_code=401, detail=f"Authentication required: {str(exc)}")
     else:
         return HTTPException(
             status_code=500,
@@ -140,7 +148,7 @@ async def shutdown(app):
                 await asyncio.wait_for(impl.shutdown(), timeout=5)
             else:
                 logger.warning("No shutdown method for %s", impl_name)
-        except asyncio.TimeoutError:
+        except (asyncio.TimeoutError, TimeoutError):
             logger.exception("Shutdown timeout for %s ", impl_name, exc_info=True)
         except (Exception, asyncio.CancelledError) as e:
             logger.exception("Failed to shutdown %s: %s", impl_name, {e})
@@ -187,11 +195,31 @@ async def sse_generator(event_gen_coroutine):
         )
 
 
-def create_dynamic_typed_route(func: Any, method: str, route: str):
-    async def endpoint(request: Request, **kwargs):
+async def log_request_pre_validation(request: Request):
+    if request.method in ("POST", "PUT", "PATCH"):
+        try:
+            body_bytes = await request.body()
+            if body_bytes:
+                try:
+                    parsed_body = json.loads(body_bytes.decode())
+                    log_output = rich.pretty.pretty_repr(parsed_body)
+                except (json.JSONDecodeError, UnicodeDecodeError):
+                    log_output = repr(body_bytes)
+                logger.debug(f"Incoming raw request body for {request.method} {request.url.path}:\n{log_output}")
+            else:
+                logger.debug(f"Incoming {request.method} {request.url.path} request with empty body.")
+        except Exception as e:
+            logger.warning(f"Could not read or log request body for {request.method} {request.url.path}: {e}")
+
+
+def create_dynamic_typed_route(func: Any, method: str, route: str) -> Callable:
+    @functools.wraps(func)
+    async def route_handler(request: Request, **kwargs):
         # Get auth attributes from the request scope
         user_attributes = request.scope.get("user_attributes", {})
 
+        await log_request_pre_validation(request)
+
         # Use context manager with both provider data and auth attributes
         with request_provider_data_context(request.headers, user_attributes):
             is_streaming = is_streaming_request(func.__name__, request, **kwargs)
@@ -226,9 +254,9 @@ def create_dynamic_typed_route(func: Any, method: str, route: str):
             for param in new_params[1:]
         ]
 
-    endpoint.__signature__ = sig.replace(parameters=new_params)
+    route_handler.__signature__ = sig.replace(parameters=new_params)
 
-    return endpoint
+    return route_handler
 
 
 class TracingMiddleware:
@@ -250,17 +278,28 @@ class TracingMiddleware:
             logger.debug(f"Bypassing custom routing for FastAPI built-in path: {path}")
             return await self.app(scope, receive, send)
 
-        if not hasattr(self, "endpoint_impls"):
-            self.endpoint_impls = initialize_endpoint_impls(self.impls)
+        if not hasattr(self, "route_impls"):
+            self.route_impls = initialize_route_impls(self.impls)
 
         try:
-            _, _, trace_path = find_matching_endpoint(scope.get("method", "GET"), path, self.endpoint_impls)
+            _, _, trace_path = find_matching_route(scope.get("method", hdrs.METH_GET), path, self.route_impls)
         except ValueError:
             # If no matching endpoint is found, pass through to FastAPI
-            logger.debug(f"No matching endpoint found for path: {path}, falling back to FastAPI")
+            logger.debug(f"No matching route found for path: {path}, falling back to FastAPI")
             return await self.app(scope, receive, send)
 
-        trace_context = await start_trace(trace_path, {"__location__": "server", "raw_path": path})
+        trace_attributes = {"__location__": "server", "raw_path": path}
+
+        # Extract W3C trace context headers and store as trace attributes
+        headers = dict(scope.get("headers", []))
+        traceparent = headers.get(b"traceparent", b"").decode()
+        if traceparent:
+            trace_attributes["traceparent"] = traceparent
+        tracestate = headers.get(b"tracestate", b"").decode()
+        if tracestate:
+            trace_attributes["tracestate"] = tracestate
+
+        trace_context = await start_trace(trace_path, trace_attributes)
 
         async def send_with_trace_id(message):
             if message["type"] == "http.response.start":
@@ -315,7 +354,7 @@ class ClientVersionMiddleware:
         return await self.app(scope, receive, send)
 
 
-def main(args: Optional[argparse.Namespace] = None):
+def main(args: argparse.Namespace | None = None):
     """Start the LlamaStack server."""
     parser = argparse.ArgumentParser(description="Start the LlamaStack server.")
     parser.add_argument(
@@ -338,22 +377,11 @@ def main(args: Optional[argparse.Namespace] = None):
         default=int(os.getenv("LLAMA_STACK_PORT", 8321)),
         help="Port to listen on",
     )
-    parser.add_argument("--disable-ipv6", action="store_true", help="Whether to disable IPv6 support")
     parser.add_argument(
         "--env",
         action="append",
         help="Environment variables in KEY=value format. Can be specified multiple times.",
     )
-    parser.add_argument(
-        "--tls-keyfile",
-        help="Path to TLS key file for HTTPS",
-        required="--tls-certfile" in sys.argv,
-    )
-    parser.add_argument(
-        "--tls-certfile",
-        help="Path to TLS certificate file for HTTPS",
-        required="--tls-keyfile" in sys.argv,
-    )
 
     # Determine whether the server args are being passed by the "run" command, if this is the case
     # the args will be passed as a Namespace object to the main function, otherwise they will be
@@ -361,14 +389,6 @@ def main(args: Optional[argparse.Namespace] = None):
     if args is None:
         args = parser.parse_args()
 
-    # Check for deprecated argument usage
-    if "--yaml-config" in sys.argv:
-        warnings.warn(
-            "The '--yaml-config' argument is deprecated and will be removed in a future version. Use '--config' instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-
     log_line = ""
     if args.config:
         # if the user provided a config file, use it, even if template was specified
@@ -382,10 +402,10 @@ def main(args: Optional[argparse.Namespace] = None):
             raise ValueError(f"Template {args.template} does not exist")
         log_line = f"Using template {args.template} config file: {config_file}"
     else:
-        raise ValueError("Either --yaml-config or --template must be provided")
+        raise ValueError("Either --config or --template must be provided")
 
     logger_config = None
-    with open(config_file, "r") as fp:
+    with open(config_file) as fp:
         config_contents = yaml.safe_load(fp)
         if isinstance(config_contents, dict) and (cfg := config_contents.get("logging_config")):
             logger_config = LoggingConfig(**cfg)
@@ -419,9 +439,49 @@ def main(args: Optional[argparse.Namespace] = None):
         app.add_middleware(ClientVersionMiddleware)
 
     # Add authentication middleware if configured
-    if config.server.auth and config.server.auth.endpoint:
-        logger.info(f"Enabling authentication with endpoint: {config.server.auth.endpoint}")
-        app.add_middleware(AuthenticationMiddleware, auth_endpoint=config.server.auth.endpoint)
+    if config.server.auth:
+        logger.info(f"Enabling authentication with provider: {config.server.auth.provider_type.value}")
+        app.add_middleware(AuthenticationMiddleware, auth_config=config.server.auth)
+    else:
+        if config.server.quota:
+            quota = config.server.quota
+            logger.warning(
+                "Configured authenticated_max_requests (%d) but no auth is enabled; "
+                "falling back to anonymous_max_requests (%d) for all the requests",
+                quota.authenticated_max_requests,
+                quota.anonymous_max_requests,
+            )
+
+    if config.server.quota:
+        logger.info("Enabling quota middleware for authenticated and anonymous clients")
+
+        quota = config.server.quota
+        anonymous_max_requests = quota.anonymous_max_requests
+        # if auth is disabled, use the anonymous max requests
+        authenticated_max_requests = quota.authenticated_max_requests if config.server.auth else anonymous_max_requests
+
+        kv_config = quota.kvstore
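+        # Only a daily window is supported here; map the configured period to its length in seconds.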
+        window_map = {"day": 86400}
+        window_seconds = window_map[quota.period.value]
+
+        app.add_middleware(
+            QuotaMiddleware,
+            kv_config=kv_config,
+            anonymous_max_requests=anonymous_max_requests,
+            authenticated_max_requests=authenticated_max_requests,
+            window_seconds=window_seconds,
+        )
+
+    # --- CORS middleware for local development ---
+    # TODO: move to reverse proxy
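+    # Allow the locally running UI origin (LLAMA_STACK_UI_PORT, default 8322) to call the API from the browser.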
+    ui_port = os.environ.get("LLAMA_STACK_UI_PORT", 8322)
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=[f"http://localhost:{ui_port}"],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
 
     try:
         impls = asyncio.run(construct_stack(config))
@@ -434,7 +494,7 @@ def main(args: Optional[argparse.Namespace] = None):
     else:
         setup_logger(TelemetryAdapter(TelemetryConfig(), {}))
 
-    all_endpoints = get_all_api_endpoints()
+    all_routes = get_all_api_routes()
 
     if config.apis:
         apis_to_serve = set(config.apis)
@@ -452,24 +512,29 @@ def main(args: Optional[argparse.Namespace] = None):
     for api_str in apis_to_serve:
         api = Api(api_str)
 
-        endpoints = all_endpoints[api]
+        routes = all_routes[api]
         impl = impls[api]
 
-        for endpoint in endpoints:
-            if not hasattr(impl, endpoint.name):
+        for route in routes:
+            if not hasattr(impl, route.name):
                 # ideally this should be a typing violation already
-                raise ValueError(f"Could not find method {endpoint.name} on {impl}!!")
+                raise ValueError(f"Could not find method {route.name} on {impl}!")
 
-            impl_method = getattr(impl, endpoint.name)
-            logger.debug(f"{endpoint.method.upper()} {endpoint.route}")
+            impl_method = getattr(impl, route.name)
+            # Filter out HEAD method since it's automatically handled by FastAPI for GET routes
+            available_methods = [m for m in route.methods if m != "HEAD"]
+            if not available_methods:
+                raise ValueError(f"No methods found for {route.name} on {impl}")
+            method = available_methods[0]
+            logger.debug(f"{method} {route.path}")
 
             with warnings.catch_warnings():
                 warnings.filterwarnings("ignore", category=UserWarning, module="pydantic._internal._fields")
-                getattr(app, endpoint.method)(endpoint.route, response_model=None)(
+                getattr(app, method.lower())(route.path, response_model=None)(
                     create_dynamic_typed_route(
                         impl_method,
-                        endpoint.method,
-                        endpoint.route,
+                        method.lower(),
+                        route.path,
                     )
                 )
 
@@ -487,21 +552,24 @@ def main(args: Optional[argparse.Namespace] = None):
     port = args.port or config.server.port
 
     ssl_config = None
-    if args.tls_keyfile:
-        keyfile = args.tls_keyfile
-        certfile = args.tls_certfile
-    else:
-        keyfile = config.server.tls_keyfile
-        certfile = config.server.tls_certfile
+    keyfile = config.server.tls_keyfile
+    certfile = config.server.tls_certfile
 
     if keyfile and certfile:
         ssl_config = {
             "ssl_keyfile": keyfile,
             "ssl_certfile": certfile,
         }
-        logger.info(f"HTTPS enabled with certificates:\n  Key: {keyfile}\n  Cert: {certfile}")
+        if config.server.tls_cafile:
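+            # Providing a CA bundle enables mutual TLS: clients must present a certificate signed by this CA.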
+            ssl_config["ssl_ca_certs"] = config.server.tls_cafile
+            ssl_config["ssl_cert_reqs"] = ssl.CERT_REQUIRED
+            logger.info(
+                f"HTTPS enabled with certificates:\n  Key: {keyfile}\n  Cert: {certfile}\n  CA: {config.server.tls_cafile}"
+            )
+        else:
+            logger.info(f"HTTPS enabled with certificates:\n  Key: {keyfile}\n  Cert: {certfile}")
 
-    listen_host = ["::", "0.0.0.0"] if not args.disable_ipv6 else "0.0.0.0"
+    listen_host = config.server.host or ["::", "0.0.0.0"]
     logger.info(f"Listening on {listen_host}:{port}")
 
     uvicorn_config = {
@@ -517,7 +585,7 @@ def main(args: Optional[argparse.Namespace] = None):
     uvicorn.run(**uvicorn_config)
 
 
-def extract_path_params(route: str) -> List[str]:
+def extract_path_params(route: str) -> list[str]:
     segments = route.split("/")
     params = [seg[1:-1] for seg in segments if seg.startswith("{") and seg.endswith("}")]
     # to handle path params like {param:path}
diff --git a/llama_stack/distribution/stack.py b/llama_stack/distribution/stack.py
index a6dc3d2a0..fc68dc016 100644
--- a/llama_stack/distribution/stack.py
+++ b/llama_stack/distribution/stack.py
@@ -8,7 +8,7 @@ import importlib.resources
 import os
 import re
 import tempfile
-from typing import Any, Dict, Optional
+from typing import Any
 
 import yaml
 
@@ -90,7 +90,7 @@ RESOURCES = [
 ]
 
 
-async def register_resources(run_config: StackRunConfig, impls: Dict[Api, Any]):
+async def register_resources(run_config: StackRunConfig, impls: dict[Api, Any]):
     for rsrc, api, register_method, list_method in RESOURCES:
         objects = getattr(run_config, rsrc)
         if api not in impls:
@@ -197,7 +197,7 @@ def validate_env_pair(env_pair: str) -> tuple[str, str]:
         ) from e
 
 
-def add_internal_implementations(impls: Dict[Api, Any], run_config: StackRunConfig) -> None:
+def add_internal_implementations(impls: dict[Api, Any], run_config: StackRunConfig) -> None:
     """Add internal implementations (inspect and providers) to the implementations dictionary.
 
     Args:
@@ -220,8 +220,8 @@ def add_internal_implementations(impls: Dict[Api, Any], run_config: StackRunConf
 # Produces a stack of providers for the given run config. Not all APIs may be
 # asked for in the run config.
 async def construct_stack(
-    run_config: StackRunConfig, provider_registry: Optional[ProviderRegistry] = None
-) -> Dict[Api, Any]:
+    run_config: StackRunConfig, provider_registry: ProviderRegistry | None = None
+) -> dict[Api, Any]:
     dist_registry, _ = await create_dist_registry(run_config.metadata_store, run_config.image_name)
     impls = await resolve_impls(run_config, provider_registry or get_provider_registry(run_config), dist_registry)
 
@@ -244,7 +244,7 @@ def get_stack_run_config_from_template(template: str) -> StackRunConfig:
 
 
 def run_config_from_adhoc_config_spec(
-    adhoc_config_spec: str, provider_registry: Optional[ProviderRegistry] = None
+    adhoc_config_spec: str, provider_registry: ProviderRegistry | None = None
 ) -> StackRunConfig:
     """
     Create an adhoc distribution from a list of API providers.
diff --git a/llama_stack/distribution/start_stack.sh b/llama_stack/distribution/start_stack.sh
index d3e13c7dc..996935a5e 100755
--- a/llama_stack/distribution/start_stack.sh
+++ b/llama_stack/distribution/start_stack.sh
@@ -29,7 +29,7 @@ error_handler() {
 trap 'error_handler ${LINENO}' ERR
 
 if [ $# -lt 3 ]; then
-  echo "Usage: $0     "
+  echo "Usage: $0    [--config ] [--env KEY=VALUE]..."
   exit 1
 fi
 
@@ -40,37 +40,51 @@ env_path_or_name="$1"
 container_image="localhost/$env_path_or_name"
 shift
 
-yaml_config="$1"
-shift
-
 port="$1"
 shift
 
 SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
 source "$SCRIPT_DIR/common.sh"
 
-# Initialize env_vars as an string
+# Initialize variables
+yaml_config=""
 env_vars=""
 other_args=""
-# Process environment variables from --env arguments
+
+# Process remaining arguments
 while [[ $# -gt 0 ]]; do
   case "$1" in
-  --env)
-
-    if [[ -n "$2" ]]; then
-      env_vars="$env_vars --env $2"
-      shift 2
-    else
-      echo -e "${RED}Error: --env requires a KEY=VALUE argument${NC}" >&2
-      exit 1
-    fi
-    ;;
-  *)
-    other_args="$other_args $1"
-    shift
-    ;;
+    --config)
+      if [[ -n "$2" ]]; then
+        yaml_config="$2"
+        shift 2
+      else
+        echo -e "${RED}Error: $1 requires a CONFIG argument${NC}" >&2
+        exit 1
+      fi
+      ;;
+    --env)
+      if [[ -n "$2" ]]; then
+        env_vars="$env_vars --env $2"
+        shift 2
+      else
+        echo -e "${RED}Error: --env requires a KEY=VALUE argument${NC}" >&2
+        exit 1
+      fi
+      ;;
+    *)
+      other_args="$other_args $1"
+      shift
+      ;;
   esac
 done
+
+# Check if yaml_config is required based on env_type
+if [[ "$env_type" == "venv" || "$env_type" == "conda" ]] && [ -z "$yaml_config" ]; then
+  echo -e "${RED}Error: --config is required for venv and conda environments${NC}" >&2
+  exit 1
+fi
+
 PYTHON_BINARY="python"
 case "$env_type" in
   "venv")
@@ -106,8 +120,14 @@ esac
 if [[ "$env_type" == "venv" || "$env_type" == "conda" ]]; then
     set -x
 
+    if [ -n "$yaml_config" ]; then
+        yaml_config_arg="--config $yaml_config"
+    else
+        yaml_config_arg=""
+    fi
+
     $PYTHON_BINARY -m llama_stack.distribution.server.server \
-    --yaml-config "$yaml_config" \
+    $yaml_config_arg \
     --port "$port" \
     $env_vars \
     $other_args
@@ -149,15 +169,26 @@ elif [[ "$env_type" == "container" ]]; then
         version_tag=$(curl -s $URL | jq -r '.info.version')
     fi
 
-    $CONTAINER_BINARY run $CONTAINER_OPTS -it \
+    # Build the command with optional yaml config
+    cmd="$CONTAINER_BINARY run $CONTAINER_OPTS -it \
     -p $port:$port \
     $env_vars \
-    -v "$yaml_config:/app/config.yaml" \
     $mounts \
     --env LLAMA_STACK_PORT=$port \
     --entrypoint python \
     $container_image:$version_tag \
-    -m llama_stack.distribution.server.server \
-    --yaml-config /app/config.yaml \
-    $other_args
+    -m llama_stack.distribution.server.server"
+
+    # Add yaml config if provided, otherwise use default
+    if [ -n "$yaml_config" ]; then
+        cmd="$cmd -v $yaml_config:/app/run.yaml --config /app/run.yaml"
+    else
+        cmd="$cmd --config /app/run.yaml"
+    fi
+
+    # Add any other args
+    cmd="$cmd $other_args"
+
+    # Execute the command
+    eval $cmd
 fi
diff --git a/llama_stack/distribution/store/registry.py b/llama_stack/distribution/store/registry.py
index 76b66cc7a..0e84854c2 100644
--- a/llama_stack/distribution/store/registry.py
+++ b/llama_stack/distribution/store/registry.py
@@ -6,7 +6,7 @@
 
 import asyncio
 from contextlib import asynccontextmanager
-from typing import Dict, List, Optional, Protocol, Tuple
+from typing import Protocol
 
 import pydantic
 
@@ -20,13 +20,13 @@ logger = get_logger(__name__, category="core")
 
 
 class DistributionRegistry(Protocol):
-    async def get_all(self) -> List[RoutableObjectWithProvider]: ...
+    async def get_all(self) -> list[RoutableObjectWithProvider]: ...
 
     async def initialize(self) -> None: ...
 
-    async def get(self, identifier: str) -> Optional[RoutableObjectWithProvider]: ...
+    async def get(self, identifier: str) -> RoutableObjectWithProvider | None: ...
 
-    def get_cached(self, identifier: str) -> Optional[RoutableObjectWithProvider]: ...
+    def get_cached(self, identifier: str) -> RoutableObjectWithProvider | None: ...
 
     async def update(self, obj: RoutableObjectWithProvider) -> RoutableObjectWithProvider: ...
 
@@ -36,17 +36,17 @@ class DistributionRegistry(Protocol):
 
 
 REGISTER_PREFIX = "distributions:registry"
-KEY_VERSION = "v8"
+KEY_VERSION = "v9"
 KEY_FORMAT = f"{REGISTER_PREFIX}:{KEY_VERSION}::" + "{type}:{identifier}"
 
 
-def _get_registry_key_range() -> Tuple[str, str]:
+def _get_registry_key_range() -> tuple[str, str]:
     """Returns the start and end keys for the registry range query."""
     start_key = f"{REGISTER_PREFIX}:{KEY_VERSION}"
     return start_key, f"{start_key}\xff"
 
 
-def _parse_registry_values(values: List[str]) -> List[RoutableObjectWithProvider]:
+def _parse_registry_values(values: list[str]) -> list[RoutableObjectWithProvider]:
     """Utility function to parse registry values into RoutableObjectWithProvider objects."""
     all_objects = []
     for value in values:
@@ -67,16 +67,16 @@ class DiskDistributionRegistry(DistributionRegistry):
     async def initialize(self) -> None:
         pass
 
-    def get_cached(self, type: str, identifier: str) -> Optional[RoutableObjectWithProvider]:
+    def get_cached(self, type: str, identifier: str) -> RoutableObjectWithProvider | None:
         # Disk registry does not have a cache
         raise NotImplementedError("Disk registry does not have a cache")
 
-    async def get_all(self) -> List[RoutableObjectWithProvider]:
+    async def get_all(self) -> list[RoutableObjectWithProvider]:
         start_key, end_key = _get_registry_key_range()
-        values = await self.kvstore.range(start_key, end_key)
+        values = await self.kvstore.values_in_range(start_key, end_key)
         return _parse_registry_values(values)
 
-    async def get(self, type: str, identifier: str) -> Optional[RoutableObjectWithProvider]:
+    async def get(self, type: str, identifier: str) -> RoutableObjectWithProvider | None:
         json_str = await self.kvstore.get(KEY_FORMAT.format(type=type, identifier=identifier))
         if not json_str:
             return None
@@ -113,7 +113,7 @@ class DiskDistributionRegistry(DistributionRegistry):
 class CachedDiskDistributionRegistry(DiskDistributionRegistry):
     def __init__(self, kvstore: KVStore):
         super().__init__(kvstore)
-        self.cache: Dict[Tuple[str, str], RoutableObjectWithProvider] = {}
+        self.cache: dict[tuple[str, str], RoutableObjectWithProvider] = {}
         self._initialized = False
         self._initialize_lock = asyncio.Lock()
         self._cache_lock = asyncio.Lock()
@@ -134,7 +134,7 @@ class CachedDiskDistributionRegistry(DiskDistributionRegistry):
                 return
 
             start_key, end_key = _get_registry_key_range()
-            values = await self.kvstore.range(start_key, end_key)
+            values = await self.kvstore.values_in_range(start_key, end_key)
             objects = _parse_registry_values(values)
 
             async with self._locked_cache() as cache:
@@ -147,15 +147,15 @@ class CachedDiskDistributionRegistry(DiskDistributionRegistry):
     async def initialize(self) -> None:
         await self._ensure_initialized()
 
-    def get_cached(self, type: str, identifier: str) -> Optional[RoutableObjectWithProvider]:
+    def get_cached(self, type: str, identifier: str) -> RoutableObjectWithProvider | None:
         return self.cache.get((type, identifier), None)
 
-    async def get_all(self) -> List[RoutableObjectWithProvider]:
+    async def get_all(self) -> list[RoutableObjectWithProvider]:
         await self._ensure_initialized()
         async with self._locked_cache() as cache:
             return list(cache.values())
 
-    async def get(self, type: str, identifier: str) -> Optional[RoutableObjectWithProvider]:
+    async def get(self, type: str, identifier: str) -> RoutableObjectWithProvider | None:
         await self._ensure_initialized()
         cache_key = (type, identifier)
 
@@ -189,7 +189,7 @@ class CachedDiskDistributionRegistry(DiskDistributionRegistry):
 
 
 async def create_dist_registry(
-    metadata_store: Optional[KVStoreConfig],
+    metadata_store: KVStoreConfig | None,
     image_name: str,
 ) -> tuple[CachedDiskDistributionRegistry, KVStore]:
     # instantiate kvstore for storing and retrieving distribution metadata
diff --git a/llama_stack/distribution/ui/Containerfile b/llama_stack/distribution/ui/Containerfile
index 0126d1867..5d2dc933b 100644
--- a/llama_stack/distribution/ui/Containerfile
+++ b/llama_stack/distribution/ui/Containerfile
@@ -5,7 +5,8 @@ FROM python:3.12-slim
 WORKDIR /app
 COPY . /app/
 RUN /usr/local/bin/python -m pip install --upgrade pip && \
-    /usr/local/bin/pip3 install -r requirements.txt
+    /usr/local/bin/pip3 install -r requirements.txt && \
+    /usr/local/bin/pip3 install -r llama_stack/distribution/ui/requirements.txt
 EXPOSE 8501
 
-ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+ENTRYPOINT ["streamlit", "run", "llama_stack/distribution/ui/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
diff --git a/llama_stack/distribution/ui/README.md b/llama_stack/distribution/ui/README.md
index 51c2d2bc2..0e96690ec 100644
--- a/llama_stack/distribution/ui/README.md
+++ b/llama_stack/distribution/ui/README.md
@@ -48,3 +48,6 @@ uv run --with ".[ui]" streamlit run llama_stack/distribution/ui/app.py
 | TOGETHER_API_KEY           | API key for Together provider      | (empty string)            |
 | SAMBANOVA_API_KEY          | API key for SambaNova provider     | (empty string)            |
 | OPENAI_API_KEY             | API key for OpenAI provider        | (empty string)            |
+| KEYCLOAK_URL               | URL for Keycloak authentication    | (empty string)            |
+| KEYCLOAK_REALM             | Keycloak realm                     | default                   |
+| KEYCLOAK_CLIENT_ID         | Client ID for Keycloak auth        | (empty string)            |
\ No newline at end of file
diff --git a/llama_stack/distribution/ui/app.py b/llama_stack/distribution/ui/app.py
index 441f65d20..a9a28b445 100644
--- a/llama_stack/distribution/ui/app.py
+++ b/llama_stack/distribution/ui/app.py
@@ -50,6 +50,42 @@ def main():
     )
     pg.run()
 
+def main2():
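+    # Debug view showing the authenticated Keycloak user's details; not wired into the page navigation above.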
+    from dataclasses import asdict
+    st.subheader(f"Welcome {keycloak.user_info['preferred_username']}!")
+    st.write(f"Here is your user information:")
+    st.write(asdict(keycloak))
+
+def get_access_token() -> str | None:
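+    # The token is stored in session state by the Keycloak login flow in the __main__ block below.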
+    return st.session_state.get('access_token')
 
 if __name__ == "__main__":
-    main()
+
+    from streamlit_keycloak import login
+    import os
+    
+    keycloak_url = os.environ.get("KEYCLOAK_URL")
+    keycloak_realm = os.environ.get("KEYCLOAK_REALM", "default")
+    keycloak_client_id = os.environ.get("KEYCLOAK_CLIENT_ID")
+    
+    if keycloak_url and keycloak_client_id:
+        keycloak = login(
+            url=keycloak_url,
+            realm=keycloak_realm,
+            client_id=keycloak_client_id,
+            custom_labels={
+                "labelButton": "Sign in to kvant",
+                "labelLogin": "Please sign in to your kvant account.",
+                "errorNoPopup": "Unable to open the authentication popup. Allow popups and refresh the page to proceed.",
+                "errorPopupClosed": "Authentication popup was closed manually.",
+                "errorFatal": "Unable to connect to Keycloak using the current configuration."   
+            },
+            auto_refresh=True,
+        )
+
+        if keycloak.authenticated:
+            st.session_state['access_token'] = keycloak.access_token
+            main()
+    # TBD - add other authentications
+    else:
+        main()
diff --git a/llama_stack/distribution/ui/modules/api.py b/llama_stack/distribution/ui/modules/api.py
index d5395c5b9..a426e59ba 100644
--- a/llama_stack/distribution/ui/modules/api.py
+++ b/llama_stack/distribution/ui/modules/api.py
@@ -5,14 +5,15 @@
 # the root directory of this source tree.
 
 import os
-from typing import Optional
 
 from llama_stack_client import LlamaStackClient
+from llama_stack.distribution.ui.app import get_access_token
 
 
 class LlamaStackApi:
     def __init__(self):
         self.client = LlamaStackClient(
+            api_key=get_access_token(),
             base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"),
             provider_data={
                 "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY", ""),
@@ -23,11 +24,9 @@ class LlamaStackApi:
             },
         )
 
-    def run_scoring(self, row, scoring_function_ids: list[str], scoring_params: Optional[dict]):
+    def run_scoring(self, row, scoring_function_ids: list[str], scoring_params: dict | None):
         """Run scoring on a single row"""
         if not scoring_params:
             scoring_params = {fn_id: None for fn_id in scoring_function_ids}
         return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
 
-
-llama_stack_api = LlamaStackApi()
diff --git a/llama_stack/distribution/ui/page/distribution/datasets.py b/llama_stack/distribution/ui/page/distribution/datasets.py
index 6842b29a7..89f645ca8 100644
--- a/llama_stack/distribution/ui/page/distribution/datasets.py
+++ b/llama_stack/distribution/ui/page/distribution/datasets.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def datasets():
     st.header("Datasets")
 
-    datasets_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()}
+    datasets_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.datasets.list()}
     if len(datasets_info) > 0:
         selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
         st.json(datasets_info[selected_dataset], expanded=True)
diff --git a/llama_stack/distribution/ui/page/distribution/eval_tasks.py b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
index 492be4700..2b70f9202 100644
--- a/llama_stack/distribution/ui/page/distribution/eval_tasks.py
+++ b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def benchmarks():
     # Benchmarks Section
     st.header("Benchmarks")
 
-    benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}
+    benchmarks_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.benchmarks.list()}
 
     if len(benchmarks_info) > 0:
         selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect")
diff --git a/llama_stack/distribution/ui/page/distribution/models.py b/llama_stack/distribution/ui/page/distribution/models.py
index f29459098..3b96f179f 100644
--- a/llama_stack/distribution/ui/page/distribution/models.py
+++ b/llama_stack/distribution/ui/page/distribution/models.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def models():
     # Models Section
     st.header("Models")
-    models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
+    models_info = {m.identifier: m.to_dict() for m in LlamaStackApi().client.models.list()}
 
     selected_model = st.selectbox("Select a model", list(models_info.keys()))
     st.json(models_info[selected_model])
diff --git a/llama_stack/distribution/ui/page/distribution/providers.py b/llama_stack/distribution/ui/page/distribution/providers.py
index c660cb986..116237b13 100644
--- a/llama_stack/distribution/ui/page/distribution/providers.py
+++ b/llama_stack/distribution/ui/page/distribution/providers.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def providers():
     st.header("🔍 API Providers")
-    apis_providers_lst = llama_stack_api.client.providers.list()
+    apis_providers_lst = LlamaStackApi().client.providers.list()
     api_to_providers = {}
     for api_provider in apis_providers_lst:
         if api_provider.api in api_to_providers:
diff --git a/llama_stack/distribution/ui/page/distribution/scoring_functions.py b/llama_stack/distribution/ui/page/distribution/scoring_functions.py
index 193146356..3c3428f44 100644
--- a/llama_stack/distribution/ui/page/distribution/scoring_functions.py
+++ b/llama_stack/distribution/ui/page/distribution/scoring_functions.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def scoring_functions():
     st.header("Scoring Functions")
 
-    scoring_functions_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()}
+    scoring_functions_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.scoring_functions.list()}
 
     selected_scoring_function = st.selectbox("Select a scoring function", list(scoring_functions_info.keys()))
     st.json(scoring_functions_info[selected_scoring_function], expanded=True)
diff --git a/llama_stack/distribution/ui/page/distribution/shields.py b/llama_stack/distribution/ui/page/distribution/shields.py
index 67d66d64f..84b583980 100644
--- a/llama_stack/distribution/ui/page/distribution/shields.py
+++ b/llama_stack/distribution/ui/page/distribution/shields.py
@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def shields():
     # Shields Section
     st.header("Shields")
 
-    shields_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()}
+    shields_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.shields.list()}
 
     selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
     st.json(shields_info[selected_shield])
diff --git a/llama_stack/distribution/ui/page/distribution/vector_dbs.py b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
index 49a4f25bb..e7eb7b13b 100644
--- a/llama_stack/distribution/ui/page/distribution/vector_dbs.py
+++ b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def vector_dbs():
     st.header("Vector Databases")
-    vector_dbs_info = {v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()}
+    vector_dbs_info = {v.identifier: v.to_dict() for v in LlamaStackApi().client.vector_dbs.list()}
 
     if len(vector_dbs_info) > 0:
         selected_vector_db = st.selectbox("Select a vector database", list(vector_dbs_info.keys()))
diff --git a/llama_stack/distribution/ui/page/evaluations/app_eval.py b/llama_stack/distribution/ui/page/evaluations/app_eval.py
index d7bc6388c..13da6071e 100644
--- a/llama_stack/distribution/ui/page/evaluations/app_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/app_eval.py
@@ -9,7 +9,7 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import process_dataset
 
 
@@ -39,7 +39,7 @@ def application_evaluation_page():
 
     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = llama_stack_api.client.scoring_functions.list()
+    scoring_functions = LlamaStackApi().client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
     scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -48,7 +48,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )
 
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [m.identifier for m in available_models]
 
     scoring_params = {}
@@ -108,7 +108,7 @@ def application_evaluation_page():
                 progress_bar.progress(progress, text=progress_text)
 
                 # Run evaluation for current row
-                score_res = llama_stack_api.run_scoring(
+                score_res = LlamaStackApi().run_scoring(
                     r,
                     scoring_function_ids=selected_scoring_functions,
                     scoring_params=scoring_params,
diff --git a/llama_stack/distribution/ui/page/evaluations/native_eval.py b/llama_stack/distribution/ui/page/evaluations/native_eval.py
index 97f875e17..133c3b151 100644
--- a/llama_stack/distribution/ui/page/evaluations/native_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/native_eval.py
@@ -9,13 +9,13 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def select_benchmark_1():
     # Select Benchmarks
     st.subheader("1. Choose An Eval Task")
-    benchmarks = llama_stack_api.client.benchmarks.list()
+    benchmarks = LlamaStackApi().client.benchmarks.list()
     benchmarks = {et.identifier: et for et in benchmarks}
     benchmarks_names = list(benchmarks.keys())
     selected_benchmark = st.selectbox(
@@ -47,7 +47,7 @@ def define_eval_candidate_2():
         # Define Eval Candidate
         candidate_type = st.radio("Candidate Type", ["model", "agent"])
 
-        available_models = llama_stack_api.client.models.list()
+        available_models = LlamaStackApi().client.models.list()
         available_models = [model.identifier for model in available_models]
         selected_model = st.selectbox(
             "Choose a model",
@@ -167,7 +167,7 @@ def run_evaluation_3():
     eval_candidate = st.session_state["eval_candidate"]
 
     dataset_id = benchmarks[selected_benchmark].dataset_id
-    rows = llama_stack_api.client.datasets.iterrows(
+    rows = LlamaStackApi().client.datasets.iterrows(
         dataset_id=dataset_id,
     )
     total_rows = len(rows.data)
@@ -208,7 +208,7 @@ def run_evaluation_3():
             progress = i / len(rows)
             progress_bar.progress(progress, text=progress_text)
             # Run evaluation for current row
-            eval_res = llama_stack_api.client.eval.evaluate_rows(
+            eval_res = LlamaStackApi().client.eval.evaluate_rows(
                 benchmark_id=selected_benchmark,
                 input_rows=[r],
                 scoring_functions=benchmarks[selected_benchmark].scoring_functions,
diff --git a/llama_stack/distribution/ui/page/playground/chat.py b/llama_stack/distribution/ui/page/playground/chat.py
index 8e7345169..053ae42de 100644
--- a/llama_stack/distribution/ui/page/playground/chat.py
+++ b/llama_stack/distribution/ui/page/playground/chat.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 # Sidebar configurations
 with st.sidebar:
     st.header("Configuration")
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         "Choose a model",
@@ -103,7 +103,7 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
         else:
             strategy = {"type": "greedy"}
 
-        response = llama_stack_api.client.inference.chat_completion(
+        response = LlamaStackApi().client.inference.chat_completion(
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": prompt},
@@ -124,7 +124,7 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
                 message_placeholder.markdown(full_response + "▌")
             message_placeholder.markdown(full_response)
         else:
-            full_response = response
-            message_placeholder.markdown(full_response.completion_message.content)
+            full_response = response.completion_message.content
+            message_placeholder.markdown(full_response)
 
         st.session_state.messages.append({"role": "assistant", "content": full_response})
diff --git a/llama_stack/distribution/ui/page/playground/rag.py b/llama_stack/distribution/ui/page/playground/rag.py
index 696d89bc2..94e27a255 100644
--- a/llama_stack/distribution/ui/page/playground/rag.py
+++ b/llama_stack/distribution/ui/page/playground/rag.py
@@ -10,7 +10,7 @@ import streamlit as st
 from llama_stack_client import Agent, AgentEventLogger, RAGDocument
 
 from llama_stack.apis.common.content_types import ToolCallDelta
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import data_url_from_file
 
 
@@ -57,14 +57,14 @@ def rag_chat_page():
                     for i, uploaded_file in enumerate(uploaded_files)
                 ]
 
-                providers = llama_stack_api.client.providers.list()
+                providers = LlamaStackApi().client.providers.list()
                 vector_io_provider = None
 
                 for x in providers:
                     if x.api == "vector_io":
                         vector_io_provider = x.provider_id
 
-                llama_stack_api.client.vector_dbs.register(
+                LlamaStackApi().client.vector_dbs.register(
                     vector_db_id=vector_db_name,  # Use the user-provided name
                     embedding_dimension=384,
                     embedding_model="all-MiniLM-L6-v2",
@@ -72,7 +72,7 @@ def rag_chat_page():
                 )
 
                 # insert documents using the custom vector db name
-                llama_stack_api.client.tool_runtime.rag_tool.insert(
+                LlamaStackApi().client.tool_runtime.rag_tool.insert(
                     vector_db_id=vector_db_name,  # Use the user-provided name
                     documents=documents,
                     chunk_size_in_tokens=512,
@@ -93,7 +93,7 @@ def rag_chat_page():
         )
 
         # select memory banks
-        vector_dbs = llama_stack_api.client.vector_dbs.list()
+        vector_dbs = LlamaStackApi().client.vector_dbs.list()
         vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
         selected_vector_dbs = st.multiselect(
             label="Select Document Collections to use in RAG queries",
@@ -103,7 +103,7 @@ def rag_chat_page():
         )
 
         st.subheader("Inference Parameters", divider=True)
-        available_models = llama_stack_api.client.models.list()
+        available_models = LlamaStackApi().client.models.list()
         available_models = [model.identifier for model in available_models if model.model_type == "llm"]
         selected_model = st.selectbox(
             label="Choose a model",
@@ -167,7 +167,7 @@ def rag_chat_page():
     @st.cache_resource
     def create_agent():
         return Agent(
-            llama_stack_api.client,
+            LlamaStackApi().client,
             model=selected_model,
             instructions=system_prompt,
             sampling_params={
@@ -232,7 +232,7 @@ def rag_chat_page():
             st.session_state.messages.append({"role": "system", "content": system_prompt})
 
         # Query the vector DB
-        rag_response = llama_stack_api.client.tool_runtime.rag_tool.query(
+        rag_response = LlamaStackApi().client.tool_runtime.rag_tool.query(
             content=prompt, vector_db_ids=list(selected_vector_dbs)
         )
         prompt_context = rag_response.content
@@ -251,7 +251,7 @@ def rag_chat_page():
 
             # Run inference directly
             st.session_state.messages.append({"role": "user", "content": extended_prompt})
-            response = llama_stack_api.client.inference.chat_completion(
+            response = LlamaStackApi().client.inference.chat_completion(
                 messages=st.session_state.messages,
                 model_id=selected_model,
                 sampling_params={
diff --git a/llama_stack/distribution/ui/page/playground/tools.py b/llama_stack/distribution/ui/page/playground/tools.py
index 6c6a9fcfd..570bfb366 100644
--- a/llama_stack/distribution/ui/page/playground/tools.py
+++ b/llama_stack/distribution/ui/page/playground/tools.py
@@ -13,7 +13,7 @@ from llama_stack_client import Agent
 from llama_stack_client.lib.agents.react.agent import ReActAgent
 from llama_stack_client.lib.agents.react.tool_parser import ReActOutput
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 class AgentType(enum.Enum):
@@ -24,7 +24,7 @@ class AgentType(enum.Enum):
 def tool_chat_page():
     st.title("🛠 Tools")
 
-    client = llama_stack_api.client
+    client = LlamaStackApi().client
     models = client.models.list()
     model_list = [model.identifier for model in models if model.api_model_type == "llm"]
 
@@ -55,7 +55,7 @@ def tool_chat_page():
         )
 
         if "builtin::rag" in toolgroup_selection:
-            vector_dbs = llama_stack_api.client.vector_dbs.list() or []
+            vector_dbs = LlamaStackApi().client.vector_dbs.list() or []
             if not vector_dbs:
                 st.info("No vector databases available for selection.")
             vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
@@ -94,12 +94,16 @@ def tool_chat_page():
         st.subheader("Agent Configurations")
         st.subheader("Agent Type")
         agent_type = st.radio(
-            "Select Agent Type",
-            [AgentType.REGULAR, AgentType.REACT],
-            format_func=lambda x: x.value,
+            label="Select Agent Type",
+            options=["Regular", "ReAct"],
             on_change=reset_agent,
         )
 
+        if agent_type == "ReAct":
+            agent_type = AgentType.REACT
+        else:
+            agent_type = AgentType.REGULAR
+
         max_tokens = st.slider(
             "Max Tokens",
             min_value=0,
diff --git a/llama_stack/distribution/ui/requirements.txt b/llama_stack/distribution/ui/requirements.txt
index 61d42768d..862f969d6 100644
--- a/llama_stack/distribution/ui/requirements.txt
+++ b/llama_stack/distribution/ui/requirements.txt
@@ -1,5 +1,5 @@
-streamlit
+llama-stack-client>=0.2.9
 pandas
-llama-stack-client>=0.2.1
+streamlit
 streamlit-option-menu
-llama-stack>=0.2.1
+streamlit-keycloak
diff --git a/llama_stack/distribution/utils/config.py b/llama_stack/distribution/utils/config.py
index 5e78289b7..dece52460 100644
--- a/llama_stack/distribution/utils/config.py
+++ b/llama_stack/distribution/utils/config.py
@@ -4,10 +4,10 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 
-def redact_sensitive_fields(data: Dict[str, Any]) -> Dict[str, Any]:
+def redact_sensitive_fields(data: dict[str, Any]) -> dict[str, Any]:
     """Redact sensitive information from config before printing."""
     sensitive_patterns = ["api_key", "api_token", "password", "secret"]
 
@@ -18,7 +18,7 @@ def redact_sensitive_fields(data: Dict[str, Any]) -> Dict[str, Any]:
             return [_redact_value(i) for i in v]
         return v
 
-    def _redact_dict(d: Dict[str, Any]) -> Dict[str, Any]:
+    def _redact_dict(d: dict[str, Any]) -> dict[str, Any]:
         result = {}
         for k, v in d.items():
             if any(pattern in k.lower() for pattern in sensitive_patterns):
diff --git a/llama_stack/distribution/utils/config_dirs.py b/llama_stack/distribution/utils/config_dirs.py
index 9b9a7ceb3..c3e520f28 100644
--- a/llama_stack/distribution/utils/config_dirs.py
+++ b/llama_stack/distribution/utils/config_dirs.py
@@ -14,3 +14,5 @@ DISTRIBS_BASE_DIR = LLAMA_STACK_CONFIG_DIR / "distributions"
 DEFAULT_CHECKPOINT_DIR = LLAMA_STACK_CONFIG_DIR / "checkpoints"
 
 RUNTIME_BASE_DIR = LLAMA_STACK_CONFIG_DIR / "runtime"
+
+EXTERNAL_PROVIDERS_DIR = LLAMA_STACK_CONFIG_DIR / "providers.d"
diff --git a/llama_stack/distribution/utils/context.py b/llama_stack/distribution/utils/context.py
index c34079ac6..3fcd3315f 100644
--- a/llama_stack/distribution/utils/context.py
+++ b/llama_stack/distribution/utils/context.py
@@ -4,14 +4,15 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from collections.abc import AsyncGenerator
 from contextvars import ContextVar
-from typing import AsyncGenerator, List, TypeVar
+from typing import TypeVar
 
 T = TypeVar("T")
 
 
 def preserve_contexts_async_generator(
-    gen: AsyncGenerator[T, None], context_vars: List[ContextVar]
+    gen: AsyncGenerator[T, None], context_vars: list[ContextVar]
 ) -> AsyncGenerator[T, None]:
     """
     Wraps an async generator to preserve context variables across iterations.
diff --git a/llama_stack/distribution/utils/exec.py b/llama_stack/distribution/utils/exec.py
index 3bf3c81ce..7c2e00524 100644
--- a/llama_stack/distribution/utils/exec.py
+++ b/llama_stack/distribution/utils/exec.py
@@ -8,6 +8,7 @@ import logging
 import os
 import signal
 import subprocess
+import sys
 
 from termcolor import cprint
 
@@ -22,8 +23,10 @@ from llama_stack.distribution.utils.image_types import LlamaStackImageType
 
 def formulate_run_args(image_type, image_name, config, template_name) -> list:
     env_name = ""
-    if image_type == LlamaStackImageType.CONTAINER.value or config.container_image:
-        env_name = f"distribution-{template_name}" if template_name else config.container_image
+    if image_type == LlamaStackImageType.CONTAINER.value:
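+        # Fallback order: template name, then the configured container image, then the raw image name.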
+        env_name = (
+            f"distribution-{template_name}" if template_name else (config.container_image if config else image_name)
+        )
     elif image_type == LlamaStackImageType.CONDA.value:
         current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
         env_name = image_name or current_conda_env
@@ -31,6 +34,7 @@ def formulate_run_args(image_type, image_name, config, template_name) -> list:
             cprint(
                 "No current conda environment detected, please specify a conda environment name with --image-name",
                 color="red",
+                file=sys.stderr,
             )
             return
 
@@ -47,12 +51,13 @@ def formulate_run_args(image_type, image_name, config, template_name) -> list:
                     return envpath
             return None
 
-        print(f"Using conda environment: {env_name}")
+        cprint(f"Using conda environment: {env_name}", color="green", file=sys.stderr)
         conda_prefix = get_conda_prefix(env_name)
         if not conda_prefix:
             cprint(
                 f"Conda environment {env_name} does not exist.",
                 color="red",
+                file=sys.stderr,
             )
             return
 
@@ -61,6 +66,7 @@ def formulate_run_args(image_type, image_name, config, template_name) -> list:
             cprint(
                 f"Build file {build_file} does not exist.\n\nPlease run `llama stack build` or specify the correct conda environment name with --image-name",
                 color="red",
+                file=sys.stderr,
             )
             return
     else:
@@ -71,9 +77,10 @@ def formulate_run_args(image_type, image_name, config, template_name) -> list:
             cprint(
                 "No current virtual environment detected, please specify a virtual environment name with --image-name",
                 color="red",
+                file=sys.stderr,
             )
             return
-        print(f"Using virtual environment: {env_name}")
+        cprint(f"Using virtual environment: {env_name}", file=sys.stderr)
 
     script = importlib.resources.files("llama_stack") / "distribution/start_stack.sh"
     run_args = [
diff --git a/llama_stack/distribution/utils/prompt_for_config.py b/llama_stack/distribution/utils/prompt_for_config.py
index 9b2b99022..26f6920e0 100644
--- a/llama_stack/distribution/utils/prompt_for_config.py
+++ b/llama_stack/distribution/utils/prompt_for_config.py
@@ -8,12 +8,11 @@ import inspect
 import json
 import logging
 from enum import Enum
-from typing import Any, List, Literal, Optional, Type, Union, get_args, get_origin
+from typing import Annotated, Any, Literal, Union, get_args, get_origin
 
 from pydantic import BaseModel
 from pydantic.fields import FieldInfo
 from pydantic_core import PydanticUndefinedType
-from typing_extensions import Annotated
 
 log = logging.getLogger(__name__)
 
@@ -21,7 +20,7 @@ log = logging.getLogger(__name__)
 def is_list_of_primitives(field_type):
     """Check if a field type is a List of primitive types."""
     origin = get_origin(field_type)
-    if origin is List or origin is list:
+    if origin is list:
         args = get_args(field_type)
         if len(args) == 1 and args[0] in (int, float, str, bool):
             return True
@@ -53,7 +52,7 @@ def get_non_none_type(field_type):
     return next(arg for arg in get_args(field_type) if arg is not type(None))
 
 
-def manually_validate_field(model: Type[BaseModel], field_name: str, value: Any):
+def manually_validate_field(model: type[BaseModel], field_name: str, value: Any):
     validators = model.__pydantic_decorators__.field_validators
     for _name, validator in validators.items():
         if field_name in validator.info.fields:
@@ -126,7 +125,7 @@ def prompt_for_discriminated_union(
 #
 # doesn't support List[nested_class] yet or Dicts of any kind. needs a bunch of
 # unit tests for coverage.
-def prompt_for_config(config_type: type[BaseModel], existing_config: Optional[BaseModel] = None) -> BaseModel:
+def prompt_for_config(config_type: type[BaseModel], existing_config: BaseModel | None = None) -> BaseModel:
     """
     Recursively prompt the user for configuration values based on a Pydantic BaseModel.
 
diff --git a/llama_stack/log.py b/llama_stack/log.py
index 3835b74a1..f4184710a 100644
--- a/llama_stack/log.py
+++ b/llama_stack/log.py
@@ -6,8 +6,8 @@
 
 import logging
 import os
+import sys
 from logging.config import dictConfig
-from typing import Dict, Optional
 
 from rich.console import Console
 from rich.errors import MarkupError
@@ -33,7 +33,7 @@ CATEGORIES = [
 ]
 
 # Initialize category levels with default level
-_category_levels: Dict[str, int] = {category: DEFAULT_LOG_LEVEL for category in CATEGORIES}
+_category_levels: dict[str, int] = {category: DEFAULT_LOG_LEVEL for category in CATEGORIES}
 
 
 def config_to_category_levels(category: str, level: str):
@@ -49,7 +49,7 @@ def config_to_category_levels(category: str, level: str):
         Dict[str, int]: A dictionary mapping categories to their log levels.
     """
 
-    category_levels: Dict[str, int] = {}
+    category_levels: dict[str, int] = {}
     level_value = logging._nameToLevel.get(str(level).upper())
     if level_value is None:
         logging.warning(f"Unknown log level '{level}' for category '{category}'. Falling back to default 'INFO'.")
@@ -69,7 +69,7 @@ def config_to_category_levels(category: str, level: str):
     return category_levels
 
 
-def parse_yaml_config(yaml_config: LoggingConfig) -> Dict[str, int]:
+def parse_yaml_config(yaml_config: LoggingConfig) -> dict[str, int]:
     """
     Helper function to parse a yaml logging configuration found in the run.yaml
 
@@ -86,7 +86,7 @@ def parse_yaml_config(yaml_config: LoggingConfig) -> Dict[str, int]:
     return category_levels
 
 
-def parse_environment_config(env_config: str) -> Dict[str, int]:
+def parse_environment_config(env_config: str) -> dict[str, int]:
     """
     Parse the LLAMA_STACK_LOGGING environment variable and return a dictionary of category log levels.
 
@@ -131,7 +131,7 @@ class CustomRichHandler(RichHandler):
                 self.markup = original_markup
 
 
-def setup_logging(category_levels: Dict[str, int], log_file: str | None) -> None:
+def setup_logging(category_levels: dict[str, int], log_file: str | None) -> None:
     """
     Configure logging based on the provided category log levels and an optional log file.
 
@@ -211,7 +211,7 @@ def setup_logging(category_levels: Dict[str, int], log_file: str | None) -> None
 
 
 def get_logger(
-    name: str, category: str = "uncategorized", config: Optional[LoggingConfig] | None = None
+    name: str, category: str = "uncategorized", config: LoggingConfig | None = None
 ) -> logging.LoggerAdapter:
     """
     Returns a logger with the specified name and category.
@@ -235,7 +235,7 @@ def get_logger(
 
 env_config = os.environ.get("LLAMA_STACK_LOGGING", "")
 if env_config:
-    cprint(f"Environment variable LLAMA_STACK_LOGGING found: {env_config}", "yellow")
+    cprint(f"Environment variable LLAMA_STACK_LOGGING found: {env_config}", color="yellow", file=sys.stderr)
     _category_levels.update(parse_environment_config(env_config))
 
 log_file = os.environ.get("LLAMA_STACK_LOG_FILE")
diff --git a/llama_stack/models/llama/checkpoint.py b/llama_stack/models/llama/checkpoint.py
index 2bae08a69..c9e0030e3 100644
--- a/llama_stack/models/llama/checkpoint.py
+++ b/llama_stack/models/llama/checkpoint.py
@@ -7,14 +7,14 @@
 import concurrent.futures
 import re
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 import numpy as np
 import torch
 from fairscale.nn.model_parallel.initialize import get_model_parallel_rank, get_model_parallel_world_size
 
 
-def map_mp_rank(old_mp_size: int, new_mp_size: int, new_mp_rank: int) -> List[int]:
+def map_mp_rank(old_mp_size: int, new_mp_size: int, new_mp_rank: int) -> list[int]:
     """Map a new MP rank to a list of old MP ranks given a change in MP size."""
     if new_mp_size % old_mp_size == 0:
         # Read old MP shard and split it into smaller ones
@@ -31,12 +31,12 @@ def map_mp_rank(old_mp_size: int, new_mp_size: int, new_mp_rank: int) -> List[in
 
 
 def maybe_reshard_state_dict(
-    ckpt_paths: List[Path],
+    ckpt_paths: list[Path],
     n_kv_heads: int,
-    moe_num_experts: Optional[int] = None,
-    map_location: Union[str, torch.device] = "cpu",
+    moe_num_experts: int | None = None,
+    map_location: str | torch.device = "cpu",
     mmap: bool = True,
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     if str(map_location) == "cpu":
         torch.set_default_tensor_type(torch.BFloat16Tensor)
     else:
@@ -97,18 +97,18 @@ _MOE_WEIGHT_COLUMN_KEY = {"feed_forward.experts.moe_w_out_eF_D"}
 
 
 def reshard_mp(
-    state_dicts: List[Dict[str, torch.Tensor]],
+    state_dicts: list[dict[str, torch.Tensor]],
     size: int,
     rank: int,
     repeat_qk_qv: int = 1,
-) -> Dict[str, torch.Tensor]:
+) -> dict[str, torch.Tensor]:
     """
     Reshard a list of state dicts into a single state dict given a change in MP size.
     If the list has more than one state dict, we concatenate the values of the same
     key across all state dicts. Otherwise, we just slice it for the current MP rank.
     """
 
-    def concat_or_chunk(tensors: List[torch.Tensor], dim: int) -> torch.Tensor:
+    def concat_or_chunk(tensors: list[torch.Tensor], dim: int) -> torch.Tensor:
         if len(tensors) > 1:
             return torch.cat(tensors, dim=dim)
         return tensors[0].chunk(size, dim=dim)[rank].clone()
@@ -144,7 +144,7 @@ def reshard_mp(
     column_regex = re.compile("|".join(column_keys))
     row_regex = re.compile("|".join(row_keys))
 
-    output: Dict[str, torch.Tensor] = {}
+    output: dict[str, torch.Tensor] = {}
     with concurrent.futures.ThreadPoolExecutor() as executor:
         # Note: only processes keys in the first state dict.
         # Assumes keys are the same across all state dicts.
@@ -154,7 +154,7 @@ def reshard_mp(
     return output
 
 
-def convert_moe_weights(state_dict: Dict[str, Any], num_experts: int) -> Dict[str, Any]:
+def convert_moe_weights(state_dict: dict[str, Any], num_experts: int) -> dict[str, Any]:
     routed_keys = _MOE_WEIGHT_ROW_KEY | _MOE_WEIGHT_COLUMN_KEY
     routed_regex = re.compile("|".join(routed_keys))
     keys = list(state_dict.keys())
diff --git a/llama_stack/models/llama/datatypes.py b/llama_stack/models/llama/datatypes.py
index 48cb51005..f9f094c3d 100644
--- a/llama_stack/models/llama/datatypes.py
+++ b/llama_stack/models/llama/datatypes.py
@@ -7,10 +7,9 @@
 import base64
 from enum import Enum
 from io import BytesIO
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Annotated, Any, Literal
 
 from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator
-from typing_extensions import Annotated
 
 # The goal is that these set of types are relevant for all Llama models.
 # That isn't the current state yet -- e.g., BuiltinTool is somewhat specific to
@@ -31,21 +30,21 @@ class BuiltinTool(Enum):
     code_interpreter = "code_interpreter"
 
 
-Primitive = Union[str, int, float, bool, None]
-RecursiveType = Union[Primitive, List[Primitive], Dict[str, Primitive]]
+Primitive = str | int | float | bool | None
+RecursiveType = Primitive | list[Primitive] | dict[str, Primitive]
 
 
 class ToolCall(BaseModel):
     call_id: str
-    tool_name: Union[BuiltinTool, str]
+    tool_name: BuiltinTool | str
     # Plan is to deprecate the Dict in favor of a JSON string
     # that is parsed on the client side instead of trying to manage
     # the recursive type here.
    # Making this a union so that the client side can start prepping for this change.
     # Eventually, we will remove both the Dict and arguments_json field,
     # and arguments will just be a str
-    arguments: Union[str, Dict[str, RecursiveType]]
-    arguments_json: Optional[str] = None
+    arguments: str | dict[str, RecursiveType]
+    arguments_json: str | None = None
 
     @field_validator("tool_name", mode="before")
     @classmethod
@@ -91,15 +90,15 @@ class StopReason(Enum):
 
 class ToolParamDefinition(BaseModel):
     param_type: str
-    description: Optional[str] = None
-    required: Optional[bool] = True
-    default: Optional[Any] = None
+    description: str | None = None
+    required: bool | None = True
+    default: Any | None = None
 
 
 class ToolDefinition(BaseModel):
-    tool_name: Union[BuiltinTool, str]
-    description: Optional[str] = None
-    parameters: Optional[Dict[str, ToolParamDefinition]] = None
+    tool_name: BuiltinTool | str
+    description: str | None = None
+    parameters: dict[str, ToolParamDefinition] | None = None
 
     @field_validator("tool_name", mode="before")
     @classmethod
@@ -119,7 +118,7 @@ class RawMediaItem(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     @field_serializer("data")
-    def serialize_data(self, data: Optional[bytes], _info):
+    def serialize_data(self, data: bytes | None, _info):
         if data is None:
             return None
         return base64.b64encode(data).decode("utf-8")
@@ -137,9 +136,9 @@ class RawTextItem(BaseModel):
     text: str
 
 
-RawContentItem = Annotated[Union[RawTextItem, RawMediaItem], Field(discriminator="type")]
+RawContentItem = Annotated[RawTextItem | RawMediaItem, Field(discriminator="type")]
 
-RawContent = str | RawContentItem | List[RawContentItem]
+RawContent = str | RawContentItem | list[RawContentItem]
 
 
 class RawMessage(BaseModel):
@@ -147,17 +146,17 @@ class RawMessage(BaseModel):
     content: RawContent
 
     # This is for RAG but likely should be absorbed into content
-    context: Optional[RawContent] = None
+    context: RawContent | None = None
 
     # These are for the output message coming from the assistant
-    stop_reason: Optional[StopReason] = None
-    tool_calls: List[ToolCall] = Field(default_factory=list)
+    stop_reason: StopReason | None = None
+    tool_calls: list[ToolCall] = Field(default_factory=list)
 
 
 class GenerationResult(BaseModel):
     token: int
     text: str
-    logprobs: Optional[List[float]] = None
+    logprobs: list[float] | None = None
 
     source: Literal["input"] | Literal["output"]
 
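During the transition described in the comments above, ToolCall accepts either encoding of arguments. A minimal construction sketch, assuming the import path of this file (all values are made up for illustration):

    from llama_stack.models.llama.datatypes import ToolCall

    call = ToolCall(
        call_id="call-0",                    # illustrative id
        tool_name="get_weather",             # custom tool as a plain str (not a BuiltinTool)
        arguments={"city": "Paris"},         # dict form, slated for deprecation
        arguments_json='{"city": "Paris"}',  # JSON-string form clients should migrate to
    )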
diff --git a/llama_stack/models/llama/llama3/args.py b/llama_stack/models/llama/llama3/args.py
index f7e4b4557..4f92874f5 100644
--- a/llama_stack/models/llama/llama3/args.py
+++ b/llama_stack/models/llama/llama3/args.py
@@ -6,7 +6,6 @@
 
 from dataclasses import dataclass
 from enum import Enum
-from typing import Optional
 
 
 class QuantizationScheme(Enum):
@@ -15,8 +14,8 @@ class QuantizationScheme(Enum):
 
 @dataclass
 class QuantizationArgs:
-    scheme: Optional[QuantizationScheme] = None
-    group_size: Optional[int] = None
+    scheme: QuantizationScheme | None = None
+    group_size: int | None = None
     spinquant: bool = False
 
     def __init__(self, **kwargs):
@@ -39,10 +38,10 @@ class ModelArgs:
     dim: int = 4096
     n_layers: int = 32
     n_heads: int = 32
-    n_kv_heads: Optional[int] = None
+    n_kv_heads: int | None = None
     vocab_size: int = -1
     multiple_of: int = 256  # make SwiGLU hidden layer size multiple of large power of 2
-    ffn_dim_multiplier: Optional[float] = None
+    ffn_dim_multiplier: float | None = None
     norm_eps: float = 1e-5
     rope_theta: float = 500000
     use_scaled_rope: bool = False
@@ -55,8 +54,8 @@ class ModelArgs:
     vision_max_num_chunks: int = 4
     vision_num_cross_attention_layers: int = -1
 
-    quantization_args: Optional[QuantizationArgs] = None
-    lora_args: Optional[LoRAArgs] = None
+    quantization_args: QuantizationArgs | None = None
+    lora_args: LoRAArgs | None = None
 
     def __init__(self, **kwargs):
         for k, v in kwargs.items():
diff --git a/llama_stack/models/llama/llama3/chat_format.py b/llama_stack/models/llama/llama3/chat_format.py
index fe7a7a898..7bb05d8db 100644
--- a/llama_stack/models/llama/llama3/chat_format.py
+++ b/llama_stack/models/llama/llama3/chat_format.py
@@ -8,7 +8,6 @@ import io
 import json
 import uuid
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Tuple
 
 from PIL import Image as PIL_Image
 
@@ -29,14 +28,14 @@ from .tool_utils import ToolUtils
 
 @dataclass
 class VisionInput:
-    mask: List[List[int]]
-    images: List[PIL_Image.Image]
+    mask: list[list[int]]
+    images: list[PIL_Image.Image]
 
 
 @dataclass
 class LLMInput:
-    tokens: List[int]
-    vision: Optional[VisionInput] = None
+    tokens: list[int]
+    vision: VisionInput | None = None
 
 
 def role_str(role: Role) -> str:
@@ -50,7 +49,7 @@ def role_str(role: Role) -> str:
 
 
 class ChatFormat:
-    possible_headers: Dict[Role, str]
+    possible_headers: dict[Role, str]
 
     def __init__(self, tokenizer: Tokenizer):
         self.tokenizer = tokenizer
@@ -58,7 +57,7 @@ class ChatFormat:
         self.possible_headers = {role: f"<|start_header_id|>{role_str(role)}<|end_header_id|>\n\n" for role in Role}
         self.vision_token = self.tokenizer.special_tokens["<|image|>"]
 
-    def _encode_header(self, role: str) -> List[int]:
+    def _encode_header(self, role: str) -> list[int]:
         tokens = []
         tokens.append(self.tokenizer.special_tokens["<|start_header_id|>"])
         tokens.extend(self.tokenizer.encode("ipython" if role == "tool" else role, bos=False, eos=False))
@@ -70,7 +69,7 @@ class ChatFormat:
         tokens, images = self._encode_content(content, bos=True)
         return self._model_input_from_tokens_images(tokens, images)
 
-    def _encode_content(self, content: RawContent, bos: bool = False) -> Tuple[List[int], List[PIL_Image.Image]]:
+    def _encode_content(self, content: RawContent, bos: bool = False) -> tuple[list[int], list[PIL_Image.Image]]:
         tokens = []
         images = []
 
@@ -107,7 +106,7 @@ class ChatFormat:
 
     def encode_message(
         self, message: RawMessage, tool_prompt_format: ToolPromptFormat
-    ) -> Tuple[List[int], List[PIL_Image.Image]]:
+    ) -> tuple[list[int], list[PIL_Image.Image]]:
         tokens = self._encode_header(message.role)
         images = []
 
@@ -145,8 +144,8 @@ class ChatFormat:
 
     def encode_dialog_prompt(
         self,
-        messages: List[RawMessage],
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
+        messages: list[RawMessage],
+        tool_prompt_format: ToolPromptFormat | None = None,
     ) -> LLMInput:
         tool_prompt_format = tool_prompt_format or ToolPromptFormat.json
         tokens = []
@@ -163,7 +162,7 @@ class ChatFormat:
         return self._model_input_from_tokens_images(tokens, images)
 
     # TODO(this should be generic, not only for assistant messages)
-    def decode_assistant_message(self, tokens: List[int], stop_reason: StopReason) -> RawMessage:
+    def decode_assistant_message(self, tokens: list[int], stop_reason: StopReason) -> RawMessage:
         content = self.tokenizer.decode(tokens)
 
         return self.decode_assistant_message_from_content(content, stop_reason)
@@ -234,7 +233,7 @@ class ChatFormat:
             tool_calls=tool_calls,
         )
 
-    def _model_input_from_tokens_images(self, tokens: List[int], images: List[PIL_Image.Image]) -> LLMInput:
+    def _model_input_from_tokens_images(self, tokens: list[int], images: list[PIL_Image.Image]) -> LLMInput:
         vision_input = None
         if len(images) > 0:
             vision_input = VisionInput(
@@ -249,9 +248,9 @@ class ChatFormat:
 
 
 def create_vision_mask(
-    tokens: List[int],
+    tokens: list[int],
     vision_token: int,
-) -> List[List[int]]:
+) -> list[list[int]]:
     vision_token_locations = [i for i, token in enumerate(tokens) if token == vision_token]
     if len(vision_token_locations) == 0:
         return []
diff --git a/llama_stack/models/llama/llama3/generation.py b/llama_stack/models/llama/llama3/generation.py
index 35c140707..fe7be5ea9 100644
--- a/llama_stack/models/llama/llama3/generation.py
+++ b/llama_stack/models/llama/llama3/generation.py
@@ -15,8 +15,8 @@ import json
 import os
 import sys
 import time
+from collections.abc import Callable, Generator
 from pathlib import Path
-from typing import Callable, Generator, List, Optional
 
 import torch
 import torch.nn.functional as F
@@ -41,8 +41,8 @@ class Llama3:
         ckpt_dir: str,
         max_seq_len: int,
         max_batch_size: int,
-        world_size: Optional[int] = None,
-        quantization_mode: Optional[QuantizationMode] = None,
+        world_size: int | None = None,
+        quantization_mode: QuantizationMode | None = None,
         seed: int = 1,
         device: str = "cuda",
     ):
@@ -82,7 +82,7 @@ class Llama3:
         ckpt_paths = sorted(Path(ckpt_dir).glob("*.pth"))
         assert len(ckpt_paths) > 0, f"no checkpoint files found in {ckpt_dir}"
         print(f"Loading a checkpoint (shards={len(ckpt_paths)}, current-mp-size={world_size})")
-        with open(Path(ckpt_dir) / "params.json", "r") as f:
+        with open(Path(ckpt_dir) / "params.json") as f:
             params = json.loads(f.read())
 
         model_args: ModelArgs = ModelArgs(
@@ -154,15 +154,15 @@ class Llama3:
     @torch.inference_mode()
     def generate(
         self,
-        llm_inputs: List[LLMInput],
+        llm_inputs: list[LLMInput],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         echo: bool = False,
         print_model_input: bool = False,
-        logits_processor: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
-    ) -> Generator[List[GenerationResult], None, None]:
+        logits_processor: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
+    ) -> Generator[list[GenerationResult], None, None]:
         if max_gen_len is None or max_gen_len == 0 or max_gen_len >= self.args.max_seq_len:
             max_gen_len = self.args.max_seq_len - 1
         params = self.model.params
@@ -174,6 +174,7 @@ class Llama3:
                 cprint(
                     "Input to model:\n" + self.tokenizer.decode(tokens_to_print) + "\n",
                     "red",
+                    file=sys.stderr,
                 )
         prompt_tokens = [inp.tokens for inp in llm_inputs]
 
@@ -184,7 +185,11 @@ class Llama3:
         max_prompt_len = max(len(t) for t in prompt_tokens)
 
         if max_prompt_len >= params.max_seq_len:
-            cprint(f"Out of token budget {max_prompt_len} vs {params.max_seq_len}", "red")
+            cprint(
+                f"Out of token budget {max_prompt_len} vs {params.max_seq_len}",
+                color="red",
+                file=sys.stderr,
+            )
             return
 
         total_len = min(max_gen_len + max_prompt_len, params.max_seq_len)
@@ -302,13 +307,13 @@ class Llama3:
 
     def completion(
         self,
-        contents: List[RawContent],
+        contents: list[RawContent],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         echo: bool = False,
-    ) -> Generator[List[GenerationResult], None, None]:
+    ) -> Generator[list[GenerationResult], None, None]:
         model_inputs = [self.formatter.encode_content(c) for c in contents]
         for result in self.generate(
             model_inputs=model_inputs,
@@ -324,14 +329,14 @@ class Llama3:
 
     def chat_completion(
         self,
-        messages_batch: List[List[RawMessage]],
+        messages_batch: list[list[RawMessage]],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json,
         echo: bool = False,
-    ) -> Generator[List[GenerationResult], None, None]:
+    ) -> Generator[list[GenerationResult], None, None]:
         model_inputs = [self.formatter.encode_dialog_prompt(messages) for messages in messages_batch]
         for result in self.generate(
             model_inputs=model_inputs,
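The cprint changes above send debug output to stderr so that generated text captured from stdout stays clean. A minimal sketch, assuming termcolor's cprint forwards extra keyword arguments to print:

    import sys
    from termcolor import cprint

    # Debug text goes to stderr; stdout is left for actual program output.
    cprint("Out of token budget 4097 vs 4096", color="red", file=sys.stderr)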
diff --git a/llama_stack/models/llama/llama3/interface.py b/llama_stack/models/llama/llama3/interface.py
index 8684237df..b63ba4847 100644
--- a/llama_stack/models/llama/llama3/interface.py
+++ b/llama_stack/models/llama/llama3/interface.py
@@ -12,7 +12,6 @@
 # the top-level of this source tree.
 
 from pathlib import Path
-from typing import List, Optional
 
 from termcolor import colored
 
@@ -131,7 +130,7 @@ class LLama31Interface:
         self.formatter = ChatFormat(self.tokenizer)
         self.tool_prompt_format = tool_prompt_format
 
-    def get_tokens(self, messages: List[RawMessage]) -> List[int]:
+    def get_tokens(self, messages: list[RawMessage]) -> list[int]:
         model_input = self.formatter.encode_dialog_prompt(
             messages,
             self.tool_prompt_format,
@@ -149,10 +148,10 @@ class LLama31Interface:
 
     def system_messages(
         self,
-        builtin_tools: List[BuiltinTool],
-        custom_tools: List[ToolDefinition],
-        instruction: Optional[str] = None,
-    ) -> List[RawMessage]:
+        builtin_tools: list[BuiltinTool],
+        custom_tools: list[ToolDefinition],
+        instruction: str | None = None,
+    ) -> list[RawMessage]:
         messages = []
 
         default_gen = SystemDefaultGenerator()
@@ -194,8 +193,8 @@ class LLama31Interface:
         self,
         content: str,
         stop_reason: StopReason,
-        tool_call: Optional[ToolCall] = None,
-    ) -> List[RawMessage]:
+        tool_call: ToolCall | None = None,
+    ) -> list[RawMessage]:
         tool_calls = []
         if tool_call:
             tool_calls.append(tool_call)
@@ -208,7 +207,7 @@ class LLama31Interface:
             )
         ]
 
-    def user_message(self, content: str) -> List[RawMessage]:
+    def user_message(self, content: str) -> list[RawMessage]:
         return [RawMessage(role="user", content=content)]
 
     def display_message_as_tokens(self, message: RawMessage) -> None:
@@ -228,7 +227,7 @@ class LLama31Interface:
         print("\n", end="")
 
 
-def list_jinja_templates() -> List[Template]:
+def list_jinja_templates() -> list[Template]:
     return TEMPLATES
 
 
diff --git a/llama_stack/models/llama/llama3/model.py b/llama_stack/models/llama/llama3/model.py
index 2562673e2..88f748c1d 100644
--- a/llama_stack/models/llama/llama3/model.py
+++ b/llama_stack/models/llama/llama3/model.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 import math
-from typing import Optional, Tuple
 
 import fairscale.nn.model_parallel.initialize as fs_init
 import torch
@@ -80,7 +79,7 @@ def apply_rotary_emb(
     xq: torch.Tensor,
     xk: torch.Tensor,
     freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
     freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
@@ -162,7 +161,7 @@ class Attention(nn.Module):
         x: torch.Tensor,
         start_pos: int,
         freqs_cis: torch.Tensor,
-        mask: Optional[torch.Tensor],
+        mask: torch.Tensor | None,
     ):
         bsz, seqlen, _ = x.shape
         xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
@@ -204,7 +203,7 @@ class FeedForward(nn.Module):
         dim: int,
         hidden_dim: int,
         multiple_of: int,
-        ffn_dim_multiplier: Optional[float],
+        ffn_dim_multiplier: float | None,
     ):
         super().__init__()
         hidden_dim = int(2 * hidden_dim / 3)
@@ -243,7 +242,7 @@ class TransformerBlock(nn.Module):
         x: torch.Tensor,
         start_pos: int,
         freqs_cis: torch.Tensor,
-        mask: Optional[torch.Tensor],
+        mask: torch.Tensor | None,
     ):
         h = x + self.attention(self.attention_norm(x), start_pos, freqs_cis, mask)
         out = h + self.feed_forward(self.ffn_norm(h))
diff --git a/llama_stack/models/llama/llama3/multimodal/image_transform.py b/llama_stack/models/llama/llama3/multimodal/image_transform.py
index c156d6d2e..f2761ee47 100644
--- a/llama_stack/models/llama/llama3/multimodal/image_transform.py
+++ b/llama_stack/models/llama/llama3/multimodal/image_transform.py
@@ -14,7 +14,7 @@
 import math
 from collections import defaultdict
 from logging import getLogger
-from typing import Any, Optional, Set, Tuple
+from typing import Any
 
 import torch
 import torchvision.transforms as tv
@@ -26,7 +26,7 @@ IMAGE_RES = 224
 logger = getLogger()
 
 
-class VariableSizeImageTransform(object):
+class VariableSizeImageTransform:
     """
     This class accepts images of any size and dynamically resizes, pads, and chunks them
     based on the image aspect ratio and the number of image chunks we allow.
@@ -75,7 +75,7 @@ class VariableSizeImageTransform(object):
         self.resample = tv.InterpolationMode.BILINEAR
 
     @staticmethod
-    def get_factors(n: int) -> Set[int]:
+    def get_factors(n: int) -> set[int]:
         """
         Calculate all factors of a given number, i.e. a divisor that leaves
         no remainder. For example, if n=12, it will return {1, 2, 3, 4, 6, 12}.
@@ -145,9 +145,9 @@ class VariableSizeImageTransform(object):
 
     @staticmethod
     def get_max_res_without_distortion(
-        image_size: Tuple[int, int],
-        target_size: Tuple[int, int],
-    ) -> Tuple[int, int]:
+        image_size: tuple[int, int],
+        target_size: tuple[int, int],
+    ) -> tuple[int, int]:
         """
         Determines the maximum resolution to which an image can be resized without distorting its
         aspect ratio, based on the target resolution.
@@ -198,8 +198,8 @@ class VariableSizeImageTransform(object):
     def resize_without_distortion(
         self,
         image: torch.Tensor,
-        target_size: Tuple[int, int],
-        max_upscaling_size: Optional[int],
+        target_size: tuple[int, int],
+        max_upscaling_size: int | None,
     ) -> torch.Tensor:
         """
         Used to resize an image to target_resolution, without distortion.
@@ -261,10 +261,10 @@ class VariableSizeImageTransform(object):
 
     def get_best_fit(
         self,
-        image_size: Tuple[int, int],
+        image_size: tuple[int, int],
         possible_resolutions: torch.Tensor,
         resize_to_max_canvas: bool = False,
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
         """
         Determines the best possible canvas, from a list of possible resolutions, to
         resize an image to without distortion.
@@ -364,7 +364,7 @@ class VariableSizeImageTransform(object):
         max_num_chunks: int,
         normalize_img: bool = True,
         resize_to_max_canvas: bool = False,
-    ) -> Tuple[Any, Any]:
+    ) -> tuple[Any, Any]:
         """
         Args:
             image (PIL.Image): Image to be resized.
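The docstrings above describe fitting an image inside a target resolution without changing its aspect ratio. A minimal sketch of that arithmetic with a hypothetical helper (not the class's actual implementation):

    # Fit (width, height) inside (target_w, target_h) while preserving aspect ratio.
    def fit_without_distortion(size: tuple[int, int], target: tuple[int, int]) -> tuple[int, int]:
        w, h = size
        tw, th = target
        scale = min(tw / w, th / h)
        return int(w * scale), int(h * scale)

    fit_without_distortion((800, 600), (224, 224))  # -> (224, 168)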
diff --git a/llama_stack/models/llama/llama3/multimodal/model.py b/llama_stack/models/llama/llama3/multimodal/model.py
index 0cb18b948..5f1c3605c 100644
--- a/llama_stack/models/llama/llama3/multimodal/model.py
+++ b/llama_stack/models/llama/llama3/multimodal/model.py
@@ -6,8 +6,9 @@
 
 import logging
 import math
+from collections.abc import Callable
 from functools import partial
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any
 
 import fairscale.nn.model_parallel.initialize as fs_init
 import torch
@@ -104,9 +105,9 @@ class ColumnParallelConv2dPatch(torch.nn.Module):
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[int, Tuple[int, int]],
-        stride: Union[int, Tuple[int, int]],
-        bias: Optional[bool] = False,
+        kernel_size: int | tuple[int, int],
+        stride: int | tuple[int, int],
+        bias: bool | None = False,
     ) -> None:
         super().__init__()
         if isinstance(kernel_size, int):
@@ -390,13 +391,13 @@ class VisionEncoder(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool = True,
-        missing_keys: List[str] = None,
-        unexpected_keys: List[str] = None,
-        error_msgs: List[str] = None,
+        missing_keys: list[str] = None,
+        unexpected_keys: list[str] = None,
+        error_msgs: list[str] = None,
         return_state_dict: bool = False,
     ) -> None:
         orig_pos_embed = state_dict.get(prefix + "positional_embedding")
@@ -641,7 +642,7 @@ class FeedForward(nn.Module):
         dim: int,
         hidden_dim: int,
         multiple_of: int,
-        ffn_dim_multiplier: Optional[float],
+        ffn_dim_multiplier: float | None,
     ):
         """
         Initialize the FeedForward module.
@@ -983,7 +984,7 @@ class CrossAttentionTransformerBlock(torch.nn.Module):
         self,
         x: torch.Tensor,
         xattn_mask: torch.Tensor,
-        full_text_row_masked_out_mask: Tuple[torch.Tensor, torch.Tensor],
+        full_text_row_masked_out_mask: tuple[torch.Tensor, torch.Tensor],
         xattn_cache: torch.Tensor,
     ) -> torch.Tensor:
         _attn_out = self.attention(
@@ -1144,7 +1145,7 @@ class CrossAttentionTransformerText(torch.nn.Module):
     def _init_fusion_schedule(
         self,
         num_layers: int,
-    ) -> List[int]:
+    ) -> list[int]:
         llama_layers = list(range(self.n_llama_layers))
 
         # uniformly spread the layers
@@ -1231,7 +1232,7 @@ class CrossAttentionTransformerText(torch.nn.Module):
         text_dtype,
         vision_tokens,
         cross_attention_masks,
-    ) -> Tuple[Tensor, Tensor]:
+    ) -> tuple[Tensor, Tensor]:
         assert vision_tokens is not None, "Vision tokens must be provided"
         vision_seqlen = vision_tokens.shape[3]
         assert vision_tokens.shape[1] == cross_attention_masks.shape[2], (
@@ -1280,11 +1281,11 @@ class CrossAttentionTransformer(torch.nn.Module):
 
     def compute_vision_tokens_masks(
         self,
-        batch_images: List[List[PIL_Image.Image]],
-        batch_masks: List[List[List[int]]],
+        batch_images: list[list[PIL_Image.Image]],
+        batch_masks: list[list[list[int]]],
         total_len: int,
         device: torch.device,
-    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
         skip_vision_encoder = False
 
         assert len(batch_images) == len(batch_masks), "Images and masks must have the same length"
@@ -1371,11 +1372,11 @@ class CrossAttentionTransformer(torch.nn.Module):
 
 
 def _stack_images(
-    images: List[List[PIL_Image.Image]],
+    images: list[list[PIL_Image.Image]],
     max_num_chunks: int,
     image_res: int,
     max_num_images: int,
-) -> Tuple[torch.Tensor, List[int]]:
+) -> tuple[torch.Tensor, list[int]]:
     """
     Takes a list of lists of images and stacks them into a tensor.
     This function is needed since images can be of completely
@@ -1400,8 +1401,8 @@ def _stack_images(
 
 
 def _pad_masks(
-    all_masks: List[List[List[int]]],
-    all_num_chunks: List[List[int]],
+    all_masks: list[list[list[int]]],
+    all_num_chunks: list[list[int]],
     total_len: int,
     max_num_chunks: int,
 ) -> torch.Tensor:
diff --git a/llama_stack/models/llama/llama3/prompt_templates/base.py b/llama_stack/models/llama/llama3/prompt_templates/base.py
index bff2a21e1..0081443be 100644
--- a/llama_stack/models/llama/llama3/prompt_templates/base.py
+++ b/llama_stack/models/llama/llama3/prompt_templates/base.py
@@ -12,7 +12,7 @@
 # the top-level of this source tree.
 
 from dataclasses import dataclass
-from typing import Any, Dict, List
+from typing import Any
 
 from jinja2 import Template
 
@@ -20,7 +20,7 @@ from jinja2 import Template
 @dataclass
 class PromptTemplate:
     template: str
-    data: Dict[str, Any]
+    data: dict[str, Any]
 
     def render(self):
         template = Template(self.template)
@@ -35,5 +35,5 @@ class PromptTemplateGeneratorBase:
     def gen(self, *args, **kwargs) -> PromptTemplate:
         raise NotImplementedError()
 
-    def data_examples(self) -> List[Any]:
+    def data_examples(self) -> list[Any]:
         raise NotImplementedError()
diff --git a/llama_stack/models/llama/llama3/prompt_templates/system_prompts.py b/llama_stack/models/llama/llama3/prompt_templates/system_prompts.py
index fbc0127fd..ab626e5af 100644
--- a/llama_stack/models/llama/llama3/prompt_templates/system_prompts.py
+++ b/llama_stack/models/llama/llama3/prompt_templates/system_prompts.py
@@ -13,7 +13,7 @@
 
 import textwrap
 from datetime import datetime
-from typing import Any, List, Optional
+from typing import Any
 
 from llama_stack.apis.inference import (
     BuiltinTool,
@@ -39,12 +39,12 @@ class SystemDefaultGenerator(PromptTemplateGeneratorBase):
             },
         )
 
-    def data_examples(self) -> List[Any]:
+    def data_examples(self) -> list[Any]:
         return [None]
 
 
 class BuiltinToolGenerator(PromptTemplateGeneratorBase):
-    def _tool_breakdown(self, tools: List[ToolDefinition]):
+    def _tool_breakdown(self, tools: list[ToolDefinition]):
         builtin_tools, custom_tools = [], []
         for dfn in tools:
             if isinstance(dfn.tool_name, BuiltinTool):
@@ -54,7 +54,7 @@ class BuiltinToolGenerator(PromptTemplateGeneratorBase):
 
         return builtin_tools, custom_tools
 
-    def gen(self, tools: List[ToolDefinition]) -> PromptTemplate:
+    def gen(self, tools: list[ToolDefinition]) -> PromptTemplate:
         builtin_tools, custom_tools = self._tool_breakdown(tools)
         template_str = textwrap.dedent(
             """
@@ -75,7 +75,7 @@ class BuiltinToolGenerator(PromptTemplateGeneratorBase):
             },
         )
 
-    def data_examples(self) -> List[List[ToolDefinition]]:
+    def data_examples(self) -> list[list[ToolDefinition]]:
         return [
             # builtin tools
             [
@@ -91,7 +91,7 @@ class BuiltinToolGenerator(PromptTemplateGeneratorBase):
 
 
 class JsonCustomToolGenerator(PromptTemplateGeneratorBase):
-    def gen(self, custom_tools: List[ToolDefinition]) -> PromptTemplate:
+    def gen(self, custom_tools: list[ToolDefinition]) -> PromptTemplate:
         template_str = textwrap.dedent(
             """
             Answer the user's question by making use of the following functions if needed.
@@ -137,7 +137,7 @@ class JsonCustomToolGenerator(PromptTemplateGeneratorBase):
             {"custom_tools": [t.model_dump() for t in custom_tools]},
         )
 
-    def data_examples(self) -> List[List[ToolDefinition]]:
+    def data_examples(self) -> list[list[ToolDefinition]]:
         return [
             [
                 ToolDefinition(
@@ -161,7 +161,7 @@ class JsonCustomToolGenerator(PromptTemplateGeneratorBase):
 
 
 class FunctionTagCustomToolGenerator(PromptTemplateGeneratorBase):
-    def gen(self, custom_tools: List[ToolDefinition]) -> PromptTemplate:
+    def gen(self, custom_tools: list[ToolDefinition]) -> PromptTemplate:
         template_str = textwrap.dedent(
             """
             You have access to the following functions:
@@ -199,7 +199,7 @@ class FunctionTagCustomToolGenerator(PromptTemplateGeneratorBase):
             {"custom_tools": [t.model_dump() for t in custom_tools]},
         )
 
-    def data_examples(self) -> List[List[ToolDefinition]]:
+    def data_examples(self) -> list[list[ToolDefinition]]:
         return [
             [
                 ToolDefinition(
@@ -238,14 +238,14 @@ class PythonListCustomToolGenerator(PromptTemplateGeneratorBase):  # noqa: N801
         """.strip("\n")
     )
 
-    def gen(self, custom_tools: List[ToolDefinition], system_prompt: Optional[str] = None) -> PromptTemplate:
+    def gen(self, custom_tools: list[ToolDefinition], system_prompt: str | None = None) -> PromptTemplate:
         system_prompt = system_prompt or self.DEFAULT_PROMPT
         return PromptTemplate(
             system_prompt,
             {"function_description": self._gen_function_description(custom_tools)},
         )
 
-    def _gen_function_description(self, custom_tools: List[ToolDefinition]) -> PromptTemplate:
+    def _gen_function_description(self, custom_tools: list[ToolDefinition]) -> str:
         template_str = textwrap.dedent(
             """
             Here is a list of functions in JSON format that you can invoke.
@@ -286,12 +286,14 @@ class PythonListCustomToolGenerator(PromptTemplateGeneratorBase):  # noqa: N801
 
             """
         )
-        return PromptTemplate(
+        template = PromptTemplate(
             template_str.strip("\n"),
             {"tools": [t.model_dump() for t in custom_tools]},
-        ).render()
+        )
+        rendered: str = template.render()
+        return rendered
 
-    def data_examples(self) -> List[List[ToolDefinition]]:
+    def data_examples(self) -> list[list[ToolDefinition]]:
         return [
             [
                 ToolDefinition(
diff --git a/llama_stack/models/llama/llama3/prompt_templates/tool_response.py b/llama_stack/models/llama/llama3/prompt_templates/tool_response.py
index 3df4dac14..4da171279 100644
--- a/llama_stack/models/llama/llama3/prompt_templates/tool_response.py
+++ b/llama_stack/models/llama/llama3/prompt_templates/tool_response.py
@@ -12,7 +12,6 @@
 # the top-level of this source tree.
 
 import textwrap
-from typing import Optional
 
 from .base import PromptTemplate, PromptTemplateGeneratorBase
 
@@ -21,8 +20,8 @@ class ToolResponseGenerator(PromptTemplateGeneratorBase):
     def gen(
         self,
         status: str,
-        stdout: Optional[str] = None,
-        stderr: Optional[str] = None,
+        stdout: str | None = None,
+        stderr: str | None = None,
     ):
         assert status in [
             "success",
diff --git a/llama_stack/models/llama/llama3/quantization/loader.py b/llama_stack/models/llama/llama3/quantization/loader.py
index 771fd02be..436cfa6fa 100644
--- a/llama_stack/models/llama/llama3/quantization/loader.py
+++ b/llama_stack/models/llama/llama3/quantization/loader.py
@@ -6,7 +6,7 @@
 
 # type: ignore
 import os
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, cast
 
 import torch
 from fairscale.nn.model_parallel.initialize import get_model_parallel_rank
@@ -37,9 +37,9 @@ def swiglu_wrapper(
 def convert_to_quantized_model(
     model: Transformer | CrossAttentionTransformer,
     checkpoint_dir: str,
-    quantization_mode: Optional[str] = None,
-    fp8_activation_scale_ub: Optional[float] = 1200.0,
-    device: Optional[torch.device] = None,
+    quantization_mode: str | None = None,
+    fp8_activation_scale_ub: float | None = 1200.0,
+    device: torch.device | None = None,
 ) -> Transformer | CrossAttentionTransformer:
     if quantization_mode == QuantizationMode.fp8_mixed:
         return convert_to_fp8_quantized_model(model, checkpoint_dir, fp8_activation_scale_ub, device)
@@ -52,8 +52,8 @@ def convert_to_quantized_model(
 def convert_to_fp8_quantized_model(
     model: Transformer,
     checkpoint_dir: str,
-    fp8_activation_scale_ub: Optional[float] = 1200.0,
-    device: Optional[torch.device] = None,
+    fp8_activation_scale_ub: float | None = 1200.0,
+    device: torch.device | None = None,
 ) -> Transformer:
     # Move weights to GPU with quantization
     fp8_scales_path = os.path.join(checkpoint_dir, f"fp8_scales_{get_model_parallel_rank()}.pt")
@@ -122,8 +122,8 @@ class Int8DynActInt4WeightLinearLoRA(Int8DynActInt4WeightLinear):
         precision: torch.dtype = torch.float32,
         scales_precision: torch.dtype = torch.float32,
         # LoRA parameters
-        lora_rank: Optional[int] = None,
-        lora_scale: Optional[float] = None,
+        lora_rank: int | None = None,
+        lora_scale: float | None = None,
     ) -> None:
         super().__init__(
             in_features,
@@ -134,8 +134,8 @@ class Int8DynActInt4WeightLinearLoRA(Int8DynActInt4WeightLinear):
             precision=precision,
             scales_precision=scales_precision,
         )
-        self.lora_scale: Optional[float] = None
-        self.adaptor: Optional[nn.Sequential] = None
+        self.lora_scale: float | None = None
+        self.adaptor: nn.Sequential | None = None
         if lora_rank is not None:
             assert lora_scale is not None, "Please specify lora scale for LoRA."
             # Low-rank adaptation. See paper for more details: https://arxiv.org/abs/2106.09685
@@ -147,13 +147,13 @@ class Int8DynActInt4WeightLinearLoRA(Int8DynActInt4WeightLinear):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         """A hook to load the quantized weights from the state dict."""
         if prefix + "zeros" not in state_dict:
@@ -191,13 +191,13 @@ class Int8WeightEmbedding(torch.nn.Embedding):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         """A hook to load the quantized embedding weight and scales from the state dict."""
         weights = state_dict.pop(prefix + "weight")
@@ -221,13 +221,13 @@ class Int8WeightLinear(torch.nn.Linear):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         """A hook to load the quantized linear weight and scales from the state dict."""
         weights = state_dict.pop(prefix + "weight")
@@ -238,8 +238,8 @@ class Int8WeightLinear(torch.nn.Linear):
 def _prepare_model_int4_weight_int8_dynamic_activation(
     model: torch.nn.Module,
     group_size: int,
-    lora_rank: Optional[int],
-    lora_scale: Optional[float],
+    lora_rank: int | None,
+    lora_scale: float | None,
 ):
     """Prepare the model for int4 weight and int8 dynamic activation quantization.
 
@@ -265,7 +265,7 @@ def _prepare_model_int4_weight_int8_dynamic_activation(
             )
             del module
             setattr(model, module_name, quantized_module)
-        elif isinstance(module, (ColumnParallelLinear, RowParallelLinear, nn.Linear)):
+        elif isinstance(module, ColumnParallelLinear | RowParallelLinear | nn.Linear):
             quantized_module = Int8DynActInt4WeightLinearLoRA(
                 in_features=module.in_features,
                 out_features=module.out_features,
@@ -286,7 +286,7 @@ def _prepare_model_int4_weight_int8_dynamic_activation(
 def convert_to_int4_quantized_model(
     model: Transformer | CrossAttentionTransformer,
     checkpoint_dir: str,
-    device: Optional[torch.device] = None,
+    device: torch.device | None = None,
 ) -> Transformer | CrossAttentionTransformer:
     """Convert the model to int4 quantized model."""
     model_args = model.params
diff --git a/llama_stack/models/llama/llama3/tokenizer.py b/llama_stack/models/llama/llama3/tokenizer.py
index d3cc4fc07..e5ada3599 100644
--- a/llama_stack/models/llama/llama3/tokenizer.py
+++ b/llama_stack/models/llama/llama3/tokenizer.py
@@ -5,18 +5,11 @@
 # the root directory of this source tree.
 
 import os
+from collections.abc import Collection, Iterator, Sequence, Set
 from logging import getLogger
 from pathlib import Path
 from typing import (
-    AbstractSet,
-    Collection,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Optional,
-    Sequence,
-    Union,
     cast,
 )
 
@@ -44,7 +37,7 @@ class Tokenizer:
     Tokenizing and encoding/decoding text using the Tiktoken tokenizer.
     """
 
-    special_tokens: Dict[str, int]
+    special_tokens: dict[str, int]
 
     num_reserved_special_tokens = 256
 
@@ -116,9 +109,9 @@ class Tokenizer:
         *,
         bos: bool,
         eos: bool,
-        allowed_special: Optional[Union[Literal["all"], AbstractSet[str]]] = None,
-        disallowed_special: Union[Literal["all"], Collection[str]] = (),
-    ) -> List[int]:
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] = (),
+    ) -> list[int]:
         """
         Encodes a string into a list of token IDs.
 
@@ -151,7 +144,7 @@ class Tokenizer:
                 s[i : i + TIKTOKEN_MAX_ENCODE_CHARS], MAX_NO_WHITESPACES_CHARS
             )
         )
-        t: List[int] = []
+        t: list[int] = []
         for substr in substrs:
             t.extend(
                 self.model.encode(
@@ -177,7 +170,7 @@ class Tokenizer:
             str: The decoded string.
         """
         # Typecast is safe here. Tiktoken doesn't do anything list-related with the sequence.
-        return self.model.decode(cast(List[int], t))
+        return self.model.decode(cast(list[int], t))
 
     @staticmethod
     def _split_whitespaces_or_nonwhitespaces(s: str, max_consecutive_slice_len: int) -> Iterator[str]:
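The import change above relies on typing.AbstractSet being a deprecated alias of collections.abc.Set, so the new annotation accepts the same values as before. A small illustrative check:

    from collections.abc import Set

    allowed: Set[str] = frozenset({"<|image|>"})  # any abstract set of str satisfies the annotation
    assert isinstance({"<|image|>"}, Set)         # a built-in set also counts as a collections.abc.Set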
diff --git a/llama_stack/models/llama/llama3/tool_utils.py b/llama_stack/models/llama/llama3/tool_utils.py
index 91b46ec98..574080184 100644
--- a/llama_stack/models/llama/llama3/tool_utils.py
+++ b/llama_stack/models/llama/llama3/tool_utils.py
@@ -6,7 +6,6 @@
 
 import json
 import re
-from typing import Optional, Tuple
 
 from llama_stack.log import get_logger
 
@@ -172,7 +171,7 @@ class ToolUtils:
         return match is not None
 
     @staticmethod
-    def maybe_extract_builtin_tool_call(message_body: str) -> Optional[Tuple[str, str]]:
+    def maybe_extract_builtin_tool_call(message_body: str) -> tuple[str, str] | None:
         # Find the first match in the text
         match = re.search(BUILTIN_TOOL_PATTERN, message_body)
 
@@ -185,7 +184,7 @@ class ToolUtils:
             return None
 
     @staticmethod
-    def maybe_extract_custom_tool_call(message_body: str) -> Optional[Tuple[str, str]]:
+    def maybe_extract_custom_tool_call(message_body: str) -> tuple[str, str] | None:
         # NOTE: Custom function tool calls are still experimental
         # Sometimes, response is of the form
         # {"type": "function", "name": "function_name", "parameters": {...}
@@ -252,7 +251,7 @@ class ToolUtils:
                 def format_value(value: RecursiveType) -> str:
                     if isinstance(value, str):
                         return f'"{value}"'
-                    elif isinstance(value, (int, float, bool)) or value is None:
+                    elif isinstance(value, int | float | bool) or value is None:
                         return str(value)
                     elif isinstance(value, list):
                         return f"[{', '.join(format_value(v) for v in value)}]"
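The isinstance change above (and the matching one in quantization/loader.py) relies on Python 3.10+ accepting a PEP 604 union as the classinfo argument. A small illustrative check:

    # On Python 3.10+, X | Y works with isinstance and behaves like a tuple of types.
    assert isinstance(3, int | float | bool)
    assert isinstance(3, (int, float, bool))  # equivalent older spelling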
diff --git a/llama_stack/models/llama/llama3_1/prompts.py b/llama_stack/models/llama/llama3_1/prompts.py
index 9dcc51dc8..579a5ee02 100644
--- a/llama_stack/models/llama/llama3_1/prompts.py
+++ b/llama_stack/models/llama/llama3_1/prompts.py
@@ -12,7 +12,6 @@
 # the top-level of this source tree.
 
 import textwrap
-from typing import List
 
 from llama_stack.models.llama.datatypes import (
     BuiltinTool,
@@ -73,7 +72,7 @@ def wolfram_alpha_response():
     )
 
 
-def usecases() -> List[UseCase | str]:
+def usecases() -> list[UseCase | str]:
     return [
         textwrap.dedent(
             """
diff --git a/llama_stack/models/llama/llama3_3/prompts.py b/llama_stack/models/llama/llama3_3/prompts.py
index 194e4fa26..60349e578 100644
--- a/llama_stack/models/llama/llama3_3/prompts.py
+++ b/llama_stack/models/llama/llama3_3/prompts.py
@@ -12,7 +12,6 @@
 # the top-level of this source tree.
 
 import textwrap
-from typing import List
 
 from llama_stack.models.llama.datatypes import (
     BuiltinTool,
@@ -74,7 +73,7 @@ def wolfram_alpha_response():
     )
 
 
-def usecases() -> List[UseCase | str]:
+def usecases() -> list[UseCase | str]:
     return [
         textwrap.dedent(
             """
diff --git a/llama_stack/models/llama/llama4/args.py b/llama_stack/models/llama/llama4/args.py
index dd5f7cbde..523d6ed10 100644
--- a/llama_stack/models/llama/llama4/args.py
+++ b/llama_stack/models/llama/llama4/args.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Optional
 
 from pydantic import BaseModel, model_validator
 
@@ -15,8 +14,8 @@ class QuantizationScheme(Enum):
 
 
 class QuantizationArgs(BaseModel):
-    scheme: Optional[QuantizationScheme] = None
-    group_size: Optional[int] = None
+    scheme: QuantizationScheme | None = None
+    group_size: int | None = None
     spinquant: bool = False
 
 
@@ -58,32 +57,32 @@ class ModelArgs(BaseModel):
     dim: int = -1
     n_layers: int = -1
     n_heads: int = -1
-    n_kv_heads: Optional[int] = None
-    head_dim: Optional[int] = None
+    n_kv_heads: int | None = None
+    head_dim: int | None = None
 
     vocab_size: int = -1
     multiple_of: int = 256  # make SwiGLU hidden layer size multiple of large power of 2
-    ffn_dim_multiplier: Optional[float] = None
-    ffn_exp: Optional[float] = None
+    ffn_dim_multiplier: float | None = None
+    ffn_exp: float | None = None
     norm_eps: float = 1e-5
 
-    attention_chunk_size: Optional[int] = None
+    attention_chunk_size: int | None = None
     rope_theta: float = 500000
     use_scaled_rope: bool = False
-    rope_scaling_factor: Optional[float] = None
-    rope_high_freq_factor: Optional[float] = None
+    rope_scaling_factor: float | None = None
+    rope_high_freq_factor: float | None = None
 
-    nope_layer_interval: Optional[int] = None  # No position encoding in every n layers
+    nope_layer_interval: int | None = None  # No position encoding in every n layers
     use_qk_norm: bool = False
     # Set to True to enable inference-time temperature tuning (useful for very long context)
     attn_temperature_tuning: bool = False
     floor_scale: float = 8192.0
     attn_scale: float = 0.1
 
-    vision_args: Optional[VisionArgs] = None
-    moe_args: Optional[MoEArgs] = None
-    quantization_args: Optional[QuantizationArgs] = None
-    lora_args: Optional[LoRAArgs] = None
+    vision_args: VisionArgs | None = None
+    moe_args: MoEArgs | None = None
+    quantization_args: QuantizationArgs | None = None
+    lora_args: LoRAArgs | None = None
 
     max_batch_size: int = 32
     max_seq_len: int = 2048
diff --git a/llama_stack/models/llama/llama4/chat_format.py b/llama_stack/models/llama/llama4/chat_format.py
index 1574eeb5e..96ebd0881 100644
--- a/llama_stack/models/llama/llama4/chat_format.py
+++ b/llama_stack/models/llama/llama4/chat_format.py
@@ -8,7 +8,6 @@ import io
 import json
 import uuid
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Tuple
 
 import torch
 from PIL import Image as PIL_Image
@@ -46,10 +45,10 @@ def role_str(role: Role) -> str:
 class TransformedImage:
     image_tiles: torch.Tensor
     # is the aspect ratio needed anywhere?
-    aspect_ratio: Tuple[int, int]
+    aspect_ratio: tuple[int, int]
 
 
-def convert_image_to_rgb(image: PIL_Image.Image, bg: Tuple[int, int, int] = (255, 255, 255)) -> PIL_Image.Image:
+def convert_image_to_rgb(image: PIL_Image.Image, bg: tuple[int, int, int] = (255, 255, 255)) -> PIL_Image.Image:
     if image.mode == "RGBA":
         image.load()  # for png.split()
         new_img = PIL_Image.new("RGB", image.size, bg)
@@ -59,12 +58,12 @@ def convert_image_to_rgb(image: PIL_Image.Image, bg: Tuple[int, int, int] = (255
 
 
 class ChatFormat:
-    possible_headers: Dict[Role, str]
+    possible_headers: dict[Role, str]
 
     def __init__(
         self,
         tokenizer: Tokenizer,
-        vision_args: Optional[VisionArgs] = None,
+        vision_args: VisionArgs | None = None,
         max_num_chunks: int = 16,
     ):
         self.tokenizer = tokenizer
@@ -81,7 +80,7 @@ class ChatFormat:
                 vision_args.image_size.width, vision_args.image_size.height
             )
 
-    def _encode_header(self, role: str) -> List[int]:
+    def _encode_header(self, role: str) -> list[int]:
         tokens = []
         tokens.append(self.tokenizer.special_tokens["<|header_start|>"])
 
@@ -98,7 +97,7 @@ class ChatFormat:
     def _encode_image(
         self,
         transformed_image: TransformedImage,
-    ) -> List[int]:
+    ) -> list[int]:
         assert self.vision_args is not None, "The model is not vision-enabled"
 
         image_tensor = transformed_image.image_tiles
@@ -140,7 +139,7 @@ class ChatFormat:
 
         return tokens
 
-    def _encode_content(self, content: RawContent, bos: bool = False) -> Tuple[List[int], List[TransformedImage]]:
+    def _encode_content(self, content: RawContent, bos: bool = False) -> tuple[list[int], list[TransformedImage]]:
         tokens = []
         tranformed_images = []
 
@@ -189,7 +188,7 @@ class ChatFormat:
 
     def encode_message(
         self, message: RawMessage, tool_prompt_format: ToolPromptFormat
-    ) -> Tuple[List[int], List[TransformedImage]]:
+    ) -> tuple[list[int], list[TransformedImage]]:
         tokens = self._encode_header(message.role)
         images = []
 
@@ -223,7 +222,7 @@ class ChatFormat:
 
     def encode_dialog_prompt(
         self,
-        messages: List[RawMessage],
+        messages: list[RawMessage],
         tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json,
     ) -> LLMInput:
         tokens = []
@@ -240,7 +239,7 @@ class ChatFormat:
         return self._model_input_from_tokens_images(tokens, images)
 
     # TODO(this should be generic, not only for assistant messages)
-    def decode_assistant_message(self, tokens: List[int], stop_reason: StopReason) -> RawMessage:
+    def decode_assistant_message(self, tokens: list[int], stop_reason: StopReason) -> RawMessage:
         content = self.tokenizer.decode(tokens)
 
         return self.decode_assistant_message_from_content(content, stop_reason)
@@ -312,7 +311,7 @@ class ChatFormat:
             tool_calls=tool_calls,
         )
 
-    def _model_input_from_tokens_images(self, tokens: List[int], images: List[TransformedImage]) -> LLMInput:
+    def _model_input_from_tokens_images(self, tokens: list[int], images: list[TransformedImage]) -> LLMInput:
         return LLMInput(
             tokens=tokens,
             images=[x.image_tiles for x in images] if len(images) > 0 else None,
diff --git a/llama_stack/models/llama/llama4/datatypes.py b/llama_stack/models/llama/llama4/datatypes.py
index 27174db63..24d8ae948 100644
--- a/llama_stack/models/llama/llama4/datatypes.py
+++ b/llama_stack/models/llama/llama4/datatypes.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from dataclasses import dataclass
-from typing import List, Optional, Union
 
 import torch
 
@@ -30,7 +29,7 @@ class LLMInput:
     tokens: torch.Tensor
 
     # images are already pre-processed (resized, tiled, etc.)
-    images: Optional[List[torch.Tensor]] = None
+    images: list[torch.Tensor] | None = None
 
 
 @dataclass
@@ -45,8 +44,8 @@ class TransformerInput:
     # tokens_position defines the position of the tokens in each batch,
     # - when it is a tensor ([batch_size,]), it is the start position of the tokens in each batch
     # - when it is an int, the start position is the same for all batches
-    tokens_position: Union[torch.Tensor, int]
-    image_embedding: Optional[MaskedEmbedding] = None
+    tokens_position: torch.Tensor | int
+    image_embedding: MaskedEmbedding | None = None
 
 
 @dataclass
diff --git a/llama_stack/models/llama/llama4/ffn.py b/llama_stack/models/llama/llama4/ffn.py
index 9c9fca5fc..6584f1a2a 100644
--- a/llama_stack/models/llama/llama4/ffn.py
+++ b/llama_stack/models/llama/llama4/ffn.py
@@ -11,7 +11,7 @@
 # top-level folder for each specific model found within the models/ directory at
 # the top-level of this source tree.
 
-from typing import Any, Dict, List
+from typing import Any
 
 from fairscale.nn.model_parallel.layers import ColumnParallelLinear, RowParallelLinear
 from fairscale.nn.model_parallel.mappings import reduce_from_model_parallel_region
@@ -36,13 +36,13 @@ class FeedForward(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         if prefix + "mlp.fc1_weight" in state_dict:
             w1, w3 = state_dict.pop(prefix + "mlp.fc1_weight").chunk(2, dim=0)
diff --git a/llama_stack/models/llama/llama4/generation.py b/llama_stack/models/llama/llama4/generation.py
index 8e94bb33a..6132d25d4 100644
--- a/llama_stack/models/llama/llama4/generation.py
+++ b/llama_stack/models/llama/llama4/generation.py
@@ -10,8 +10,8 @@ import json
 import os
 import sys
 import time
+from collections.abc import Callable, Generator
 from pathlib import Path
-from typing import Callable, Generator, List, Optional
 
 import torch
 import torch.nn.functional as F
@@ -38,8 +38,8 @@ class Llama4:
         ckpt_dir: str,
         max_seq_len: int,
         max_batch_size: int,
-        world_size: Optional[int] = None,
-        quantization_mode: Optional[QuantizationMode] = None,
+        world_size: int | None = None,
+        quantization_mode: QuantizationMode | None = None,
         seed: int = 1,
     ):
         if not torch.distributed.is_initialized():
@@ -63,7 +63,7 @@ class Llama4:
         ckpt_paths = sorted(Path(ckpt_dir).glob("*.pth"))
         assert len(ckpt_paths) > 0, f"no checkpoint files found in {ckpt_dir}"
         print(f"Loading a checkpoint (shards={len(ckpt_paths)}, current-mp-size={world_size})")
-        with open(Path(ckpt_dir) / "params.json", "r") as f:
+        with open(Path(ckpt_dir) / "params.json") as f:
             params = json.loads(f.read())
 
         model_args: ModelArgs = ModelArgs(
@@ -117,15 +117,15 @@ class Llama4:
     @torch.inference_mode()
     def generate(
         self,
-        llm_inputs: List[LLMInput],
+        llm_inputs: list[LLMInput],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         echo: bool = False,
         print_model_input: bool = False,
-        logits_processor: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
-    ) -> Generator[List[GenerationResult], None, None]:
+        logits_processor: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] | None = None,
+    ) -> Generator[list[GenerationResult], None, None]:
         if max_gen_len is None or max_gen_len == 0 or max_gen_len >= self.model.args.max_seq_len:
             max_gen_len = self.model.args.max_seq_len - 1
 
@@ -133,9 +133,9 @@ class Llama4:
 
         print_model_input = print_model_input or os.environ.get("LLAMA_MODELS_DEBUG", "0") == "1"
         if print_model_input:
-            cprint("Input to model:\n", "yellow")
+            cprint("Input to model:\n", color="yellow", file=sys.stderr)
             for inp in llm_inputs:
-                cprint(self.tokenizer.decode(inp.tokens), "grey")
+                cprint(self.tokenizer.decode(inp.tokens), color="grey", file=sys.stderr)
         prompt_tokens = [inp.tokens for inp in llm_inputs]
 
         bsz = len(llm_inputs)
@@ -145,7 +145,7 @@ class Llama4:
         max_prompt_len = max(len(t) for t in prompt_tokens)
 
         if max_prompt_len >= params.max_seq_len:
-            cprint(f"Out of token budget {max_prompt_len} vs {params.max_seq_len}", "red")
+            cprint(f"Out of token budget {max_prompt_len} vs {params.max_seq_len}", color="red", file=sys.stderr)
             return
 
         total_len = min(max_gen_len + max_prompt_len, params.max_seq_len)
@@ -245,13 +245,13 @@ class Llama4:
 
     def completion(
         self,
-        contents: List[RawContent],
+        contents: list[RawContent],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         echo: bool = False,
-    ) -> Generator[List[GenerationResult], None, None]:
+    ) -> Generator[list[GenerationResult], None, None]:
         llm_inputs = [self.formatter.encode_content(c) for c in contents]
         for result in self.generate(
             llm_inputs=llm_inputs,
@@ -267,13 +267,13 @@ class Llama4:
 
     def chat_completion(
         self,
-        messages_batch: List[List[RawMessage]],
+        messages_batch: list[list[RawMessage]],
         temperature: float = 0.6,
         top_p: float = 0.9,
-        max_gen_len: Optional[int] = None,
+        max_gen_len: int | None = None,
         logprobs: bool = False,
         echo: bool = False,
-    ) -> Generator[List[GenerationResult], None, None]:
+    ) -> Generator[list[GenerationResult], None, None]:
         llm_inputs = [self.formatter.encode_dialog_prompt(messages) for messages in messages_batch]
         for result in self.generate(
             llm_inputs=llm_inputs,
diff --git a/llama_stack/models/llama/llama4/model.py b/llama_stack/models/llama/llama4/model.py
index 2272b868d..4fb1181f7 100644
--- a/llama_stack/models/llama/llama4/model.py
+++ b/llama_stack/models/llama/llama4/model.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import math
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any
 
 import fairscale.nn.model_parallel.initialize as fs_init
 import torch
@@ -89,7 +89,7 @@ def apply_rotary_emb(
     xq: torch.Tensor,
     xk: torch.Tensor,
     freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
     freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
@@ -174,13 +174,13 @@ class Attention(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         if prefix + "wqkv.weight" in state_dict:
             wqkv = state_dict.pop(prefix + "wqkv.weight")
@@ -200,7 +200,7 @@ class Attention(nn.Module):
         x: torch.Tensor,
         start_pos: int,
         freqs_cis: torch.Tensor,
-        mask: Optional[torch.Tensor] = None,
+        mask: torch.Tensor | None = None,
     ):
         bsz, seqlen, _ = x.shape
         xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
@@ -288,13 +288,13 @@ class TransformerBlock(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         if prefix + "attention.wqkv.layer_norm_weight" in state_dict:
             state_dict[prefix + "attention_norm.weight"] = state_dict.pop(prefix + "attention.wqkv.layer_norm_weight")
@@ -318,8 +318,8 @@ class TransformerBlock(nn.Module):
         x: torch.Tensor,
         start_pos: int,
         freqs_cis: torch.Tensor,
-        global_attn_mask: Optional[torch.Tensor],
-        local_attn_mask: Optional[torch.Tensor],
+        global_attn_mask: torch.Tensor | None,
+        local_attn_mask: torch.Tensor | None,
     ):
         # The iRoPE architecture uses global attention mask for NoPE layers or
         # if chunked local attention is not used
@@ -374,13 +374,13 @@ class Transformer(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         if prefix + "rope.freqs" in state_dict:
             state_dict.pop(prefix + "rope.freqs")
diff --git a/llama_stack/models/llama/llama4/moe.py b/llama_stack/models/llama/llama4/moe.py
index 2ce49e915..7475963d3 100644
--- a/llama_stack/models/llama/llama4/moe.py
+++ b/llama_stack/models/llama/llama4/moe.py
@@ -6,7 +6,7 @@
 
 # ruff: noqa: N806
 # pyre-strict
-from typing import Any, Dict, List
+from typing import Any
 
 import fairscale.nn.model_parallel.initialize as fs_init
 import torch
@@ -63,13 +63,13 @@ class Experts(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         self.prefix = prefix
         if prefix + "moe_w_in_eD_F" in state_dict:
@@ -158,13 +158,13 @@ class MoE(torch.nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool,
-        missing_keys: List[str],
-        unexpected_keys: List[str],
-        error_msgs: List[str],
+        missing_keys: list[str],
+        unexpected_keys: list[str],
+        error_msgs: list[str],
     ) -> None:
         if prefix + "w_in_shared_FD.weight" in state_dict:
             state_dict[prefix + "shared_expert.w1.weight"] = state_dict.pop(prefix + "w_in_shared_FD.weight")
@@ -210,5 +210,5 @@ class MoE(torch.nn.Module):
 
 
 def divide_exact(numerator: int, denominator: int) -> int:
-    assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator)
+    assert numerator % denominator == 0, f"{numerator} is not divisible by {denominator}"
     return numerator // denominator
diff --git a/llama_stack/models/llama/llama4/preprocess.py b/llama_stack/models/llama/llama4/preprocess.py
index 689680779..7527a9987 100644
--- a/llama_stack/models/llama/llama4/preprocess.py
+++ b/llama_stack/models/llama/llama4/preprocess.py
@@ -13,7 +13,6 @@
 
 import math
 from collections import defaultdict
-from typing import Optional, Set, Tuple
 
 import torch
 import torchvision.transforms as tv
@@ -52,7 +51,7 @@ class ResizeNormalizeImageTransform:
         return self.tv_transform(image)
 
 
-class VariableSizeImageTransform(object):
+class VariableSizeImageTransform:
     """
     This class accepts images of any size and dynamically resizes, pads, and chunks them
     based on the image aspect ratio and the number of image chunks we allow.
@@ -100,7 +99,7 @@ class VariableSizeImageTransform(object):
         self.resample = tv.InterpolationMode.BILINEAR
 
     @staticmethod
-    def get_factors(n: int) -> Set[int]:
+    def get_factors(n: int) -> set[int]:
         """
         Calculate all factors of a given number, i.e. a divisor that leaves
         no remainder. For example, if n=12, it will return {1, 2, 3, 4, 6, 12}.
@@ -170,9 +169,9 @@ class VariableSizeImageTransform(object):
 
     @staticmethod
     def get_max_res_without_distortion(
-        image_size: Tuple[int, int],
-        target_size: Tuple[int, int],
-    ) -> Tuple[int, int]:
+        image_size: tuple[int, int],
+        target_size: tuple[int, int],
+    ) -> tuple[int, int]:
         """
         Determines the maximum resolution to which an image can be resized without distorting its
         aspect ratio, based on the target resolution.
@@ -223,8 +222,8 @@ class VariableSizeImageTransform(object):
     def resize_without_distortion(
         self,
         image: torch.Tensor,
-        target_size: Tuple[int, int],
-        max_upscaling_size: Optional[int],
+        target_size: tuple[int, int],
+        max_upscaling_size: int | None,
     ) -> torch.Tensor:
         """
         Used to resize an image to target_resolution, without distortion.
@@ -289,10 +288,10 @@ class VariableSizeImageTransform(object):
 
     def get_best_fit(
         self,
-        image_size: Tuple[int, int],
+        image_size: tuple[int, int],
         possible_resolutions: torch.Tensor,
         resize_to_max_canvas: bool = False,
-    ) -> Tuple[int, int]:
+    ) -> tuple[int, int]:
         """
         Determines the best possible canvas from a list of possible resolutions to which
         an image can be resized without distortion.
@@ -392,7 +391,7 @@ class VariableSizeImageTransform(object):
         max_num_chunks: int,
         normalize_img: bool = True,
         resize_to_max_canvas: bool = False,
-    ) -> Tuple[torch.Tensor, Tuple[int, int]]:
+    ) -> tuple[torch.Tensor, tuple[int, int]]:
         """
         Args:
             image (PIL.Image): Image to be resized.
diff --git a/llama_stack/models/llama/llama4/prompt_format.md b/llama_stack/models/llama/llama4/prompt_format.md
index 698571093..7ae998310 100644
--- a/llama_stack/models/llama/llama4/prompt_format.md
+++ b/llama_stack/models/llama/llama4/prompt_format.md
@@ -64,7 +64,7 @@ This example passes an image that is smaller than the tile size, to show the til
 
 ##### Model Response Format
 ```
-The image depicts a dog standing on a skateboard, with its front paws positioned on the board and its back paws hanging off the back. The dog has a distinctive coat pattern, featuring a white face, brown and black fur, and white paws, and is standing on a skateboard with red wheels, set against a blurred background of a street or alleyway with a teal door and beige wall.<|eot|>
+The image depicts a dog standing on a skateboard, positioned centrally and facing the camera directly. The dog has a distinctive coat pattern featuring white, black, and brown fur, with floppy ears and a black nose, and is standing on a skateboard with red wheels.<|eot|>
 ```
 
 
@@ -91,7 +91,7 @@ Here is an example of how to pass an image to the model
 
 ##### Model Response Format
 ```
-This image shows a dog standing on a skateboard, with its front paws positioned near the front of the board and its back paws near the back. The dog has a white, black, and orange coat, and is standing on a gray skateboard with red wheels, in front of a blurred background that appears to be a street or alleyway.<|eot|>
+The image depicts a dog standing on a skateboard, with the dog positioned centrally and facing forward. The dog has a distinctive coat featuring a mix of white, brown, and black fur, and is wearing a collar as it stands on the skateboard, which has red wheels.<|eot|>
 ```
 
 
@@ -117,7 +117,7 @@ Here is an example of how to pass an image to the model
 
 ##### Model Response Format
 ```
-The first image shows a dog standing on a skateboard, while the second image shows a plate of spaghetti with tomato sauce, parmesan cheese, and parsley. The two images are unrelated, with the first image featuring a dog and the second image featuring a food dish, and they do not share any common elements or themes.<|eot|>
+The first image features a dog standing on a skateboard, while the second image showcases a plate of spaghetti with tomato sauce and cheese. The two images appear to be unrelated, with one depicting a playful scene of a dog on a skateboard and the other presenting a classic Italian dish.<|eom|>
 ```
 
 
@@ -135,25 +135,52 @@ We are continuing the format for zero shot function calling used in previous ver
 ```
 <|begin_of_text|><|header_start|>system<|header_end|>
 
-You are an expert in composing functions. You are given a question and a set of possible functions.
-Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
-If none of the function can be used, point it out. If the given question lacks the parameters required by the function,
-also point it out. You should only return the function call in tools call sections.
+You are a helpful assistant and an expert in function composition. You can answer general questions using your internal knowledge OR invoke functions when necessary. Follow these strict guidelines:
 
-If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]
-You SHOULD NOT include any other text in the response.
+1. FUNCTION CALLS:
+- ONLY use functions that are EXPLICITLY listed in the function list below
+- If NO functions are listed (empty function list []), respond ONLY with internal knowledge or "I don't have access to [Unavailable service] information"
+- If a function is not in the list, respond ONLY with internal knowledge or "I don't have access to [Unavailable service] information"
+- If ALL required parameters are present AND the query EXACTLY matches a listed function's purpose: output ONLY the function call(s)
+- Use exact format: [func_name1(param1=value1, param2=value2), func_name2(...)]
+Examples:
+CORRECT: [get_weather(location="Vancouver"), calculate_route(start="Boston", end="New York")] <- Only if get_weather and calculate_route are in function list
+INCORRECT: get_weather(location="New York")
+INCORRECT: Let me check the weather: [get_weather(location="New York")]
+INCORRECT: [get_events(location="Singapore")] <- If function not in list
 
-Here is a list of functions in JSON format that you can invoke.
+2. RESPONSE RULES:
+- For pure function requests matching a listed function: ONLY output the function call(s)
+- For knowledge questions: ONLY output text
+- For missing parameters: ONLY request the specific missing parameters
+- For unavailable services (not in function list): output ONLY with internal knowledge or "I don't have access to [Unavailable service] information". Do NOT execute a function call.
+- If the query asks for information beyond what a listed function provides: output ONLY with internal knowledge about your limitations
+- NEVER combine text and function calls in the same response
+- NEVER suggest alternative functions when the requested service is unavailable
+- NEVER create or invent new functions not listed below
 
+3. STRICT BOUNDARIES:
+- ONLY use functions from the list below - no exceptions
+- NEVER use a function as an alternative to unavailable information
+- NEVER call functions not present in the function list
+- NEVER add explanatory text to function calls
+- NEVER respond with empty brackets
+- Use proper Python/JSON syntax for function calls
+- Check the function list carefully before responding
+
+4. TOOL RESPONSE HANDLING:
+- When receiving tool responses: provide concise, natural language responses
+- Don't repeat tool response verbatim
+- Don't add supplementary information
+
+Here is a list of functions in JSON format that you can invoke:
 [
     {
         "name": "get_weather",
         "description": "Get weather info for places",
         "parameters": {
             "type": "dict",
-            "required": [
-                "city"
-            ],
+            "required": ["city"],
             "properties": {
                 "city": {
                     "type": "string",
@@ -167,7 +194,7 @@ Here is a list of functions in JSON format that you can invoke.
             }
         }
     }
-<|eot|><|header_start|>user<|header_end|>
+]<|eot|><|header_start|>user<|header_end|>
 
 What is the weather in SF and Seattle?<|eot|><|header_start|>assistant<|header_end|>
 
@@ -176,7 +203,7 @@ What is the weather in SF and Seattle?<|eot|><|header_start|>assistant<|header_e
 
 ##### Model Response Format
 ```
-[get_weather(city='SF'), get_weather(city='Seattle')]<|eot|>
+[get_weather(city="San Francisco"), get_weather(city="Seattle")]<|eot|>
 ```
 
 
@@ -273,5 +300,5 @@ Use tools to get latest trending songs<|eot|><|header_start|>assistant<|header_e
 
 ##### Model Response Format
 ```
-{"n": "10"}<|eot|>
+{"n": 10}<|eot|>
 ```
diff --git a/llama_stack/models/llama/llama4/prompt_templates/system_prompts.py b/llama_stack/models/llama/llama4/prompt_templates/system_prompts.py
index 139e204ad..9c19f89ae 100644
--- a/llama_stack/models/llama/llama4/prompt_templates/system_prompts.py
+++ b/llama_stack/models/llama/llama4/prompt_templates/system_prompts.py
@@ -12,7 +12,6 @@
 # the top-level of this source tree.
 
 import textwrap
-from typing import List, Optional
 
 from llama_stack.apis.inference import ToolDefinition, ToolParamDefinition
 from llama_stack.models.llama.llama3.prompt_templates.base import (
@@ -62,23 +61,21 @@ class PythonListCustomToolGenerator(PromptTemplateGeneratorBase):  # noqa: N801
         - Don't repeat tool response verbatim
         - Don't add supplementary information
 
-
         {{ function_description }}
         """.strip("\n")
     )
 
-    def gen(self, custom_tools: List[ToolDefinition], system_prompt: Optional[str] = None) -> PromptTemplate:
+    def gen(self, custom_tools: list[ToolDefinition], system_prompt: str | None = None) -> PromptTemplate:
         system_prompt = system_prompt or self.DEFAULT_PROMPT
         return PromptTemplate(
             system_prompt,
             {"function_description": self._gen_function_description(custom_tools)},
         )
 
-    def _gen_function_description(self, custom_tools: List[ToolDefinition]) -> PromptTemplate:
+    def _gen_function_description(self, custom_tools: list[ToolDefinition]) -> PromptTemplate:
         template_str = textwrap.dedent(
             """
-            Here is a list of functions in JSON format that you can invoke.
-
+            Here is a list of functions in JSON format that you can invoke:
             [
                 {% for t in tools -%}
                 {# manually setting up JSON because jinja sorts keys in unexpected ways -#}
@@ -109,10 +106,6 @@ class PythonListCustomToolGenerator(PromptTemplateGeneratorBase):  # noqa: N801
                 {% endif -%}
                 {%- endfor %}
             ]
-
-            You can answer general questions or invoke tools when necessary.
-            In addition to tool calls, you should also augment your responses by using the tool outputs.
-
             """
         )
         return PromptTemplate(
@@ -120,7 +113,7 @@ class PythonListCustomToolGenerator(PromptTemplateGeneratorBase):  # noqa: N801
             {"tools": [t.model_dump() for t in custom_tools]},
         ).render()
 
-    def data_examples(self) -> List[List[ToolDefinition]]:
+    def data_examples(self) -> list[list[ToolDefinition]]:
         return [
             [
                 ToolDefinition(
diff --git a/llama_stack/models/llama/llama4/prompts.py b/llama_stack/models/llama/llama4/prompts.py
index 13b96359a..2da94db7b 100644
--- a/llama_stack/models/llama/llama4/prompts.py
+++ b/llama_stack/models/llama/llama4/prompts.py
@@ -7,7 +7,10 @@
 import textwrap
 from io import BytesIO
 from pathlib import Path
-from typing import List
+
+from llama_stack.models.llama.llama4.prompt_templates.system_prompts import (
+    PythonListCustomToolGenerator,
+)
 
 from ..datatypes import RawMediaItem, RawMessage, RawTextItem
 from ..prompt_format import (
@@ -19,7 +22,7 @@ from ..prompt_format import (
 THIS_DIR = Path(__file__).parent
 
 
-def usecases(base_model: bool = False) -> List[UseCase | str]:
+def usecases(base_model: bool = False) -> list[UseCase | str]:
     with open(THIS_DIR.parent / "resources/small_dog.jpg", "rb") as f:
         img_small_dog = f.read()
     with open(THIS_DIR.parent / "resources/dog.jpg", "rb") as f:
@@ -177,39 +180,9 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                     [
                         RawMessage(
                             role="system",
-                            content="""You are an expert in composing functions. You are given a question and a set of possible functions.
-Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
-If none of the function can be used, point it out. If the given question lacks the parameters required by the function,
-also point it out. You should only return the function call in tools call sections.
-
-If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]
-You SHOULD NOT include any other text in the response.
-
-Here is a list of functions in JSON format that you can invoke.
-
-[
-    {
-        "name": "get_weather",
-        "description": "Get weather info for places",
-        "parameters": {
-            "type": "dict",
-            "required": [
-                "city"
-            ],
-            "properties": {
-                "city": {
-                    "type": "string",
-                    "description": "The name of the city to get the weather for"
-                },
-                "metric": {
-                    "type": "string",
-                    "description": "The metric for weather. Options are: celsius, fahrenheit",
-                    "default": "celsius"
-                }
-            }
-        }
-    }
-""",
+                            content=PythonListCustomToolGenerator()
+                            .gen(PythonListCustomToolGenerator().data_examples()[0])
+                            .render(),
                         ),
                         RawMessage(
                             role="user",
diff --git a/llama_stack/models/llama/llama4/quantization/loader.py b/llama_stack/models/llama/llama4/quantization/loader.py
index f11d83c60..223744a5f 100644
--- a/llama_stack/models/llama/llama4/quantization/loader.py
+++ b/llama_stack/models/llama/llama4/quantization/loader.py
@@ -6,7 +6,7 @@
 
 import logging
 import os
-from typing import Callable, Optional
+from collections.abc import Callable
 
 import torch
 from fairscale.nn.model_parallel.initialize import get_model_parallel_rank
@@ -45,8 +45,8 @@ def experts_batched_swiglu_wrapper(
 def convert_to_quantized_model(
     model: Transformer,
     checkpoint_dir: str,
-    quantization_mode: Optional[str] = None,
-    fp8_activation_scale_ub: Optional[float] = 1200.0,
+    quantization_mode: str | None = None,
+    fp8_activation_scale_ub: float | None = 1200.0,
     use_rich_progress: bool = True,
 ) -> Transformer:
     from ...quantize_impls import (
@@ -213,7 +213,7 @@ def logging_callbacks(
         )
         task_id = progress.add_task("[blue]Converting layers...", total=total_blocks, status="Starting")
 
-    def update_status(message: Optional[str], completed: Optional[int] = None) -> None:
+    def update_status(message: str | None, completed: int | None = None) -> None:
         if use_rich_progress:
             if message is not None:
                 progress.update(task_id, status=message)
diff --git a/llama_stack/models/llama/llama4/tokenizer.model b/llama_stack/models/llama/llama4/tokenizer.model
old mode 100755
new mode 100644
diff --git a/llama_stack/models/llama/llama4/tokenizer.py b/llama_stack/models/llama/llama4/tokenizer.py
index 0d2cc7ce5..74070d43e 100644
--- a/llama_stack/models/llama/llama4/tokenizer.py
+++ b/llama_stack/models/llama/llama4/tokenizer.py
@@ -5,18 +5,11 @@
 # the root directory of this source tree.
 
 import os
+from collections.abc import Collection, Iterator, Sequence, Set
 from logging import getLogger
 from pathlib import Path
 from typing import (
-    AbstractSet,
-    Collection,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Optional,
-    Sequence,
-    Union,
     cast,
 )
 
@@ -114,7 +107,7 @@ class Tokenizer:
     Tokenizing and encoding/decoding text using the Tiktoken tokenizer.
     """
 
-    special_tokens: Dict[str, int]
+    special_tokens: dict[str, int]
 
     num_reserved_special_tokens = 2048
 
@@ -182,9 +175,9 @@ class Tokenizer:
         *,
         bos: bool,
         eos: bool,
-        allowed_special: Optional[Union[Literal["all"], AbstractSet[str]]] = None,
-        disallowed_special: Union[Literal["all"], Collection[str]] = (),
-    ) -> List[int]:
+        allowed_special: Literal["all"] | Set[str] | None = None,
+        disallowed_special: Literal["all"] | Collection[str] = (),
+    ) -> list[int]:
         """
         Encodes a string into a list of token IDs.
 
@@ -217,7 +210,7 @@ class Tokenizer:
                 s[i : i + TIKTOKEN_MAX_ENCODE_CHARS], MAX_NO_WHITESPACES_CHARS
             )
         )
-        t: List[int] = []
+        t: list[int] = []
         for substr in substrs:
             t.extend(
                 self.model.encode(
@@ -243,7 +236,7 @@ class Tokenizer:
             str: The decoded string.
         """
         # Typecast is safe here. Tiktoken doesn't do anything list-related with the sequence.
-        return self.model.decode(cast(List[int], t))
+        return self.model.decode(cast(list[int], t))
 
     @staticmethod
     def _split_whitespaces_or_nonwhitespaces(s: str, max_consecutive_slice_len: int) -> Iterator[str]:
diff --git a/llama_stack/models/llama/llama4/vision/embedding.py b/llama_stack/models/llama/llama4/vision/embedding.py
index ed7659a73..c7dd81965 100644
--- a/llama_stack/models/llama/llama4/vision/embedding.py
+++ b/llama_stack/models/llama/llama4/vision/embedding.py
@@ -5,7 +5,8 @@
 # the root directory of this source tree.
 
 import math
-from typing import Any, Callable, Dict, List
+from collections.abc import Callable
+from typing import Any
 
 import torch
 import torch.nn as nn
@@ -136,13 +137,13 @@ class VisionEmbeddings(torch.nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool = True,
-        missing_keys: List[str] = None,
-        unexpected_keys: List[str] = None,
-        error_msgs: List[str] = None,
+        missing_keys: list[str] | None = None,
+        unexpected_keys: list[str] | None = None,
+        error_msgs: list[str] | None = None,
         return_state_dict: bool = False,
     ) -> None:
         original_sd = self.state_dict()
@@ -163,7 +164,7 @@ class VisionEmbeddings(torch.nn.Module):
     # each image is a tensor of shape [num_tiles, C, H, W]
     def forward(
         self,
-        image_batch: List[List[torch.Tensor]],
+        image_batch: list[list[torch.Tensor]],
         image_mask: torch.Tensor,
         h_ref: torch.Tensor,
     ) -> torch.Tensor:
diff --git a/llama_stack/models/llama/llama4/vision/encoder.py b/llama_stack/models/llama/llama4/vision/encoder.py
index 4baf03d8d..4b66f1411 100644
--- a/llama_stack/models/llama/llama4/vision/encoder.py
+++ b/llama_stack/models/llama/llama4/vision/encoder.py
@@ -4,7 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from collections.abc import Callable
+from typing import Any
 
 import fairscale.nn.model_parallel.initialize as fs_init
 import torch
@@ -42,9 +43,9 @@ class ColumnParallelConv2dPatch(torch.nn.Module):
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[int, Tuple[int, int]],
-        stride: Union[int, Tuple[int, int]],
-        bias: Optional[bool] = False,
+        kernel_size: int | tuple[int, int],
+        stride: int | tuple[int, int],
+        bias: bool | None = False,
     ) -> None:
         super().__init__()
         if isinstance(kernel_size, int):
@@ -134,15 +135,15 @@ class _TransformerBlock(nn.Module):
     def attention(
         self,
         x: torch.Tensor,
-        freq_cis: Optional[torch.Tensor] = None,
+        freq_cis: torch.Tensor | None = None,
     ):
         return self.attn(x=x, start_pos=0, freqs_cis=freq_cis)
 
     def forward(
         self,
         x: torch.Tensor,
-        mask: Optional[torch.Tensor] = None,
-        freq_cis: Optional[torch.Tensor] = None,
+        mask: torch.Tensor | None = None,
+        freq_cis: torch.Tensor | None = None,
     ):
         _gate_attn = 1 if not self.gated else self.gate_attn.tanh()
         _gate_ffn = 1 if not self.gated else self.gate_ffn.tanh()
@@ -210,8 +211,8 @@ class PackingIndex:
 class VisionEncoder(nn.Module):
     def __init__(
         self,
-        image_size: Tuple[int, int],
-        patch_size: Tuple[int, int],
+        image_size: tuple[int, int],
+        patch_size: tuple[int, int],
         dim: int,
         layers: int,
         heads: int,
@@ -299,13 +300,13 @@ class VisionEncoder(nn.Module):
 
     def load_hook(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         prefix: str,
-        local_metadata: Dict[str, Any],
+        local_metadata: dict[str, Any],
         strict: bool = True,
-        missing_keys: List[str] = None,
-        unexpected_keys: List[str] = None,
-        error_msgs: List[str] = None,
+        missing_keys: list[str] | None = None,
+        unexpected_keys: list[str] | None = None,
+        error_msgs: list[str] | None = None,
         return_state_dict: bool = False,
     ) -> None:
         orig_pos_embed = state_dict.get(prefix + "positional_embedding")
diff --git a/llama_stack/models/llama/prompt_format.py b/llama_stack/models/llama/prompt_format.py
index edb34620c..6191df61a 100644
--- a/llama_stack/models/llama/prompt_format.py
+++ b/llama_stack/models/llama/prompt_format.py
@@ -14,7 +14,6 @@
 import json
 import textwrap
 from pathlib import Path
-from typing import List
 
 from pydantic import BaseModel, Field
 
@@ -44,7 +43,7 @@ class TextCompletionContent(BaseModel):
 class UseCase(BaseModel):
     title: str = ""
     description: str = ""
-    dialogs: List[List[RawMessage] | TextCompletionContent | str] = Field(default_factory=list)
+    dialogs: list[list[RawMessage] | TextCompletionContent | str] = Field(default_factory=list)
     notes: str = ""
     tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json
     max_gen_len: int = 512
diff --git a/llama_stack/models/llama/quantize_impls.py b/llama_stack/models/llama/quantize_impls.py
index a5da01588..a6400c5c9 100644
--- a/llama_stack/models/llama/quantize_impls.py
+++ b/llama_stack/models/llama/quantize_impls.py
@@ -7,7 +7,6 @@
 # type: ignore
 import collections
 import logging
-from typing import Optional, Tuple, Type, Union
 
 log = logging.getLogger(__name__)
 
@@ -27,7 +26,7 @@ class Fp8ScaledWeights:
     # TODO: Ugly trick so torch allows us to replace parameters
     # with our custom Fp8Weights instance. Do this properly.
     @property
-    def __class__(self) -> Type[nn.parameter.Parameter]:
+    def __class__(self) -> type[nn.parameter.Parameter]:
         return nn.Parameter
 
     @property
@@ -51,7 +50,7 @@ class Int4ScaledWeights:
     # TODO: Ugly trick so torch allows us to replace parameters
     # with our custom Int4Weights instance. Do this properly.
     @property
-    def __class__(self) -> Type[nn.parameter.Parameter]:
+    def __class__(self) -> type[nn.parameter.Parameter]:
         return nn.Parameter
 
     @property
@@ -74,7 +73,7 @@ class Int4Weights(
 def int4_row_quantize(
     x: torch.Tensor,
     group_size: int = 128,
-) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     n_bit = 4  # Number of target bits.
     to_quant = x.reshape(-1, group_size).to(torch.float)
 
@@ -115,8 +114,8 @@ def pack_int4(x: torch.Tensor) -> torch.Tensor:
 
 def bmm_nt(
     x: Tensor,
-    w: Union[Fp8RowwiseWeights, Int4Weights],
-    num_tokens: Optional[Tensor] = None,
+    w: Fp8RowwiseWeights | Int4Weights,
+    num_tokens: Tensor | None = None,
 ) -> Tensor:
     if isinstance(w, Fp8ScaledWeights):
         xq, x_scale = torch.ops.fbgemm.quantize_fp8_per_row(x, num_tokens, w.activation_scale_ub)
@@ -129,10 +128,10 @@ def bmm_nt(
 
 def ffn_swiglu(
     x: Tensor,
-    w1: Union[Fp8RowwiseWeights, Int4Weights],
-    w3: Union[Fp8RowwiseWeights, Int4Weights],
-    w2: Union[Fp8RowwiseWeights, Int4Weights],
-    num_tokens: Optional[Tensor] = None,
+    w1: Fp8RowwiseWeights | Int4Weights,
+    w3: Fp8RowwiseWeights | Int4Weights,
+    w2: Fp8RowwiseWeights | Int4Weights,
+    num_tokens: Tensor | None = None,
     is_memory_bounded: bool = False,
 ) -> Tensor:
     if (isinstance(w1, Fp8ScaledWeights) and isinstance(w3, Fp8ScaledWeights) and isinstance(w2, Fp8ScaledWeights)) or (
@@ -158,7 +157,7 @@ def ffn_swiglu(
 def quantize_fp8(
     w: Tensor,
     fp8_activation_scale_ub: float,
-    output_device: Optional[torch.device] = None,
+    output_device: torch.device | None = None,
 ) -> Fp8RowwiseWeights:
     """Quantize [n, k] weight tensor.
 
@@ -184,7 +183,7 @@ def quantize_fp8(
 @torch.inference_mode()
 def quantize_int4(
     w: Tensor,
-    output_device: Optional[torch.device] = None,
+    output_device: torch.device | None = None,
 ) -> Int4Weights:
     """Quantize [n, k/2] weight tensor.
 
@@ -213,7 +212,7 @@ def load_fp8(
     w: Tensor,
     w_scale: Tensor,
     fp8_activation_scale_ub: float,
-    output_device: Optional[torch.device] = None,
+    output_device: torch.device | None = None,
 ) -> Fp8RowwiseWeights:
     """Load FP8 [n, k] weight tensor.
 
@@ -239,7 +238,7 @@ def load_int4(
     w: Tensor,
     scale: Tensor,
     zero_point: Tensor,
-    output_device: Optional[torch.device] = None,
+    output_device: torch.device | None = None,
 ) -> Int4Weights:
     """Load INT4 [n, k/2] weight tensor.
 
@@ -256,9 +255,9 @@ def load_int4(
 
 def fc_dynamic(
     x: Tensor,
-    w: Union[Fp8RowwiseWeights, Int4Weights],
-    activation_scale_ub: Optional[Tensor] = None,
-    num_tokens: Optional[Tensor] = None,
+    w: Fp8RowwiseWeights | Int4Weights,
+    activation_scale_ub: Tensor | None = None,
+    num_tokens: Tensor | None = None,
     is_memory_bounded: bool = False,
 ) -> Tensor:
     """
@@ -275,11 +274,11 @@ def fc_dynamic(
 
 def ffn_swiglu_dynamic(
     x: Tensor,
-    w1: Union[Fp8RowwiseWeights, Int4Weights],
-    w3: Union[Fp8RowwiseWeights, Int4Weights],
-    w2: Union[Fp8RowwiseWeights, Int4Weights],
-    activation_scale_ub: Optional[Tensor] = None,
-    num_tokens: Optional[Tensor] = None,
+    w1: Fp8RowwiseWeights | Int4Weights,
+    w3: Fp8RowwiseWeights | Int4Weights,
+    w2: Fp8RowwiseWeights | Int4Weights,
+    activation_scale_ub: Tensor | None = None,
+    num_tokens: Tensor | None = None,
     is_memory_bounded: bool = False,
 ) -> Tensor:
     assert x.dim() == 3 or x.dim() == 2
diff --git a/llama_stack/models/llama/sku_list.py b/llama_stack/models/llama/sku_list.py
index 513481831..271cec63f 100644
--- a/llama_stack/models/llama/sku_list.py
+++ b/llama_stack/models/llama/sku_list.py
@@ -6,7 +6,6 @@
 
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import List, Optional
 
 from .sku_types import (
     CheckpointQuantizationFormat,
@@ -19,14 +18,14 @@ LLAMA2_VOCAB_SIZE = 32000
 LLAMA3_VOCAB_SIZE = 128256
 
 
-def resolve_model(descriptor: str) -> Optional[Model]:
+def resolve_model(descriptor: str) -> Model | None:
     for m in all_registered_models():
         if descriptor in (m.descriptor(), m.huggingface_repo):
             return m
     return None
 
 
-def all_registered_models() -> List[Model]:
+def all_registered_models() -> list[Model]:
     return (
         llama2_family()
         + llama3_family()
@@ -38,48 +37,48 @@ def all_registered_models() -> List[Model]:
     )
 
 
-def llama2_family() -> List[Model]:
+def llama2_family() -> list[Model]:
     return [
         *llama2_base_models(),
         *llama2_instruct_models(),
     ]
 
 
-def llama3_family() -> List[Model]:
+def llama3_family() -> list[Model]:
     return [
         *llama3_base_models(),
         *llama3_instruct_models(),
     ]
 
 
-def llama3_1_family() -> List[Model]:
+def llama3_1_family() -> list[Model]:
     return [
         *llama3_1_base_models(),
         *llama3_1_instruct_models(),
     ]
 
 
-def llama3_2_family() -> List[Model]:
+def llama3_2_family() -> list[Model]:
     return [
         *llama3_2_base_models(),
         *llama3_2_instruct_models(),
     ]
 
 
-def llama3_3_family() -> List[Model]:
+def llama3_3_family() -> list[Model]:
     return [
         *llama3_3_instruct_models(),
     ]
 
 
-def llama4_family() -> List[Model]:
+def llama4_family() -> list[Model]:
     return [
         *llama4_base_models(),
         *llama4_instruct_models(),
     ]
 
 
-def llama4_base_models() -> List[Model]:
+def llama4_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama4_scout_17b_16e,
@@ -98,7 +97,7 @@ def llama4_base_models() -> List[Model]:
     ]
 
 
-def llama4_instruct_models() -> List[Model]:
+def llama4_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama4_scout_17b_16e_instruct,
@@ -126,7 +125,7 @@ def llama4_instruct_models() -> List[Model]:
     ]
 
 
-def llama2_base_models() -> List[Model]:
+def llama2_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama2_7b,
@@ -185,7 +184,7 @@ def llama2_base_models() -> List[Model]:
     ]
 
 
-def llama3_base_models() -> List[Model]:
+def llama3_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_8b,
@@ -226,7 +225,7 @@ def llama3_base_models() -> List[Model]:
     ]
 
 
-def llama3_1_base_models() -> List[Model]:
+def llama3_1_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_1_8b,
@@ -324,7 +323,7 @@ def llama3_1_base_models() -> List[Model]:
     ]
 
 
-def llama3_2_base_models() -> List[Model]:
+def llama3_2_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b,
@@ -407,7 +406,7 @@ def llama3_2_base_models() -> List[Model]:
     ]
 
 
-def llama2_instruct_models() -> List[Model]:
+def llama2_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama2_7b_chat,
@@ -466,7 +465,7 @@ def llama2_instruct_models() -> List[Model]:
     ]
 
 
-def llama3_instruct_models() -> List[Model]:
+def llama3_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_8b_instruct,
@@ -507,7 +506,7 @@ def llama3_instruct_models() -> List[Model]:
     ]
 
 
-def llama3_1_instruct_models() -> List[Model]:
+def llama3_1_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_1_8b_instruct,
@@ -635,7 +634,7 @@ def arch_args_3b() -> dict:
     }
 
 
-def llama3_2_quantized_models() -> List[Model]:
+def llama3_2_quantized_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b_instruct,
@@ -704,7 +703,7 @@ def llama3_2_quantized_models() -> List[Model]:
     ]
 
 
-def llama3_2_instruct_models() -> List[Model]:
+def llama3_2_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b_instruct,
@@ -766,7 +765,7 @@ def llama3_2_instruct_models() -> List[Model]:
     ]
 
 
-def llama3_3_instruct_models() -> List[Model]:
+def llama3_3_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_3_70b_instruct,
@@ -790,8 +789,15 @@ def llama3_3_instruct_models() -> List[Model]:
 
 
 @lru_cache
-def safety_models() -> List[Model]:
+def safety_models() -> list[Model]:
     return [
+        Model(
+            core_model_id=CoreModelId.llama_guard_4_12b,
+            description="Llama Guard v4 12b system safety model",
+            huggingface_repo="meta-llama/Llama-Guard-4-12B",
+            arch_args={},
+            pth_file_count=1,
+        ),
         Model(
             core_model_id=CoreModelId.llama_guard_3_11b_vision,
             description="Llama Guard v3 11b vision system safety model",
@@ -912,7 +918,7 @@ def safety_models() -> List[Model]:
 @dataclass
 class LlamaDownloadInfo:
     folder: str
-    files: List[str]
+    files: list[str]
     pth_size: int
 
 
@@ -942,6 +948,8 @@ def llama_meta_net_info(model: Model) -> LlamaDownloadInfo:
     elif model.core_model_id == CoreModelId.llama_guard_2_8b:
         folder = "llama-guard-2"
     else:
+        if model.huggingface_repo is None:
+            raise ValueError(f"Model {model.core_model_id} has no huggingface_repo set")
         folder = model.huggingface_repo.split("/")[-1]
         if "Llama-2" in folder:
             folder = folder.lower()
@@ -1018,3 +1026,4 @@ def llama_meta_pth_size(model: Model) -> int:
                 return 54121549657
             else:
                 return 100426653046
+    return 0
diff --git a/llama_stack/models/llama/sku_types.py b/llama_stack/models/llama/sku_types.py
index 88799b66d..4147707d5 100644
--- a/llama_stack/models/llama/sku_types.py
+++ b/llama_stack/models/llama/sku_types.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -81,6 +81,7 @@ class CoreModelId(Enum):
     llama_guard_2_8b = "Llama-Guard-2-8B"
     llama_guard_3_11b_vision = "Llama-Guard-3-11B-Vision"
     llama_guard_3_1b = "Llama-Guard-3-1B"
+    llama_guard_4_12b = "Llama-Guard-4-12B"
 
 
 def is_multimodal(model_id) -> bool:
@@ -148,6 +149,7 @@ def model_family(model_id) -> ModelFamily:
         CoreModelId.llama_guard_2_8b,
         CoreModelId.llama_guard_3_11b_vision,
         CoreModelId.llama_guard_3_1b,
+        CoreModelId.llama_guard_4_12b,
     ]:
         return ModelFamily.safety
     else:
@@ -157,13 +159,13 @@ def model_family(model_id) -> ModelFamily:
 class Model(BaseModel):
     core_model_id: CoreModelId
     description: str
-    huggingface_repo: Optional[str] = None
-    arch_args: Dict[str, Any]
+    huggingface_repo: str | None = None
+    arch_args: dict[str, Any]
     variant: str = ""
 
     quantization_format: CheckpointQuantizationFormat = CheckpointQuantizationFormat.bf16
     pth_file_count: int
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    metadata: dict[str, Any] = Field(default_factory=dict)
 
     # silence pydantic until we remove the `model_` fields
     model_config = ConfigDict(protected_namespaces=())
@@ -225,5 +227,7 @@ class Model(BaseModel):
             CoreModelId.llama_guard_3_1b,
         ]:
             return 131072
+        elif self.core_model_id == CoreModelId.llama_guard_4_12b:
+            return 8192
         else:
             raise ValueError(f"Unknown max_seq_len for {self.core_model_id}")
diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py
index c3141f807..60b05545b 100644
--- a/llama_stack/providers/datatypes.py
+++ b/llama_stack/providers/datatypes.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, List, Optional, Protocol
+from typing import Any, Protocol
 from urllib.parse import urlparse
 
 from pydantic import BaseModel, Field
@@ -16,12 +16,33 @@ from llama_stack.apis.datatypes import Api
 from llama_stack.apis.models import Model
 from llama_stack.apis.scoring_functions import ScoringFn
 from llama_stack.apis.shields import Shield
-from llama_stack.apis.tools import Tool
+from llama_stack.apis.tools import ToolGroup
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.schema_utils import json_schema_type
 
 
 class ModelsProtocolPrivate(Protocol):
+    """
+    Protocol for model management.
+
+    This allows users to register their preferred model identifiers.
+
+    Model registration requires:
+     - a provider, used to route the registration request
+     - a model identifier, the user's intended name for the model during inference
+     - a provider model identifier, a model identifier supported by the provider
+
+    Providers will only accept registration for provider model ids they support.
+
+    Example:
+      register: provider x my-model-id x provider-model-id
+       -> Error if provider does not support provider-model-id
+       -> Error if my-model-id is already registered
+       -> Success if provider supports provider-model-id
+      inference: my-model-id x ...
+       -> Provider uses provider-model-id for inference
+    """
+
     async def register_model(self, model: Model) -> Model: ...
 
     async def unregister_model(self, model_id: str) -> None: ...
@@ -44,7 +65,7 @@ class DatasetsProtocolPrivate(Protocol):
 
 
 class ScoringFunctionsProtocolPrivate(Protocol):
-    async def list_scoring_functions(self) -> List[ScoringFn]: ...
+    async def list_scoring_functions(self) -> list[ScoringFn]: ...
 
     async def register_scoring_function(self, scoring_fn: ScoringFn) -> None: ...
 
@@ -53,10 +74,10 @@ class BenchmarksProtocolPrivate(Protocol):
     async def register_benchmark(self, benchmark: Benchmark) -> None: ...
 
 
-class ToolsProtocolPrivate(Protocol):
-    async def register_tool(self, tool: Tool) -> None: ...
+class ToolGroupsProtocolPrivate(Protocol):
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None: ...
 
-    async def unregister_tool(self, tool_id: str) -> None: ...
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None: ...
 
 
 @json_schema_type
@@ -67,24 +88,24 @@ class ProviderSpec(BaseModel):
         ...,
         description="Fully-qualified classname of the config for this provider",
     )
-    api_dependencies: List[Api] = Field(
+    api_dependencies: list[Api] = Field(
         default_factory=list,
         description="Higher-level API surfaces may depend on other providers to provide their functionality",
     )
-    optional_api_dependencies: List[Api] = Field(
+    optional_api_dependencies: list[Api] = Field(
         default_factory=list,
     )
-    deprecation_warning: Optional[str] = Field(
+    deprecation_warning: str | None = Field(
         default=None,
         description="If this provider is deprecated, specify the warning message here",
     )
-    deprecation_error: Optional[str] = Field(
+    deprecation_error: str | None = Field(
         default=None,
         description="If this provider is deprecated and does NOT work, specify the error message here",
     )
 
     # used internally by the resolver; this is a hack for now
-    deps__: List[str] = Field(default_factory=list)
+    deps__: list[str] = Field(default_factory=list)
 
     @property
     def is_sample(self) -> bool:
@@ -110,25 +131,25 @@ Fully-qualified name of the module to import. The module is expected to have:
  - `get_adapter_impl(config, deps)`: returns the adapter implementation
 """,
     )
-    pip_packages: List[str] = Field(
+    pip_packages: list[str] = Field(
         default_factory=list,
         description="The pip dependencies needed for this implementation",
     )
     config_class: str = Field(
         description="Fully-qualified classname of the config for this provider",
     )
-    provider_data_validator: Optional[str] = Field(
+    provider_data_validator: str | None = Field(
         default=None,
     )
 
 
 @json_schema_type
 class InlineProviderSpec(ProviderSpec):
-    pip_packages: List[str] = Field(
+    pip_packages: list[str] = Field(
         default_factory=list,
         description="The pip dependencies needed for this implementation",
     )
-    container_image: Optional[str] = Field(
+    container_image: str | None = Field(
         default=None,
         description="""
 The container image to use for this implementation. If one is provided, pip_packages will be ignored.
@@ -143,14 +164,14 @@ Fully-qualified name of the module to import. The module is expected to have:
  - `get_provider_impl(config, deps)`: returns the local implementation
 """,
     )
-    provider_data_validator: Optional[str] = Field(
+    provider_data_validator: str | None = Field(
         default=None,
     )
 
 
 class RemoteProviderConfig(BaseModel):
     host: str = "localhost"
-    port: Optional[int] = None
+    port: int | None = None
     protocol: str = "http"
 
     @property
@@ -176,7 +197,7 @@ API responses, specify the adapter here.
     )
 
     @property
-    def container_image(self) -> Optional[str]:
+    def container_image(self) -> str | None:
         return None
 
     @property
@@ -184,16 +205,16 @@ API responses, specify the adapter here.
         return self.adapter.module
 
     @property
-    def pip_packages(self) -> List[str]:
+    def pip_packages(self) -> list[str]:
         return self.adapter.pip_packages
 
     @property
-    def provider_data_validator(self) -> Optional[str]:
+    def provider_data_validator(self) -> str | None:
         return self.adapter.provider_data_validator
 
 
 def remote_provider_spec(
-    api: Api, adapter: AdapterSpec, api_dependencies: Optional[List[Api]] = None
+    api: Api, adapter: AdapterSpec, api_dependencies: list[Api] | None = None
 ) -> RemoteProviderSpec:
     return RemoteProviderSpec(
         api=api,
diff --git a/llama_stack/providers/inline/agents/meta_reference/__init__.py b/llama_stack/providers/inline/agents/meta_reference/__init__.py
index 4be064f1d..7503b8c90 100644
--- a/llama_stack/providers/inline/agents/meta_reference/__init__.py
+++ b/llama_stack/providers/inline/agents/meta_reference/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
 from .config import MetaReferenceAgentsImplConfig
 
 
-async def get_provider_impl(config: MetaReferenceAgentsImplConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: MetaReferenceAgentsImplConfig, deps: dict[Api, Any]):
     from .agents import MetaReferenceAgentsImpl
 
     impl = MetaReferenceAgentsImpl(
diff --git a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
index b5714b438..2e387e7e8 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agent_instance.py
@@ -10,8 +10,8 @@ import re
 import secrets
 import string
 import uuid
+from collections.abc import AsyncGenerator
 from datetime import datetime, timezone
-from typing import AsyncGenerator, List, Optional, Union
 
 import httpx
 
@@ -95,6 +95,7 @@ class ChatAgent(ShieldRunnerMixin):
         tool_groups_api: ToolGroups,
         vector_io_api: VectorIO,
         persistence_store: KVStore,
+        created_at: str,
     ):
         self.agent_id = agent_id
         self.agent_config = agent_config
@@ -104,6 +105,7 @@ class ChatAgent(ShieldRunnerMixin):
         self.storage = AgentPersistence(agent_id, persistence_store)
         self.tool_runtime_api = tool_runtime_api
         self.tool_groups_api = tool_groups_api
+        self.created_at = created_at
 
         ShieldRunnerMixin.__init__(
             self,
@@ -112,7 +114,7 @@ class ChatAgent(ShieldRunnerMixin):
             output_shields=agent_config.output_shields,
         )
 
-    def turn_to_messages(self, turn: Turn) -> List[Message]:
+    def turn_to_messages(self, turn: Turn) -> list[Message]:
         messages = []
 
         # NOTE: if a toolcall response is in a step, we do not add it when processing the input messages
@@ -161,7 +163,7 @@ class ChatAgent(ShieldRunnerMixin):
     async def create_session(self, name: str) -> str:
         return await self.storage.create_session(name)
 
-    async def get_messages_from_turns(self, turns: List[Turn]) -> List[Message]:
+    async def get_messages_from_turns(self, turns: list[Turn]) -> list[Message]:
         messages = []
         if self.agent_config.instructions != "":
             messages.append(SystemMessage(content=self.agent_config.instructions))
@@ -201,8 +203,8 @@ class ChatAgent(ShieldRunnerMixin):
 
     async def _run_turn(
         self,
-        request: Union[AgentTurnCreateRequest, AgentTurnResumeRequest],
-        turn_id: Optional[str] = None,
+        request: AgentTurnCreateRequest | AgentTurnResumeRequest,
+        turn_id: str | None = None,
     ) -> AsyncGenerator:
         assert request.stream is True, "Non-streaming not supported"
 
@@ -321,10 +323,10 @@ class ChatAgent(ShieldRunnerMixin):
         self,
         session_id: str,
         turn_id: str,
-        input_messages: List[Message],
+        input_messages: list[Message],
         sampling_params: SamplingParams,
         stream: bool = False,
-        documents: Optional[List[Document]] = None,
+        documents: list[Document] | None = None,
     ) -> AsyncGenerator:
         # Doing async generators makes downstream code much simpler and everything amenable to
         # streaming. However, it also makes things complicated here because AsyncGenerators cannot
@@ -374,8 +376,8 @@ class ChatAgent(ShieldRunnerMixin):
     async def run_multiple_shields_wrapper(
         self,
         turn_id: str,
-        messages: List[Message],
-        shields: List[str],
+        messages: list[Message],
+        shields: list[str],
         touchpoint: str,
     ) -> AsyncGenerator:
         async with tracing.span("run_shields") as span:
@@ -443,10 +445,10 @@ class ChatAgent(ShieldRunnerMixin):
         self,
         session_id: str,
         turn_id: str,
-        input_messages: List[Message],
+        input_messages: list[Message],
         sampling_params: SamplingParams,
         stream: bool = False,
-        documents: Optional[List[Document]] = None,
+        documents: list[Document] | None = None,
     ) -> AsyncGenerator:
         # if document is passed in a turn, we parse the raw text of the document
         # and sent it as a user message
@@ -760,7 +762,7 @@ class ChatAgent(ShieldRunnerMixin):
 
     async def _initialize_tools(
         self,
-        toolgroups_for_turn: Optional[List[AgentToolGroup]] = None,
+        toolgroups_for_turn: list[AgentToolGroup] | None = None,
     ) -> None:
         toolgroup_to_args = {}
         for toolgroup in (self.agent_config.toolgroups or []) + (toolgroups_for_turn or []):
@@ -847,7 +849,7 @@ class ChatAgent(ShieldRunnerMixin):
             tool_name_to_args,
         )
 
-    def _parse_toolgroup_name(self, toolgroup_name_with_maybe_tool_name: str) -> tuple[str, Optional[str]]:
+    def _parse_toolgroup_name(self, toolgroup_name_with_maybe_tool_name: str) -> tuple[str, str | None]:
         """Parse a toolgroup name into its components.
 
         Args:
@@ -921,7 +923,7 @@ async def get_raw_document_text(document: Document) -> str:
 
 def _interpret_content_as_attachment(
     content: str,
-) -> Optional[Attachment]:
+) -> Attachment | None:
     match = re.search(TOOLS_ATTACHMENT_KEY_REGEX, content)
     if match:
         snippet = match.group(1)
diff --git a/llama_stack/providers/inline/agents/meta_reference/agents.py b/llama_stack/providers/inline/agents/meta_reference/agents.py
index 656178773..bcbfcbe31 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agents.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agents.py
@@ -4,11 +4,10 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-import json
 import logging
-import shutil
 import uuid
-from typing import AsyncGenerator, List, Optional, Union
+from collections.abc import AsyncGenerator
+from datetime import datetime, timezone
 
 from llama_stack.apis.agents import (
     Agent,
@@ -21,11 +20,16 @@ from llama_stack.apis.agents import (
     AgentTurnCreateRequest,
     AgentTurnResumeRequest,
     Document,
-    ListAgentSessionsResponse,
-    ListAgentsResponse,
+    ListOpenAIResponseInputItem,
+    ListOpenAIResponseObject,
+    OpenAIResponseInput,
+    OpenAIResponseInputTool,
+    OpenAIResponseObject,
+    Order,
     Session,
     Turn,
 )
+from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.apis.inference import (
     Inference,
     ToolConfig,
@@ -37,12 +41,15 @@ from llama_stack.apis.safety import Safety
 from llama_stack.apis.tools import ToolGroups, ToolRuntime
 from llama_stack.apis.vector_io import VectorIO
 from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
+from llama_stack.providers.utils.pagination import paginate_records
+from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 
 from .agent_instance import ChatAgent
 from .config import MetaReferenceAgentsImplConfig
+from .openai_responses import OpenAIResponsesImpl
+from .persistence import AgentInfo
 
 logger = logging.getLogger()
-logger.setLevel(logging.INFO)
 
 
 class MetaReferenceAgentsImpl(Agents):
@@ -63,56 +70,65 @@ class MetaReferenceAgentsImpl(Agents):
         self.tool_groups_api = tool_groups_api
 
         self.in_memory_store = InmemoryKVStoreImpl()
+        self.openai_responses_impl: OpenAIResponsesImpl | None = None
 
     async def initialize(self) -> None:
         self.persistence_store = await kvstore_impl(self.config.persistence_store)
-
-        # check if "bwrap" is available
-        if not shutil.which("bwrap"):
-            logger.warning("Warning: `bwrap` is not available. Code interpreter tool will not work correctly.")
+        self.responses_store = ResponsesStore(self.config.responses_store)
+        await self.responses_store.initialize()
+        self.openai_responses_impl = OpenAIResponsesImpl(
+            inference_api=self.inference_api,
+            tool_groups_api=self.tool_groups_api,
+            tool_runtime_api=self.tool_runtime_api,
+            responses_store=self.responses_store,
+        )
 
     async def create_agent(
         self,
         agent_config: AgentConfig,
     ) -> AgentCreateResponse:
         agent_id = str(uuid.uuid4())
+        created_at = datetime.now(timezone.utc)
 
+        agent_info = AgentInfo(
+            **agent_config.model_dump(),
+            created_at=created_at,
+        )
+
+        # Store the agent info
         await self.persistence_store.set(
             key=f"agent:{agent_id}",
-            value=agent_config.model_dump_json(),
+            value=agent_info.model_dump_json(),
         )
+
         return AgentCreateResponse(
             agent_id=agent_id,
         )
 
     async def _get_agent_impl(self, agent_id: str) -> ChatAgent:
-        agent_config = await self.persistence_store.get(
+        agent_info_json = await self.persistence_store.get(
             key=f"agent:{agent_id}",
         )
-        if not agent_config:
-            raise ValueError(f"Could not find agent config for {agent_id}")
+        if not agent_info_json:
+            raise ValueError(f"Could not find agent info for {agent_id}")
 
         try:
-            agent_config = json.loads(agent_config)
-        except json.JSONDecodeError as e:
-            raise ValueError(f"Could not JSON decode agent config for {agent_id}") from e
-
-        try:
-            agent_config = AgentConfig(**agent_config)
+            agent_info = AgentInfo.model_validate_json(agent_info_json)
         except Exception as e:
-            raise ValueError(f"Could not validate(?) agent config for {agent_id}") from e
+            raise ValueError(f"Could not validate agent info for {agent_id}") from e
 
         return ChatAgent(
             agent_id=agent_id,
-            agent_config=agent_config,
+            agent_config=agent_info,
             inference_api=self.inference_api,
             safety_api=self.safety_api,
             vector_io_api=self.vector_io_api,
             tool_runtime_api=self.tool_runtime_api,
             tool_groups_api=self.tool_groups_api,
             persistence_store=(
-                self.persistence_store if agent_config.enable_session_persistence else self.in_memory_store
+                self.persistence_store if agent_info.enable_session_persistence else self.in_memory_store
             ),
+            created_at=agent_info.created_at,
         )
 
     async def create_agent_session(
@@ -131,16 +147,11 @@ class MetaReferenceAgentsImpl(Agents):
         self,
         agent_id: str,
         session_id: str,
-        messages: List[
-            Union[
-                UserMessage,
-                ToolResponseMessage,
-            ]
-        ],
-        toolgroups: Optional[List[AgentToolGroup]] = None,
-        documents: Optional[List[Document]] = None,
-        stream: Optional[bool] = False,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[UserMessage | ToolResponseMessage],
+        toolgroups: list[AgentToolGroup] | None = None,
+        documents: list[Document] | None = None,
+        stream: bool | None = False,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         request = AgentTurnCreateRequest(
             agent_id=agent_id,
@@ -169,8 +180,8 @@ class MetaReferenceAgentsImpl(Agents):
         agent_id: str,
         session_id: str,
         turn_id: str,
-        tool_responses: List[ToolResponse],
-        stream: Optional[bool] = False,
+        tool_responses: list[ToolResponse],
+        stream: bool | None = False,
     ) -> AsyncGenerator:
         request = AgentTurnResumeRequest(
             agent_id=agent_id,
@@ -208,9 +219,10 @@ class MetaReferenceAgentsImpl(Agents):
         self,
         agent_id: str,
         session_id: str,
-        turn_ids: Optional[List[str]] = None,
+        turn_ids: list[str] | None = None,
     ) -> Session:
         agent = await self._get_agent_impl(agent_id)
+
         session_info = await agent.storage.get_session_info(session_id)
         if session_info is None:
             raise ValueError(f"Session {session_id} not found")
@@ -225,22 +237,117 @@ class MetaReferenceAgentsImpl(Agents):
         )
 
     async def delete_agents_session(self, agent_id: str, session_id: str) -> None:
-        await self.persistence_store.delete(f"session:{agent_id}:{session_id}")
+        agent = await self._get_agent_impl(agent_id)
+        session_info = await agent.storage.get_session_info(session_id)
+        if session_info is None:
+            raise ValueError(f"Session {session_id} not found")
+
+        # Delete turns first, then the session
+        await agent.storage.delete_session_turns(session_id)
+        await agent.storage.delete_session(session_id)
 
     async def delete_agent(self, agent_id: str) -> None:
+        # First get all sessions for this agent
+        agent = await self._get_agent_impl(agent_id)
+        sessions = await agent.storage.list_sessions()
+
+        # Delete all sessions
+        for session in sessions:
+            await self.delete_agents_session(agent_id, session.session_id)
+
+        # Finally delete the agent itself
         await self.persistence_store.delete(f"agent:{agent_id}")
 
+    async def list_agents(self, start_index: int | None = None, limit: int | None = None) -> PaginatedResponse:
+        agent_keys = await self.persistence_store.keys_in_range("agent:", "agent:\xff")
+        agent_list: list[Agent] = []
+        for agent_key in agent_keys:
+            agent_id = agent_key.split(":")[1]
+
+            # Get the agent info using the key
+            agent_info_json = await self.persistence_store.get(agent_key)
+            if not agent_info_json:
+                logger.error(f"Could not find agent info for key {agent_key}")
+                continue
+
+            try:
+                agent_info = AgentInfo.model_validate_json(agent_info_json)
+                agent_list.append(
+                    Agent(
+                        agent_id=agent_id,
+                        agent_config=agent_info,
+                        created_at=agent_info.created_at,
+                    )
+                )
+            except Exception as e:
+                logger.error(f"Error parsing agent info for {agent_id}: {e}")
+                continue
+
+        # Convert Agent objects to dictionaries
+        agent_dicts = [agent.model_dump() for agent in agent_list]
+        return paginate_records(agent_dicts, start_index, limit)
+
+    async def get_agent(self, agent_id: str) -> Agent:
+        chat_agent = await self._get_agent_impl(agent_id)
+        agent = Agent(
+            agent_id=agent_id,
+            agent_config=chat_agent.agent_config,
+            created_at=chat_agent.created_at,
+        )
+        return agent
+
+    async def list_agent_sessions(
+        self, agent_id: str, start_index: int | None = None, limit: int | None = None
+    ) -> PaginatedResponse:
+        agent = await self._get_agent_impl(agent_id)
+        sessions = await agent.storage.list_sessions()
+        # Convert Session objects to dictionaries
+        session_dicts = [session.model_dump() for session in sessions]
+        return paginate_records(session_dicts, start_index, limit)
+
     async def shutdown(self) -> None:
         pass
 
-    async def list_agents(self) -> ListAgentsResponse:
-        pass
-
-    async def get_agent(self, agent_id: str) -> Agent:
-        pass
-
-    async def list_agent_sessions(
+    # OpenAI responses
+    async def get_openai_response(
         self,
-        agent_id: str,
-    ) -> ListAgentSessionsResponse:
-        pass
+        response_id: str,
+    ) -> OpenAIResponseObject:
+        return await self.openai_responses_impl.get_openai_response(response_id)
+
+    async def create_openai_response(
+        self,
+        input: str | list[OpenAIResponseInput],
+        model: str,
+        instructions: str | None = None,
+        previous_response_id: str | None = None,
+        store: bool | None = True,
+        stream: bool | None = False,
+        temperature: float | None = None,
+        tools: list[OpenAIResponseInputTool] | None = None,
+    ) -> OpenAIResponseObject:
+        return await self.openai_responses_impl.create_openai_response(
+            input, model, instructions, previous_response_id, store, stream, temperature, tools
+        )
+
+    async def list_openai_responses(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseObject:
+        return await self.openai_responses_impl.list_openai_responses(after, limit, model, order)
+
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        return await self.openai_responses_impl.list_openai_response_input_items(
+            response_id, after, before, include, limit, order
+        )
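Note: the paginated listing endpoints above hand slicing off to paginate_records. The helper's body is not part of this diff, so the following is only a minimal sketch of the contract they appear to rely on; the response field names and slicing rules are assumptions, not copied from the real implementation.

# Hypothetical sketch of the pagination contract assumed by list_agents / list_agent_sessions.
from dataclasses import dataclass, field
from typing import Any


@dataclass
class PaginatedResponseSketch:  # stand-in for llama_stack.apis.common.responses.PaginatedResponse
    data: list[dict[str, Any]] = field(default_factory=list)
    has_more: bool = False


def paginate_records_sketch(
    records: list[dict[str, Any]], start_index: int | None = None, limit: int | None = None
) -> PaginatedResponseSketch:
    # Slice the flat list of record dicts and report whether more pages remain.
    start = start_index or 0
    end = len(records) if limit is None else start + limit
    page = records[start:end]
    return PaginatedResponseSketch(data=page, has_more=end < len(records))

# e.g. paginate_records_sketch(agent_dicts, start_index=0, limit=10) would return the first
# ten agents with has_more=True whenever more than ten agents are stored.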
diff --git a/llama_stack/providers/inline/agents/meta_reference/config.py b/llama_stack/providers/inline/agents/meta_reference/config.py
index ff34e5d5f..1c392f29c 100644
--- a/llama_stack/providers/inline/agents/meta_reference/config.py
+++ b/llama_stack/providers/inline/agents/meta_reference/config.py
@@ -4,22 +4,28 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 from llama_stack.providers.utils.kvstore import KVStoreConfig
 from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
+from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig
 
 
 class MetaReferenceAgentsImplConfig(BaseModel):
     persistence_store: KVStoreConfig
+    responses_store: SqlStoreConfig
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "persistence_store": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
                 db_name="agents_store.db",
-            )
+            ),
+            "responses_store": SqliteSqlStoreConfig.sample_run_config(
+                __distro_dir__=__distro_dir__,
+                db_name="responses_store.db",
+            ),
         }
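For reference, a hedged sketch of building the updated provider config by hand, since responses_store is now required alongside persistence_store. The db_path keyword on both Sqlite config classes is an assumption inferred from their sample_run_config helpers and is not shown in this diff.

# Hypothetical sketch: constructing the new two-store config directly.
from llama_stack.providers.inline.agents.meta_reference.config import MetaReferenceAgentsImplConfig
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig

config = MetaReferenceAgentsImplConfig(
    persistence_store=SqliteKVStoreConfig(db_path="/tmp/agents_store.db"),   # agent/session KV data
    responses_store=SqliteSqlStoreConfig(db_path="/tmp/responses_store.db"),  # OpenAI responses rows
)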
diff --git a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py
new file mode 100644
index 000000000..19d7ea56f
--- /dev/null
+++ b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py
@@ -0,0 +1,776 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import json
+import time
+import uuid
+from collections.abc import AsyncIterator
+from typing import Any, cast
+
+from openai.types.chat import ChatCompletionToolParam
+from pydantic import BaseModel
+
+from llama_stack.apis.agents import Order
+from llama_stack.apis.agents.openai_responses import (
+    AllowedToolsFilter,
+    ListOpenAIResponseInputItem,
+    ListOpenAIResponseObject,
+    OpenAIResponseInput,
+    OpenAIResponseInputFunctionToolCallOutput,
+    OpenAIResponseInputMessageContent,
+    OpenAIResponseInputMessageContentImage,
+    OpenAIResponseInputMessageContentText,
+    OpenAIResponseInputTool,
+    OpenAIResponseInputToolMCP,
+    OpenAIResponseMessage,
+    OpenAIResponseObject,
+    OpenAIResponseObjectStream,
+    OpenAIResponseObjectStreamResponseCompleted,
+    OpenAIResponseObjectStreamResponseCreated,
+    OpenAIResponseObjectStreamResponseOutputTextDelta,
+    OpenAIResponseOutput,
+    OpenAIResponseOutputMessageContent,
+    OpenAIResponseOutputMessageContentOutputText,
+    OpenAIResponseOutputMessageFunctionToolCall,
+    OpenAIResponseOutputMessageMCPListTools,
+    OpenAIResponseOutputMessageWebSearchToolCall,
+)
+from llama_stack.apis.inference.inference import (
+    Inference,
+    OpenAIAssistantMessageParam,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionContentPartImageParam,
+    OpenAIChatCompletionContentPartParam,
+    OpenAIChatCompletionContentPartTextParam,
+    OpenAIChatCompletionToolCall,
+    OpenAIChatCompletionToolCallFunction,
+    OpenAIChoice,
+    OpenAIDeveloperMessageParam,
+    OpenAIImageURL,
+    OpenAIMessageParam,
+    OpenAISystemMessageParam,
+    OpenAIToolMessageParam,
+    OpenAIUserMessageParam,
+)
+from llama_stack.apis.tools.tools import ToolGroups, ToolRuntime
+from llama_stack.log import get_logger
+from llama_stack.models.llama.datatypes import ToolDefinition, ToolParamDefinition
+from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
+from llama_stack.providers.utils.responses.responses_store import ResponsesStore
+from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
+
+logger = get_logger(name=__name__, category="openai_responses")
+
+OPENAI_RESPONSES_PREFIX = "openai_responses:"
+
+
+async def _convert_response_content_to_chat_content(
+    content: str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent],
+) -> str | list[OpenAIChatCompletionContentPartParam]:
+    """
+    Convert the content parts from an OpenAI Response API request into OpenAI Chat Completion content parts.
+
+    The content schemas of each API look similar, but are not exactly the same.
+    """
+    if isinstance(content, str):
+        return content
+
+    converted_parts = []
+    for content_part in content:
+        if isinstance(content_part, OpenAIResponseInputMessageContentText):
+            converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text))
+        elif isinstance(content_part, OpenAIResponseOutputMessageContentOutputText):
+            converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text))
+        elif isinstance(content_part, OpenAIResponseInputMessageContentImage):
+            if content_part.image_url:
+                image_url = OpenAIImageURL(url=content_part.image_url, detail=content_part.detail)
+                converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
+        elif isinstance(content_part, str):
+            converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part))
+        else:
+            raise ValueError(
+                f"Llama Stack OpenAI Responses does not yet support content type '{type(content_part)}' in this context"
+            )
+    return converted_parts
+
+
+async def _convert_response_input_to_chat_messages(
+    input: str | list[OpenAIResponseInput],
+) -> list[OpenAIMessageParam]:
+    """
+    Convert the input from an OpenAI Response API request into OpenAI Chat Completion messages.
+    """
+    messages: list[OpenAIMessageParam] = []
+    if isinstance(input, list):
+        for input_item in input:
+            if isinstance(input_item, OpenAIResponseInputFunctionToolCallOutput):
+                messages.append(
+                    OpenAIToolMessageParam(
+                        content=input_item.output,
+                        tool_call_id=input_item.call_id,
+                    )
+                )
+            elif isinstance(input_item, OpenAIResponseOutputMessageFunctionToolCall):
+                tool_call = OpenAIChatCompletionToolCall(
+                    index=0,
+                    id=input_item.call_id,
+                    function=OpenAIChatCompletionToolCallFunction(
+                        name=input_item.name,
+                        arguments=input_item.arguments,
+                    ),
+                )
+                messages.append(OpenAIAssistantMessageParam(tool_calls=[tool_call]))
+            else:
+                content = await _convert_response_content_to_chat_content(input_item.content)
+                message_type = await _get_message_type_by_role(input_item.role)
+                if message_type is None:
+                    raise ValueError(
+                        f"Llama Stack OpenAI Responses does not yet support message role '{input_item.role}' in this context"
+                    )
+                messages.append(message_type(content=content))
+    else:
+        messages.append(OpenAIUserMessageParam(content=input))
+    return messages
+
+
+async def _convert_chat_choice_to_response_message(choice: OpenAIChoice) -> OpenAIResponseMessage:
+    """
+    Convert an OpenAI Chat Completion choice into an OpenAI Response output message.
+    """
+    output_content = ""
+    if isinstance(choice.message.content, str):
+        output_content = choice.message.content
+    elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
+        output_content = choice.message.content.text
+    else:
+        raise ValueError(
+            f"Llama Stack OpenAI Responses does not yet support output content type: {type(choice.message.content)}"
+        )
+
+    return OpenAIResponseMessage(
+        id=f"msg_{uuid.uuid4()}",
+        content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
+        status="completed",
+        role="assistant",
+    )
+
+
+async def _get_message_type_by_role(role: str):
+    role_to_type = {
+        "user": OpenAIUserMessageParam,
+        "system": OpenAISystemMessageParam,
+        "assistant": OpenAIAssistantMessageParam,
+        "developer": OpenAIDeveloperMessageParam,
+    }
+    return role_to_type.get(role)
+
+
+class OpenAIResponsePreviousResponseWithInputItems(BaseModel):
+    input_items: ListOpenAIResponseInputItem
+    response: OpenAIResponseObject
+
+
+class ChatCompletionContext(BaseModel):
+    model: str
+    messages: list[OpenAIMessageParam]
+    tools: list[ChatCompletionToolParam] | None = None
+    mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP]
+    stream: bool
+    temperature: float | None
+
+
+class OpenAIResponsesImpl:
+    def __init__(
+        self,
+        inference_api: Inference,
+        tool_groups_api: ToolGroups,
+        tool_runtime_api: ToolRuntime,
+        responses_store: ResponsesStore,
+    ):
+        self.inference_api = inference_api
+        self.tool_groups_api = tool_groups_api
+        self.tool_runtime_api = tool_runtime_api
+        self.responses_store = responses_store
+
+    async def _prepend_previous_response(
+        self, input: str | list[OpenAIResponseInput], previous_response_id: str | None = None
+    ):
+        if previous_response_id:
+            previous_response_with_input = await self.responses_store.get_response_object(previous_response_id)
+
+            # previous response input items
+            new_input_items = previous_response_with_input.input
+
+            # previous response output items
+            new_input_items.extend(previous_response_with_input.output)
+
+            # new input items from the current request
+            if isinstance(input, str):
+                new_input_items.append(OpenAIResponseMessage(content=input, role="user"))
+            else:
+                new_input_items.extend(input)
+
+            input = new_input_items
+
+        return input
+
+    async def _prepend_instructions(self, messages, instructions):
+        if instructions:
+            messages.insert(0, OpenAISystemMessageParam(content=instructions))
+
+    async def get_openai_response(
+        self,
+        response_id: str,
+    ) -> OpenAIResponseObject:
+        response_with_input = await self.responses_store.get_response_object(response_id)
+        return OpenAIResponseObject(**{k: v for k, v in response_with_input.model_dump().items() if k != "input"})
+
+    async def list_openai_responses(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseObject:
+        return await self.responses_store.list_responses(after, limit, model, order)
+
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """List input items for a given OpenAI response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned.
+        :param order: The order to return the input items in.
+        :returns: A ListOpenAIResponseInputItem.

+        """
+        return await self.responses_store.list_response_input_items(response_id, after, before, include, limit, order)
+
+    async def _process_response_choices(
+        self,
+        chat_response: OpenAIChatCompletion,
+        ctx: ChatCompletionContext,
+        tools: list[OpenAIResponseInputTool] | None,
+    ) -> list[OpenAIResponseOutput]:
+        """Handle tool execution and response message creation."""
+        output_messages: list[OpenAIResponseOutput] = []
+        # Execute tool calls if any
+        for choice in chat_response.choices:
+            if choice.message.tool_calls and tools:
+                # Assume if the first tool is a function, all tools are functions
+                if tools[0].type == "function":
+                    for tool_call in choice.message.tool_calls:
+                        output_messages.append(
+                            OpenAIResponseOutputMessageFunctionToolCall(
+                                arguments=tool_call.function.arguments or "",
+                                call_id=tool_call.id,
+                                name=tool_call.function.name or "",
+                                id=f"fc_{uuid.uuid4()}",
+                                status="completed",
+                            )
+                        )
+                else:
+                    tool_messages = await self._execute_tool_and_return_final_output(choice, ctx)
+                    output_messages.extend(tool_messages)
+            else:
+                output_messages.append(await _convert_chat_choice_to_response_message(choice))
+
+        return output_messages
+
+    async def _store_response(
+        self,
+        response: OpenAIResponseObject,
+        input: str | list[OpenAIResponseInput],
+    ) -> None:
+        new_input_id = f"msg_{uuid.uuid4()}"
+        if isinstance(input, str):
+            # synthesize a message from the input string
+            input_content = OpenAIResponseInputMessageContentText(text=input)
+            input_content_item = OpenAIResponseMessage(
+                role="user",
+                content=[input_content],
+                id=new_input_id,
+            )
+            input_items_data = [input_content_item]
+        else:
+            # we already have a list of messages
+            input_items_data = []
+            for input_item in input:
+                if isinstance(input_item, OpenAIResponseMessage):
+                    # These may or may not already have an id, so dump to dict, check for id, and add if missing
+                    input_item_dict = input_item.model_dump()
+                    if "id" not in input_item_dict:
+                        input_item_dict["id"] = new_input_id
+                    input_items_data.append(OpenAIResponseMessage(**input_item_dict))
+                else:
+                    input_items_data.append(input_item)
+
+        await self.responses_store.store_response_object(
+            response_object=response,
+            input=input_items_data,
+        )
+
+    async def create_openai_response(
+        self,
+        input: str | list[OpenAIResponseInput],
+        model: str,
+        instructions: str | None = None,
+        previous_response_id: str | None = None,
+        store: bool | None = True,
+        stream: bool | None = False,
+        temperature: float | None = None,
+        tools: list[OpenAIResponseInputTool] | None = None,
+    ):
+        stream = False if stream is None else stream
+
+        output_messages: list[OpenAIResponseOutput] = []
+
+        # Input preprocessing
+        input = await self._prepend_previous_response(input, previous_response_id)
+        messages = await _convert_response_input_to_chat_messages(input)
+        await self._prepend_instructions(messages, instructions)
+
+        # Tool setup
+        chat_tools, mcp_tool_to_server, mcp_list_message = (
+            await self._convert_response_tools_to_chat_tools(tools) if tools else (None, {}, None)
+        )
+        if mcp_list_message:
+            output_messages.append(mcp_list_message)
+
+        ctx = ChatCompletionContext(
+            model=model,
+            messages=messages,
+            tools=chat_tools,
+            mcp_tool_to_server=mcp_tool_to_server,
+            stream=stream,
+            temperature=temperature,
+        )
+
+        inference_result = await self.inference_api.openai_chat_completion(
+            model=model,
+            messages=messages,
+            tools=chat_tools,
+            stream=stream,
+            temperature=temperature,
+        )
+
+        if stream:
+            return self._create_streaming_response(
+                inference_result=inference_result,
+                ctx=ctx,
+                output_messages=output_messages,
+                input=input,
+                model=model,
+                store=store,
+                tools=tools,
+            )
+        else:
+            return await self._create_non_streaming_response(
+                inference_result=inference_result,
+                ctx=ctx,
+                output_messages=output_messages,
+                input=input,
+                model=model,
+                store=store,
+                tools=tools,
+            )
+
+    async def _create_non_streaming_response(
+        self,
+        inference_result: Any,
+        ctx: ChatCompletionContext,
+        output_messages: list[OpenAIResponseOutput],
+        input: str | list[OpenAIResponseInput],
+        model: str,
+        store: bool | None,
+        tools: list[OpenAIResponseInputTool] | None,
+    ) -> OpenAIResponseObject:
+        chat_response = OpenAIChatCompletion(**inference_result.model_dump())
+
+        # Process response choices (tool execution and message creation)
+        output_messages.extend(
+            await self._process_response_choices(
+                chat_response=chat_response,
+                ctx=ctx,
+                tools=tools,
+            )
+        )
+
+        response = OpenAIResponseObject(
+            created_at=chat_response.created,
+            id=f"resp-{uuid.uuid4()}",
+            model=model,
+            object="response",
+            status="completed",
+            output=output_messages,
+        )
+        logger.debug(f"OpenAI Responses response: {response}")
+
+        # Store response if requested
+        if store:
+            await self._store_response(
+                response=response,
+                input=input,
+            )
+
+        return response
+
+    async def _create_streaming_response(
+        self,
+        inference_result: Any,
+        ctx: ChatCompletionContext,
+        output_messages: list[OpenAIResponseOutput],
+        input: str | list[OpenAIResponseInput],
+        model: str,
+        store: bool | None,
+        tools: list[OpenAIResponseInputTool] | None,
+    ) -> AsyncIterator[OpenAIResponseObjectStream]:
+        # Create initial response and emit response.created immediately
+        response_id = f"resp-{uuid.uuid4()}"
+        created_at = int(time.time())
+
+        initial_response = OpenAIResponseObject(
+            created_at=created_at,
+            id=response_id,
+            model=model,
+            object="response",
+            status="in_progress",
+            output=output_messages.copy(),
+        )
+
+        # Emit response.created immediately
+        yield OpenAIResponseObjectStreamResponseCreated(response=initial_response)
+
+        # For streaming, inference_result is an async iterator of chunks
+        # Stream chunks and emit delta events as they arrive
+        chat_response_id = ""
+        chat_response_content = []
+        chat_response_tool_calls: dict[int, OpenAIChatCompletionToolCall] = {}
+        chunk_created = 0
+        chunk_model = ""
+        chunk_finish_reason = ""
+        sequence_number = 0
+
+        # Create a placeholder message item for delta events
+        message_item_id = f"msg_{uuid.uuid4()}"
+
+        async for chunk in inference_result:
+            chat_response_id = chunk.id
+            chunk_created = chunk.created
+            chunk_model = chunk.model
+            for chunk_choice in chunk.choices:
+                # Emit incremental text content as delta events
+                if chunk_choice.delta.content:
+                    sequence_number += 1
+                    yield OpenAIResponseObjectStreamResponseOutputTextDelta(
+                        content_index=0,
+                        delta=chunk_choice.delta.content,
+                        item_id=message_item_id,
+                        output_index=0,
+                        sequence_number=sequence_number,
+                    )
+
+                # Collect content for final response
+                chat_response_content.append(chunk_choice.delta.content or "")
+                if chunk_choice.finish_reason:
+                    chunk_finish_reason = chunk_choice.finish_reason
+
+                # Aggregate tool call arguments across chunks, using their index as the aggregation key
+                if chunk_choice.delta.tool_calls:
+                    for tool_call in chunk_choice.delta.tool_calls:
+                        response_tool_call = chat_response_tool_calls.get(tool_call.index, None)
+                        if response_tool_call:
+                            # Don't attempt to concatenate arguments if we don't have any new arguments
+                            if tool_call.function.arguments:
+                                # Guard against an initial None argument before we concatenate
+                                response_tool_call.function.arguments = (
+                                    response_tool_call.function.arguments or ""
+                                ) + tool_call.function.arguments
+                        else:
+                            tool_call_dict: dict[str, Any] = tool_call.model_dump()
+                            tool_call_dict.pop("type", None)
+                            response_tool_call = OpenAIChatCompletionToolCall(**tool_call_dict)
+                        chat_response_tool_calls[tool_call.index] = response_tool_call
+
+        # Convert collected chunks to complete response
+        if chat_response_tool_calls:
+            tool_calls = [chat_response_tool_calls[i] for i in sorted(chat_response_tool_calls.keys())]
+        else:
+            tool_calls = None
+        assistant_message = OpenAIAssistantMessageParam(
+            content="".join(chat_response_content),
+            tool_calls=tool_calls,
+        )
+        chat_response_obj = OpenAIChatCompletion(
+            id=chat_response_id,
+            choices=[
+                OpenAIChoice(
+                    message=assistant_message,
+                    finish_reason=chunk_finish_reason,
+                    index=0,
+                )
+            ],
+            created=chunk_created,
+            model=chunk_model,
+        )
+
+        # Process response choices (tool execution and message creation)
+        output_messages.extend(
+            await self._process_response_choices(
+                chat_response=chat_response_obj,
+                ctx=ctx,
+                tools=tools,
+            )
+        )
+
+        # Create final response
+        final_response = OpenAIResponseObject(
+            created_at=created_at,
+            id=response_id,
+            model=model,
+            object="response",
+            status="completed",
+            output=output_messages,
+        )
+
+        if store:
+            await self._store_response(
+                response=final_response,
+                input=input,
+            )
+
+        # Emit response.completed
+        yield OpenAIResponseObjectStreamResponseCompleted(response=final_response)
+
+    async def _convert_response_tools_to_chat_tools(
+        self, tools: list[OpenAIResponseInputTool]
+    ) -> tuple[
+        list[ChatCompletionToolParam],
+        dict[str, OpenAIResponseInputToolMCP],
+        OpenAIResponseOutput | None,
+    ]:
+        from llama_stack.apis.agents.openai_responses import (
+            MCPListToolsTool,
+        )
+        from llama_stack.apis.tools.tools import Tool
+
+        mcp_tool_to_server = {}
+
+        def make_openai_tool(tool_name: str, tool: Tool) -> ChatCompletionToolParam:
+            tool_def = ToolDefinition(
+                tool_name=tool_name,
+                description=tool.description,
+                parameters={
+                    param.name: ToolParamDefinition(
+                        param_type=param.parameter_type,
+                        description=param.description,
+                        required=param.required,
+                        default=param.default,
+                    )
+                    for param in tool.parameters
+                },
+            )
+            return convert_tooldef_to_openai_tool(tool_def)
+
+        mcp_list_message = None
+        chat_tools: list[ChatCompletionToolParam] = []
+        for input_tool in tools:
+            # TODO: Handle other tool types
+            if input_tool.type == "function":
+                chat_tools.append(ChatCompletionToolParam(type="function", function=input_tool.model_dump()))
+            elif input_tool.type == "web_search":
+                tool_name = "web_search"
+                tool = await self.tool_groups_api.get_tool(tool_name)
+                if not tool:
+                    raise ValueError(f"Tool {tool_name} not found")
+                chat_tools.append(make_openai_tool(tool_name, tool))
+            elif input_tool.type == "mcp":
+                always_allowed = None
+                never_allowed = None
+                if input_tool.allowed_tools:
+                    if isinstance(input_tool.allowed_tools, list):
+                        always_allowed = input_tool.allowed_tools
+                    elif isinstance(input_tool.allowed_tools, AllowedToolsFilter):
+                        always_allowed = input_tool.allowed_tools.always
+                        never_allowed = input_tool.allowed_tools.never
+
+                tool_defs = await list_mcp_tools(
+                    endpoint=input_tool.server_url,
+                    headers=input_tool.headers or {},
+                )
+
+                mcp_list_message = OpenAIResponseOutputMessageMCPListTools(
+                    id=f"mcp_list_{uuid.uuid4()}",
+                    status="completed",
+                    server_label=input_tool.server_label,
+                    tools=[],
+                )
+                for t in tool_defs.data:
+                    if never_allowed and t.name in never_allowed:
+                        continue
+                    if not always_allowed or t.name in always_allowed:
+                        chat_tools.append(make_openai_tool(t.name, t))
+                        if t.name in mcp_tool_to_server:
+                            raise ValueError(f"Duplicate tool name {t.name} found for server {input_tool.server_label}")
+                        mcp_tool_to_server[t.name] = input_tool
+                        mcp_list_message.tools.append(
+                            MCPListToolsTool(
+                                name=t.name,
+                                description=t.description,
+                                input_schema={
+                                    "type": "object",
+                                    "properties": {
+                                        p.name: {
+                                            "type": p.parameter_type,
+                                            "description": p.description,
+                                        }
+                                        for p in t.parameters
+                                    },
+                                    "required": [p.name for p in t.parameters if p.required],
+                                },
+                            )
+                        )
+            else:
+                raise ValueError(f"Llama Stack OpenAI Responses does not yet support tool type: {input_tool.type}")
+        return chat_tools, mcp_tool_to_server, mcp_list_message
+
+    async def _execute_tool_and_return_final_output(
+        self,
+        choice: OpenAIChoice,
+        ctx: ChatCompletionContext,
+    ) -> list[OpenAIResponseOutput]:
+        output_messages: list[OpenAIResponseOutput] = []
+
+        if not isinstance(choice.message, OpenAIAssistantMessageParam):
+            return output_messages
+
+        if not choice.message.tool_calls:
+            return output_messages
+
+        next_turn_messages = ctx.messages.copy()
+
+        # Add the assistant message with tool_calls response to the messages list
+        next_turn_messages.append(choice.message)
+
+        for tool_call in choice.message.tool_calls:
+            # TODO: telemetry spans for tool calls
+            tool_call_log, further_input = await self._execute_tool_call(tool_call, ctx)
+            if tool_call_log:
+                output_messages.append(tool_call_log)
+            if further_input:
+                next_turn_messages.append(further_input)
+
+        tool_results_chat_response = await self.inference_api.openai_chat_completion(
+            model=ctx.model,
+            messages=next_turn_messages,
+            stream=ctx.stream,
+            temperature=ctx.temperature,
+        )
+        # type cast to appease mypy: this is needed because we don't handle streaming properly :)
+        tool_results_chat_response = cast(OpenAIChatCompletion, tool_results_chat_response)
+
+        # Huge TODO: these are NOT the final outputs, we must keep the loop going
+        tool_final_outputs = [
+            await _convert_chat_choice_to_response_message(choice) for choice in tool_results_chat_response.choices
+        ]
+        # TODO: Wire in annotations with URLs, titles, etc to these output messages
+        output_messages.extend(tool_final_outputs)
+        return output_messages
+
+    async def _execute_tool_call(
+        self,
+        tool_call: OpenAIChatCompletionToolCall,
+        ctx: ChatCompletionContext,
+    ) -> tuple[OpenAIResponseOutput | None, OpenAIMessageParam | None]:
+        from llama_stack.providers.utils.inference.prompt_adapter import (
+            interleaved_content_as_str,
+        )
+
+        tool_call_id = tool_call.id
+        function = tool_call.function
+
+        if not function or not tool_call_id or not function.name:
+            return None, None
+
+        error_exc = None
+        result = None
+        try:
+            if function.name in ctx.mcp_tool_to_server:
+                mcp_tool = ctx.mcp_tool_to_server[function.name]
+                result = await invoke_mcp_tool(
+                    endpoint=mcp_tool.server_url,
+                    headers=mcp_tool.headers or {},
+                    tool_name=function.name,
+                    kwargs=json.loads(function.arguments) if function.arguments else {},
+                )
+            else:
+                result = await self.tool_runtime_api.invoke_tool(
+                    tool_name=function.name,
+                    kwargs=json.loads(function.arguments) if function.arguments else {},
+                )
+        except Exception as e:
+            error_exc = e
+
+        if function.name in ctx.mcp_tool_to_server:
+            from llama_stack.apis.agents.openai_responses import OpenAIResponseOutputMessageMCPCall
+
+            message = OpenAIResponseOutputMessageMCPCall(
+                id=tool_call_id,
+                arguments=function.arguments,
+                name=function.name,
+                server_label=ctx.mcp_tool_to_server[function.name].server_label,
+            )
+            if error_exc:
+                message.error = str(error_exc)
+            elif (result.error_code and result.error_code > 0) or result.error_message:
+                message.error = f"Error (code {result.error_code}): {result.error_message}"
+            elif result.content:
+                message.output = interleaved_content_as_str(result.content)
+        else:
+            if function.name == "web_search":
+                message = OpenAIResponseOutputMessageWebSearchToolCall(
+                    id=tool_call_id,
+                    status="completed",
+                )
+                if error_exc or (result.error_code and result.error_code > 0) or result.error_message:
+                    message.status = "failed"
+            else:
+                raise ValueError(f"Unknown tool {function.name} called")
+
+        input_message = None
+        if result and result.content:
+            if isinstance(result.content, str):
+                content = result.content
+            elif isinstance(result.content, list):
+                from llama_stack.apis.common.content_types import ImageContentItem, TextContentItem
+
+                content = []
+                for item in result.content:
+                    if isinstance(item, TextContentItem):
+                        part = OpenAIChatCompletionContentPartTextParam(text=item.text)
+                    elif isinstance(item, ImageContentItem):
+                        if item.image.data:
+                            url = f"data:image;base64,{item.image.data}"
+                        else:
+                            url = item.image.url
+                        part = OpenAIChatCompletionContentPartImageParam(image_url=OpenAIImageURL(url=url))
+                    else:
+                        raise ValueError(f"Unknown result content type: {type(item)}")
+                    content.append(part)
+            else:
+                raise ValueError(f"Unknown result content type: {type(result.content)}")
+            input_message = OpenAIToolMessageParam(content=content, tool_call_id=tool_call_id)
+
+        return message, input_message
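A minimal consumer sketch for the streaming path implemented above, assuming an already-initialized OpenAIResponsesImpl. The event classes are the ones imported at the top of this file; the model id and prompt are placeholders.

# Hypothetical usage of create_openai_response(stream=True); illustrative only.
async def stream_demo(impl: OpenAIResponsesImpl) -> None:
    stream = await impl.create_openai_response(
        input="Say hello in one short sentence.",
        model="llama3.2:3b-instruct-fp16",  # placeholder model id
        stream=True,
    )
    async for event in stream:
        if isinstance(event, OpenAIResponseObjectStreamResponseCreated):
            print(f"response {event.response.id} created")
        elif isinstance(event, OpenAIResponseObjectStreamResponseOutputTextDelta):
            print(event.delta, end="", flush=True)
        elif isinstance(event, OpenAIResponseObjectStreamResponseCompleted):
            print(f"\nresponse {event.response.id} completed")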
diff --git a/llama_stack/providers/inline/agents/meta_reference/persistence.py b/llama_stack/providers/inline/agents/meta_reference/persistence.py
index 202d43609..5031a4a90 100644
--- a/llama_stack/providers/inline/agents/meta_reference/persistence.py
+++ b/llama_stack/providers/inline/agents/meta_reference/persistence.py
@@ -8,11 +8,8 @@ import json
 import logging
 import uuid
 from datetime import datetime, timezone
-from typing import List, Optional
 
-from pydantic import BaseModel
-
-from llama_stack.apis.agents import ToolExecutionStep, Turn
+from llama_stack.apis.agents import AgentConfig, Session, ToolExecutionStep, Turn
 from llama_stack.distribution.access_control import check_access
 from llama_stack.distribution.datatypes import AccessAttributes
 from llama_stack.distribution.request_headers import get_auth_attributes
@@ -21,13 +18,15 @@ from llama_stack.providers.utils.kvstore import KVStore
 log = logging.getLogger(__name__)
 
 
-class AgentSessionInfo(BaseModel):
-    session_id: str
-    session_name: str
+class AgentSessionInfo(Session):
     # TODO: is this used anywhere?
-    vector_db_id: Optional[str] = None
+    vector_db_id: str | None = None
     started_at: datetime
-    access_attributes: Optional[AccessAttributes] = None
+    access_attributes: AccessAttributes | None = None
+
+
+class AgentInfo(AgentConfig):
+    created_at: datetime
 
 
 class AgentPersistence:
@@ -47,6 +46,7 @@ class AgentPersistence:
             session_name=name,
             started_at=datetime.now(timezone.utc),
             access_attributes=access_attributes,
+            turns=[],
         )
 
         await self.kvstore.set(
@@ -55,7 +55,7 @@ class AgentPersistence:
         )
         return session_id
 
-    async def get_session_info(self, session_id: str) -> Optional[AgentSessionInfo]:
+    async def get_session_info(self, session_id: str) -> AgentSessionInfo | None:
         value = await self.kvstore.get(
             key=f"session:{self.agent_id}:{session_id}",
         )
@@ -78,7 +78,7 @@ class AgentPersistence:
 
         return check_access(session_info.session_id, session_info.access_attributes, get_auth_attributes())
 
-    async def get_session_if_accessible(self, session_id: str) -> Optional[AgentSessionInfo]:
+    async def get_session_if_accessible(self, session_id: str) -> AgentSessionInfo | None:
         """Get session info if the user has access to it. For internal use by sub-session methods."""
         session_info = await self.get_session_info(session_id)
         if not session_info:
@@ -106,11 +106,11 @@ class AgentPersistence:
             value=turn.model_dump_json(),
         )
 
-    async def get_session_turns(self, session_id: str) -> List[Turn]:
+    async def get_session_turns(self, session_id: str) -> list[Turn]:
         if not await self.get_session_if_accessible(session_id):
             raise ValueError(f"Session {session_id} not found or access denied")
 
-        values = await self.kvstore.range(
+        values = await self.kvstore.values_in_range(
             start_key=f"session:{self.agent_id}:{session_id}:",
             end_key=f"session:{self.agent_id}:{session_id}:\xff\xff\xff\xff",
         )
@@ -122,10 +122,9 @@ class AgentPersistence:
             except Exception as e:
                 log.error(f"Error parsing turn: {e}")
                 continue
-        turns.sort(key=lambda x: (x.completed_at or datetime.min))
         return turns
 
-    async def get_session_turn(self, session_id: str, turn_id: str) -> Optional[Turn]:
+    async def get_session_turn(self, session_id: str, turn_id: str) -> Turn | None:
         if not await self.get_session_if_accessible(session_id):
             raise ValueError(f"Session {session_id} not found or access denied")
 
@@ -145,7 +144,7 @@ class AgentPersistence:
             value=step.model_dump_json(),
         )
 
-    async def get_in_progress_tool_call_step(self, session_id: str, turn_id: str) -> Optional[ToolExecutionStep]:
+    async def get_in_progress_tool_call_step(self, session_id: str, turn_id: str) -> ToolExecutionStep | None:
         if not await self.get_session_if_accessible(session_id):
             return None
 
@@ -163,7 +162,7 @@ class AgentPersistence:
             value=str(num_infer_iters),
         )
 
-    async def get_num_infer_iters_in_turn(self, session_id: str, turn_id: str) -> Optional[int]:
+    async def get_num_infer_iters_in_turn(self, session_id: str, turn_id: str) -> int | None:
         if not await self.get_session_if_accessible(session_id):
             return None
 
@@ -171,3 +170,43 @@ class AgentPersistence:
             key=f"num_infer_iters_in_turn:{self.agent_id}:{session_id}:{turn_id}",
         )
         return int(value) if value else None
+
+    async def list_sessions(self) -> list[Session]:
+        values = await self.kvstore.values_in_range(
+            start_key=f"session:{self.agent_id}:",
+            end_key=f"session:{self.agent_id}:\xff\xff\xff\xff",
+        )
+        sessions = []
+        for value in values:
+            try:
+                session_info = Session(**json.loads(value))
+                sessions.append(session_info)
+            except Exception as e:
+                log.error(f"Error parsing session info: {e}")
+                continue
+        return sessions
+
+    async def delete_session_turns(self, session_id: str) -> None:
+        """Delete all turns and their associated data for a session.
+
+        Args:
+            session_id: The ID of the session whose turns should be deleted.
+        """
+        turns = await self.get_session_turns(session_id)
+        for turn in turns:
+            await self.kvstore.delete(key=f"session:{self.agent_id}:{session_id}:{turn.turn_id}")
+
+    async def delete_session(self, session_id: str) -> None:
+        """Delete a session and all its associated turns.
+
+        Args:
+            session_id: The ID of the session to delete.
+
+        Raises:
+            ValueError: If the session does not exist.
+        """
+        session_info = await self.get_session_info(session_id)
+        if session_info is None:
+            raise ValueError(f"Session {session_id} not found")
+
+        await self.kvstore.delete(key=f"session:{self.agent_id}:{session_id}")
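For orientation, the key layout this persistence code reads and writes, collected from the f-strings in this diff; keys used elsewhere in the class may exist beyond these.

# Key patterns observed in this diff (shown as plain format strings for reference).
AGENT_KEY = "agent:{agent_id}"                                                  # AgentInfo JSON (see agents.py)
SESSION_KEY = "session:{agent_id}:{session_id}"                                 # AgentSessionInfo JSON
TURN_KEY = "session:{agent_id}:{session_id}:{turn_id}"                          # Turn JSON, scanned via values_in_range
INFER_ITERS_KEY = "num_infer_iters_in_turn:{agent_id}:{session_id}:{turn_id}"   # iteration counter

# delete_session_turns() removes every TURN_KEY under a session before delete_session() removes
# SESSION_KEY itself, which is why delete_agents_session() in agents.py calls them in that order.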
diff --git a/llama_stack/providers/inline/agents/meta_reference/safety.py b/llama_stack/providers/inline/agents/meta_reference/safety.py
index bef16eaba..6b3573d8c 100644
--- a/llama_stack/providers/inline/agents/meta_reference/safety.py
+++ b/llama_stack/providers/inline/agents/meta_reference/safety.py
@@ -6,7 +6,6 @@
 
 import asyncio
 import logging
-from typing import List
 
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import Safety, SafetyViolation, ViolationLevel
@@ -25,14 +24,14 @@ class ShieldRunnerMixin:
     def __init__(
         self,
         safety_api: Safety,
-        input_shields: List[str] = None,
-        output_shields: List[str] = None,
+        input_shields: list[str] | None = None,
+        output_shields: list[str] | None = None,
     ):
         self.safety_api = safety_api
         self.input_shields = input_shields
         self.output_shields = output_shields
 
-    async def run_multiple_shields(self, messages: List[Message], identifiers: List[str]) -> None:
+    async def run_multiple_shields(self, messages: list[Message], identifiers: list[str]) -> None:
         async def run_shield_with_span(identifier: str):
             async with tracing.span(f"run_shield_{identifier}"):
                 return await self.safety_api.run_shield(
diff --git a/llama_stack/providers/inline/datasetio/localfs/__init__.py b/llama_stack/providers/inline/datasetio/localfs/__init__.py
index 5a0876d79..58aa6ffaf 100644
--- a/llama_stack/providers/inline/datasetio/localfs/__init__.py
+++ b/llama_stack/providers/inline/datasetio/localfs/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from .config import LocalFSDatasetIOConfig
 
 
 async def get_provider_impl(
     config: LocalFSDatasetIOConfig,
-    _deps: Dict[str, Any],
+    _deps: dict[str, Any],
 ):
     from .datasetio import LocalFSDatasetIOImpl
 
diff --git a/llama_stack/providers/inline/datasetio/localfs/config.py b/llama_stack/providers/inline/datasetio/localfs/config.py
index d74521f1f..b450e8777 100644
--- a/llama_stack/providers/inline/datasetio/localfs/config.py
+++ b/llama_stack/providers/inline/datasetio/localfs/config.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -17,7 +17,7 @@ class LocalFSDatasetIOConfig(BaseModel):
     kvstore: KVStoreConfig
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
diff --git a/llama_stack/providers/inline/datasetio/localfs/datasetio.py b/llama_stack/providers/inline/datasetio/localfs/datasetio.py
index e71107d61..da71ecb17 100644
--- a/llama_stack/providers/inline/datasetio/localfs/datasetio.py
+++ b/llama_stack/providers/inline/datasetio/localfs/datasetio.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 import pandas
 
@@ -11,9 +11,9 @@ from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Dataset
 from llama_stack.providers.datatypes import DatasetsProtocolPrivate
-from llama_stack.providers.utils.datasetio.pagination import paginate_records
 from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
 from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.pagination import paginate_records
 
 from .config import LocalFSDatasetIOConfig
 
@@ -64,7 +64,7 @@ class LocalFSDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
         # Load existing datasets from kvstore
         start_key = DATASETS_PREFIX
         end_key = f"{DATASETS_PREFIX}\xff"
-        stored_datasets = await self.kvstore.range(start_key, end_key)
+        stored_datasets = await self.kvstore.values_in_range(start_key, end_key)
 
         for dataset in stored_datasets:
             dataset = Dataset.model_validate_json(dataset)
@@ -92,8 +92,8 @@ class LocalFSDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
     async def iterrows(
         self,
         dataset_id: str,
-        start_index: Optional[int] = None,
-        limit: Optional[int] = None,
+        start_index: int | None = None,
+        limit: int | None = None,
     ) -> PaginatedResponse:
         dataset_def = self.dataset_infos[dataset_id]
         dataset_impl = PandasDataframeDataset(dataset_def)
@@ -102,7 +102,7 @@ class LocalFSDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
         records = dataset_impl.df.to_dict("records")
         return paginate_records(records, start_index, limit)
 
-    async def append_rows(self, dataset_id: str, rows: List[Dict[str, Any]]) -> None:
+    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
         dataset_def = self.dataset_infos[dataset_id]
         dataset_impl = PandasDataframeDataset(dataset_def)
         await dataset_impl.load()
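Illustrative only: paging through a registered dataset with the updated signatures. The dataset id and page size are placeholders, and the `data` attribute on PaginatedResponse is assumed rather than taken from this diff.

# Hypothetical usage sketch for iterrows() with the new int | None parameters.
async def preview_dataset(impl: LocalFSDatasetIOImpl) -> None:
    page = await impl.iterrows("my-dataset", start_index=0, limit=5)
    for row in page.data:  # `data` field assumed on PaginatedResponse
        print(row)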
diff --git a/llama_stack/providers/inline/eval/meta_reference/__init__.py b/llama_stack/providers/inline/eval/meta_reference/__init__.py
index e2a7fc2cd..7afe7f33b 100644
--- a/llama_stack/providers/inline/eval/meta_reference/__init__.py
+++ b/llama_stack/providers/inline/eval/meta_reference/__init__.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -12,7 +12,7 @@ from .config import MetaReferenceEvalConfig
 
 async def get_provider_impl(
     config: MetaReferenceEvalConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .eval import MetaReferenceEvalImpl
 
diff --git a/llama_stack/providers/inline/eval/meta_reference/config.py b/llama_stack/providers/inline/eval/meta_reference/config.py
index 5b2bec259..2a4a29998 100644
--- a/llama_stack/providers/inline/eval/meta_reference/config.py
+++ b/llama_stack/providers/inline/eval/meta_reference/config.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -17,7 +17,7 @@ class MetaReferenceEvalConfig(BaseModel):
     kvstore: KVStoreConfig
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
diff --git a/llama_stack/providers/inline/eval/meta_reference/eval.py b/llama_stack/providers/inline/eval/meta_reference/eval.py
index 7c28f1bb7..bc0898dc5 100644
--- a/llama_stack/providers/inline/eval/meta_reference/eval.py
+++ b/llama_stack/providers/inline/eval/meta_reference/eval.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import json
-from typing import Any, Dict, List
+from typing import Any
 
 from tqdm import tqdm
 
@@ -58,7 +58,7 @@ class MetaReferenceEvalImpl(
         # Load existing benchmarks from kvstore
         start_key = EVAL_TASKS_PREFIX
         end_key = f"{EVAL_TASKS_PREFIX}\xff"
-        stored_benchmarks = await self.kvstore.range(start_key, end_key)
+        stored_benchmarks = await self.kvstore.values_in_range(start_key, end_key)
 
         for benchmark in stored_benchmarks:
             benchmark = Benchmark.model_validate_json(benchmark)
@@ -105,8 +105,8 @@ class MetaReferenceEvalImpl(
         return Job(job_id=job_id, status=JobStatus.completed)
 
     async def _run_agent_generation(
-        self, input_rows: List[Dict[str, Any]], benchmark_config: BenchmarkConfig
-    ) -> List[Dict[str, Any]]:
+        self, input_rows: list[dict[str, Any]], benchmark_config: BenchmarkConfig
+    ) -> list[dict[str, Any]]:
         candidate = benchmark_config.eval_candidate
         create_response = await self.agents_api.create_agent(candidate.config)
         agent_id = create_response.agent_id
@@ -148,8 +148,8 @@ class MetaReferenceEvalImpl(
         return generations
 
     async def _run_model_generation(
-        self, input_rows: List[Dict[str, Any]], benchmark_config: BenchmarkConfig
-    ) -> List[Dict[str, Any]]:
+        self, input_rows: list[dict[str, Any]], benchmark_config: BenchmarkConfig
+    ) -> list[dict[str, Any]]:
         candidate = benchmark_config.eval_candidate
         assert candidate.sampling_params.max_tokens is not None, "SamplingParams.max_tokens must be provided"
 
@@ -185,8 +185,8 @@ class MetaReferenceEvalImpl(
     async def evaluate_rows(
         self,
         benchmark_id: str,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: List[str],
+        input_rows: list[dict[str, Any]],
+        scoring_functions: list[str],
         benchmark_config: BenchmarkConfig,
     ) -> EvaluateResponse:
         candidate = benchmark_config.eval_candidate
diff --git a/llama_stack/providers/inline/inference/meta_reference/__init__.py b/llama_stack/providers/inline/inference/meta_reference/__init__.py
index 3710766e2..5eb822429 100644
--- a/llama_stack/providers/inline/inference/meta_reference/__init__.py
+++ b/llama_stack/providers/inline/inference/meta_reference/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from .config import MetaReferenceInferenceConfig
 
 
 async def get_provider_impl(
     config: MetaReferenceInferenceConfig,
-    _deps: Dict[str, Any],
+    _deps: dict[str, Any],
 ):
     from .inference import MetaReferenceInferenceImpl
 
diff --git a/llama_stack/providers/inline/inference/meta_reference/config.py b/llama_stack/providers/inline/inference/meta_reference/config.py
index 6f796d0d4..7bc961443 100644
--- a/llama_stack/providers/inline/inference/meta_reference/config.py
+++ b/llama_stack/providers/inline/inference/meta_reference/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, field_validator
 
@@ -17,11 +17,11 @@ class MetaReferenceInferenceConfig(BaseModel):
     # the actual inference model id is determined by the model id in the request
     # Note: you need to register the model before using it for inference
     # models in the resource list in the run.yaml config will be registered automatically
-    model: Optional[str] = None
-    torch_seed: Optional[int] = None
+    model: str | None = None
+    torch_seed: int | None = None
     max_seq_len: int = 4096
     max_batch_size: int = 1
-    model_parallel_size: Optional[int] = None
+    model_parallel_size: int | None = None
 
     # when this is False, we assume that the distributed process group is set up by someone
     # outside of this code (e.g., when run inside `torchrun`). that is useful for clients
@@ -30,9 +30,9 @@ class MetaReferenceInferenceConfig(BaseModel):
 
     # By default, the implementation will look at ~/.llama/checkpoints/ but you
     # can override by specifying the directory explicitly
-    checkpoint_dir: Optional[str] = None
+    checkpoint_dir: str | None = None
 
-    quantization: Optional[QuantizationConfig] = None
+    quantization: QuantizationConfig | None = None
 
     @field_validator("model")
     @classmethod
@@ -55,7 +55,7 @@ class MetaReferenceInferenceConfig(BaseModel):
         max_batch_size: str = "${env.MAX_BATCH_SIZE:1}",
         max_seq_len: str = "${env.MAX_SEQ_LEN:4096}",
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         return {
             "model": model,
             "checkpoint_dir": checkpoint_dir,
diff --git a/llama_stack/providers/inline/inference/meta_reference/generators.py b/llama_stack/providers/inline/inference/meta_reference/generators.py
index 0a928ce73..cb926f529 100644
--- a/llama_stack/providers/inline/inference/meta_reference/generators.py
+++ b/llama_stack/providers/inline/inference/meta_reference/generators.py
@@ -5,7 +5,8 @@
 # the root directory of this source tree.
 
 import math
-from typing import Generator, List, Optional, Tuple
+from collections.abc import Generator
+from typing import Optional
 
 import torch
 from lmformatenforcer import JsonSchemaParser, TokenEnforcer, TokenEnforcerTokenizerData
@@ -39,7 +40,7 @@ Tokenizer = Llama4Tokenizer | Llama3Tokenizer
 class LogitsProcessor:
     def __init__(self, token_enforcer: TokenEnforcer):
         self.token_enforcer = token_enforcer
-        self.mask: Optional[torch.Tensor] = None
+        self.mask: torch.Tensor | None = None
 
     def __call__(self, tokens: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
         token_sequence = tokens[0, :].tolist()
@@ -58,7 +59,7 @@ class LogitsProcessor:
 def get_logits_processor(
     tokenizer: Tokenizer,
     vocab_size: int,
-    response_format: Optional[ResponseFormat],
+    response_format: ResponseFormat | None,
 ) -> Optional["LogitsProcessor"]:
     if response_format is None:
         return None
@@ -76,7 +77,7 @@ def get_logits_processor(
     return LogitsProcessor(token_enforcer)
 
 
-def _build_regular_tokens_list(tokenizer: Tokenizer, vocab_size: int) -> List[Tuple[int, str, bool]]:
+def _build_regular_tokens_list(tokenizer: Tokenizer, vocab_size: int) -> list[tuple[int, str, bool]]:
     token_0 = tokenizer.encode("0", bos=False, eos=False)[-1]
     regular_tokens = []
 
@@ -158,7 +159,7 @@ class LlamaGenerator:
 
     def completion(
         self,
-        request_batch: List[CompletionRequestWithRawContent],
+        request_batch: list[CompletionRequestWithRawContent],
     ) -> Generator:
         first_request = request_batch[0]
         sampling_params = first_request.sampling_params or SamplingParams()
@@ -167,7 +168,7 @@ class LlamaGenerator:
             max_gen_len = self.args.max_seq_len - 1
 
         temperature, top_p = _infer_sampling_params(sampling_params)
-        for result in self.inner_generator.generate(
+        yield from self.inner_generator.generate(
             llm_inputs=[self.formatter.encode_content(request.content) for request in request_batch],
             max_gen_len=max_gen_len,
             temperature=temperature,
@@ -179,12 +180,11 @@ class LlamaGenerator:
                 self.args.vocab_size,
                 first_request.response_format,
             ),
-        ):
-            yield result
+        )
 
     def chat_completion(
         self,
-        request_batch: List[ChatCompletionRequestWithRawContent],
+        request_batch: list[ChatCompletionRequestWithRawContent],
     ) -> Generator:
         first_request = request_batch[0]
         sampling_params = first_request.sampling_params or SamplingParams()
@@ -193,7 +193,7 @@ class LlamaGenerator:
             max_gen_len = self.args.max_seq_len - 1
 
         temperature, top_p = _infer_sampling_params(sampling_params)
-        for result in self.inner_generator.generate(
+        yield from self.inner_generator.generate(
             llm_inputs=[
                 self.formatter.encode_dialog_prompt(request.messages, _infer_tool_prompt_format(request))
                 for request in request_batch
@@ -208,5 +208,4 @@ class LlamaGenerator:
                 self.args.vocab_size,
                 first_request.response_format,
             ),
-        ):
-            yield result
+        )
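Side note for reviewers (illustrative, not from the patch): the generator refactor above is purely mechanical; for plain iteration, delegating with yield from produces the same sequence as looping over the inner generator and re-yielding each item.

    def loop_style(inner):
        # pre-refactor pattern that was removed above
        for result in inner:
            yield result

    def delegate_style(inner):
        # post-refactor pattern now used in completion() and chat_completion()
        yield from inner

    assert list(loop_style(iter([1, 2, 3]))) == list(delegate_style(iter([1, 2, 3])))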
diff --git a/llama_stack/providers/inline/inference/meta_reference/inference.py b/llama_stack/providers/inline/inference/meta_reference/inference.py
index 1bc098fab..e238e1b78 100644
--- a/llama_stack/providers/inline/inference/meta_reference/inference.py
+++ b/llama_stack/providers/inline/inference/meta_reference/inference.py
@@ -6,7 +6,8 @@
 
 import asyncio
 import os
-from typing import AsyncGenerator, List, Optional, Union
+import sys
+from collections.abc import AsyncGenerator
 
 from pydantic import BaseModel
 from termcolor import cprint
@@ -28,7 +29,7 @@ from llama_stack.apis.inference import (
     CompletionRequest,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    Inference,
+    InferenceProvider,
     InterleavedContent,
     LogProbConfig,
     Message,
@@ -86,7 +87,7 @@ class MetaReferenceInferenceImpl(
     OpenAICompletionToLlamaStackMixin,
     OpenAIChatCompletionToLlamaStackMixin,
     SentenceTransformerEmbeddingMixin,
-    Inference,
+    InferenceProvider,
     ModelsProtocolPrivate,
 ):
     def __init__(self, config: MetaReferenceInferenceConfig) -> None:
@@ -184,11 +185,11 @@ class MetaReferenceInferenceImpl(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Union[CompletionResponse, CompletionResponseStreamChunk]:
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+    ) -> CompletionResponse | CompletionResponseStreamChunk:
         if sampling_params is None:
             sampling_params = SamplingParams()
         if logprobs:
@@ -215,11 +216,11 @@ class MetaReferenceInferenceImpl(
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> BatchCompletionResponse:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -291,14 +292,14 @@ class MetaReferenceInferenceImpl(
             for x in impl():
                 yield x
 
-    async def _nonstream_completion(self, request_batch: List[CompletionRequest]) -> List[CompletionResponse]:
+    async def _nonstream_completion(self, request_batch: list[CompletionRequest]) -> list[CompletionResponse]:
         tokenizer = self.generator.formatter.tokenizer
 
         first_request = request_batch[0]
 
         class ItemState(BaseModel):
-            tokens: List[int] = []
-            logprobs: List[TokenLogProbs] = []
+            tokens: list[int] = []
+            logprobs: list[TokenLogProbs] = []
             stop_reason: StopReason | None = None
             finished: bool = False
 
@@ -349,15 +350,15 @@ class MetaReferenceInferenceImpl(
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -395,13 +396,13 @@ class MetaReferenceInferenceImpl(
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> BatchChatCompletionResponse:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -436,15 +437,15 @@ class MetaReferenceInferenceImpl(
         return BatchChatCompletionResponse(batch=results)
 
     async def _nonstream_chat_completion(
-        self, request_batch: List[ChatCompletionRequest]
-    ) -> List[ChatCompletionResponse]:
+        self, request_batch: list[ChatCompletionRequest]
+    ) -> list[ChatCompletionResponse]:
         tokenizer = self.generator.formatter.tokenizer
 
         first_request = request_batch[0]
 
         class ItemState(BaseModel):
-            tokens: List[int] = []
-            logprobs: List[TokenLogProbs] = []
+            tokens: list[int] = []
+            logprobs: list[TokenLogProbs] = []
             stop_reason: StopReason | None = None
             finished: bool = False
 
@@ -455,9 +456,9 @@ class MetaReferenceInferenceImpl(
                 first = token_results[0]
                 if not first.finished and not first.ignore_token:
                     if os.environ.get("LLAMA_MODELS_DEBUG", "0") in ("1", "2"):
-                        cprint(first.text, "cyan", end="")
+                        cprint(first.text, color="cyan", end="", file=sys.stderr)
                     if os.environ.get("LLAMA_MODELS_DEBUG", "0") == "2":
-                        cprint(f"<{first.token}>", "magenta", end="")
+                        cprint(f"<{first.token}>", color="magenta", end="", file=sys.stderr)
 
                 for result in token_results:
                     idx = result.batch_idx
@@ -519,9 +520,9 @@ class MetaReferenceInferenceImpl(
             for token_results in self.generator.chat_completion([request]):
                 token_result = token_results[0]
                 if os.environ.get("LLAMA_MODELS_DEBUG", "0") == "1":
-                    cprint(token_result.text, "cyan", end="")
+                    cprint(token_result.text, color="cyan", end="", file=sys.stderr)
                 if os.environ.get("LLAMA_MODELS_DEBUG", "0") == "2":
-                    cprint(f"<{token_result.token}>", "magenta", end="")
+                    cprint(f"<{token_result.token}>", color="magenta", end="", file=sys.stderr)
 
                 if token_result.token == tokenizer.eot_id:
                     stop_reason = StopReason.end_of_turn
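The debug-print changes above route token-by-token output to stderr so it cannot interleave with streamed responses on stdout. A hypothetical stand-alone helper showing the same pattern (names are illustrative, not from the diff):

    import os
    import sys

    from termcolor import cprint

    def maybe_debug_token(text: str) -> None:
        # only emit when LLAMA_MODELS_DEBUG is set, and always write to stderr
        if os.environ.get("LLAMA_MODELS_DEBUG", "0") in ("1", "2"):
            cprint(text, color="cyan", end="", file=sys.stderr)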
diff --git a/llama_stack/providers/inline/inference/meta_reference/model_parallel.py b/llama_stack/providers/inline/inference/meta_reference/model_parallel.py
index 50640c6d1..9031d36b3 100644
--- a/llama_stack/providers/inline/inference/meta_reference/model_parallel.py
+++ b/llama_stack/providers/inline/inference/meta_reference/model_parallel.py
@@ -4,9 +4,10 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from collections.abc import Callable, Generator
 from copy import deepcopy
 from functools import partial
-from typing import Any, Callable, Generator, List
+from typing import Any
 
 from llama_stack.models.llama.llama3.chat_format import ChatFormat as Llama3ChatFormat
 from llama_stack.models.llama.llama4.chat_format import ChatFormat as Llama4ChatFormat
@@ -82,7 +83,7 @@ class LlamaModelParallelGenerator:
 
     def completion(
         self,
-        request_batch: List[CompletionRequestWithRawContent],
+        request_batch: list[CompletionRequestWithRawContent],
     ) -> Generator:
         req_obj = deepcopy(request_batch)
         gen = self.group.run_inference(("completion", req_obj))
@@ -90,7 +91,7 @@ class LlamaModelParallelGenerator:
 
     def chat_completion(
         self,
-        request_batch: List[ChatCompletionRequestWithRawContent],
+        request_batch: list[ChatCompletionRequestWithRawContent],
     ) -> Generator:
         req_obj = deepcopy(request_batch)
         gen = self.group.run_inference(("chat_completion", req_obj))
diff --git a/llama_stack/providers/inline/inference/meta_reference/parallel_utils.py b/llama_stack/providers/inline/inference/meta_reference/parallel_utils.py
index 8c0ffc632..97e96b929 100644
--- a/llama_stack/providers/inline/inference/meta_reference/parallel_utils.py
+++ b/llama_stack/providers/inline/inference/meta_reference/parallel_utils.py
@@ -18,8 +18,9 @@ import os
 import tempfile
 import time
 import uuid
+from collections.abc import Callable, Generator
 from enum import Enum
-from typing import Callable, Generator, List, Literal, Optional, Tuple, Union
+from typing import Annotated, Literal
 
 import torch
 import zmq
@@ -30,7 +31,6 @@ from fairscale.nn.model_parallel.initialize import (
 )
 from pydantic import BaseModel, Field
 from torch.distributed.launcher.api import LaunchConfig, elastic_launch
-from typing_extensions import Annotated
 
 from llama_stack.models.llama.datatypes import GenerationResult
 from llama_stack.providers.utils.inference.prompt_adapter import (
@@ -69,15 +69,15 @@ class CancelSentinel(BaseModel):
 
 class TaskRequest(BaseModel):
     type: Literal[ProcessingMessageName.task_request] = ProcessingMessageName.task_request
-    task: Tuple[
+    task: tuple[
         str,
-        List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+        list[CompletionRequestWithRawContent] | list[ChatCompletionRequestWithRawContent],
     ]
 
 
 class TaskResponse(BaseModel):
     type: Literal[ProcessingMessageName.task_response] = ProcessingMessageName.task_response
-    result: List[GenerationResult]
+    result: list[GenerationResult]
 
 
 class ExceptionResponse(BaseModel):
@@ -85,15 +85,9 @@ class ExceptionResponse(BaseModel):
     error: str
 
 
-ProcessingMessage = Union[
-    ReadyRequest,
-    ReadyResponse,
-    EndSentinel,
-    CancelSentinel,
-    TaskRequest,
-    TaskResponse,
-    ExceptionResponse,
-]
+ProcessingMessage = (
+    ReadyRequest | ReadyResponse | EndSentinel | CancelSentinel | TaskRequest | TaskResponse | ExceptionResponse
+)
 
 
 class ProcessingMessageWrapper(BaseModel):
@@ -203,7 +197,7 @@ def maybe_get_work(sock: zmq.Socket):
     return client_id, message
 
 
-def maybe_parse_message(maybe_json: Optional[str]) -> Optional[ProcessingMessage]:
+def maybe_parse_message(maybe_json: str | None) -> ProcessingMessage | None:
     if maybe_json is None:
         return None
     try:
@@ -334,9 +328,9 @@ class ModelParallelProcessGroup:
 
     def run_inference(
         self,
-        req: Tuple[
+        req: tuple[
             str,
-            List[CompletionRequestWithRawContent] | List[ChatCompletionRequestWithRawContent],
+            list[CompletionRequestWithRawContent] | list[ChatCompletionRequestWithRawContent],
         ],
     ) -> Generator:
         assert not self.running, "inference already running"
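For context (illustrative, not from the patch): the ProcessingMessage rewrite above is just the PEP 604 spelling of the same tagged-model union. A minimal reproduction of the pattern with made-up message types:

    from typing import Literal

    from pydantic import BaseModel

    class Ping(BaseModel):
        type: Literal["ping"] = "ping"

    class Pong(BaseModel):
        type: Literal["pong"] = "pong"

    # equivalent to Union[Ping, Pong], written in the style now used for ProcessingMessage
    Message = Ping | Pong

    def describe(msg: Message) -> str:
        return "ping" if isinstance(msg, Ping) else "pong"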
diff --git a/llama_stack/providers/inline/inference/sentence_transformers/__init__.py b/llama_stack/providers/inline/inference/sentence_transformers/__init__.py
index c1d65d10c..1719cbacc 100644
--- a/llama_stack/providers/inline/inference/sentence_transformers/__init__.py
+++ b/llama_stack/providers/inline/inference/sentence_transformers/__init__.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.inline.inference.sentence_transformers.config import (
     SentenceTransformersInferenceConfig,
@@ -13,7 +13,7 @@ from llama_stack.providers.inline.inference.sentence_transformers.config import
 
 async def get_provider_impl(
     config: SentenceTransformersInferenceConfig,
-    _deps: Dict[str, Any],
+    _deps: dict[str, Any],
 ):
     from .sentence_transformers import SentenceTransformersInferenceImpl
 
diff --git a/llama_stack/providers/inline/inference/sentence_transformers/config.py b/llama_stack/providers/inline/inference/sentence_transformers/config.py
index 93e0afe11..b03010b10 100644
--- a/llama_stack/providers/inline/inference/sentence_transformers/config.py
+++ b/llama_stack/providers/inline/inference/sentence_transformers/config.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class SentenceTransformersInferenceConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py b/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
index d717d055f..890c526f5 100644
--- a/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
+++ b/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py
@@ -5,11 +5,11 @@
 # the root directory of this source tree.
 
 import logging
-from typing import AsyncGenerator, List, Optional, Union
+from collections.abc import AsyncGenerator
 
 from llama_stack.apis.inference import (
     CompletionResponse,
-    Inference,
+    InferenceProvider,
     InterleavedContent,
     LogProbConfig,
     Message,
@@ -38,7 +38,7 @@ class SentenceTransformersInferenceImpl(
     OpenAIChatCompletionToLlamaStackMixin,
     OpenAICompletionToLlamaStackMixin,
     SentenceTransformerEmbeddingMixin,
-    Inference,
+    InferenceProvider,
     ModelsProtocolPrivate,
 ):
     def __init__(self, config: SentenceTransformersInferenceConfig) -> None:
@@ -60,46 +60,46 @@ class SentenceTransformersInferenceImpl(
         self,
         model_id: str,
         content: str,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Union[CompletionResponse, AsyncGenerator]:
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+    ) -> CompletionResponse | AsyncGenerator:
         raise ValueError("Sentence transformers don't support completion")
 
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         raise ValueError("Sentence transformers don't support chat completion")
 
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch completion is not supported for Sentence Transformers")
 
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for Sentence Transformers")
diff --git a/llama_stack/providers/inline/inference/vllm/__init__.py b/llama_stack/providers/inline/inference/vllm/__init__.py
index bd0551e57..d0ec3e084 100644
--- a/llama_stack/providers/inline/inference/vllm/__init__.py
+++ b/llama_stack/providers/inline/inference/vllm/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from .config import VLLMConfig
 
 
-async def get_provider_impl(config: VLLMConfig, _deps: Dict[str, Any]):
+async def get_provider_impl(config: VLLMConfig, _deps: dict[str, Any]):
     from .vllm import VLLMInferenceImpl
 
     impl = VLLMInferenceImpl(config)
diff --git a/llama_stack/providers/inline/inference/vllm/config.py b/llama_stack/providers/inline/inference/vllm/config.py
index 51d48e6d5..ce8743c74 100644
--- a/llama_stack/providers/inline/inference/vllm/config.py
+++ b/llama_stack/providers/inline/inference/vllm/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -42,7 +42,7 @@ class VLLMConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
         return {
             "tensor_parallel_size": "${env.TENSOR_PARALLEL_SIZE:1}",
             "max_tokens": "${env.MAX_TOKENS:4096}",
diff --git a/llama_stack/providers/inline/inference/vllm/openai_utils.py b/llama_stack/providers/inline/inference/vllm/openai_utils.py
index d34f5ad5f..77cbf0403 100644
--- a/llama_stack/providers/inline/inference/vllm/openai_utils.py
+++ b/llama_stack/providers/inline/inference/vllm/openai_utils.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Optional
 
 import vllm
 
@@ -55,8 +54,8 @@ def _merge_context_into_content(message: Message) -> Message:  # type: ignore
 
 
 def _llama_stack_tools_to_openai_tools(
-    tools: Optional[List[ToolDefinition]] = None,
-) -> List[vllm.entrypoints.openai.protocol.ChatCompletionToolsParam]:
+    tools: list[ToolDefinition] | None = None,
+) -> list[vllm.entrypoints.openai.protocol.ChatCompletionToolsParam]:
     """
     Convert the list of available tools from Llama Stack's format to vLLM's
     version of OpenAI's format.
diff --git a/llama_stack/providers/inline/inference/vllm/vllm.py b/llama_stack/providers/inline/inference/vllm/vllm.py
index 9d742c39c..bf54462b5 100644
--- a/llama_stack/providers/inline/inference/vllm/vllm.py
+++ b/llama_stack/providers/inline/inference/vllm/vllm.py
@@ -7,7 +7,7 @@
 import json
 import re
 import uuid
-from typing import AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
 
 # These vLLM modules contain names that overlap with Llama Stack names, so we import
 # fully-qualified names
@@ -40,6 +40,7 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -100,7 +101,7 @@ def _random_uuid_str() -> str:
 
 
 def _response_format_to_guided_decoding_params(
-    response_format: Optional[ResponseFormat],  # type: ignore
+    response_format: ResponseFormat | None,  # type: ignore
 ) -> vllm.sampling_params.GuidedDecodingParams:
     """
     Translate constrained decoding parameters from Llama Stack's format to vLLM's format.
@@ -131,9 +132,9 @@ def _response_format_to_guided_decoding_params(
 
 
 def _convert_sampling_params(
-    sampling_params: Optional[SamplingParams],
-    response_format: Optional[ResponseFormat],  # type: ignore
-    log_prob_config: Optional[LogProbConfig],
+    sampling_params: SamplingParams | None,
+    response_format: ResponseFormat | None,  # type: ignore
+    log_prob_config: LogProbConfig | None,
 ) -> vllm.SamplingParams:
     """Convert sampling and constrained decoding configuration from Llama Stack's format to vLLM's
     format."""
@@ -370,11 +371,11 @@ class VLLMInferenceImpl(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]:
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+    ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
         if model_id not in self.model_ids:
             raise ValueError(
                 f"This adapter is not registered to model id '{model_id}'. Registered IDs are: {self.model_ids}"
@@ -403,25 +404,35 @@ class VLLMInferenceImpl(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],  # type: ignore
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,  # type: ignore
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],  # type: ignore
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,  # type: ignore
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> ChatCompletionResponse | ChatCompletionResponseStreamChunk:
         sampling_params = sampling_params or SamplingParams()
         if model_id not in self.model_ids:
@@ -605,7 +616,7 @@ class VLLMInferenceImpl(
 
     async def _chat_completion_for_meta_llama(
         self, request: ChatCompletionRequest
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         """
         Subroutine that routes chat completions for Meta Llama models through Llama Stack's
         chat template instead of using vLLM's version of that template. The Llama Stack version
@@ -701,7 +712,7 @@ class VLLMInferenceImpl(
         # Tool calls come in pieces, but Llama Stack expects them in bigger chunks. We build up
         # those chunks and output them at the end.
         # This data structure holds the current set of partial tool calls.
-        index_to_tool_call: Dict[int, Dict] = dict()
+        index_to_tool_call: dict[int, dict] = dict()
 
         # The Llama Stack event stream must always start with a start event. Use an empty one to
         # simplify logic below
diff --git a/llama_stack/providers/inline/post_training/common/utils.py b/llama_stack/providers/inline/post_training/common/utils.py
new file mode 100644
index 000000000..7840b21e8
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/common/utils.py
@@ -0,0 +1,35 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import gc
+
+
+def evacuate_model_from_device(model, device: str):
+    """Safely clear a model from memory and free device resources.
+    This function handles the proper cleanup of a model by:
+    1. Moving the model to CPU if it's on a non-CPU device
+    2. Deleting the model object to free memory
+    3. Running garbage collection
+    4. Clearing CUDA cache if the model was on a CUDA device
+    Args:
+        model: The PyTorch model to clear
+        device: The device type the model is currently on ('cuda', 'mps', 'cpu')
+    Note:
+        - For CUDA devices, this will clear the CUDA cache after moving the model to CPU
+        - For MPS devices, only moves the model to CPU (no cache clearing available)
+        - For CPU devices, only deletes the model object and runs garbage collection
+    """
+    if device != "cpu":
+        model.to("cpu")
+
+    del model
+    gc.collect()
+
+    if device == "cuda":
+        # import torch lazily so it is only loaded when this function is called
+        import torch
+
+        torch.cuda.empty_cache()
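A hypothetical usage sketch for the helper above (not part of the diff); the model and device are stand-ins for whatever a training recipe produced:

    import torch

    from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device

    model = torch.nn.Linear(8, 8)            # stand-in for a fine-tuned model
    evacuate_model_from_device(model, "cpu")  # would be "cuda" or "mps" after real training
    del model                                 # also drop the caller's reference so gc can reclaim it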
diff --git a/llama_stack/providers/inline/post_training/common/validator.py b/llama_stack/providers/inline/post_training/common/validator.py
index b0aec6187..950b75f86 100644
--- a/llama_stack/providers/inline/post_training/common/validator.py
+++ b/llama_stack/providers/inline/post_training/common/validator.py
@@ -17,10 +17,8 @@ from llama_stack.apis.common.type_system import (
     DialogType,
     StringType,
 )
-from llama_stack.apis.datasets import Datasets
 from llama_stack.providers.utils.common.data_schema_validator import (
     ColumnName,
-    validate_dataset_schema,
 )
 
 EXPECTED_DATASET_SCHEMA: dict[str, list[dict[str, Any]]] = {
@@ -36,21 +34,3 @@ EXPECTED_DATASET_SCHEMA: dict[str, list[dict[str, Any]]] = {
         }
     ],
 }
-
-
-async def validate_input_dataset_schema(
-    datasets_api: Datasets,
-    dataset_id: str,
-    dataset_type: str,
-) -> None:
-    dataset_def = await datasets_api.get_dataset(dataset_id=dataset_id)
-    if not dataset_def:
-        raise ValueError(f"Dataset {dataset_id} does not exist.")
-
-    if not dataset_def.dataset_schema or len(dataset_def.dataset_schema) == 0:
-        raise ValueError(f"Dataset {dataset_id} does not have a schema defined.")
-
-    if dataset_type not in EXPECTED_DATASET_SCHEMA:
-        raise ValueError(f"Dataset type {dataset_type} is not supported.")
-
-    validate_dataset_schema(dataset_def.dataset_schema, EXPECTED_DATASET_SCHEMA[dataset_type])
diff --git a/llama_stack/providers/inline/post_training/huggingface/__init__.py b/llama_stack/providers/inline/post_training/huggingface/__init__.py
new file mode 100644
index 000000000..cc1a671c1
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/huggingface/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from llama_stack.distribution.datatypes import Api
+
+from .config import HuggingFacePostTrainingConfig
+
+# The post_training API and the HuggingFace provider are still experimental and under heavy development.
+
+
+async def get_provider_impl(
+    config: HuggingFacePostTrainingConfig,
+    deps: dict[Api, Any],
+):
+    from .post_training import HuggingFacePostTrainingImpl
+
+    impl = HuggingFacePostTrainingImpl(
+        config,
+        deps[Api.datasetio],
+        deps[Api.datasets],
+    )
+    return impl
diff --git a/llama_stack/providers/inline/post_training/huggingface/config.py b/llama_stack/providers/inline/post_training/huggingface/config.py
new file mode 100644
index 000000000..06c6d8073
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/huggingface/config.py
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any, Literal
+
+from pydantic import BaseModel
+
+
+class HuggingFacePostTrainingConfig(BaseModel):
+    # Device to run training on (cuda, cpu, mps)
+    device: str = "cuda"
+
+    # Distributed training backend if using multiple devices
+    # fsdp: Fully Sharded Data Parallel
+    # deepspeed: DeepSpeed ZeRO optimization
+    distributed_backend: Literal["fsdp", "deepspeed"] | None = None
+
+    # Format for saving model checkpoints
+    # full_state: Save complete model state
+    # huggingface: Save in HuggingFace format (recommended for compatibility)
+    checkpoint_format: Literal["full_state", "huggingface"] | None = "huggingface"
+
+    # Template for formatting chat inputs and outputs
+    # Used to structure the conversation format for training
+    chat_template: str = "<|user|>\n{input}\n<|assistant|>\n{output}"
+
+    # Model-specific configuration parameters
+    # trust_remote_code: Allow execution of custom model code
+    # attn_implementation: Use SDPA (Scaled Dot Product Attention) for better performance
+    model_specific_config: dict = {
+        "trust_remote_code": True,
+        "attn_implementation": "sdpa",
+    }
+
+    # Maximum sequence length for training
+    # Set to 2048 as this is the maximum that works reliably on MPS (Apple Silicon)
+    # Longer sequences may cause memory issues on MPS devices
+    max_seq_length: int = 2048
+
+    # Enable gradient checkpointing to reduce memory usage
+    # Trades computation for memory by recomputing activations
+    gradient_checkpointing: bool = False
+
+    # Maximum number of checkpoints to keep
+    # Older checkpoints are deleted when this limit is reached
+    save_total_limit: int = 3
+
+    # Number of training steps between logging updates
+    logging_steps: int = 10
+
+    # Ratio of training steps used for learning rate warmup
+    # Helps stabilize early training
+    warmup_ratio: float = 0.1
+
+    # L2 regularization coefficient
+    # Helps prevent overfitting
+    weight_decay: float = 0.01
+
+    # Number of worker processes for data loading
+    # Higher values can improve data loading speed but increase memory usage
+    dataloader_num_workers: int = 4
+
+    # Whether to pin memory in data loader
+    # Can improve data transfer speed to GPU but uses more memory
+    dataloader_pin_memory: bool = True
+
+    @classmethod
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
+        return {"checkpoint_format": "huggingface", "distributed_backend": None, "device": "cpu"}
diff --git a/llama_stack/providers/inline/post_training/huggingface/post_training.py b/llama_stack/providers/inline/post_training/huggingface/post_training.py
new file mode 100644
index 000000000..0b2760792
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/huggingface/post_training.py
@@ -0,0 +1,176 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from enum import Enum
+from typing import Any
+
+from llama_stack.apis.datasetio import DatasetIO
+from llama_stack.apis.datasets import Datasets
+from llama_stack.apis.post_training import (
+    AlgorithmConfig,
+    Checkpoint,
+    DPOAlignmentConfig,
+    JobStatus,
+    ListPostTrainingJobsResponse,
+    PostTrainingJob,
+    PostTrainingJobArtifactsResponse,
+    PostTrainingJobStatusResponse,
+    TrainingConfig,
+)
+from llama_stack.providers.inline.post_training.huggingface.config import (
+    HuggingFacePostTrainingConfig,
+)
+from llama_stack.providers.inline.post_training.huggingface.recipes.finetune_single_device import (
+    HFFinetuningSingleDevice,
+)
+from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
+from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
+from llama_stack.schema_utils import webmethod
+
+
+class TrainingArtifactType(Enum):
+    CHECKPOINT = "checkpoint"
+    RESOURCES_STATS = "resources_stats"
+
+
+_JOB_TYPE_SUPERVISED_FINE_TUNE = "supervised-fine-tune"
+
+
+class HuggingFacePostTrainingImpl:
+    def __init__(
+        self,
+        config: HuggingFacePostTrainingConfig,
+        datasetio_api: DatasetIO,
+        datasets: Datasets,
+    ) -> None:
+        self.config = config
+        self.datasetio_api = datasetio_api
+        self.datasets_api = datasets
+        self._scheduler = Scheduler()
+
+    async def shutdown(self) -> None:
+        await self._scheduler.shutdown()
+
+    @staticmethod
+    def _checkpoint_to_artifact(checkpoint: Checkpoint) -> JobArtifact:
+        return JobArtifact(
+            type=TrainingArtifactType.CHECKPOINT.value,
+            name=checkpoint.identifier,
+            uri=checkpoint.path,
+            metadata=dict(checkpoint),
+        )
+
+    @staticmethod
+    def _resources_stats_to_artifact(resources_stats: dict[str, Any]) -> JobArtifact:
+        return JobArtifact(
+            type=TrainingArtifactType.RESOURCES_STATS.value,
+            name=TrainingArtifactType.RESOURCES_STATS.value,
+            metadata=resources_stats,
+        )
+
+    async def supervised_fine_tune(
+        self,
+        job_uuid: str,
+        training_config: TrainingConfig,
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
+        model: str,
+        checkpoint_dir: str | None = None,
+        algorithm_config: AlgorithmConfig | None = None,
+    ) -> PostTrainingJob:
+        async def handler(on_log_message_cb, on_status_change_cb, on_artifact_collected_cb):
+            on_log_message_cb("Starting HF finetuning")
+
+            recipe = HFFinetuningSingleDevice(
+                job_uuid=job_uuid,
+                datasetio_api=self.datasetio_api,
+                datasets_api=self.datasets_api,
+            )
+
+            resources_allocated, checkpoints = await recipe.train(
+                model=model,
+                output_dir=checkpoint_dir,
+                job_uuid=job_uuid,
+                lora_config=algorithm_config,
+                config=training_config,
+                provider_config=self.config,
+            )
+
+            on_artifact_collected_cb(self._resources_stats_to_artifact(resources_allocated))
+            if checkpoints:
+                for checkpoint in checkpoints:
+                    artifact = self._checkpoint_to_artifact(checkpoint)
+                    on_artifact_collected_cb(artifact)
+
+            on_status_change_cb(SchedulerJobStatus.completed)
+            on_log_message_cb("HF finetuning completed")
+
+        job_uuid = self._scheduler.schedule(_JOB_TYPE_SUPERVISED_FINE_TUNE, job_uuid, handler)
+        return PostTrainingJob(job_uuid=job_uuid)
+
+    async def preference_optimize(
+        self,
+        job_uuid: str,
+        finetuned_model: str,
+        algorithm_config: DPOAlignmentConfig,
+        training_config: TrainingConfig,
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
+    ) -> PostTrainingJob:
+        raise NotImplementedError("DPO alignment is not implemented yet")
+
+    async def get_training_jobs(self) -> ListPostTrainingJobsResponse:
+        return ListPostTrainingJobsResponse(
+            data=[PostTrainingJob(job_uuid=job.id) for job in self._scheduler.get_jobs()]
+        )
+
+    @staticmethod
+    def _get_artifacts_metadata_by_type(job, artifact_type):
+        return [artifact.metadata for artifact in job.artifacts if artifact.type == artifact_type]
+
+    @classmethod
+    def _get_checkpoints(cls, job):
+        return cls._get_artifacts_metadata_by_type(job, TrainingArtifactType.CHECKPOINT.value)
+
+    @classmethod
+    def _get_resources_allocated(cls, job):
+        data = cls._get_artifacts_metadata_by_type(job, TrainingArtifactType.RESOURCES_STATS.value)
+        return data[0] if data else None
+
+    @webmethod(route="/post-training/job/status")
+    async def get_training_job_status(self, job_uuid: str) -> PostTrainingJobStatusResponse | None:
+        job = self._scheduler.get_job(job_uuid)
+
+        match job.status:
+            # TODO: Add support for other statuses to API
+            case SchedulerJobStatus.new | SchedulerJobStatus.scheduled:
+                status = JobStatus.scheduled
+            case SchedulerJobStatus.running:
+                status = JobStatus.in_progress
+            case SchedulerJobStatus.completed:
+                status = JobStatus.completed
+            case SchedulerJobStatus.failed:
+                status = JobStatus.failed
+            case _:
+                raise NotImplementedError()
+
+        return PostTrainingJobStatusResponse(
+            job_uuid=job_uuid,
+            status=status,
+            scheduled_at=job.scheduled_at,
+            started_at=job.started_at,
+            completed_at=job.completed_at,
+            checkpoints=self._get_checkpoints(job),
+            resources_allocated=self._get_resources_allocated(job),
+        )
+
+    @webmethod(route="/post-training/job/cancel")
+    async def cancel_training_job(self, job_uuid: str) -> None:
+        self._scheduler.cancel(job_uuid)
+
+    @webmethod(route="/post-training/job/artifacts")
+    async def get_training_job_artifacts(self, job_uuid: str) -> PostTrainingJobArtifactsResponse | None:
+        job = self._scheduler.get_job(job_uuid)
+        return PostTrainingJobArtifactsResponse(job_uuid=job_uuid, checkpoints=self._get_checkpoints(job))
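For reference, the scheduler-to-API status translation used by get_training_job_status above, pulled out as a stand-alone sketch (illustrative only):

    from llama_stack.apis.post_training import JobStatus
    from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus

    def to_api_status(status: SchedulerJobStatus) -> JobStatus:
        # mirrors the match statement in get_training_job_status
        match status:
            case SchedulerJobStatus.new | SchedulerJobStatus.scheduled:
                return JobStatus.scheduled
            case SchedulerJobStatus.running:
                return JobStatus.in_progress
            case SchedulerJobStatus.completed:
                return JobStatus.completed
            case SchedulerJobStatus.failed:
                return JobStatus.failed
            case _:
                raise NotImplementedError(f"unhandled scheduler status: {status}")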
diff --git a/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
new file mode 100644
index 000000000..b6d13b029
--- /dev/null
+++ b/llama_stack/providers/inline/post_training/huggingface/recipes/finetune_single_device.py
@@ -0,0 +1,683 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import gc
+import json
+import logging
+import multiprocessing
+import os
+import signal
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+import psutil
+
+from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
+
+# Set tokenizer parallelism environment variable
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+# Force PyTorch to use OpenBLAS instead of MKL
+os.environ["MKL_THREADING_LAYER"] = "GNU"
+os.environ["MKL_SERVICE_FORCE_INTEL"] = "0"
+os.environ["MKL_NUM_THREADS"] = "1"
+
+import torch
+from datasets import Dataset
+from peft import LoraConfig
+from transformers import (
+    AutoConfig,
+    AutoModelForCausalLM,
+    AutoTokenizer,
+)
+from trl import SFTConfig, SFTTrainer
+
+from llama_stack.apis.datasetio import DatasetIO
+from llama_stack.apis.datasets import Datasets
+from llama_stack.apis.post_training import (
+    Checkpoint,
+    DataConfig,
+    LoraFinetuningConfig,
+    TrainingConfig,
+)
+
+from ..config import HuggingFacePostTrainingConfig
+
+logger = logging.getLogger(__name__)
+
+
+def get_gb(to_convert: int) -> str:
+    """Converts memory stats to GB and formats to 2 decimal places.
+    Args:
+        to_convert: Memory value in bytes
+    Returns:
+        str: Memory value in GB formatted to 2 decimal places
+    """
+    return f"{(to_convert / (1024**3)):.2f}"
+
+
+def get_memory_stats(device: torch.device) -> dict[str, Any]:
+    """Get memory statistics for the given device."""
+    stats = {
+        "system_memory": {
+            "total": get_gb(psutil.virtual_memory().total),
+            "available": get_gb(psutil.virtual_memory().available),
+            "used": get_gb(psutil.virtual_memory().used),
+            "percent": psutil.virtual_memory().percent,
+        }
+    }
+
+    if device.type == "cuda":
+        stats["device_memory"] = {
+            "allocated": get_gb(torch.cuda.memory_allocated(device)),
+            "reserved": get_gb(torch.cuda.memory_reserved(device)),
+            "max_allocated": get_gb(torch.cuda.max_memory_allocated(device)),
+        }
+    elif device.type == "mps":
+        # MPS doesn't provide direct memory stats, but we can track system memory
+        stats["device_memory"] = {
+            "note": "MPS memory stats not directly available",
+            "system_memory_used": get_gb(psutil.virtual_memory().used),
+        }
+    elif device.type == "cpu":
+        # For CPU, we track process memory usage
+        process = psutil.Process()
+        stats["device_memory"] = {
+            "process_rss": get_gb(process.memory_info().rss),
+            "process_vms": get_gb(process.memory_info().vms),
+            "process_percent": process.memory_percent(),
+        }
+
+    return stats
+
+
+def setup_torch_device(device_str: str) -> torch.device:
+    """Initialize and validate a PyTorch device.
+    This function handles device initialization and validation for different device types:
+    - CUDA: Validates CUDA availability and handles device selection
+    - MPS: Validates MPS availability for Apple Silicon
+    - CPU: Basic validation
+    - HPU: Raises error as it's not supported
+    Args:
+        device_str: String specifying the device ('cuda', 'cpu', 'mps')
+    Returns:
+        torch.device: The initialized and validated device
+    Raises:
+        RuntimeError: If device initialization fails or device is not supported
+    """
+    try:
+        device = torch.device(device_str)
+    except RuntimeError as e:
+        raise RuntimeError(f"Error getting Torch Device {str(e)}") from e
+
+    # Validate device capabilities
+    if device.type == "cuda":
+        if not torch.cuda.is_available():
+            raise RuntimeError(
+                f"{device.type}: Torch has no CUDA/ROCm support or could not detect a compatible device."
+            )
+        if device.index is None:
+            device = torch.device(device.type, torch.cuda.current_device())
+    elif device.type == "mps":
+        if not torch.backends.mps.is_available():
+            raise RuntimeError(f"{device.type}: Torch has no MPS support or could not detect a compatible device.")
+    elif device.type == "hpu":
+        raise RuntimeError(f"{device.type}: training does not support Intel Gaudi.")
+
+    return device
+
+
+class HFFinetuningSingleDevice:
+    def __init__(
+        self,
+        job_uuid: str,
+        datasetio_api: DatasetIO,
+        datasets_api: Datasets,
+    ):
+        self.datasetio_api = datasetio_api
+        self.datasets_api = datasets_api
+        self.job_uuid = job_uuid
+
+    def validate_dataset_format(self, rows: list[dict]) -> bool:
+        """Validate that the dataset has the required fields."""
+        required_fields = ["input_query", "expected_answer", "chat_completion_input"]
+        return all(field in row for row in rows for field in required_fields)
+
+    def _process_instruct_format(self, row: dict) -> tuple[str | None, str | None]:
+        """Process a row in instruct format."""
+        if "chat_completion_input" in row and "expected_answer" in row:
+            try:
+                messages = json.loads(row["chat_completion_input"])
+                if not isinstance(messages, list) or len(messages) != 1:
+                    logger.warning(f"Invalid chat_completion_input format: {row['chat_completion_input']}")
+                    return None, None
+                if "content" not in messages[0]:
+                    logger.warning(f"Message missing content: {messages[0]}")
+                    return None, None
+                return messages[0]["content"], row["expected_answer"]
+            except json.JSONDecodeError:
+                logger.warning(f"Failed to parse chat_completion_input: {row['chat_completion_input']}")
+                return None, None
+        return None, None
+
+    def _process_dialog_format(self, row: dict) -> tuple[str | None, str | None]:
+        """Process a row in dialog format."""
+        if "dialog" in row:
+            try:
+                dialog = json.loads(row["dialog"])
+                if not isinstance(dialog, list) or len(dialog) < 2:
+                    logger.warning(f"Dialog must have at least 2 messages: {row['dialog']}")
+                    return None, None
+                if dialog[0].get("role") != "user":
+                    logger.warning(f"First message must be from user: {dialog[0]}")
+                    return None, None
+                if not any(msg.get("role") == "assistant" for msg in dialog):
+                    logger.warning("Dialog must have at least one assistant message")
+                    return None, None
+
+                # Convert to human/gpt format
+                role_map = {"user": "human", "assistant": "gpt"}
+                conversations = []
+                for msg in dialog:
+                    if "role" not in msg or "content" not in msg:
+                        logger.warning(f"Message missing role or content: {msg}")
+                        continue
+                    if msg["role"] not in role_map:
+                        logger.warning(f"Unsupported role in dialog message: {msg['role']}")
+                        continue
+                    conversations.append({"from": role_map[msg["role"]], "value": msg["content"]})
+
+                # Format as a single conversation; guard against rows where filtering left
+                # fewer than two usable messages
+                if len(conversations) < 2:
+                    logger.warning("Dialog has fewer than two usable messages after filtering")
+                    return None, None
+                return conversations[0]["value"], conversations[1]["value"]
+            except json.JSONDecodeError:
+                logger.warning(f"Failed to parse dialog: {row['dialog']}")
+                return None, None
+        return None, None
+
+    def _process_fallback_format(self, row: dict) -> tuple[str | None, str | None]:
+        """Process a row using fallback formats."""
+        if "input" in row and "output" in row:
+            return row["input"], row["output"]
+        elif "prompt" in row and "completion" in row:
+            return row["prompt"], row["completion"]
+        elif "question" in row and "answer" in row:
+            return row["question"], row["answer"]
+        return None, None
+
+    def _format_text(self, input_text: str, output_text: str, provider_config: HuggingFacePostTrainingConfig) -> str:
+        """Format input and output text based on model requirements."""
+        if getattr(provider_config, "chat_template", None):
+            return provider_config.chat_template.format(input=input_text, output=output_text)
+        return f"{input_text}\n{output_text}"
+
+    def _create_dataset(
+        self, rows: list[dict], config: TrainingConfig, provider_config: HuggingFacePostTrainingConfig
+    ) -> Dataset:
+        """Create and preprocess the dataset."""
+        formatted_rows = []
+        for row in rows:
+            input_text = None
+            output_text = None
+
+            # Process based on format
+            assert isinstance(config.data_config, DataConfig), "DataConfig must be initialized"
+            if config.data_config.data_format.value == "instruct":
+                input_text, output_text = self._process_instruct_format(row)
+            elif config.data_config.data_format.value == "dialog":
+                input_text, output_text = self._process_dialog_format(row)
+            else:
+                input_text, output_text = self._process_fallback_format(row)
+
+            if input_text and output_text:
+                formatted_text = self._format_text(input_text, output_text, provider_config)
+                formatted_rows.append({"text": formatted_text})
+
+        if not formatted_rows:
+            assert isinstance(config.data_config, DataConfig), "DataConfig must be initialized"
+            raise ValueError(
+                f"No valid input/output pairs found in the dataset for format: {config.data_config.data_format.value}"
+            )
+
+        return Dataset.from_list(formatted_rows)
+
+    def _preprocess_dataset(
+        self, ds: Dataset, tokenizer: AutoTokenizer, provider_config: HuggingFacePostTrainingConfig
+    ) -> Dataset:
+        """Preprocess the dataset with tokenizer."""
+
+        def tokenize_function(examples):
+            return tokenizer(
+                examples["text"],
+                padding=True,
+                truncation=True,
+                max_length=provider_config.max_seq_length,
+                return_tensors=None,
+            )
+
+        return ds.map(
+            tokenize_function,
+            batched=True,
+            remove_columns=ds.column_names,
+        )
+
+    async def _setup_data(self, dataset_id: str) -> list[dict[str, Any]]:
+        """Load dataset from llama stack dataset provider"""
+        try:
+            all_rows = await self.datasetio_api.iterrows(
+                dataset_id=dataset_id,
+                limit=-1,
+            )
+            if not isinstance(all_rows.data, list):
+                raise RuntimeError("Expected dataset data to be a list")
+            return all_rows.data
+        except Exception as e:
+            raise RuntimeError(f"Failed to load dataset: {str(e)}") from e
+
+    def _run_training_sync(
+        self,
+        model: str,
+        provider_config: dict[str, Any],
+        peft_config: LoraConfig | None,
+        config: dict[str, Any],
+        output_dir_path: Path | None,
+    ) -> None:
+        """Synchronous wrapper for running training process.
+        This method serves as a bridge between the multiprocessing Process and the async training function.
+        It creates a new event loop to run the async training process.
+        Args:
+            model: The model identifier to load
+            provider_config: Configuration specific to the HuggingFace provider
+            peft_config: Optional LoRA configuration
+            config: General training configuration
+            output_dir_path: Optional path to save the model
+        """
+        import asyncio
+
+        logger.info("Starting training process with async wrapper")
+        asyncio.run(
+            self._run_training(
+                model=model,
+                provider_config=provider_config,
+                peft_config=peft_config,
+                config=config,
+                output_dir_path=output_dir_path,
+            )
+        )
+
+    async def load_dataset(
+        self,
+        model: str,
+        config: TrainingConfig,
+        provider_config: HuggingFacePostTrainingConfig,
+    ) -> tuple[Dataset, Dataset, AutoTokenizer]:
+        """Load and prepare the dataset for training.
+        Args:
+            model: The model identifier to load
+            config: Training configuration
+            provider_config: Provider-specific configuration
+        Returns:
+            tuple: (train_dataset, eval_dataset, tokenizer)
+        """
+        # Validate data config
+        if not config.data_config:
+            raise ValueError("DataConfig is required for training")
+
+        # Load dataset
+        logger.info(f"Loading dataset: {config.data_config.dataset_id}")
+        rows = await self._setup_data(config.data_config.dataset_id)
+        if not self.validate_dataset_format(rows):
+            raise ValueError("Dataset is missing required fields: input_query, expected_answer, chat_completion_input")
+        logger.info(f"Loaded {len(rows)} rows from dataset")
+
+        # Initialize tokenizer
+        logger.info(f"Initializing tokenizer for model: {model}")
+        try:
+            tokenizer = AutoTokenizer.from_pretrained(model, **provider_config.model_specific_config)
+
+            # Set pad token to eos token if not present
+            # This is common for models that don't have a dedicated pad token
+            if not tokenizer.pad_token:
+                tokenizer.pad_token = tokenizer.eos_token
+
+            # Set padding side to right for causal language modeling
+            # This ensures that padding tokens don't interfere with the model's ability
+            # to predict the next token in the sequence
+            tokenizer.padding_side = "right"
+
+            # Set truncation side to right to keep the beginning of the sequence
+            # This is important for maintaining context and instruction format
+            tokenizer.truncation_side = "right"
+
+            # Set model max length to match provider config
+            # This ensures consistent sequence lengths across the training process
+            tokenizer.model_max_length = provider_config.max_seq_length
+
+            logger.info("Tokenizer initialized successfully")
+        except Exception as e:
+            raise RuntimeError(f"Failed to initialize tokenizer: {str(e)}") from e
+
+        # Create and preprocess dataset
+        logger.info("Creating and preprocessing dataset")
+        try:
+            ds = self._create_dataset(rows, config, provider_config)
+            ds = self._preprocess_dataset(ds, tokenizer, provider_config)
+            logger.info(f"Dataset created with {len(ds)} examples")
+        except Exception as e:
+            raise ValueError(f"Failed to create dataset: {str(e)}") from e
+
+        # Split dataset
+        logger.info("Splitting dataset into train and validation sets")
+        train_val_split = ds.train_test_split(test_size=0.1, seed=42)
+        train_dataset = train_val_split["train"]
+        eval_dataset = train_val_split["test"]
+        logger.info(f"Split dataset into {len(train_dataset)} training and {len(eval_dataset)} validation examples")
+
+        return train_dataset, eval_dataset, tokenizer
+
+    def load_model(
+        self,
+        model: str,
+        device: torch.device,
+        provider_config: HuggingFacePostTrainingConfig,
+    ) -> AutoModelForCausalLM:
+        """Load and initialize the model for training.
+        Args:
+            model: The model identifier to load
+            device: The device to load the model onto
+            provider_config: Provider-specific configuration
+        Returns:
+            The loaded and initialized model
+        Raises:
+            RuntimeError: If model loading fails
+        """
+        logger.info("Loading the base model")
+        try:
+            model_config = AutoConfig.from_pretrained(model, **provider_config.model_specific_config)
+            model_obj = AutoModelForCausalLM.from_pretrained(
+                model,
+                torch_dtype="auto" if device.type != "cpu" else "float32",
+                quantization_config=None,
+                config=model_config,
+                **provider_config.model_specific_config,
+            )
+            # Always move model to specified device
+            model_obj = model_obj.to(device)
+            logger.info(f"Model loaded and moved to device: {model_obj.device}")
+            return model_obj
+        except Exception as e:
+            raise RuntimeError(f"Failed to load model: {str(e)}") from e
+
+    def setup_training_args(
+        self,
+        config: TrainingConfig,
+        provider_config: HuggingFacePostTrainingConfig,
+        device: torch.device,
+        output_dir_path: Path | None,
+        steps_per_epoch: int,
+    ) -> SFTConfig:
+        """Setup training arguments.
+        Args:
+            config: Training configuration
+            provider_config: Provider-specific configuration
+            device: The device to train on
+            output_dir_path: Optional path to save the model
+            steps_per_epoch: Number of steps per epoch
+        Returns:
+            Configured SFTConfig object
+        """
+        logger.info("Configuring training arguments")
+        lr = 2e-5
+        if config.optimizer_config:
+            lr = config.optimizer_config.lr
+            logger.info(f"Using custom learning rate: {lr}")
+
+        # Validate data config
+        if not config.data_config:
+            raise ValueError("DataConfig is required for training")
+        data_config = config.data_config
+
+        # Calculate steps
+        total_steps = steps_per_epoch * config.n_epochs
+        max_steps = min(config.max_steps_per_epoch, total_steps)
+        eval_steps = max(1, steps_per_epoch // 10)  # Evaluate 10 times per epoch
+        save_steps = max(1, steps_per_epoch // 5)  # Save 5 times per epoch
+        logging_steps = max(1, steps_per_epoch // 50)  # Log 50 times per epoch
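+        # Worked example (hypothetical numbers): with 1,000 training examples and a batch
+        # size of 8, steps_per_epoch is 125, giving eval_steps=12, save_steps=25 and
+        # logging_steps=2.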
+
+        logger.info("Training configuration:")
+        logger.info(f"- Steps per epoch: {steps_per_epoch}")
+        logger.info(f"- Total steps: {total_steps}")
+        logger.info(f"- Max steps: {max_steps}")
+        logger.info(f"- Eval steps: {eval_steps}")
+        logger.info(f"- Save steps: {save_steps}")
+        logger.info(f"- Logging steps: {logging_steps}")
+
+        # Configure save strategy
+        save_strategy = "no"
+        if output_dir_path:
+            save_strategy = "steps"
+            logger.info(f"Will save checkpoints to {output_dir_path}")
+
+        return SFTConfig(
+            max_steps=max_steps,
+            output_dir=str(output_dir_path) if output_dir_path is not None else None,
+            num_train_epochs=config.n_epochs,
+            per_device_train_batch_size=data_config.batch_size,
+            fp16=device.type == "cuda",
+            bf16=False,  # Causes CPU issues.
+            eval_strategy="steps",
+            use_cpu=True if device.type == "cpu" and not torch.backends.mps.is_available() else False,
+            save_strategy=save_strategy,
+            report_to="none",
+            max_seq_length=provider_config.max_seq_length,
+            gradient_accumulation_steps=config.gradient_accumulation_steps,
+            gradient_checkpointing=provider_config.gradient_checkpointing,
+            learning_rate=lr,
+            warmup_ratio=provider_config.warmup_ratio,
+            weight_decay=provider_config.weight_decay,
+            remove_unused_columns=False,
+            dataloader_pin_memory=provider_config.dataloader_pin_memory,
+            dataloader_num_workers=provider_config.dataloader_num_workers,
+            dataset_text_field="text",
+            packing=False,
+            load_best_model_at_end=True if output_dir_path else False,
+            metric_for_best_model="eval_loss",
+            greater_is_better=False,
+            eval_steps=eval_steps,
+            save_steps=save_steps,
+            logging_steps=logging_steps,
+        )
+
+    def save_model(
+        self,
+        model_obj: AutoModelForCausalLM,
+        trainer: SFTTrainer,
+        peft_config: LoraConfig | None,
+        output_dir_path: Path,
+    ) -> None:
+        """Save the trained model.
+        Args:
+            model_obj: The model to save
+            trainer: The trainer instance
+            peft_config: Optional LoRA configuration
+            output_dir_path: Path to save the model
+        """
+        logger.info("Saving final model")
+        model_obj.config.use_cache = True
+
+        if peft_config:
+            logger.info("Merging LoRA weights with base model")
+            model_obj = trainer.model.merge_and_unload()
+        else:
+            model_obj = trainer.model
+
+        save_path = output_dir_path / "merged_model"
+        logger.info(f"Saving model to {save_path}")
+        model_obj.save_pretrained(save_path)
+
+    async def _run_training(
+        self,
+        model: str,
+        provider_config: dict[str, Any],
+        peft_config: LoraConfig | None,
+        config: dict[str, Any],
+        output_dir_path: Path | None,
+    ) -> None:
+        """Run the training process with signal handling."""
+
+        def signal_handler(signum, frame):
+            """Handle termination signals gracefully."""
+            logger.info(f"Received signal {signum}, initiating graceful shutdown")
+            sys.exit(0)
+
+        signal.signal(signal.SIGTERM, signal_handler)
+        signal.signal(signal.SIGINT, signal_handler)
+
+        # Convert config dicts back to objects
+        logger.info("Initializing configuration objects")
+        provider_config_obj = HuggingFacePostTrainingConfig(**provider_config)
+        config_obj = TrainingConfig(**config)
+
+        # Initialize and validate device
+        device = setup_torch_device(provider_config_obj.device)
+        logger.info(f"Using device '{device}'")
+
+        # Load dataset and tokenizer
+        train_dataset, eval_dataset, tokenizer = await self.load_dataset(model, config_obj, provider_config_obj)
+
+        # Calculate steps per epoch
+        if not config_obj.data_config:
+            raise ValueError("DataConfig is required for training")
+        steps_per_epoch = len(train_dataset) // config_obj.data_config.batch_size
+
+        # Setup training arguments
+        training_args = self.setup_training_args(
+            config_obj,
+            provider_config_obj,
+            device,
+            output_dir_path,
+            steps_per_epoch,
+        )
+
+        # Load model
+        model_obj = self.load_model(model, device, provider_config_obj)
+
+        # Initialize trainer
+        logger.info("Initializing SFTTrainer")
+        trainer = SFTTrainer(
+            model=model_obj,
+            train_dataset=train_dataset,
+            eval_dataset=eval_dataset,
+            peft_config=peft_config,
+            args=training_args,
+        )
+
+        try:
+            # Train
+            logger.info("Starting training")
+            trainer.train()
+            logger.info("Training completed successfully")
+
+            # Save final model if output directory is provided
+            if output_dir_path:
+                self.save_model(model_obj, trainer, peft_config, output_dir_path)
+
+        finally:
+            # Clean up resources
+            logger.info("Cleaning up resources")
+            if hasattr(trainer, "model"):
+                evacuate_model_from_device(trainer.model, device.type)
+            del trainer
+            gc.collect()
+            logger.info("Cleanup completed")
+
+    async def train(
+        self,
+        model: str,
+        output_dir: str | None,
+        job_uuid: str,
+        lora_config: LoraFinetuningConfig,
+        config: TrainingConfig,
+        provider_config: HuggingFacePostTrainingConfig,
+    ) -> tuple[dict[str, Any], list[Checkpoint] | None]:
+        """Train a model using HuggingFace's SFTTrainer"""
+        # Initialize and validate device
+        device = setup_torch_device(provider_config.device)
+        logger.info(f"Using device '{device}'")
+
+        output_dir_path = None
+        if output_dir:
+            output_dir_path = Path(output_dir)
+
+        # Track memory stats
+        memory_stats = {
+            "initial": get_memory_stats(device),
+            "after_training": None,
+            "final": None,
+        }
+
+        # Configure LoRA
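+        # lora_config.rank and lora_config.alpha come from the request; dropout is fixed at
+        # 0.1 below and target_modules maps directly from lora_config.lora_attn_modules
+        # (a typical, purely illustrative value would be ["q_proj", "v_proj"]).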
+        peft_config = None
+        if lora_config:
+            peft_config = LoraConfig(
+                lora_alpha=lora_config.alpha,
+                lora_dropout=0.1,
+                r=lora_config.rank,
+                bias="none",
+                task_type="CAUSAL_LM",
+                target_modules=lora_config.lora_attn_modules,
+            )
+
+        # Validate data config
+        if not config.data_config:
+            raise ValueError("DataConfig is required for training")
+
+        # Train in a separate process
+        logger.info("Starting training in separate process")
+        try:
+            # Set multiprocessing start method to 'spawn' for CUDA/MPS compatibility
+            if device.type in ["cuda", "mps"]:
+                multiprocessing.set_start_method("spawn", force=True)
+
+            process = multiprocessing.Process(
+                target=self._run_training_sync,
+                kwargs={
+                    "model": model,
+                    "provider_config": provider_config.model_dump(),
+                    "peft_config": peft_config,
+                    "config": config.model_dump(),
+                    "output_dir_path": output_dir_path,
+                },
+            )
+            process.start()
+
+            # Monitor the process
+            while process.is_alive():
+                process.join(timeout=1)  # Check every second
+
+            # Get the return code
+            if process.exitcode != 0:
+                raise RuntimeError(f"Training failed with exit code {process.exitcode}")
+
+            memory_stats["after_training"] = get_memory_stats(device)
+
+            checkpoints = None
+            if output_dir_path:
+                # Create checkpoint
+                checkpoint = Checkpoint(
+                    identifier=f"{model}-sft-{config.n_epochs}",
+                    created_at=datetime.now(timezone.utc),
+                    epoch=config.n_epochs,
+                    post_training_job_id=job_uuid,
+                    path=str(output_dir_path / "merged_model"),
+                )
+                checkpoints = [checkpoint]
+
+            return memory_stats, checkpoints
+        finally:
+            memory_stats["final"] = get_memory_stats(device)
+            gc.collect()
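+
+
+# Example usage (hypothetical sketch, for illustration only — in practice the HuggingFace
+# post-training provider constructs and drives this recipe):
+#
+#   recipe = HFFinetuningSingleDevice(job_uuid="job-123", datasetio_api=datasetio, datasets_api=datasets)
+#   memory_stats, checkpoints = await recipe.train(
+#       model="<hf-model-id>",
+#       output_dir="/tmp/finetune-out",
+#       job_uuid="job-123",
+#       lora_config=LoraFinetuningConfig(...),
+#       config=TrainingConfig(...),
+#       provider_config=HuggingFacePostTrainingConfig(...),
+#   )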
diff --git a/llama_stack/providers/inline/post_training/torchtune/__init__.py b/llama_stack/providers/inline/post_training/torchtune/__init__.py
index ca7801be7..7a2f9eba2 100644
--- a/llama_stack/providers/inline/post_training/torchtune/__init__.py
+++ b/llama_stack/providers/inline/post_training/torchtune/__init__.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -15,7 +15,7 @@ from .config import TorchtunePostTrainingConfig
 
 async def get_provider_impl(
     config: TorchtunePostTrainingConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .post_training import TorchtunePostTrainingImpl
 
diff --git a/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py b/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py
index fcadd0884..af8bd2765 100644
--- a/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py
+++ b/llama_stack/providers/inline/post_training/torchtune/common/checkpointer.py
@@ -8,7 +8,7 @@ import json
 import os
 import shutil
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any
 
 import torch
 from safetensors.torch import save_file
@@ -34,7 +34,7 @@ class TorchtuneCheckpointer:
         model_id: str,
         training_algorithm: str,
         checkpoint_dir: str,
-        checkpoint_files: List[str],
+        checkpoint_files: list[str],
         output_dir: str,
         model_type: str,
     ):
@@ -54,11 +54,11 @@ class TorchtuneCheckpointer:
         # get ckpt paths
         self._checkpoint_path = Path.joinpath(self._checkpoint_dir, self._checkpoint_file)
 
-    def load_checkpoint(self) -> Dict[str, Any]:
+    def load_checkpoint(self) -> dict[str, Any]:
         """
         Load Meta checkpoint from file. Currently only loading from a single file is supported.
         """
-        state_dict: Dict[str, Any] = {}
+        state_dict: dict[str, Any] = {}
         model_state_dict = safe_torch_load(self._checkpoint_path)
         if self._model_type == ModelType.LLAMA3_VISION:
             from torchtune.models.llama3_2_vision._convert_weights import (
@@ -82,7 +82,7 @@ class TorchtuneCheckpointer:
 
     def save_checkpoint(
         self,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         epoch: int,
         adapter_only: bool = False,
         checkpoint_format: str | None = None,
@@ -100,7 +100,7 @@ class TorchtuneCheckpointer:
     def _save_meta_format_checkpoint(
         self,
         model_file_path: Path,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
         adapter_only: bool = False,
     ) -> None:
         model_file_path.mkdir(parents=True, exist_ok=True)
@@ -168,7 +168,7 @@ class TorchtuneCheckpointer:
     def _save_hf_format_checkpoint(
         self,
         model_file_path: Path,
-        state_dict: Dict[str, Any],
+        state_dict: dict[str, Any],
     ) -> None:
         # the config.json file contains model params needed for state dict conversion
         config = json.loads(Path.joinpath(self._checkpoint_dir.parent, "config.json").read_text())
@@ -179,7 +179,7 @@ class TorchtuneCheckpointer:
         repo_id_path = Path.joinpath(self._checkpoint_dir.parent, REPO_ID_FNAME).with_suffix(".json")
         self.repo_id = None
         if repo_id_path.exists():
-            with open(repo_id_path, "r") as json_file:
+            with open(repo_id_path) as json_file:
                 data = json.load(json_file)
                 self.repo_id = data.get("repo_id")
 
diff --git a/llama_stack/providers/inline/post_training/torchtune/common/utils.py b/llama_stack/providers/inline/post_training/torchtune/common/utils.py
index a040ca1b0..f0fa052a2 100644
--- a/llama_stack/providers/inline/post_training/torchtune/common/utils.py
+++ b/llama_stack/providers/inline/post_training/torchtune/common/utils.py
@@ -10,7 +10,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Callable, Dict
+from collections.abc import Callable
 
 import torch
 from pydantic import BaseModel
@@ -35,7 +35,7 @@ class ModelConfig(BaseModel):
     checkpoint_type: str
 
 
-MODEL_CONFIGS: Dict[str, ModelConfig] = {
+MODEL_CONFIGS: dict[str, ModelConfig] = {
     "Llama3.2-3B-Instruct": ModelConfig(
         model_definition=lora_llama3_2_3b,
         tokenizer_type=llama3_tokenizer,
@@ -48,7 +48,7 @@ MODEL_CONFIGS: Dict[str, ModelConfig] = {
     ),
 }
 
-DATA_FORMATS: Dict[str, Transform] = {
+DATA_FORMATS: dict[str, Transform] = {
     "instruct": InputOutputToMessages,
     "dialog": ShareGPTToMessages,
 }
diff --git a/llama_stack/providers/inline/post_training/torchtune/config.py b/llama_stack/providers/inline/post_training/torchtune/config.py
index ee3504f9e..f3ce874aa 100644
--- a/llama_stack/providers/inline/post_training/torchtune/config.py
+++ b/llama_stack/providers/inline/post_training/torchtune/config.py
@@ -4,17 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal
 
 from pydantic import BaseModel
 
 
 class TorchtunePostTrainingConfig(BaseModel):
-    torch_seed: Optional[int] = None
-    checkpoint_format: Optional[Literal["meta", "huggingface"]] = "meta"
+    torch_seed: int | None = None
+    checkpoint_format: Literal["meta", "huggingface"] | None = "meta"
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "checkpoint_format": "meta",
         }
diff --git a/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py b/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
index 6b607f1c7..96dd8b8dd 100644
--- a/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
+++ b/llama_stack/providers/inline/post_training/torchtune/datasets/format_adapter.py
@@ -11,7 +11,8 @@
 # LICENSE file in the root directory of this source tree.
 
 import json
-from typing import Any, Mapping
+from collections.abc import Mapping
+from typing import Any
 
 from llama_stack.providers.utils.common.data_schema_validator import ColumnName
 
diff --git a/llama_stack/providers/inline/post_training/torchtune/datasets/sft.py b/llama_stack/providers/inline/post_training/torchtune/datasets/sft.py
index 050996860..ae7faf31e 100644
--- a/llama_stack/providers/inline/post_training/torchtune/datasets/sft.py
+++ b/llama_stack/providers/inline/post_training/torchtune/datasets/sft.py
@@ -10,7 +10,8 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from typing import Any, Dict, List, Mapping
+from collections.abc import Mapping
+from typing import Any
 
 import numpy as np
 from torch.utils.data import Dataset
@@ -27,7 +28,7 @@ from llama_stack.providers.inline.post_training.torchtune.datasets.format_adapte
 class SFTDataset(Dataset):
     def __init__(
         self,
-        rows: List[Dict[str, Any]],
+        rows: list[dict[str, Any]],
         message_transform: Transform,
         model_transform: Transform,
         dataset_type: str,
@@ -40,11 +41,11 @@ class SFTDataset(Dataset):
     def __len__(self):
         return len(self._rows)
 
-    def __getitem__(self, index: int) -> Dict[str, Any]:
+    def __getitem__(self, index: int) -> dict[str, Any]:
         sample = self._rows[index]
         return self._prepare_sample(sample)
 
-    def _prepare_sample(self, sample: Mapping[str, Any]) -> Dict[str, Any]:
+    def _prepare_sample(self, sample: Mapping[str, Any]) -> dict[str, Any]:
         if self._dataset_type == "instruct":
             sample = llama_stack_instruct_to_torchtune_instruct(sample)
         elif self._dataset_type == "dialog":
diff --git a/llama_stack/providers/inline/post_training/torchtune/post_training.py b/llama_stack/providers/inline/post_training/torchtune/post_training.py
index cc1a6a5fe..c7d8d6758 100644
--- a/llama_stack/providers/inline/post_training/torchtune/post_training.py
+++ b/llama_stack/providers/inline/post_training/torchtune/post_training.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from enum import Enum
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
@@ -64,7 +64,7 @@ class TorchtunePostTrainingImpl:
         )
 
     @staticmethod
-    def _resources_stats_to_artifact(resources_stats: Dict[str, Any]) -> JobArtifact:
+    def _resources_stats_to_artifact(resources_stats: dict[str, Any]) -> JobArtifact:
         return JobArtifact(
             type=TrainingArtifactType.RESOURCES_STATS.value,
             name=TrainingArtifactType.RESOURCES_STATS.value,
@@ -75,11 +75,11 @@ class TorchtunePostTrainingImpl:
         self,
         job_uuid: str,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
         model: str,
-        checkpoint_dir: Optional[str],
-        algorithm_config: Optional[AlgorithmConfig],
+        checkpoint_dir: str | None,
+        algorithm_config: AlgorithmConfig | None,
     ) -> PostTrainingJob:
         if isinstance(algorithm_config, LoraFinetuningConfig):
 
@@ -121,8 +121,8 @@ class TorchtunePostTrainingImpl:
         finetuned_model: str,
         algorithm_config: DPOAlignmentConfig,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
     ) -> PostTrainingJob: ...
 
     async def get_training_jobs(self) -> ListPostTrainingJobsResponse:
@@ -144,7 +144,7 @@ class TorchtunePostTrainingImpl:
         return data[0] if data else None
 
     @webmethod(route="/post-training/job/status")
-    async def get_training_job_status(self, job_uuid: str) -> Optional[PostTrainingJobStatusResponse]:
+    async def get_training_job_status(self, job_uuid: str) -> PostTrainingJobStatusResponse | None:
         job = self._scheduler.get_job(job_uuid)
 
         match job.status:
@@ -175,6 +175,6 @@ class TorchtunePostTrainingImpl:
         self._scheduler.cancel(job_uuid)
 
     @webmethod(route="/post-training/job/artifacts")
-    async def get_training_job_artifacts(self, job_uuid: str) -> Optional[PostTrainingJobArtifactsResponse]:
+    async def get_training_job_artifacts(self, job_uuid: str) -> PostTrainingJobArtifactsResponse | None:
         job = self._scheduler.get_job(job_uuid)
         return PostTrainingJobArtifactsResponse(job_uuid=job_uuid, checkpoints=self._get_checkpoints(job))
diff --git a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
index 04bf86b97..f56dd2499 100644
--- a/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
+++ b/llama_stack/providers/inline/post_training/torchtune/recipes/lora_finetuning_single_device.py
@@ -4,14 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-import gc
 import logging
 import os
 import time
 from datetime import datetime, timezone
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any
 
 import torch
 from torch import nn
@@ -39,7 +38,6 @@ from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.post_training import (
     Checkpoint,
     DataConfig,
-    EfficiencyConfig,
     LoraFinetuningConfig,
     OptimizerConfig,
     QATFinetuningConfig,
@@ -48,9 +46,7 @@ from llama_stack.apis.post_training import (
 from llama_stack.distribution.utils.config_dirs import DEFAULT_CHECKPOINT_DIR
 from llama_stack.distribution.utils.model_utils import model_local_dir
 from llama_stack.models.llama.sku_list import resolve_model
-from llama_stack.providers.inline.post_training.common.validator import (
-    validate_input_dataset_schema,
-)
+from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
 from llama_stack.providers.inline.post_training.torchtune.common import utils
 from llama_stack.providers.inline.post_training.torchtune.common.checkpointer import (
     TorchtuneCheckpointer,
@@ -83,18 +79,16 @@ class LoraFinetuningSingleDevice:
         config: TorchtunePostTrainingConfig,
         job_uuid: str,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
         model: str,
-        checkpoint_dir: Optional[str],
+        checkpoint_dir: str | None,
         algorithm_config: LoraFinetuningConfig | QATFinetuningConfig | None,
         datasetio_api: DatasetIO,
         datasets_api: Datasets,
     ) -> None:
         assert isinstance(training_config.data_config, DataConfig), "DataConfig must be initialized"
 
-        assert isinstance(training_config.efficiency_config, EfficiencyConfig), "EfficiencyConfig must be initialized"
-
         self.job_uuid = job_uuid
         self.training_config = training_config
         if not isinstance(algorithm_config, LoraFinetuningConfig):
@@ -159,7 +153,7 @@ class LoraFinetuningSingleDevice:
         self.datasets_api = datasets_api
 
     async def load_checkpoint(self):
-        def get_checkpoint_files(checkpoint_dir: str) -> List[str]:
+        def get_checkpoint_files(checkpoint_dir: str) -> list[str]:
             try:
                 # List all files in the given directory
                 files = os.listdir(checkpoint_dir)
@@ -253,8 +247,8 @@ class LoraFinetuningSingleDevice:
         self,
         enable_activation_checkpointing: bool,
         enable_activation_offloading: bool,
-        base_model_state_dict: Dict[str, Any],
-        lora_weights_state_dict: Optional[Dict[str, Any]] = None,
+        base_model_state_dict: dict[str, Any],
+        lora_weights_state_dict: dict[str, Any] | None = None,
     ) -> nn.Module:
         self._lora_rank = self.algorithm_config.rank
         self._lora_alpha = self.algorithm_config.alpha
@@ -338,7 +332,7 @@ class LoraFinetuningSingleDevice:
         tokenizer: Llama3Tokenizer,
         shuffle: bool,
         batch_size: int,
-    ) -> Tuple[DistributedSampler, DataLoader]:
+    ) -> tuple[DistributedSampler, DataLoader]:
         async def fetch_rows(dataset_id: str):
             return await self.datasetio_api.iterrows(
                 dataset_id=dataset_id,
@@ -348,11 +342,9 @@ class LoraFinetuningSingleDevice:
         all_rows = await fetch_rows(dataset_id)
         rows = all_rows.data
 
-        await validate_input_dataset_schema(
-            datasets_api=self.datasets_api,
-            dataset_id=dataset_id,
-            dataset_type=self._data_format.value,
-        )
+        # TODO (xiyan): validate dataset schema
+        # dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
+
         data_transform = await utils.get_data_transform(self._data_format)
         ds = SFTDataset(
             rows,
@@ -435,7 +427,7 @@ class LoraFinetuningSingleDevice:
             checkpoint_format=self._checkpoint_format,
         )
 
-    async def _loss_step(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
+    async def _loss_step(self, batch: dict[str, torch.Tensor]) -> torch.Tensor:
         # Shape [b, s], needed for the loss not the model
         labels = batch.pop("labels")
         # run model
@@ -457,7 +449,7 @@ class LoraFinetuningSingleDevice:
 
         return loss
 
-    async def train(self) -> Tuple[Dict[str, Any], List[Checkpoint]]:
+    async def train(self) -> tuple[dict[str, Any], list[Checkpoint]]:
         """
         The core training loop.
         """
@@ -469,7 +461,7 @@ class LoraFinetuningSingleDevice:
 
         # training artifacts
         checkpoints = []
-        memory_stats: Dict[str, Any] = {}
+        memory_stats: dict[str, Any] = {}
 
         # self.epochs_run should be non-zero when we're resuming from a checkpoint
         for curr_epoch in range(self.epochs_run, self.total_epochs):
@@ -562,15 +554,11 @@ class LoraFinetuningSingleDevice:
             checkpoints.append(checkpoint)
 
         # clean up the memory after training finishes
-        if self._device.type != "cpu":
-            self._model.to("cpu")
-            torch.cuda.empty_cache()
-        del self._model
-        gc.collect()
+        evacuate_model_from_device(self._model, self._device.type)
 
         return (memory_stats, checkpoints)
 
-    async def validation(self) -> Tuple[float, float]:
+    async def validation(self) -> tuple[float, float]:
         total_loss = 0.0
         total_tokens = 0
         log.info("Starting validation...")
diff --git a/llama_stack/providers/inline/safety/code_scanner/__init__.py b/llama_stack/providers/inline/safety/code_scanner/__init__.py
index 62975a963..68e32b747 100644
--- a/llama_stack/providers/inline/safety/code_scanner/__init__.py
+++ b/llama_stack/providers/inline/safety/code_scanner/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from .config import CodeScannerConfig
 
 
-async def get_provider_impl(config: CodeScannerConfig, deps: Dict[str, Any]):
+async def get_provider_impl(config: CodeScannerConfig, deps: dict[str, Any]):
     from .code_scanner import MetaReferenceCodeScannerSafetyImpl
 
     impl = MetaReferenceCodeScannerSafetyImpl(config, deps)
diff --git a/llama_stack/providers/inline/safety/code_scanner/code_scanner.py b/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
index 606d11d2c..be05ee436 100644
--- a/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
+++ b/llama_stack/providers/inline/safety/code_scanner/code_scanner.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import logging
-from typing import Any, Dict, List
+from typing import Any
 
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import (
@@ -48,8 +48,8 @@ class MetaReferenceCodeScannerSafetyImpl(Safety):
     async def run_shield(
         self,
         shield_id: str,
-        messages: List[Message],
-        params: Dict[str, Any] = None,
+        messages: list[Message],
+        params: dict[str, Any] = None,
     ) -> RunShieldResponse:
         shield = await self.shield_store.get_shield(shield_id)
         if not shield:
diff --git a/llama_stack/providers/inline/safety/code_scanner/config.py b/llama_stack/providers/inline/safety/code_scanner/config.py
index 1d880ee9c..66eb8e368 100644
--- a/llama_stack/providers/inline/safety/code_scanner/config.py
+++ b/llama_stack/providers/inline/safety/code_scanner/config.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class CodeScannerConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/inline/safety/llama_guard/__init__.py b/llama_stack/providers/inline/safety/llama_guard/__init__.py
index a4263b169..8865cc344 100644
--- a/llama_stack/providers/inline/safety/llama_guard/__init__.py
+++ b/llama_stack/providers/inline/safety/llama_guard/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from .config import LlamaGuardConfig
 
 
-async def get_provider_impl(config: LlamaGuardConfig, deps: Dict[str, Any]):
+async def get_provider_impl(config: LlamaGuardConfig, deps: dict[str, Any]):
     from .llama_guard import LlamaGuardSafetyImpl
 
     assert isinstance(config, LlamaGuardConfig), f"Unexpected config type: {type(config)}"
diff --git a/llama_stack/providers/inline/safety/llama_guard/config.py b/llama_stack/providers/inline/safety/llama_guard/config.py
index 53849ab33..412e7218d 100644
--- a/llama_stack/providers/inline/safety/llama_guard/config.py
+++ b/llama_stack/providers/inline/safety/llama_guard/config.py
@@ -4,16 +4,16 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class LlamaGuardConfig(BaseModel):
-    excluded_categories: List[str] = []
+    excluded_categories: list[str] = []
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "excluded_categories": [],
         }
diff --git a/llama_stack/providers/inline/safety/llama_guard/llama_guard.py b/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
index 2ab16f986..937301c2e 100644
--- a/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
+++ b/llama_stack/providers/inline/safety/llama_guard/llama_guard.py
@@ -6,7 +6,7 @@
 
 import re
 from string import Template
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from llama_stack.apis.common.content_types import ImageContentItem, TextContentItem
 from llama_stack.apis.inference import (
@@ -149,8 +149,8 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
     async def run_shield(
         self,
         shield_id: str,
-        messages: List[Message],
-        params: Dict[str, Any] = None,
+        messages: list[Message],
+        params: dict[str, Any] = None,
     ) -> RunShieldResponse:
         shield = await self.shield_store.get_shield(shield_id)
         if not shield:
@@ -177,7 +177,7 @@ class LlamaGuardShield:
         self,
         model: str,
         inference_api: Inference,
-        excluded_categories: Optional[List[str]] = None,
+        excluded_categories: list[str] | None = None,
     ):
         if excluded_categories is None:
             excluded_categories = []
@@ -193,7 +193,7 @@ class LlamaGuardShield:
         self.inference_api = inference_api
         self.excluded_categories = excluded_categories
 
-    def check_unsafe_response(self, response: str) -> Optional[str]:
+    def check_unsafe_response(self, response: str) -> str | None:
         match = re.match(r"^unsafe\n(.*)$", response)
         if match:
             # extracts the unsafe code
@@ -202,7 +202,7 @@ class LlamaGuardShield:
 
         return None
 
-    def get_safety_categories(self) -> List[str]:
+    def get_safety_categories(self) -> list[str]:
         excluded_categories = self.excluded_categories
         if set(excluded_categories) == set(SAFETY_CATEGORIES_TO_CODE_MAP.values()):
             excluded_categories = []
@@ -218,7 +218,7 @@ class LlamaGuardShield:
 
         return final_categories
 
-    def validate_messages(self, messages: List[Message]) -> None:
+    def validate_messages(self, messages: list[Message]) -> None:
         if len(messages) == 0:
             raise ValueError("Messages must not be empty")
         if messages[0].role != Role.user.value:
@@ -229,7 +229,7 @@ class LlamaGuardShield:
 
         return messages
 
-    async def run(self, messages: List[Message]) -> RunShieldResponse:
+    async def run(self, messages: list[Message]) -> RunShieldResponse:
         messages = self.validate_messages(messages)
 
         if self.model == CoreModelId.llama_guard_3_11b_vision.value:
@@ -247,10 +247,10 @@ class LlamaGuardShield:
         content = content.strip()
         return self.get_shield_response(content)
 
-    def build_text_shield_input(self, messages: List[Message]) -> UserMessage:
+    def build_text_shield_input(self, messages: list[Message]) -> UserMessage:
         return UserMessage(content=self.build_prompt(messages))
 
-    def build_vision_shield_input(self, messages: List[Message]) -> UserMessage:
+    def build_vision_shield_input(self, messages: list[Message]) -> UserMessage:
         conversation = []
         most_recent_img = None
 
@@ -284,7 +284,7 @@ class LlamaGuardShield:
 
         return UserMessage(content=prompt)
 
-    def build_prompt(self, messages: List[Message]) -> str:
+    def build_prompt(self, messages: list[Message]) -> str:
         categories = self.get_safety_categories()
         categories_str = "\n".join(categories)
         conversations_str = "\n\n".join(
diff --git a/llama_stack/providers/inline/safety/prompt_guard/__init__.py b/llama_stack/providers/inline/safety/prompt_guard/__init__.py
index 747f34421..1761c9138 100644
--- a/llama_stack/providers/inline/safety/prompt_guard/__init__.py
+++ b/llama_stack/providers/inline/safety/prompt_guard/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
-from .config import PromptGuardConfig  # noqa: F401
+from .config import PromptGuardConfig
 
 
-async def get_provider_impl(config: PromptGuardConfig, deps: Dict[str, Any]):
+async def get_provider_impl(config: PromptGuardConfig, deps: dict[str, Any]):
     from .prompt_guard import PromptGuardSafetyImpl
 
     impl = PromptGuardSafetyImpl(config, deps)
diff --git a/llama_stack/providers/inline/safety/prompt_guard/config.py b/llama_stack/providers/inline/safety/prompt_guard/config.py
index 76bd5978d..69ea512c5 100644
--- a/llama_stack/providers/inline/safety/prompt_guard/config.py
+++ b/llama_stack/providers/inline/safety/prompt_guard/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, field_validator
 
@@ -26,7 +26,7 @@ class PromptGuardConfig(BaseModel):
         return v
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "guard_type": "injection",
         }
diff --git a/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py b/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
index fce3e3d14..ff87889ea 100644
--- a/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
+++ b/llama_stack/providers/inline/safety/prompt_guard/prompt_guard.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import logging
-from typing import Any, Dict, List
+from typing import Any
 
 import torch
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
@@ -49,8 +49,8 @@ class PromptGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
     async def run_shield(
         self,
         shield_id: str,
-        messages: List[Message],
-        params: Dict[str, Any] = None,
+        messages: list[Message],
+        params: dict[str, Any] = None,
     ) -> RunShieldResponse:
         shield = await self.shield_store.get_shield(shield_id)
         if not shield:
@@ -75,13 +75,15 @@ class PromptGuardShield:
         self.temperature = temperature
         self.threshold = threshold
 
-        self.device = "cuda"
+        self.device = "cpu"
+        if torch.cuda.is_available():
+            self.device = "cuda"
 
         # load model and tokenizer
         self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
         self.model = AutoModelForSequenceClassification.from_pretrained(model_dir, device_map=self.device)
 
-    async def run(self, messages: List[Message]) -> RunShieldResponse:
+    async def run(self, messages: list[Message]) -> RunShieldResponse:
         message = messages[-1]
         text = interleaved_content_as_str(message.content)
 
diff --git a/llama_stack/providers/inline/scoring/basic/__init__.py b/llama_stack/providers/inline/scoring/basic/__init__.py
index 4898b973a..d9d150b1a 100644
--- a/llama_stack/providers/inline/scoring/basic/__init__.py
+++ b/llama_stack/providers/inline/scoring/basic/__init__.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -12,7 +12,7 @@ from .config import BasicScoringConfig
 
 async def get_provider_impl(
     config: BasicScoringConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .scoring import BasicScoringImpl
 
diff --git a/llama_stack/providers/inline/scoring/basic/config.py b/llama_stack/providers/inline/scoring/basic/config.py
index 5866be359..e9c7fb451 100644
--- a/llama_stack/providers/inline/scoring/basic/config.py
+++ b/llama_stack/providers/inline/scoring/basic/config.py
@@ -3,12 +3,12 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class BasicScoringConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/inline/scoring/basic/scoring.py b/llama_stack/providers/inline/scoring/basic/scoring.py
index 9a45f7139..09f89be5e 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
@@ -66,7 +66,7 @@ class BasicScoringImpl(
 
     async def shutdown(self) -> None: ...
 
-    async def list_scoring_functions(self) -> List[ScoringFn]:
+    async def list_scoring_functions(self) -> list[ScoringFn]:
         scoring_fn_defs_list = [
             fn_def for impl in self.scoring_fn_id_impls.values() for fn_def in impl.get_supported_scoring_fn_defs()
         ]
@@ -82,7 +82,7 @@ class BasicScoringImpl(
     async def score_batch(
         self,
         dataset_id: str,
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
         save_results_dataset: bool = False,
     ) -> ScoreBatchResponse:
         dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
@@ -107,8 +107,8 @@ class BasicScoringImpl(
 
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
+        input_rows: list[dict[str, Any]],
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
     ) -> ScoreResponse:
         res = {}
         for scoring_fn_id in scoring_functions.keys():
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/bfcl_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/bfcl_scoring_fn.py
index f37780f3e..b29620be2 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/bfcl_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/bfcl_scoring_fn.py
@@ -6,7 +6,7 @@
 
 import json
 import re
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
@@ -17,7 +17,7 @@ from ..utils.bfcl.checker import ast_checker, is_empty_output
 from .fn_defs.bfcl import bfcl
 
 
-def postprocess(x: Dict[str, Any], test_category: str) -> Dict[str, Any]:
+def postprocess(x: dict[str, Any], test_category: str) -> dict[str, Any]:
     contain_func_call = False
     error = None
     error_type = None
@@ -52,11 +52,11 @@ def postprocess(x: Dict[str, Any], test_category: str) -> Dict[str, Any]:
     }
 
 
-def gen_valid(x: Dict[str, Any]) -> Dict[str, float]:
+def gen_valid(x: dict[str, Any]) -> dict[str, float]:
     return {"valid": x["valid"]}
 
 
-def gen_relevance_acc(x: Dict[str, Any]) -> Dict[str, float]:
+def gen_relevance_acc(x: dict[str, Any]) -> dict[str, float]:
     # This function serves for both relevance and irrelevance tests, which share the exact opposite logic.
     # If `test_category` is "irrelevance", the model is expected to output no function call.
     # No function call means either the AST decoding fails (a error message is generated) or the decoded AST does not contain any function call (such as a empty list, `[]`).
@@ -78,9 +78,9 @@ class BFCLScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = "bfcl",
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = "bfcl",
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         test_category = re.sub(r"_[0-9_-]+$", "", input_row["id"])
         score_result = postprocess(input_row, test_category)
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
index 84ca55732..b87974d08 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/docvqa_scoring_fn.py
@@ -6,7 +6,7 @@
 
 import json
 import re
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
@@ -228,9 +228,9 @@ class DocVQAScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = "docvqa",
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = "docvqa",
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         expected_answers = json.loads(input_row["expected_answer"])
         generated_answer = input_row["generated_answer"]
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
index 0bd6bdd48..60804330f 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/equality_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
@@ -26,9 +26,9 @@ class EqualityScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = "equality",
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = "equality",
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         assert "expected_answer" in input_row, "Expected answer not found in input row."
         assert "generated_answer" in input_row, "Generated answer not found in input row."
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
index 6ff856684..77f6176e6 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/ifeval_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
@@ -28,9 +28,9 @@ class IfEvalScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         from ..utils.ifeval_utils import INSTRUCTION_DICT, INSTRUCTION_LIST
 
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
index d6c78a9ac..d765959a8 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_math_response_scoring_fn.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams, ScoringFnParamsType
@@ -28,9 +28,9 @@ class RegexParserMathResponseScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         assert scoring_fn_identifier is not None, "Scoring function identifier not found."
         fn_def = self.supported_fn_defs_registry[scoring_fn_identifier]
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
index 0606a9581..cb336e303 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/regex_parser_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import re
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams, ScoringFnParamsType
@@ -28,9 +28,9 @@ class RegexParserScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         assert scoring_fn_identifier is not None, "Scoring function identifier not found."
         fn_def = self.supported_fn_defs_registry[scoring_fn_identifier]
diff --git a/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py b/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
index 71defc433..d6e10e6c9 100644
--- a/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/basic/scoring_fn/subset_of_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
@@ -26,9 +26,9 @@ class SubsetOfScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = "subset_of",
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = "subset_of",
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         expected_answer = input_row["expected_answer"]
         generated_answer = input_row["generated_answer"]
diff --git a/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py b/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py
index 28605159f..b74c3826e 100644
--- a/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py
+++ b/llama_stack/providers/inline/scoring/basic/utils/ifeval_utils.py
@@ -11,8 +11,8 @@ import logging
 import random
 import re
 import string
+from collections.abc import Iterable, Sequence
 from types import MappingProxyType
-from typing import Dict, Iterable, List, Optional, Sequence, Union
 
 import emoji
 import langdetect
@@ -1673,12 +1673,11 @@ def split_chinese_japanese_hindi(lines: str) -> Iterable[str]:
     The separator for hindi is '।'
     """
     for line in lines.splitlines():
-        for sent in re.findall(
+        yield from re.findall(
             r"[^!?。\.\!\?\!\?\.\n।]+[!?。\.\!\?\!\?\.\n।]?",
             line.strip(),
             flags=re.U,
-        ):
-            yield sent
+        )
 
 
 def count_words_cjk(text: str) -> int:
@@ -1707,7 +1706,7 @@ def count_words_cjk(text: str) -> int:
     return non_asian_words_cnt + asian_chars_cnt + emoji_cnt
 
 
-@functools.lru_cache(maxsize=None)
+@functools.cache
 def _get_sentence_tokenizer():
     return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
 
@@ -1719,8 +1718,8 @@ def count_sentences(text):
     return len(tokenized_sentences)
 
 
-def get_langid(text: str, lid_path: Optional[str] = None) -> str:
-    line_langs: List[str] = []
+def get_langid(text: str, lid_path: str | None = None) -> str:
+    line_langs: list[str] = []
     lines = [line.strip() for line in text.split("\n") if len(line.strip()) >= 4]
 
     for line in lines:
@@ -1741,7 +1740,7 @@ def generate_keywords(num_keywords):
 
 
 """Library of instructions"""
-_InstructionArgsDtype = Optional[Dict[str, Union[int, str, Sequence[str]]]]
+_InstructionArgsDtype = dict[str, int | str | Sequence[str]] | None
 
 _LANGUAGES = LANGUAGE_CODES
 
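
The ifeval_utils hunks also replace the explicit inner loop with `yield from` and swap `functools.lru_cache(maxsize=None)` for `functools.cache`. Both rewrites are behavior-preserving; a small standalone sketch (the function bodies are stand-ins, not the real implementations):

```python
import functools


@functools.cache  # equivalent to functools.lru_cache(maxsize=None), available since Python 3.9
def _load_tokenizer(name: str) -> str:
    # Stand-in for the expensive nltk.data.load() call in the real code.
    return f"tokenizer:{name}"


def sentences(lines: list[str]):
    for line in lines:
        # "yield from" flattens the inner iterable without an explicit `for ...: yield` loop.
        yield from line.split(". ")
```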
diff --git a/llama_stack/providers/inline/scoring/basic/utils/math_utils.py b/llama_stack/providers/inline/scoring/basic/utils/math_utils.py
index e11fc625b..6840aad14 100644
--- a/llama_stack/providers/inline/scoring/basic/utils/math_utils.py
+++ b/llama_stack/providers/inline/scoring/basic/utils/math_utils.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import re
-from typing import Sequence
+from collections.abc import Sequence
 
 from llama_stack.providers.utils.scoring.basic_scoring_utils import time_limit
 
@@ -323,7 +323,7 @@ def _fix_a_slash_b(string: str) -> str:
     try:
         ia = int(a)
         ib = int(b)
-        assert string == "{}/{}".format(ia, ib)
+        assert string == f"{ia}/{ib}"
         new_string = "\\frac{" + str(ia) + "}{" + str(ib) + "}"
         return new_string
     except (ValueError, AssertionError):
diff --git a/llama_stack/providers/inline/scoring/braintrust/__init__.py b/llama_stack/providers/inline/scoring/braintrust/__init__.py
index f1b0112d9..8ea6e9b96 100644
--- a/llama_stack/providers/inline/scoring/braintrust/__init__.py
+++ b/llama_stack/providers/inline/scoring/braintrust/__init__.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -18,7 +18,7 @@ class BraintrustProviderDataValidator(BaseModel):
 
 async def get_provider_impl(
     config: BraintrustScoringConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .braintrust import BraintrustScoringImpl
 
diff --git a/llama_stack/providers/inline/scoring/braintrust/braintrust.py b/llama_stack/providers/inline/scoring/braintrust/braintrust.py
index 3fae83340..d6655d657 100644
--- a/llama_stack/providers/inline/scoring/braintrust/braintrust.py
+++ b/llama_stack/providers/inline/scoring/braintrust/braintrust.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import os
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from autoevals.llm import Factuality
 from autoevals.ragas import (
@@ -132,7 +132,7 @@ class BraintrustScoringImpl(
 
     async def shutdown(self) -> None: ...
 
-    async def list_scoring_functions(self) -> List[ScoringFn]:
+    async def list_scoring_functions(self) -> list[ScoringFn]:
         scoring_fn_defs_list = list(self.supported_fn_defs_registry.values())
         for f in scoring_fn_defs_list:
             assert f.identifier.startswith("braintrust"), (
@@ -159,7 +159,7 @@ class BraintrustScoringImpl(
     async def score_batch(
         self,
         dataset_id: str,
-        scoring_functions: Dict[str, Optional[ScoringFnParams]],
+        scoring_functions: dict[str, ScoringFnParams | None],
         save_results_dataset: bool = False,
     ) -> ScoreBatchResponse:
         await self.set_api_key()
@@ -181,9 +181,7 @@ class BraintrustScoringImpl(
             results=res.results,
         )
 
-    async def score_row(
-        self, input_row: Dict[str, Any], scoring_fn_identifier: Optional[str] = None
-    ) -> ScoringResultRow:
+    async def score_row(self, input_row: dict[str, Any], scoring_fn_identifier: str | None = None) -> ScoringResultRow:
         validate_row_schema(input_row, get_valid_schemas(Api.scoring.value))
         await self.set_api_key()
         assert scoring_fn_identifier is not None, "scoring_fn_identifier cannot be None"
@@ -203,8 +201,8 @@ class BraintrustScoringImpl(
 
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: Dict[str, Optional[ScoringFnParams]],
+        input_rows: list[dict[str, Any]],
+        scoring_functions: dict[str, ScoringFnParams | None],
     ) -> ScoreResponse:
         await self.set_api_key()
         res = {}
diff --git a/llama_stack/providers/inline/scoring/braintrust/config.py b/llama_stack/providers/inline/scoring/braintrust/config.py
index d4e0d9bcd..4a80f1e4f 100644
--- a/llama_stack/providers/inline/scoring/braintrust/config.py
+++ b/llama_stack/providers/inline/scoring/braintrust/config.py
@@ -3,19 +3,19 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
 
 class BraintrustScoringConfig(BaseModel):
-    openai_api_key: Optional[str] = Field(
+    openai_api_key: str | None = Field(
         default=None,
         description="The OpenAI API Key",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "openai_api_key": "${env.OPENAI_API_KEY:}",
         }
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/__init__.py b/llama_stack/providers/inline/scoring/llm_as_judge/__init__.py
index 4a83bfe13..88bf10737 100644
--- a/llama_stack/providers/inline/scoring/llm_as_judge/__init__.py
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/__init__.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -12,7 +12,7 @@ from .config import LlmAsJudgeScoringConfig
 
 async def get_provider_impl(
     config: LlmAsJudgeScoringConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .scoring import LlmAsJudgeScoringImpl
 
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/config.py b/llama_stack/providers/inline/scoring/llm_as_judge/config.py
index ff63fc5e7..b150ef54c 100644
--- a/llama_stack/providers/inline/scoring/llm_as_judge/config.py
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/config.py
@@ -3,12 +3,12 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class LlmAsJudgeScoringConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
index 7f004fbb6..b705cb9b3 100644
--- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
@@ -50,7 +50,7 @@ class LlmAsJudgeScoringImpl(
 
     async def shutdown(self) -> None: ...
 
-    async def list_scoring_functions(self) -> List[ScoringFn]:
+    async def list_scoring_functions(self) -> list[ScoringFn]:
         scoring_fn_defs_list = self.llm_as_judge_fn.get_supported_scoring_fn_defs()
 
         for f in self.llm_as_judge_fn.get_supported_scoring_fn_defs():
@@ -66,7 +66,7 @@ class LlmAsJudgeScoringImpl(
     async def score_batch(
         self,
         dataset_id: str,
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
         save_results_dataset: bool = False,
     ) -> ScoreBatchResponse:
         dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
@@ -91,8 +91,8 @@ class LlmAsJudgeScoringImpl(
 
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: Dict[str, Optional[ScoringFnParams]] = None,
+        input_rows: list[dict[str, Any]],
+        scoring_functions: dict[str, ScoringFnParams | None] = None,
     ) -> ScoreResponse:
         res = {}
         for scoring_fn_id in scoring_functions.keys():
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
index f4e8ab0aa..51cdf6c3f 100644
--- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import re
-from typing import Any, Dict, Optional
+from typing import Any
 
 from llama_stack.apis.inference.inference import Inference, UserMessage
 from llama_stack.apis.scoring import ScoringResultRow
@@ -30,9 +30,9 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn):
 
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         assert scoring_fn_identifier is not None, "Scoring function identifier not found."
         fn_def = self.supported_fn_defs_registry[scoring_fn_identifier]
diff --git a/llama_stack/providers/inline/telemetry/meta_reference/__init__.py b/llama_stack/providers/inline/telemetry/meta_reference/__init__.py
index 23468c5d0..09e97136a 100644
--- a/llama_stack/providers/inline/telemetry/meta_reference/__init__.py
+++ b/llama_stack/providers/inline/telemetry/meta_reference/__init__.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -13,7 +13,7 @@ from .config import TelemetryConfig, TelemetrySink
 __all__ = ["TelemetryConfig", "TelemetrySink"]
 
 
-async def get_provider_impl(config: TelemetryConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: TelemetryConfig, deps: dict[Api, Any]):
     from .telemetry import TelemetryAdapter
 
     impl = TelemetryAdapter(config, deps)
diff --git a/llama_stack/providers/inline/telemetry/meta_reference/config.py b/llama_stack/providers/inline/telemetry/meta_reference/config.py
index 57312f41f..af53bfd9c 100644
--- a/llama_stack/providers/inline/telemetry/meta_reference/config.py
+++ b/llama_stack/providers/inline/telemetry/meta_reference/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List
+from typing import Any
 
 from pydantic import BaseModel, Field, field_validator
 
@@ -30,10 +30,10 @@ class TelemetryConfig(BaseModel):
     )
     service_name: str = Field(
         # service name is always the same, use zero-width space to avoid clutter
-        default="​",
+        default="",
         description="The service name to use for telemetry",
     )
-    sinks: List[TelemetrySink] = Field(
+    sinks: list[TelemetrySink] = Field(
         default=[TelemetrySink.CONSOLE, TelemetrySink.SQLITE],
         description="List of telemetry sinks to enable (possible values: otel, sqlite, console)",
     )
@@ -50,9 +50,9 @@ class TelemetryConfig(BaseModel):
         return v
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, db_name: str = "trace_store.db") -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, db_name: str = "trace_store.db") -> dict[str, Any]:
         return {
-            "service_name": "${env.OTEL_SERVICE_NAME:​}",
+            "service_name": "${env.OTEL_SERVICE_NAME:}",
             "sinks": "${env.TELEMETRY_SINKS:console,sqlite}",
-            "sqlite_db_path": "${env.SQLITE_DB_PATH:" + __distro_dir__ + "/" + db_name + "}",
+            "sqlite_db_path": "${env.SQLITE_STORE_DIR:" + __distro_dir__ + "}/" + db_name,
         }
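
The new `sqlite_db_path` template keeps the file name outside the placeholder, so `SQLITE_STORE_DIR` only overrides the directory. A rough sketch of how such `${env.VAR:default}` placeholders could be resolved (this resolver is an assumption for illustration, not the actual llama_stack implementation):

```python
import os
import re


def expand_env_placeholders(value: str) -> str:
    # Replace "${env.NAME:default}" with os.environ["NAME"] when set, else the default.
    return re.sub(
        r"\$\{env\.([A-Za-z0-9_]+):([^}]*)\}",
        lambda m: os.environ.get(m.group(1), m.group(2)),
        value,
    )


# Only the directory is overridable; "trace_store.db" is always appended.
print(expand_env_placeholders("${env.SQLITE_STORE_DIR:/home/user/.llama/distro}/trace_store.db"))
```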
diff --git a/llama_stack/providers/inline/telemetry/meta_reference/console_span_processor.py b/llama_stack/providers/inline/telemetry/meta_reference/console_span_processor.py
index b909d32ef..ff1914c15 100644
--- a/llama_stack/providers/inline/telemetry/meta_reference/console_span_processor.py
+++ b/llama_stack/providers/inline/telemetry/meta_reference/console_span_processor.py
@@ -78,7 +78,7 @@ class ConsoleSpanProcessor(SpanProcessor):
 
             severity = event.attributes.get("severity", "info")
             message = event.attributes.get("message", event.name)
-            if isinstance(message, (dict, list)):
+            if isinstance(message, dict | list):
                 message = json.dumps(message, indent=2)
 
             severity_colors = {
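
`isinstance` accepts `X | Y` union types on Python 3.10+, so the tuple form is no longer required; both checks below are equivalent:

```python
message = {"severity": "info"}
assert isinstance(message, dict | list)   # form used in the hunk above (Python 3.10+)
assert isinstance(message, (dict, list))  # equivalent older form
```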
diff --git a/llama_stack/providers/inline/telemetry/meta_reference/telemetry.py b/llama_stack/providers/inline/telemetry/meta_reference/telemetry.py
index 9b23c8229..0f6cf8619 100644
--- a/llama_stack/providers/inline/telemetry/meta_reference/telemetry.py
+++ b/llama_stack/providers/inline/telemetry/meta_reference/telemetry.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import threading
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from opentelemetry import metrics, trace
 from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
@@ -16,11 +16,15 @@ from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor
 from opentelemetry.semconv.resource import ResourceAttributes
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
 
 from llama_stack.apis.telemetry import (
     Event,
     MetricEvent,
+    MetricLabelMatcher,
+    MetricQueryType,
     QueryCondition,
+    QueryMetricsResponse,
     QuerySpanTreeResponse,
     QueryTracesResponse,
     Span,
@@ -41,6 +45,7 @@ from llama_stack.providers.inline.telemetry.meta_reference.sqlite_span_processor
 )
 from llama_stack.providers.utils.telemetry.dataset_mixin import TelemetryDatasetMixin
 from llama_stack.providers.utils.telemetry.sqlite_trace_store import SQLiteTraceStore
+from llama_stack.providers.utils.telemetry.tracing import ROOT_SPAN_MARKERS
 
 from .config import TelemetryConfig, TelemetrySink
 
@@ -60,7 +65,7 @@ def is_tracing_enabled(tracer):
 
 
 class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
-    def __init__(self, config: TelemetryConfig, deps: Dict[Api, Any]) -> None:
+    def __init__(self, config: TelemetryConfig, deps: dict[Api, Any]) -> None:
         self.config = config
         self.datasetio_api = deps.get(Api.datasetio)
         self.meter = None
@@ -123,6 +128,17 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
         else:
             raise ValueError(f"Unknown event type: {event}")
 
+    async def query_metrics(
+        self,
+        metric_name: str,
+        start_time: int,
+        end_time: int | None = None,
+        granularity: str | None = "1d",
+        query_type: MetricQueryType = MetricQueryType.RANGE,
+        label_matchers: list[MetricLabelMatcher] | None = None,
+    ) -> QueryMetricsResponse:
+        raise NotImplementedError("Querying metrics is not implemented")
+
     def _log_unstructured(self, event: UnstructuredLogEvent, ttl_seconds: int) -> None:
         with self._lock:
             # Use global storage instead of instance storage
@@ -132,7 +148,7 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
             if span:
                 timestamp_ns = int(event.timestamp.timestamp() * 1e9)
                 span.add_event(
-                    name=event.type,
+                    name=event.type.value,
                     attributes={
                         "message": event.message,
                         "severity": event.severity.value,
@@ -192,6 +208,15 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
                 event.attributes = {}
             event.attributes["__ttl__"] = ttl_seconds
 
+            # Extract these W3C trace context attributes so they are not written to
+            # underlying storage, as we just need them to propagate the trace context.
+            traceparent = event.attributes.pop("traceparent", None)
+            tracestate = event.attributes.pop("tracestate", None)
+            if traceparent:
+                # If we have a traceparent header value, we're not the root span.
+                for root_attribute in ROOT_SPAN_MARKERS:
+                    event.attributes.pop(root_attribute, None)
+
             if isinstance(event.payload, SpanStartPayload):
                 # Check if span already exists to prevent duplicates
                 if span_id in _GLOBAL_STORAGE["active_spans"]:
@@ -202,8 +227,12 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
                     parent_span_id = int(event.payload.parent_span_id, 16)
                     parent_span = _GLOBAL_STORAGE["active_spans"].get(parent_span_id)
                     context = trace.set_span_in_context(parent_span)
-                else:
-                    event.attributes["__root_span__"] = "true"
+                elif traceparent:
+                    carrier = {
+                        "traceparent": traceparent,
+                        "tracestate": tracestate,
+                    }
+                    context = TraceContextTextMapPropagator().extract(carrier=carrier)
 
                 span = tracer.start_span(
                     name=event.payload.name,
@@ -231,10 +260,10 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
 
     async def query_traces(
         self,
-        attribute_filters: Optional[List[QueryCondition]] = None,
-        limit: Optional[int] = 100,
-        offset: Optional[int] = 0,
-        order_by: Optional[List[str]] = None,
+        attribute_filters: list[QueryCondition] | None = None,
+        limit: int | None = 100,
+        offset: int | None = 0,
+        order_by: list[str] | None = None,
     ) -> QueryTracesResponse:
         return QueryTracesResponse(
             data=await self.trace_store.query_traces(
@@ -254,8 +283,8 @@ class TelemetryAdapter(TelemetryDatasetMixin, Telemetry):
     async def get_span_tree(
         self,
         span_id: str,
-        attributes_to_return: Optional[List[str]] = None,
-        max_depth: Optional[int] = None,
+        attributes_to_return: list[str] | None = None,
+        max_depth: int | None = None,
     ) -> QuerySpanTreeResponse:
         return QuerySpanTreeResponse(
             data=await self.trace_store.get_span_tree(
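
The adapter now pops `traceparent`/`tracestate` from event attributes and, when a `traceparent` is present, builds the parent context with OpenTelemetry's W3C propagator instead of marking the span as a root. A minimal sketch of that extraction; the header value below is illustrative, not taken from the patch:

```python
from opentelemetry import trace
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator

# Hypothetical W3C trace-context header, e.g. forwarded by an upstream caller.
carrier = {
    "traceparent": "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01",
    "tracestate": "",
}
ctx = TraceContextTextMapPropagator().extract(carrier=carrier)

tracer = trace.get_tracer(__name__)
# Spans started with this context become children of the span named in traceparent.
with tracer.start_as_current_span("llama-stack-span", context=ctx):
    pass
```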
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py
deleted file mode 100644
index 8317ce793..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any, Dict
-
-from .config import CodeInterpreterToolConfig
-
-__all__ = ["CodeInterpreterToolConfig", "CodeInterpreterToolRuntimeImpl"]
-
-
-async def get_provider_impl(config: CodeInterpreterToolConfig, _deps: Dict[str, Any]):
-    from .code_interpreter import CodeInterpreterToolRuntimeImpl
-
-    impl = CodeInterpreterToolRuntimeImpl(config)
-    await impl.initialize()
-    return impl
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_env_prefix.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/code_env_prefix.py
deleted file mode 100644
index 9c5f642ea..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_env_prefix.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import errno
-
-# Disabling potentially dangerous functions
-import os as _os
-from functools import partial
-
-os_funcs_to_disable = [
-    "kill",
-    "system",
-    "putenv",
-    "remove",
-    "removedirs",
-    "rmdir",
-    "fchdir",
-    "setuid",
-    "fork",
-    "forkpty",
-    "killpg",
-    "rename",
-    "renames",
-    "truncate",
-    "replace",
-    # "unlink",  # Commenting as this was blocking matpltlib from rendering plots correctly
-    "fchmod",
-    "fchown",
-    "chmod",
-    "chown",
-    "chroot",
-    "fchdir",
-    "lchflags",
-    "lchmod",
-    "lchown",
-    "chdir",
-]
-
-
-def call_not_allowed(*args, **kwargs):
-    raise OSError(errno.EPERM, "Calls are not permitted in this environment")
-
-
-for func_name in os_funcs_to_disable:
-    if hasattr(_os, func_name):
-        setattr(_os, func_name, partial(call_not_allowed, _func_name=f"os.{func_name}"))
-
-import shutil as _shutil
-
-for func_name in ["rmtree", "move", "chown"]:
-    if hasattr(_shutil, func_name):
-        setattr(
-            _shutil,
-            func_name,
-            partial(call_not_allowed, _func_name=f"shutil.{func_name}"),
-        )
-
-import subprocess as _subprocess
-
-
-def popen_not_allowed(*args, **kwargs):
-    raise _subprocess.CalledProcessError(
-        -1,
-        args[0] if args else "unknown",
-        stderr="subprocess.Popen is not allowed in this environment",
-    )
-
-
-_subprocess.Popen = popen_not_allowed  # type: ignore
-
-
-import atexit as _atexit
-import builtins as _builtins
-import io as _io
-import json as _json
-import sys as _sys
-
-# NB! The following "unused" imports are crucial, make sure not to remove
-# them with linters - they're used in code_execution.py
-from contextlib import (  # noqa
-    contextmanager as _contextmanager,
-)
-from multiprocessing.connection import Connection as _Connection
-
-# Mangle imports to avoid polluting model execution namespace.
-
-_IO_SINK = _io.StringIO()
-_NETWORK_TIMEOUT = 5
-_NETWORK_CONNECTIONS = None
-
-
-def _open_connections():
-    global _NETWORK_CONNECTIONS
-    if _NETWORK_CONNECTIONS is not None:
-        # Ensure connections are only opened once.
-        return _NETWORK_CONNECTIONS
-    req_w_fd, resp_r_fd = _sys.argv[1], _sys.argv[2]
-    req_con = _Connection(int(req_w_fd), readable=False)
-    resp_con = _Connection(int(resp_r_fd), writable=False)
-    _NETWORK_CONNECTIONS = (req_con, resp_con)
-    return _NETWORK_CONNECTIONS
-
-
-_builtins._open_connections = _open_connections  # type: ignore
-
-
-@_atexit.register
-def _close_connections():
-    global _NETWORK_CONNECTIONS
-    if _NETWORK_CONNECTIONS is None:
-        return
-    for con in _NETWORK_CONNECTIONS:
-        con.close()
-    del _NETWORK_CONNECTIONS
-
-
-def _network_call(request):
-    # NOTE: We communicate with the parent process in json, encoded
-    # in raw bytes. We do this because native send/recv methods use
-    # pickle which involves execution of arbitrary code.
-    _open_connections()
-    req_con, resp_con = _NETWORK_CONNECTIONS
-
-    req_con.send_bytes(_json.dumps(request).encode("utf-8"))
-    if resp_con.poll(timeout=_NETWORK_TIMEOUT) is None:
-        raise Exception(f"Network request timed out: {_json.dumps(request)}")
-    else:
-        return _json.loads(resp_con.recv_bytes().decode("utf-8"))
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_execution.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/code_execution.py
deleted file mode 100644
index 6106cf741..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_execution.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import base64
-import json
-import multiprocessing
-import os
-import re
-import subprocess
-import sys
-import tempfile
-import textwrap
-import time
-from dataclasses import dataclass
-from datetime import datetime
-from io import BytesIO
-from pathlib import Path
-from typing import List
-
-from PIL import Image
-
-from .utils import get_code_env_prefix
-
-TOOLS_ATTACHMENT_KEY = "__tools_attachment__"
-TOOLS_ATTACHMENT_KEY_REGEX = re.compile(r"__tools_attachment__=(\{.*?\})")
-
-DIRNAME = Path(__file__).parent
-
-CODE_EXEC_TIMEOUT = 20
-CODE_ENV_PREFIX = get_code_env_prefix()
-
-STDOUTERR_SINK_WRAPPER_TEMPLATE = """\
-with _redirect_stdout(_IO_SINK), _redirect_stderr(_IO_SINK):
-{code}\
-"""
-
-TRYEXCEPT_WRAPPER_TEMPLATE = """\
-try:
-{code}
-except:
-    pass\
-"""
-
-
-def generate_bwrap_command(bind_dirs: List[str]) -> str:
-    """
-    Generate the bwrap command string for binding all
-    directories in the current directory read-only.
-    """
-    bwrap_args = ""
-    bwrap_args += "--ro-bind / / "
-    # Add the --dev flag to mount device files
-    bwrap_args += "--dev /dev "
-    for d in bind_dirs:
-        bwrap_args += f"--bind {d} {d} "
-
-    # Add the --unshare-all flag to isolate the sandbox from the rest of the system
-    bwrap_args += "--unshare-all "
-    # Add the --die-with-parent flag to ensure the child process dies when bwrap's parent dies
-    bwrap_args += "--die-with-parent "
-    return bwrap_args
-
-
-@dataclass
-class CodeExecutionContext:
-    matplotlib_dump_dir: str
-
-
-@dataclass
-class CodeExecutionRequest:
-    scripts: List[str]
-    only_last_cell_stdouterr: bool = True
-    only_last_cell_fail: bool = True
-    seed: int = 0
-    strip_fpaths_in_stderr: bool = True
-    use_bwrap: bool = True
-
-
-class CodeExecutor:
-    def __init__(self, context: CodeExecutionContext):
-        self.context = context
-
-    def execute(self, req: CodeExecutionRequest) -> dict:
-        scripts = req.scripts
-        for i in range(len(scripts) - 1):
-            if req.only_last_cell_stdouterr:
-                scripts[i] = STDOUTERR_SINK_WRAPPER_TEMPLATE.format(code=textwrap.indent(scripts[i], " " * 4))
-            if req.only_last_cell_fail:
-                scripts[i] = TRYEXCEPT_WRAPPER_TEMPLATE.format(code=textwrap.indent(scripts[i], " " * 4))
-
-        # Seeds prefix:
-        seed = req.seed
-        seeds_prefix = f"""\
-def _set_seeds():
-    import random
-    random.seed({seed})
-    import numpy as np
-    np.random.seed({seed})
-_set_seeds()\
-"""
-
-        script = "\n\n".join([seeds_prefix] + [CODE_ENV_PREFIX] + scripts)
-        with tempfile.TemporaryDirectory() as dpath:
-            code_fpath = os.path.join(dpath, "code.py")
-            with open(code_fpath, "w") as f:
-                f.write(script)
-
-            try:
-                python_path = os.environ.get("PYTHONPATH", "")
-                env = dict(
-                    os.environ,
-                    PYTHONHASHSEED=str(seed),
-                    MPLCONFIGDIR=dpath,
-                    MPLBACKEND="module://matplotlib_custom_backend",
-                    PYTHONPATH=f"{DIRNAME}:{python_path}",
-                )
-
-                if req.use_bwrap:
-                    bwrap_prefix = "bwrap " + generate_bwrap_command(bind_dirs=[dpath])
-                    cmd = [*bwrap_prefix.split(), sys.executable, "-c", script]
-                else:
-                    cmd = [sys.executable, "-c", script]
-
-                stdout, stderr, returncode = do_subprocess(
-                    cmd=cmd,
-                    env=env,
-                    ctx=self.context,
-                )
-
-                stderr = stderr.strip()
-                if req.strip_fpaths_in_stderr:
-                    pattern = r'File "([^"]+)", line (\d+)'
-                    stderr = re.sub(pattern, r"line \2", stderr)
-
-                return {
-                    "process_status": "completed",
-                    "returncode": returncode,
-                    "stdout": stdout.strip(),
-                    "stderr": stderr,
-                }
-
-            except subprocess.TimeoutExpired:
-                return {
-                    "process_status": "timeout",
-                    "stdout": "Timed out",
-                    "stderr": "Timed out",
-                }
-
-            except Exception as e:
-                return {
-                    "process_status": "error",
-                    "error_type": type(e).__name__,
-                    "stderr": str(e),
-                    "stdout": str(e),
-                }
-
-
-def process_matplotlib_response(response, matplotlib_dump_dir: str):
-    image_data = response["image_data"]
-    # Convert the base64 string to a bytes object
-    images_raw = [base64.b64decode(d["image_base64"]) for d in image_data]
-    # Create a list of PIL images from the bytes objects
-    images = [Image.open(BytesIO(img)) for img in images_raw]
-    # Create a list of image paths
-    image_paths = []
-    for i, img in enumerate(images):
-        # create new directory for each day to better organize data:
-        dump_dname = datetime.today().strftime("%Y-%m-%d")  # noqa: DTZ002 - we don't care about timezones here since we are displaying the date
-        dump_dpath = Path(matplotlib_dump_dir, dump_dname)
-        dump_dpath.mkdir(parents=True, exist_ok=True)
-        # save image into a file
-        dump_fname = f"matplotlib_{str(time.time()).replace('.', '_')}_{i}.png"
-        dump_fpath = dump_dpath / dump_fname
-        img.save(dump_fpath, "PNG")
-        image_paths.append(str(dump_fpath))
-
-    # this is kind of convoluted, we send back this response to the subprocess which
-    # prints it out
-    info = {
-        "filepath": str(image_paths[-1]),
-        "mimetype": "image/png",
-    }
-    return f"{TOOLS_ATTACHMENT_KEY}={json.dumps(info)}"
-
-
-def execute_subprocess_request(request, ctx: CodeExecutionContext):
-    "Route requests from the subprocess (via network Pipes) to the internet/tools."
-    if request["type"] == "matplotlib":
-        return process_matplotlib_response(request, ctx.matplotlib_dump_dir)
-    else:
-        raise Exception(f"Unrecognised network request type: {request['type']}")
-
-
-def do_subprocess(*, cmd: list, env: dict, ctx: CodeExecutionContext):
-    # Create Pipes to be used for any external tool/network requests.
-    req_r, req_w = multiprocessing.Pipe(duplex=False)
-    resp_r, resp_w = multiprocessing.Pipe(duplex=False)
-
-    cmd += [str(req_w.fileno()), str(resp_r.fileno())]
-    proc = subprocess.Popen(
-        cmd,
-        pass_fds=(req_w.fileno(), resp_r.fileno()),
-        text=True,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE,
-        close_fds=True,
-        env=env,
-    )
-
-    # Close unnecessary fds.
-    req_w.close()
-    resp_r.close()
-
-    pipe_close = False
-    done_read = False
-    start = time.monotonic()
-    while proc.poll() is None and not pipe_close:
-        if req_r.poll(0.1):
-            # NB: Python pipe semantics for poll and recv mean that
-            # poll() returns True if a pipe is closed.
-            # CF old school PEP from '09
-            #  https://bugs.python.org/issue5573
-            try:
-                request = json.loads(req_r.recv_bytes().decode("utf-8"))
-                response = execute_subprocess_request(request, ctx)
-
-                resp_w.send_bytes(json.dumps(response).encode("utf-8"))
-            except EOFError:
-                # The request pipe is closed - set a marker to exit
-                # after the next attempt at reading stdout/stderr.
-                pipe_close = True
-
-            try:
-                # If a lot has been printed, the pipe might be full, but the
-                # proc cannot exit until all of its stdout/stderr has
-                # been written/read.
-                stdout, stderr = proc.communicate(timeout=0.3)
-                done_read = True
-            except subprocess.TimeoutExpired:
-                # The program has not terminated. Ignore it, there
-                # may be more network/tool requests.
-                continue
-        if time.monotonic() - start > CODE_EXEC_TIMEOUT:
-            proc.terminate()
-            raise subprocess.TimeoutExpired(cmd, CODE_EXEC_TIMEOUT)
-
-    if not done_read:
-        # Solve race condition where process terminates before
-        # we hit the while loop.
-        stdout, stderr = proc.communicate(timeout=0.3)
-
-    resp_w.close()
-    req_r.close()
-    return stdout, stderr, proc.returncode
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
deleted file mode 100644
index 10ac2fcc6..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-import asyncio
-import logging
-import os
-import tempfile
-from typing import Any, Dict, Optional
-
-from llama_stack.apis.common.content_types import URL
-from llama_stack.apis.tools import (
-    ListToolDefsResponse,
-    Tool,
-    ToolDef,
-    ToolInvocationResult,
-    ToolParameter,
-    ToolRuntime,
-)
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
-
-from .code_execution import CodeExecutionContext, CodeExecutionRequest, CodeExecutor
-from .config import CodeInterpreterToolConfig
-
-log = logging.getLogger(__name__)
-
-
-class CodeInterpreterToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
-    def __init__(self, config: CodeInterpreterToolConfig):
-        self.config = config
-        ctx = CodeExecutionContext(
-            matplotlib_dump_dir=tempfile.mkdtemp(),
-        )
-        self.code_executor = CodeExecutor(ctx)
-
-    async def initialize(self):
-        pass
-
-    async def register_tool(self, tool: Tool) -> None:
-        pass
-
-    async def unregister_tool(self, tool_id: str) -> None:
-        return
-
-    async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
-    ) -> ListToolDefsResponse:
-        return ListToolDefsResponse(
-            data=[
-                ToolDef(
-                    name="code_interpreter",
-                    description="Execute code",
-                    parameters=[
-                        ToolParameter(
-                            name="code",
-                            description="The code to execute",
-                            parameter_type="string",
-                        ),
-                    ],
-                )
-            ]
-        )
-
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
-        script = kwargs["code"]
-        # Use environment variable to control bwrap usage
-        force_disable_bwrap = os.environ.get("DISABLE_CODE_SANDBOX", "").lower() in ("1", "true", "yes")
-        req = CodeExecutionRequest(scripts=[script], use_bwrap=not force_disable_bwrap)
-        res = await asyncio.to_thread(self.code_executor.execute, req)
-        pieces = [res["process_status"]]
-        for out_type in ["stdout", "stderr"]:
-            res_out = res[out_type]
-            if res_out != "":
-                pieces.extend([f"[{out_type}]", res_out, f"[/{out_type}]"])
-                if out_type == "stderr":
-                    log.error(f"ipython tool error: ↓\n{res_out}")
-        return ToolInvocationResult(content="\n".join(pieces))
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/config.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/config.py
deleted file mode 100644
index 7de1ec453..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/config.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from typing import Any, Dict
-
-from pydantic import BaseModel
-
-
-class CodeInterpreterToolConfig(BaseModel):
-    @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
-        return {}
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/matplotlib_custom_backend.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/matplotlib_custom_backend.py
deleted file mode 100644
index 6454358a5..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/matplotlib_custom_backend.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-"""
-A custom Matplotlib backend that overrides the show method to return image bytes.
-"""
-
-import base64
-import io
-import json as _json
-import logging
-
-import matplotlib
-from matplotlib.backend_bases import FigureManagerBase
-
-# Import necessary components from Matplotlib
-from matplotlib.backends.backend_agg import FigureCanvasAgg
-
-log = logging.getLogger(__name__)
-
-
-class CustomFigureCanvas(FigureCanvasAgg):
-    def show(self):
-        # Save the figure to a BytesIO object
-        buf = io.BytesIO()
-        self.print_png(buf)
-        image_bytes = buf.getvalue()
-        buf.close()
-        return image_bytes
-
-
-class CustomFigureManager(FigureManagerBase):
-    def __init__(self, canvas, num):
-        super().__init__(canvas, num)
-
-
-# Mimic module initialization that integrates with the Matplotlib backend system
-def _create_figure_manager(num, *args, **kwargs):
-    """
-    Create a custom figure manager instance.
-    """
-    FigureClass = kwargs.pop("FigureClass", None)  # noqa: N806
-    if FigureClass is None:
-        from matplotlib.figure import Figure
-
-        FigureClass = Figure  # noqa: N806
-    fig = FigureClass(*args, **kwargs)
-    canvas = CustomFigureCanvas(fig)
-    manager = CustomFigureManager(canvas, num)
-    return manager
-
-
-def show():
-    """
-    Handle all figures and potentially return their images as bytes.
-
-    This function iterates over all figures registered with the custom backend,
-    renders them as images in bytes format, and could return a list of bytes objects,
-    one for each figure, or handle them as needed.
-    """
-    image_data = []
-    for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers():
-        # Get the figure from the manager
-        fig = manager.canvas.figure
-        buf = io.BytesIO()  # Create a buffer for the figure
-        fig.savefig(buf, format="png")  # Save the figure to the buffer in PNG format
-        buf.seek(0)  # Go to the beginning of the buffer
-        image_bytes = buf.getvalue()  # Retrieve bytes value
-        image_base64 = base64.b64encode(image_bytes).decode("utf-8")
-        image_data.append({"image_base64": image_base64})
-        buf.close()
-
-    # The _open_connections method is dynamically made available to
-    # the interpreter by bundling code from "code_env_prefix.py" -- by literally prefixing it -- and
-    # then "eval"ing it within a sandboxed interpreter.
-    req_con, resp_con = _open_connections()  # noqa: F821
-
-    _json_dump = _json.dumps(
-        {
-            "type": "matplotlib",
-            "image_data": image_data,
-        }
-    )
-    req_con.send_bytes(_json_dump.encode("utf-8"))
-    resp = _json.loads(resp_con.recv_bytes().decode("utf-8"))
-    log.info(resp)
-
-
-FigureCanvas = CustomFigureCanvas
-FigureManager = CustomFigureManager
diff --git a/llama_stack/providers/inline/tool_runtime/code_interpreter/utils.py b/llama_stack/providers/inline/tool_runtime/code_interpreter/utils.py
deleted file mode 100644
index d6f539a39..000000000
--- a/llama_stack/providers/inline/tool_runtime/code_interpreter/utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import os
-
-DIR = os.path.dirname(os.path.realpath(__file__))
-CODE_ENV_PREFIX_FILE = os.path.join(DIR, "code_env_prefix.py")
-CODE_ENV_PREFIX = None
-
-
-def get_code_env_prefix() -> str:
-    global CODE_ENV_PREFIX
-
-    if CODE_ENV_PREFIX is None:
-        with open(CODE_ENV_PREFIX_FILE, "r") as f:
-            CODE_ENV_PREFIX = f.read()
-
-    return CODE_ENV_PREFIX
diff --git a/llama_stack/providers/inline/tool_runtime/rag/__init__.py b/llama_stack/providers/inline/tool_runtime/rag/__init__.py
index 0ef3c35e9..f9a6e5c55 100644
--- a/llama_stack/providers/inline/tool_runtime/rag/__init__.py
+++ b/llama_stack/providers/inline/tool_runtime/rag/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.datatypes import Api
 
 from .config import RagToolRuntimeConfig
 
 
-async def get_provider_impl(config: RagToolRuntimeConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: RagToolRuntimeConfig, deps: dict[Api, Any]):
     from .memory import MemoryToolRuntimeImpl
 
     impl = MemoryToolRuntimeImpl(config, deps[Api.vector_io], deps[Api.inference])
diff --git a/llama_stack/providers/inline/tool_runtime/rag/config.py b/llama_stack/providers/inline/tool_runtime/rag/config.py
index c75c3fc51..43ba78e65 100644
--- a/llama_stack/providers/inline/tool_runtime/rag/config.py
+++ b/llama_stack/providers/inline/tool_runtime/rag/config.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
 class RagToolRuntimeConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/inline/tool_runtime/rag/memory.py b/llama_stack/providers/inline/tool_runtime/rag/memory.py
index 8d4689e5d..4776d47d0 100644
--- a/llama_stack/providers/inline/tool_runtime/rag/memory.py
+++ b/llama_stack/providers/inline/tool_runtime/rag/memory.py
@@ -8,7 +8,7 @@ import asyncio
 import logging
 import secrets
 import string
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from pydantic import TypeAdapter
 
@@ -25,14 +25,14 @@ from llama_stack.apis.tools import (
     RAGQueryConfig,
     RAGQueryResult,
     RAGToolRuntime,
-    Tool,
     ToolDef,
+    ToolGroup,
     ToolInvocationResult,
     ToolParameter,
     ToolRuntime,
 )
 from llama_stack.apis.vector_io import QueryChunksResponse, VectorIO
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
 from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 from llama_stack.providers.utils.memory.vector_store import (
     content_from_doc,
@@ -49,7 +49,7 @@ def make_random_string(length: int = 8):
     return "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(length))
 
 
-class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
+class MemoryToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, RAGToolRuntime):
     def __init__(
         self,
         config: RagToolRuntimeConfig,
@@ -66,15 +66,15 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
     async def shutdown(self):
         pass
 
-    async def register_tool(self, tool: Tool) -> None:
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
         pass
 
-    async def unregister_tool(self, tool_id: str) -> None:
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
         return
 
     async def insert(
         self,
-        documents: List[RAGDocument],
+        documents: list[RAGDocument],
         vector_db_id: str,
         chunk_size_in_tokens: int = 512,
     ) -> None:
@@ -87,6 +87,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
                     content,
                     chunk_size_in_tokens,
                     chunk_size_in_tokens // 4,
+                    doc.metadata,
                 )
             )
 
@@ -101,11 +102,13 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
     async def query(
         self,
         content: InterleavedContent,
-        vector_db_ids: List[str],
-        query_config: Optional[RAGQueryConfig] = None,
+        vector_db_ids: list[str],
+        query_config: RAGQueryConfig | None = None,
     ) -> RAGQueryResult:
         if not vector_db_ids:
-            return RAGQueryResult(content=None)
+            raise ValueError(
+                "No vector DBs were provided to the knowledge search tool. Please provide at least one vector DB ID."
+            )
 
         query_config = query_config or RAGQueryConfig()
         query = await generate_rag_query(
@@ -119,11 +122,12 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
                 query=query,
                 params={
                     "max_chunks": query_config.max_chunks,
+                    "mode": query_config.mode,
                 },
             )
             for vector_db_id in vector_db_ids
         ]
-        results: List[QueryChunksResponse] = await asyncio.gather(*tasks)
+        results: list[QueryChunksResponse] = await asyncio.gather(*tasks)
         chunks = [c for r in results for c in r.chunks]
         scores = [s for r in results for s in r.scores]
 
@@ -140,19 +144,21 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
                 text=f"knowledge_search tool found {len(chunks)} chunks:\nBEGIN of knowledge_search tool results.\n"
             )
         ]
-        for i, c in enumerate(chunks):
-            metadata = c.metadata
-            tokens += metadata["token_count"]
+        for i, chunk in enumerate(chunks):
+            metadata = chunk.metadata
+            tokens += metadata.get("token_count", 0)
+            tokens += metadata.get("metadata_token_count", 0)
+
             if tokens > query_config.max_tokens_in_context:
                 log.error(
                     f"Using {len(picked)} chunks; reached max tokens in context: {tokens}",
                 )
                 break
-            picked.append(
-                TextContentItem(
-                    text=f"Result {i + 1}:\nDocument_id:{metadata['document_id'][:5]}\nContent: {c.content}\n",
-                )
-            )
+
+            metadata_subset = {k: v for k, v in metadata.items() if k not in ["token_count", "metadata_token_count"]}
+            text_content = query_config.chunk_template.format(index=i + 1, chunk=chunk, metadata=metadata_subset)
+            picked.append(TextContentItem(text=text_content))
+
         picked.append(TextContentItem(text="END of knowledge_search tool results.\n"))
         picked.append(
             TextContentItem(
@@ -168,7 +174,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         )
 
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
         # Parameters are not listed since these methods are not yet invoked automatically
         # by the LLM. The method is only implemented so things like /tools can list without
@@ -193,7 +199,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
             ]
         )
 
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         vector_db_ids = kwargs.get("vector_db_ids", [])
         query_config = kwargs.get("query_config")
         if query_config:
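
The rewritten loop above swaps the hard-coded result string for `query_config.chunk_template.format(...)`. Below is a minimal, self-contained sketch of how such a template renders; the template string and the stand-in `Chunk` here are assumptions for illustration only (the real default template and `Chunk` model live in `RAGQueryConfig` and the vector-store utilities, outside this hunk):

```python
# Hypothetical illustration of the chunk_template rendering used by the RAG tool above.
# The template string and Chunk fields are assumptions, not the canonical defaults.
from dataclasses import dataclass, field
from typing import Any


@dataclass
class Chunk:  # stand-in for llama_stack's Chunk model
    content: str
    metadata: dict[str, Any] = field(default_factory=dict)


chunk_template = "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n"

chunk = Chunk(content="Llama Stack is a set of APIs...", metadata={"document_id": "doc-123", "token_count": 12})
# mirror the metadata_subset filtering done in the tool
metadata_subset = {k: v for k, v in chunk.metadata.items() if k not in ["token_count", "metadata_token_count"]}

print(chunk_template.format(index=1, chunk=chunk, metadata=metadata_subset))
# Result 1
# Content: Llama Stack is a set of APIs...
# Metadata: {'document_id': 'doc-123'}
```
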
diff --git a/llama_stack/providers/inline/vector_io/chroma/__init__.py b/llama_stack/providers/inline/vector_io/chroma/__init__.py
index f39188b46..2e0efb8a1 100644
--- a/llama_stack/providers/inline/vector_io/chroma/__init__.py
+++ b/llama_stack/providers/inline/vector_io/chroma/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.datatypes import Api
 
 from .config import ChromaVectorIOConfig
 
 
-async def get_provider_impl(config: ChromaVectorIOConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: ChromaVectorIOConfig, deps: dict[Api, Any]):
     from llama_stack.providers.remote.vector_io.chroma.chroma import (
         ChromaVectorIOAdapter,
     )
diff --git a/llama_stack/providers/inline/vector_io/chroma/config.py b/llama_stack/providers/inline/vector_io/chroma/config.py
index 1e333fe92..81e2f289e 100644
--- a/llama_stack/providers/inline/vector_io/chroma/config.py
+++ b/llama_stack/providers/inline/vector_io/chroma/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -13,5 +13,5 @@ class ChromaVectorIOConfig(BaseModel):
     db_path: str
 
     @classmethod
-    def sample_run_config(cls, db_path: str = "${env.CHROMADB_PATH}", **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, db_path: str = "${env.CHROMADB_PATH}", **kwargs: Any) -> dict[str, Any]:
         return {"db_path": db_path}
diff --git a/llama_stack/providers/inline/vector_io/faiss/__init__.py b/llama_stack/providers/inline/vector_io/faiss/__init__.py
index fc8ce70b4..68a1dee66 100644
--- a/llama_stack/providers/inline/vector_io/faiss/__init__.py
+++ b/llama_stack/providers/inline/vector_io/faiss/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.datatypes import Api
 
 from .config import FaissVectorIOConfig
 
 
-async def get_provider_impl(config: FaissVectorIOConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: FaissVectorIOConfig, deps: dict[Api, Any]):
     from .faiss import FaissVectorIOAdapter
 
     assert isinstance(config, FaissVectorIOConfig), f"Unexpected config type: {type(config)}"
diff --git a/llama_stack/providers/inline/vector_io/faiss/config.py b/llama_stack/providers/inline/vector_io/faiss/config.py
index fa6e5bede..cbcbb1762 100644
--- a/llama_stack/providers/inline/vector_io/faiss/config.py
+++ b/llama_stack/providers/inline/vector_io/faiss/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -20,7 +20,7 @@ class FaissVectorIOConfig(BaseModel):
     kvstore: KVStoreConfig
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
diff --git a/llama_stack/providers/inline/vector_io/faiss/faiss.py b/llama_stack/providers/inline/vector_io/faiss/faiss.py
index 20c795650..47256d88d 100644
--- a/llama_stack/providers/inline/vector_io/faiss/faiss.py
+++ b/llama_stack/providers/inline/vector_io/faiss/faiss.py
@@ -9,7 +9,7 @@ import base64
 import io
 import json
 import logging
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 import faiss
 import numpy as np
@@ -84,7 +84,7 @@ class FaissIndex(EmbeddingIndex):
 
         await self.kvstore.delete(f"{FAISS_INDEX_PREFIX}{self.bank_id}")
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         # Add dimension check
         embedding_dim = embeddings.shape[1] if len(embeddings.shape) > 1 else embeddings.shape[0]
         if embedding_dim != self.index.d:
@@ -99,9 +99,13 @@ class FaissIndex(EmbeddingIndex):
         # Save updated index
         await self._save_index()
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(
+        self,
+        embedding: NDArray,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
         distances, indices = await asyncio.to_thread(self.index.search, embedding.reshape(1, -1).astype(np.float32), k)
-
         chunks = []
         scores = []
         for d, i in zip(distances[0], indices[0], strict=False):
@@ -112,6 +116,14 @@ class FaissIndex(EmbeddingIndex):
 
         return QueryChunksResponse(chunks=chunks, scores=scores)
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in FAISS")
+
 
 class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(self, config: FaissVectorIOConfig, inference_api: Inference) -> None:
@@ -125,7 +137,7 @@ class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         # Load existing banks from kvstore
         start_key = VECTOR_DBS_PREFIX
         end_key = f"{VECTOR_DBS_PREFIX}\xff"
-        stored_vector_dbs = await self.kvstore.range(start_key, end_key)
+        stored_vector_dbs = await self.kvstore.values_in_range(start_key, end_key)
 
         for vector_db_data in stored_vector_dbs:
             vector_db = VectorDB.model_validate_json(vector_db_data)
@@ -159,7 +171,7 @@ class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
             inference_api=self.inference_api,
         )
 
-    async def list_vector_dbs(self) -> List[VectorDB]:
+    async def list_vector_dbs(self) -> list[VectorDB]:
         return [i.vector_db for i in self.cache.values()]
 
     async def unregister_vector_db(self, vector_db_id: str) -> None:
@@ -176,8 +188,8 @@ class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = self.cache.get(vector_db_id)
         if index is None:
@@ -189,7 +201,7 @@ class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = self.cache.get(vector_db_id)
         if index is None:
diff --git a/llama_stack/providers/inline/vector_io/milvus/__init__.py b/llama_stack/providers/inline/vector_io/milvus/__init__.py
index d88a3b005..fe3a1f7f9 100644
--- a/llama_stack/providers/inline/vector_io/milvus/__init__.py
+++ b/llama_stack/providers/inline/vector_io/milvus/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.datatypes import Api
 
 from .config import MilvusVectorIOConfig
 
 
-async def get_provider_impl(config: MilvusVectorIOConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: MilvusVectorIOConfig, deps: dict[Api, Any]):
     from llama_stack.providers.remote.vector_io.milvus.milvus import MilvusVectorIOAdapter
 
     impl = MilvusVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/inline/vector_io/milvus/config.py b/llama_stack/providers/inline/vector_io/milvus/config.py
index 0e11d8c7c..eb22b5276 100644
--- a/llama_stack/providers/inline/vector_io/milvus/config.py
+++ b/llama_stack/providers/inline/vector_io/milvus/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -16,5 +16,5 @@ class MilvusVectorIOConfig(BaseModel):
     db_path: str
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {"db_path": "${env.MILVUS_DB_PATH}"}
diff --git a/llama_stack/providers/inline/vector_io/qdrant/__init__.py b/llama_stack/providers/inline/vector_io/qdrant/__init__.py
index 8f0b91c61..ee33b3797 100644
--- a/llama_stack/providers/inline/vector_io/qdrant/__init__.py
+++ b/llama_stack/providers/inline/vector_io/qdrant/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 from .config import QdrantVectorIOConfig
 
 
-async def get_adapter_impl(config: QdrantVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: QdrantVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from llama_stack.providers.remote.vector_io.qdrant.qdrant import QdrantVectorIOAdapter
 
     impl = QdrantVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/inline/vector_io/qdrant/config.py b/llama_stack/providers/inline/vector_io/qdrant/config.py
index 282e951b0..283724b41 100644
--- a/llama_stack/providers/inline/vector_io/qdrant/config.py
+++ b/llama_stack/providers/inline/vector_io/qdrant/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -17,7 +17,7 @@ class QdrantVectorIOConfig(BaseModel):
     path: str
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "path": "${env.QDRANT_PATH:~/.llama/" + __distro_dir__ + "}/" + "qdrant.db",
         }
diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py b/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py
index 2380eb0ef..6db176eda 100644
--- a/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py
+++ b/llama_stack/providers/inline/vector_io/sqlite_vec/__init__.py
@@ -4,14 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.providers.datatypes import Api
 
 from .config import SQLiteVectorIOConfig
 
 
-async def get_provider_impl(config: SQLiteVectorIOConfig, deps: Dict[Api, Any]):
+async def get_provider_impl(config: SQLiteVectorIOConfig, deps: dict[Api, Any]):
     from .sqlite_vec import SQLiteVecVectorIOAdapter
 
     assert isinstance(config, SQLiteVectorIOConfig), f"Unexpected config type: {type(config)}"
diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/config.py b/llama_stack/providers/inline/vector_io/sqlite_vec/config.py
index 906c19689..cb806cb39 100644
--- a/llama_stack/providers/inline/vector_io/sqlite_vec/config.py
+++ b/llama_stack/providers/inline/vector_io/sqlite_vec/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -13,7 +13,7 @@ class SQLiteVectorIOConfig(BaseModel):
     db_path: str
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "db_path": "${env.SQLITE_STORE_DIR:" + __distro_dir__ + "}/" + "sqlite_vec.db",
         }
diff --git a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
index 5f7671138..fc1a8ddb0 100644
--- a/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
+++ b/llama_stack/providers/inline/vector_io/sqlite_vec/sqlite_vec.py
@@ -10,7 +10,7 @@ import logging
 import sqlite3
 import struct
 import uuid
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 import numpy as np
 import sqlite_vec
@@ -24,8 +24,13 @@ from llama_stack.providers.utils.memory.vector_store import EmbeddingIndex, Vect
 
 logger = logging.getLogger(__name__)
 
+# Supported search modes depend on the VectorIO provider.
+VECTOR_SEARCH = "vector"
+KEYWORD_SEARCH = "keyword"
+SEARCH_MODES = {VECTOR_SEARCH, KEYWORD_SEARCH}
 
-def serialize_vector(vector: List[float]) -> bytes:
+
+def serialize_vector(vector: list[float]) -> bytes:
     """Serialize a list of floats into a compact binary representation."""
     return struct.pack(f"{len(vector)}f", *vector)
 
@@ -45,6 +50,7 @@ class SQLiteVecIndex(EmbeddingIndex):
     Two tables are used:
       - A metadata table (chunks_{bank_id}) that holds the chunk JSON.
       - A virtual table (vec_chunks_{bank_id}) that holds the serialized vector.
+      - An FTS5 table (fts_chunks_{bank_id}) for full-text keyword search.
     """
 
     def __init__(self, dimension: int, db_path: str, bank_id: str):
@@ -53,6 +59,7 @@ class SQLiteVecIndex(EmbeddingIndex):
         self.bank_id = bank_id
         self.metadata_table = f"chunks_{bank_id}".replace("-", "_")
         self.vector_table = f"vec_chunks_{bank_id}".replace("-", "_")
+        self.fts_table = f"fts_chunks_{bank_id}".replace("-", "_")
 
     @classmethod
     async def create(cls, dimension: int, db_path: str, bank_id: str):
@@ -78,6 +85,14 @@ class SQLiteVecIndex(EmbeddingIndex):
                     USING vec0(embedding FLOAT[{self.dimension}], id TEXT);
                 """)
                 connection.commit()
+                # FTS5 table (for keyword search). Both tables are created by default and the relevant one is
+                # used based on the query mode. A future client-side change will allow passing a search_mode
+                # option at initialization so that only the required table is created.
+                cur.execute(f"""
+                            CREATE VIRTUAL TABLE IF NOT EXISTS {self.fts_table}
+                            USING fts5(id, content);
+                        """)
+                connection.commit()
             finally:
                 cur.close()
                 connection.close()
@@ -91,6 +106,7 @@ class SQLiteVecIndex(EmbeddingIndex):
             try:
                 cur.execute(f"DROP TABLE IF EXISTS {self.metadata_table};")
                 cur.execute(f"DROP TABLE IF EXISTS {self.vector_table};")
+                cur.execute(f"DROP TABLE IF EXISTS {self.fts_table};")
                 connection.commit()
             finally:
                 cur.close()
@@ -98,12 +114,13 @@ class SQLiteVecIndex(EmbeddingIndex):
 
         await asyncio.to_thread(_drop_tables)
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray, batch_size: int = 500):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray, batch_size: int = 500):
         """
         Add new chunks along with their embeddings using batch inserts.
         For each chunk, we insert its JSON into the metadata table and then insert its
         embedding (serialized to raw bytes) into the virtual table using the assigned rowid.
         If any insert fails, the transaction is rolled back to maintain consistency.
+        Also inserts chunk content into FTS table for keyword search support.
         """
         assert all(isinstance(chunk.content, str) for chunk in chunks), "SQLiteVecIndex only supports text chunks"
 
@@ -112,18 +129,16 @@ class SQLiteVecIndex(EmbeddingIndex):
             cur = connection.cursor()
 
             try:
-                # Start transaction a single transcation for all batches
                 cur.execute("BEGIN TRANSACTION")
                 for i in range(0, len(chunks), batch_size):
                     batch_chunks = chunks[i : i + batch_size]
                     batch_embeddings = embeddings[i : i + batch_size]
-                    # Prepare metadata inserts
+
+                    # Insert metadata
                     metadata_data = [
                         (generate_chunk_id(chunk.metadata["document_id"], chunk.content), chunk.model_dump_json())
                         for chunk in batch_chunks
-                        if isinstance(chunk.content, str)
                     ]
-                    # Insert metadata (ON CONFLICT to avoid duplicates)
                     cur.executemany(
                         f"""
                         INSERT INTO {self.metadata_table} (id, chunk)
@@ -132,21 +147,43 @@ class SQLiteVecIndex(EmbeddingIndex):
                         """,
                         metadata_data,
                     )
-                    # Prepare embeddings inserts
+
+                    # Insert vector embeddings
                     embedding_data = [
                         (
-                            generate_chunk_id(chunk.metadata["document_id"], chunk.content),
-                            serialize_vector(emb.tolist()),
+                            (
+                                generate_chunk_id(chunk.metadata["document_id"], chunk.content),
+                                serialize_vector(emb.tolist()),
+                            )
                         )
                         for chunk, emb in zip(batch_chunks, batch_embeddings, strict=True)
-                        if isinstance(chunk.content, str)
                     ]
-                    # Insert embeddings in batch
-                    cur.executemany(f"INSERT INTO {self.vector_table} (id, embedding) VALUES (?, ?);", embedding_data)
+                    cur.executemany(
+                        f"INSERT INTO {self.vector_table} (id, embedding) VALUES (?, ?);",
+                        embedding_data,
+                    )
+
+                    # Insert FTS content
+                    fts_data = [
+                        (generate_chunk_id(chunk.metadata["document_id"], chunk.content), chunk.content)
+                        for chunk in batch_chunks
+                    ]
+                    # DELETE existing entries with same IDs (FTS5 doesn't support ON CONFLICT)
+                    cur.executemany(
+                        f"DELETE FROM {self.fts_table} WHERE id = ?;",
+                        [(row[0],) for row in fts_data],
+                    )
+
+                    # INSERT new entries
+                    cur.executemany(
+                        f"INSERT INTO {self.fts_table} (id, content) VALUES (?, ?);",
+                        fts_data,
+                    )
+
                 connection.commit()
 
             except sqlite3.Error as e:
-                connection.rollback()  # Rollback on failure
+                connection.rollback()
                 logger.error(f"Error inserting into {self.vector_table}: {e}")
                 raise
 
@@ -154,22 +191,25 @@ class SQLiteVecIndex(EmbeddingIndex):
                 cur.close()
                 connection.close()
 
-        # Process all batches in a single thread
+        # Run batch insertion in a background thread
         await asyncio.to_thread(_execute_all_batch_inserts)
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(
+        self,
+        embedding: NDArray,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
         """
-        Query for the k most similar chunks. We convert the query embedding to a blob and run a SQL query
-        against the virtual table. The SQL joins the metadata table to recover the chunk JSON.
+        Performs vector-based search using a virtual table for vector similarity.
         """
-        emb_list = embedding.tolist() if isinstance(embedding, np.ndarray) else list(embedding)
-        emb_blob = serialize_vector(emb_list)
 
         def _execute_query():
             connection = _create_sqlite_connection(self.db_path)
             cur = connection.cursor()
-
             try:
+                emb_list = embedding.tolist() if isinstance(embedding, np.ndarray) else list(embedding)
+                emb_blob = serialize_vector(emb_list)
                 query_sql = f"""
                     SELECT m.id, m.chunk, v.distance
                     FROM {self.vector_table} AS v
@@ -184,17 +224,66 @@ class SQLiteVecIndex(EmbeddingIndex):
                 connection.close()
 
         rows = await asyncio.to_thread(_execute_query)
-
         chunks, scores = [], []
-        for _id, chunk_json, distance in rows:
+        for row in rows:
+            _id, chunk_json, distance = row
+            score = 1.0 / distance if distance != 0 else float("inf")
+            if score < score_threshold:
+                continue
+            try:
+                chunk = Chunk.model_validate_json(chunk_json)
+            except Exception as e:
+                logger.error(f"Error parsing chunk JSON for id {_id}: {e}")
+                continue
+            chunks.append(chunk)
+            scores.append(score)
+        return QueryChunksResponse(chunks=chunks, scores=scores)
+
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        """
+        Performs keyword-based search using SQLite FTS5 for relevance-ranked full-text search.
+        """
+        if query_string is None:
+            raise ValueError("query_string is required for keyword search.")
+
+        def _execute_query():
+            connection = _create_sqlite_connection(self.db_path)
+            cur = connection.cursor()
+            try:
+                query_sql = f"""
+                    SELECT DISTINCT m.id, m.chunk, bm25({self.fts_table}) AS score
+                    FROM {self.fts_table} AS f
+                    JOIN {self.metadata_table} AS m ON m.id = f.id
+                    WHERE f.content MATCH ?
+                    ORDER BY score ASC
+                    LIMIT ?;
+                """
+                cur.execute(query_sql, (query_string, k))
+                return cur.fetchall()
+            finally:
+                cur.close()
+                connection.close()
+
+        rows = await asyncio.to_thread(_execute_query)
+        chunks, scores = [], []
+        for row in rows:
+            _id, chunk_json, score = row
+            # BM25 scores returned by SQLite FTS5's bm25() are negative (more relevant = more negative),
+            # which makes sorting by ascending score straightforward.
+            # Reference: https://alexgarcia.xyz/blog/2024/sqlite-vec-hybrid-search/index.html
+            if score > -score_threshold:
+                continue
             try:
                 chunk = Chunk.model_validate_json(chunk_json)
             except Exception as e:
                 logger.error(f"Error parsing chunk JSON for id {_id}: {e}")
                 continue
             chunks.append(chunk)
-            # Mimic the Faiss scoring: score = 1/distance (avoid division by zero)
-            score = 1.0 / distance if distance != 0 else float("inf")
             scores.append(score)
         return QueryChunksResponse(chunks=chunks, scores=scores)
 
@@ -209,7 +298,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(self, config, inference_api: Inference) -> None:
         self.config = config
         self.inference_api = inference_api
-        self.cache: Dict[str, VectorDBWithIndex] = {}
+        self.cache: dict[str, VectorDBWithIndex] = {}
 
     async def initialize(self) -> None:
         def _setup_connection():
@@ -264,7 +353,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         index = await SQLiteVecIndex.create(vector_db.embedding_dimension, self.config.db_path, vector_db.identifier)
         self.cache[vector_db.identifier] = VectorDBWithIndex(vector_db, index, self.inference_api)
 
-    async def list_vector_dbs(self) -> List[VectorDB]:
+    async def list_vector_dbs(self) -> list[VectorDB]:
         return [v.vector_db for v in self.cache.values()]
 
     async def unregister_vector_db(self, vector_db_id: str) -> None:
@@ -286,7 +375,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
 
         await asyncio.to_thread(_delete_vector_db_from_registry)
 
-    async def insert_chunks(self, vector_db_id: str, chunks: List[Chunk], ttl_seconds: Optional[int] = None) -> None:
+    async def insert_chunks(self, vector_db_id: str, chunks: list[Chunk], ttl_seconds: int | None = None) -> None:
         if vector_db_id not in self.cache:
             raise ValueError(f"Vector DB {vector_db_id} not found. Found: {list(self.cache.keys())}")
         # The VectorDBWithIndex helper is expected to compute embeddings via the inference_api
@@ -294,7 +383,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         await self.cache[vector_db_id].insert_chunks(chunks)
 
     async def query_chunks(
-        self, vector_db_id: str, query: Any, params: Optional[Dict[str, Any]] = None
+        self, vector_db_id: str, query: Any, params: dict[str, Any] | None = None
     ) -> QueryChunksResponse:
         if vector_db_id not in self.cache:
             raise ValueError(f"Vector DB {vector_db_id} not found")
@@ -303,5 +392,5 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
 
 def generate_chunk_id(document_id: str, chunk_text: str) -> str:
     """Generate a unique chunk ID using a hash of document ID and chunk text."""
-    hash_input = f"{document_id}:{chunk_text}".encode("utf-8")
+    hash_input = f"{document_id}:{chunk_text}".encode()
     return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))
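
This file now exposes a keyword search path (`query_keyword`, backed by the FTS5 table) alongside vector search, and the RAG tool threads `query_config.mode` through as `params["mode"]`. A hedged sketch of exercising both modes through the adapter's `query_chunks` follows; it assumes the `VectorDBWithIndex` helper (in `llama_stack/providers/utils/memory/vector_store.py`, not part of this diff) routes `params["mode"] == "keyword"` to `query_keyword()`:

```python
# Sketch only: mode routing is assumed to happen in VectorDBWithIndex, outside this diff.
async def search_examples(adapter, vector_db_id: str) -> None:
    # Default vector similarity search
    vector_results = await adapter.query_chunks(
        vector_db_id,
        query="What is Llama Stack?",
        params={"max_chunks": 5, "mode": "vector"},
    )

    # FTS5-backed keyword search (sqlite-vec only; the FAISS index raises NotImplementedError)
    keyword_results = await adapter.query_chunks(
        vector_db_id,
        query="Llama Stack",
        params={"max_chunks": 5, "mode": "keyword"},
    )

    print(len(vector_results.chunks), len(keyword_results.chunks))
```
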
diff --git a/llama_stack/providers/registry/agents.py b/llama_stack/providers/registry/agents.py
index 3ed59304d..e0801a8d1 100644
--- a/llama_stack/providers/registry/agents.py
+++ b/llama_stack/providers/registry/agents.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     Api,
@@ -14,7 +13,7 @@ from llama_stack.providers.datatypes import (
 from llama_stack.providers.utils.kvstore import kvstore_dependencies
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.agents,
diff --git a/llama_stack/providers/registry/datasetio.py b/llama_stack/providers/registry/datasetio.py
index f83dcbc60..152cc9cb9 100644
--- a/llama_stack/providers/registry/datasetio.py
+++ b/llama_stack/providers/registry/datasetio.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -15,7 +14,7 @@ from llama_stack.providers.datatypes import (
 )
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.datasetio,
@@ -36,4 +35,15 @@ def available_providers() -> List[ProviderSpec]:
                 config_class="llama_stack.providers.remote.datasetio.huggingface.HuggingfaceDatasetIOConfig",
             ),
         ),
+        remote_provider_spec(
+            api=Api.datasetio,
+            adapter=AdapterSpec(
+                adapter_type="nvidia",
+                pip_packages=[
+                    "datasets",
+                ],
+                module="llama_stack.providers.remote.datasetio.nvidia",
+                config_class="llama_stack.providers.remote.datasetio.nvidia.NvidiaDatasetIOConfig",
+            ),
+        ),
     ]
diff --git a/llama_stack/providers/registry/eval.py b/llama_stack/providers/registry/eval.py
index 9604d5da4..c9c29bbe0 100644
--- a/llama_stack/providers/registry/eval.py
+++ b/llama_stack/providers/registry/eval.py
@@ -4,12 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import AdapterSpec, Api, InlineProviderSpec, ProviderSpec, remote_provider_spec
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.eval,
diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py
index 4040f0d80..7b49ef09b 100644
--- a/llama_stack/providers/registry/inference.py
+++ b/llama_stack/providers/registry/inference.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -29,7 +28,7 @@ META_REFERENCE_DEPS = [
 ]
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.inference,
@@ -227,6 +226,16 @@ def available_providers() -> List[ProviderSpec]:
                 provider_data_validator="llama_stack.providers.remote.inference.fireworks_openai_compat.config.FireworksProviderDataValidator",
             ),
         ),
+        remote_provider_spec(
+            api=Api.inference,
+            adapter=AdapterSpec(
+                adapter_type="llama-openai-compat",
+                pip_packages=["litellm"],
+                module="llama_stack.providers.remote.inference.llama_openai_compat",
+                config_class="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaCompatConfig",
+                provider_data_validator="llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator",
+            ),
+        ),
         remote_provider_spec(
             api=Api.inference,
             adapter=AdapterSpec(
@@ -271,11 +280,10 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="sambanova",
-                pip_packages=[
-                    "openai",
-                ],
+                pip_packages=["litellm"],
                 module="llama_stack.providers.remote.inference.sambanova",
                 config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
+                provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator",
             ),
         ),
         remote_provider_spec(
diff --git a/llama_stack/providers/registry/post_training.py b/llama_stack/providers/registry/post_training.py
index 4d10fcf3b..d752b8819 100644
--- a/llama_stack/providers/registry/post_training.py
+++ b/llama_stack/providers/registry/post_training.py
@@ -4,12 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import AdapterSpec, Api, InlineProviderSpec, ProviderSpec, remote_provider_spec
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.post_training,
@@ -22,6 +21,17 @@ def available_providers() -> List[ProviderSpec]:
                 Api.datasets,
             ],
         ),
+        InlineProviderSpec(
+            api=Api.post_training,
+            provider_type="inline::huggingface",
+            pip_packages=["torch", "trl", "transformers", "peft", "datasets"],
+            module="llama_stack.providers.inline.post_training.huggingface",
+            config_class="llama_stack.providers.inline.post_training.huggingface.HuggingFacePostTrainingConfig",
+            api_dependencies=[
+                Api.datasetio,
+                Api.datasets,
+            ],
+        ),
         remote_provider_spec(
             api=Api.post_training,
             adapter=AdapterSpec(
diff --git a/llama_stack/providers/registry/safety.py b/llama_stack/providers/registry/safety.py
index 54dc51034..e0a04be48 100644
--- a/llama_stack/providers/registry/safety.py
+++ b/llama_stack/providers/registry/safety.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -15,7 +14,7 @@ from llama_stack.providers.datatypes import (
 )
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.safety,
@@ -64,4 +63,14 @@ def available_providers() -> List[ProviderSpec]:
                 config_class="llama_stack.providers.remote.safety.nvidia.NVIDIASafetyConfig",
             ),
         ),
+        remote_provider_spec(
+            api=Api.safety,
+            adapter=AdapterSpec(
+                adapter_type="sambanova",
+                pip_packages=["litellm"],
+                module="llama_stack.providers.remote.safety.sambanova",
+                config_class="llama_stack.providers.remote.safety.sambanova.SambaNovaSafetyConfig",
+                provider_data_validator="llama_stack.providers.remote.safety.sambanova.config.SambaNovaProviderDataValidator",
+            ),
+        ),
     ]
diff --git a/llama_stack/providers/registry/scoring.py b/llama_stack/providers/registry/scoring.py
index ca09be984..7980d6a13 100644
--- a/llama_stack/providers/registry/scoring.py
+++ b/llama_stack/providers/registry/scoring.py
@@ -4,12 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.scoring,
diff --git a/llama_stack/providers/registry/telemetry.py b/llama_stack/providers/registry/telemetry.py
index fc249f3e2..14da06126 100644
--- a/llama_stack/providers/registry/telemetry.py
+++ b/llama_stack/providers/registry/telemetry.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     Api,
@@ -13,7 +12,7 @@ from llama_stack.providers.datatypes import (
 )
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.telemetry,
diff --git a/llama_stack/providers/registry/tool_runtime.py b/llama_stack/providers/registry/tool_runtime.py
index 95ea2dcf9..277914df2 100644
--- a/llama_stack/providers/registry/tool_runtime.py
+++ b/llama_stack/providers/registry/tool_runtime.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -15,7 +14,7 @@ from llama_stack.providers.datatypes import (
 )
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.tool_runtime,
@@ -36,13 +35,6 @@ def available_providers() -> List[ProviderSpec]:
             config_class="llama_stack.providers.inline.tool_runtime.rag.config.RagToolRuntimeConfig",
             api_dependencies=[Api.vector_io, Api.inference],
         ),
-        InlineProviderSpec(
-            api=Api.tool_runtime,
-            provider_type="inline::code-interpreter",
-            pip_packages=[],
-            module="llama_stack.providers.inline.tool_runtime.code_interpreter",
-            config_class="llama_stack.providers.inline.tool_runtime.code_interpreter.config.CodeInterpreterToolConfig",
-        ),
         remote_provider_spec(
             api=Api.tool_runtime,
             adapter=AdapterSpec(
@@ -88,8 +80,9 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="model-context-protocol",
                 module="llama_stack.providers.remote.tool_runtime.model_context_protocol",
-                config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.ModelContextProtocolConfig",
+                config_class="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderConfig",
                 pip_packages=["mcp"],
+                provider_data_validator="llama_stack.providers.remote.tool_runtime.model_context_protocol.config.MCPProviderDataValidator",
             ),
         ),
     ]
diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py
index 93031763d..d888c8420 100644
--- a/llama_stack/providers/registry/vector_io.py
+++ b/llama_stack/providers/registry/vector_io.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.providers.datatypes import (
     AdapterSpec,
@@ -15,7 +14,7 @@ from llama_stack.providers.datatypes import (
 )
 
 
-def available_providers() -> List[ProviderSpec]:
+def available_providers() -> list[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.vector_io,
diff --git a/llama_stack/providers/remote/datasetio/huggingface/config.py b/llama_stack/providers/remote/datasetio/huggingface/config.py
index c06996b6f..38f933728 100644
--- a/llama_stack/providers/remote/datasetio/huggingface/config.py
+++ b/llama_stack/providers/remote/datasetio/huggingface/config.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -17,7 +17,7 @@ class HuggingfaceDatasetIOConfig(BaseModel):
     kvstore: KVStoreConfig
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
diff --git a/llama_stack/providers/remote/datasetio/huggingface/huggingface.py b/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
index 7a17e5e42..fafd1d8ff 100644
--- a/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
+++ b/llama_stack/providers/remote/datasetio/huggingface/huggingface.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List, Optional
+from typing import Any
 from urllib.parse import parse_qs, urlparse
 
 import datasets as hf_datasets
@@ -12,8 +12,8 @@ from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Dataset
 from llama_stack.providers.datatypes import DatasetsProtocolPrivate
-from llama_stack.providers.utils.datasetio.pagination import paginate_records
 from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.pagination import paginate_records
 
 from .config import HuggingfaceDatasetIOConfig
 
@@ -42,7 +42,7 @@ class HuggingfaceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
         # Load existing datasets from kvstore
         start_key = DATASETS_PREFIX
         end_key = f"{DATASETS_PREFIX}\xff"
-        stored_datasets = await self.kvstore.range(start_key, end_key)
+        stored_datasets = await self.kvstore.values_in_range(start_key, end_key)
 
         for dataset in stored_datasets:
             dataset = Dataset.model_validate_json(dataset)
@@ -70,8 +70,8 @@ class HuggingfaceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
     async def iterrows(
         self,
         dataset_id: str,
-        start_index: Optional[int] = None,
-        limit: Optional[int] = None,
+        start_index: int | None = None,
+        limit: int | None = None,
     ) -> PaginatedResponse:
         dataset_def = self.dataset_infos[dataset_id]
         path, params = parse_hf_params(dataset_def)
@@ -80,7 +80,7 @@ class HuggingfaceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
         records = [loaded_dataset[i] for i in range(len(loaded_dataset))]
         return paginate_records(records, start_index, limit)
 
-    async def append_rows(self, dataset_id: str, rows: List[Dict[str, Any]]) -> None:
+    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
         dataset_def = self.dataset_infos[dataset_id]
         path, params = parse_hf_params(dataset_def)
         loaded_dataset = hf_datasets.load_dataset(path, **params)
diff --git a/llama_stack/providers/remote/datasetio/nvidia/README.md b/llama_stack/providers/remote/datasetio/nvidia/README.md
new file mode 100644
index 000000000..1d3d15132
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/nvidia/README.md
@@ -0,0 +1,74 @@
+# NVIDIA DatasetIO Provider for LlamaStack
+
+This provider enables dataset management using NVIDIA's NeMo Customizer service.
+
+## Features
+
+- Register datasets for fine-tuning LLMs
+- Unregister datasets
+
+## Getting Started
+
+### Prerequisites
+
+- LlamaStack with NVIDIA configuration
+- Access to a hosted NVIDIA NeMo Microservice
+- API key for authentication with the NVIDIA service
+
+### Setup
+
+Build the NVIDIA environment:
+
+```bash
+llama stack build --template nvidia --image-type conda
+```
+
+### Basic Usage using the LlamaStack Python Client
+
+#### Initialize the client
+
+```python
+import os
+
+os.environ["NVIDIA_API_KEY"] = "your-api-key"
+os.environ["NVIDIA_CUSTOMIZER_URL"] = "http://nemo.test"
+os.environ["NVIDIA_USER_ID"] = "llama-stack-user"
+os.environ["NVIDIA_DATASET_NAMESPACE"] = "default"
+os.environ["NVIDIA_PROJECT_ID"] = "test-project"
+from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
+
+client = LlamaStackAsLibraryClient("nvidia")
+client.initialize()
+```
+
+#### Register a dataset
+
+```python
+client.datasets.register(
+    purpose="post-training/messages",
+    dataset_id="my-training-dataset",
+    source={"type": "uri", "uri": "hf://datasets/default/sample-dataset"},
+    metadata={
+        "format": "json",
+        "description": "Dataset for LLM fine-tuning",
+        "provider": "nvidia",
+    },
+)
+```
+
+#### Get a list of all registered datasets
+
+```python
+datasets = client.datasets.list()
+for dataset in datasets:
+    print(f"Dataset ID: {dataset.identifier}")
+    print(f"Description: {dataset.metadata.get('description', '')}")
+    print(f"Source: {dataset.source.uri}")
+    print("---")
+```
+
+#### Unregister a dataset
+
+```python
+client.datasets.unregister(dataset_id="my-training-dataset")
+```
diff --git a/llama_stack/providers/remote/datasetio/nvidia/__init__.py b/llama_stack/providers/remote/datasetio/nvidia/__init__.py
new file mode 100644
index 000000000..418daec8d
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/nvidia/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .config import NvidiaDatasetIOConfig
+
+
+async def get_adapter_impl(
+    config: NvidiaDatasetIOConfig,
+    _deps,
+):
+    from .datasetio import NvidiaDatasetIOAdapter
+
+    if not isinstance(config, NvidiaDatasetIOConfig):
+        raise RuntimeError(f"Unexpected config type: {type(config)}")
+
+    impl = NvidiaDatasetIOAdapter(config)
+    return impl
+
+
+__all__ = ["get_adapter_impl", "NvidiaDatasetIOAdapter"]
diff --git a/llama_stack/providers/remote/datasetio/nvidia/config.py b/llama_stack/providers/remote/datasetio/nvidia/config.py
new file mode 100644
index 000000000..e616ce25c
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/nvidia/config.py
@@ -0,0 +1,61 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import os
+import warnings
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class NvidiaDatasetIOConfig(BaseModel):
+    """Configuration for NVIDIA DatasetIO implementation."""
+
+    api_key: str | None = Field(
+        default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
+        description="The NVIDIA API key.",
+    )
+
+    dataset_namespace: str | None = Field(
+        default_factory=lambda: os.getenv("NVIDIA_DATASET_NAMESPACE", "default"),
+        description="The NVIDIA dataset namespace.",
+    )
+
+    project_id: str | None = Field(
+        default_factory=lambda: os.getenv("NVIDIA_PROJECT_ID", "test-project"),
+        description="The NVIDIA project ID.",
+    )
+
+    datasets_url: str = Field(
+        default_factory=lambda: os.getenv("NVIDIA_DATASETS_URL", "http://nemo.test"),
+        description="Base URL for the NeMo Dataset API",
+    )
+
+    # warning for default values
+    def model_post_init(self, __context: Any) -> None:
+        default_values = []
+        if os.getenv("NVIDIA_PROJECT_ID") is None:
+            default_values.append("project_id='test-project'")
+        if os.getenv("NVIDIA_DATASET_NAMESPACE") is None:
+            default_values.append("dataset_namespace='default'")
+        if os.getenv("NVIDIA_DATASETS_URL") is None:
+            default_values.append("datasets_url='http://nemo.test'")
+
+        if default_values:
+            warnings.warn(
+                f"Using default values: {', '.join(default_values)}. \
+                          Please set the environment variables to avoid this default behavior.",
+                stacklevel=2,
+            )
+
+    @classmethod
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
+        return {
+            "api_key": "${env.NVIDIA_API_KEY:}",
+            "dataset_namespace": "${env.NVIDIA_DATASET_NAMESPACE:default}",
+            "project_id": "${env.NVIDIA_PROJECT_ID:test-project}",
+            "datasets_url": "${env.NVIDIA_DATASETS_URL:http://nemo.test}",
+        }
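
Each field on `NvidiaDatasetIOConfig` defaults from an environment variable via `default_factory`, so constructing the config with no arguments picks up whatever is exported and otherwise falls back to the documented defaults. A small sketch, assuming it is run outside a distribution context:

```python
import os

from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig

os.environ["NVIDIA_DATASETS_URL"] = "http://nemo.test"
os.environ.pop("NVIDIA_PROJECT_ID", None)  # unset -> falls back to "test-project"

config = NvidiaDatasetIOConfig()
print(config.datasets_url)  # http://nemo.test
print(config.project_id)    # test-project (a warning about relying on defaults may be emitted)
```
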
diff --git a/llama_stack/providers/remote/datasetio/nvidia/datasetio.py b/llama_stack/providers/remote/datasetio/nvidia/datasetio.py
new file mode 100644
index 000000000..6a9e2bb58
--- /dev/null
+++ b/llama_stack/providers/remote/datasetio/nvidia/datasetio.py
@@ -0,0 +1,112 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+import aiohttp
+
+from llama_stack.apis.common.content_types import URL
+from llama_stack.apis.common.responses import PaginatedResponse
+from llama_stack.apis.common.type_system import ParamType
+from llama_stack.apis.datasets import Dataset
+
+from .config import NvidiaDatasetIOConfig
+
+
+class NvidiaDatasetIOAdapter:
+    """Nvidia NeMo DatasetIO API."""
+
+    def __init__(self, config: NvidiaDatasetIOConfig):
+        self.config = config
+        self.headers = {}
+
+    async def _make_request(
+        self,
+        method: str,
+        path: str,
+        headers: dict[str, Any] | None = None,
+        params: dict[str, Any] | None = None,
+        json: dict[str, Any] | None = None,
+        **kwargs,
+    ) -> dict[str, Any]:
+        """Helper method to make HTTP requests to the Customizer API."""
+        url = f"{self.config.datasets_url}{path}"
+        request_headers = self.headers.copy()
+
+        if headers:
+            request_headers.update(headers)
+
+        async with aiohttp.ClientSession(headers=request_headers) as session:
+            async with session.request(method, url, params=params, json=json, **kwargs) as response:
+                if response.status != 200:
+                    error_data = await response.json()
+                    raise Exception(f"API request failed: {error_data}")
+                return await response.json()
+
+    async def register_dataset(
+        self,
+        dataset_def: Dataset,
+    ) -> Dataset:
+        """Register a new dataset.
+
+        Args:
+            dataset_def [Dataset]: The dataset definition.
+                dataset_id [str]: The ID of the dataset.
+                source [DataSource]: The source of the dataset.
+                metadata [Dict[str, Any]]: The metadata of the dataset.
+                    format [str]: The format of the dataset.
+                    description [str]: The description of the dataset.
+        Returns:
+            Dataset
+        """
+        ## add warnings for unsupported params
+        request_body = {
+            "name": dataset_def.identifier,
+            "namespace": self.config.dataset_namespace,
+            "files_url": dataset_def.source.uri,
+            "project": self.config.project_id,
+        }
+        if dataset_def.metadata:
+            request_body["format"] = dataset_def.metadata.get("format")
+            request_body["description"] = dataset_def.metadata.get("description")
+        await self._make_request(
+            "POST",
+            "/v1/datasets",
+            json=request_body,
+        )
+        return dataset_def
+
+    async def update_dataset(
+        self,
+        dataset_id: str,
+        dataset_schema: dict[str, ParamType],
+        url: URL,
+        provider_dataset_id: str | None = None,
+        provider_id: str | None = None,
+        metadata: dict[str, Any] | None = None,
+    ) -> None:
+        raise NotImplementedError("Not implemented")
+
+    async def unregister_dataset(
+        self,
+        dataset_id: str,
+    ) -> None:
+        await self._make_request(
+            "DELETE",
+            f"/v1/datasets/{self.config.dataset_namespace}/{dataset_id}",
+            headers={"Accept": "application/json", "Content-Type": "application/json"},
+        )
+
+    async def iterrows(
+        self,
+        dataset_id: str,
+        start_index: int | None = None,
+        limit: int | None = None,
+    ) -> PaginatedResponse:
+        raise NotImplementedError("Not implemented")
+
+    async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
+        raise NotImplementedError("Not implemented")
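
For reference, this is the JSON body that `register_dataset` above POSTs to `{datasets_url}/v1/datasets`, reconstructed from the `request_body` assembly; the concrete values are illustrative and mirror the README example rather than anything mandated by the API:

```python
# Illustrative payload only; field values are placeholders.
payload = {
    "name": "my-training-dataset",                        # dataset_def.identifier
    "namespace": "default",                               # config.dataset_namespace
    "files_url": "hf://datasets/default/sample-dataset",  # dataset_def.source.uri
    "project": "test-project",                            # config.project_id
    "format": "json",                                     # from dataset_def.metadata, if present
    "description": "Dataset for LLM fine-tuning",         # from dataset_def.metadata, if present
}
# POSTed to f"{config.datasets_url}/v1/datasets" by _make_request("POST", "/v1/datasets", json=payload)
```
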
diff --git a/llama_stack/providers/remote/eval/nvidia/__init__.py b/llama_stack/providers/remote/eval/nvidia/__init__.py
index 8abbec9b2..55e3754f3 100644
--- a/llama_stack/providers/remote/eval/nvidia/__init__.py
+++ b/llama_stack/providers/remote/eval/nvidia/__init__.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict
+from typing import Any
 
 from llama_stack.distribution.datatypes import Api
 
@@ -12,7 +12,7 @@ from .config import NVIDIAEvalConfig
 
 async def get_adapter_impl(
     config: NVIDIAEvalConfig,
-    deps: Dict[Api, Any],
+    deps: dict[Api, Any],
 ):
     from .eval import NVIDIAEvalImpl
 
diff --git a/llama_stack/providers/remote/eval/nvidia/config.py b/llama_stack/providers/remote/eval/nvidia/config.py
index b660fcd68..5c8f9ff76 100644
--- a/llama_stack/providers/remote/eval/nvidia/config.py
+++ b/llama_stack/providers/remote/eval/nvidia/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import os
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -23,7 +23,7 @@ class NVIDIAEvalConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "evaluator_url": "${env.NVIDIA_EVALUATOR_URL:http://localhost:7331}",
         }
diff --git a/llama_stack/providers/remote/eval/nvidia/eval.py b/llama_stack/providers/remote/eval/nvidia/eval.py
index e1a3b5355..3572de0ef 100644
--- a/llama_stack/providers/remote/eval/nvidia/eval.py
+++ b/llama_stack/providers/remote/eval/nvidia/eval.py
@@ -3,7 +3,7 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Any, Dict, List
+from typing import Any
 
 import requests
 
@@ -101,8 +101,8 @@ class NVIDIAEvalImpl(
     async def evaluate_rows(
         self,
         benchmark_id: str,
-        input_rows: List[Dict[str, Any]],
-        scoring_functions: List[str],
+        input_rows: list[dict[str, Any]],
+        scoring_functions: list[str],
         benchmark_config: BenchmarkConfig,
     ) -> EvaluateResponse:
         raise NotImplementedError()
diff --git a/llama_stack/providers/remote/inference/anthropic/__init__.py b/llama_stack/providers/remote/inference/anthropic/__init__.py
index 3075f856e..8b420a5a0 100644
--- a/llama_stack/providers/remote/inference/anthropic/__init__.py
+++ b/llama_stack/providers/remote/inference/anthropic/__init__.py
@@ -4,15 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
-
 from pydantic import BaseModel
 
 from .config import AnthropicConfig
 
 
 class AnthropicProviderDataValidator(BaseModel):
-    anthropic_api_key: Optional[str] = None
+    anthropic_api_key: str | None = None
 
 
 async def get_adapter_impl(config: AnthropicConfig, _deps):
diff --git a/llama_stack/providers/remote/inference/anthropic/config.py b/llama_stack/providers/remote/inference/anthropic/config.py
index 0e9469602..10da0025e 100644
--- a/llama_stack/providers/remote/inference/anthropic/config.py
+++ b/llama_stack/providers/remote/inference/anthropic/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class AnthropicProviderDataValidator(BaseModel):
-    anthropic_api_key: Optional[str] = Field(
+    anthropic_api_key: str | None = Field(
         default=None,
         description="API key for Anthropic models",
     )
@@ -20,13 +20,13 @@ class AnthropicProviderDataValidator(BaseModel):
 
 @json_schema_type
 class AnthropicConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="API key for Anthropic models",
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.ANTHROPIC_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.ANTHROPIC_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "api_key": api_key,
         }
diff --git a/llama_stack/providers/remote/inference/bedrock/__init__.py b/llama_stack/providers/remote/inference/bedrock/__init__.py
index e72c6ada9..4d98f4999 100644
--- a/llama_stack/providers/remote/inference/bedrock/__init__.py
+++ b/llama_stack/providers/remote/inference/bedrock/__init__.py
@@ -1,18 +1,18 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-from .config import BedrockConfig
-
-
-async def get_adapter_impl(config: BedrockConfig, _deps):
-    from .bedrock import BedrockInferenceAdapter
-
-    assert isinstance(config, BedrockConfig), f"Unexpected config type: {type(config)}"
-
-    impl = BedrockInferenceAdapter(config)
-
-    await impl.initialize()
-
-    return impl
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from .config import BedrockConfig
+
+
+async def get_adapter_impl(config: BedrockConfig, _deps):
+    from .bedrock import BedrockInferenceAdapter
+
+    assert isinstance(config, BedrockConfig), f"Unexpected config type: {type(config)}"
+
+    impl = BedrockInferenceAdapter(config)
+
+    await impl.initialize()
+
+    return impl
diff --git a/llama_stack/providers/remote/inference/bedrock/bedrock.py b/llama_stack/providers/remote/inference/bedrock/bedrock.py
index f8dbcf31a..952d86f1a 100644
--- a/llama_stack/providers/remote/inference/bedrock/bedrock.py
+++ b/llama_stack/providers/remote/inference/bedrock/bedrock.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import json
-from typing import AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
 
 from botocore.client import BaseClient
 
@@ -22,6 +22,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -79,26 +80,26 @@ class BedrockInferenceAdapter(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         raise NotImplementedError()
 
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         if sampling_params is None:
             sampling_params = SamplingParams()
         model = await self.model_store.get_model(model_id)
@@ -151,7 +152,7 @@ class BedrockInferenceAdapter(
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params_for_chat_completion(self, request: ChatCompletionRequest) -> Dict:
+    async def _get_params_for_chat_completion(self, request: ChatCompletionRequest) -> dict:
         bedrock_model = request.model
 
         sampling_params = request.sampling_params
@@ -176,10 +177,10 @@ class BedrockInferenceAdapter(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
         embeddings = []
@@ -197,3 +198,13 @@ class BedrockInferenceAdapter(
             response_body = json.loads(response.get("body").read())
             embeddings.append(response_body.get("embedding"))
         return EmbeddingsResponse(embeddings=embeddings)
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
diff --git a/llama_stack/providers/remote/inference/bedrock/config.py b/llama_stack/providers/remote/inference/bedrock/config.py
index f2e8930be..5961a2f15 100644
--- a/llama_stack/providers/remote/inference/bedrock/config.py
+++ b/llama_stack/providers/remote/inference/bedrock/config.py
@@ -1,11 +1,11 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from llama_stack.providers.utils.bedrock.config import BedrockBaseConfig
-
-
-class BedrockConfig(BedrockBaseConfig):
-    pass
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.providers.utils.bedrock.config import BedrockBaseConfig
+
+
+class BedrockConfig(BedrockBaseConfig):
+    pass
diff --git a/llama_stack/providers/remote/inference/cerebras/cerebras.py b/llama_stack/providers/remote/inference/cerebras/cerebras.py
index 3156601be..952118e24 100644
--- a/llama_stack/providers/remote/inference/cerebras/cerebras.py
+++ b/llama_stack/providers/remote/inference/cerebras/cerebras.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import AsyncGenerator, List, Optional, Union
+from collections.abc import AsyncGenerator
 
 from cerebras.cloud.sdk import AsyncCerebras
 
@@ -21,6 +21,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -79,10 +80,10 @@ class CerebrasInferenceAdapter(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -120,15 +121,15 @@ class CerebrasInferenceAdapter(
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -166,7 +167,7 @@ class CerebrasInferenceAdapter(
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         if request.sampling_params and isinstance(request.sampling_params.strategy, TopKSamplingStrategy):
             raise ValueError("`top_k` not supported by Cerebras")
 
@@ -188,9 +189,19 @@ class CerebrasInferenceAdapter(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
diff --git a/llama_stack/providers/remote/inference/cerebras/config.py b/llama_stack/providers/remote/inference/cerebras/config.py
index 81682c980..81312ec76 100644
--- a/llama_stack/providers/remote/inference/cerebras/config.py
+++ b/llama_stack/providers/remote/inference/cerebras/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import os
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -20,13 +20,13 @@ class CerebrasImplConfig(BaseModel):
         default=os.environ.get("CEREBRAS_BASE_URL", DEFAULT_BASE_URL),
         description="Base URL for the Cerebras API",
     )
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default=os.environ.get("CEREBRAS_API_KEY"),
         description="Cerebras API Key",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "base_url": DEFAULT_BASE_URL,
             "api_key": "${env.CEREBRAS_API_KEY}",
diff --git a/llama_stack/providers/remote/inference/cerebras_openai_compat/__init__.py b/llama_stack/providers/remote/inference/cerebras_openai_compat/__init__.py
index a5f07edd2..523a8dfe7 100644
--- a/llama_stack/providers/remote/inference/cerebras_openai_compat/__init__.py
+++ b/llama_stack/providers/remote/inference/cerebras_openai_compat/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import InferenceProvider
 
 from .config import CerebrasCompatConfig
 
 
-async def get_adapter_impl(config: CerebrasCompatConfig, _deps) -> Inference:
+async def get_adapter_impl(config: CerebrasCompatConfig, _deps) -> InferenceProvider:
     # import dynamically so the import is used only when it is needed
     from .cerebras import CerebrasCompatInferenceAdapter
 
diff --git a/llama_stack/providers/remote/inference/cerebras_openai_compat/config.py b/llama_stack/providers/remote/inference/cerebras_openai_compat/config.py
index 149c0a202..cb8daff6a 100644
--- a/llama_stack/providers/remote/inference/cerebras_openai_compat/config.py
+++ b/llama_stack/providers/remote/inference/cerebras_openai_compat/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class CerebrasProviderDataValidator(BaseModel):
-    cerebras_api_key: Optional[str] = Field(
+    cerebras_api_key: str | None = Field(
         default=None,
         description="API key for Cerebras models",
     )
@@ -20,7 +20,7 @@ class CerebrasProviderDataValidator(BaseModel):
 
 @json_schema_type
 class CerebrasCompatConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Cerebras API key",
     )
@@ -31,7 +31,7 @@ class CerebrasCompatConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.CEREBRAS_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.CEREBRAS_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "openai_compat_api_base": "https://api.cerebras.ai/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/databricks/config.py b/llama_stack/providers/remote/inference/databricks/config.py
index 1d51125cb..5710dcef3 100644
--- a/llama_stack/providers/remote/inference/databricks/config.py
+++ b/llama_stack/providers/remote/inference/databricks/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -28,7 +28,7 @@ class DatabricksImplConfig(BaseModel):
         url: str = "${env.DATABRICKS_URL}",
         api_token: str = "${env.DATABRICKS_API_TOKEN}",
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         return {
             "url": url,
             "api_token": api_token,
diff --git a/llama_stack/providers/remote/inference/databricks/databricks.py b/llama_stack/providers/remote/inference/databricks/databricks.py
index 27d96eb7d..1dc18b97f 100644
--- a/llama_stack/providers/remote/inference/databricks/databricks.py
+++ b/llama_stack/providers/remote/inference/databricks/databricks.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import AsyncGenerator, List, Optional
+from collections.abc import AsyncGenerator
 
 from openai import OpenAI
 
@@ -20,6 +20,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -78,25 +79,25 @@ class DatabricksInferenceAdapter(
         self,
         model: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         raise NotImplementedError()
 
     async def chat_completion(
         self,
         model: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -146,9 +147,19 @@ class DatabricksInferenceAdapter(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
diff --git a/llama_stack/providers/remote/inference/fireworks/config.py b/llama_stack/providers/remote/inference/fireworks/config.py
index c21ce4a40..072d558f4 100644
--- a/llama_stack/providers/remote/inference/fireworks/config.py
+++ b/llama_stack/providers/remote/inference/fireworks/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -17,13 +17,13 @@ class FireworksImplConfig(BaseModel):
         default="https://api.fireworks.ai/inference/v1",
         description="The URL for the Fireworks server",
     )
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="The Fireworks.ai API Key",
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "url": "https://api.fireworks.ai/inference/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py
index 58678a9cc..fe21685dd 100644
--- a/llama_stack/providers/remote/inference/fireworks/fireworks.py
+++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py
@@ -4,7 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 from fireworks.client import Fireworks
 from openai import AsyncOpenAI
@@ -36,6 +37,7 @@ from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAICompletion,
+    OpenAIEmbeddingsResponse,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
 )
@@ -105,10 +107,10 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -146,9 +148,9 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
 
     def _build_options(
         self,
-        sampling_params: Optional[SamplingParams],
+        sampling_params: SamplingParams | None,
         fmt: ResponseFormat,
-        logprobs: Optional[LogProbConfig],
+        logprobs: LogProbConfig | None,
     ) -> dict:
         options = get_sampling_options(sampling_params)
         options.setdefault("max_tokens", 512)
@@ -177,15 +179,15 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -229,7 +231,7 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         input_dict = {}
         media_present = request_has_media(request)
 
@@ -263,10 +265,10 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
 
@@ -285,27 +287,37 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
         embeddings = [data.embedding for data in response.data]
         return EmbeddingsResponse(embeddings=embeddings)
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(model)
 
@@ -338,29 +350,29 @@ class FireworksInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProv
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         model_obj = await self.model_store.get_model(model)
 
         # Divert Llama Models through Llama Stack inference APIs because
diff --git a/llama_stack/providers/remote/inference/fireworks_openai_compat/__init__.py b/llama_stack/providers/remote/inference/fireworks_openai_compat/__init__.py
index f78f218b5..15a666cb6 100644
--- a/llama_stack/providers/remote/inference/fireworks_openai_compat/__init__.py
+++ b/llama_stack/providers/remote/inference/fireworks_openai_compat/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import InferenceProvider
 
 from .config import FireworksCompatConfig
 
 
-async def get_adapter_impl(config: FireworksCompatConfig, _deps) -> Inference:
+async def get_adapter_impl(config: FireworksCompatConfig, _deps) -> InferenceProvider:
     # import dynamically so the import is used only when it is needed
     from .fireworks import FireworksCompatInferenceAdapter
 
diff --git a/llama_stack/providers/remote/inference/fireworks_openai_compat/config.py b/llama_stack/providers/remote/inference/fireworks_openai_compat/config.py
index 0263d348a..bf38cdd2b 100644
--- a/llama_stack/providers/remote/inference/fireworks_openai_compat/config.py
+++ b/llama_stack/providers/remote/inference/fireworks_openai_compat/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class FireworksProviderDataValidator(BaseModel):
-    fireworks_api_key: Optional[str] = Field(
+    fireworks_api_key: str | None = Field(
         default=None,
         description="API key for Fireworks models",
     )
@@ -20,7 +20,7 @@ class FireworksProviderDataValidator(BaseModel):
 
 @json_schema_type
 class FireworksCompatConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Fireworks API key",
     )
@@ -31,7 +31,7 @@ class FireworksCompatConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.FIREWORKS_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "openai_compat_api_base": "https://api.fireworks.ai/inference/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/gemini/__init__.py b/llama_stack/providers/remote/inference/gemini/__init__.py
index dd972f21c..9d35da893 100644
--- a/llama_stack/providers/remote/inference/gemini/__init__.py
+++ b/llama_stack/providers/remote/inference/gemini/__init__.py
@@ -4,15 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
-
 from pydantic import BaseModel
 
 from .config import GeminiConfig
 
 
 class GeminiProviderDataValidator(BaseModel):
-    gemini_api_key: Optional[str] = None
+    gemini_api_key: str | None = None
 
 
 async def get_adapter_impl(config: GeminiConfig, _deps):
diff --git a/llama_stack/providers/remote/inference/gemini/config.py b/llama_stack/providers/remote/inference/gemini/config.py
index 30c8d9913..63ef4de01 100644
--- a/llama_stack/providers/remote/inference/gemini/config.py
+++ b/llama_stack/providers/remote/inference/gemini/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class GeminiProviderDataValidator(BaseModel):
-    gemini_api_key: Optional[str] = Field(
+    gemini_api_key: str | None = Field(
         default=None,
         description="API key for Gemini models",
     )
@@ -20,13 +20,13 @@ class GeminiProviderDataValidator(BaseModel):
 
 @json_schema_type
 class GeminiConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="API key for Gemini models",
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.GEMINI_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.GEMINI_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "api_key": api_key,
         }
diff --git a/llama_stack/providers/remote/inference/groq/config.py b/llama_stack/providers/remote/inference/groq/config.py
index 8a1204b0b..fe060507a 100644
--- a/llama_stack/providers/remote/inference/groq/config.py
+++ b/llama_stack/providers/remote/inference/groq/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class GroqProviderDataValidator(BaseModel):
-    groq_api_key: Optional[str] = Field(
+    groq_api_key: str | None = Field(
         default=None,
         description="API key for Groq models",
     )
@@ -20,7 +20,7 @@ class GroqProviderDataValidator(BaseModel):
 
 @json_schema_type
 class GroqConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         # The Groq client library loads the GROQ_API_KEY environment variable by default
         default=None,
         description="The Groq API key",
@@ -32,7 +32,7 @@ class GroqConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "url": "https://api.groq.com",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/groq/groq.py b/llama_stack/providers/remote/inference/groq/groq.py
index f3f14e9af..27d7d7961 100644
--- a/llama_stack/providers/remote/inference/groq/groq.py
+++ b/llama_stack/providers/remote/inference/groq/groq.py
@@ -4,7 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncIterator
+from typing import Any
 
 from openai import AsyncOpenAI
 
@@ -59,29 +60,29 @@ class GroqInferenceAdapter(LiteLLMOpenAIMixin):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         model_obj = await self.model_store.get_model(model)
 
         # Groq does not support json_schema response format, so we need to convert it to json_object
diff --git a/llama_stack/providers/remote/inference/groq_openai_compat/__init__.py b/llama_stack/providers/remote/inference/groq_openai_compat/__init__.py
index 8161df20d..794cdebd7 100644
--- a/llama_stack/providers/remote/inference/groq_openai_compat/__init__.py
+++ b/llama_stack/providers/remote/inference/groq_openai_compat/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import InferenceProvider
 
 from .config import GroqCompatConfig
 
 
-async def get_adapter_impl(config: GroqCompatConfig, _deps) -> Inference:
+async def get_adapter_impl(config: GroqCompatConfig, _deps) -> InferenceProvider:
     # import dynamically so the import is used only when it is needed
     from .groq import GroqCompatInferenceAdapter
 
diff --git a/llama_stack/providers/remote/inference/groq_openai_compat/config.py b/llama_stack/providers/remote/inference/groq_openai_compat/config.py
index 4b90b4576..481f740f9 100644
--- a/llama_stack/providers/remote/inference/groq_openai_compat/config.py
+++ b/llama_stack/providers/remote/inference/groq_openai_compat/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class GroqProviderDataValidator(BaseModel):
-    groq_api_key: Optional[str] = Field(
+    groq_api_key: str | None = Field(
         default=None,
         description="API key for Groq models",
     )
@@ -20,7 +20,7 @@ class GroqProviderDataValidator(BaseModel):
 
 @json_schema_type
 class GroqCompatConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Groq API key",
     )
@@ -31,7 +31,7 @@ class GroqCompatConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.GROQ_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "openai_compat_api_base": "https://api.groq.com/openai/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/__init__.py b/llama_stack/providers/remote/inference/llama_openai_compat/__init__.py
new file mode 100644
index 000000000..be48d1067
--- /dev/null
+++ b/llama_stack/providers/remote/inference/llama_openai_compat/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.apis.inference import InferenceProvider
+
+from .config import LlamaCompatConfig
+
+
+async def get_adapter_impl(config: LlamaCompatConfig, _deps) -> InferenceProvider:
+    # import dynamically so the import is used only when it is needed
+    from .llama import LlamaCompatInferenceAdapter
+
+    adapter = LlamaCompatInferenceAdapter(config)
+    return adapter
diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/config.py b/llama_stack/providers/remote/inference/llama_openai_compat/config.py
new file mode 100644
index 000000000..57bc7240d
--- /dev/null
+++ b/llama_stack/providers/remote/inference/llama_openai_compat/config.py
@@ -0,0 +1,38 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from llama_stack.schema_utils import json_schema_type
+
+
+class LlamaProviderDataValidator(BaseModel):
+    llama_api_key: str | None = Field(
+        default=None,
+        description="API key for api.llama models",
+    )
+
+
+@json_schema_type
+class LlamaCompatConfig(BaseModel):
+    api_key: str | None = Field(
+        default=None,
+        description="The Llama API key",
+    )
+
+    openai_compat_api_base: str = Field(
+        default="https://api.llama.com/compat/v1/",
+        description="The URL for the Llama API server",
+    )
+
+    @classmethod
+    def sample_run_config(cls, api_key: str = "${env.LLAMA_API_KEY}", **kwargs) -> dict[str, Any]:
+        return {
+            "openai_compat_api_base": "https://api.llama.com/compat/v1/",
+            "api_key": api_key,
+        }
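
A quick, hedged usage note on the new config above: sample_run_config leaves the environment placeholder unresolved, so calling it directly yields exactly the dict shown (this snippet assumes the package is importable; nothing is resolved against a real environment):

from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig

run_config = LlamaCompatConfig.sample_run_config()
assert run_config == {
    "openai_compat_api_base": "https://api.llama.com/compat/v1/",
    "api_key": "${env.LLAMA_API_KEY}",
}
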
diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/llama.py b/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
new file mode 100644
index 000000000..29b5e889a
--- /dev/null
+++ b/llama_stack/providers/remote/inference/llama_openai_compat/llama.py
@@ -0,0 +1,34 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.providers.remote.inference.llama_openai_compat.config import (
+    LlamaCompatConfig,
+)
+from llama_stack.providers.utils.inference.litellm_openai_mixin import (
+    LiteLLMOpenAIMixin,
+)
+
+from .models import MODEL_ENTRIES
+
+
+class LlamaCompatInferenceAdapter(LiteLLMOpenAIMixin):
+    _config: LlamaCompatConfig
+
+    def __init__(self, config: LlamaCompatConfig):
+        LiteLLMOpenAIMixin.__init__(
+            self,
+            model_entries=MODEL_ENTRIES,
+            api_key_from_config=config.api_key,
+            provider_data_api_key_field="llama_api_key",
+            openai_compat_api_base=config.openai_compat_api_base,
+        )
+        self.config = config
+
+    async def initialize(self):
+        await super().initialize()
+
+    async def shutdown(self):
+        await super().shutdown()
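
Putting the two new modules together, a hedged sketch of constructing and driving the new adapter (it mirrors get_adapter_impl in the package __init__ plus the initialize/shutdown hooks shown above; the API key value is a placeholder, not a real credential):

import asyncio

from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
from llama_stack.providers.remote.inference.llama_openai_compat.llama import LlamaCompatInferenceAdapter


async def main() -> None:
    config = LlamaCompatConfig(api_key="llama-api-key-placeholder")
    adapter = LlamaCompatInferenceAdapter(config)
    await adapter.initialize()
    try:
        pass  # inference calls would go through the LiteLLM/OpenAI-compat mixin here
    finally:
        await adapter.shutdown()


asyncio.run(main())
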
diff --git a/llama_stack/providers/remote/inference/llama_openai_compat/models.py b/llama_stack/providers/remote/inference/llama_openai_compat/models.py
new file mode 100644
index 000000000..6285e98e1
--- /dev/null
+++ b/llama_stack/providers/remote/inference/llama_openai_compat/models.py
@@ -0,0 +1,25 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.models.llama.sku_types import CoreModelId
+from llama_stack.providers.utils.inference.model_registry import (
+    build_hf_repo_model_entry,
+)
+
+MODEL_ENTRIES = [
+    build_hf_repo_model_entry(
+        "Llama-3.3-70B-Instruct",
+        CoreModelId.llama3_3_70b_instruct.value,
+    ),
+    build_hf_repo_model_entry(
+        "Llama-4-Scout-17B-16E-Instruct-FP8",
+        CoreModelId.llama4_scout_17b_16e_instruct.value,
+    ),
+    build_hf_repo_model_entry(
+        "Llama-4-Maverick-17B-128E-Instruct-FP8",
+        CoreModelId.llama4_maverick_17b_128e_instruct.value,
+    ),
+]
diff --git a/llama_stack/providers/remote/inference/nvidia/config.py b/llama_stack/providers/remote/inference/nvidia/config.py
index 8f80408d4..4c449edc2 100644
--- a/llama_stack/providers/remote/inference/nvidia/config.py
+++ b/llama_stack/providers/remote/inference/nvidia/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import os
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -39,7 +39,7 @@ class NVIDIAConfig(BaseModel):
         default_factory=lambda: os.getenv("NVIDIA_BASE_URL", "https://integrate.api.nvidia.com"),
         description="A base url for accessing the NVIDIA NIM",
     )
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
         description="The NVIDIA API key, only needed of using the hosted service",
     )
@@ -53,7 +53,7 @@ class NVIDIAConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "url": "${env.NVIDIA_BASE_URL:https://integrate.api.nvidia.com}",
             "api_key": "${env.NVIDIA_API_KEY:}",
diff --git a/llama_stack/providers/remote/inference/nvidia/nvidia.py b/llama_stack/providers/remote/inference/nvidia/nvidia.py
index 4a62ad6cb..4c68322e0 100644
--- a/llama_stack/providers/remote/inference/nvidia/nvidia.py
+++ b/llama_stack/providers/remote/inference/nvidia/nvidia.py
@@ -6,8 +6,9 @@
 
 import logging
 import warnings
+from collections.abc import AsyncIterator
 from functools import lru_cache
-from typing import Any, AsyncIterator, Dict, List, Optional, Union
+from typing import Any
 
 from openai import APIConnectionError, AsyncOpenAI, BadRequestError
 
@@ -28,6 +29,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -141,11 +143,11 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]:
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+    ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
         if sampling_params is None:
             sampling_params = SamplingParams()
         if content_has_media(content):
@@ -182,20 +184,20 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         if any(content_has_media(content) for content in contents):
             raise NotImplementedError("Media is not supported")
 
         #
-        # Llama Stack: contents = List[str] | List[InterleavedContentItem]
+        # Llama Stack: contents = list[str] | list[InterleavedContentItem]
         #  ->
-        # OpenAI: input = str | List[str]
+        # OpenAI: input = str | list[str]
         #
-        # we can ignore str and always pass List[str] to OpenAI
+        # we can ignore str and always pass list[str] to OpenAI
         #
         flat_contents = [content.text if isinstance(content, TextContentItem) else content for content in contents]
         input = [content.text if isinstance(content, TextContentItem) else content for content in flat_contents]
@@ -231,25 +233,35 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
             raise ValueError(f"Failed to get embeddings: {e}") from e
 
         #
-        # OpenAI: CreateEmbeddingResponse(data=[Embedding(embedding=List[float], ...)], ...)
+        # OpenAI: CreateEmbeddingResponse(data=[Embedding(embedding=list[float], ...)], ...)
         #  ->
-        # Llama Stack: EmbeddingsResponse(embeddings=List[List[float]])
+        # Llama Stack: EmbeddingsResponse(embeddings=list[list[float]])
         #
         return EmbeddingsResponse(embeddings=[embedding.embedding for embedding in response.data])
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         if sampling_params is None:
             sampling_params = SamplingParams()
         if tool_prompt_format:
@@ -286,24 +298,24 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         provider_model_id = await self._get_provider_model_id(model)
 
@@ -335,29 +347,29 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         provider_model_id = await self._get_provider_model_id(model)
 
         params = await prepare_openai_completion_params(
diff --git a/llama_stack/providers/remote/inference/nvidia/openai_utils.py b/llama_stack/providers/remote/inference/nvidia/openai_utils.py
index 3f2769b26..0b0d7fcf3 100644
--- a/llama_stack/providers/remote/inference/nvidia/openai_utils.py
+++ b/llama_stack/providers/remote/inference/nvidia/openai_utils.py
@@ -5,7 +5,8 @@
 # the root directory of this source tree.
 
 import warnings
-from typing import Any, AsyncGenerator, Dict, List, Optional
+from collections.abc import AsyncGenerator
+from typing import Any
 
 from openai import AsyncStream
 from openai.types.chat.chat_completion import (
@@ -64,7 +65,7 @@ async def convert_chat_completion_request(
         )
 
     nvext = {}
-    payload: Dict[str, Any] = dict(
+    payload: dict[str, Any] = dict(
         model=request.model,
         messages=[await convert_message_to_openai_dict_new(message) for message in request.messages],
         stream=request.stream,
@@ -137,7 +138,7 @@ def convert_completion_request(
     # logprobs.top_k -> logprobs
 
     nvext = {}
-    payload: Dict[str, Any] = dict(
+    payload: dict[str, Any] = dict(
         model=request.model,
         prompt=request.content,
         stream=request.stream,
@@ -176,8 +177,8 @@ def convert_completion_request(
 
 
 def _convert_openai_completion_logprobs(
-    logprobs: Optional[OpenAICompletionLogprobs],
-) -> Optional[List[TokenLogProbs]]:
+    logprobs: OpenAICompletionLogprobs | None,
+) -> list[TokenLogProbs] | None:
     """
     Convert an OpenAI CompletionLogprobs into a list of TokenLogProbs.
     """
diff --git a/llama_stack/providers/remote/inference/nvidia/utils.py b/llama_stack/providers/remote/inference/nvidia/utils.py
index 7d3f3f27e..74019999e 100644
--- a/llama_stack/providers/remote/inference/nvidia/utils.py
+++ b/llama_stack/providers/remote/inference/nvidia/utils.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 import logging
-from typing import Tuple
 
 import httpx
 
@@ -18,7 +17,7 @@ def _is_nvidia_hosted(config: NVIDIAConfig) -> bool:
     return "integrate.api.nvidia.com" in config.url
 
 
-async def _get_health(url: str) -> Tuple[bool, bool]:
+async def _get_health(url: str) -> tuple[bool, bool]:
     """
     Query {url}/v1/health/{live,ready} to check if the server is running and ready
 
diff --git a/llama_stack/providers/remote/inference/ollama/config.py b/llama_stack/providers/remote/inference/ollama/config.py
index a5a4d48ab..0e4aef0e1 100644
--- a/llama_stack/providers/remote/inference/ollama/config.py
+++ b/llama_stack/providers/remote/inference/ollama/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -15,5 +15,5 @@ class OllamaImplConfig(BaseModel):
     url: str = DEFAULT_OLLAMA_URL
 
     @classmethod
-    def sample_run_config(cls, url: str = "${env.OLLAMA_URL:http://localhost:11434}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, url: str = "${env.OLLAMA_URL:http://localhost:11434}", **kwargs) -> dict[str, Any]:
         return {"url": url}
diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index cdfe7b568..8863e0edc 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -5,10 +5,11 @@
 # the root directory of this source tree.
 
 
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 import httpx
-from ollama import AsyncClient
+from ollama import AsyncClient  # type: ignore[attr-defined]
 from openai import AsyncOpenAI
 
 from llama_stack.apis.common.content_types import (
@@ -27,10 +28,11 @@ from llama_stack.apis.inference import (
     EmbeddingsResponse,
     EmbeddingTaskType,
     GrammarResponseFormat,
-    Inference,
+    InferenceProvider,
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -60,6 +62,7 @@ from llama_stack.providers.utils.inference.openai_compat import (
     OpenAICompatCompletionChoice,
     OpenAICompatCompletionResponse,
     get_sampling_options,
+    prepare_openai_completion_params,
     process_chat_completion_response,
     process_chat_completion_stream_response,
     process_completion_response,
@@ -80,7 +83,7 @@ logger = get_logger(name=__name__, category="inference")
 
 
 class OllamaInferenceAdapter(
-    Inference,
+    InferenceProvider,
     ModelsProtocolPrivate,
 ):
     def __init__(self, url: str) -> None:
@@ -130,14 +133,16 @@ class OllamaInferenceAdapter(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]:
         if sampling_params is None:
             sampling_params = SamplingParams()
         model = await self._get_model(model_id)
+        if model.provider_resource_id is None:
+            raise ValueError(f"Model {model_id} has no provider_resource_id set")
         request = CompletionRequest(
             model=model.provider_resource_id,
             content=content,
@@ -188,19 +193,21 @@ class OllamaInferenceAdapter(
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> ChatCompletionResponse | AsyncGenerator[ChatCompletionResponseStreamChunk, None]:
         if sampling_params is None:
             sampling_params = SamplingParams()
         model = await self._get_model(model_id)
+        if model.provider_resource_id is None:
+            raise ValueError(f"Model {model_id} has no provider_resource_id set")
         request = ChatCompletionRequest(
             model=model.provider_resource_id,
             messages=messages,
@@ -216,7 +223,7 @@ class OllamaInferenceAdapter(
         else:
             return await self._nonstream_chat_completion(request)
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         sampling_options = get_sampling_options(request.sampling_params)
         # This is needed since the Ollama API expects num_predict to be set
         # for early truncation instead of max_tokens.
@@ -314,10 +321,10 @@ class OllamaInferenceAdapter(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self._get_model(model_id)
 
@@ -333,7 +340,10 @@ class OllamaInferenceAdapter(
         return EmbeddingsResponse(embeddings=embeddings)
 
     async def register_model(self, model: Model) -> Model:
-        model = await self.register_helper.register_model(model)
+        try:
+            model = await self.register_helper.register_model(model)
+        except ValueError:
+            pass  # Ignore statically unknown model, will check live listing
         if model.model_type == ModelType.embedding:
             logger.info(f"Pulling embedding model `{model.provider_resource_id}` if necessary...")
             await self.client.pull(model.provider_resource_id)
@@ -342,9 +352,14 @@ class OllamaInferenceAdapter(
         #  - models not currently running are run by the ollama server as needed
         response = await self.client.list()
         available_models = [m["model"] for m in response["models"]]
-        if model.provider_resource_id not in available_models:
+        if model.provider_resource_id is None:
+            raise ValueError("Model provider_resource_id cannot be None")
+        provider_resource_id = self.register_helper.get_provider_model_id(model.provider_resource_id)
+        if provider_resource_id is None:
+            provider_resource_id = model.provider_resource_id
+        if provider_resource_id not in available_models:
             available_models_latest = [m["model"].split(":latest")[0] for m in response["models"]]
-            if model.provider_resource_id in available_models_latest:
+            if provider_resource_id in available_models_latest:
                 logger.warning(
                     f"Imprecise provider resource id was used but 'latest' is available in Ollama - using '{model.provider_resource_id}:latest'"
                 )
@@ -352,142 +367,145 @@ class OllamaInferenceAdapter(
             raise ValueError(
                 f"Model '{model.provider_resource_id}' is not available in Ollama. Available models: {', '.join(available_models)}"
             )
+        model.provider_resource_id = provider_resource_id
 
         return model
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         if not isinstance(prompt, str):
             raise ValueError("Ollama does not support non-string prompts for completion")
 
         model_obj = await self._get_model(model)
-        params = {
-            k: v
-            for k, v in {
-                "model": model_obj.provider_resource_id,
-                "prompt": prompt,
-                "best_of": best_of,
-                "echo": echo,
-                "frequency_penalty": frequency_penalty,
-                "logit_bias": logit_bias,
-                "logprobs": logprobs,
-                "max_tokens": max_tokens,
-                "n": n,
-                "presence_penalty": presence_penalty,
-                "seed": seed,
-                "stop": stop,
-                "stream": stream,
-                "stream_options": stream_options,
-                "temperature": temperature,
-                "top_p": top_p,
-                "user": user,
-            }.items()
-            if v is not None
-        }
+        params = await prepare_openai_completion_params(
+            model=model_obj.provider_resource_id,
+            prompt=prompt,
+            best_of=best_of,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            top_p=top_p,
+            user=user,
+        )
         return await self.openai_client.completions.create(**params)  # type: ignore
 
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         model_obj = await self._get_model(model)
-        params = {
-            k: v
-            for k, v in {
-                "model": model_obj.provider_resource_id,
-                "messages": messages,
-                "frequency_penalty": frequency_penalty,
-                "function_call": function_call,
-                "functions": functions,
-                "logit_bias": logit_bias,
-                "logprobs": logprobs,
-                "max_completion_tokens": max_completion_tokens,
-                "max_tokens": max_tokens,
-                "n": n,
-                "parallel_tool_calls": parallel_tool_calls,
-                "presence_penalty": presence_penalty,
-                "response_format": response_format,
-                "seed": seed,
-                "stop": stop,
-                "stream": stream,
-                "stream_options": stream_options,
-                "temperature": temperature,
-                "tool_choice": tool_choice,
-                "tools": tools,
-                "top_logprobs": top_logprobs,
-                "top_p": top_p,
-                "user": user,
-            }.items()
-            if v is not None
-        }
+        params = await prepare_openai_completion_params(
+            model=model_obj.provider_resource_id,
+            messages=messages,
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            response_format=response_format,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+        )
         return await self.openai_client.chat.completions.create(**params)  # type: ignore
 
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch completion is not supported for Ollama")
 
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for Ollama")
 
 
-async def convert_message_to_openai_dict_for_ollama(message: Message) -> List[dict]:
+async def convert_message_to_openai_dict_for_ollama(message: Message) -> list[dict]:
     async def _convert_content(content) -> dict:
         if isinstance(content, ImageContentItem):
             return {
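
Illustrative only, not part of the diff: the Ollama hunks above swap the hand-written "drop the None values" dict comprehensions for `prepare_openai_completion_params`. A minimal stand-in with the same observable behavior at these call sites is sketched below; the real helper in `llama_stack.providers.utils.inference.openai_compat` may additionally normalize message and response-format objects.

from typing import Any


async def prepare_openai_completion_params_sketch(**params: Any) -> dict[str, Any]:
    # Keep only the keyword arguments that were explicitly provided,
    # mirroring the `if v is not None` filter removed in the hunks above.
    return {k: v for k, v in params.items() if v is not None}
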
diff --git a/llama_stack/providers/remote/inference/openai/__init__.py b/llama_stack/providers/remote/inference/openai/__init__.py
index 000a03d33..c245dbe10 100644
--- a/llama_stack/providers/remote/inference/openai/__init__.py
+++ b/llama_stack/providers/remote/inference/openai/__init__.py
@@ -4,15 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
-
 from pydantic import BaseModel
 
 from .config import OpenAIConfig
 
 
 class OpenAIProviderDataValidator(BaseModel):
-    openai_api_key: Optional[str] = None
+    openai_api_key: str | None = None
 
 
 async def get_adapter_impl(config: OpenAIConfig, _deps):
diff --git a/llama_stack/providers/remote/inference/openai/config.py b/llama_stack/providers/remote/inference/openai/config.py
index 2b0cc2c10..17fb98831 100644
--- a/llama_stack/providers/remote/inference/openai/config.py
+++ b/llama_stack/providers/remote/inference/openai/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class OpenAIProviderDataValidator(BaseModel):
-    openai_api_key: Optional[str] = Field(
+    openai_api_key: str | None = Field(
         default=None,
         description="API key for OpenAI models",
     )
@@ -20,13 +20,13 @@ class OpenAIProviderDataValidator(BaseModel):
 
 @json_schema_type
 class OpenAIConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="API key for OpenAI models",
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.OPENAI_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.OPENAI_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "api_key": api_key,
         }
diff --git a/llama_stack/providers/remote/inference/openai/models.py b/llama_stack/providers/remote/inference/openai/models.py
index 1737043a4..e029c456c 100644
--- a/llama_stack/providers/remote/inference/openai/models.py
+++ b/llama_stack/providers/remote/inference/openai/models.py
@@ -4,27 +4,60 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from dataclasses import dataclass
+
 from llama_stack.apis.models.models import ModelType
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,
 )
 
 LLM_MODEL_IDS = [
+    # The models with the "openai/" prefix are the litellm-specific model names.
+    # They should be deprecated in favor of the canonical OpenAI model names.
     "openai/gpt-4o",
     "openai/gpt-4o-mini",
     "openai/chatgpt-4o-latest",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-instruct",
+    "gpt-4",
+    "gpt-4-turbo",
+    "gpt-4o",
+    "gpt-4o-2024-08-06",
+    "gpt-4o-mini",
+    "gpt-4o-audio-preview",
+    "chatgpt-4o-latest",
+    "o1",
+    "o1-mini",
+    "o3-mini",
+    "o4-mini",
 ]
 
 
+@dataclass
+class EmbeddingModelInfo:
+    """Structured representation of embedding model information."""
+
+    embedding_dimension: int
+    context_length: int
+
+
+EMBEDDING_MODEL_IDS: dict[str, EmbeddingModelInfo] = {
+    "openai/text-embedding-3-small": EmbeddingModelInfo(1536, 8192),
+    "openai/text-embedding-3-large": EmbeddingModelInfo(3072, 8192),
+    "text-embedding-3-small": EmbeddingModelInfo(1536, 8192),
+    "text-embedding-3-large": EmbeddingModelInfo(3072, 8192),
+}
+
+
 MODEL_ENTRIES = [ProviderModelEntry(provider_model_id=m) for m in LLM_MODEL_IDS] + [
     ProviderModelEntry(
-        provider_model_id="openai/text-embedding-3-small",
+        provider_model_id=model_id,
         model_type=ModelType.embedding,
-        metadata={"embedding_dimension": 1536, "context_length": 8192},
-    ),
-    ProviderModelEntry(
-        provider_model_id="openai/text-embedding-3-large",
-        model_type=ModelType.embedding,
-        metadata={"embedding_dimension": 3072, "context_length": 8192},
-    ),
+        metadata={
+            "embedding_dimension": model_info.embedding_dimension,
+            "context_length": model_info.context_length,
+        },
+    )
+    for model_id, model_info in EMBEDDING_MODEL_IDS.items()
 ]
diff --git a/llama_stack/providers/remote/inference/openai/openai.py b/llama_stack/providers/remote/inference/openai/openai.py
index 6b9c02e6c..6f3a686a8 100644
--- a/llama_stack/providers/remote/inference/openai/openai.py
+++ b/llama_stack/providers/remote/inference/openai/openai.py
@@ -4,12 +4,45 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import logging
+from collections.abc import AsyncIterator
+from typing import Any
+
+from openai import AsyncOpenAI
+
+from llama_stack.apis.inference.inference import (
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
+    OpenAIEmbeddingData,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
+)
 from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
+from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
 
 from .config import OpenAIConfig
 from .models import MODEL_ENTRIES
 
+logger = logging.getLogger(__name__)
 
+
+#
+# This OpenAI adapter implements Inference methods using two clients -
+#
+# | Inference Method           | Implementation Source    |
+# |----------------------------|--------------------------|
+# | completion                 | LiteLLMOpenAIMixin       |
+# | chat_completion            | LiteLLMOpenAIMixin       |
+# | embedding                  | LiteLLMOpenAIMixin       |
+# | batch_completion           | LiteLLMOpenAIMixin       |
+# | batch_chat_completion      | LiteLLMOpenAIMixin       |
+# | openai_completion          | AsyncOpenAI              |
+# | openai_chat_completion     | AsyncOpenAI              |
+# | openai_embeddings          | AsyncOpenAI              |
+#
 class OpenAIInferenceAdapter(LiteLLMOpenAIMixin):
     def __init__(self, config: OpenAIConfig) -> None:
         LiteLLMOpenAIMixin.__init__(
@@ -19,9 +52,174 @@ class OpenAIInferenceAdapter(LiteLLMOpenAIMixin):
             provider_data_api_key_field="openai_api_key",
         )
         self.config = config
+        # we set is_openai_compat so users can use the canonical
+        # openai model names like "gpt-4" or "gpt-3.5-turbo"
+        # and the model name will be translated to litellm's
+        # "openai/gpt-4" or "openai/gpt-3.5-turbo" transparently.
+        # if we do not set this, users will be exposed to the
+        # litellm specific model names, an abstraction leak.
+        self.is_openai_compat = True
+        self._openai_client = AsyncOpenAI(
+            api_key=self.config.api_key,
+        )
 
     async def initialize(self) -> None:
         await super().initialize()
 
     async def shutdown(self) -> None:
         await super().shutdown()
+
+    async def openai_completion(
+        self,
+        model: str,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
+    ) -> OpenAICompletion:
+        if guided_choice is not None:
+            logging.warning("guided_choice is not supported by the OpenAI API. Ignoring.")
+        if prompt_logprobs is not None:
+            logging.warning("prompt_logprobs is not supported by the OpenAI API. Ignoring.")
+
+        model_id = (await self.model_store.get_model(model)).provider_resource_id
+        if model_id.startswith("openai/"):
+            model_id = model_id[len("openai/") :]
+        params = await prepare_openai_completion_params(
+            model=model_id,
+            prompt=prompt,
+            best_of=best_of,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            top_p=top_p,
+            user=user,
+        )
+        return await self._openai_client.completions.create(**params)
+
+    async def openai_chat_completion(
+        self,
+        model: str,
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
+        model_id = (await self.model_store.get_model(model)).provider_resource_id
+        if model_id.startswith("openai/"):
+            model_id = model_id[len("openai/") :]
+        params = await prepare_openai_completion_params(
+            model=model_id,
+            messages=messages,
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            response_format=response_format,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+        )
+        return await self._openai_client.chat.completions.create(**params)
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        model_id = (await self.model_store.get_model(model)).provider_resource_id
+        if model_id.startswith("openai/"):
+            model_id = model_id[len("openai/") :]
+
+        # Prepare parameters for OpenAI embeddings API
+        params = {
+            "model": model_id,
+            "input": input,
+        }
+
+        if encoding_format is not None:
+            params["encoding_format"] = encoding_format
+        if dimensions is not None:
+            params["dimensions"] = dimensions
+        if user is not None:
+            params["user"] = user
+
+        # Call OpenAI embeddings API
+        response = await self._openai_client.embeddings.create(**params)
+
+        data = []
+        for i, embedding_data in enumerate(response.data):
+            data.append(
+                OpenAIEmbeddingData(
+                    embedding=embedding_data.embedding,
+                    index=i,
+                )
+            )
+
+        usage = OpenAIEmbeddingUsage(
+            prompt_tokens=response.usage.prompt_tokens,
+            total_tokens=response.usage.total_tokens,
+        )
+
+        return OpenAIEmbeddingsResponse(
+            data=data,
+            model=response.model,
+            usage=usage,
+        )
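
Illustrative only, not part of the diff: per the client table and the `is_openai_compat` comment added above, the `openai_*` methods resolve the stored `provider_resource_id` and strip a leading `openai/` so canonical names such as `gpt-4o` reach the AsyncOpenAI client unchanged. A minimal sketch of that prefix handling:

def _strip_openai_prefix(provider_resource_id: str) -> str:
    # Mirrors the prefix handling in openai_completion / openai_chat_completion /
    # openai_embeddings above: litellm-style ids and canonical ids converge.
    if provider_resource_id.startswith("openai/"):
        return provider_resource_id[len("openai/") :]
    return provider_resource_id


assert _strip_openai_prefix("openai/gpt-4o") == "gpt-4o"
assert _strip_openai_prefix("gpt-4o") == "gpt-4o"
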
diff --git a/llama_stack/providers/remote/inference/passthrough/config.py b/llama_stack/providers/remote/inference/passthrough/config.py
index 46325e428..ce41495ce 100644
--- a/llama_stack/providers/remote/inference/passthrough/config.py
+++ b/llama_stack/providers/remote/inference/passthrough/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -18,13 +18,13 @@ class PassthroughImplConfig(BaseModel):
         description="The URL for the passthrough endpoint",
     )
 
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="API Key for the passthrouth endpoint",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "url": "${env.PASSTHROUGH_URL}",
             "api_key": "${env.PASSTHROUGH_API_KEY}",
diff --git a/llama_stack/providers/remote/inference/passthrough/passthrough.py b/llama_stack/providers/remote/inference/passthrough/passthrough.py
index af05320b0..6cf4680e2 100644
--- a/llama_stack/providers/remote/inference/passthrough/passthrough.py
+++ b/llama_stack/providers/remote/inference/passthrough/passthrough.py
@@ -4,7 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 from llama_stack_client import AsyncLlamaStackClient
 
@@ -18,6 +19,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -93,10 +95,10 @@ class PassthroughInferenceAdapter(Inference):
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -123,15 +125,15 @@ class PassthroughInferenceAdapter(Inference):
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -165,7 +167,7 @@ class PassthroughInferenceAdapter(Inference):
         else:
             return await self._nonstream_chat_completion(json_params)
 
-    async def _nonstream_chat_completion(self, json_params: Dict[str, Any]) -> ChatCompletionResponse:
+    async def _nonstream_chat_completion(self, json_params: dict[str, Any]) -> ChatCompletionResponse:
         client = self._get_client()
         response = await client.inference.chat_completion(**json_params)
 
@@ -178,7 +180,7 @@ class PassthroughInferenceAdapter(Inference):
             logprobs=response.logprobs,
         )
 
-    async def _stream_chat_completion(self, json_params: Dict[str, Any]) -> AsyncGenerator:
+    async def _stream_chat_completion(self, json_params: dict[str, Any]) -> AsyncGenerator:
         client = self._get_client()
         stream_response = await client.inference.chat_completion(**json_params)
 
@@ -193,10 +195,10 @@ class PassthroughInferenceAdapter(Inference):
     async def embeddings(
         self,
         model_id: str,
-        contents: List[InterleavedContent],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[InterleavedContent],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         client = self._get_client()
         model = await self.model_store.get_model(model_id)
@@ -209,27 +211,37 @@ class PassthroughInferenceAdapter(Inference):
             task_type=task_type,
         )
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
@@ -261,29 +273,29 @@ class PassthroughInferenceAdapter(Inference):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         client = self._get_client()
         model_obj = await self.model_store.get_model(model)
 
@@ -315,7 +327,7 @@ class PassthroughInferenceAdapter(Inference):
 
         return await client.inference.openai_chat_completion(**params)
 
-    def cast_value_to_json_dict(self, request_params: Dict[str, Any]) -> Dict[str, Any]:
+    def cast_value_to_json_dict(self, request_params: dict[str, Any]) -> dict[str, Any]:
         json_params = {}
         for key, value in request_params.items():
             json_input = convert_pydantic_to_json_value(value)
diff --git a/llama_stack/providers/remote/inference/runpod/config.py b/llama_stack/providers/remote/inference/runpod/config.py
index 377a7fe6a..e3913dc35 100644
--- a/llama_stack/providers/remote/inference/runpod/config.py
+++ b/llama_stack/providers/remote/inference/runpod/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -13,17 +13,17 @@ from llama_stack.schema_utils import json_schema_type
 
 @json_schema_type
 class RunpodImplConfig(BaseModel):
-    url: Optional[str] = Field(
+    url: str | None = Field(
         default=None,
         description="The URL for the Runpod model serving endpoint",
     )
-    api_token: Optional[str] = Field(
+    api_token: str | None = Field(
         default=None,
         description="The API token",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
         return {
             "url": "${env.RUNPOD_URL:}",
             "api_token": "${env.RUNPOD_API_TOKEN:}",
diff --git a/llama_stack/providers/remote/inference/runpod/runpod.py b/llama_stack/providers/remote/inference/runpod/runpod.py
index 72cbead9b..f8c98893e 100644
--- a/llama_stack/providers/remote/inference/runpod/runpod.py
+++ b/llama_stack/providers/remote/inference/runpod/runpod.py
@@ -3,11 +3,12 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import AsyncGenerator
+from collections.abc import AsyncGenerator
 
 from openai import OpenAI
 
 from llama_stack.apis.inference import *  # noqa: F403
+from llama_stack.apis.inference.inference import OpenAIEmbeddingsResponse
 
 # from llama_stack.providers.datatypes import ModelsProtocolPrivate
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
@@ -134,3 +135,13 @@ class RunpodInferenceAdapter(
         task_type: Optional[EmbeddingTaskType] = None,
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
diff --git a/llama_stack/providers/remote/inference/sambanova/__init__.py b/llama_stack/providers/remote/inference/sambanova/__init__.py
index 3e682e69c..a3a7b8fbd 100644
--- a/llama_stack/providers/remote/inference/sambanova/__init__.py
+++ b/llama_stack/providers/remote/inference/sambanova/__init__.py
@@ -4,16 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from pydantic import BaseModel
+from llama_stack.apis.inference import Inference
 
 from .config import SambaNovaImplConfig
 
 
-class SambaNovaProviderDataValidator(BaseModel):
-    sambanova_api_key: str
-
-
-async def get_adapter_impl(config: SambaNovaImplConfig, _deps):
+async def get_adapter_impl(config: SambaNovaImplConfig, _deps) -> Inference:
     from .sambanova import SambaNovaInferenceAdapter
 
     assert isinstance(config, SambaNovaImplConfig), f"Unexpected config type: {type(config)}"
diff --git a/llama_stack/providers/remote/inference/sambanova/config.py b/llama_stack/providers/remote/inference/sambanova/config.py
index a30c29b74..abbf9430f 100644
--- a/llama_stack/providers/remote/inference/sambanova/config.py
+++ b/llama_stack/providers/remote/inference/sambanova/config.py
@@ -4,27 +4,34 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, SecretStr
 
 from llama_stack.schema_utils import json_schema_type
 
 
+class SambaNovaProviderDataValidator(BaseModel):
+    sambanova_api_key: str | None = Field(
+        default=None,
+        description="Sambanova Cloud API key",
+    )
+
+
 @json_schema_type
 class SambaNovaImplConfig(BaseModel):
     url: str = Field(
         default="https://api.sambanova.ai/v1",
         description="The URL for the SambaNova AI server",
     )
-    api_key: Optional[str] = Field(
+    api_key: SecretStr | None = Field(
         default=None,
-        description="The SambaNova.ai API Key",
+        description="The SambaNova cloud API Key",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "url": "https://api.sambanova.ai/v1",
-            "api_key": "${env.SAMBANOVA_API_KEY}",
+            "api_key": api_key,
         }
diff --git a/llama_stack/providers/remote/inference/sambanova/models.py b/llama_stack/providers/remote/inference/sambanova/models.py
index 43041e94a..9954fa7a0 100644
--- a/llama_stack/providers/remote/inference/sambanova/models.py
+++ b/llama_stack/providers/remote/inference/sambanova/models.py
@@ -11,43 +11,43 @@ from llama_stack.providers.utils.inference.model_registry import (
 
 MODEL_ENTRIES = [
     build_hf_repo_model_entry(
-        "Meta-Llama-3.1-8B-Instruct",
+        "sambanova/Meta-Llama-3.1-8B-Instruct",
         CoreModelId.llama3_1_8b_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Meta-Llama-3.1-70B-Instruct",
-        CoreModelId.llama3_1_70b_instruct.value,
-    ),
-    build_hf_repo_model_entry(
-        "Meta-Llama-3.1-405B-Instruct",
+        "sambanova/Meta-Llama-3.1-405B-Instruct",
         CoreModelId.llama3_1_405b_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Meta-Llama-3.2-1B-Instruct",
+        "sambanova/Meta-Llama-3.2-1B-Instruct",
         CoreModelId.llama3_2_1b_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Meta-Llama-3.2-3B-Instruct",
+        "sambanova/Meta-Llama-3.2-3B-Instruct",
         CoreModelId.llama3_2_3b_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Meta-Llama-3.3-70B-Instruct",
+        "sambanova/Meta-Llama-3.3-70B-Instruct",
         CoreModelId.llama3_3_70b_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Llama-3.2-11B-Vision-Instruct",
+        "sambanova/Llama-3.2-11B-Vision-Instruct",
         CoreModelId.llama3_2_11b_vision_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Llama-3.2-90B-Vision-Instruct",
+        "sambanova/Llama-3.2-90B-Vision-Instruct",
         CoreModelId.llama3_2_90b_vision_instruct.value,
     ),
     build_hf_repo_model_entry(
-        "Meta-Llama-Guard-3-8B",
-        CoreModelId.llama_guard_3_8b.value,
-    ),
-    build_hf_repo_model_entry(
-        "Llama-4-Scout-17B-16E-Instruct",
+        "sambanova/Llama-4-Scout-17B-16E-Instruct",
         CoreModelId.llama4_scout_17b_16e_instruct.value,
     ),
+    build_hf_repo_model_entry(
+        "sambanova/Llama-4-Maverick-17B-128E-Instruct",
+        CoreModelId.llama4_maverick_17b_128e_instruct.value,
+    ),
+    build_hf_repo_model_entry(
+        "sambanova/Meta-Llama-Guard-3-8B",
+        CoreModelId.llama_guard_3_8b.value,
+    ),
 ]
diff --git a/llama_stack/providers/remote/inference/sambanova/sambanova.py b/llama_stack/providers/remote/inference/sambanova/sambanova.py
index 1665e72b8..20f863665 100644
--- a/llama_stack/providers/remote/inference/sambanova/sambanova.py
+++ b/llama_stack/providers/remote/inference/sambanova/sambanova.py
@@ -5,305 +5,249 @@
 # the root directory of this source tree.
 
 import json
-from typing import AsyncGenerator, List, Optional
+from collections.abc import Iterable
 
-from openai import OpenAI
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam as OpenAIChatCompletionAssistantMessage,
+)
+from openai.types.chat import (
+    ChatCompletionContentPartImageParam as OpenAIChatCompletionContentPartImageParam,
+)
+from openai.types.chat import (
+    ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam,
+)
+from openai.types.chat import (
+    ChatCompletionContentPartTextParam as OpenAIChatCompletionContentPartTextParam,
+)
+from openai.types.chat import (
+    ChatCompletionMessageParam as OpenAIChatCompletionMessage,
+)
+from openai.types.chat import (
+    ChatCompletionMessageToolCallParam as OpenAIChatCompletionMessageToolCall,
+)
+from openai.types.chat import (
+    ChatCompletionSystemMessageParam as OpenAIChatCompletionSystemMessage,
+)
+from openai.types.chat import (
+    ChatCompletionToolMessageParam as OpenAIChatCompletionToolMessage,
+)
+from openai.types.chat import (
+    ChatCompletionUserMessageParam as OpenAIChatCompletionUserMessage,
+)
+from openai.types.chat.chat_completion_content_part_image_param import (
+    ImageURL as OpenAIImageURL,
+)
+from openai.types.chat.chat_completion_message_tool_call_param import (
+    Function as OpenAIFunction,
+)
 
 from llama_stack.apis.common.content_types import (
     ImageContentItem,
     InterleavedContent,
-    InterleavedContentItem,
     TextContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
-    ChatCompletionResponse,
     CompletionMessage,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
-    GreedySamplingStrategy,
-    Inference,
-    LogProbConfig,
+    JsonSchemaResponseFormat,
     Message,
-    ResponseFormat,
-    SamplingParams,
-    StopReason,
     SystemMessage,
-    TextTruncation,
-    ToolCall,
     ToolChoice,
-    ToolConfig,
-    ToolDefinition,
-    ToolPromptFormat,
     ToolResponseMessage,
-    TopKSamplingStrategy,
-    TopPSamplingStrategy,
     UserMessage,
 )
-from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
+from llama_stack.log import get_logger
+from llama_stack.models.llama.datatypes import BuiltinTool
+from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
 from llama_stack.providers.utils.inference.openai_compat import (
-    OpenAIChatCompletionToLlamaStackMixin,
-    OpenAICompletionToLlamaStackMixin,
-    process_chat_completion_stream_response,
-)
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    convert_image_content_to_url,
+    convert_tooldef_to_openai_tool,
+    get_sampling_options,
 )
+from llama_stack.providers.utils.inference.prompt_adapter import convert_image_content_to_url
 
 from .config import SambaNovaImplConfig
 from .models import MODEL_ENTRIES
 
+logger = get_logger(name=__name__, category="inference")
 
-class SambaNovaInferenceAdapter(
-    ModelRegistryHelper,
-    Inference,
-    OpenAIChatCompletionToLlamaStackMixin,
-    OpenAICompletionToLlamaStackMixin,
-):
-    def __init__(self, config: SambaNovaImplConfig) -> None:
-        ModelRegistryHelper.__init__(self, model_entries=MODEL_ENTRIES)
-        self.config = config
 
-    async def initialize(self) -> None:
-        return
+async def convert_message_to_openai_dict_with_b64_images(
+    message: Message | dict,
+) -> OpenAIChatCompletionMessage:
+    """
+    Convert a Message to an OpenAI API-compatible dictionary.
+    """
+    # Users can supply a dict instead of a Message object; convert it to a
+    # Message object here so the rest of the conversion is type-safe.
+    if isinstance(message, dict):
+        if "role" not in message:
+            raise ValueError("role is required in message")
+        if message["role"] == "user":
+            message = UserMessage(**message)
+        elif message["role"] == "assistant":
+            message = CompletionMessage(**message)
+        elif message["role"] == "tool":
+            message = ToolResponseMessage(**message)
+        elif message["role"] == "system":
+            message = SystemMessage(**message)
+        else:
+            raise ValueError(f"Unsupported message role: {message['role']}")
 
-    async def shutdown(self) -> None:
-        pass
-
-    def _get_client(self) -> OpenAI:
-        return OpenAI(base_url=self.config.url, api_key=self.config.api_key)
-
-    async def completion(
-        self,
-        model_id: str,
+    # Map Llama Stack spec to OpenAI spec -
+    #  str -> str
+    #  {"type": "text", "text": ...} -> {"type": "text", "text": ...}
+    #  {"type": "image", "image": {"url": {"uri": ...}}} -> {"type": "image_url", "image_url": {"url": ...}}
+    #  {"type": "image", "image": {"data": ...}} -> {"type": "image_url", "image_url": {"url": "data:image/?;base64,..."}}
+    #  List[...] -> List[...]
+    async def _convert_message_content(
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> AsyncGenerator:
-        raise NotImplementedError()
-
-    async def chat_completion(
-        self,
-        model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = ToolPromptFormat.json,
-        stream: Optional[bool] = False,
-        tool_config: Optional[ToolConfig] = None,
-        logprobs: Optional[LogProbConfig] = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-
-        request = ChatCompletionRequest(
-            model=model.provider_resource_id,
-            messages=messages,
-            sampling_params=sampling_params,
-            tools=tools or [],
-            stream=stream,
-            logprobs=logprobs,
-            tool_config=tool_config,
-        )
-        request_sambanova = await self.convert_chat_completion_request(request)
-
-        if stream:
-            return self._stream_chat_completion(request_sambanova)
-        else:
-            return await self._nonstream_chat_completion(request_sambanova)
-
-    async def _nonstream_chat_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:
-        response = self._get_client().chat.completions.create(**request)
-
-        choice = response.choices[0]
-
-        result = ChatCompletionResponse(
-            completion_message=CompletionMessage(
-                content=choice.message.content or "",
-                stop_reason=self.convert_to_sambanova_finish_reason(choice.finish_reason),
-                tool_calls=self.convert_to_sambanova_tool_calls(choice.message.tool_calls),
-            ),
-            logprobs=None,
-        )
-
-        return result
-
-    async def _stream_chat_completion(self, request: ChatCompletionRequest) -> AsyncGenerator:
-        async def _to_async_generator():
-            streaming = self._get_client().chat.completions.create(**request)
-            for chunk in streaming:
-                yield chunk
-
-        stream = _to_async_generator()
-        async for chunk in process_chat_completion_stream_response(stream, request):
-            yield chunk
-
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError()
-
-    async def convert_chat_completion_request(self, request: ChatCompletionRequest) -> dict:
-        compatible_request = self.convert_sampling_params(request.sampling_params)
-        compatible_request["model"] = request.model
-        compatible_request["messages"] = await self.convert_to_sambanova_messages(request.messages)
-        compatible_request["stream"] = request.stream
-        compatible_request["logprobs"] = False
-        compatible_request["extra_headers"] = {
-            b"User-Agent": b"llama-stack: sambanova-inference-adapter",
-        }
-        compatible_request["tools"] = self.convert_to_sambanova_tool(request.tools)
-        return compatible_request
-
-    def convert_sampling_params(self, sampling_params: SamplingParams, legacy: bool = False) -> dict:
-        params = {}
-
-        if sampling_params:
-            params["frequency_penalty"] = sampling_params.repetition_penalty
-
-            if sampling_params.max_tokens:
-                if legacy:
-                    params["max_tokens"] = sampling_params.max_tokens
-                else:
-                    params["max_completion_tokens"] = sampling_params.max_tokens
-
-            if isinstance(sampling_params.strategy, TopPSamplingStrategy):
-                params["top_p"] = sampling_params.strategy.top_p
-            if isinstance(sampling_params.strategy, TopKSamplingStrategy):
-                params["extra_body"]["top_k"] = sampling_params.strategy.top_k
-            if isinstance(sampling_params.strategy, GreedySamplingStrategy):
-                params["temperature"] = 0.0
-
-        return params
-
-    async def convert_to_sambanova_messages(self, messages: List[Message]) -> List[dict]:
-        conversation = []
-        for message in messages:
-            content = {}
-
-            content["content"] = await self.convert_to_sambanova_content(message)
-
-            if isinstance(message, UserMessage):
-                content["role"] = "user"
-            elif isinstance(message, CompletionMessage):
-                content["role"] = "assistant"
-                tools = []
-                for tool_call in message.tool_calls:
-                    tools.append(
-                        {
-                            "id": tool_call.call_id,
-                            "function": {
-                                "name": tool_call.name,
-                                "arguments": json.dumps(tool_call.arguments),
-                            },
-                            "type": "function",
-                        }
-                    )
-                content["tool_calls"] = tools
-            elif isinstance(message, ToolResponseMessage):
-                content["role"] = "tool"
-                content["tool_call_id"] = message.call_id
-            elif isinstance(message, SystemMessage):
-                content["role"] = "system"
-
-            conversation.append(content)
-
-        return conversation
-
-    async def convert_to_sambanova_content(self, message: Message) -> dict:
-        async def _convert_content(content) -> dict:
-            if isinstance(content, ImageContentItem):
-                url = await convert_image_content_to_url(content, download=True)
-                # A fix to make sure the call sucess.
-                components = url.split(";base64")
-                url = f"{components[0].lower()};base64{components[1]}"
-                return {
-                    "type": "image_url",
-                    "image_url": {"url": url},
-                }
+    ) -> str | Iterable[OpenAIChatCompletionContentPartParam]:
+        async def impl(
+            content_: InterleavedContent,
+        ) -> str | OpenAIChatCompletionContentPartParam | list[OpenAIChatCompletionContentPartParam]:
+            # Llama Stack and OpenAI spec match for str and text input
+            if isinstance(content_, str):
+                return content_
+            elif isinstance(content_, TextContentItem):
+                return OpenAIChatCompletionContentPartTextParam(
+                    type="text",
+                    text=content_.text,
+                )
+            elif isinstance(content_, ImageContentItem):
+                return OpenAIChatCompletionContentPartImageParam(
+                    type="image_url",
+                    image_url=OpenAIImageURL(url=await convert_image_content_to_url(content_, download=True)),
+                )
+            elif isinstance(content_, list):
+                return [await impl(item) for item in content_]
             else:
-                text = content.text if isinstance(content, TextContentItem) else content
-                assert isinstance(text, str)
-                return {"type": "text", "text": text}
+                raise ValueError(f"Unsupported content type: {type(content_)}")
 
-        if isinstance(message.content, list):
-            # If it is a list, the text content should be wrapped in dict
-            content = [await _convert_content(c) for c in message.content]
+        ret = await impl(content)
+
+        # OpenAI*Message expects a str or list
+        if isinstance(ret, str) or isinstance(ret, list):
+            return ret
         else:
-            content = message.content
+            return [ret]
 
-        return content
+    out: OpenAIChatCompletionMessage = None
+    if isinstance(message, UserMessage):
+        out = OpenAIChatCompletionUserMessage(
+            role="user",
+            content=await _convert_message_content(message.content),
+        )
+    elif isinstance(message, CompletionMessage):
+        out = OpenAIChatCompletionAssistantMessage(
+            role="assistant",
+            content=await _convert_message_content(message.content),
+            tool_calls=[
+                OpenAIChatCompletionMessageToolCall(
+                    id=tool.call_id,
+                    function=OpenAIFunction(
+                        name=tool.tool_name if not isinstance(tool.tool_name, BuiltinTool) else tool.tool_name.value,
+                        arguments=json.dumps(tool.arguments),
+                    ),
+                    type="function",
+                )
+                for tool in message.tool_calls
+            ]
+            or None,
+        )
+    elif isinstance(message, ToolResponseMessage):
+        out = OpenAIChatCompletionToolMessage(
+            role="tool",
+            tool_call_id=message.call_id,
+            content=await _convert_message_content(message.content),
+        )
+    elif isinstance(message, SystemMessage):
+        out = OpenAIChatCompletionSystemMessage(
+            role="system",
+            content=await _convert_message_content(message.content),
+        )
+    else:
+        raise ValueError(f"Unsupported message type: {type(message)}")
 
-    def convert_to_sambanova_tool(self, tools: List[ToolDefinition]) -> List[dict]:
-        if tools is None:
-            return tools
+    return out
 
-        compatiable_tools = []
 
-        for tool in tools:
-            properties = {}
-            compatiable_required = []
-            if tool.parameters:
-                for tool_key, tool_param in tool.parameters.items():
-                    properties[tool_key] = {"type": tool_param.param_type}
-                    if tool_param.description:
-                        properties[tool_key]["description"] = tool_param.description
-                    if tool_param.default:
-                        properties[tool_key]["default"] = tool_param.default
-                    if tool_param.required:
-                        compatiable_required.append(tool_key)
+class SambaNovaInferenceAdapter(LiteLLMOpenAIMixin):
+    _config: SambaNovaImplConfig
 
-            compatiable_tool = {
-                "type": "function",
-                "function": {
-                    "name": tool.tool_name,
-                    "description": tool.description,
-                    "parameters": {
-                        "type": "object",
-                        "properties": properties,
-                        "required": compatiable_required,
-                    },
+    def __init__(self, config: SambaNovaImplConfig):
+        self.config = config
+        LiteLLMOpenAIMixin.__init__(
+            self,
+            model_entries=MODEL_ENTRIES,
+            api_key_from_config=self.config.api_key,
+            provider_data_api_key_field="sambanova_api_key",
+        )
+
+    def _get_api_key(self) -> str:
+        config_api_key = self.config.api_key if self.config.api_key else None
+        if config_api_key:
+            return config_api_key.get_secret_value()
+        else:
+            provider_data = self.get_request_provider_data()
+            if provider_data is None or not provider_data.sambanova_api_key:
+                raise ValueError(
+                    'Pass Sambanova API Key in the header X-LlamaStack-Provider-Data as { "sambanova_api_key": <your api key> }'
+                )
+            return provider_data.sambanova_api_key
+
+    async def _get_params(self, request: ChatCompletionRequest) -> dict:
+        input_dict = {}
+
+        input_dict["messages"] = [await convert_message_to_openai_dict_with_b64_images(m) for m in request.messages]
+        if fmt := request.response_format:
+            if not isinstance(fmt, JsonSchemaResponseFormat):
+                raise ValueError(
+                    f"Unsupported response format: {type(fmt)}. Only JsonSchemaResponseFormat is supported."
+                )
+
+            fmt = fmt.json_schema
+            name = fmt["title"]
+            del fmt["title"]
+            fmt["additionalProperties"] = False
+
+            # Apply additionalProperties: False recursively to all objects
+            fmt = self._add_additional_properties_recursive(fmt)
+
+            input_dict["response_format"] = {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": name,
+                    "schema": fmt,
+                    "strict": False,
                 },
             }
+        if request.tools:
+            input_dict["tools"] = [convert_tooldef_to_openai_tool(tool) for tool in request.tools]
+            if request.tool_config.tool_choice:
+                input_dict["tool_choice"] = (
+                    request.tool_config.tool_choice.value
+                    if isinstance(request.tool_config.tool_choice, ToolChoice)
+                    else request.tool_config.tool_choice
+                )
 
-            compatiable_tools.append(compatiable_tool)
+        provider_data = self.get_request_provider_data()
+        key_field = self.provider_data_api_key_field
+        if provider_data and getattr(provider_data, key_field, None):
+            api_key = getattr(provider_data, key_field)
+        else:
+            api_key = self._get_api_key()
 
-        if len(compatiable_tools) > 0:
-            return compatiable_tools
-        return None
-
-    def convert_to_sambanova_finish_reason(self, finish_reason: str) -> StopReason:
         return {
-            "stop": StopReason.end_of_turn,
-            "length": StopReason.out_of_tokens,
-            "tool_calls": StopReason.end_of_message,
-        }.get(finish_reason, StopReason.end_of_turn)
+            "model": request.model,
+            "api_key": api_key,
+            "api_base": self.config.url,
+            **input_dict,
+            "stream": request.stream,
+            **get_sampling_options(request.sampling_params),
+        }
 
-    def convert_to_sambanova_tool_calls(
-        self,
-        tool_calls,
-    ) -> List[ToolCall]:
-        if not tool_calls:
-            return []
+    async def initialize(self):
+        await super().initialize()
 
-        compitable_tool_calls = [
-            ToolCall(
-                call_id=call.id,
-                tool_name=call.function.name,
-                arguments=json.loads(call.function.arguments),
-                arguments_json=call.function.arguments,
-            )
-            for call in tool_calls
-        ]
-
-        return compitable_tool_calls
+    async def shutdown(self):
+        await super().shutdown()
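
Illustrative sketch (not part of the patch): a minimal, hedged example of how the new convert_message_to_openai_dict_with_b64_images helper above coerces a plain dict into a Message and emits an OpenAI-style message param. The module path is assumed to be llama_stack.providers.remote.inference.sambanova.sambanova; the message text is arbitrary.

import asyncio

from llama_stack.providers.remote.inference.sambanova.sambanova import (
    convert_message_to_openai_dict_with_b64_images,
)


async def main() -> None:
    # Plain dicts are accepted and coerced into the matching Message subclass.
    user = {"role": "user", "content": "Describe the llama in one sentence."}
    openai_user = await convert_message_to_openai_dict_with_b64_images(user)
    # Expected shape: {'role': 'user', 'content': 'Describe the llama in one sentence.'}
    print(openai_user)


asyncio.run(main())
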
diff --git a/llama_stack/providers/remote/inference/sambanova_openai_compat/__init__.py b/llama_stack/providers/remote/inference/sambanova_openai_compat/__init__.py
index e31a3364c..60afe91ca 100644
--- a/llama_stack/providers/remote/inference/sambanova_openai_compat/__init__.py
+++ b/llama_stack/providers/remote/inference/sambanova_openai_compat/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import InferenceProvider
 
 from .config import SambaNovaCompatConfig
 
 
-async def get_adapter_impl(config: SambaNovaCompatConfig, _deps) -> Inference:
+async def get_adapter_impl(config: SambaNovaCompatConfig, _deps) -> InferenceProvider:
     # import dynamically so the import is used only when it is needed
     from .sambanova import SambaNovaCompatInferenceAdapter
 
diff --git a/llama_stack/providers/remote/inference/sambanova_openai_compat/config.py b/llama_stack/providers/remote/inference/sambanova_openai_compat/config.py
index b792cb6e7..072fa85d1 100644
--- a/llama_stack/providers/remote/inference/sambanova_openai_compat/config.py
+++ b/llama_stack/providers/remote/inference/sambanova_openai_compat/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class SambaNovaProviderDataValidator(BaseModel):
-    sambanova_api_key: Optional[str] = Field(
+    sambanova_api_key: str | None = Field(
         default=None,
         description="API key for SambaNova models",
     )
@@ -20,7 +20,7 @@ class SambaNovaProviderDataValidator(BaseModel):
 
 @json_schema_type
 class SambaNovaCompatConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The SambaNova API key",
     )
@@ -31,7 +31,7 @@ class SambaNovaCompatConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "openai_compat_api_base": "https://api.sambanova.ai/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/tgi/__init__.py b/llama_stack/providers/remote/inference/tgi/__init__.py
index 834e51324..51614f1a6 100644
--- a/llama_stack/providers/remote/inference/tgi/__init__.py
+++ b/llama_stack/providers/remote/inference/tgi/__init__.py
@@ -4,13 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Union
-
 from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig
 
 
 async def get_adapter_impl(
-    config: Union[InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig],
+    config: InferenceAPIImplConfig | InferenceEndpointImplConfig | TGIImplConfig,
     _deps,
 ):
     from .tgi import InferenceAPIAdapter, InferenceEndpointAdapter, TGIAdapter
diff --git a/llama_stack/providers/remote/inference/tgi/config.py b/llama_stack/providers/remote/inference/tgi/config.py
index 6ad663662..3d632c9d8 100644
--- a/llama_stack/providers/remote/inference/tgi/config.py
+++ b/llama_stack/providers/remote/inference/tgi/config.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -29,7 +28,7 @@ class InferenceEndpointImplConfig(BaseModel):
     endpoint_name: str = Field(
         description="The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided.",
     )
-    api_token: Optional[SecretStr] = Field(
+    api_token: SecretStr | None = Field(
         default=None,
         description="Your Hugging Face user access token (will default to locally saved token if not provided)",
     )
@@ -52,7 +51,7 @@ class InferenceAPIImplConfig(BaseModel):
     huggingface_repo: str = Field(
         description="The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct')",
     )
-    api_token: Optional[SecretStr] = Field(
+    api_token: SecretStr | None = Field(
         default=None,
         description="Your Hugging Face user access token (will default to locally saved token if not provided)",
     )
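
Illustrative sketch (not part of the patch): constructing the Hugging Face Inference Endpoint config described above. It assumes endpoint_name and the optional api_token are the only fields that need supplying; the endpoint name reuses the example from the field description.

from llama_stack.providers.remote.inference.tgi.config import InferenceEndpointImplConfig

# api_token defaults to None and falls back to the locally saved Hugging Face token.
cfg = InferenceEndpointImplConfig(endpoint_name="my-cool-org/meta-llama-3-1-8b-instruct-rce")
print(cfg.endpoint_name)
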
diff --git a/llama_stack/providers/remote/inference/tgi/tgi.py b/llama_stack/providers/remote/inference/tgi/tgi.py
index 4ee386a15..292d74ef8 100644
--- a/llama_stack/providers/remote/inference/tgi/tgi.py
+++ b/llama_stack/providers/remote/inference/tgi/tgi.py
@@ -6,7 +6,7 @@
 
 
 import logging
-from typing import AsyncGenerator, List, Optional
+from collections.abc import AsyncGenerator
 
 from huggingface_hub import AsyncInferenceClient, HfApi
 
@@ -23,6 +23,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
@@ -105,10 +106,10 @@ class _HfAdapter(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -134,7 +135,7 @@ class _HfAdapter(
 
     def _build_options(
         self,
-        sampling_params: Optional[SamplingParams] = None,
+        sampling_params: SamplingParams | None = None,
         fmt: ResponseFormat = None,
     ):
         options = get_sampling_options(sampling_params)
@@ -209,15 +210,15 @@ class _HfAdapter(
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -284,13 +285,23 @@ class _HfAdapter(
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         raise NotImplementedError()
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
 
 class TGIAdapter(_HfAdapter):
     async def initialize(self, config: TGIImplConfig) -> None:
diff --git a/llama_stack/providers/remote/inference/together/config.py b/llama_stack/providers/remote/inference/together/config.py
index fa7c45c9f..5c7f60519 100644
--- a/llama_stack/providers/remote/inference/together/config.py
+++ b/llama_stack/providers/remote/inference/together/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -17,13 +17,13 @@ class TogetherImplConfig(BaseModel):
         default="https://api.together.xyz/v1",
         description="The URL for the Together AI server",
     )
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default=None,
         description="The Together AI API Key",
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "url": "https://api.together.xyz/v1",
             "api_key": "${env.TOGETHER_API_KEY:}",
diff --git a/llama_stack/providers/remote/inference/together/together.py b/llama_stack/providers/remote/inference/together/together.py
index 48e41f5b0..7305a638d 100644
--- a/llama_stack/providers/remote/inference/together/together.py
+++ b/llama_stack/providers/remote/inference/together/together.py
@@ -4,7 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 from openai import AsyncOpenAI
 from together import AsyncTogether
@@ -22,6 +23,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
@@ -86,10 +88,10 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -147,8 +149,8 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
 
     def _build_options(
         self,
-        sampling_params: Optional[SamplingParams],
-        logprobs: Optional[LogProbConfig],
+        sampling_params: SamplingParams | None,
+        logprobs: LogProbConfig | None,
         fmt: ResponseFormat,
     ) -> dict:
         options = get_sampling_options(sampling_params)
@@ -175,15 +177,15 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -224,7 +226,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         input_dict = {}
         media_present = request_has_media(request)
         llama_model = self.get_llama_model(request.model)
@@ -249,10 +251,10 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
         assert all(not content_has_media(content) for content in contents), (
@@ -266,27 +268,37 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
         embeddings = [item.embedding for item in r.data]
         return EmbeddingsResponse(embeddings=embeddings)
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
@@ -313,29 +325,29 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
             model=model_obj.provider_resource_id,
diff --git a/llama_stack/providers/remote/inference/together_openai_compat/__init__.py b/llama_stack/providers/remote/inference/together_openai_compat/__init__.py
index 6fdf05b7e..8213fc5f4 100644
--- a/llama_stack/providers/remote/inference/together_openai_compat/__init__.py
+++ b/llama_stack/providers/remote/inference/together_openai_compat/__init__.py
@@ -4,12 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import Inference
+from llama_stack.apis.inference import InferenceProvider
 
 from .config import TogetherCompatConfig
 
 
-async def get_adapter_impl(config: TogetherCompatConfig, _deps) -> Inference:
+async def get_adapter_impl(config: TogetherCompatConfig, _deps) -> InferenceProvider:
     # import dynamically so the import is used only when it is needed
     from .together import TogetherCompatInferenceAdapter
 
diff --git a/llama_stack/providers/remote/inference/together_openai_compat/config.py b/llama_stack/providers/remote/inference/together_openai_compat/config.py
index 120adbed9..0c6d4f748 100644
--- a/llama_stack/providers/remote/inference/together_openai_compat/config.py
+++ b/llama_stack/providers/remote/inference/together_openai_compat/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,7 @@ from llama_stack.schema_utils import json_schema_type
 
 
 class TogetherProviderDataValidator(BaseModel):
-    together_api_key: Optional[str] = Field(
+    together_api_key: str | None = Field(
         default=None,
         description="API key for Together models",
     )
@@ -20,7 +20,7 @@ class TogetherProviderDataValidator(BaseModel):
 
 @json_schema_type
 class TogetherCompatConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Together API key",
     )
@@ -31,7 +31,7 @@ class TogetherCompatConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, api_key: str = "${env.TOGETHER_API_KEY}", **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, api_key: str = "${env.TOGETHER_API_KEY}", **kwargs) -> dict[str, Any]:
         return {
             "openai_compat_api_base": "https://api.together.xyz/v1",
             "api_key": api_key,
diff --git a/llama_stack/providers/remote/inference/vllm/config.py b/llama_stack/providers/remote/inference/vllm/config.py
index 762cffde3..99abddf51 100644
--- a/llama_stack/providers/remote/inference/vllm/config.py
+++ b/llama_stack/providers/remote/inference/vllm/config.py
@@ -4,16 +4,16 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Optional
+from pathlib import Path
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 from llama_stack.schema_utils import json_schema_type
 
 
 @json_schema_type
 class VLLMInferenceAdapterConfig(BaseModel):
-    url: Optional[str] = Field(
+    url: str | None = Field(
         default=None,
         description="The URL for the vLLM model serving endpoint",
     )
@@ -21,15 +21,31 @@ class VLLMInferenceAdapterConfig(BaseModel):
         default=4096,
         description="Maximum number of tokens to generate.",
     )
-    api_token: Optional[str] = Field(
+    api_token: str | None = Field(
         default="fake",
         description="The API token",
     )
-    tls_verify: bool = Field(
+    tls_verify: bool | str = Field(
         default=True,
-        description="Whether to verify TLS certificates",
+        description="Whether to verify TLS certificates. Can be a boolean or a path to a CA certificate file.",
     )
 
+    @field_validator("tls_verify")
+    @classmethod
+    def validate_tls_verify(cls, v):
+        if isinstance(v, str):
+            # Check if it's a boolean string
+            if v.lower() in ("true", "false"):
+                return v.lower() == "true"
+            # Otherwise, treat it as a cert path
+            cert_path = Path(v).expanduser().resolve()
+            if not cert_path.exists():
+                raise ValueError(f"TLS certificate file does not exist: {v}")
+            if not cert_path.is_file():
+                raise ValueError(f"TLS certificate path is not a file: {v}")
+            return v
+        return v
+
     @classmethod
     def sample_run_config(
         cls,
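
Illustrative sketch (not part of the patch): what the new tls_verify validator above accepts. Boolean strings are coerced to bool, and any other string is treated as a CA bundle path that must exist on disk. The URL and certificate path below are placeholders.

from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig

# "true"/"false" strings are coerced to booleans.
cfg = VLLMInferenceAdapterConfig(url="https://vllm.example.com/v1", tls_verify="false")
assert cfg.tls_verify is False

# Any other string must point at an existing certificate file, e.g.:
# VLLMInferenceAdapterConfig(url="https://vllm.example.com/v1",
#                            tls_verify="/etc/ssl/certs/ca-bundle.crt")
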
diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index 8cfef2ee0..9f38d9abf 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -5,7 +5,8 @@
 # the root directory of this source tree.
 import json
 import logging
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 import httpx
 from openai import AsyncOpenAI
@@ -37,6 +38,7 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -94,7 +96,7 @@ def build_hf_repo_model_entries():
 
 def _convert_to_vllm_tool_calls_in_response(
     tool_calls,
-) -> List[ToolCall]:
+) -> list[ToolCall]:
     if not tool_calls:
         return []
 
@@ -109,7 +111,7 @@ def _convert_to_vllm_tool_calls_in_response(
     ]
 
 
-def _convert_to_vllm_tools_in_request(tools: List[ToolDefinition]) -> List[dict]:
+def _convert_to_vllm_tools_in_request(tools: list[ToolDefinition]) -> list[dict]:
     compat_tools = []
 
     for tool in tools:
@@ -157,27 +159,28 @@ def _convert_to_vllm_finish_reason(finish_reason: str) -> StopReason:
     }.get(finish_reason, StopReason.end_of_turn)
 
 
-async def _process_vllm_chat_completion_stream_response(
-    stream: AsyncGenerator[OpenAIChatCompletionChunk, None],
-) -> AsyncGenerator:
-    event_type = ChatCompletionResponseEventType.start
-    tool_call_buf = UnparseableToolCall()
-    async for chunk in stream:
-        if not chunk.choices:
-            log.warning("vLLM failed to generation any completions - check the vLLM server logs for an error.")
-            continue
-        choice = chunk.choices[0]
-        if choice.finish_reason:
-            args_str = tool_call_buf.arguments
-            args = None
-            try:
-                args = {} if not args_str else json.loads(args_str)
-            except Exception as e:
-                log.warning(f"Failed to parse tool call buffer arguments: {args_str} \nError: {e}")
-            if args:
-                yield ChatCompletionResponseStreamChunk(
+def _process_vllm_chat_completion_end_of_stream(
+    finish_reason: str | None,
+    last_chunk_content: str | None,
+    current_event_type: ChatCompletionResponseEventType,
+    tool_call_bufs: dict[str, UnparseableToolCall] | None = None,
+) -> list[OpenAIChatCompletionChunk]:
+    chunks = []
+
+    if finish_reason is not None:
+        stop_reason = _convert_to_vllm_finish_reason(finish_reason)
+    else:
+        stop_reason = StopReason.end_of_message
+
+    tool_call_bufs = tool_call_bufs or {}
+    for _index, tool_call_buf in sorted(tool_call_bufs.items()):
+        args_str = tool_call_buf.arguments or "{}"
+        try:
+            args = json.loads(args_str)
+            chunks.append(
+                ChatCompletionResponseStreamChunk(
                     event=ChatCompletionResponseEvent(
-                        event_type=event_type,
+                        event_type=current_event_type,
                         delta=ToolCallDelta(
                             tool_call=ToolCall(
                                 call_id=tool_call_buf.call_id,
@@ -189,8 +192,12 @@ async def _process_vllm_chat_completion_stream_response(
                         ),
                     )
                 )
-            elif args_str:
-                yield ChatCompletionResponseStreamChunk(
+            )
+        except Exception as e:
+            log.warning(f"Failed to parse tool call buffer arguments: {args_str} \nError: {e}")
+
+            chunks.append(
+                ChatCompletionResponseStreamChunk(
                     event=ChatCompletionResponseEvent(
                         event_type=ChatCompletionResponseEventType.progress,
                         delta=ToolCallDelta(
@@ -199,21 +206,62 @@ async def _process_vllm_chat_completion_stream_response(
                         ),
                     )
                 )
-            yield ChatCompletionResponseStreamChunk(
-                event=ChatCompletionResponseEvent(
-                    event_type=ChatCompletionResponseEventType.complete,
-                    delta=TextDelta(text=choice.delta.content or ""),
-                    logprobs=None,
-                    stop_reason=_convert_to_vllm_finish_reason(choice.finish_reason),
-                )
             )
-        elif choice.delta.tool_calls:
-            tool_call = convert_tool_call(choice.delta.tool_calls[0])
-            tool_call_buf.tool_name += str(tool_call.tool_name)
-            tool_call_buf.call_id += tool_call.call_id
-            # TODO: remove str() when dict type for 'arguments' is no longer allowed
-            tool_call_buf.arguments += str(tool_call.arguments)
-        else:
+
+    chunks.append(
+        ChatCompletionResponseStreamChunk(
+            event=ChatCompletionResponseEvent(
+                event_type=ChatCompletionResponseEventType.complete,
+                delta=TextDelta(text=last_chunk_content or ""),
+                logprobs=None,
+                stop_reason=stop_reason,
+            )
+        )
+    )
+
+    return chunks
+
+
+async def _process_vllm_chat_completion_stream_response(
+    stream: AsyncGenerator[OpenAIChatCompletionChunk, None],
+) -> AsyncGenerator:
+    yield ChatCompletionResponseStreamChunk(
+        event=ChatCompletionResponseEvent(
+            event_type=ChatCompletionResponseEventType.start,
+            delta=TextDelta(text=""),
+        )
+    )
+    event_type = ChatCompletionResponseEventType.progress
+    tool_call_bufs: dict[str, UnparseableToolCall] = {}
+    end_of_stream_processed = False
+
+    async for chunk in stream:
+        if not chunk.choices:
+            log.warning("vLLM failed to generation any completions - check the vLLM server logs for an error.")
+            return
+        choice = chunk.choices[0]
+        if choice.delta.tool_calls:
+            for delta_tool_call in choice.delta.tool_calls:
+                tool_call = convert_tool_call(delta_tool_call)
+                if delta_tool_call.index not in tool_call_bufs:
+                    tool_call_bufs[delta_tool_call.index] = UnparseableToolCall()
+                tool_call_buf = tool_call_bufs[delta_tool_call.index]
+                tool_call_buf.tool_name += str(tool_call.tool_name)
+                tool_call_buf.call_id += tool_call.call_id
+                tool_call_buf.arguments += (
+                    tool_call.arguments if isinstance(tool_call.arguments, str) else json.dumps(tool_call.arguments)
+                )
+        if choice.finish_reason:
+            chunks = _process_vllm_chat_completion_end_of_stream(
+                finish_reason=choice.finish_reason,
+                last_chunk_content=choice.delta.content,
+                current_event_type=event_type,
+                tool_call_bufs=tool_call_bufs,
+            )
+            for c in chunks:
+                yield c
+            end_of_stream_processed = True
+        elif not choice.delta.tool_calls:
             yield ChatCompletionResponseStreamChunk(
                 event=ChatCompletionResponseEvent(
                     event_type=event_type,
@@ -223,6 +271,17 @@ async def _process_vllm_chat_completion_stream_response(
             )
             event_type = ChatCompletionResponseEventType.progress
 
+    if end_of_stream_processed:
+        return
+
+    # the stream ended without a chunk containing finish_reason - we have to generate the
+    # respective completion chunks manually
+    chunks = _process_vllm_chat_completion_end_of_stream(
+        finish_reason=None, last_chunk_content=None, current_event_type=event_type, tool_call_bufs=tool_call_bufs
+    )
+    for c in chunks:
+        yield c
+
 
 class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     def __init__(self, config: VLLMInferenceAdapterConfig) -> None:
@@ -255,22 +314,24 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         return AsyncOpenAI(
             base_url=self.config.url,
             api_key=self.config.api_token,
-            http_client=None if self.config.tls_verify else httpx.AsyncClient(verify=False),
+            http_client=httpx.AsyncClient(verify=self.config.tls_verify),
         )
 
     async def completion(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]:
         self._lazy_initialize_client()
         if sampling_params is None:
             sampling_params = SamplingParams()
         model = await self._get_model(model_id)
+        if model.provider_resource_id is None:
+            raise ValueError(f"Model {model_id} has no provider_resource_id set")
         request = CompletionRequest(
             model=model.provider_resource_id,
             content=content,
@@ -287,20 +348,22 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> ChatCompletionResponse | AsyncGenerator[ChatCompletionResponseStreamChunk, None]:
         self._lazy_initialize_client()
         if sampling_params is None:
             sampling_params = SamplingParams()
         model = await self._get_model(model_id)
+        if model.provider_resource_id is None:
+            raise ValueError(f"Model {model_id} has no provider_resource_id set")
         # This is to be consistent with OpenAI API and support vLLM <= v0.6.3
         # References:
         #   * https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice
@@ -372,7 +435,10 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         # self.client should only be created after the initialization is complete to avoid asyncio cross-context errors.
         # Changing this may lead to unpredictable behavior.
         client = self._create_client() if self.client is None else self.client
-        model = await self.register_helper.register_model(model)
+        try:
+            model = await self.register_helper.register_model(model)
+        except ValueError:
+            pass  # Ignore statically unknown model, will check live listing
         res = await client.models.list()
         available_models = [m.id async for m in res]
         if model.provider_resource_id not in available_models:
@@ -382,7 +448,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
             )
         return model
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         options = get_sampling_options(request.sampling_params)
         if "max_tokens" not in options:
             options["max_tokens"] = self.config.max_tokens
@@ -419,10 +485,10 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         self._lazy_initialize_client()
         assert self.client is not None
@@ -442,32 +508,42 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         embeddings = [data.embedding for data in response.data]
         return EmbeddingsResponse(embeddings=embeddings)
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         self._lazy_initialize_client()
         model_obj = await self._get_model(model)
 
-        extra_body: Dict[str, Any] = {}
+        extra_body: dict[str, Any] = {}
         if prompt_logprobs is not None and prompt_logprobs >= 0:
             extra_body["prompt_logprobs"] = prompt_logprobs
         if guided_choice:
@@ -498,29 +574,29 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         self._lazy_initialize_client()
         model_obj = await self._get_model(model)
         params = await prepare_openai_completion_params(
@@ -553,21 +629,21 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch completion is not supported for Ollama")
 
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for Ollama")
diff --git a/llama_stack/providers/remote/inference/watsonx/config.py b/llama_stack/providers/remote/inference/watsonx/config.py
index 7ee99b7e0..5eda9c5c0 100644
--- a/llama_stack/providers/remote/inference/watsonx/config.py
+++ b/llama_stack/providers/remote/inference/watsonx/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import os
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field, SecretStr
 
@@ -24,11 +24,11 @@ class WatsonXConfig(BaseModel):
         default_factory=lambda: os.getenv("WATSONX_BASE_URL", "https://us-south.ml.cloud.ibm.com"),
         description="A base url for accessing the watsonx.ai",
     )
-    api_key: Optional[SecretStr] = Field(
+    api_key: SecretStr | None = Field(
         default_factory=lambda: os.getenv("WATSONX_API_KEY"),
         description="The watsonx API key, only needed of using the hosted service",
     )
-    project_id: Optional[str] = Field(
+    project_id: str | None = Field(
         default_factory=lambda: os.getenv("WATSONX_PROJECT_ID"),
         description="The Project ID key, only needed of using the hosted service",
     )
@@ -38,7 +38,7 @@ class WatsonXConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "url": "${env.WATSONX_BASE_URL:https://us-south.ml.cloud.ibm.com}",
             "api_key": "${env.WATSONX_API_KEY:}",
diff --git a/llama_stack/providers/remote/inference/watsonx/watsonx.py b/llama_stack/providers/remote/inference/watsonx/watsonx.py
index d5d87ec01..59f5f5562 100644
--- a/llama_stack/providers/remote/inference/watsonx/watsonx.py
+++ b/llama_stack/providers/remote/inference/watsonx/watsonx.py
@@ -4,10 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import AsyncGenerator, List, Optional, Union
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 from ibm_watson_machine_learning.foundation_models import Model
 from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
+from openai import AsyncOpenAI
 
 from llama_stack.apis.common.content_types import InterleavedContent, InterleavedContentItem
 from llama_stack.apis.inference import (
@@ -19,6 +21,7 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -27,10 +30,21 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
+from llama_stack.apis.inference.inference import (
+    GreedySamplingStrategy,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
+    TopKSamplingStrategy,
+    TopPSamplingStrategy,
+)
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack.providers.utils.inference.openai_compat import (
     OpenAICompatCompletionChoice,
     OpenAICompatCompletionResponse,
+    prepare_openai_completion_params,
     process_chat_completion_response,
     process_chat_completion_stream_response,
     process_completion_response,
@@ -66,10 +80,10 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -95,6 +109,14 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
 
         return Model(model_id=model_id, credentials=credentials, project_id=project_id)
 
+    def _get_openai_client(self) -> AsyncOpenAI:
+        if not self._openai_client:
+            self._openai_client = AsyncOpenAI(
+                base_url=f"{self._config.url}/openai/v1",
+                api_key=self._config.api_key,
+            )
+        return self._openai_client
+
     async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse:
         params = await self._get_params(request)
         r = self._get_client(request.model).generate(**params)
@@ -132,15 +154,15 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         if sampling_params is None:
             sampling_params = SamplingParams()
@@ -197,7 +219,7 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: Union[ChatCompletionRequest, CompletionRequest]) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
         input_dict = {"params": {}}
         media_present = request_has_media(request)
         llama_model = self.get_llama_model(request.model)
@@ -213,36 +235,16 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
                 input_dict["params"][GenParams.MAX_NEW_TOKENS] = request.sampling_params.max_tokens
             if request.sampling_params.repetition_penalty:
                 input_dict["params"][GenParams.REPETITION_PENALTY] = request.sampling_params.repetition_penalty
-            if request.sampling_params.additional_params.get("top_p"):
-                input_dict["params"][GenParams.TOP_P] = request.sampling_params.additional_params["top_p"]
-            if request.sampling_params.additional_params.get("top_k"):
-                input_dict["params"][GenParams.TOP_K] = request.sampling_params.additional_params["top_k"]
-            if request.sampling_params.additional_params.get("temperature"):
-                input_dict["params"][GenParams.TEMPERATURE] = request.sampling_params.additional_params["temperature"]
-            if request.sampling_params.additional_params.get("length_penalty"):
-                input_dict["params"][GenParams.LENGTH_PENALTY] = request.sampling_params.additional_params[
-                    "length_penalty"
-                ]
-            if request.sampling_params.additional_params.get("random_seed"):
-                input_dict["params"][GenParams.RANDOM_SEED] = request.sampling_params.additional_params["random_seed"]
-            if request.sampling_params.additional_params.get("min_new_tokens"):
-                input_dict["params"][GenParams.MIN_NEW_TOKENS] = request.sampling_params.additional_params[
-                    "min_new_tokens"
-                ]
-            if request.sampling_params.additional_params.get("stop_sequences"):
-                input_dict["params"][GenParams.STOP_SEQUENCES] = request.sampling_params.additional_params[
-                    "stop_sequences"
-                ]
-            if request.sampling_params.additional_params.get("time_limit"):
-                input_dict["params"][GenParams.TIME_LIMIT] = request.sampling_params.additional_params["time_limit"]
-            if request.sampling_params.additional_params.get("truncate_input_tokens"):
-                input_dict["params"][GenParams.TRUNCATE_INPUT_TOKENS] = request.sampling_params.additional_params[
-                    "truncate_input_tokens"
-                ]
-            if request.sampling_params.additional_params.get("return_options"):
-                input_dict["params"][GenParams.RETURN_OPTIONS] = request.sampling_params.additional_params[
-                    "return_options"
-                ]
+
+            if isinstance(request.sampling_params.strategy, TopPSamplingStrategy):
+                input_dict["params"][GenParams.TOP_P] = request.sampling_params.strategy.top_p
+                input_dict["params"][GenParams.TEMPERATURE] = request.sampling_params.strategy.temperature
+            if isinstance(request.sampling_params.strategy, TopKSamplingStrategy):
+                input_dict["params"][GenParams.TOP_K] = request.sampling_params.strategy.top_k
+            if isinstance(request.sampling_params.strategy, GreedySamplingStrategy):
+                input_dict["params"][GenParams.TEMPERATURE] = 0.0
+
+        input_dict["params"][GenParams.STOP_SEQUENCES] = ["<|endoftext|>"]
 
         params = {
             **input_dict,
@@ -252,9 +254,137 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
-        pass
+        raise NotImplementedError("embedding is not supported for watsonx")
+
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        raise NotImplementedError()
+
+    async def openai_completion(
+        self,
+        model: str,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
+    ) -> OpenAICompletion:
+        model_obj = await self.model_store.get_model(model)
+        params = await prepare_openai_completion_params(
+            model=model_obj.provider_resource_id,
+            prompt=prompt,
+            best_of=best_of,
+            echo=echo,
+            frequency_penalty=frequency_penalty,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_tokens=max_tokens,
+            n=n,
+            presence_penalty=presence_penalty,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            top_p=top_p,
+            user=user,
+        )
+        return await self._get_openai_client().completions.create(**params)  # type: ignore
+
+    async def openai_chat_completion(
+        self,
+        model: str,
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
+        model_obj = await self.model_store.get_model(model)
+        params = await prepare_openai_completion_params(
+            model=model_obj.provider_resource_id,
+            messages=messages,
+            frequency_penalty=frequency_penalty,
+            function_call=function_call,
+            functions=functions,
+            logit_bias=logit_bias,
+            logprobs=logprobs,
+            max_completion_tokens=max_completion_tokens,
+            max_tokens=max_tokens,
+            n=n,
+            parallel_tool_calls=parallel_tool_calls,
+            presence_penalty=presence_penalty,
+            response_format=response_format,
+            seed=seed,
+            stop=stop,
+            stream=stream,
+            stream_options=stream_options,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            tools=tools,
+            top_logprobs=top_logprobs,
+            top_p=top_p,
+            user=user,
+        )
+        if params.get("stream", False):
+            return self._stream_openai_chat_completion(params)
+        return await self._get_openai_client().chat.completions.create(**params)  # type: ignore
+
+    async def _stream_openai_chat_completion(self, params: dict) -> AsyncGenerator:
+        # watsonx.ai sometimes adds usage data to the stream
+        include_usage = False
+        if params.get("stream_options", None):
+            include_usage = params["stream_options"].get("include_usage", False)
+        stream = await self._get_openai_client().chat.completions.create(**params)
+
+        seen_finish_reason = False
+        async for chunk in stream:
+            # Skip the trailing usage-only chunk (no choices) unless the caller requested usage
+            if not include_usage and seen_finish_reason and len(chunk.choices) == 0:
+                break
+            yield chunk
+            for choice in chunk.choices:
+                if choice.finish_reason:
+                    seen_finish_reason = True
+                    break
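
The `_get_params` rewrite above replaces the `additional_params` dictionary probing with a dispatch on the typed sampling strategy. A toy sketch of that dispatch using stand-in dataclasses (the real strategy classes live in `llama_stack.apis.inference.inference`):

```python
from dataclasses import dataclass


# Stand-ins for the real strategy classes; only the dispatch pattern matters here.
@dataclass
class TopPSamplingStrategy:
    top_p: float
    temperature: float


@dataclass
class TopKSamplingStrategy:
    top_k: int


@dataclass
class GreedySamplingStrategy:
    pass


def strategy_to_params(strategy) -> dict:
    params: dict = {}
    if isinstance(strategy, TopPSamplingStrategy):
        params["top_p"] = strategy.top_p
        params["temperature"] = strategy.temperature
    elif isinstance(strategy, TopKSamplingStrategy):
        params["top_k"] = strategy.top_k
    elif isinstance(strategy, GreedySamplingStrategy):
        params["temperature"] = 0.0  # greedy decoding
    return params


print(strategy_to_params(TopPSamplingStrategy(top_p=0.9, temperature=0.7)))
print(strategy_to_params(GreedySamplingStrategy()))
```
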
diff --git a/llama_stack/providers/remote/post_training/nvidia/config.py b/llama_stack/providers/remote/post_training/nvidia/config.py
index 7b42c8bb0..fa08b6e3f 100644
--- a/llama_stack/providers/remote/post_training/nvidia/config.py
+++ b/llama_stack/providers/remote/post_training/nvidia/config.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import os
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -15,23 +15,23 @@ from pydantic import BaseModel, Field
 class NvidiaPostTrainingConfig(BaseModel):
     """Configuration for NVIDIA Post Training implementation."""
 
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
         description="The NVIDIA API key.",
     )
 
-    dataset_namespace: Optional[str] = Field(
+    dataset_namespace: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_DATASET_NAMESPACE", "default"),
         description="The NVIDIA dataset namespace.",
     )
 
-    project_id: Optional[str] = Field(
+    project_id: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_PROJECT_ID", "test-example-model@v1"),
         description="The NVIDIA project ID.",
     )
 
     # TODO: validate this and add a default value
-    customizer_url: Optional[str] = Field(
+    customizer_url: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_CUSTOMIZER_URL"),
         description="Base URL for the NeMo Customizer API",
     )
@@ -53,7 +53,7 @@ class NvidiaPostTrainingConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "api_key": "${env.NVIDIA_API_KEY:}",
             "dataset_namespace": "${env.NVIDIA_DATASET_NAMESPACE:default}",
@@ -71,27 +71,27 @@ class SFTLoRADefaultConfig(BaseModel):
     n_epochs: int = 50
 
     # NeMo customizer specific parameters
-    log_every_n_steps: Optional[int] = None
+    log_every_n_steps: int | None = None
     val_check_interval: float = 0.25
     sequence_packing_enabled: bool = False
     weight_decay: float = 0.01
     lr: float = 0.0001
 
     # SFT specific parameters
-    hidden_dropout: Optional[float] = None
-    attention_dropout: Optional[float] = None
-    ffn_dropout: Optional[float] = None
+    hidden_dropout: float | None = None
+    attention_dropout: float | None = None
+    ffn_dropout: float | None = None
 
     # LoRA default parameters
     lora_adapter_dim: int = 8
-    lora_adapter_dropout: Optional[float] = None
+    lora_adapter_dropout: float | None = None
     lora_alpha: int = 16
 
     # Data config
     batch_size: int = 8
 
     @classmethod
-    def sample_config(cls) -> Dict[str, Any]:
+    def sample_config(cls) -> dict[str, Any]:
         """Return a sample configuration for NVIDIA training."""
         return {
             "n_epochs": 50,
diff --git a/llama_stack/providers/remote/post_training/nvidia/models.py b/llama_stack/providers/remote/post_training/nvidia/models.py
index 1b31b4dbe..6a28f8af8 100644
--- a/llama_stack/providers/remote/post_training/nvidia/models.py
+++ b/llama_stack/providers/remote/post_training/nvidia/models.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
 
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.utils.inference.model_registry import (
@@ -24,5 +23,5 @@ _MODEL_ENTRIES = [
 ]
 
 
-def get_model_entries() -> List[ProviderModelEntry]:
+def get_model_entries() -> list[ProviderModelEntry]:
     return _MODEL_ENTRIES
diff --git a/llama_stack/providers/remote/post_training/nvidia/post_training.py b/llama_stack/providers/remote/post_training/nvidia/post_training.py
index c74fb2a24..d839ffd6f 100644
--- a/llama_stack/providers/remote/post_training/nvidia/post_training.py
+++ b/llama_stack/providers/remote/post_training/nvidia/post_training.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 import warnings
 from datetime import datetime
-from typing import Any, Dict, List, Literal, Optional
+from typing import Any, Literal
 
 import aiohttp
 from pydantic import BaseModel, ConfigDict
@@ -50,7 +50,7 @@ class NvidiaPostTrainingJob(PostTrainingJob):
 
 
 class ListNvidiaPostTrainingJobs(BaseModel):
-    data: List[NvidiaPostTrainingJob]
+    data: list[NvidiaPostTrainingJob]
 
 
 class NvidiaPostTrainingJobStatusResponse(PostTrainingJobStatusResponse):
@@ -83,11 +83,11 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         self,
         method: str,
         path: str,
-        headers: Optional[Dict[str, Any]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        json: Optional[Dict[str, Any]] = None,
+        headers: dict[str, Any] | None = None,
+        params: dict[str, Any] | None = None,
+        json: dict[str, Any] | None = None,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Helper method to make HTTP requests to the Customizer API."""
         url = f"{self.customizer_url}{path}"
         request_headers = self.headers.copy()
@@ -109,9 +109,9 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
     async def get_training_jobs(
         self,
-        page: Optional[int] = 1,
-        page_size: Optional[int] = 10,
-        sort: Optional[Literal["created_at", "-created_at"]] = "created_at",
+        page: int | None = 1,
+        page_size: int | None = 10,
+        sort: Literal["created_at", "-created_at"] | None = "created_at",
     ) -> ListNvidiaPostTrainingJobs:
         """Get all customization jobs.
         Updated the base class return type from ListPostTrainingJobsResponse to ListNvidiaPostTrainingJobs.
@@ -207,12 +207,12 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
     async def supervised_fine_tune(
         self,
         job_uuid: str,
-        training_config: Dict[str, Any],
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        training_config: dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
         model: str,
-        checkpoint_dir: Optional[str],
-        algorithm_config: Optional[AlgorithmConfig] = None,
+        checkpoint_dir: str | None,
+        algorithm_config: AlgorithmConfig | None = None,
     ) -> NvidiaPostTrainingJob:
         """
         Fine-tunes a model on a dataset.
@@ -224,7 +224,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
         Parameters:
             training_config: TrainingConfig - Configuration for training
-            model: str - Model identifier
+            model: str - NeMo Customizer configuration name
             algorithm_config: Optional[AlgorithmConfig] - Algorithm-specific configuration
             checkpoint_dir: Optional[str] - Directory containing model checkpoints, ignored atm
             job_uuid: str - Unique identifier for the job, ignored atm
@@ -299,9 +299,6 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
             User is informed about unsupported parameters via warnings.
         """
-        # Map model to nvidia model name
-        # See `_MODEL_ENTRIES` for supported models
-        nvidia_model = self.get_provider_model_id(model)
 
         # Check for unsupported method parameters
         unsupported_method_params = []
@@ -347,7 +344,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
 
         # Prepare base job configuration
         job_config = {
-            "config": nvidia_model,
+            "config": model,
             "dataset": {
                 "name": training_config["data_config"]["dataset_id"],
                 "namespace": self.config.dataset_namespace,
@@ -423,8 +420,8 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         finetuned_model: str,
         algorithm_config: DPOAlignmentConfig,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
     ) -> PostTrainingJob:
         """Optimize a model based on preference data."""
         raise NotImplementedError("Preference optimization is not implemented yet")
diff --git a/llama_stack/providers/remote/post_training/nvidia/utils.py b/llama_stack/providers/remote/post_training/nvidia/utils.py
index ac47966af..d6e1016b2 100644
--- a/llama_stack/providers/remote/post_training/nvidia/utils.py
+++ b/llama_stack/providers/remote/post_training/nvidia/utils.py
@@ -6,7 +6,7 @@
 
 import logging
 import warnings
-from typing import Any, Dict, Set, Tuple
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -18,7 +18,7 @@ from .config import NvidiaPostTrainingConfig
 logger = logging.getLogger(__name__)
 
 
-def warn_unsupported_params(config_dict: Any, supported_keys: Set[str], config_name: str) -> None:
+def warn_unsupported_params(config_dict: Any, supported_keys: set[str], config_name: str) -> None:
     keys = set(config_dict.__annotations__.keys()) if isinstance(config_dict, BaseModel) else config_dict.keys()
     unsupported_params = [k for k in keys if k not in supported_keys]
     if unsupported_params:
@@ -28,7 +28,7 @@ def warn_unsupported_params(config_dict: Any, supported_keys: Set[str], config_n
 
 
 def validate_training_params(
-    training_config: Dict[str, Any], supported_keys: Set[str], config_name: str = "TrainingConfig"
+    training_config: dict[str, Any], supported_keys: set[str], config_name: str = "TrainingConfig"
 ) -> None:
     """
     Validates training parameters against supported keys.
@@ -57,7 +57,7 @@ def validate_training_params(
 
 
 # TODO: implement health checks for the Customizer once they are enabled
-async def _get_health(url: str) -> Tuple[bool, bool]: ...
+async def _get_health(url: str) -> tuple[bool, bool]: ...
 
 
 async def check_health(config: NvidiaPostTrainingConfig) -> None: ...
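
`warn_unsupported_params` compares the supplied config's keys against a whitelist and warns on anything extra. A minimal re-implementation of that check on a plain dict, for illustration only:

```python
import warnings


def warn_unsupported(config: dict, supported_keys: set[str], config_name: str) -> None:
    # Warn about keys the NVIDIA implementation would silently ignore.
    unsupported = [k for k in config if k not in supported_keys]
    if unsupported:
        warnings.warn(f"Parameters {unsupported} in `{config_name}` are not supported and will be ignored.")


warn_unsupported({"n_epochs": 50, "shuffle": True}, {"n_epochs", "batch_size"}, "TrainingConfig")
```
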
diff --git a/llama_stack/providers/remote/safety/bedrock/bedrock.py b/llama_stack/providers/remote/safety/bedrock/bedrock.py
index 2f960eead..c43b51073 100644
--- a/llama_stack/providers/remote/safety/bedrock/bedrock.py
+++ b/llama_stack/providers/remote/safety/bedrock/bedrock.py
@@ -6,7 +6,7 @@
 
 import json
 import logging
-from typing import Any, Dict, List
+from typing import Any
 
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import (
@@ -53,7 +53,7 @@ class BedrockSafetyAdapter(Safety, ShieldsProtocolPrivate):
             )
 
     async def run_shield(
-        self, shield_id: str, messages: List[Message], params: Dict[str, Any] = None
+        self, shield_id: str, messages: list[Message], params: dict[str, Any] = None
     ) -> RunShieldResponse:
         shield = await self.shield_store.get_shield(shield_id)
         if not shield:
diff --git a/llama_stack/providers/remote/safety/nvidia/config.py b/llama_stack/providers/remote/safety/nvidia/config.py
index 3df80ed4f..4ca703a4d 100644
--- a/llama_stack/providers/remote/safety/nvidia/config.py
+++ b/llama_stack/providers/remote/safety/nvidia/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import os
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -27,10 +27,10 @@ class NVIDIASafetyConfig(BaseModel):
         default_factory=lambda: os.getenv("GUARDRAILS_SERVICE_URL", "http://0.0.0.0:7331"),
         description="The url for accessing the guardrails service",
     )
-    config_id: Optional[str] = Field(default="self-check", description="Config ID to use from the config store")
+    config_id: str | None = Field(default="self-check", description="Config ID to use from the config store")
 
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "guardrails_service_url": "${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}",
             "config_id": "self-check",
diff --git a/llama_stack/providers/remote/safety/nvidia/nvidia.py b/llama_stack/providers/remote/safety/nvidia/nvidia.py
index 1ff4a6ad9..411badb1c 100644
--- a/llama_stack/providers/remote/safety/nvidia/nvidia.py
+++ b/llama_stack/providers/remote/safety/nvidia/nvidia.py
@@ -5,15 +5,15 @@
 # the root directory of this source tree.
 
 import logging
-from typing import Any, List, Optional
+from typing import Any
 
 import requests
 
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import RunShieldResponse, Safety, SafetyViolation, ViolationLevel
 from llama_stack.apis.shields import Shield
-from llama_stack.distribution.library_client import convert_pydantic_to_json_value
 from llama_stack.providers.datatypes import ShieldsProtocolPrivate
+from llama_stack.providers.utils.inference.openai_compat import convert_message_to_openai_dict_new
 
 from .config import NVIDIASafetyConfig
 
@@ -28,7 +28,6 @@ class NVIDIASafetyAdapter(Safety, ShieldsProtocolPrivate):
         Args:
             config (NVIDIASafetyConfig): The configuration containing the guardrails service URL and config ID.
         """
-        print(f"Initializing NVIDIASafetyAdapter({config.guardrails_service_url})...")
         self.config = config
 
     async def initialize(self) -> None:
@@ -42,7 +41,7 @@ class NVIDIASafetyAdapter(Safety, ShieldsProtocolPrivate):
             raise ValueError("Shield model not provided.")
 
     async def run_shield(
-        self, shield_id: str, messages: List[Message], params: Optional[dict[str, Any]] = None
+        self, shield_id: str, messages: list[Message], params: dict[str, Any] | None = None
     ) -> RunShieldResponse:
         """
         Run a safety shield check against the provided messages.
@@ -113,7 +112,7 @@ class NeMoGuardrails:
         response.raise_for_status()
         return response.json()
 
-    async def run(self, messages: List[Message]) -> RunShieldResponse:
+    async def run(self, messages: list[Message]) -> RunShieldResponse:
         """
         Queries the /v1/guardrails/checks endpoint of the NeMo guardrails deployed API.
 
@@ -127,9 +126,10 @@ class NeMoGuardrails:
         Raises:
             requests.HTTPError: If the POST request fails.
         """
+        request_messages = [await convert_message_to_openai_dict_new(message) for message in messages]
         request_data = {
             "model": self.model,
-            "messages": convert_pydantic_to_json_value(messages),
+            "messages": request_messages,
             "temperature": self.temperature,
             "top_p": 1,
             "frequency_penalty": 0,
diff --git a/llama_stack/providers/remote/safety/sambanova/__init__.py b/llama_stack/providers/remote/safety/sambanova/__init__.py
new file mode 100644
index 000000000..bb9d15374
--- /dev/null
+++ b/llama_stack/providers/remote/safety/sambanova/__init__.py
@@ -0,0 +1,18 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from typing import Any
+
+from .config import SambaNovaSafetyConfig
+
+
+async def get_adapter_impl(config: SambaNovaSafetyConfig, _deps) -> Any:
+    from .sambanova import SambaNovaSafetyAdapter
+
+    impl = SambaNovaSafetyAdapter(config)
+    await impl.initialize()
+    return impl
diff --git a/llama_stack/providers/remote/safety/sambanova/config.py b/llama_stack/providers/remote/safety/sambanova/config.py
new file mode 100644
index 000000000..383cea244
--- /dev/null
+++ b/llama_stack/providers/remote/safety/sambanova/config.py
@@ -0,0 +1,37 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any
+
+from pydantic import BaseModel, Field, SecretStr
+
+from llama_stack.schema_utils import json_schema_type
+
+
+class SambaNovaProviderDataValidator(BaseModel):
+    sambanova_api_key: str | None = Field(
+        default=None,
+        description="Sambanova Cloud API key",
+    )
+
+
+@json_schema_type
+class SambaNovaSafetyConfig(BaseModel):
+    url: str = Field(
+        default="https://api.sambanova.ai/v1",
+        description="The URL for the SambaNova AI server",
+    )
+    api_key: SecretStr | None = Field(
+        default=None,
+        description="The SambaNova cloud API Key",
+    )
+
+    @classmethod
+    def sample_run_config(cls, api_key: str = "${env.SAMBANOVA_API_KEY}", **kwargs) -> dict[str, Any]:
+        return {
+            "url": "https://api.sambanova.ai/v1",
+            "api_key": api_key,
+        }
diff --git a/llama_stack/providers/remote/safety/sambanova/sambanova.py b/llama_stack/providers/remote/safety/sambanova/sambanova.py
new file mode 100644
index 000000000..84c8267ae
--- /dev/null
+++ b/llama_stack/providers/remote/safety/sambanova/sambanova.py
@@ -0,0 +1,100 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import json
+import logging
+from typing import Any
+
+import litellm
+import requests
+
+from llama_stack.apis.inference import Message
+from llama_stack.apis.safety import (
+    RunShieldResponse,
+    Safety,
+    SafetyViolation,
+    ViolationLevel,
+)
+from llama_stack.apis.shields import Shield
+from llama_stack.distribution.request_headers import NeedsRequestProviderData
+from llama_stack.providers.datatypes import ShieldsProtocolPrivate
+from llama_stack.providers.utils.inference.openai_compat import convert_message_to_openai_dict_new
+
+from .config import SambaNovaSafetyConfig
+
+logger = logging.getLogger(__name__)
+
+CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
+
+
+class SambaNovaSafetyAdapter(Safety, ShieldsProtocolPrivate, NeedsRequestProviderData):
+    def __init__(self, config: SambaNovaSafetyConfig) -> None:
+        self.config = config
+
+    async def initialize(self) -> None:
+        pass
+
+    async def shutdown(self) -> None:
+        pass
+
+    def _get_api_key(self) -> str:
+        config_api_key = self.config.api_key if self.config.api_key else None
+        if config_api_key:
+            return config_api_key.get_secret_value()
+        else:
+            provider_data = self.get_request_provider_data()
+            if provider_data is None or not provider_data.sambanova_api_key:
+                raise ValueError(
+                    'Pass Sambanova API Key in the header X-LlamaStack-Provider-Data as { "sambanova_api_key": <your api key> }'
+                )
+            return provider_data.sambanova_api_key
+
+    async def register_shield(self, shield: Shield) -> None:
+        list_models_url = self.config.url + "/models"
+        try:
+            response = requests.get(list_models_url)
+            response.raise_for_status()
+        except requests.exceptions.RequestException as e:
+            raise RuntimeError(f"Request to {list_models_url} failed") from e
+        available_models = [model.get("id") for model in response.json().get("data", [])]
+        if (
+            len(available_models) == 0
+            or "guard" not in shield.provider_resource_id.lower()
+            or shield.provider_resource_id.split("sambanova/")[-1] not in available_models
+        ):
+            raise ValueError(f"Shield {shield.provider_resource_id} not found in SambaNova")
+
+    async def run_shield(
+        self, shield_id: str, messages: list[Message], params: dict[str, Any] | None = None
+    ) -> RunShieldResponse:
+        shield = await self.shield_store.get_shield(shield_id)
+        if not shield:
+            raise ValueError(f"Shield {shield_id} not found")
+
+        shield_params = shield.params
+        logger.debug(f"run_shield::{shield_params}::messages={messages}")
+        content_messages = [await convert_message_to_openai_dict_new(m) for m in messages]
+        logger.debug(f"run_shield::final:messages::{json.dumps(content_messages, indent=2)}:")
+
+        response = litellm.completion(
+            model=shield.provider_resource_id, messages=content_messages, api_key=self._get_api_key()
+        )
+        shield_message = response.choices[0].message.content
+
+        if "unsafe" in shield_message.lower():
+            user_message = CANNED_RESPONSE_TEXT
+            violation_type = shield_message.split("\n")[-1]
+            metadata = {"violation_type": violation_type}
+
+            return RunShieldResponse(
+                violation=SafetyViolation(
+                    user_message=user_message,
+                    violation_level=ViolationLevel.ERROR,
+                    metadata=metadata,
+                )
+            )
+
+        return RunShieldResponse()
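
`_get_api_key` prefers the key configured in `SambaNovaSafetyConfig` and only then falls back to per-request provider data carried in the `X-LlamaStack-Provider-Data` header. A small sketch of that resolution order; names and values are illustrative:

```python
import json

config_api_key = None  # nothing configured in SambaNovaSafetyConfig
provider_data_header = json.dumps({"sambanova_api_key": "sk-example"})  # sent by the client


def resolve_api_key() -> str:
    # Mirrors _get_api_key(): the config value wins, otherwise fall back to provider data.
    if config_api_key:
        return config_api_key
    provider_data = json.loads(provider_data_header)
    if not provider_data.get("sambanova_api_key"):
        raise ValueError("Pass the SambaNova API key in the X-LlamaStack-Provider-Data header")
    return provider_data["sambanova_api_key"]


print(resolve_api_key())  # sk-example
```
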
diff --git a/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py b/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py
index b34c9fd9d..7e82cb6d4 100644
--- a/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py
+++ b/llama_stack/providers/remote/tool_runtime/bing_search/bing_search.py
@@ -5,26 +5,26 @@
 # the root directory of this source tree.
 
 import json
-from typing import Any, Dict, Optional
+from typing import Any
 
 import httpx
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
-    Tool,
     ToolDef,
+    ToolGroup,
     ToolInvocationResult,
     ToolParameter,
     ToolRuntime,
 )
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
 
 from .config import BingSearchToolConfig
 
 
-class BingSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
+class BingSearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: BingSearchToolConfig):
         self.config = config
         self.url = "https://api.bing.microsoft.com/v7.0/search"
@@ -32,10 +32,10 @@ class BingSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestP
     async def initialize(self):
         pass
 
-    async def register_tool(self, tool: Tool) -> None:
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
         pass
 
-    async def unregister_tool(self, tool_id: str) -> None:
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
         return
 
     def _get_api_key(self) -> str:
@@ -50,7 +50,7 @@ class BingSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestP
         return provider_data.bing_search_api_key
 
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
         return ListToolDefsResponse(
             data=[
@@ -68,7 +68,7 @@ class BingSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestP
             ]
         )
 
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         headers = {
             "Ocp-Apim-Subscription-Key": api_key,
diff --git a/llama_stack/providers/remote/tool_runtime/bing_search/config.py b/llama_stack/providers/remote/tool_runtime/bing_search/config.py
index 4f089439f..30269dbc1 100644
--- a/llama_stack/providers/remote/tool_runtime/bing_search/config.py
+++ b/llama_stack/providers/remote/tool_runtime/bing_search/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -12,11 +12,11 @@ from pydantic import BaseModel
 class BingSearchToolConfig(BaseModel):
     """Configuration for Bing Search Tool Runtime"""
 
-    api_key: Optional[str] = None
+    api_key: str | None = None
     top_k: int = 3
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "api_key": "${env.BING_API_KEY:}",
         }
diff --git a/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py b/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
index 41f3ce823..b96b9e59c 100644
--- a/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
+++ b/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py
@@ -4,37 +4,37 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 import httpx
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
-    Tool,
     ToolDef,
+    ToolGroup,
     ToolInvocationResult,
     ToolParameter,
     ToolRuntime,
 )
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
 from llama_stack.models.llama.datatypes import BuiltinTool
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
 
 from .config import BraveSearchToolConfig
 
 
-class BraveSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
+class BraveSearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: BraveSearchToolConfig):
         self.config = config
 
     async def initialize(self):
         pass
 
-    async def register_tool(self, tool: Tool) -> None:
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
         pass
 
-    async def unregister_tool(self, tool_id: str) -> None:
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
         return
 
     def _get_api_key(self) -> str:
@@ -49,7 +49,7 @@ class BraveSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequest
         return provider_data.brave_search_api_key
 
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
         return ListToolDefsResponse(
             data=[
@@ -68,7 +68,7 @@ class BraveSearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequest
             ]
         )
 
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         url = "https://api.search.brave.com/res/v1/web/search"
         headers = {
diff --git a/llama_stack/providers/remote/tool_runtime/brave_search/config.py b/llama_stack/providers/remote/tool_runtime/brave_search/config.py
index ab6053609..37ba21304 100644
--- a/llama_stack/providers/remote/tool_runtime/brave_search/config.py
+++ b/llama_stack/providers/remote/tool_runtime/brave_search/config.py
@@ -4,13 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
 
 class BraveSearchToolConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Brave Search API Key",
     )
@@ -20,7 +20,7 @@ class BraveSearchToolConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "api_key": "${env.BRAVE_SEARCH_API_KEY:}",
             "max_results": 3,
diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py b/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py
index fb1f558e5..051a880a7 100644
--- a/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py
+++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/__init__.py
@@ -4,18 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from pydantic import BaseModel
-
-from .config import ModelContextProtocolConfig
+from .config import MCPProviderConfig
 
 
-class ModelContextProtocolToolProviderDataValidator(BaseModel):
-    api_key: str
-
-
-async def get_adapter_impl(config: ModelContextProtocolConfig, _deps):
+async def get_adapter_impl(config: MCPProviderConfig, _deps):
     from .model_context_protocol import ModelContextProtocolToolRuntimeImpl
 
-    impl = ModelContextProtocolToolRuntimeImpl(config)
+    impl = ModelContextProtocolToolRuntimeImpl(config, _deps)
     await impl.initialize()
     return impl
diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py b/llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py
index 30ac407bc..b8c5e77fd 100644
--- a/llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py
+++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/config.py
@@ -4,12 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
 
-class ModelContextProtocolConfig(BaseModel):
+class MCPProviderDataValidator(BaseModel):
+    # mcp_endpoint => dict of headers to send
+    mcp_headers: dict[str, dict[str, str]] | None = None
+
+
+class MCPProviderConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {}
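
The new `MCPProviderDataValidator` lets a client attach per-endpoint headers through provider data, keyed by MCP endpoint URI as the comment above describes. An illustrative payload; the URL and token are placeholders:

```python
import json

# Illustrative provider-data payload a client would place in the
# X-LlamaStack-Provider-Data request header.
provider_data = {
    "mcp_headers": {
        "http://localhost:8000/sse": {
            "Authorization": "Bearer my-mcp-token",
        },
    },
}
print(json.dumps(provider_data))
```
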
diff --git a/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py b/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
index 676917225..a9b252dfe 100644
--- a/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
+++ b/llama_stack/providers/remote/tool_runtime/model_context_protocol/model_context_protocol.py
@@ -4,66 +4,50 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 from urllib.parse import urlparse
 
-from mcp import ClientSession
-from mcp.client.sse import sse_client
-
 from llama_stack.apis.common.content_types import URL
+from llama_stack.apis.datatypes import Api
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
-    ToolDef,
+    ToolGroup,
     ToolInvocationResult,
-    ToolParameter,
     ToolRuntime,
 )
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.distribution.request_headers import NeedsRequestProviderData
+from llama_stack.log import get_logger
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
+from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
 
-from .config import ModelContextProtocolConfig
+from .config import MCPProviderConfig
+
+logger = get_logger(__name__, category="tools")
 
 
-class ModelContextProtocolToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
-    def __init__(self, config: ModelContextProtocolConfig):
+class ModelContextProtocolToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
+    def __init__(self, config: MCPProviderConfig, _deps: dict[Api, Any]):
         self.config = config
 
     async def initialize(self):
         pass
 
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
+        pass
+
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
+        return
+
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
+        # TODO: the endpoint should come from the registered tool group rather than the caller
         if mcp_endpoint is None:
             raise ValueError("mcp_endpoint is required")
+        headers = await self.get_headers_from_request(mcp_endpoint.uri)
+        return await list_mcp_tools(mcp_endpoint.uri, headers)
 
-        tools = []
-        async with sse_client(mcp_endpoint.uri) as streams:
-            async with ClientSession(*streams) as session:
-                await session.initialize()
-                tools_result = await session.list_tools()
-                for tool in tools_result.tools:
-                    parameters = []
-                    for param_name, param_schema in tool.inputSchema.get("properties", {}).items():
-                        parameters.append(
-                            ToolParameter(
-                                name=param_name,
-                                parameter_type=param_schema.get("type", "string"),
-                                description=param_schema.get("description", ""),
-                            )
-                        )
-                    tools.append(
-                        ToolDef(
-                            name=tool.name,
-                            description=tool.description,
-                            parameters=parameters,
-                            metadata={
-                                "endpoint": mcp_endpoint.uri,
-                            },
-                        )
-                    )
-        return ListToolDefsResponse(data=tools)
-
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         tool = await self.tool_store.get_tool(tool_name)
         if tool.metadata is None or tool.metadata.get("endpoint") is None:
             raise ValueError(f"Tool {tool_name} does not have metadata")
@@ -71,12 +55,19 @@ class ModelContextProtocolToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime):
         if urlparse(endpoint).scheme not in ("http", "https"):
             raise ValueError(f"Endpoint {endpoint} is not a valid HTTP(S) URL")
 
-        async with sse_client(endpoint) as streams:
-            async with ClientSession(*streams) as session:
-                await session.initialize()
-                result = await session.call_tool(tool.identifier, kwargs)
+        headers = await self.get_headers_from_request(endpoint)
+        return await invoke_mcp_tool(endpoint, headers, tool_name, kwargs)
 
-        return ToolInvocationResult(
-            content="\n".join([result.model_dump_json() for result in result.content]),
-            error_code=1 if result.isError else 0,
-        )
+    async def get_headers_from_request(self, mcp_endpoint_uri: str) -> dict[str, str]:
+        def canonicalize_uri(uri: str) -> str:
+            return f"{urlparse(uri).netloc or ''}/{urlparse(uri).path or ''}"
+
+        headers = {}
+
+        provider_data = self.get_request_provider_data()
+        if provider_data and provider_data.mcp_headers:
+            for uri, values in provider_data.mcp_headers.items():
+                if canonicalize_uri(uri) != canonicalize_uri(mcp_endpoint_uri):
+                    continue
+                headers.update(values)
+        return headers
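
`get_headers_from_request` matches endpoints on netloc and path only, so two spellings of the same endpoint that differ in scheme or query string still pick up the same headers. A quick demonstration of that canonicalization:

```python
from urllib.parse import urlparse


def canonicalize_uri(uri: str) -> str:
    # Same canonicalization as get_headers_from_request: keep netloc and path, drop the rest.
    parsed = urlparse(uri)
    return f"{parsed.netloc or ''}/{parsed.path or ''}"


print(canonicalize_uri("http://localhost:8000/sse"))   # localhost:8000//sse
print(canonicalize_uri("https://localhost:8000/sse"))  # same canonical form, scheme ignored
print(canonicalize_uri("http://localhost:8000/sse?x=1") == canonicalize_uri("https://localhost:8000/sse"))  # True
```
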
diff --git a/llama_stack/providers/remote/tool_runtime/tavily_search/config.py b/llama_stack/providers/remote/tool_runtime/tavily_search/config.py
index 945430bb1..c9b18d30d 100644
--- a/llama_stack/providers/remote/tool_runtime/tavily_search/config.py
+++ b/llama_stack/providers/remote/tool_runtime/tavily_search/config.py
@@ -4,13 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
 
 class TavilySearchToolConfig(BaseModel):
-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default=None,
         description="The Tavily Search API Key",
     )
@@ -20,7 +20,7 @@ class TavilySearchToolConfig(BaseModel):
     )
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "api_key": "${env.TAVILY_SEARCH_API_KEY:}",
             "max_results": 3,
diff --git a/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py b/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py
index 719d6be14..1fe91fd7f 100644
--- a/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py
+++ b/llama_stack/providers/remote/tool_runtime/tavily_search/tavily_search.py
@@ -5,36 +5,36 @@
 # the root directory of this source tree.
 
 import json
-from typing import Any, Dict, Optional
+from typing import Any
 
 import httpx
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
-    Tool,
     ToolDef,
+    ToolGroup,
     ToolInvocationResult,
     ToolParameter,
     ToolRuntime,
 )
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
 
 from .config import TavilySearchToolConfig
 
 
-class TavilySearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
+class TavilySearchToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: TavilySearchToolConfig):
         self.config = config
 
     async def initialize(self):
         pass
 
-    async def register_tool(self, tool: Tool) -> None:
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
         pass
 
-    async def unregister_tool(self, tool_id: str) -> None:
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
         return
 
     def _get_api_key(self) -> str:
@@ -49,7 +49,7 @@ class TavilySearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsReques
         return provider_data.tavily_search_api_key
 
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
         return ListToolDefsResponse(
             data=[
@@ -67,7 +67,7 @@ class TavilySearchToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsReques
             ]
         )
 
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         async with httpx.AsyncClient() as client:
             response = await client.post(
diff --git a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py
index 8ea49c7b5..aefc86bd6 100644
--- a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py
+++ b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -12,10 +12,10 @@ from pydantic import BaseModel
 class WolframAlphaToolConfig(BaseModel):
     """Configuration for WolframAlpha Tool Runtime"""
 
-    api_key: Optional[str] = None
+    api_key: str | None = None
 
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
             "api_key": "${env.WOLFRAM_ALPHA_API_KEY:}",
         }
diff --git a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
index b3e0e120c..6e1d0f61d 100644
--- a/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
+++ b/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py
@@ -5,26 +5,26 @@
 # the root directory of this source tree.
 
 import json
-from typing import Any, Dict, Optional
+from typing import Any
 
 import httpx
 
 from llama_stack.apis.common.content_types import URL
 from llama_stack.apis.tools import (
     ListToolDefsResponse,
-    Tool,
     ToolDef,
+    ToolGroup,
     ToolInvocationResult,
     ToolParameter,
     ToolRuntime,
 )
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
-from llama_stack.providers.datatypes import ToolsProtocolPrivate
+from llama_stack.providers.datatypes import ToolGroupsProtocolPrivate
 
 from .config import WolframAlphaToolConfig
 
 
-class WolframAlphaToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
+class WolframAlphaToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, NeedsRequestProviderData):
     def __init__(self, config: WolframAlphaToolConfig):
         self.config = config
         self.url = "https://api.wolframalpha.com/v2/query"
@@ -32,10 +32,10 @@ class WolframAlphaToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsReques
     async def initialize(self):
         pass
 
-    async def register_tool(self, tool: Tool) -> None:
+    async def register_toolgroup(self, toolgroup: ToolGroup) -> None:
         pass
 
-    async def unregister_tool(self, tool_id: str) -> None:
+    async def unregister_toolgroup(self, toolgroup_id: str) -> None:
         return
 
     def _get_api_key(self) -> str:
@@ -50,7 +50,7 @@ class WolframAlphaToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsReques
         return provider_data.wolfram_alpha_api_key
 
     async def list_runtime_tools(
-        self, tool_group_id: Optional[str] = None, mcp_endpoint: Optional[URL] = None
+        self, tool_group_id: str | None = None, mcp_endpoint: URL | None = None
     ) -> ListToolDefsResponse:
         return ListToolDefsResponse(
             data=[
@@ -68,7 +68,7 @@ class WolframAlphaToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, NeedsReques
             ]
         )
 
-    async def invoke_tool(self, tool_name: str, kwargs: Dict[str, Any]) -> ToolInvocationResult:
+    async def invoke_tool(self, tool_name: str, kwargs: dict[str, Any]) -> ToolInvocationResult:
         api_key = self._get_api_key()
         params = {
             "input": kwargs["query"],
diff --git a/llama_stack/providers/remote/vector_io/chroma/__init__.py b/llama_stack/providers/remote/vector_io/chroma/__init__.py
index 8646b04d6..ebbc62b1c 100644
--- a/llama_stack/providers/remote/vector_io/chroma/__init__.py
+++ b/llama_stack/providers/remote/vector_io/chroma/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 from .config import ChromaVectorIOConfig
 
 
-async def get_adapter_impl(config: ChromaVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: ChromaVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .chroma import ChromaVectorIOAdapter
 
     impl = ChromaVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/remote/vector_io/chroma/chroma.py b/llama_stack/providers/remote/vector_io/chroma/chroma.py
index 3bf3a7740..a59a38573 100644
--- a/llama_stack/providers/remote/vector_io/chroma/chroma.py
+++ b/llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -6,7 +6,7 @@
 import asyncio
 import json
 import logging
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 from urllib.parse import urlparse
 
 import chromadb
@@ -26,8 +26,7 @@ from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig
 
 log = logging.getLogger(__name__)
 
-
-ChromaClientType = Union[chromadb.AsyncHttpClient, chromadb.PersistentClient]
+ChromaClientType = chromadb.api.AsyncClientAPI | chromadb.api.ClientAPI
 
 
 # this is a helper to allow us to use async and non-async chroma clients interchangeably
@@ -42,7 +41,7 @@ class ChromaIndex(EmbeddingIndex):
         self.client = client
         self.collection = collection
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
         )
@@ -85,11 +84,19 @@ class ChromaIndex(EmbeddingIndex):
     async def delete(self):
         await maybe_await(self.client.delete_collection(self.collection.name))
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in Chroma")
+
 
 class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(
         self,
-        config: Union[RemoteChromaVectorIOConfig, InlineChromaVectorIOConfig],
+        config: RemoteChromaVectorIOConfig | InlineChromaVectorIOConfig,
         inference_api: Api.inference,
     ) -> None:
         log.info(f"Initializing ChromaVectorIOAdapter with url: {config}")
@@ -137,8 +144,8 @@ class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
 
@@ -148,7 +155,7 @@ class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
 
diff --git a/llama_stack/providers/remote/vector_io/chroma/config.py b/llama_stack/providers/remote/vector_io/chroma/config.py
index 3e2463252..4e893fab4 100644
--- a/llama_stack/providers/remote/vector_io/chroma/config.py
+++ b/llama_stack/providers/remote/vector_io/chroma/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -13,5 +13,5 @@ class ChromaVectorIOConfig(BaseModel):
     url: str
 
     @classmethod
-    def sample_run_config(cls, url: str = "${env.CHROMADB_URL}", **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, url: str = "${env.CHROMADB_URL}", **kwargs: Any) -> dict[str, Any]:
         return {"url": url}
diff --git a/llama_stack/providers/remote/vector_io/milvus/__init__.py b/llama_stack/providers/remote/vector_io/milvus/__init__.py
index 84cb1d748..92dbfda2e 100644
--- a/llama_stack/providers/remote/vector_io/milvus/__init__.py
+++ b/llama_stack/providers/remote/vector_io/milvus/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 from .config import MilvusVectorIOConfig
 
 
-async def get_adapter_impl(config: MilvusVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: MilvusVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .milvus import MilvusVectorIOAdapter
 
     assert isinstance(config, MilvusVectorIOConfig), f"Unexpected config type: {type(config)}"
diff --git a/llama_stack/providers/remote/vector_io/milvus/config.py b/llama_stack/providers/remote/vector_io/milvus/config.py
index 17da6b23d..9bdc7ed5c 100644
--- a/llama_stack/providers/remote/vector_io/milvus/config.py
+++ b/llama_stack/providers/remote/vector_io/milvus/config.py
@@ -4,9 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
-from pydantic import BaseModel
+from pydantic import BaseModel, ConfigDict
 
 from llama_stack.schema_utils import json_schema_type
 
@@ -14,9 +14,11 @@ from llama_stack.schema_utils import json_schema_type
 @json_schema_type
 class MilvusVectorIOConfig(BaseModel):
     uri: str
-    token: Optional[str] = None
+    token: str | None = None
     consistency_level: str = "Strong"
 
+    model_config = ConfigDict(extra="allow")
+
     @classmethod
-    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {"uri": "${env.MILVUS_ENDPOINT}", "token": "${env.MILVUS_TOKEN}"}
diff --git a/llama_stack/providers/remote/vector_io/milvus/milvus.py b/llama_stack/providers/remote/vector_io/milvus/milvus.py
index 1949d293d..6628292db 100644
--- a/llama_stack/providers/remote/vector_io/milvus/milvus.py
+++ b/llama_stack/providers/remote/vector_io/milvus/milvus.py
@@ -9,7 +9,7 @@ import hashlib
 import logging
 import os
 import uuid
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 from numpy.typing import NDArray
 from pymilvus import MilvusClient
@@ -39,7 +39,7 @@ class MilvusIndex(EmbeddingIndex):
         if await asyncio.to_thread(self.client.has_collection, self.collection_name):
             await asyncio.to_thread(self.client.drop_collection, collection_name=self.collection_name)
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
         )
@@ -73,7 +73,7 @@ class MilvusIndex(EmbeddingIndex):
             logger.error(f"Error inserting chunks into Milvus collection {self.collection_name}: {e}")
             raise e
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         search_res = await asyncio.to_thread(
             self.client.search,
             collection_name=self.collection_name,
@@ -86,10 +86,18 @@ class MilvusIndex(EmbeddingIndex):
         scores = [res["distance"] for res in search_res[0]]
         return QueryChunksResponse(chunks=chunks, scores=scores)
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in Milvus")
+
 
 class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(
-        self, config: Union[RemoteMilvusVectorIOConfig, InlineMilvusVectorIOConfig], inference_api: Api.inference
+        self, config: RemoteMilvusVectorIOConfig | InlineMilvusVectorIOConfig, inference_api: Api.inference
     ) -> None:
         self.config = config
         self.cache = {}
@@ -124,7 +132,7 @@ class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
 
         self.cache[vector_db.identifier] = index
 
-    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> Optional[VectorDBWithIndex]:
+    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> VectorDBWithIndex | None:
         if vector_db_id in self.cache:
             return self.cache[vector_db_id]
 
@@ -148,8 +156,8 @@ class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
@@ -161,7 +169,7 @@ class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
@@ -172,7 +180,7 @@ class MilvusVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
 
 def generate_chunk_id(document_id: str, chunk_text: str) -> str:
     """Generate a unique chunk ID using a hash of document ID and chunk text."""
-    hash_input = f"{document_id}:{chunk_text}".encode("utf-8")
+    hash_input = f"{document_id}:{chunk_text}".encode()
     return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))
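
`generate_chunk_id` derives a stable, content-based identifier for a chunk, so the same document text always maps to the same ID. A self-contained copy of the helper with a quick check of that determinism:

```python
import hashlib
import uuid

def generate_chunk_id(document_id: str, chunk_text: str) -> str:
    """Generate a unique chunk ID using a hash of document ID and chunk text."""
    hash_input = f"{document_id}:{chunk_text}".encode()
    # The 128-bit MD5 digest is reinterpreted as a UUID, giving a fixed-format key.
    return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))

a = generate_chunk_id("doc-1", "hello world")
b = generate_chunk_id("doc-1", "hello world")
c = generate_chunk_id("doc-1", "hello world!")
assert a == b   # same inputs always produce the same ID
assert a != c   # changing the text changes the ID
```
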
 
 
diff --git a/llama_stack/providers/remote/vector_io/pgvector/__init__.py b/llama_stack/providers/remote/vector_io/pgvector/__init__.py
index 089d890b7..9f528db74 100644
--- a/llama_stack/providers/remote/vector_io/pgvector/__init__.py
+++ b/llama_stack/providers/remote/vector_io/pgvector/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 from .config import PGVectorVectorIOConfig
 
 
-async def get_adapter_impl(config: PGVectorVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: PGVectorVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .pgvector import PGVectorVectorIOAdapter
 
     impl = PGVectorVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/remote/vector_io/pgvector/config.py b/llama_stack/providers/remote/vector_io/pgvector/config.py
index e9eb0f12d..04b92a2e4 100644
--- a/llama_stack/providers/remote/vector_io/pgvector/config.py
+++ b/llama_stack/providers/remote/vector_io/pgvector/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -28,5 +28,5 @@ class PGVectorVectorIOConfig(BaseModel):
         user: str = "${env.PGVECTOR_USER}",
         password: str = "${env.PGVECTOR_PASSWORD}",
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         return {"host": host, "port": port, "db": db, "user": user, "password": password}
diff --git a/llama_stack/providers/remote/vector_io/pgvector/pgvector.py b/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
index 7c683e126..ea918c552 100644
--- a/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
+++ b/llama_stack/providers/remote/vector_io/pgvector/pgvector.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import logging
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any
 
 import psycopg2
 from numpy.typing import NDArray
@@ -33,7 +33,7 @@ def check_extension_version(cur):
     return result[0] if result else None
 
 
-def upsert_models(conn, keys_models: List[Tuple[str, BaseModel]]):
+def upsert_models(conn, keys_models: list[tuple[str, BaseModel]]):
     with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
         query = sql.SQL(
             """
@@ -74,7 +74,7 @@ class PGVectorIndex(EmbeddingIndex):
             """
             )
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
         )
@@ -99,7 +99,7 @@ class PGVectorIndex(EmbeddingIndex):
         with self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
             execute_values(cur, query, values, template="(%s, %s, %s::vector)")
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         with self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
             cur.execute(
                 f"""
@@ -120,6 +120,14 @@ class PGVectorIndex(EmbeddingIndex):
 
             return QueryChunksResponse(chunks=chunks, scores=scores)
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in PGVector")
+
     async def delete(self):
         with self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
             cur.execute(f"DROP TABLE IF EXISTS {self.table_name}")
@@ -180,8 +188,8 @@ class PGVectorVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         await index.insert_chunks(chunks)
@@ -190,7 +198,7 @@ class PGVectorVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         return await index.query_chunks(query, params)
diff --git a/llama_stack/providers/remote/vector_io/qdrant/__init__.py b/llama_stack/providers/remote/vector_io/qdrant/__init__.py
index f5bb7f84c..029de285f 100644
--- a/llama_stack/providers/remote/vector_io/qdrant/__init__.py
+++ b/llama_stack/providers/remote/vector_io/qdrant/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
 from .config import QdrantVectorIOConfig
 
 
-async def get_adapter_impl(config: QdrantVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: QdrantVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .qdrant import QdrantVectorIOAdapter
 
     impl = QdrantVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/remote/vector_io/qdrant/config.py b/llama_stack/providers/remote/vector_io/qdrant/config.py
index 6d7eebe23..314d3f5f1 100644
--- a/llama_stack/providers/remote/vector_io/qdrant/config.py
+++ b/llama_stack/providers/remote/vector_io/qdrant/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -13,19 +13,19 @@ from llama_stack.schema_utils import json_schema_type
 
 @json_schema_type
 class QdrantVectorIOConfig(BaseModel):
-    location: Optional[str] = None
-    url: Optional[str] = None
-    port: Optional[int] = 6333
+    location: str | None = None
+    url: str | None = None
+    port: int | None = 6333
     grpc_port: int = 6334
     prefer_grpc: bool = False
-    https: Optional[bool] = None
-    api_key: Optional[str] = None
-    prefix: Optional[str] = None
-    timeout: Optional[int] = None
-    host: Optional[str] = None
+    https: bool | None = None
+    api_key: str | None = None
+    prefix: str | None = None
+    timeout: int | None = None
+    host: str | None = None
 
     @classmethod
-    def sample_run_config(cls, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
         return {
             "api_key": "${env.QDRANT_API_KEY}",
         }
diff --git a/llama_stack/providers/remote/vector_io/qdrant/qdrant.py b/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
index 9e7788dc0..ff0690083 100644
--- a/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
+++ b/llama_stack/providers/remote/vector_io/qdrant/qdrant.py
@@ -6,7 +6,7 @@
 
 import logging
 import uuid
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 from numpy.typing import NDArray
 from qdrant_client import AsyncQdrantClient, models
@@ -44,7 +44,7 @@ class QdrantIndex(EmbeddingIndex):
         self.client = client
         self.collection_name = collection_name
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
         )
@@ -68,7 +68,7 @@ class QdrantIndex(EmbeddingIndex):
 
         await self.client.upsert(collection_name=self.collection_name, points=points)
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         results = (
             await self.client.query_points(
                 collection_name=self.collection_name,
@@ -95,13 +95,21 @@ class QdrantIndex(EmbeddingIndex):
 
         return QueryChunksResponse(chunks=chunks, scores=scores)
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in Qdrant")
+
     async def delete(self):
         await self.client.delete_collection(collection_name=self.collection_name)
 
 
 class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(
-        self, config: Union[RemoteQdrantVectorIOConfig, InlineQdrantVectorIOConfig], inference_api: Api.inference
+        self, config: RemoteQdrantVectorIOConfig | InlineQdrantVectorIOConfig, inference_api: Api.inference
     ) -> None:
         self.config = config
         self.client: AsyncQdrantClient = None
@@ -131,7 +139,7 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
             await self.cache[vector_db_id].index.delete()
             del self.cache[vector_db_id]
 
-    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> Optional[VectorDBWithIndex]:
+    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> VectorDBWithIndex | None:
         if vector_db_id in self.cache:
             return self.cache[vector_db_id]
 
@@ -150,8 +158,8 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
@@ -163,7 +171,7 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
diff --git a/llama_stack/providers/remote/vector_io/weaviate/__init__.py b/llama_stack/providers/remote/vector_io/weaviate/__init__.py
index c93c628d8..22e116c22 100644
--- a/llama_stack/providers/remote/vector_io/weaviate/__init__.py
+++ b/llama_stack/providers/remote/vector_io/weaviate/__init__.py
@@ -4,14 +4,12 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict
-
 from llama_stack.providers.datatypes import Api, ProviderSpec
 
-from .config import WeaviateRequestProviderData, WeaviateVectorIOConfig  # noqa: F401
+from .config import WeaviateVectorIOConfig
 
 
-async def get_adapter_impl(config: WeaviateVectorIOConfig, deps: Dict[Api, ProviderSpec]):
+async def get_adapter_impl(config: WeaviateVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .weaviate import WeaviateVectorIOAdapter
 
     impl = WeaviateVectorIOAdapter(config, deps[Api.inference])
diff --git a/llama_stack/providers/remote/vector_io/weaviate/config.py b/llama_stack/providers/remote/vector_io/weaviate/config.py
index cc587f252..a8c6e3e2c 100644
--- a/llama_stack/providers/remote/vector_io/weaviate/config.py
+++ b/llama_stack/providers/remote/vector_io/weaviate/config.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict
+from typing import Any
 
 from pydantic import BaseModel
 
@@ -16,5 +16,5 @@ class WeaviateRequestProviderData(BaseModel):
 
 class WeaviateVectorIOConfig(BaseModel):
     @classmethod
-    def sample_run_config(cls, **kwargs: Any) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
         return {}
diff --git a/llama_stack/providers/remote/vector_io/weaviate/weaviate.py b/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
index 52aa2f3a3..e6fe8ccd3 100644
--- a/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
+++ b/llama_stack/providers/remote/vector_io/weaviate/weaviate.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 import json
 import logging
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 import weaviate
 import weaviate.classes as wvc
@@ -33,7 +33,7 @@ class WeaviateIndex(EmbeddingIndex):
         self.client = client
         self.collection_name = collection_name
 
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
         )
@@ -55,7 +55,7 @@ class WeaviateIndex(EmbeddingIndex):
         # TODO: make this async friendly
         collection.data.insert_many(data_objects)
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         collection = self.client.collections.get(self.collection_name)
 
         results = collection.query.near_vector(
@@ -80,10 +80,18 @@ class WeaviateIndex(EmbeddingIndex):
 
         return QueryChunksResponse(chunks=chunks, scores=scores)
 
-    async def delete(self, chunk_ids: List[str]) -> None:
+    async def delete(self, chunk_ids: list[str]) -> None:
         collection = self.client.collections.get(self.collection_name)
         collection.data.delete_many(where=Filter.by_property("id").contains_any(chunk_ids))
 
+    async def query_keyword(
+        self,
+        query_string: str,
+        k: int,
+        score_threshold: float,
+    ) -> QueryChunksResponse:
+        raise NotImplementedError("Keyword search is not supported in Weaviate")
+
 
 class WeaviateVectorIOAdapter(
     VectorIO,
@@ -144,7 +152,7 @@ class WeaviateVectorIOAdapter(
             self.inference_api,
         )
 
-    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> Optional[VectorDBWithIndex]:
+    async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> VectorDBWithIndex | None:
         if vector_db_id in self.cache:
             return self.cache[vector_db_id]
 
@@ -167,8 +175,8 @@ class WeaviateVectorIOAdapter(
     async def insert_chunks(
         self,
         vector_db_id: str,
-        chunks: List[Chunk],
-        ttl_seconds: Optional[int] = None,
+        chunks: list[Chunk],
+        ttl_seconds: int | None = None,
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
@@ -180,7 +188,7 @@ class WeaviateVectorIOAdapter(
         self,
         vector_db_id: str,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
diff --git a/llama_stack/providers/tests/ci_test_config.yaml b/llama_stack/providers/tests/ci_test_config.yaml
deleted file mode 100644
index 3edcd38bf..000000000
--- a/llama_stack/providers/tests/ci_test_config.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-inference:
-  tests:
-  - inference/test_vision_inference.py::test_vision_chat_completion_streaming
-  - inference/test_vision_inference.py::test_vision_chat_completion_non_streaming
-  - inference/test_text_inference.py::test_structured_output
-  - inference/test_text_inference.py::test_chat_completion_streaming
-  - inference/test_text_inference.py::test_chat_completion_non_streaming
-  - inference/test_text_inference.py::test_chat_completion_with_tool_calling
-  - inference/test_text_inference.py::test_chat_completion_with_tool_calling_streaming
-
-  scenarios:
-  - provider_fixtures:
-      inference: ollama
-  - fixture_combo_id: fireworks
-  - provider_fixtures:
-      inference: together
-    # - inference: tgi
-    # - inference: vllm_remote
-
-  inference_models:
-  - meta-llama/Llama-3.1-8B-Instruct
-  - meta-llama/Llama-3.2-11B-Vision-Instruct
-
-
-agents:
-  tests:
-   - agents/test_agents.py::test_agent_turns_with_safety
-   - agents/test_agents.py::test_rag_agent
-
-  scenarios:
-  - fixture_combo_id: ollama
-  - fixture_combo_id: together
-  - fixture_combo_id: fireworks
-
-  inference_models:
-  - meta-llama/Llama-3.2-1B-Instruct
-
-  safety_shield: meta-llama/Llama-Guard-3-1B
-
-
-memory:
-  tests:
-   - memory/test_memory.py::test_query_documents
-
-  scenarios:
-  - fixture_combo_id: ollama
-  - provider_fixtures:
-      inference: sentence_transformers
-      memory: faiss
-  - fixture_combo_id: chroma
-
-  inference_models:
-  - meta-llama/Llama-3.2-1B-Instruct
-
-  embedding_model: all-MiniLM-L6-v2
diff --git a/llama_stack/providers/tests/conftest.py b/llama_stack/providers/tests/conftest.py
deleted file mode 100644
index d3e715b7e..000000000
--- a/llama_stack/providers/tests/conftest.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import os
-from collections import defaultdict
-from pathlib import Path
-from typing import Any, Dict, List, Optional
-
-import pytest
-import yaml
-from dotenv import load_dotenv
-from pydantic import BaseModel, Field
-from termcolor import colored
-
-from llama_stack.distribution.datatypes import Provider
-from llama_stack.providers.datatypes import RemoteProviderConfig
-
-from .env import get_env_or_fail
-from .report import Report
-
-
-class ProviderFixture(BaseModel):
-    providers: List[Provider]
-    provider_data: Optional[Dict[str, Any]] = None
-
-
-class TestScenario(BaseModel):
-    # provider fixtures can be either a mark or a dictionary of api -> providers
-    provider_fixtures: Dict[str, str] = Field(default_factory=dict)
-    fixture_combo_id: Optional[str] = None
-
-
-class APITestConfig(BaseModel):
-    scenarios: List[TestScenario] = Field(default_factory=list)
-    inference_models: List[str] = Field(default_factory=list)
-
-    # test name format should be ::
-    tests: List[str] = Field(default_factory=list)
-
-
-class MemoryApiTestConfig(APITestConfig):
-    embedding_model: Optional[str] = Field(default_factory=None)
-
-
-class AgentsApiTestConfig(APITestConfig):
-    safety_shield: Optional[str] = Field(default_factory=None)
-
-
-class TestConfig(BaseModel):
-    inference: Optional[APITestConfig] = None
-    agents: Optional[AgentsApiTestConfig] = None
-    memory: Optional[MemoryApiTestConfig] = None
-
-
-def get_test_config_from_config_file(metafunc_config):
-    config_file = metafunc_config.getoption("--config")
-    if config_file is None:
-        return None
-
-    config_file_path = Path(__file__).parent / config_file
-    if not config_file_path.exists():
-        raise ValueError(
-            f"Test config {config_file} was specified but not found. Please make sure it exists in the llama_stack/providers/tests directory."
-        )
-    with open(config_file_path, "r") as config_file:
-        config = yaml.safe_load(config_file)
-        return TestConfig(**config)
-
-
-def get_test_config_for_api(metafunc_config, api):
-    test_config = get_test_config_from_config_file(metafunc_config)
-    if test_config is None:
-        return None
-    return getattr(test_config, api)
-
-
-def get_provider_fixture_overrides_from_test_config(metafunc_config, api, default_provider_fixture_combinations):
-    api_config = get_test_config_for_api(metafunc_config, api)
-    if api_config is None:
-        return None
-
-    fixture_combo_ids = set()
-    custom_provider_fixture_combos = []
-    for scenario in api_config.scenarios:
-        if scenario.fixture_combo_id:
-            fixture_combo_ids.add(scenario.fixture_combo_id)
-        else:
-            custom_provider_fixture_combos.append(
-                pytest.param(
-                    scenario.provider_fixtures,
-                    id=scenario.provider_fixtures.get("inference") or "",
-                )
-            )
-
-    if len(fixture_combo_ids) > 0:
-        for default_fixture in default_provider_fixture_combinations:
-            if default_fixture.id in fixture_combo_ids:
-                custom_provider_fixture_combos.append(default_fixture)
-    return custom_provider_fixture_combos
-
-
-def remote_stack_fixture() -> ProviderFixture:
-    if url := os.getenv("REMOTE_STACK_URL", None):
-        config = RemoteProviderConfig.from_url(url)
-    else:
-        config = RemoteProviderConfig(
-            host=get_env_or_fail("REMOTE_STACK_HOST"),
-            port=int(get_env_or_fail("REMOTE_STACK_PORT")),
-        )
-    return ProviderFixture(
-        providers=[
-            Provider(
-                provider_id="test::remote",
-                provider_type="test::remote",
-                config=config.model_dump(),
-            )
-        ],
-    )
-
-
-def pytest_configure(config):
-    config.option.tbstyle = "short"
-    config.option.disable_warnings = True
-
-    """Load environment variables at start of test run"""
-    # Load from .env file if it exists
-    env_file = Path(__file__).parent / ".env"
-    if env_file.exists():
-        load_dotenv(env_file)
-
-    # Load any environment variables passed via --env
-    env_vars = config.getoption("--env") or []
-    for env_var in env_vars:
-        key, value = env_var.split("=", 1)
-        os.environ[key] = value
-
-    if config.getoption("--output") is not None:
-        config.pluginmanager.register(Report(config.getoption("--output")))
-
-
-def pytest_addoption(parser):
-    parser.addoption(
-        "--providers",
-        default="",
-        help=(
-            "Provider configuration in format: api1=provider1,api2=provider2. "
-            "Example: --providers inference=ollama,safety=meta-reference"
-        ),
-    )
-    parser.addoption(
-        "--config",
-        action="store",
-        help="Set test config file (supported format: YAML), e.g. --config=test_config.yml",
-    )
-    parser.addoption(
-        "--output",
-        action="store",
-        help="Set output file for test report, e.g. --output=pytest_report.md",
-    )
-    """Add custom command line options"""
-    parser.addoption("--env", action="append", help="Set environment variables, e.g. --env KEY=value")
-    parser.addoption(
-        "--inference-model",
-        action="store",
-        default="meta-llama/Llama-3.2-3B-Instruct",
-        help="Specify the inference model to use for testing",
-    )
-    parser.addoption(
-        "--safety-shield",
-        action="store",
-        default="meta-llama/Llama-Guard-3-1B",
-        help="Specify the safety shield to use for testing",
-    )
-    parser.addoption(
-        "--embedding-model",
-        action="store",
-        default=None,
-        help="Specify the embedding model to use for testing",
-    )
-    parser.addoption(
-        "--judge-model",
-        action="store",
-        default="meta-llama/Llama-3.1-8B-Instruct",
-        help="Specify the judge model to use for testing",
-    )
-
-
-def make_provider_id(providers: Dict[str, str]) -> str:
-    return ":".join(f"{api}={provider}" for api, provider in sorted(providers.items()))
-
-
-def get_provider_marks(providers: Dict[str, str]) -> List[Any]:
-    marks = []
-    for provider in providers.values():
-        marks.append(getattr(pytest.mark, provider))
-    return marks
-
-
-def get_provider_fixture_overrides(config, available_fixtures: Dict[str, List[str]]) -> Optional[List[pytest.param]]:
-    provider_str = config.getoption("--providers")
-    if not provider_str:
-        return None
-
-    fixture_dict = parse_fixture_string(provider_str, available_fixtures)
-    return [
-        pytest.param(
-            fixture_dict,
-            id=make_provider_id(fixture_dict),
-            marks=get_provider_marks(fixture_dict),
-        )
-    ]
-
-
-def parse_fixture_string(provider_str: str, available_fixtures: Dict[str, List[str]]) -> Dict[str, str]:
-    """Parse provider string of format 'api1=provider1,api2=provider2'"""
-    if not provider_str:
-        return {}
-
-    fixtures = {}
-    pairs = provider_str.split(",")
-    for pair in pairs:
-        if "=" not in pair:
-            raise ValueError(f"Invalid provider specification: {pair}. Expected format: api=provider")
-        api, fixture = pair.split("=")
-        if api not in available_fixtures:
-            raise ValueError(f"Unknown API: {api}. Available APIs: {list(available_fixtures.keys())}")
-        if fixture not in available_fixtures[api]:
-            raise ValueError(
-                f"Unknown provider '{fixture}' for API '{api}'. Available providers: {list(available_fixtures[api])}"
-            )
-        fixtures[api] = fixture
-
-    # Check that all provided APIs are supported
-    for api in available_fixtures.keys():
-        if api not in fixtures:
-            raise ValueError(
-                f"Missing provider fixture for API '{api}'. Available providers: {list(available_fixtures[api])}"
-            )
-    return fixtures
-
-
-def pytest_itemcollected(item):
-    # Get all markers as a list
-    filtered = ("asyncio", "parametrize")
-    marks = [mark.name for mark in item.iter_markers() if mark.name not in filtered]
-    if marks:
-        marks = colored(",".join(marks), "yellow")
-        item.name = f"{item.name}[{marks}]"
-
-
-def pytest_collection_modifyitems(session, config, items):
-    test_config = get_test_config_from_config_file(config)
-    if test_config is None:
-        return
-
-    required_tests = defaultdict(set)
-    for api_test_config in [
-        test_config.inference,
-        test_config.memory,
-        test_config.agents,
-    ]:
-        if api_test_config is None:
-            continue
-        for test in api_test_config.tests:
-            arr = test.split("::")
-            if len(arr) != 2:
-                raise ValueError(f"Invalid format for test name {test}")
-            test_path, func_name = arr
-            required_tests[Path(__file__).parent / test_path].add(func_name)
-
-    new_items, deselected_items = [], []
-    for item in items:
-        func_name = getattr(item, "originalname", item.name)
-        if func_name in required_tests[item.fspath]:
-            new_items.append(item)
-            continue
-        deselected_items.append(item)
-
-    items[:] = new_items
-    config.hook.pytest_deselected(items=deselected_items)
-
-
-pytest_plugins = [
-    "llama_stack.providers.tests.inference.fixtures",
-    "llama_stack.providers.tests.safety.fixtures",
-    "llama_stack.providers.tests.vector_io.fixtures",
-    "llama_stack.providers.tests.agents.fixtures",
-    "llama_stack.providers.tests.datasetio.fixtures",
-    "llama_stack.providers.tests.scoring.fixtures",
-    "llama_stack.providers.tests.eval.fixtures",
-    "llama_stack.providers.tests.post_training.fixtures",
-    "llama_stack.providers.tests.tools.fixtures",
-]
diff --git a/llama_stack/providers/tests/report.py b/llama_stack/providers/tests/report.py
deleted file mode 100644
index bc29534be..000000000
--- a/llama_stack/providers/tests/report.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-from collections import defaultdict
-from pathlib import Path
-
-import pytest
-from pytest import ExitCode
-from pytest_html.basereport import _process_outcome
-
-from llama_stack.models.llama.sku_list import all_registered_models
-from llama_stack.models.llama.sku_types import CoreModelId
-
-INFERENCE_APIS = ["chat_completion"]
-FUNCTIONALITIES = ["streaming", "structured_output", "tool_calling"]
-SUPPORTED_MODELS = {
-    "ollama": {
-        CoreModelId.llama3_1_8b_instruct.value,
-        CoreModelId.llama3_1_8b_instruct.value,
-        CoreModelId.llama3_1_70b_instruct.value,
-        CoreModelId.llama3_1_70b_instruct.value,
-        CoreModelId.llama3_1_405b_instruct.value,
-        CoreModelId.llama3_1_405b_instruct.value,
-        CoreModelId.llama3_2_1b_instruct.value,
-        CoreModelId.llama3_2_1b_instruct.value,
-        CoreModelId.llama3_2_3b_instruct.value,
-        CoreModelId.llama3_2_3b_instruct.value,
-        CoreModelId.llama3_2_11b_vision_instruct.value,
-        CoreModelId.llama3_2_11b_vision_instruct.value,
-        CoreModelId.llama3_2_90b_vision_instruct.value,
-        CoreModelId.llama3_2_90b_vision_instruct.value,
-        CoreModelId.llama3_3_70b_instruct.value,
-        CoreModelId.llama_guard_3_8b.value,
-        CoreModelId.llama_guard_3_1b.value,
-    },
-    "fireworks": {
-        CoreModelId.llama3_1_8b_instruct.value,
-        CoreModelId.llama3_1_70b_instruct.value,
-        CoreModelId.llama3_1_405b_instruct.value,
-        CoreModelId.llama3_2_1b_instruct.value,
-        CoreModelId.llama3_2_3b_instruct.value,
-        CoreModelId.llama3_2_11b_vision_instruct.value,
-        CoreModelId.llama3_2_90b_vision_instruct.value,
-        CoreModelId.llama3_3_70b_instruct.value,
-        CoreModelId.llama_guard_3_8b.value,
-        CoreModelId.llama_guard_3_11b_vision.value,
-    },
-    "together": {
-        CoreModelId.llama3_1_8b_instruct.value,
-        CoreModelId.llama3_1_70b_instruct.value,
-        CoreModelId.llama3_1_405b_instruct.value,
-        CoreModelId.llama3_2_3b_instruct.value,
-        CoreModelId.llama3_2_11b_vision_instruct.value,
-        CoreModelId.llama3_2_90b_vision_instruct.value,
-        CoreModelId.llama3_3_70b_instruct.value,
-        CoreModelId.llama_guard_3_8b.value,
-        CoreModelId.llama_guard_3_11b_vision.value,
-    },
-}
-
-
-class Report:
-    def __init__(self, output_path):
-        valid_file_format = (
-            output_path.split(".")[1] in ["md", "markdown"] if len(output_path.split(".")) == 2 else False
-        )
-        if not valid_file_format:
-            raise ValueError(f"Invalid output file {output_path}. Markdown file is required")
-        self.output_path = output_path
-        self.test_data = defaultdict(dict)
-        self.inference_tests = defaultdict(dict)
-
-    @pytest.hookimpl
-    def pytest_runtest_logreport(self, report):
-        # This hook is called in several phases, including setup, call and teardown
-        # The test is considered failed / error if any of the outcomes is not "Passed"
-        outcome = _process_outcome(report)
-        data = {
-            "outcome": report.outcome,
-            "longrepr": report.longrepr,
-            "name": report.nodeid,
-        }
-        if report.nodeid not in self.test_data:
-            self.test_data[report.nodeid] = data
-        elif self.test_data[report.nodeid] != outcome and outcome != "Passed":
-            self.test_data[report.nodeid] = data
-
-    @pytest.hookimpl
-    def pytest_sessionfinish(self, session, exitstatus):
-        if exitstatus <= ExitCode.INTERRUPTED:
-            return
-        report = []
-        report.append("# Llama Stack Integration Test Results Report")
-        report.append("\n## Summary")
-        report.append("\n## Supported Models: ")
-
-        header = "| Model Descriptor |"
-        dividor = "|:---|"
-        for k in SUPPORTED_MODELS.keys():
-            header += f"{k} |"
-            dividor += ":---:|"
-
-        report.append(header)
-        report.append(dividor)
-
-        rows = []
-        for model in all_registered_models():
-            if "Instruct" not in model.core_model_id.value and "Guard" not in model.core_model_id.value:
-                continue
-            row = f"| {model.core_model_id.value} |"
-            for k in SUPPORTED_MODELS.keys():
-                if model.core_model_id.value in SUPPORTED_MODELS[k]:
-                    row += " ✅ |"
-                else:
-                    row += " ❌ |"
-            rows.append(row)
-        report.extend(rows)
-
-        report.append("\n### Tests:")
-
-        for provider in SUPPORTED_MODELS.keys():
-            if provider not in self.inference_tests:
-                continue
-            report.append(f"\n #### {provider}")
-            test_table = [
-                "| Area | Model | API | Functionality Test | Status |",
-                "|:-----|:-----|:-----|:-----|:-----|",
-            ]
-            for api in INFERENCE_APIS:
-                tests = self.inference_tests[provider][api]
-                for test_nodeid in tests:
-                    row = "|{area} | {model} | {api} | {test} | {result} ".format(
-                        area="Text" if "text" in test_nodeid else "Vision",
-                        model=("Llama-3.1-8B-Instruct" if "text" in test_nodeid else "Llama3.2-11B-Vision-Instruct"),
-                        api=f"/{api}",
-                        test=self.get_simple_function_name(test_nodeid),
-                        result=("✅" if self.test_data[test_nodeid]["outcome"] == "passed" else "❌"),
-                    )
-                    test_table += [row]
-            report.extend(test_table)
-            report.append("\n")
-
-        output_file = Path(self.output_path)
-        output_file.write_text("\n".join(report))
-        print(f"\n Report generated: {output_file.absolute()}")
-
-    @pytest.hookimpl(trylast=True)
-    def pytest_collection_modifyitems(self, session, config, items):
-        for item in items:
-            inference = item.callspec.params.get("inference_stack")
-            if "inference" in item.nodeid:
-                func_name = getattr(item, "originalname", item.name)
-                for api in INFERENCE_APIS:
-                    if api in func_name:
-                        api_tests = self.inference_tests[inference].get(api, set())
-                        api_tests.add(item.nodeid)
-                        self.inference_tests[inference][api] = api_tests
-
-    def get_simple_function_name(self, nodeid):
-        """Extract function name from nodeid.
-
-        Examples:
-        - 'tests/test_math.py::test_addition' -> 'test_addition'
-        - 'tests/test_math.py::TestClass::test_method' -> test_method'
-        """
-        parts = nodeid.split("::")
-        func_name = nodeid  # Fallback to full nodeid if pattern doesn't match
-        if len(parts) == 2:  # Simple function
-            func_name = parts[1]
-        elif len(parts) == 3:  # Class method
-            func_name = parts[2]
-        return func_name.split("[")[0]
diff --git a/llama_stack/providers/utils/bedrock/config.py b/llama_stack/providers/utils/bedrock/config.py
index 95019666b..b25617d76 100644
--- a/llama_stack/providers/utils/bedrock/config.py
+++ b/llama_stack/providers/utils/bedrock/config.py
@@ -3,54 +3,53 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from typing import Optional
 
 from pydantic import BaseModel, Field
 
 
 class BedrockBaseConfig(BaseModel):
-    aws_access_key_id: Optional[str] = Field(
+    aws_access_key_id: str | None = Field(
         default=None,
         description="The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID",
     )
-    aws_secret_access_key: Optional[str] = Field(
+    aws_secret_access_key: str | None = Field(
         default=None,
         description="The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY",
     )
-    aws_session_token: Optional[str] = Field(
+    aws_session_token: str | None = Field(
         default=None,
         description="The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN",
     )
-    region_name: Optional[str] = Field(
+    region_name: str | None = Field(
         default=None,
         description="The default AWS Region to use, for example, us-west-1 or us-west-2."
         "Default use environment variable: AWS_DEFAULT_REGION",
     )
-    profile_name: Optional[str] = Field(
+    profile_name: str | None = Field(
         default=None,
         description="The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE",
     )
-    total_max_attempts: Optional[int] = Field(
+    total_max_attempts: int | None = Field(
         default=None,
         description="An integer representing the maximum number of attempts that will be made for a single request, "
         "including the initial attempt. Default use environment variable: AWS_MAX_ATTEMPTS",
     )
-    retry_mode: Optional[str] = Field(
+    retry_mode: str | None = Field(
         default=None,
         description="A string representing the type of retries Boto3 will perform."
         "Default use environment variable: AWS_RETRY_MODE",
     )
-    connect_timeout: Optional[float] = Field(
+    connect_timeout: float | None = Field(
         default=60,
         description="The time in seconds till a timeout exception is thrown when attempting to make a connection. "
         "The default is 60 seconds.",
     )
-    read_timeout: Optional[float] = Field(
+    read_timeout: float | None = Field(
         default=60,
         description="The time in seconds till a timeout exception is thrown when attempting to read from a connection."
         "The default is 60 seconds.",
     )
-    session_ttl: Optional[int] = Field(
+    session_ttl: int | None = Field(
         default=3600,
         description="The time in seconds till a session expires. The default is 3600 seconds (1 hour).",
     )
diff --git a/llama_stack/providers/utils/common/data_schema_validator.py b/llama_stack/providers/utils/common/data_schema_validator.py
index eb9d9dd60..28a243863 100644
--- a/llama_stack/providers/utils/common/data_schema_validator.py
+++ b/llama_stack/providers/utils/common/data_schema_validator.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Any, Dict, List
+from typing import Any
 
 from llama_stack.apis.common.type_system import (
     ChatCompletionInputType,
@@ -85,16 +85,16 @@ def get_valid_schemas(api_str: str):
 
 
 def validate_dataset_schema(
-    dataset_schema: Dict[str, Any],
-    expected_schemas: List[Dict[str, Any]],
+    dataset_schema: dict[str, Any],
+    expected_schemas: list[dict[str, Any]],
 ):
     if dataset_schema not in expected_schemas:
         raise ValueError(f"Dataset {dataset_schema} does not have a correct input schema in {expected_schemas}")
 
 
 def validate_row_schema(
-    input_row: Dict[str, Any],
-    expected_schemas: List[Dict[str, Any]],
+    input_row: dict[str, Any],
+    expected_schemas: list[dict[str, Any]],
 ):
     for schema in expected_schemas:
         if all(key in input_row for key in schema):
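
These schema helpers perform a structural membership check rather than type validation: a row is accepted if it contains every key of at least one expected schema. A minimal sketch of that check; the body after the visible `if all(...)` line is assumed, and the schema values are placeholders since only the keys are inspected:

```python
from typing import Any

def validate_row_schema(
    input_row: dict[str, Any],
    expected_schemas: list[dict[str, Any]],
) -> None:
    # Accept the row as soon as one expected schema's keys are all present.
    for schema in expected_schemas:
        if all(key in input_row for key in schema):
            return
    raise ValueError(f"Row {input_row} does not match any expected schema")

validate_row_schema(
    {"input_query": "q", "expected_answer": "a"},
    [{"input_query": "string", "expected_answer": "string"}],
)
```
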
diff --git a/llama_stack/providers/utils/inference/__init__.py b/llama_stack/providers/utils/inference/__init__.py
index e36be9404..66269d173 100644
--- a/llama_stack/providers/utils/inference/__init__.py
+++ b/llama_stack/providers/utils/inference/__init__.py
@@ -4,8 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List
-
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.models.llama.sku_types import *  # noqa: F403
 
@@ -22,7 +20,7 @@ def is_supported_safety_model(model: Model) -> bool:
     ]
 
 
-def supported_inference_models() -> List[Model]:
+def supported_inference_models() -> list[Model]:
     return [
         m
         for m in all_registered_models()
diff --git a/llama_stack/providers/utils/inference/embedding_mixin.py b/llama_stack/providers/utils/inference/embedding_mixin.py
index 8b14c7502..97cf87360 100644
--- a/llama_stack/providers/utils/inference/embedding_mixin.py
+++ b/llama_stack/providers/utils/inference/embedding_mixin.py
@@ -4,8 +4,10 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import base64
 import logging
-from typing import TYPE_CHECKING, List, Optional
+import struct
+from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from sentence_transformers import SentenceTransformer
@@ -15,6 +17,9 @@ from llama_stack.apis.inference import (
     EmbeddingTaskType,
     InterleavedContentItem,
     ModelStore,
+    OpenAIEmbeddingData,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
     TextTruncation,
 )
 from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
@@ -31,10 +36,10 @@ class SentenceTransformerEmbeddingMixin:
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
         embedding_model = self._load_sentence_transformer_model(model.provider_resource_id)
@@ -43,6 +48,50 @@ class SentenceTransformerEmbeddingMixin:
         )
         return EmbeddingsResponse(embeddings=embeddings)
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        # Convert input to list format if it's a single string
+        input_list = [input] if isinstance(input, str) else input
+        if not input_list:
+            raise ValueError("Empty list not supported")
+
+        # Get the model and generate embeddings
+        model_obj = await self.model_store.get_model(model)
+        embedding_model = self._load_sentence_transformer_model(model_obj.provider_resource_id)
+        embeddings = embedding_model.encode(input_list, show_progress_bar=False)
+
+        # Convert embeddings to the requested format
+        data = []
+        for i, embedding in enumerate(embeddings):
+            if encoding_format == "base64":
+                # Convert float array to base64 string
+                float_bytes = struct.pack(f"{len(embedding)}f", *embedding)
+                embedding_value = base64.b64encode(float_bytes).decode("ascii")
+            else:
+                # Default to float format
+                embedding_value = embedding.tolist()
+
+            data.append(
+                OpenAIEmbeddingData(
+                    embedding=embedding_value,
+                    index=i,
+                )
+            )
+
+        # Not returning actual token usage
+        usage = OpenAIEmbeddingUsage(prompt_tokens=-1, total_tokens=-1)
+        return OpenAIEmbeddingsResponse(
+            data=data,
+            model=model_obj.provider_resource_id,
+            usage=usage,
+        )
+
     def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":
         global EMBEDDING_MODELS
 
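The base64 branch above packs each embedding as native-endian 32-bit floats before encoding, matching the OpenAI embeddings wire format. A small client-side sketch (not part of this diff) for decoding such a value back into floats:

```python
import base64
import struct


def decode_base64_embedding(value: str) -> list[float]:
    # reverse of struct.pack(f"{n}f", *embedding) followed by base64.b64encode()
    raw = base64.b64decode(value)
    count = len(raw) // struct.calcsize("f")
    return list(struct.unpack(f"{count}f", raw))


# round-trip check
original = [0.1, -0.2, 0.3]
packed = base64.b64encode(struct.pack(f"{len(original)}f", *original)).decode("ascii")
decoded = decode_base64_embedding(packed)
assert all(abs(a - b) < 1e-6 for a, b in zip(original, decoded))
```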
diff --git a/llama_stack/providers/utils/inference/inference_store.py b/llama_stack/providers/utils/inference/inference_store.py
new file mode 100644
index 000000000..7b6bc2e3d
--- /dev/null
+++ b/llama_stack/providers/utils/inference/inference_store.py
@@ -0,0 +1,123 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from llama_stack.apis.inference import (
+    ListOpenAIChatCompletionResponse,
+    OpenAIChatCompletion,
+    OpenAICompletionWithInputMessages,
+    OpenAIMessageParam,
+    Order,
+)
+from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
+
+from ..sqlstore.api import ColumnDefinition, ColumnType
+from ..sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig, sqlstore_impl
+
+
+class InferenceStore:
+    def __init__(self, sql_store_config: SqlStoreConfig):
+        if not sql_store_config:
+            sql_store_config = SqliteSqlStoreConfig(
+                db_path=(RUNTIME_BASE_DIR / "sqlstore.db").as_posix(),
+            )
+        self.sql_store_config = sql_store_config
+        self.sql_store = None
+
+    async def initialize(self):
+        """Create the necessary tables if they don't exist."""
+        self.sql_store = sqlstore_impl(self.sql_store_config)
+        await self.sql_store.create_table(
+            "chat_completions",
+            {
+                "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+                "created": ColumnType.INTEGER,
+                "model": ColumnType.STRING,
+                "choices": ColumnType.JSON,
+                "input_messages": ColumnType.JSON,
+            },
+        )
+
+    async def store_chat_completion(
+        self, chat_completion: OpenAIChatCompletion, input_messages: list[OpenAIMessageParam]
+    ) -> None:
+        if not self.sql_store:
+            raise ValueError("Inference store is not initialized")
+
+        data = chat_completion.model_dump()
+
+        await self.sql_store.insert(
+            "chat_completions",
+            {
+                "id": data["id"],
+                "created": data["created"],
+                "model": data["model"],
+                "choices": data["choices"],
+                "input_messages": [message.model_dump() for message in input_messages],
+            },
+        )
+
+    async def list_chat_completions(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIChatCompletionResponse:
+        """
+        List chat completions from the database.
+
+        :param after: The ID of the last chat completion to return.
+        :param limit: The maximum number of chat completions to return.
+        :param model: The model to filter by.
+        :param order: The order to sort the chat completions by.
+        """
+        if not self.sql_store:
+            raise ValueError("Inference store is not initialized")
+
+        # TODO: support after
+        if after:
+            raise NotImplementedError("After is not supported for SQLite")
+        if not order:
+            order = Order.desc
+
+        rows = await self.sql_store.fetch_all(
+            "chat_completions",
+            where={"model": model} if model else None,
+            order_by=[("created", order.value)],
+            limit=limit,
+        )
+
+        data = [
+            OpenAICompletionWithInputMessages(
+                id=row["id"],
+                created=row["created"],
+                model=row["model"],
+                choices=row["choices"],
+                input_messages=row["input_messages"],
+            )
+            for row in rows
+        ]
+        return ListOpenAIChatCompletionResponse(
+            data=data,
+            # TODO: implement has_more
+            has_more=False,
+            first_id=data[0].id if data else "",
+            last_id=data[-1].id if data else "",
+        )
+
+    async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
+        if not self.sql_store:
+            raise ValueError("Inference store is not initialized")
+
+        row = await self.sql_store.fetch_one("chat_completions", where={"id": completion_id})
+        if not row:
+            raise ValueError(f"Chat completion with id {completion_id} not found") from None
+        return OpenAICompletionWithInputMessages(
+            id=row["id"],
+            created=row["created"],
+            model=row["model"],
+            choices=row["choices"],
+            input_messages=row["input_messages"],
+        )
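A minimal usage sketch of the new store, assuming a SQLite-backed config; the db path and model id below are illustrative, not taken from this diff:

```python
import asyncio

from llama_stack.providers.utils.inference.inference_store import InferenceStore
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig


async def main():
    store = InferenceStore(SqliteSqlStoreConfig(db_path="/tmp/inference.db"))
    await store.initialize()  # creates the chat_completions table if needed

    # store_chat_completion() expects an OpenAIChatCompletion plus the input
    # messages; both come from the inference provider at request time:
    # await store.store_chat_completion(chat_completion, input_messages)

    # list the 10 most recent completions for a given model
    page = await store.list_chat_completions(limit=10, model="llama3.2:3b-instruct-fp16")
    for completion in page.data:
        print(completion.id, completion.created)


asyncio.run(main())
```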
diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
index efe7031f5..dab10bc55 100644
--- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py
+++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -4,7 +4,10 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, AsyncGenerator, AsyncIterator, Dict, List, Optional, Union
+import base64
+import struct
+from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 import litellm
 
@@ -18,7 +21,7 @@ from llama_stack.apis.inference import (
     ChatCompletionResponseStreamChunk,
     EmbeddingsResponse,
     EmbeddingTaskType,
-    Inference,
+    InferenceProvider,
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
@@ -34,6 +37,9 @@ from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAICompletion,
+    OpenAIEmbeddingData,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
 )
@@ -58,13 +64,16 @@ logger = get_logger(name=__name__, category="inference")
 
 class LiteLLMOpenAIMixin(
     ModelRegistryHelper,
-    Inference,
+    InferenceProvider,
     NeedsRequestProviderData,
 ):
+    # TODO: avoid exposing the litellm specific model names to the user.
+    #       potential change: add a prefix param that gets added to the model name
+    #                         when calling litellm.
     def __init__(
         self,
         model_entries,
-        api_key_from_config: Optional[str],
+        api_key_from_config: str | None,
         provider_data_api_key_field: str,
         openai_compat_api_base: str | None = None,
     ):
@@ -90,30 +99,35 @@ class LiteLLMOpenAIMixin(
             raise ValueError(f"Unsupported model: {model.provider_resource_id}")
         return model
 
+    def get_litellm_model_name(self, model_id: str) -> str:
+        # users may already include the "openai/" prefix in their model names; openai/models.py did this by default.
+        # model_id.startswith("openai/") is for backwards compatibility.
+        return "openai/" + model_id if self.is_openai_compat and not model_id.startswith("openai/") else model_id
+
     async def completion(
         self,
         model_id: str,
         content: InterleavedContent,
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
     ) -> AsyncGenerator:
         raise NotImplementedError("LiteLLM does not support completion requests")
 
     async def chat_completion(
         self,
         model_id: str,
-        messages: List[Message],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_choice: Optional[ToolChoice] = ToolChoice.auto,
-        tool_prompt_format: Optional[ToolPromptFormat] = None,
-        response_format: Optional[ResponseFormat] = None,
-        stream: Optional[bool] = False,
-        logprobs: Optional[LogProbConfig] = None,
-        tool_config: Optional[ToolConfig] = None,
-    ) -> Union[ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]]:
+        messages: list[Message],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_choice: ToolChoice | None = ToolChoice.auto,
+        tool_prompt_format: ToolPromptFormat | None = None,
+        response_format: ResponseFormat | None = None,
+        stream: bool | None = False,
+        logprobs: LogProbConfig | None = None,
+        tool_config: ToolConfig | None = None,
+    ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         if sampling_params is None:
             sampling_params = SamplingParams()
 
@@ -130,8 +144,7 @@ class LiteLLMOpenAIMixin(
         )
 
         params = await self._get_params(request)
-        if self.is_openai_compat:
-            params["model"] = "openai/" + params["model"]
+        params["model"] = self.get_litellm_model_name(params["model"])
 
         logger.debug(f"params to litellm (openai compat): {params}")
         # unfortunately, we need to use synchronous litellm.completion here because litellm
@@ -220,65 +233,113 @@ class LiteLLMOpenAIMixin(
                     else request.tool_config.tool_choice
                 )
 
-        provider_data = self.get_request_provider_data()
-        key_field = self.provider_data_api_key_field
-        if provider_data and getattr(provider_data, key_field, None):
-            api_key = getattr(provider_data, key_field)
-        else:
-            api_key = self.api_key_from_config
-
         return {
             "model": request.model,
-            "api_key": api_key,
+            "api_key": self.get_api_key(),
             "api_base": self.api_base,
             **input_dict,
             "stream": request.stream,
             **get_sampling_options(request.sampling_params),
         }
 
+    def get_api_key(self) -> str:
+        provider_data = self.get_request_provider_data()
+        key_field = self.provider_data_api_key_field
+        if provider_data and getattr(provider_data, key_field, None):
+            api_key = getattr(provider_data, key_field)
+        else:
+            api_key = self.api_key_from_config
+        return api_key
+
     async def embeddings(
         self,
         model_id: str,
-        contents: List[str] | List[InterleavedContentItem],
-        text_truncation: Optional[TextTruncation] = TextTruncation.none,
-        output_dimension: Optional[int] = None,
-        task_type: Optional[EmbeddingTaskType] = None,
+        contents: list[str] | list[InterleavedContentItem],
+        text_truncation: TextTruncation | None = TextTruncation.none,
+        output_dimension: int | None = None,
+        task_type: EmbeddingTaskType | None = None,
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
 
         response = litellm.embedding(
-            model=model.provider_resource_id,
+            model=self.get_litellm_model_name(model.provider_resource_id),
             input=[interleaved_content_as_str(content) for content in contents],
         )
 
         embeddings = [data["embedding"] for data in response["data"]]
         return EmbeddingsResponse(embeddings=embeddings)
 
+    async def openai_embeddings(
+        self,
+        model: str,
+        input: str | list[str],
+        encoding_format: str | None = "float",
+        dimensions: int | None = None,
+        user: str | None = None,
+    ) -> OpenAIEmbeddingsResponse:
+        model_obj = await self.model_store.get_model(model)
+
+        # Convert input to list if it's a string
+        input_list = [input] if isinstance(input, str) else input
+
+        # Call litellm embedding function
+        # litellm.drop_params = True
+        response = litellm.embedding(
+            model=self.get_litellm_model_name(model_obj.provider_resource_id),
+            input=input_list,
+            api_key=self.get_api_key(),
+            api_base=self.api_base,
+            dimensions=dimensions,
+        )
+
+        # Convert response to OpenAI format
+        data = []
+        for i, embedding_data in enumerate(response["data"]):
+            # we encode to base64 if the encoding format is base64 in the request
+            if encoding_format == "base64":
+                byte_data = b"".join(struct.pack("f", f) for f in embedding_data["embedding"])
+                embedding = base64.b64encode(byte_data).decode("utf-8")
+            else:
+                embedding = embedding_data["embedding"]
+
+            data.append(OpenAIEmbeddingData(embedding=embedding, index=i))
+
+        usage = OpenAIEmbeddingUsage(
+            prompt_tokens=response["usage"]["prompt_tokens"],
+            total_tokens=response["usage"]["total_tokens"],
+        )
+
+        return OpenAIEmbeddingsResponse(
+            data=data,
+            model=model_obj.provider_resource_id,
+            usage=usage,
+        )
+
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
-            model=model_obj.provider_resource_id,
+            model=self.get_litellm_model_name(model_obj.provider_resource_id),
             prompt=prompt,
             best_of=best_of,
             echo=echo,
@@ -297,38 +358,40 @@ class LiteLLMOpenAIMixin(
             user=user,
             guided_choice=guided_choice,
             prompt_logprobs=prompt_logprobs,
+            api_key=self.get_api_key(),
+            api_base=self.api_base,
         )
         return await litellm.atext_completion(**params)
 
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIMessageParam],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         model_obj = await self.model_store.get_model(model)
         params = await prepare_openai_completion_params(
-            model=model_obj.provider_resource_id,
+            model=self.get_litellm_model_name(model_obj.provider_resource_id),
             messages=messages,
             frequency_penalty=frequency_penalty,
             function_call=function_call,
@@ -351,27 +414,29 @@ class LiteLLMOpenAIMixin(
             top_logprobs=top_logprobs,
             top_p=top_p,
             user=user,
+            api_key=self.get_api_key(),
+            api_base=self.api_base,
         )
         return await litellm.acompletion(**params)
 
     async def batch_completion(
         self,
         model_id: str,
-        content_batch: List[InterleavedContent],
-        sampling_params: Optional[SamplingParams] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        content_batch: list[InterleavedContent],
+        sampling_params: SamplingParams | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch completion is not supported for OpenAI Compat")
 
     async def batch_chat_completion(
         self,
         model_id: str,
-        messages_batch: List[List[Message]],
-        sampling_params: Optional[SamplingParams] = None,
-        tools: Optional[List[ToolDefinition]] = None,
-        tool_config: Optional[ToolConfig] = None,
-        response_format: Optional[ResponseFormat] = None,
-        logprobs: Optional[LogProbConfig] = None,
+        messages_batch: list[list[Message]],
+        sampling_params: SamplingParams | None = None,
+        tools: list[ToolDefinition] | None = None,
+        tool_config: ToolConfig | None = None,
+        response_format: ResponseFormat | None = None,
+        logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for OpenAI Compat")
diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py
index 4d7063953..d707e36c2 100644
--- a/llama_stack/providers/utils/inference/model_registry.py
+++ b/llama_stack/providers/utils/inference/model_registry.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -20,13 +20,13 @@ from llama_stack.providers.utils.inference import (
 # more closer to the Model class.
 class ProviderModelEntry(BaseModel):
     provider_model_id: str
-    aliases: List[str] = Field(default_factory=list)
-    llama_model: Optional[str] = None
+    aliases: list[str] = Field(default_factory=list)
+    llama_model: str | None = None
     model_type: ModelType = ModelType.llm
-    metadata: Dict[str, Any] = Field(default_factory=dict)
+    metadata: dict[str, Any] = Field(default_factory=dict)
 
 
-def get_huggingface_repo(model_descriptor: str) -> Optional[str]:
+def get_huggingface_repo(model_descriptor: str) -> str | None:
     for model in all_registered_models():
         if model.descriptor() == model_descriptor:
             return model.huggingface_repo
@@ -34,7 +34,7 @@ def get_huggingface_repo(model_descriptor: str) -> Optional[str]:
 
 
 def build_hf_repo_model_entry(
-    provider_model_id: str, model_descriptor: str, additional_aliases: Optional[List[str]] = None
+    provider_model_id: str, model_descriptor: str, additional_aliases: list[str] | None = None
 ) -> ProviderModelEntry:
     aliases = [
         get_huggingface_repo(model_descriptor),
@@ -58,7 +58,7 @@ def build_model_entry(provider_model_id: str, model_descriptor: str) -> Provider
 
 
 class ModelRegistryHelper(ModelsProtocolPrivate):
-    def __init__(self, model_entries: List[ProviderModelEntry]):
+    def __init__(self, model_entries: list[ProviderModelEntry]):
         self.alias_to_provider_id_map = {}
         self.provider_id_to_llama_model_map = {}
         for entry in model_entries:
@@ -72,43 +72,53 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
                 self.alias_to_provider_id_map[entry.llama_model] = entry.provider_model_id
                 self.provider_id_to_llama_model_map[entry.provider_model_id] = entry.llama_model
 
-    def get_provider_model_id(self, identifier: str) -> Optional[str]:
+    def get_provider_model_id(self, identifier: str) -> str | None:
         return self.alias_to_provider_id_map.get(identifier, None)
 
-    def get_llama_model(self, provider_model_id: str) -> Optional[str]:
+    # TODO: why keep a separate llama model mapping?
+    def get_llama_model(self, provider_model_id: str) -> str | None:
         return self.provider_id_to_llama_model_map.get(provider_model_id, None)
 
     async def register_model(self, model: Model) -> Model:
+        if not (supported_model_id := self.get_provider_model_id(model.provider_resource_id)):
+            raise ValueError(
+                f"Model '{model.provider_resource_id}' is not supported. Supported models are: {', '.join(self.alias_to_provider_id_map.keys())}"
+            )
+        provider_resource_id = self.get_provider_model_id(model.model_id)
         if model.model_type == ModelType.embedding:
             # embedding models are always registered by their provider model id and does not need to be mapped to a llama model
             provider_resource_id = model.provider_resource_id
-        else:
-            provider_resource_id = self.get_provider_model_id(model.provider_resource_id)
-
         if provider_resource_id:
-            model.provider_resource_id = provider_resource_id
+            if provider_resource_id != supported_model_id:  # be idempotent, only reject differences
+                raise ValueError(
+                    f"Model id '{model.model_id}' is already registered. Please use a different id or unregister it first."
+                )
         else:
             llama_model = model.metadata.get("llama_model")
-            if llama_model is None:
-                return model
+            if llama_model:
+                existing_llama_model = self.get_llama_model(model.provider_resource_id)
+                if existing_llama_model:
+                    if existing_llama_model != llama_model:
+                        raise ValueError(
+                            f"Provider model id '{model.provider_resource_id}' is already registered to a different llama model: '{existing_llama_model}'"
+                        )
+                else:
+                    if llama_model not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR:
+                        raise ValueError(
+                            f"Invalid llama_model '{llama_model}' specified in metadata. "
+                            f"Must be one of: {', '.join(ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.keys())}"
+                        )
+                    self.provider_id_to_llama_model_map[model.provider_resource_id] = (
+                        ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[llama_model]
+                    )
 
-            existing_llama_model = self.get_llama_model(model.provider_resource_id)
-            if existing_llama_model:
-                if existing_llama_model != llama_model:
-                    raise ValueError(
-                        f"Provider model id '{model.provider_resource_id}' is already registered to a different llama model: '{existing_llama_model}'"
-                    )
-            else:
-                if llama_model not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR:
-                    raise ValueError(
-                        f"Invalid llama_model '{llama_model}' specified in metadata. "
-                        f"Must be one of: {', '.join(ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.keys())}"
-                    )
-                self.provider_id_to_llama_model_map[model.provider_resource_id] = (
-                    ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[llama_model]
-                )
+        self.alias_to_provider_id_map[model.model_id] = supported_model_id
 
         return model
 
     async def unregister_model(self, model_id: str) -> None:
-        pass
+        # TODO: should we block unregistering base supported provider model IDs?
+        if model_id not in self.alias_to_provider_id_map:
+            raise ValueError(f"Model id '{model_id}' is not registered.")
+
+        del self.alias_to_provider_id_map[model_id]
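The reworked register_model enforces that the requested provider_resource_id is already a known alias, rejects re-registering an existing model_id to a different provider model, and unregister_model now actually removes the alias. A simplified, self-contained sketch of that alias-map bookkeeping (it ignores the embedding and llama_model metadata branches; names are illustrative):

```python
class MiniRegistry:
    def __init__(self, supported: dict[str, str]):
        # alias (or user-facing id) -> provider model id
        self.alias_to_provider_id_map = dict(supported)

    def register(self, model_id: str, provider_resource_id: str) -> None:
        supported_model_id = self.alias_to_provider_id_map.get(provider_resource_id)
        if not supported_model_id:
            raise ValueError(f"Model '{provider_resource_id}' is not supported")
        existing = self.alias_to_provider_id_map.get(model_id)
        if existing and existing != supported_model_id:
            raise ValueError(f"Model id '{model_id}' is already registered")
        self.alias_to_provider_id_map[model_id] = supported_model_id

    def unregister(self, model_id: str) -> None:
        if model_id not in self.alias_to_provider_id_map:
            raise ValueError(f"Model id '{model_id}' is not registered.")
        del self.alias_to_provider_id_map[model_id]


registry = MiniRegistry({"meta-llama/Llama-3.2-3B-Instruct": "llama3.2:3b-instruct-fp16"})
registry.register("my-alias", "meta-llama/Llama-3.2-3B-Instruct")
registry.unregister("my-alias")
```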
diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py
index 4d690287b..049f06fdb 100644
--- a/llama_stack/providers/utils/inference/openai_compat.py
+++ b/llama_stack/providers/utils/inference/openai_compat.py
@@ -8,16 +8,9 @@ import logging
 import time
 import uuid
 import warnings
+from collections.abc import AsyncGenerator, AsyncIterator, Awaitable, Iterable
 from typing import (
     Any,
-    AsyncGenerator,
-    AsyncIterator,
-    Awaitable,
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Union,
 )
 
 from openai import AsyncStream
@@ -115,6 +108,7 @@ from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
     OpenAICompletion,
     OpenAICompletionChoice,
+    OpenAIMessageParam,
     OpenAIResponseFormatParam,
     ToolConfig,
 )
@@ -141,24 +135,24 @@ class OpenAICompatCompletionChoiceDelta(BaseModel):
 
 
 class OpenAICompatLogprobs(BaseModel):
-    text_offset: Optional[List[int]] = None
+    text_offset: list[int] | None = None
 
-    token_logprobs: Optional[List[float]] = None
+    token_logprobs: list[float] | None = None
 
-    tokens: Optional[List[str]] = None
+    tokens: list[str] | None = None
 
-    top_logprobs: Optional[List[Dict[str, float]]] = None
+    top_logprobs: list[dict[str, float]] | None = None
 
 
 class OpenAICompatCompletionChoice(BaseModel):
-    finish_reason: Optional[str] = None
-    text: Optional[str] = None
-    delta: Optional[OpenAICompatCompletionChoiceDelta] = None
-    logprobs: Optional[OpenAICompatLogprobs] = None
+    finish_reason: str | None = None
+    text: str | None = None
+    delta: OpenAICompatCompletionChoiceDelta | None = None
+    logprobs: OpenAICompatLogprobs | None = None
 
 
 class OpenAICompatCompletionResponse(BaseModel):
-    choices: List[OpenAICompatCompletionChoice]
+    choices: list[OpenAICompatCompletionChoice]
 
 
 def get_sampling_strategy_options(params: SamplingParams) -> dict:
@@ -217,8 +211,8 @@ def get_stop_reason(finish_reason: str) -> StopReason:
 
 
 def convert_openai_completion_logprobs(
-    logprobs: Optional[OpenAICompatLogprobs],
-) -> Optional[List[TokenLogProbs]]:
+    logprobs: OpenAICompatLogprobs | None,
+) -> list[TokenLogProbs] | None:
     if not logprobs:
         return None
     if hasattr(logprobs, "top_logprobs"):
@@ -235,7 +229,7 @@ def convert_openai_completion_logprobs(
     return None
 
 
-def convert_openai_completion_logprobs_stream(text: str, logprobs: Optional[Union[float, OpenAICompatLogprobs]]):
+def convert_openai_completion_logprobs_stream(text: str, logprobs: float | OpenAICompatLogprobs | None):
     if logprobs is None:
         return None
     if isinstance(logprobs, float):
@@ -532,13 +526,24 @@ async def convert_message_to_openai_dict(message: Message, download: bool = Fals
     if hasattr(message, "tool_calls") and message.tool_calls:
         result["tool_calls"] = []
         for tc in message.tool_calls:
+            # The tool.tool_name can be a str or a BuiltinTool enum. If
+            # it's the latter, convert to a string.
+            tool_name = tc.tool_name
+            if isinstance(tool_name, BuiltinTool):
+                tool_name = tool_name.value
+
+            # arguments_json can be None, so attempt it first and fall back to arguments
+            if hasattr(tc, "arguments_json") and tc.arguments_json:
+                arguments = tc.arguments_json
+            else:
+                arguments = json.dumps(tc.arguments)
             result["tool_calls"].append(
                 {
                     "id": tc.call_id,
                     "type": "function",
                     "function": {
-                        "name": tc.tool_name,
-                        "arguments": tc.arguments_json if hasattr(tc, "arguments_json") else json.dumps(tc.arguments),
+                        "name": tool_name,
+                        "arguments": arguments,
                     },
                 }
             )
@@ -557,7 +562,7 @@ class UnparseableToolCall(BaseModel):
 
 
 async def convert_message_to_openai_dict_new(
-    message: Message | Dict,
+    message: Message | dict,
 ) -> OpenAIChatCompletionMessage:
     """
     Convert a Message to an OpenAI API-compatible dictionary.
@@ -586,14 +591,10 @@ async def convert_message_to_openai_dict_new(
     #  List[...] -> List[...]
     async def _convert_message_content(
         content: InterleavedContent,
-    ) -> Union[str, Iterable[OpenAIChatCompletionContentPartParam]]:
+    ) -> str | Iterable[OpenAIChatCompletionContentPartParam]:
         async def impl(
             content_: InterleavedContent,
-        ) -> Union[
-            str,
-            OpenAIChatCompletionContentPartParam,
-            List[OpenAIChatCompletionContentPartParam],
-        ]:
+        ) -> str | OpenAIChatCompletionContentPartParam | list[OpenAIChatCompletionContentPartParam]:
             # Llama Stack and OpenAI spec match for str and text input
             if isinstance(content_, str):
                 return content_
@@ -638,10 +639,13 @@ async def convert_message_to_openai_dict_new(
             )
             for tool in message.tool_calls
         ]
+        params = {}
+        if tool_calls:
+            params["tool_calls"] = tool_calls
         out = OpenAIChatCompletionAssistantMessage(
             role="assistant",
             content=await _convert_message_content(message.content),
-            tool_calls=tool_calls or None,
+            **params,
         )
     elif isinstance(message, ToolResponseMessage):
         out = OpenAIChatCompletionToolMessage(
@@ -662,7 +666,7 @@ async def convert_message_to_openai_dict_new(
 
 def convert_tool_call(
     tool_call: ChatCompletionMessageToolCall,
-) -> Union[ToolCall, UnparseableToolCall]:
+) -> ToolCall | UnparseableToolCall:
     """
     Convert a ChatCompletionMessageToolCall tool call to either a
     ToolCall or UnparseableToolCall. Returns an UnparseableToolCall
@@ -838,7 +842,7 @@ def _convert_openai_finish_reason(finish_reason: str) -> StopReason:
     }.get(finish_reason, StopReason.end_of_turn)
 
 
-def _convert_openai_request_tool_config(tool_choice: Optional[Union[str, Dict[str, Any]]] = None) -> ToolConfig:
+def _convert_openai_request_tool_config(tool_choice: str | dict[str, Any] | None = None) -> ToolConfig:
     tool_config = ToolConfig()
     if tool_choice:
         try:
@@ -849,7 +853,7 @@ def _convert_openai_request_tool_config(tool_choice: Optional[Union[str, Dict[st
     return tool_config
 
 
-def _convert_openai_request_tools(tools: Optional[List[Dict[str, Any]]] = None) -> List[ToolDefinition]:
+def _convert_openai_request_tools(tools: list[dict[str, Any]] | None = None) -> list[ToolDefinition]:
     lls_tools = []
     if not tools:
         return lls_tools
@@ -865,7 +869,7 @@ def _convert_openai_request_tools(tools: Optional[List[Dict[str, Any]]] = None)
             tool_param_properties = tool_params.get("properties", {})
             for tool_param_key, tool_param_value in tool_param_properties.items():
                 tool_param_def = ToolParamDefinition(
-                    param_type=tool_param_value.get("type", None),
+                    param_type=str(tool_param_value.get("type", None)),
                     description=tool_param_value.get("description", None),
                 )
                 lls_tool_params[tool_param_key] = tool_param_def
@@ -895,8 +899,8 @@ def _convert_openai_request_response_format(
 
 
 def _convert_openai_tool_calls(
-    tool_calls: List[OpenAIChatCompletionMessageToolCall],
-) -> List[ToolCall]:
+    tool_calls: list[OpenAIChatCompletionMessageToolCall],
+) -> list[ToolCall]:
     """
     Convert an OpenAI ChatCompletionMessageToolCall list into a list of ToolCall.
 
@@ -932,7 +936,7 @@ def _convert_openai_tool_calls(
 
 def _convert_openai_logprobs(
     logprobs: OpenAIChoiceLogprobs,
-) -> Optional[List[TokenLogProbs]]:
+) -> list[TokenLogProbs] | None:
     """
     Convert an OpenAI ChoiceLogprobs into a list of TokenLogProbs.
 
@@ -965,9 +969,9 @@ def _convert_openai_logprobs(
 
 
 def _convert_openai_sampling_params(
-    max_tokens: Optional[int] = None,
-    temperature: Optional[float] = None,
-    top_p: Optional[float] = None,
+    max_tokens: int | None = None,
+    temperature: float | None = None,
+    top_p: float | None = None,
 ) -> SamplingParams:
     sampling_params = SamplingParams()
 
@@ -990,20 +994,20 @@ def _convert_openai_sampling_params(
 
 
 def openai_messages_to_messages(
-    messages: List[OpenAIChatCompletionMessage],
-) -> List[Message]:
+    messages: list[OpenAIMessageParam],
+) -> list[Message]:
     """
     Convert a list of OpenAIChatCompletionMessage into a list of Message.
     """
     converted_messages = []
     for message in messages:
         if message.role == "system":
-            converted_message = SystemMessage(content=message.content)
+            converted_message = SystemMessage(content=openai_content_to_content(message.content))
         elif message.role == "user":
             converted_message = UserMessage(content=openai_content_to_content(message.content))
         elif message.role == "assistant":
             converted_message = CompletionMessage(
-                content=message.content,
+                content=openai_content_to_content(message.content),
                 tool_calls=_convert_openai_tool_calls(message.tool_calls),
                 stop_reason=StopReason.end_of_turn,
             )
@@ -1019,7 +1023,7 @@ def openai_messages_to_messages(
     return converted_messages
 
 
-def openai_content_to_content(content: Union[str, Iterable[OpenAIChatCompletionContentPartParam]]):
+def openai_content_to_content(content: str | Iterable[OpenAIChatCompletionContentPartParam]):
     if isinstance(content, str):
         return content
     elif isinstance(content, list):
@@ -1265,24 +1269,24 @@ class OpenAICompletionToLlamaStackMixin:
     async def openai_completion(
         self,
         model: str,
-        prompt: Union[str, List[str], List[int], List[List[int]]],
-        best_of: Optional[int] = None,
-        echo: Optional[bool] = None,
-        frequency_penalty: Optional[float] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        presence_penalty: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-        guided_choice: Optional[List[str]] = None,
-        prompt_logprobs: Optional[int] = None,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
     ) -> OpenAICompletion:
         if stream:
             raise ValueError(f"{self.__class__.__name__} doesn't support streaming openai completions")
@@ -1334,29 +1338,29 @@ class OpenAIChatCompletionToLlamaStackMixin:
     async def openai_chat_completion(
         self,
         model: str,
-        messages: List[OpenAIChatCompletionMessage],
-        frequency_penalty: Optional[float] = None,
-        function_call: Optional[Union[str, Dict[str, Any]]] = None,
-        functions: Optional[List[Dict[str, Any]]] = None,
-        logit_bias: Optional[Dict[str, float]] = None,
-        logprobs: Optional[bool] = None,
-        max_completion_tokens: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        n: Optional[int] = None,
-        parallel_tool_calls: Optional[bool] = None,
-        presence_penalty: Optional[float] = None,
-        response_format: Optional[OpenAIResponseFormatParam] = None,
-        seed: Optional[int] = None,
-        stop: Optional[Union[str, List[str]]] = None,
-        stream: Optional[bool] = None,
-        stream_options: Optional[Dict[str, Any]] = None,
-        temperature: Optional[float] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        top_logprobs: Optional[int] = None,
-        top_p: Optional[float] = None,
-        user: Optional[str] = None,
-    ) -> Union[OpenAIChatCompletion, AsyncIterator[OpenAIChatCompletionChunk]]:
+        messages: list[OpenAIMessageParam],
+        frequency_penalty: float | None = None,
+        function_call: str | dict[str, Any] | None = None,
+        functions: list[dict[str, Any]] | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        parallel_tool_calls: bool | None = None,
+        presence_penalty: float | None = None,
+        response_format: OpenAIResponseFormatParam | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        tool_choice: str | dict[str, Any] | None = None,
+        tools: list[dict[str, Any]] | None = None,
+        top_logprobs: int | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+    ) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
         messages = openai_messages_to_messages(messages)
         response_format = _convert_openai_request_response_format(response_format)
         sampling_params = _convert_openai_sampling_params(
@@ -1395,12 +1399,11 @@ class OpenAIChatCompletionToLlamaStackMixin:
     async def _process_stream_response(
         self,
         model: str,
-        outstanding_responses: List[Awaitable[AsyncIterator[ChatCompletionResponseStreamChunk]]],
+        outstanding_responses: list[Awaitable[AsyncIterator[ChatCompletionResponseStreamChunk]]],
     ):
         id = f"chatcmpl-{uuid.uuid4()}"
-        for outstanding_response in outstanding_responses:
+        for i, outstanding_response in enumerate(outstanding_responses):
             response = await outstanding_response
-            i = 0
             async for chunk in response:
                 event = chunk.event
                 finish_reason = _convert_stop_reason_to_openai_finish_reason(event.stop_reason)
@@ -1455,10 +1458,9 @@ class OpenAIChatCompletionToLlamaStackMixin:
                             model=model,
                             object="chat.completion.chunk",
                         )
-                i = i + 1
 
     async def _process_non_stream_response(
-        self, model: str, outstanding_responses: List[Awaitable[ChatCompletionResponse]]
+        self, model: str, outstanding_responses: list[Awaitable[ChatCompletionResponse]]
     ) -> OpenAIChatCompletion:
         choices = []
         for outstanding_response in outstanding_responses:
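One change in this file worth calling out: tc.tool_name may be a BuiltinTool enum, and serializing the enum directly would emit "BuiltinTool.brave_search" rather than "brave_search". A minimal sketch of the guard, using a stand-in enum rather than the real llama_stack type:

```python
from enum import Enum


class BuiltinTool(Enum):
    # stand-in for the real llama_stack BuiltinTool enum
    brave_search = "brave_search"
    code_interpreter = "code_interpreter"


def normalize_tool_name(tool_name) -> str:
    # the OpenAI wire format expects a plain string, never an enum repr
    if isinstance(tool_name, BuiltinTool):
        return tool_name.value
    return tool_name


assert normalize_tool_name(BuiltinTool.brave_search) == "brave_search"
assert normalize_tool_name("get_weather") == "get_weather"
assert str(BuiltinTool.brave_search) != "brave_search"  # why the guard is needed
```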
diff --git a/llama_stack/providers/utils/inference/prompt_adapter.py b/llama_stack/providers/utils/inference/prompt_adapter.py
index 657dc4b86..56e33cfdf 100644
--- a/llama_stack/providers/utils/inference/prompt_adapter.py
+++ b/llama_stack/providers/utils/inference/prompt_adapter.py
@@ -9,7 +9,6 @@ import base64
 import io
 import json
 import re
-from typing import List, Optional, Tuple, Union
 
 import httpx
 from PIL import Image as PIL_Image
@@ -63,7 +62,7 @@ log = get_logger(name=__name__, category="inference")
 
 
 class ChatCompletionRequestWithRawContent(ChatCompletionRequest):
-    messages: List[RawMessage]
+    messages: list[RawMessage]
 
 
 class CompletionRequestWithRawContent(CompletionRequest):
@@ -93,8 +92,8 @@ def interleaved_content_as_str(content: InterleavedContent, sep: str = " ") -> s
 
 
 async def convert_request_to_raw(
-    request: Union[ChatCompletionRequest, CompletionRequest],
-) -> Union[ChatCompletionRequestWithRawContent, CompletionRequestWithRawContent]:
+    request: ChatCompletionRequest | CompletionRequest,
+) -> ChatCompletionRequestWithRawContent | CompletionRequestWithRawContent:
     if isinstance(request, ChatCompletionRequest):
         messages = []
         for m in request.messages:
@@ -170,18 +169,18 @@ def content_has_media(content: InterleavedContent):
         return _has_media_content(content)
 
 
-def messages_have_media(messages: List[Message]):
+def messages_have_media(messages: list[Message]):
     return any(content_has_media(m.content) for m in messages)
 
 
-def request_has_media(request: Union[ChatCompletionRequest, CompletionRequest]):
+def request_has_media(request: ChatCompletionRequest | CompletionRequest):
     if isinstance(request, ChatCompletionRequest):
         return messages_have_media(request.messages)
     else:
         return content_has_media(request.content)
 
 
-async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
+async def localize_image_content(media: ImageContentItem) -> tuple[bytes, str]:
     image = media.image
     if image.url and image.url.uri.startswith("http"):
         async with httpx.AsyncClient() as client:
@@ -228,7 +227,7 @@ async def completion_request_to_prompt(request: CompletionRequest) -> str:
 
 async def completion_request_to_prompt_model_input_info(
     request: CompletionRequest,
-) -> Tuple[str, int]:
+) -> tuple[str, int]:
     content = augment_content_with_response_format_prompt(request.response_format, request.content)
     request.content = content
     request = await convert_request_to_raw(request)
@@ -265,7 +264,7 @@ async def chat_completion_request_to_prompt(request: ChatCompletionRequest, llam
 
 async def chat_completion_request_to_model_input_info(
     request: ChatCompletionRequest, llama_model: str
-) -> Tuple[str, int]:
+) -> tuple[str, int]:
     messages = chat_completion_request_to_messages(request, llama_model)
     request.messages = messages
     request = await convert_request_to_raw(request)
@@ -284,7 +283,7 @@ async def chat_completion_request_to_model_input_info(
 def chat_completion_request_to_messages(
     request: ChatCompletionRequest,
     llama_model: str,
-) -> List[Message]:
+) -> list[Message]:
     """Reads chat completion request and augments the messages to handle tools.
     For eg. for llama_3_1, add system message with the appropriate tools or
     add user messsage for custom tools, etc.
@@ -323,7 +322,7 @@ def chat_completion_request_to_messages(
     return messages
 
 
-def response_format_prompt(fmt: Optional[ResponseFormat]):
+def response_format_prompt(fmt: ResponseFormat | None):
     if not fmt:
         return None
 
@@ -337,7 +336,7 @@ def response_format_prompt(fmt: Optional[ResponseFormat]):
 
 def augment_messages_for_tools_llama_3_1(
     request: ChatCompletionRequest,
-) -> List[Message]:
+) -> list[Message]:
     existing_messages = request.messages
     existing_system_message = None
     if existing_messages[0].role == Role.system.value:
@@ -383,7 +382,7 @@ def augment_messages_for_tools_llama_3_1(
 
     messages.append(SystemMessage(content=sys_content))
 
-    has_custom_tools = any(isinstance(dfn.tool_name, str) for dfn in request.tools)
+    has_custom_tools = request.tools is not None and any(isinstance(dfn.tool_name, str) for dfn in request.tools)
     if has_custom_tools:
         fmt = request.tool_config.tool_prompt_format or ToolPromptFormat.json
         if fmt == ToolPromptFormat.json:
@@ -406,7 +405,7 @@ def augment_messages_for_tools_llama_3_1(
 def augment_messages_for_tools_llama(
     request: ChatCompletionRequest,
     custom_tool_prompt_generator,
-) -> List[Message]:
+) -> list[Message]:
     existing_messages = request.messages
     existing_system_message = None
     if existing_messages[0].role == Role.system.value:
@@ -457,7 +456,7 @@ def augment_messages_for_tools_llama(
     return messages
 
 
-def _get_tool_choice_prompt(tool_choice: ToolChoice | str, tools: List[ToolDefinition]) -> str:
+def _get_tool_choice_prompt(tool_choice: ToolChoice | str, tools: list[ToolDefinition]) -> str:
     if tool_choice == ToolChoice.auto:
         return ""
     elif tool_choice == ToolChoice.required:
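The has_custom_tools change above guards against request.tools being None, which the previous any(...) expression would trip over. A two-line illustration:

```python
tools = None  # request.tools can legitimately be None

# old expression: any(isinstance(t, str) for t in tools) raises TypeError when tools is None
# new expression short-circuits instead:
has_custom_tools = tools is not None and any(isinstance(t, str) for t in tools)
assert has_custom_tools is False
```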
diff --git a/llama_stack/providers/utils/inference/stream_utils.py b/llama_stack/providers/utils/inference/stream_utils.py
new file mode 100644
index 000000000..a2edbb9c8
--- /dev/null
+++ b/llama_stack/providers/utils/inference/stream_utils.py
@@ -0,0 +1,129 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from collections.abc import AsyncIterator
+from datetime import datetime, timezone
+from typing import Any
+
+from llama_stack.apis.inference import (
+    OpenAIAssistantMessageParam,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAIChatCompletionToolCall,
+    OpenAIChatCompletionToolCallFunction,
+    OpenAIChoice,
+    OpenAIChoiceLogprobs,
+    OpenAIMessageParam,
+)
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
+
+
+async def stream_and_store_openai_completion(
+    provider_stream: AsyncIterator[OpenAIChatCompletionChunk],
+    model: str,
+    store: InferenceStore,
+    input_messages: list[OpenAIMessageParam],
+) -> AsyncIterator[OpenAIChatCompletionChunk]:
+    """
+    Wraps a provider's stream, yields chunks, and stores the full completion at the end.
+    """
+    id = None
+    created = None
+    choices_data: dict[int, dict[str, Any]] = {}
+
+    try:
+        async for chunk in provider_stream:
+            if id is None and chunk.id:
+                id = chunk.id
+            if created is None and chunk.created:
+                created = chunk.created
+
+            if chunk.choices:
+                for choice_delta in chunk.choices:
+                    idx = choice_delta.index
+                    if idx not in choices_data:
+                        choices_data[idx] = {
+                            "content_parts": [],
+                            "tool_calls_builder": {},
+                            "finish_reason": None,
+                            "logprobs_content_parts": [],
+                        }
+                    current_choice_data = choices_data[idx]
+
+                    if choice_delta.delta:
+                        delta = choice_delta.delta
+                        if delta.content:
+                            current_choice_data["content_parts"].append(delta.content)
+                        if delta.tool_calls:
+                            for tool_call_delta in delta.tool_calls:
+                                tc_idx = tool_call_delta.index
+                                if tc_idx not in current_choice_data["tool_calls_builder"]:
+                                    # Initialize with correct structure for _ToolCallBuilderData
+                                    current_choice_data["tool_calls_builder"][tc_idx] = {
+                                        "id": None,
+                                        "type": "function",
+                                        "function_name_parts": [],
+                                        "function_arguments_parts": [],
+                                    }
+                                builder = current_choice_data["tool_calls_builder"][tc_idx]
+                                if tool_call_delta.id:
+                                    builder["id"] = tool_call_delta.id
+                                if tool_call_delta.type:
+                                    builder["type"] = tool_call_delta.type
+                                if tool_call_delta.function:
+                                    if tool_call_delta.function.name:
+                                        builder["function_name_parts"].append(tool_call_delta.function.name)
+                                    if tool_call_delta.function.arguments:
+                                        builder["function_arguments_parts"].append(tool_call_delta.function.arguments)
+                    if choice_delta.finish_reason:
+                        current_choice_data["finish_reason"] = choice_delta.finish_reason
+                    if choice_delta.logprobs and choice_delta.logprobs.content:
+                        # Ensure that we are extending with the correct type
+                        current_choice_data["logprobs_content_parts"].extend(choice_delta.logprobs.content)
+            yield chunk
+    finally:
+        if id:
+            assembled_choices: list[OpenAIChoice] = []
+            for choice_idx, choice_data in choices_data.items():
+                content_str = "".join(choice_data["content_parts"])
+                assembled_tool_calls: list[OpenAIChatCompletionToolCall] = []
+                if choice_data["tool_calls_builder"]:
+                    for tc_build_data in choice_data["tool_calls_builder"].values():
+                        if tc_build_data["id"]:
+                            func_name = "".join(tc_build_data["function_name_parts"])
+                            func_args = "".join(tc_build_data["function_arguments_parts"])
+                            assembled_tool_calls.append(
+                                OpenAIChatCompletionToolCall(
+                                    id=tc_build_data["id"],
+                                    type=tc_build_data["type"],  # no "function" fallback needed; the type is already set at initialization
+                                    function=OpenAIChatCompletionToolCallFunction(name=func_name, arguments=func_args),
+                                )
+                            )
+                message = OpenAIAssistantMessageParam(
+                    role="assistant",
+                    content=content_str if content_str else None,
+                    tool_calls=assembled_tool_calls if assembled_tool_calls else None,
+                )
+                logprobs_content = choice_data["logprobs_content_parts"]
+                final_logprobs = OpenAIChoiceLogprobs(content=logprobs_content) if logprobs_content else None
+
+                assembled_choices.append(
+                    OpenAIChoice(
+                        finish_reason=choice_data["finish_reason"],
+                        index=choice_idx,
+                        message=message,
+                        logprobs=final_logprobs,
+                    )
+                )
+
+            final_response = OpenAIChatCompletion(
+                id=id,
+                choices=assembled_choices,
+                created=created or int(datetime.now(timezone.utc).timestamp()),
+                model=model,
+                object="chat.completion",
+            )
+            await store.store_chat_completion(final_response, input_messages)
diff --git a/llama_stack/providers/utils/kvstore/api.py b/llama_stack/providers/utils/kvstore/api.py
index 84b1730e1..d17dc66e1 100644
--- a/llama_stack/providers/utils/kvstore/api.py
+++ b/llama_stack/providers/utils/kvstore/api.py
@@ -5,15 +5,17 @@
 # the root directory of this source tree.
 
 from datetime import datetime
-from typing import List, Optional, Protocol
+from typing import Protocol
 
 
 class KVStore(Protocol):
     # TODO: make the value type bytes instead of str
-    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None: ...
+    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None: ...
 
-    async def get(self, key: str) -> Optional[str]: ...
+    async def get(self, key: str) -> str | None: ...
 
     async def delete(self, key: str) -> None: ...
 
-    async def range(self, start_key: str, end_key: str) -> List[str]: ...
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]: ...
+
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]: ...
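For orientation, a minimal sketch (not part of the patch) of how a caller uses the renamed range API; it leans on the in-memory implementation further down in this change purely because it needs no external service, and the key names are made up.

import asyncio

from llama_stack.providers.utils.kvstore.kvstore import InmemoryKVStoreImpl


async def demo() -> None:
    store = InmemoryKVStoreImpl()
    await store.initialize()
    await store.set("session:001", "alpha")
    await store.set("session:002", "beta")

    # values_in_range() replaces the old range(); keys_in_range() is new in this patch.
    values = await store.values_in_range("session:", "session:~")
    keys = await store.keys_in_range("session:", "session:~")
    print(values, keys)  # ['alpha', 'beta'] ['session:001', 'session:002']


asyncio.run(demo())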
diff --git a/llama_stack/providers/utils/kvstore/config.py b/llama_stack/providers/utils/kvstore/config.py
index 4f85982be..bbb0c5c0a 100644
--- a/llama_stack/providers/utils/kvstore/config.py
+++ b/llama_stack/providers/utils/kvstore/config.py
@@ -6,10 +6,9 @@
 
 import re
 from enum import Enum
-from typing import Literal, Optional, Union
+from typing import Annotated, Literal
 
 from pydantic import BaseModel, Field, field_validator
-from typing_extensions import Annotated
 
 from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
 
@@ -22,7 +21,7 @@ class KVStoreType(Enum):
 
 
 class CommonConfig(BaseModel):
-    namespace: Optional[str] = Field(
+    namespace: str | None = Field(
         default=None,
         description="All keys will be prefixed with this namespace",
     )
@@ -66,10 +65,10 @@ class SqliteKVStoreConfig(CommonConfig):
 class PostgresKVStoreConfig(CommonConfig):
     type: Literal[KVStoreType.postgres.value] = KVStoreType.postgres.value
     host: str = "localhost"
-    port: int = 5432
+    port: str = "5432"
     db: str = "llamastack"
     user: str
-    password: Optional[str] = None
+    password: str | None = None
     table_name: str = "llamastack_kvstore"
 
     @classmethod
@@ -108,7 +107,7 @@ class MongoDBKVStoreConfig(CommonConfig):
     port: int = 27017
     db: str = "llamastack"
     user: str = None
-    password: Optional[str] = None
+    password: str | None = None
     collection_name: str = "llamastack_kvstore"
 
     @classmethod
@@ -126,6 +125,6 @@ class MongoDBKVStoreConfig(CommonConfig):
 
 
 KVStoreConfig = Annotated[
-    Union[RedisKVStoreConfig, SqliteKVStoreConfig, PostgresKVStoreConfig, MongoDBKVStoreConfig],
+    RedisKVStoreConfig | SqliteKVStoreConfig | PostgresKVStoreConfig | MongoDBKVStoreConfig,
     Field(discriminator="type", default=KVStoreType.sqlite.value),
 ]
diff --git a/llama_stack/providers/utils/kvstore/kvstore.py b/llama_stack/providers/utils/kvstore/kvstore.py
index 6bc175260..3a1ee8a26 100644
--- a/llama_stack/providers/utils/kvstore/kvstore.py
+++ b/llama_stack/providers/utils/kvstore/kvstore.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Optional
 
 from .api import KVStore
 from .config import KVStoreConfig, KVStoreType
@@ -21,15 +20,22 @@ class InmemoryKVStoreImpl(KVStore):
     async def initialize(self) -> None:
         pass
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         return self._store.get(key)
 
     async def set(self, key: str, value: str) -> None:
         self._store[key] = value
 
-    async def range(self, start_key: str, end_key: str) -> List[str]:
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
         return [self._store[key] for key in self._store.keys() if key >= start_key and key < end_key]
 
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
+        """Get all keys in the given range."""
+        return [key for key in self._store.keys() if key >= start_key and key < end_key]
+
+    async def delete(self, key: str) -> None:
+        del self._store[key]
+
 
 async def kvstore_impl(config: KVStoreConfig) -> KVStore:
     if config.type == KVStoreType.redis.value:
diff --git a/llama_stack/providers/utils/kvstore/mongodb/mongodb.py b/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
index c1581dc8d..3842773d9 100644
--- a/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
+++ b/llama_stack/providers/utils/kvstore/mongodb/mongodb.py
@@ -6,7 +6,6 @@
 
 import logging
 from datetime import datetime
-from typing import List, Optional
 
 from pymongo import AsyncMongoClient
 
@@ -43,12 +42,12 @@ class MongoDBKVStoreImpl(KVStore):
             return key
         return f"{self.config.namespace}:{key}"
 
-    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None:
+    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
         key = self._namespaced_key(key)
         update_query = {"$set": {"value": value, "expiration": expiration}}
         await self.collection.update_one({"key": key}, update_query, upsert=True)
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         key = self._namespaced_key(key)
         query = {"key": key}
         result = await self.collection.find_one(query, {"value": 1, "_id": 0})
@@ -58,7 +57,7 @@ class MongoDBKVStoreImpl(KVStore):
         key = self._namespaced_key(key)
         await self.collection.delete_one({"key": key})
 
-    async def range(self, start_key: str, end_key: str) -> List[str]:
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
         start_key = self._namespaced_key(start_key)
         end_key = self._namespaced_key(end_key)
         query = {
@@ -69,3 +68,10 @@ class MongoDBKVStoreImpl(KVStore):
         async for doc in cursor:
             result.append(doc["value"])
         return result
+
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
+        start_key = self._namespaced_key(start_key)
+        end_key = self._namespaced_key(end_key)
+        query = {"key": {"$gte": start_key, "$lt": end_key}}
+        cursor = self.collection.find(query, {"key": 1, "_id": 0}).sort("key", 1)
+        return [doc["key"] async for doc in cursor]
diff --git a/llama_stack/providers/utils/kvstore/postgres/postgres.py b/llama_stack/providers/utils/kvstore/postgres/postgres.py
index 097d36066..bd35decfc 100644
--- a/llama_stack/providers/utils/kvstore/postgres/postgres.py
+++ b/llama_stack/providers/utils/kvstore/postgres/postgres.py
@@ -6,7 +6,6 @@
 
 import logging
 from datetime import datetime
-from typing import List, Optional
 
 import psycopg2
 from psycopg2.extras import DictCursor
@@ -54,7 +53,7 @@ class PostgresKVStoreImpl(KVStore):
             return key
         return f"{self.config.namespace}:{key}"
 
-    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None:
+    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
         key = self._namespaced_key(key)
         self.cursor.execute(
             f"""
@@ -66,7 +65,7 @@ class PostgresKVStoreImpl(KVStore):
             (key, value, expiration),
         )
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         key = self._namespaced_key(key)
         self.cursor.execute(
             f"""
@@ -86,7 +85,7 @@ class PostgresKVStoreImpl(KVStore):
             (key,),
         )
 
-    async def range(self, start_key: str, end_key: str) -> List[str]:
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
         start_key = self._namespaced_key(start_key)
         end_key = self._namespaced_key(end_key)
 
@@ -100,3 +99,13 @@ class PostgresKVStoreImpl(KVStore):
             (start_key, end_key),
         )
         return [row[0] for row in self.cursor.fetchall()]
+
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
+        start_key = self._namespaced_key(start_key)
+        end_key = self._namespaced_key(end_key)
+
+        self.cursor.execute(
+            f"SELECT key FROM {self.config.table_name} WHERE key >= %s AND key < %s",
+            (start_key, end_key),
+        )
+        return [row[0] for row in self.cursor.fetchall()]
diff --git a/llama_stack/providers/utils/kvstore/redis/redis.py b/llama_stack/providers/utils/kvstore/redis/redis.py
index a390ea866..3d2d956c3 100644
--- a/llama_stack/providers/utils/kvstore/redis/redis.py
+++ b/llama_stack/providers/utils/kvstore/redis/redis.py
@@ -5,7 +5,6 @@
 # the root directory of this source tree.
 
 from datetime import datetime
-from typing import List, Optional
 
 from redis.asyncio import Redis
 
@@ -25,13 +24,13 @@ class RedisKVStoreImpl(KVStore):
             return key
         return f"{self.config.namespace}:{key}"
 
-    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None:
+    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
         key = self._namespaced_key(key)
         await self.redis.set(key, value)
         if expiration:
             await self.redis.expireat(key, expiration)
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         key = self._namespaced_key(key)
         value = await self.redis.get(key)
         if value is None:
@@ -43,7 +42,7 @@ class RedisKVStoreImpl(KVStore):
         key = self._namespaced_key(key)
         await self.redis.delete(key)
 
-    async def range(self, start_key: str, end_key: str) -> List[str]:
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
         start_key = self._namespaced_key(start_key)
         end_key = self._namespaced_key(end_key)
         cursor = 0
@@ -68,3 +67,14 @@
             ]
 
         return []
+
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
+        """Get all keys in the given lexicographic range (keys are stored as plain strings, so scan and filter)."""
+        start_key = self._namespaced_key(start_key)
+        end_key = self._namespaced_key(end_key)
+        matching_keys = []
+        async for raw_key in self.redis.scan_iter():
+            key = raw_key.decode("utf-8") if isinstance(raw_key, bytes) else raw_key
+            if start_key <= key < end_key:
+                matching_keys.append(key)
+        return matching_keys
diff --git a/llama_stack/providers/utils/kvstore/sqlite/sqlite.py b/llama_stack/providers/utils/kvstore/sqlite/sqlite.py
index bc0488aac..4e49e4d8c 100644
--- a/llama_stack/providers/utils/kvstore/sqlite/sqlite.py
+++ b/llama_stack/providers/utils/kvstore/sqlite/sqlite.py
@@ -6,7 +6,6 @@
 
 import os
 from datetime import datetime
-from typing import List, Optional
 
 import aiosqlite
 
@@ -33,7 +32,7 @@ class SqliteKVStoreImpl(KVStore):
             )
             await db.commit()
 
-    async def set(self, key: str, value: str, expiration: Optional[datetime] = None) -> None:
+    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
         async with aiosqlite.connect(self.db_path) as db:
             await db.execute(
                 f"INSERT OR REPLACE INTO {self.table_name} (key, value, expiration) VALUES (?, ?, ?)",
@@ -41,7 +40,7 @@ class SqliteKVStoreImpl(KVStore):
             )
             await db.commit()
 
-    async def get(self, key: str) -> Optional[str]:
+    async def get(self, key: str) -> str | None:
         async with aiosqlite.connect(self.db_path) as db:
             async with db.execute(f"SELECT value, expiration FROM {self.table_name} WHERE key = ?", (key,)) as cursor:
                 row = await cursor.fetchone()
@@ -55,7 +54,7 @@ class SqliteKVStoreImpl(KVStore):
             await db.execute(f"DELETE FROM {self.table_name} WHERE key = ?", (key,))
             await db.commit()
 
-    async def range(self, start_key: str, end_key: str) -> List[str]:
+    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
         async with aiosqlite.connect(self.db_path) as db:
             async with db.execute(
                 f"SELECT key, value, expiration FROM {self.table_name} WHERE key >= ? AND key <= ?",
@@ -66,3 +65,13 @@ class SqliteKVStoreImpl(KVStore):
                     _, value, _ = row
                     result.append(value)
                 return result
+
+    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
+        """Get all keys in the given range."""
+        async with aiosqlite.connect(self.db_path) as db:
+            cursor = await db.execute(
+                f"SELECT key FROM {self.table_name} WHERE key >= ? AND key <= ?",
+                (start_key, end_key),
+            )
+            rows = await cursor.fetchall()
+            return [row[0] for row in rows]
diff --git a/llama_stack/providers/utils/memory/vector_store.py b/llama_stack/providers/utils/memory/vector_store.py
index ba4403ea1..4cd15860b 100644
--- a/llama_stack/providers/utils/memory/vector_store.py
+++ b/llama_stack/providers/utils/memory/vector_store.py
@@ -9,7 +9,7 @@ import logging
 import re
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
-from typing import Any, Dict, List, Optional
+from typing import Any
 from urllib.parse import unquote
 
 import httpx
@@ -94,7 +94,7 @@ def content_from_data(data_url: str) -> str:
         return ""
 
 
-def concat_interleaved_content(content: List[InterleavedContent]) -> InterleavedContent:
+def concat_interleaved_content(content: list[InterleavedContent]) -> InterleavedContent:
     """concatenate interleaved content into a single list. ensure that 'str's are converted to TextContentItem when in a list"""
 
     ret = []
@@ -118,58 +118,86 @@ async def content_from_doc(doc: RAGDocument) -> str:
     if isinstance(doc.content, URL):
         if doc.content.uri.startswith("data:"):
             return content_from_data(doc.content.uri)
-        else:
-            async with httpx.AsyncClient() as client:
-                r = await client.get(doc.content.uri)
-            if doc.mime_type == "application/pdf":
-                return parse_pdf(r.content)
-            else:
-                return r.text
-
-    pattern = re.compile("^(https?://|file://|data:)")
-    if pattern.match(doc.content):
-        if doc.content.startswith("data:"):
-            return content_from_data(doc.content)
-        else:
+        async with httpx.AsyncClient() as client:
+            r = await client.get(doc.content.uri)
+        if doc.mime_type == "application/pdf":
+            return parse_pdf(r.content)
+        return r.text
+    elif isinstance(doc.content, str):
+        pattern = re.compile("^(https?://|file://|data:)")
+        if pattern.match(doc.content):
+            if doc.content.startswith("data:"):
+                return content_from_data(doc.content)
             async with httpx.AsyncClient() as client:
                 r = await client.get(doc.content)
             if doc.mime_type == "application/pdf":
                 return parse_pdf(r.content)
-            else:
-                return r.text
-
-    return interleaved_content_as_str(doc.content)
+            return r.text
+        return doc.content
+    else:
+        # will raise ValueError if the content is not List[InterleavedContent] or InterleavedContent
+        return interleaved_content_as_str(doc.content)
 
 
-def make_overlapped_chunks(document_id: str, text: str, window_len: int, overlap_len: int) -> List[Chunk]:
+def make_overlapped_chunks(
+    document_id: str, text: str, window_len: int, overlap_len: int, metadata: dict[str, Any]
+) -> list[Chunk]:
     tokenizer = Tokenizer.get_instance()
     tokens = tokenizer.encode(text, bos=False, eos=False)
+    try:
+        metadata_string = str(metadata)
+    except Exception as e:
+        raise ValueError("Failed to serialize metadata to string") from e
+
+    metadata_tokens = tokenizer.encode(metadata_string, bos=False, eos=False)
 
     chunks = []
     for i in range(0, len(tokens), window_len - overlap_len):
         toks = tokens[i : i + window_len]
         chunk = tokenizer.decode(toks)
+        chunk_metadata = metadata.copy()
+        chunk_metadata["document_id"] = document_id
+        chunk_metadata["token_count"] = len(toks)
+        chunk_metadata["metadata_token_count"] = len(metadata_tokens)
+
         # chunk is a string
         chunks.append(
             Chunk(
                 content=chunk,
-                metadata={
-                    "token_count": len(toks),
-                    "document_id": document_id,
-                },
+                metadata=chunk_metadata,
             )
         )
 
     return chunks
 
 
+def _validate_embedding(embedding: NDArray, index: int, expected_dimension: int):
+    """Helper method to validate embedding format and dimensions"""
+    if not isinstance(embedding, list | np.ndarray):
+        raise ValueError(f"Embedding at index {index} must be a list or numpy array, got {type(embedding)}")
+
+    if isinstance(embedding, np.ndarray):
+        if not np.issubdtype(embedding.dtype, np.number):
+            raise ValueError(f"Embedding at index {index} contains non-numeric values")
+    else:
+        if not all(isinstance(e, float | int | np.number) for e in embedding):
+            raise ValueError(f"Embedding at index {index} contains non-numeric values")
+
+    if len(embedding) != expected_dimension:
+        raise ValueError(f"Embedding at index {index} has dimension {len(embedding)}, expected {expected_dimension}")
+
+
 class EmbeddingIndex(ABC):
     @abstractmethod
-    async def add_chunks(self, chunks: List[Chunk], embeddings: NDArray):
+    async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         raise NotImplementedError()
 
     @abstractmethod
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+        raise NotImplementedError()
+
+    @abstractmethod
+    async def query_keyword(self, query_string: str, k: int, score_threshold: float) -> QueryChunksResponse:
         raise NotImplementedError()
 
     @abstractmethod
@@ -185,26 +213,40 @@ class VectorDBWithIndex:
 
     async def insert_chunks(
         self,
-        chunks: List[Chunk],
+        chunks: list[Chunk],
     ) -> None:
-        embeddings_response = await self.inference_api.embeddings(
-            self.vector_db.embedding_model, [x.content for x in chunks]
-        )
-        embeddings = np.array(embeddings_response.embeddings)
+        chunks_to_embed = []
+        for i, c in enumerate(chunks):
+            if c.embedding is None:
+                chunks_to_embed.append(c)
+            else:
+                _validate_embedding(c.embedding, i, self.vector_db.embedding_dimension)
 
+        if chunks_to_embed:
+            resp = await self.inference_api.embeddings(
+                self.vector_db.embedding_model,
+                [c.content for c in chunks_to_embed],
+            )
+            for c, embedding in zip(chunks_to_embed, resp.embeddings, strict=False):
+                c.embedding = embedding
+
+        embeddings = np.array([c.embedding for c in chunks], dtype=np.float32)
         await self.index.add_chunks(chunks, embeddings)
 
     async def query_chunks(
         self,
         query: InterleavedContent,
-        params: Optional[Dict[str, Any]] = None,
+        params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
         if params is None:
             params = {}
         k = params.get("max_chunks", 3)
+        mode = params.get("mode")
         score_threshold = params.get("score_threshold", 0.0)
-
-        query_str = interleaved_content_as_str(query)
-        embeddings_response = await self.inference_api.embeddings(self.vector_db.embedding_model, [query_str])
-        query_vector = np.array(embeddings_response.embeddings[0], dtype=np.float32)
-        return await self.index.query(query_vector, k, score_threshold)
+        query_string = interleaved_content_as_str(query)
+        if mode == "keyword":
+            return await self.index.query_keyword(query_string, k, score_threshold)
+        else:
+            embeddings_response = await self.inference_api.embeddings(self.vector_db.embedding_model, [query_string])
+            query_vector = np.array(embeddings_response.embeddings[0], dtype=np.float32)
+            return await self.index.query_vector(query_vector, k, score_threshold)
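Not part of the patch, but a short sketch of how the two behavioral changes in this file compose: chunk metadata is now caller-supplied and merged onto every chunk, and query_chunks routes on params["mode"]. The vector_db_with_index object and document_text are assumed to exist in the caller's scope.

from llama_stack.providers.utils.memory.vector_store import make_overlapped_chunks


async def example(vector_db_with_index, document_text: str):
    # Caller-supplied metadata is copied into every chunk, next to the
    # document_id / token_count bookkeeping that make_overlapped_chunks adds itself.
    chunks = make_overlapped_chunks(
        document_id="doc-1",
        text=document_text,
        window_len=512,
        overlap_len=64,
        metadata={"source": "handbook.pdf"},
    )
    await vector_db_with_index.insert_chunks(chunks)

    # "mode": "keyword" dispatches to index.query_keyword; any other value (or no
    # mode at all) embeds the query string and dispatches to index.query_vector.
    return await vector_db_with_index.query_chunks(
        "what is the refund policy?",
        params={"mode": "keyword", "max_chunks": 5, "score_threshold": 0.0},
    )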
diff --git a/llama_stack/providers/utils/datasetio/pagination.py b/llama_stack/providers/utils/pagination.py
similarity index 95%
rename from llama_stack/providers/utils/datasetio/pagination.py
rename to llama_stack/providers/utils/pagination.py
index 1b693f8f5..033022491 100644
--- a/llama_stack/providers/utils/datasetio/pagination.py
+++ b/llama_stack/providers/utils/pagination.py
@@ -4,13 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, List
+from typing import Any
 
 from llama_stack.apis.common.responses import PaginatedResponse
 
 
 def paginate_records(
-    records: List[Dict[str, Any]],
+    records: list[dict[str, Any]],
     start_index: int | None = None,
     limit: int | None = None,
 ) -> PaginatedResponse:
diff --git a/llama_stack/providers/utils/responses/responses_store.py b/llama_stack/providers/utils/responses/responses_store.py
new file mode 100644
index 000000000..15354e3e2
--- /dev/null
+++ b/llama_stack/providers/utils/responses/responses_store.py
@@ -0,0 +1,135 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from llama_stack.apis.agents import (
+    Order,
+)
+from llama_stack.apis.agents.openai_responses import (
+    ListOpenAIResponseInputItem,
+    ListOpenAIResponseObject,
+    OpenAIResponseInput,
+    OpenAIResponseObject,
+    OpenAIResponseObjectWithInput,
+)
+from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
+
+from ..sqlstore.api import ColumnDefinition, ColumnType
+from ..sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig, sqlstore_impl
+
+
+class ResponsesStore:
+    def __init__(self, sql_store_config: SqlStoreConfig):
+        if not sql_store_config:
+            sql_store_config = SqliteSqlStoreConfig(
+                db_path=(RUNTIME_BASE_DIR / "sqlstore.db").as_posix(),
+            )
+        self.sql_store = sqlstore_impl(sql_store_config)
+
+    async def initialize(self):
+        """Create the necessary tables if they don't exist."""
+        await self.sql_store.create_table(
+            "openai_responses",
+            {
+                "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+                "created_at": ColumnType.INTEGER,
+                "response_object": ColumnType.JSON,
+                "model": ColumnType.STRING,
+            },
+        )
+
+    async def store_response_object(
+        self, response_object: OpenAIResponseObject, input: list[OpenAIResponseInput]
+    ) -> None:
+        data = response_object.model_dump()
+        data["input"] = [input_item.model_dump() for input_item in input]
+
+        await self.sql_store.insert(
+            "openai_responses",
+            {
+                "id": data["id"],
+                "created_at": data["created_at"],
+                "model": data["model"],
+                "response_object": data,
+            },
+        )
+
+    async def list_responses(
+        self,
+        after: str | None = None,
+        limit: int | None = 50,
+        model: str | None = None,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseObject:
+        """
+        List responses from the database.
+
+        :param after: The ID of the last response to return.
+        :param limit: The maximum number of responses to return.
+        :param model: The model to filter by.
+        :param order: The order to sort the responses by.
+        """
+        # TODO: support after
+        if after:
+            raise NotImplementedError("'after' pagination is not supported yet")
+        if not order:
+            order = Order.desc
+
+        rows = await self.sql_store.fetch_all(
+            "openai_responses",
+            where={"model": model} if model else None,
+            order_by=[("created_at", order.value)],
+            limit=limit,
+        )
+
+        data = [OpenAIResponseObjectWithInput(**row["response_object"]) for row in rows]
+        return ListOpenAIResponseObject(
+            data=data,
+            # TODO: implement has_more
+            has_more=False,
+            first_id=data[0].id if data else "",
+            last_id=data[-1].id if data else "",
+        )
+
+    async def get_response_object(self, response_id: str) -> OpenAIResponseObjectWithInput:
+        row = await self.sql_store.fetch_one("openai_responses", where={"id": response_id})
+        if not row:
+            raise ValueError(f"Response with id {response_id} not found") from None
+        return OpenAIResponseObjectWithInput(**row["response_object"])
+
+    async def list_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """
+        List input items for a given response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned.
+        :param order: The order to return the input items in.
+        """
+        # TODO: support after/before pagination
+        if after or before:
+            raise NotImplementedError("After/before pagination is not supported yet")
+        if include:
+            raise NotImplementedError("Include is not supported yet")
+
+        response_with_input = await self.get_response_object(response_id)
+        input_items = response_with_input.input
+
+        if order == Order.desc:
+            input_items = list(reversed(input_items))
+
+        if limit is not None and len(input_items) > limit:
+            input_items = input_items[:limit]
+
+        return ListOpenAIResponseInputItem(data=input_items)
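A rough usage sketch for the new store (again, not part of the patch; the response_object and input_items values are assumed to come from the agents runtime, and the db path is illustrative):

from llama_stack.apis.agents import Order
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig


async def example(response_object, input_items):
    store = ResponsesStore(SqliteSqlStoreConfig(db_path="/tmp/responses.db"))
    await store.initialize()  # creates the openai_responses table if missing

    await store.store_response_object(response_object=response_object, input=input_items)

    # Newest-first listing, optionally filtered by model; `after` pagination is still a TODO.
    page = await store.list_responses(model=response_object.model, order=Order.desc)
    return await store.get_response_object(page.data[0].id)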
diff --git a/llama_stack/providers/utils/scheduler.py b/llama_stack/providers/utils/scheduler.py
index d4cffe605..845ab1f02 100644
--- a/llama_stack/providers/utils/scheduler.py
+++ b/llama_stack/providers/utils/scheduler.py
@@ -8,9 +8,10 @@ import abc
 import asyncio
 import functools
 import threading
+from collections.abc import Callable, Coroutine, Iterable
 from datetime import datetime, timezone
 from enum import Enum
-from typing import Any, Callable, Coroutine, Dict, Iterable, Tuple, TypeAlias
+from typing import Any, TypeAlias
 
 from pydantic import BaseModel
 
@@ -38,7 +39,7 @@ class JobArtifact(BaseModel):
     name: str
     # TODO: uri should be a reference to /files API; revisit when /files is implemented
     uri: str | None = None
-    metadata: Dict[str, Any]
+    metadata: dict[str, Any]
 
 
 JobHandler = Callable[
@@ -46,7 +47,7 @@ JobHandler = Callable[
 ]
 
 
-LogMessage: TypeAlias = Tuple[datetime, str]
+LogMessage: TypeAlias = tuple[datetime, str]
 
 
 _COMPLETED_STATUSES = {JobStatus.completed, JobStatus.failed}
@@ -60,7 +61,7 @@ class Job:
         self._handler = handler
         self._artifacts: list[JobArtifact] = []
         self._logs: list[LogMessage] = []
-        self._state_transitions: list[Tuple[datetime, JobStatus]] = [(datetime.now(timezone.utc), JobStatus.new)]
+        self._state_transitions: list[tuple[datetime, JobStatus]] = [(datetime.now(timezone.utc), JobStatus.new)]
 
     @property
     def handler(self) -> JobHandler:
diff --git a/llama_stack/providers/utils/scoring/aggregation_utils.py b/llama_stack/providers/utils/scoring/aggregation_utils.py
index 7254c9433..cff9a112f 100644
--- a/llama_stack/providers/utils/scoring/aggregation_utils.py
+++ b/llama_stack/providers/utils/scoring/aggregation_utils.py
@@ -4,13 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import statistics
-from typing import Any, Dict, List
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import AggregationFunctionType
 
 
-def aggregate_accuracy(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
+def aggregate_accuracy(scoring_results: list[ScoringResultRow]) -> dict[str, Any]:
     num_correct = sum(result["score"] for result in scoring_results)
     avg_score = num_correct / len(scoring_results)
 
@@ -21,14 +21,14 @@ def aggregate_accuracy(scoring_results: List[ScoringResultRow]) -> Dict[str, Any
     }
 
 
-def aggregate_average(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
+def aggregate_average(scoring_results: list[ScoringResultRow]) -> dict[str, Any]:
     return {
         "average": sum(result["score"] for result in scoring_results if result["score"] is not None)
         / len([_ for _ in scoring_results if _["score"] is not None]),
     }
 
 
-def aggregate_weighted_average(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
+def aggregate_weighted_average(scoring_results: list[ScoringResultRow]) -> dict[str, Any]:
     return {
         "weighted_average": sum(
             result["score"] * result["weight"]
@@ -40,14 +40,14 @@ def aggregate_weighted_average(scoring_results: List[ScoringResultRow]) -> Dict[
 
 
 def aggregate_categorical_count(
-    scoring_results: List[ScoringResultRow],
-) -> Dict[str, Any]:
+    scoring_results: list[ScoringResultRow],
+) -> dict[str, Any]:
     scores = [str(r["score"]) for r in scoring_results]
     unique_scores = sorted(set(scores))
     return {"categorical_count": {s: scores.count(s) for s in unique_scores}}
 
 
-def aggregate_median(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
+def aggregate_median(scoring_results: list[ScoringResultRow]) -> dict[str, Any]:
     scores = [r["score"] for r in scoring_results if r["score"] is not None]
     median = statistics.median(scores) if scores else None
     return {"median": median}
@@ -64,8 +64,8 @@ AGGREGATION_FUNCTIONS = {
 
 
 def aggregate_metrics(
-    scoring_results: List[ScoringResultRow], metrics: List[AggregationFunctionType]
-) -> Dict[str, Any]:
+    scoring_results: list[ScoringResultRow], metrics: list[AggregationFunctionType]
+) -> dict[str, Any]:
     agg_results = {}
     for metric in metrics:
         if metric not in AGGREGATION_FUNCTIONS:
diff --git a/llama_stack/providers/utils/scoring/base_scoring_fn.py b/llama_stack/providers/utils/scoring/base_scoring_fn.py
index 834deb7e1..2fae177b7 100644
--- a/llama_stack/providers/utils/scoring/base_scoring_fn.py
+++ b/llama_stack/providers/utils/scoring/base_scoring_fn.py
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 from llama_stack.apis.scoring import ScoringFnParams, ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFn
@@ -28,28 +28,28 @@ class BaseScoringFn(ABC):
     @abstractmethod
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         raise NotImplementedError()
 
     @abstractmethod
     async def aggregate(
         self,
-        scoring_results: List[ScoringResultRow],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
-    ) -> Dict[str, Any]:
+        scoring_results: list[ScoringResultRow],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
+    ) -> dict[str, Any]:
         raise NotImplementedError()
 
     @abstractmethod
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
-    ) -> List[ScoringResultRow]:
+        input_rows: list[dict[str, Any]],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
+    ) -> list[ScoringResultRow]:
         raise NotImplementedError()
 
 
@@ -65,7 +65,7 @@ class RegisteredBaseScoringFn(BaseScoringFn):
     def __str__(self) -> str:
         return self.__class__.__name__
 
-    def get_supported_scoring_fn_defs(self) -> List[ScoringFn]:
+    def get_supported_scoring_fn_defs(self) -> list[ScoringFn]:
         return list(self.supported_fn_defs_registry.values())
 
     def register_scoring_fn_def(self, scoring_fn: ScoringFn) -> None:
@@ -81,18 +81,18 @@ class RegisteredBaseScoringFn(BaseScoringFn):
     @abstractmethod
     async def score_row(
         self,
-        input_row: Dict[str, Any],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
+        input_row: dict[str, Any],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
     ) -> ScoringResultRow:
         raise NotImplementedError()
 
     async def aggregate(
         self,
-        scoring_results: List[ScoringResultRow],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
-    ) -> Dict[str, Any]:
+        scoring_results: list[ScoringResultRow],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
+    ) -> dict[str, Any]:
         params = self.supported_fn_defs_registry[scoring_fn_identifier].params
         if scoring_params is not None:
             if params is None:
@@ -107,8 +107,8 @@ class RegisteredBaseScoringFn(BaseScoringFn):
 
     async def score(
         self,
-        input_rows: List[Dict[str, Any]],
-        scoring_fn_identifier: Optional[str] = None,
-        scoring_params: Optional[ScoringFnParams] = None,
-    ) -> List[ScoringResultRow]:
+        input_rows: list[dict[str, Any]],
+        scoring_fn_identifier: str | None = None,
+        scoring_params: ScoringFnParams | None = None,
+    ) -> list[ScoringResultRow]:
         return [await self.score_row(input_row, scoring_fn_identifier, scoring_params) for input_row in input_rows]
diff --git a/llama_stack/providers/utils/scoring/basic_scoring_utils.py b/llama_stack/providers/utils/scoring/basic_scoring_utils.py
index 91abfdb2e..7372a521c 100644
--- a/llama_stack/providers/utils/scoring/basic_scoring_utils.py
+++ b/llama_stack/providers/utils/scoring/basic_scoring_utils.py
@@ -5,8 +5,8 @@
 # the root directory of this source tree.
 import contextlib
 import signal
+from collections.abc import Iterator
 from types import FrameType
-from typing import Iterator, Optional
 
 
 class TimeoutError(Exception):
@@ -15,7 +15,7 @@ class TimeoutError(Exception):
 
 @contextlib.contextmanager
 def time_limit(seconds: float) -> Iterator[None]:
-    def signal_handler(signum: int, frame: Optional[FrameType]) -> None:
+    def signal_handler(signum: int, frame: FrameType | None) -> None:
         raise TimeoutError("Timed out!")
 
     signal.setitimer(signal.ITIMER_REAL, seconds)
diff --git a/llama_stack/providers/utils/sqlstore/api.py b/llama_stack/providers/utils/sqlstore/api.py
new file mode 100644
index 000000000..ace40e4c4
--- /dev/null
+++ b/llama_stack/providers/utils/sqlstore/api.py
@@ -0,0 +1,90 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from collections.abc import Mapping
+from enum import Enum
+from typing import Any, Literal, Protocol
+
+from pydantic import BaseModel
+
+
+class ColumnType(Enum):
+    INTEGER = "INTEGER"
+    STRING = "STRING"
+    TEXT = "TEXT"
+    FLOAT = "FLOAT"
+    BOOLEAN = "BOOLEAN"
+    JSON = "JSON"
+    DATETIME = "DATETIME"
+
+
+class ColumnDefinition(BaseModel):
+    type: ColumnType
+    primary_key: bool = False
+    nullable: bool = True
+    default: Any = None
+
+
+class SqlStore(Protocol):
+    """
+    A protocol for a SQL store.
+    """
+
+    async def create_table(self, table: str, schema: Mapping[str, ColumnType | ColumnDefinition]) -> None:
+        """
+        Create a table.
+        """
+        pass
+
+    async def insert(self, table: str, data: Mapping[str, Any]) -> None:
+        """
+        Insert a row into a table.
+        """
+        pass
+
+    async def fetch_all(
+        self,
+        table: str,
+        where: Mapping[str, Any] | None = None,
+        limit: int | None = None,
+        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+    ) -> list[dict[str, Any]]:
+        """
+        Fetch all rows from a table.
+        """
+        pass
+
+    async def fetch_one(
+        self,
+        table: str,
+        where: Mapping[str, Any] | None = None,
+        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+    ) -> dict[str, Any] | None:
+        """
+        Fetch one row from a table.
+        """
+        pass
+
+    async def update(
+        self,
+        table: str,
+        data: Mapping[str, Any],
+        where: Mapping[str, Any],
+    ) -> None:
+        """
+        Update a row in a table.
+        """
+        pass
+
+    async def delete(
+        self,
+        table: str,
+        where: Mapping[str, Any],
+    ) -> None:
+        """
+        Delete a row from a table.
+        """
+        pass
diff --git a/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py b/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
new file mode 100644
index 000000000..825220679
--- /dev/null
+++ b/llama_stack/providers/utils/sqlstore/sqlalchemy_sqlstore.py
@@ -0,0 +1,163 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from collections.abc import Mapping
+from typing import Any, Literal
+
+from sqlalchemy import (
+    JSON,
+    Boolean,
+    Column,
+    DateTime,
+    Float,
+    Integer,
+    MetaData,
+    String,
+    Table,
+    Text,
+    select,
+)
+from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
+
+from .api import ColumnDefinition, ColumnType, SqlStore
+from .sqlstore import SqlAlchemySqlStoreConfig
+
+TYPE_MAPPING: dict[ColumnType, Any] = {
+    ColumnType.INTEGER: Integer,
+    ColumnType.STRING: String,
+    ColumnType.FLOAT: Float,
+    ColumnType.BOOLEAN: Boolean,
+    ColumnType.DATETIME: DateTime,
+    ColumnType.TEXT: Text,
+    ColumnType.JSON: JSON,
+}
+
+
+class SqlAlchemySqlStoreImpl(SqlStore):
+    def __init__(self, config: SqlAlchemySqlStoreConfig):
+        self.config = config
+        self.async_session = async_sessionmaker(create_async_engine(config.engine_str))
+        self.metadata = MetaData()
+
+    async def create_table(
+        self,
+        table: str,
+        schema: Mapping[str, ColumnType | ColumnDefinition],
+    ) -> None:
+        if not schema:
+            raise ValueError(f"No columns defined for table '{table}'.")
+
+        sqlalchemy_columns: list[Column] = []
+
+        for col_name, col_props in schema.items():
+            col_type = None
+            is_primary_key = False
+            is_nullable = True  # Default to nullable
+
+            if isinstance(col_props, ColumnType):
+                col_type = col_props
+            elif isinstance(col_props, ColumnDefinition):
+                col_type = col_props.type
+                is_primary_key = col_props.primary_key
+                is_nullable = col_props.nullable
+
+            sqlalchemy_type = TYPE_MAPPING.get(col_type)
+            if not sqlalchemy_type:
+                raise ValueError(f"Unsupported column type '{col_type}' for column '{col_name}'.")
+
+            sqlalchemy_columns.append(
+                Column(col_name, sqlalchemy_type, primary_key=is_primary_key, nullable=is_nullable)
+            )
+
+        # Check if table already exists in metadata, otherwise define it
+        if table not in self.metadata.tables:
+            sqlalchemy_table = Table(table, self.metadata, *sqlalchemy_columns)
+        else:
+            sqlalchemy_table = self.metadata.tables[table]
+
+        # Create the table in the database if it doesn't exist
+        # checkfirst=True ensures it doesn't try to recreate if it's already there
+        engine = create_async_engine(self.config.engine_str)
+        async with engine.begin() as conn:
+            await conn.run_sync(self.metadata.create_all, tables=[sqlalchemy_table], checkfirst=True)
+
+    async def insert(self, table: str, data: Mapping[str, Any]) -> None:
+        async with self.async_session() as session:
+            await session.execute(self.metadata.tables[table].insert(), data)
+            await session.commit()
+
+    async def fetch_all(
+        self,
+        table: str,
+        where: Mapping[str, Any] | None = None,
+        limit: int | None = None,
+        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+    ) -> list[dict[str, Any]]:
+        async with self.async_session() as session:
+            query = select(self.metadata.tables[table])
+            if where:
+                for key, value in where.items():
+                    query = query.where(self.metadata.tables[table].c[key] == value)
+            if limit:
+                query = query.limit(limit)
+            if order_by:
+                if not isinstance(order_by, list):
+                    raise ValueError(
+                        f"order_by must be a list of tuples (column, order={['asc', 'desc']}), got {order_by}"
+                    )
+                for order in order_by:
+                    if not isinstance(order, tuple):
+                        raise ValueError(
+                            f"order_by must be a list of tuples (column, order={['asc', 'desc']}), got {order_by}"
+                        )
+                    name, order_type = order
+                    if order_type == "asc":
+                        query = query.order_by(self.metadata.tables[table].c[name].asc())
+                    elif order_type == "desc":
+                        query = query.order_by(self.metadata.tables[table].c[name].desc())
+                    else:
+                        raise ValueError(f"Invalid order '{order_type}' for column '{name}'")
+            result = await session.execute(query)
+            if result.rowcount == 0:
+                return []
+            return [dict(row._mapping) for row in result]
+
+    async def fetch_one(
+        self,
+        table: str,
+        where: Mapping[str, Any] | None = None,
+        order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
+    ) -> dict[str, Any] | None:
+        rows = await self.fetch_all(table, where, limit=1, order_by=order_by)
+        if not rows:
+            return None
+        return rows[0]
+
+    async def update(
+        self,
+        table: str,
+        data: Mapping[str, Any],
+        where: Mapping[str, Any],
+    ) -> None:
+        if not where:
+            raise ValueError("where is required for update")
+
+        async with self.async_session() as session:
+            stmt = self.metadata.tables[table].update()
+            for key, value in where.items():
+                stmt = stmt.where(self.metadata.tables[table].c[key] == value)
+            await session.execute(stmt, data)
+            await session.commit()
+
+    async def delete(self, table: str, where: Mapping[str, Any]) -> None:
+        if not where:
+            raise ValueError("where is required for delete")
+
+        async with self.async_session() as session:
+            stmt = self.metadata.tables[table].delete()
+            for key, value in where.items():
+                stmt = stmt.where(self.metadata.tables[table].c[key] == value)
+            await session.execute(stmt)
+            await session.commit()
diff --git a/llama_stack/providers/utils/sqlstore/sqlstore.py b/llama_stack/providers/utils/sqlstore/sqlstore.py
new file mode 100644
index 000000000..3091e8f96
--- /dev/null
+++ b/llama_stack/providers/utils/sqlstore/sqlstore.py
@@ -0,0 +1,90 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from abc import abstractmethod
+from enum import Enum
+from pathlib import Path
+from typing import Annotated, Literal
+
+from pydantic import BaseModel, Field
+
+from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
+
+from .api import SqlStore
+
+
+class SqlStoreType(Enum):
+    sqlite = "sqlite"
+    postgres = "postgres"
+
+
+class SqlAlchemySqlStoreConfig(BaseModel):
+    @property
+    @abstractmethod
+    def engine_str(self) -> str: ...
+
+    # TODO: move this when we have a better way to specify dependencies with internal APIs
+    @property
+    def pip_packages(self) -> list[str]:
+        return ["sqlalchemy[asyncio]"]
+
+
+class SqliteSqlStoreConfig(SqlAlchemySqlStoreConfig):
+    type: Literal["sqlite"] = SqlStoreType.sqlite.value
+    db_path: str = Field(
+        default=(RUNTIME_BASE_DIR / "sqlstore.db").as_posix(),
+        description="Database path, e.g. ~/.llama/distributions/ollama/sqlstore.db",
+    )
+
+    @property
+    def engine_str(self) -> str:
+        return "sqlite+aiosqlite:///" + Path(self.db_path).expanduser().as_posix()
+
+    @classmethod
+    def sample_run_config(cls, __distro_dir__: str, db_name: str = "sqlstore.db"):
+        return cls(
+            type="sqlite",
+            db_path="${env.SQLITE_STORE_DIR:" + __distro_dir__ + "}/" + db_name,
+        )
+
+    @property
+    def pip_packages(self) -> list[str]:
+        return super().pip_packages + ["aiosqlite"]
+
+
+class PostgresSqlStoreConfig(SqlAlchemySqlStoreConfig):
+    type: Literal["postgres"] = SqlStoreType.postgres.value
+    host: str = "localhost"
+    port: str = "5432"
+    db: str = "llamastack"
+    user: str
+    password: str | None = None
+
+    @property
+    def engine_str(self) -> str:
+        return f"postgresql+asyncpg://{self.user}:{self.password}@{self.host}:{self.port}/{self.db}"
+
+    @property
+    def pip_packages(self) -> list[str]:
+        return super().pip_packages + ["asyncpg"]
+
+
+SqlStoreConfig = Annotated[
+    SqliteSqlStoreConfig | PostgresSqlStoreConfig,
+    Field(discriminator="type", default=SqlStoreType.sqlite.value),
+]
+
+
+def sqlstore_impl(config: SqlStoreConfig) -> SqlStore:
+    if config.type in [SqlStoreType.sqlite.value, SqlStoreType.postgres.value]:
+        from .sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+
+        impl = SqlAlchemySqlStoreImpl(config)
+    else:
+        raise ValueError(f"Unknown sqlstore type {config.type}")
+
+    return impl
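Taken together with the API and SQLAlchemy implementation above, a minimal end-to-end sketch looks like this (table name and values are illustrative, not part of the patch):

from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig, sqlstore_impl


async def example():
    sql_store = sqlstore_impl(SqliteSqlStoreConfig(db_path="/tmp/example.db"))

    await sql_store.create_table(
        "events",
        {
            "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
            "created_at": ColumnType.INTEGER,
            "payload": ColumnType.JSON,
        },
    )
    await sql_store.insert("events", {"id": "ev-1", "created_at": 1715000000, "payload": {"ok": True}})

    # fetch_all/fetch_one accept simple equality filters plus ordering and limits.
    return await sql_store.fetch_all("events", where={"id": "ev-1"}, order_by=[("created_at", "desc")], limit=10)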
diff --git a/llama_stack/providers/utils/telemetry/dataset_mixin.py b/llama_stack/providers/utils/telemetry/dataset_mixin.py
index 34c612133..fe729a244 100644
--- a/llama_stack/providers/utils/telemetry/dataset_mixin.py
+++ b/llama_stack/providers/utils/telemetry/dataset_mixin.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Optional
 
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.telemetry import QueryCondition, QuerySpansResponse, Span
@@ -17,10 +16,10 @@ class TelemetryDatasetMixin:
 
     async def save_spans_to_dataset(
         self,
-        attribute_filters: List[QueryCondition],
-        attributes_to_save: List[str],
+        attribute_filters: list[QueryCondition],
+        attributes_to_save: list[str],
         dataset_id: str,
-        max_depth: Optional[int] = None,
+        max_depth: int | None = None,
     ) -> None:
         if self.datasetio_api is None:
             raise RuntimeError("DatasetIO API not available")
@@ -48,9 +47,9 @@ class TelemetryDatasetMixin:
 
     async def query_spans(
         self,
-        attribute_filters: List[QueryCondition],
-        attributes_to_return: List[str],
-        max_depth: Optional[int] = None,
+        attribute_filters: list[QueryCondition],
+        attributes_to_return: list[str],
+        max_depth: int | None = None,
     ) -> QuerySpansResponse:
         traces = await self.query_traces(attribute_filters=attribute_filters)
         spans = []
diff --git a/llama_stack/providers/utils/telemetry/sqlite_trace_store.py b/llama_stack/providers/utils/telemetry/sqlite_trace_store.py
index 3248f3fa7..af1145fe7 100644
--- a/llama_stack/providers/utils/telemetry/sqlite_trace_store.py
+++ b/llama_stack/providers/utils/telemetry/sqlite_trace_store.py
@@ -6,7 +6,7 @@
 
 import json
 from datetime import datetime
-from typing import Dict, List, Optional, Protocol
+from typing import Protocol
 
 import aiosqlite
 
@@ -16,18 +16,18 @@ from llama_stack.apis.telemetry import QueryCondition, Span, SpanWithStatus, Tra
 class TraceStore(Protocol):
     async def query_traces(
         self,
-        attribute_filters: Optional[List[QueryCondition]] = None,
-        limit: Optional[int] = 100,
-        offset: Optional[int] = 0,
-        order_by: Optional[List[str]] = None,
-    ) -> List[Trace]: ...
+        attribute_filters: list[QueryCondition] | None = None,
+        limit: int | None = 100,
+        offset: int | None = 0,
+        order_by: list[str] | None = None,
+    ) -> list[Trace]: ...
 
     async def get_span_tree(
         self,
         span_id: str,
-        attributes_to_return: Optional[List[str]] = None,
-        max_depth: Optional[int] = None,
-    ) -> Dict[str, SpanWithStatus]: ...
+        attributes_to_return: list[str] | None = None,
+        max_depth: int | None = None,
+    ) -> dict[str, SpanWithStatus]: ...
 
 
 class SQLiteTraceStore(TraceStore):
@@ -36,11 +36,11 @@ class SQLiteTraceStore(TraceStore):
 
     async def query_traces(
         self,
-        attribute_filters: Optional[List[QueryCondition]] = None,
-        limit: Optional[int] = 100,
-        offset: Optional[int] = 0,
-        order_by: Optional[List[str]] = None,
-    ) -> List[Trace]:
+        attribute_filters: list[QueryCondition] | None = None,
+        limit: int | None = 100,
+        offset: int | None = 0,
+        order_by: list[str] | None = None,
+    ) -> list[Trace]:
         def build_where_clause() -> tuple[str, list]:
             if not attribute_filters:
                 return "", []
@@ -112,9 +112,9 @@ class SQLiteTraceStore(TraceStore):
     async def get_span_tree(
         self,
         span_id: str,
-        attributes_to_return: Optional[List[str]] = None,
-        max_depth: Optional[int] = None,
-    ) -> Dict[str, SpanWithStatus]:
+        attributes_to_return: list[str] | None = None,
+        max_depth: int | None = None,
+    ) -> dict[str, SpanWithStatus]:
         # Build the attributes selection
         attributes_select = "s.attributes"
         if attributes_to_return:
diff --git a/llama_stack/providers/utils/telemetry/trace_protocol.py b/llama_stack/providers/utils/telemetry/trace_protocol.py
index 525ade74d..eb6d8b331 100644
--- a/llama_stack/providers/utils/telemetry/trace_protocol.py
+++ b/llama_stack/providers/utils/telemetry/trace_protocol.py
@@ -7,8 +7,9 @@
 import asyncio
 import inspect
 import json
+from collections.abc import AsyncGenerator, Callable
 from functools import wraps
-from typing import Any, AsyncGenerator, Callable, Type, TypeVar
+from typing import Any, TypeVar
 
 from pydantic import BaseModel
 
@@ -25,13 +26,13 @@ def _prepare_for_json(value: Any) -> str:
     """Serialize a single value into JSON-compatible format."""
     if value is None:
         return ""
-    elif isinstance(value, (str, int, float, bool)):
+    elif isinstance(value, str | int | float | bool):
         return value
     elif hasattr(value, "_name_"):
         return value._name_
     elif isinstance(value, BaseModel):
         return json.loads(value.model_dump_json())
-    elif isinstance(value, (list, tuple, set)):
+    elif isinstance(value, list | tuple | set):
         return [_prepare_for_json(item) for item in value]
     elif isinstance(value, dict):
         return {str(k): _prepare_for_json(v) for k, v in value.items()}
@@ -43,7 +44,7 @@ def _prepare_for_json(value: Any) -> str:
             return str(value)
 
 
-def trace_protocol(cls: Type[T]) -> Type[T]:
+def trace_protocol(cls: type[T]) -> type[T]:
     """
     A class decorator that automatically traces all methods in a protocol/base class
     and its inheriting classes.
diff --git a/llama_stack/providers/utils/telemetry/tracing.py b/llama_stack/providers/utils/telemetry/tracing.py
index 3d5c717d6..4edfa6516 100644
--- a/llama_stack/providers/utils/telemetry/tracing.py
+++ b/llama_stack/providers/utils/telemetry/tracing.py
@@ -10,9 +10,10 @@ import logging
 import queue
 import random
 import threading
+from collections.abc import Callable
 from datetime import datetime, timezone
 from functools import wraps
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any
 
 from llama_stack.apis.telemetry import (
     LogSeverity,
@@ -33,6 +34,8 @@ logger = get_logger(__name__, category="core")
 INVALID_SPAN_ID = 0x0000000000000000
 INVALID_TRACE_ID = 0x00000000000000000000000000000000
 
+ROOT_SPAN_MARKERS = ["__root__", "__root_span__"]
+
 
 def trace_id_to_str(trace_id: int) -> str:
     """Convenience trace ID formatting method
@@ -106,13 +109,13 @@ class BackgroundLogger:
 
 
 class TraceContext:
-    spans: List[Span] = []
+    spans: list[Span] = []
 
     def __init__(self, logger: BackgroundLogger, trace_id: str):
         self.logger = logger
         self.trace_id = trace_id
 
-    def push_span(self, name: str, attributes: Dict[str, Any] = None) -> Span:
+    def push_span(self, name: str, attributes: dict[str, Any] = None) -> Span:
         current_span = self.get_current_span()
         span = Span(
             span_id=generate_span_id(),
@@ -168,7 +171,7 @@ def setup_logger(api: Telemetry, level: int = logging.INFO):
     root_logger.addHandler(TelemetryHandler())
 
 
-async def start_trace(name: str, attributes: Dict[str, Any] = None) -> TraceContext:
+async def start_trace(name: str, attributes: dict[str, Any] = None) -> TraceContext:
     global CURRENT_TRACE_CONTEXT, BACKGROUND_LOGGER
 
     if BACKGROUND_LOGGER is None:
@@ -177,7 +180,8 @@ async def start_trace(name: str, attributes: Dict[str, Any] = None) -> TraceCont
 
     trace_id = generate_trace_id()
     context = TraceContext(BACKGROUND_LOGGER, trace_id)
-    context.push_span(name, {"__root__": True, **(attributes or {})})
+    attributes = {marker: True for marker in ROOT_SPAN_MARKERS} | (attributes or {})
+    context.push_span(name, attributes)
 
     CURRENT_TRACE_CONTEXT.set(context)
     return context
@@ -246,7 +250,7 @@ class TelemetryHandler(logging.Handler):
 
 
 class SpanContextManager:
-    def __init__(self, name: str, attributes: Dict[str, Any] = None):
+    def __init__(self, name: str, attributes: dict[str, Any] = None):
         self.name = name
         self.attributes = attributes
         self.span = None
@@ -316,11 +320,11 @@ class SpanContextManager:
         return wrapper
 
 
-def span(name: str, attributes: Dict[str, Any] = None):
+def span(name: str, attributes: dict[str, Any] = None):
     return SpanContextManager(name, attributes)
 
 
-def get_current_span() -> Optional[Span]:
+def get_current_span() -> Span | None:
     global CURRENT_TRACE_CONTEXT
     if CURRENT_TRACE_CONTEXT is None:
         logger.debug("No trace context to get current span")
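
To make the new root-span marking concrete, here is a minimal sketch of the attribute seeding that start_trace() now performs; the caller-supplied attribute is a made-up example:

# Sketch only: mirrors the attribute seeding done in start_trace().
ROOT_SPAN_MARKERS = ["__root__", "__root_span__"]

def seed_attributes(attributes=None):
    # Set every marker, then merge caller-supplied attributes
    # (the right-hand side of the dict union wins on key collisions).
    return {marker: True for marker in ROOT_SPAN_MARKERS} | (attributes or {})

print(seed_attributes({"session_id": "abc"}))
# {'__root__': True, '__root_span__': True, 'session_id': 'abc'}
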
diff --git a/llama_stack/providers/utils/tools/mcp.py b/llama_stack/providers/utils/tools/mcp.py
new file mode 100644
index 000000000..f024693a0
--- /dev/null
+++ b/llama_stack/providers/utils/tools/mcp.py
@@ -0,0 +1,100 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from contextlib import asynccontextmanager
+from typing import Any
+
+try:
+    # for python < 3.11
+    import exceptiongroup
+
+    BaseExceptionGroup = exceptiongroup.BaseExceptionGroup
+except ImportError:
+    pass
+
+import httpx
+from mcp import ClientSession
+from mcp import types as mcp_types
+from mcp.client.sse import sse_client
+
+from llama_stack.apis.common.content_types import ImageContentItem, InterleavedContentItem, TextContentItem
+from llama_stack.apis.tools import (
+    ListToolDefsResponse,
+    ToolDef,
+    ToolInvocationResult,
+    ToolParameter,
+)
+from llama_stack.distribution.datatypes import AuthenticationRequiredError
+from llama_stack.log import get_logger
+
+logger = get_logger(__name__, category="tools")
+
+
+@asynccontextmanager
+async def sse_client_wrapper(endpoint: str, headers: dict[str, str]):
+    try:
+        async with sse_client(endpoint, headers=headers) as streams:
+            async with ClientSession(*streams) as session:
+                await session.initialize()
+                yield session
+    except BaseException as e:
+        if isinstance(e, BaseExceptionGroup):
+            for exc in e.exceptions:
+                if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code == 401:
+                    raise AuthenticationRequiredError(exc) from exc
+        elif isinstance(e, httpx.HTTPStatusError) and e.response.status_code == 401:
+            raise AuthenticationRequiredError(e) from e
+
+        raise
+
+
+async def list_mcp_tools(endpoint: str, headers: dict[str, str]) -> ListToolDefsResponse:
+    tools = []
+    async with sse_client_wrapper(endpoint, headers) as session:
+        tools_result = await session.list_tools()
+        for tool in tools_result.tools:
+            parameters = []
+            for param_name, param_schema in tool.inputSchema.get("properties", {}).items():
+                parameters.append(
+                    ToolParameter(
+                        name=param_name,
+                        parameter_type=param_schema.get("type", "string"),
+                        description=param_schema.get("description", ""),
+                    )
+                )
+            tools.append(
+                ToolDef(
+                    name=tool.name,
+                    description=tool.description,
+                    parameters=parameters,
+                    metadata={
+                        "endpoint": endpoint,
+                    },
+                )
+            )
+    return ListToolDefsResponse(data=tools)
+
+
+async def invoke_mcp_tool(
+    endpoint: str, headers: dict[str, str], tool_name: str, kwargs: dict[str, Any]
+) -> ToolInvocationResult:
+    async with sse_client_wrapper(endpoint, headers) as session:
+        result = await session.call_tool(tool_name, kwargs)
+
+        content: list[InterleavedContentItem] = []
+        for item in result.content:
+            if isinstance(item, mcp_types.TextContent):
+                content.append(TextContentItem(text=item.text))
+            elif isinstance(item, mcp_types.ImageContent):
+                content.append(ImageContentItem(image=item.data))
+            elif isinstance(item, mcp_types.EmbeddedResource):
+                logger.warning(f"EmbeddedResource is not supported: {item}")
+            else:
+                raise ValueError(f"Unknown content type: {type(item)}")
+        return ToolInvocationResult(
+            content=content,
+            error_code=1 if result.isError else 0,
+        )
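
A hedged usage sketch for the new MCP helpers; the endpoint URL, auth header, tool name, and arguments below are placeholders, not values from this repository:

# Sketch only: calling the new MCP helpers with hypothetical endpoint/headers/tool.
import asyncio

from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools

async def main():
    endpoint = "http://localhost:3000/sse"          # hypothetical MCP SSE endpoint
    headers = {"Authorization": "Bearer <token>"}   # hypothetical auth header

    tool_defs = await list_mcp_tools(endpoint, headers)
    for tool in tool_defs.data:
        print(tool.name, [p.name for p in tool.parameters])

    # invoke_mcp_tool returns a ToolInvocationResult; error_code is 1 when the MCP server reports an error.
    result = await invoke_mcp_tool(endpoint, headers, tool_name="echo", kwargs={"text": "hi"})
    print(result.error_code, result.content)

asyncio.run(main())
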
diff --git a/llama_stack/schema_utils.py b/llama_stack/schema_utils.py
index 8143f1224..694de333e 100644
--- a/llama_stack/schema_utils.py
+++ b/llama_stack/schema_utils.py
@@ -4,37 +4,38 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from collections.abc import Callable
 from dataclasses import dataclass
-from typing import Any, Callable, List, Optional, TypeVar
+from typing import Any, TypeVar
 
 from .strong_typing.schema import json_schema_type, register_schema  # noqa: F401
 
 
 @dataclass
 class WebMethod:
-    route: Optional[str] = None
+    route: str | None = None
     public: bool = False
-    request_examples: Optional[List[Any]] = None
-    response_examples: Optional[List[Any]] = None
-    method: Optional[str] = None
-    raw_bytes_request_body: Optional[bool] = False
+    request_examples: list[Any] | None = None
+    response_examples: list[Any] | None = None
+    method: str | None = None
+    raw_bytes_request_body: bool | None = False
     # A descriptive name of the corresponding span created by tracing
-    descriptive_name: Optional[str] = None
-    experimental: Optional[bool] = False
+    descriptive_name: str | None = None
+    experimental: bool | None = False
 
 
 T = TypeVar("T", bound=Callable[..., Any])
 
 
 def webmethod(
-    route: Optional[str] = None,
-    method: Optional[str] = None,
-    public: Optional[bool] = False,
-    request_examples: Optional[List[Any]] = None,
-    response_examples: Optional[List[Any]] = None,
-    raw_bytes_request_body: Optional[bool] = False,
-    descriptive_name: Optional[str] = None,
-    experimental: Optional[bool] = False,
+    route: str | None = None,
+    method: str | None = None,
+    public: bool | None = False,
+    request_examples: list[Any] | None = None,
+    response_examples: list[Any] | None = None,
+    raw_bytes_request_body: bool | None = False,
+    descriptive_name: str | None = None,
+    experimental: bool | None = False,
 ) -> Callable[[T], T]:
     """
     Decorator that supplies additional metadata to an endpoint operation function.
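
For reference, a minimal sketch of applying the updated decorator; the route, method, and class below are invented for illustration, not part of this change:

# Sketch only: webmethod usage with the new `| None` annotations; route/class are hypothetical.
from llama_stack.schema_utils import webmethod

class ExampleAPI:
    @webmethod(route="/example/ping", method="GET", descriptive_name="example_ping")
    async def ping(self) -> dict:
        return {"ok": True}
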
diff --git a/llama_stack/strong_typing/docstring.py b/llama_stack/strong_typing/docstring.py
index b038d1024..497c9ea82 100644
--- a/llama_stack/strong_typing/docstring.py
+++ b/llama_stack/strong_typing/docstring.py
@@ -11,6 +11,7 @@ Type-safe data interchange for Python data classes.
 """
 
 import builtins
+import collections.abc
 import dataclasses
 import inspect
 import re
@@ -171,6 +172,13 @@ class SupportsDoc(Protocol):
     __doc__: Optional[str]
 
 
+def _maybe_unwrap_async_iterator(t):
+    origin_type = typing.get_origin(t)
+    if origin_type is collections.abc.AsyncIterator:
+        return typing.get_args(t)[0]
+    return t
+
+
 def parse_type(typ: SupportsDoc) -> Docstring:
     """
     Parse the docstring of a type into its components.
@@ -178,6 +186,8 @@ def parse_type(typ: SupportsDoc) -> Docstring:
     :param typ: The type whose documentation string to parse.
     :returns: Components of the documentation string.
     """
+    # For streaming APIs, parse the docstring from the AsyncIterator's item type
+    typ = _maybe_unwrap_async_iterator(typ)
 
     doc = get_docstring(typ)
     if doc is None:
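
A small sketch of what the unwrapping helper does: for a streaming return annotation such as AsyncIterator[Chunk], the docstring is parsed from Chunk itself. Chunk here is a placeholder type:

# Sketch only: the unwrapping performed by _maybe_unwrap_async_iterator.
import collections.abc
import typing

class Chunk:  # hypothetical item type
    """One streamed chunk."""

def unwrap(t):
    if typing.get_origin(t) is collections.abc.AsyncIterator:
        return typing.get_args(t)[0]
    return t

assert unwrap(collections.abc.AsyncIterator[Chunk]) is Chunk
assert unwrap(Chunk) is Chunk
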
diff --git a/llama_stack/strong_typing/schema.py b/llama_stack/strong_typing/schema.py
index 0f5121906..82baddc86 100644
--- a/llama_stack/strong_typing/schema.py
+++ b/llama_stack/strong_typing/schema.py
@@ -10,6 +10,7 @@ Type-safe data interchange for Python data classes.
 :see: https://github.com/hunyadi/strong_typing
 """
 
+import collections.abc
 import dataclasses
 import datetime
 import decimal
@@ -478,6 +479,8 @@ class JsonSchemaGenerator:
                 }
             return ret
         elif origin_type is Literal:
+            if len(typing.get_args(typ)) != 1:
+                raise ValueError(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
             (literal_value,) = typing.get_args(typ)  # unpack value of literal type
             schema = self.type_to_schema(type(literal_value))
             schema["const"] = literal_value
@@ -485,6 +488,9 @@ class JsonSchemaGenerator:
         elif origin_type is type:
             (concrete_type,) = typing.get_args(typ)  # unpack single tuple element
             return {"const": self.type_to_schema(concrete_type, force_expand=True)}
+        elif origin_type is collections.abc.AsyncIterator:
+            (concrete_type,) = typing.get_args(typ)
+            return self.type_to_schema(concrete_type)
 
         # dictionary of class attributes
         members = dict(inspect.getmembers(typ, lambda a: not inspect.isroutine(a)))
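
The same idea applies on the schema side: an AsyncIterator[T] annotation now yields the schema of T, and a multi-valued Literal is rejected early. A sketch of the intended dispatch, with the actual schema generation elided:

# Sketch only: mirrors the two new branches added to JsonSchemaGenerator.
import collections.abc
import typing
from typing import Literal

def dispatch(typ):
    origin = typing.get_origin(typ)
    if origin is Literal:
        if len(typing.get_args(typ)) != 1:
            raise ValueError(f"Literal type {typ} has {len(typing.get_args(typ))} arguments")
        (literal_value,) = typing.get_args(typ)
        return {"const": literal_value}
    if origin is collections.abc.AsyncIterator:
        (concrete_type,) = typing.get_args(typ)
        return f"schema of {concrete_type.__name__}"  # delegated to type_to_schema in the real code
    return "unhandled here"

print(dispatch(Literal["done"]))                     # {'const': 'done'}
print(dispatch(collections.abc.AsyncIterator[int]))  # schema of int
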
diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py
index f82defb4b..bc3a9304f 100644
--- a/llama_stack/templates/bedrock/bedrock.py
+++ b/llama_stack/templates/bedrock/bedrock.py
@@ -29,7 +29,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -55,10 +54,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/bedrock/build.yaml b/llama_stack/templates/bedrock/build.yaml
index 6c07b0478..97a06f77a 100644
--- a/llama_stack/templates/bedrock/build.yaml
+++ b/llama_stack/templates/bedrock/build.yaml
@@ -26,7 +26,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/bedrock/run.yaml b/llama_stack/templates/bedrock/run.yaml
index fe21d4bef..a58068a60 100644
--- a/llama_stack/templates/bedrock/run.yaml
+++ b/llama_stack/templates/bedrock/run.yaml
@@ -35,13 +35,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/bedrock/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -87,9 +90,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -99,6 +99,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/inference_store.db
 models:
 - metadata: {}
   model_id: meta.llama3-1-8b-instruct-v1:0
@@ -140,7 +143,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
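
Most of the run.yaml churn above and in the templates that follow is the switch to `${env.NAME:default}` placeholders (e.g. SQLITE_STORE_DIR with a default path, or an empty default for OTEL_SERVICE_NAME). How the stack resolves these is not shown in this diff; the snippet below is only an illustrative stand-in for that substitution, not the actual implementation:

# Sketch only: illustrative expansion of ${env.NAME:default} placeholders.
# This is NOT the llama-stack implementation, just a stand-in to show the syntax.
import os
import re

_PATTERN = re.compile(r"\$\{env\.([A-Z0-9_]+):([^}]*)\}")

def expand(value: str) -> str:
    return _PATTERN.sub(lambda m: os.environ.get(m.group(1), m.group(2)), value)

print(expand("${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/trace_store.db"))
# -> ~/.llama/distributions/bedrock/trace_store.db  (when SQLITE_STORE_DIR is unset)
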
diff --git a/llama_stack/templates/cerebras/build.yaml b/llama_stack/templates/cerebras/build.yaml
index ef6c43212..f26f4ed9b 100644
--- a/llama_stack/templates/cerebras/build.yaml
+++ b/llama_stack/templates/cerebras/build.yaml
@@ -27,6 +27,8 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py
index c370fb7d0..d891502d8 100644
--- a/llama_stack/templates/cerebras/cerebras.py
+++ b/llama_stack/templates/cerebras/cerebras.py
@@ -34,7 +34,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
         ],
     }
@@ -77,10 +76,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/cerebras/doc_template.md b/llama_stack/templates/cerebras/doc_template.md
index 76f8c34ad..5cae2b2da 100644
--- a/llama_stack/templates/cerebras/doc_template.md
+++ b/llama_stack/templates/cerebras/doc_template.md
@@ -46,7 +46,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env CEREBRAS_API_KEY=$CEREBRAS_API_KEY
 ```
diff --git a/llama_stack/templates/cerebras/report.md b/llama_stack/templates/cerebras/report.md
deleted file mode 100644
index 7c09474b1..000000000
--- a/llama_stack/templates/cerebras/report.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Report for cerebras distribution
-
-## Supported Models
-| Model Descriptor | cerebras |
-|:---|:---|
-| meta-llama/Llama-3-8B-Instruct | ❌ |
-| meta-llama/Llama-3-70B-Instruct | ❌ |
-| meta-llama/Llama-3.1-8B-Instruct | ✅ |
-| meta-llama/Llama-3.1-70B-Instruct | ❌ |
-| meta-llama/Llama-3.1-405B-Instruct-FP8 | ❌ |
-| meta-llama/Llama-3.2-1B-Instruct | ❌ |
-| meta-llama/Llama-3.2-3B-Instruct | ❌ |
-| meta-llama/Llama-3.2-11B-Vision-Instruct | ❌ |
-| meta-llama/Llama-3.2-90B-Vision-Instruct | ❌ |
-| meta-llama/Llama-3.3-70B-Instruct | ✅ |
-| meta-llama/Llama-Guard-3-11B-Vision | ❌ |
-| meta-llama/Llama-Guard-3-1B | ❌ |
-| meta-llama/Llama-Guard-3-8B | ❌ |
-| meta-llama/Llama-Guard-2-8B | ❌ |
-
-## Inference
-| Model | API | Capability | Test | Status |
-|:----- |:-----|:-----|:-----|:-----|
-| Llama-3.1-8B-Instruct | /chat_completion | streaming | test_text_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | streaming | test_image_chat_completion_streaming | ❌ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | non_streaming | test_image_chat_completion_non_streaming | ❌ |
-| Llama-3.1-8B-Instruct | /chat_completion | non_streaming | test_text_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | streaming | test_text_completion_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | non_streaming | test_text_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | structured_output | test_text_completion_structured_output | ❌ |
-
-## Vector IO
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /retrieve |  | test_vector_db_retrieve | ✅ |
-
-## Agents
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /create_agent_turn | rag | test_rag_agent | ✅ |
-| /create_agent_turn | custom_tool | test_custom_tool | ❌ |
-| /create_agent_turn | code_execution | test_code_interpreter_for_attachments | ✅ |
diff --git a/llama_stack/templates/cerebras/run.yaml b/llama_stack/templates/cerebras/run.yaml
index dc7ee4729..c080536b7 100644
--- a/llama_stack/templates/cerebras/run.yaml
+++ b/llama_stack/templates/cerebras/run.yaml
@@ -41,6 +41,9 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/responses_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -79,9 +82,9 @@ providers:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/cerebras/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/trace_store.db
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -93,15 +96,15 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/cerebras}/inference_store.db
 models:
 - metadata: {}
   model_id: llama3.1-8b
@@ -138,7 +141,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/ci-tests/build.yaml b/llama_stack/templates/ci-tests/build.yaml
index a5c615f2f..9f4fbbdda 100644
--- a/llama_stack/templates/ci-tests/build.yaml
+++ b/llama_stack/templates/ci-tests/build.yaml
@@ -27,7 +27,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/ci-tests/ci_tests.py b/llama_stack/templates/ci-tests/ci_tests.py
index f6e836918..afa8a23ce 100644
--- a/llama_stack/templates/ci-tests/ci_tests.py
+++ b/llama_stack/templates/ci-tests/ci_tests.py
@@ -40,7 +40,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -71,10 +70,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
     available_models = {
         "fireworks": MODEL_ENTRIES,
diff --git a/llama_stack/templates/ci-tests/run.yaml b/llama_stack/templates/ci-tests/run.yaml
index 3c16dd5ea..368187d3a 100644
--- a/llama_stack/templates/ci-tests/run.yaml
+++ b/llama_stack/templates/ci-tests/run.yaml
@@ -38,13 +38,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ci-tests/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -90,9 +93,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -102,6 +102,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ci-tests}/inference_store.db
 models:
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
@@ -236,7 +239,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/dell/build.yaml b/llama_stack/templates/dell/build.yaml
index 05b98d56f..513df16c1 100644
--- a/llama_stack/templates/dell/build.yaml
+++ b/llama_stack/templates/dell/build.yaml
@@ -28,6 +28,8 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/dell/dell.py b/llama_stack/templates/dell/dell.py
index 52c5a5476..a7ec5f3b8 100644
--- a/llama_stack/templates/dell/dell.py
+++ b/llama_stack/templates/dell/dell.py
@@ -30,7 +30,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
         ],
     }
@@ -87,10 +86,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/dell/doc_template.md b/llama_stack/templates/dell/doc_template.md
index 26f07130b..6bdd7f81c 100644
--- a/llama_stack/templates/dell/doc_template.md
+++ b/llama_stack/templates/dell/doc_template.md
@@ -143,7 +143,7 @@ docker run \
   -v $HOME/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env DEH_URL=$DEH_URL \
diff --git a/llama_stack/templates/dell/run-with-safety.yaml b/llama_stack/templates/dell/run-with-safety.yaml
index 802c56aad..5c6072245 100644
--- a/llama_stack/templates/dell/run-with-safety.yaml
+++ b/llama_stack/templates/dell/run-with-safety.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/dell/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,15 +96,15 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -127,7 +130,5 @@ tool_groups:
   provider_id: brave-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/dell/run.yaml b/llama_stack/templates/dell/run.yaml
index 4a2d819a9..ffaa0bf2f 100644
--- a/llama_stack/templates/dell/run.yaml
+++ b/llama_stack/templates/dell/run.yaml
@@ -37,13 +37,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/dell/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -89,15 +92,15 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dell}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -118,7 +121,5 @@ tool_groups:
   provider_id: brave-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/dependencies.json b/llama_stack/templates/dependencies.json
index 4c16411f0..47a35edc0 100644
--- a/llama_stack/templates/dependencies.json
+++ b/llama_stack/templates/dependencies.json
@@ -31,6 +31,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -67,6 +68,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -105,6 +107,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "sqlite-vec",
     "tqdm",
     "transformers",
@@ -145,46 +148,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
-    "tqdm",
-    "transformers",
-    "tree_sitter",
-    "uvicorn",
-    "sentence-transformers --no-deps",
-    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
-  ],
-  "dev": [
-    "aiosqlite",
-    "autoevals",
-    "blobfile",
-    "chardet",
-    "chromadb-client",
-    "datasets",
-    "emoji",
-    "fastapi",
-    "fire",
-    "fireworks-ai",
-    "httpx",
-    "langdetect",
-    "litellm",
-    "matplotlib",
-    "mcp",
-    "nltk",
-    "numpy",
-    "openai",
-    "opentelemetry-exporter-otlp-proto-http",
-    "opentelemetry-sdk",
-    "pandas",
-    "pillow",
-    "psycopg2-binary",
-    "pymongo",
-    "pypdf",
-    "pythainlp",
-    "redis",
-    "requests",
-    "scikit-learn",
-    "scipy",
-    "sentencepiece",
-    "sqlite-vec",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -224,6 +188,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -261,6 +226,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -299,6 +265,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -337,6 +304,86 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
+    "tqdm",
+    "transformers",
+    "tree_sitter",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
+  "kvant": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "chromadb-client",
+    "datasets",
+    "emoji",
+    "faiss-cpu",
+    "fastapi",
+    "fire",
+    "httpx",
+    "langdetect",
+    "matplotlib",
+    "mcp",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "pythainlp",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "sqlalchemy[asyncio]",
+    "tqdm",
+    "transformers",
+    "tree_sitter",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
+  "llama_api": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "chromadb-client",
+    "datasets",
+    "emoji",
+    "fastapi",
+    "fire",
+    "httpx",
+    "langdetect",
+    "litellm",
+    "matplotlib",
+    "mcp",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "pythainlp",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "sqlalchemy[asyncio]",
+    "sqlite-vec",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -380,6 +427,7 @@
     "scipy",
     "sentence-transformers",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "torch",
     "torchao==0.8.0",
     "torchvision",
@@ -394,6 +442,7 @@
     "aiosqlite",
     "blobfile",
     "chardet",
+    "datasets",
     "faiss-cpu",
     "fastapi",
     "fire",
@@ -414,6 +463,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "uvicorn"
@@ -441,6 +491,7 @@
     "opentelemetry-exporter-otlp-proto-http",
     "opentelemetry-sdk",
     "pandas",
+    "peft",
     "pillow",
     "psycopg2-binary",
     "pymongo",
@@ -451,9 +502,12 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
+    "torch",
     "tqdm",
     "transformers",
     "tree_sitter",
+    "trl",
     "uvicorn"
   ],
   "open-benchmark": [
@@ -487,6 +541,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "sqlite-vec",
     "together",
     "tqdm",
@@ -525,6 +580,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -563,6 +619,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -579,10 +636,11 @@
     "fastapi",
     "fire",
     "httpx",
+    "litellm",
     "matplotlib",
+    "mcp",
     "nltk",
     "numpy",
-    "openai",
     "opentelemetry-exporter-otlp-proto-http",
     "opentelemetry-sdk",
     "pandas",
@@ -595,9 +653,53 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
-    "uvicorn"
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
+  ],
+  "starter": [
+    "aiosqlite",
+    "autoevals",
+    "blobfile",
+    "chardet",
+    "chromadb-client",
+    "datasets",
+    "emoji",
+    "fastapi",
+    "fire",
+    "fireworks-ai",
+    "httpx",
+    "langdetect",
+    "litellm",
+    "matplotlib",
+    "mcp",
+    "nltk",
+    "numpy",
+    "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
+    "pandas",
+    "pillow",
+    "psycopg2-binary",
+    "pymongo",
+    "pypdf",
+    "pythainlp",
+    "redis",
+    "requests",
+    "scikit-learn",
+    "scipy",
+    "sentencepiece",
+    "sqlalchemy[asyncio]",
+    "sqlite-vec",
+    "tqdm",
+    "transformers",
+    "tree_sitter",
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ],
   "tgi": [
     "aiohttp",
@@ -632,6 +734,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -670,6 +773,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "together",
     "tqdm",
     "transformers",
@@ -709,6 +813,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "sqlite-vec",
     "tqdm",
     "transformers",
@@ -748,6 +853,7 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
@@ -787,9 +893,12 @@
     "scikit-learn",
     "scipy",
     "sentencepiece",
+    "sqlalchemy[asyncio]",
     "tqdm",
     "transformers",
     "tree_sitter",
-    "uvicorn"
+    "uvicorn",
+    "sentence-transformers --no-deps",
+    "torch torchvision --index-url https://download.pytorch.org/whl/cpu"
   ]
 }
diff --git a/llama_stack/templates/experimental-post-training/build.yaml b/llama_stack/templates/experimental-post-training/build.yaml
index b4b5e2203..55cd189c6 100644
--- a/llama_stack/templates/experimental-post-training/build.yaml
+++ b/llama_stack/templates/experimental-post-training/build.yaml
@@ -13,9 +13,10 @@ distribution_spec:
     - inline::basic
     - inline::braintrust
     post_training:
-    - inline::torchtune
+    - inline::huggingface
     datasetio:
     - inline::localfs
+    - remote::huggingface
     telemetry:
     - inline::meta-reference
     agents:
diff --git a/llama_stack/templates/experimental-post-training/run.yaml b/llama_stack/templates/experimental-post-training/run.yaml
index 2ebdfe1aa..393cba41d 100644
--- a/llama_stack/templates/experimental-post-training/run.yaml
+++ b/llama_stack/templates/experimental-post-training/run.yaml
@@ -49,16 +49,24 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/experimental-post-training}/localfs_datasetio.db
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/huggingface}/huggingface_datasetio.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config: {}
   post_training:
-  - provider_id: torchtune-post-training
-    provider_type: inline::torchtune
-    config: {
+  - provider_id: huggingface
+    provider_type: inline::huggingface
+    config:
       checkpoint_format: huggingface
-    }
+      distributed_backend: null
+      device: cpu
   agents:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
diff --git a/llama_stack/templates/fireworks/build.yaml b/llama_stack/templates/fireworks/build.yaml
index 3907eba78..be19181c0 100644
--- a/llama_stack/templates/fireworks/build.yaml
+++ b/llama_stack/templates/fireworks/build.yaml
@@ -28,7 +28,9 @@ distribution_spec:
     - remote::brave-search
     - remote::tavily-search
     - remote::wolfram-alpha
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py
index 449f18bf7..da68475e2 100644
--- a/llama_stack/templates/fireworks/fireworks.py
+++ b/llama_stack/templates/fireworks/fireworks.py
@@ -40,7 +40,6 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::brave-search",
             "remote::tavily-search",
             "remote::wolfram-alpha",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -90,10 +89,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/fireworks/report.md b/llama_stack/templates/fireworks/report.md
deleted file mode 100644
index 2c1ccc943..000000000
--- a/llama_stack/templates/fireworks/report.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Report for fireworks distribution
-
-## Supported Models
-| Model Descriptor | fireworks |
-|:---|:---|
-| Llama-3-8B-Instruct | ❌ |
-| Llama-3-70B-Instruct | ❌ |
-| Llama3.1-8B-Instruct | ✅ |
-| Llama3.1-70B-Instruct | ✅ |
-| Llama3.1-405B-Instruct | ✅ |
-| Llama3.2-1B-Instruct | ✅ |
-| Llama3.2-3B-Instruct | ✅ |
-| Llama3.2-11B-Vision-Instruct | ✅ |
-| Llama3.2-90B-Vision-Instruct | ✅ |
-| Llama3.3-70B-Instruct | ✅ |
-| Llama-Guard-3-11B-Vision | ✅ |
-| Llama-Guard-3-1B | ❌ |
-| Llama-Guard-3-8B | ✅ |
-| Llama-Guard-2-8B | ❌ |
-
-## Inference
-| Model | API | Capability | Test | Status |
-|:----- |:-----|:-----|:-----|:-----|
-| Llama-3.1-8B-Instruct | /chat_completion | streaming | test_text_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | streaming | test_image_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | non_streaming | test_image_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | non_streaming | test_text_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_non_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | log_probs | test_completion_log_probs_non_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | log_probs | test_completion_log_probs_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | streaming | test_text_completion_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | non_streaming | test_text_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | structured_output | test_text_completion_structured_output | ✅ |
-
-## Vector IO
-| Provider | API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|:-----|
-| inline::faiss | /retrieve |  | test_vector_db_retrieve | ✅ |
-
-## Agents
-| Provider | API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|:-----|
-| inline::meta-reference | /create_agent_turn | rag | test_rag_agent | ✅ |
-| inline::meta-reference | /create_agent_turn | custom_tool | test_custom_tool | ✅ |
-| inline::meta-reference | /create_agent_turn | code_execution | test_code_interpreter_for_attachments | ✅ |
diff --git a/llama_stack/templates/fireworks/run-with-safety.yaml b/llama_stack/templates/fireworks/run-with-safety.yaml
index aa6209db6..41500f6f6 100644
--- a/llama_stack/templates/fireworks/run-with-safety.yaml
+++ b/llama_stack/templates/fireworks/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/fireworks/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -102,9 +105,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -114,6 +114,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/inference_store.db
 models:
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
@@ -255,7 +258,5 @@ tool_groups:
   provider_id: wolfram-alpha
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/fireworks/run.yaml b/llama_stack/templates/fireworks/run.yaml
index 834ec8260..b1fa03306 100644
--- a/llama_stack/templates/fireworks/run.yaml
+++ b/llama_stack/templates/fireworks/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/fireworks/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -97,9 +100,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -109,6 +109,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/inference_store.db
 models:
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
@@ -245,7 +248,5 @@ tool_groups:
   provider_id: wolfram-alpha
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/groq/build.yaml b/llama_stack/templates/groq/build.yaml
index 3263ce83b..819df22f0 100644
--- a/llama_stack/templates/groq/build.yaml
+++ b/llama_stack/templates/groq/build.yaml
@@ -24,6 +24,8 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/groq/groq.py b/llama_stack/templates/groq/groq.py
index 7999f95cb..4e52aa42d 100644
--- a/llama_stack/templates/groq/groq.py
+++ b/llama_stack/templates/groq/groq.py
@@ -33,7 +33,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
         ],
     }
@@ -72,10 +71,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/groq/run.yaml b/llama_stack/templates/groq/run.yaml
index 444452dcb..db7ebffee 100644
--- a/llama_stack/templates/groq/run.yaml
+++ b/llama_stack/templates/groq/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/groq/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,15 +96,15 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/groq}/inference_store.db
 models:
 - metadata: {}
   model_id: groq/llama3-8b-8192
@@ -203,7 +206,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-endpoint/build.yaml b/llama_stack/templates/hf-endpoint/build.yaml
index c2eaaa05b..8ede83694 100644
--- a/llama_stack/templates/hf-endpoint/build.yaml
+++ b/llama_stack/templates/hf-endpoint/build.yaml
@@ -26,7 +26,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/hf-endpoint/hf_endpoint.py b/llama_stack/templates/hf-endpoint/hf_endpoint.py
index 53dc9d38f..69e037299 100644
--- a/llama_stack/templates/hf-endpoint/hf_endpoint.py
+++ b/llama_stack/templates/hf-endpoint/hf_endpoint.py
@@ -32,7 +32,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -79,10 +78,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/hf-endpoint/run-with-safety.yaml b/llama_stack/templates/hf-endpoint/run-with-safety.yaml
index 14753e08b..15cf2a47f 100644
--- a/llama_stack/templates/hf-endpoint/run-with-safety.yaml
+++ b/llama_stack/templates/hf-endpoint/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-endpoint/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -98,9 +101,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -110,6 +110,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -135,7 +138,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-endpoint/run.yaml b/llama_stack/templates/hf-endpoint/run.yaml
index 706ba9122..428edf9a2 100644
--- a/llama_stack/templates/hf-endpoint/run.yaml
+++ b/llama_stack/templates/hf-endpoint/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-endpoint/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,9 +96,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -105,6 +105,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -125,7 +128,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-serverless/build.yaml b/llama_stack/templates/hf-serverless/build.yaml
index c0cc1e2c2..d0752db9a 100644
--- a/llama_stack/templates/hf-serverless/build.yaml
+++ b/llama_stack/templates/hf-serverless/build.yaml
@@ -27,7 +27,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/hf-serverless/hf_serverless.py b/llama_stack/templates/hf-serverless/hf_serverless.py
index ad8a72012..ecfe2a167 100644
--- a/llama_stack/templates/hf-serverless/hf_serverless.py
+++ b/llama_stack/templates/hf-serverless/hf_serverless.py
@@ -32,7 +32,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -80,10 +79,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/hf-serverless/run-with-safety.yaml b/llama_stack/templates/hf-serverless/run-with-safety.yaml
index bf26fe507..ab461c6c3 100644
--- a/llama_stack/templates/hf-serverless/run-with-safety.yaml
+++ b/llama_stack/templates/hf-serverless/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-serverless/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -98,9 +101,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -110,6 +110,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -135,7 +138,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/hf-serverless/run.yaml b/llama_stack/templates/hf-serverless/run.yaml
index cc973b8de..d238506fb 100644
--- a/llama_stack/templates/hf-serverless/run.yaml
+++ b/llama_stack/templates/hf-serverless/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-serverless/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,9 +96,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -105,6 +105,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -125,7 +128,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/kvant/__init__.py b/llama_stack/templates/kvant/__init__.py
new file mode 100644
index 000000000..61706f7f6
--- /dev/null
+++ b/llama_stack/templates/kvant/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .kvant import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/kvant/build.yaml b/llama_stack/templates/kvant/build.yaml
new file mode 100644
index 000000000..25afc1f4d
--- /dev/null
+++ b/llama_stack/templates/kvant/build.yaml
@@ -0,0 +1,35 @@
+version: '2'
+distribution_spec:
+  description: distribution for kvant cloud
+  providers:
+    inference:
+    - remote::vllm
+    - inline::sentence-transformers
+    vector_io:
+    - inline::faiss
+    - remote::chromadb
+    - remote::pgvector
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+    eval:
+    - inline::meta-reference
+    datasetio:
+    - remote::huggingface
+    - inline::localfs
+    scoring:
+    - inline::basic
+    - inline::llm-as-judge
+    - inline::braintrust
+    tool_runtime:
+    - remote::brave-search
+    - remote::tavily-search
+    - remote::wolfram-alpha
+    - inline::rag-runtime
+    - remote::model-context-protocol
+image_type: conda
+additional_pip_packages:
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/kvant/kvant.py b/llama_stack/templates/kvant/kvant.py
new file mode 100644
index 000000000..44cfc7016
--- /dev/null
+++ b/llama_stack/templates/kvant/kvant.py
@@ -0,0 +1,136 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pathlib import Path
+
+from llama_stack.apis.models.models import ModelType
+from llama_stack.distribution.datatypes import (
+    ModelInput,
+    Provider,
+    ShieldInput,
+    ToolGroupInput,
+)
+from llama_stack.providers.inline.inference.sentence_transformers import (
+    SentenceTransformersInferenceConfig,
+)
+from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
+from llama_stack.providers.remote.inference.passthrough.config import (
+    PassthroughImplConfig,
+)
+from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
+from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
+
+
+def get_distribution_template() -> DistributionTemplate:
+    providers = {
+        "inference": ["remote::openai", "inline::sentence-transformers"],
+        "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+        "eval": ["inline::meta-reference"],
+        "datasetio": ["remote::huggingface", "inline::localfs"],
+        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
+        "tool_runtime": [
+            "remote::brave-search",
+            "remote::tavily-search",
+            "remote::wolfram-alpha",
+            "inline::rag-runtime",
+            "remote::model-context-protocol",
+        ],
+    }
+
+    name = "kvant"
+
+    inference_provider = Provider(
+        provider_id="openai",
+        provider_type="remote::openai",
+        config=PassthroughImplConfig.sample_run_config(),
+    )
+    embedding_provider = Provider(
+        provider_id="sentence-transformers",
+        provider_type="inline::sentence-transformers",
+        config=SentenceTransformersInferenceConfig.sample_run_config(),
+    )
+    vector_io_provider = Provider(
+        provider_id="faiss",
+        provider_type="inline::faiss",
+        config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
+    )
+
+    default_models = [
+        ModelInput(
+            metadata={},
+            model_id="inference-llama4-maverick",
+            provider_id="openai",
+            provider_model_id="inference-llama4-maverick",
+            model_type=ModelType.llm,
+        ),
+    ]
+
+    embedding_model = ModelInput(
+        model_id="all-MiniLM-L6-v2",
+        provider_id="sentence-transformers",
+        model_type=ModelType.embedding,
+        metadata={
+            "embedding_dimension": 384,
+        },
+    )
+    default_tool_groups = [
+        ToolGroupInput(
+            toolgroup_id="builtin::websearch",
+            provider_id="tavily-search",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::wolfram_alpha",
+            provider_id="wolfram-alpha",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::rag",
+            provider_id="rag-runtime",
+        ),
+    ]
+
+    return DistributionTemplate(
+        name=name,
+        distro_type="self_hosted",
+        description="Use Passthrough hosted llama-stack endpoint for LLM inference",
+        container_image=None,
+        providers=providers,
+        available_models_by_provider={
+            "openai": [
+                ProviderModelEntry(
+                    provider_model_id="inference-llama4-maverick",
+                    model_type=ModelType.llm,
+                ),
+            ],
+        },
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": [inference_provider, embedding_provider],
+                    "vector_io": [vector_io_provider],
+                },
+                default_models=default_models + [embedding_model],
+                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
+                default_tool_groups=default_tool_groups,
+            ),
+        },
+        run_config_env_vars={
+            "LLAMA_STACK_PORT": (
+                "8321",
+                "Port for the Llama Stack distribution server",
+            ),
+            "OPENAI_API_KEY": (
+                "",
+                "kvant maas API Key",
+            ),
+            "OPENAI_BASE_URL": (
+                "https://maas.kvant.cloud",
+                "kvant maas URL",
+            ),
+        },
+    )
diff --git a/llama_stack/templates/kvant/run.yaml b/llama_stack/templates/kvant/run.yaml
new file mode 100644
index 000000000..99fb6f7fa
--- /dev/null
+++ b/llama_stack/templates/kvant/run.yaml
@@ -0,0 +1,170 @@
+version: '2'
+image_name: kvant
+apis:
+- agents
+- datasetio
+- eval
+- inference
+- safety
+- scoring
+- telemetry
+- tool_runtime
+- vector_io
+providers:
+  inference:
+  - provider_id: kvant
+    provider_type: remote::vllm
+    config:
+      url: ${env.VLLM_URL:https://maas.ai-2.kvant.cloud/v1}
+      max_tokens: ${env.VLLM_MAX_TOKENS:400000}
+      api_token: ${env.VLLM_API_TOKEN:fake}
+      tls_verify: ${env.VLLM_TLS_VERIFY:true}
+  - provider_id: sentence-transformers
+    provider_type: inline::sentence-transformers
+    config: {}
+  vector_io:
+  - provider_id: faiss
+    provider_type: inline::faiss
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/faiss_store.db
+  safety:
+  - provider_id: llama-guard
+    provider_type: inline::llama-guard
+    config:
+      excluded_categories: []
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/responses_store.db
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/trace_store.db
+  eval:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/meta_reference_eval.db
+  datasetio:
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/huggingface_datasetio.db
+  - provider_id: localfs
+    provider_type: inline::localfs
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/localfs_datasetio.db
+  scoring:
+  - provider_id: basic
+    provider_type: inline::basic
+    config: {}
+  - provider_id: llm-as-judge
+    provider_type: inline::llm-as-judge
+    config: {}
+  - provider_id: braintrust
+    provider_type: inline::braintrust
+    config:
+      openai_api_key: ${env.OPENAI_API_KEY:}
+  tool_runtime:
+  - provider_id: brave-search
+    provider_type: remote::brave-search
+    config:
+      api_key: ${env.BRAVE_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: tavily-search
+    provider_type: remote::tavily-search
+    config:
+      api_key: ${env.TAVILY_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: wolfram-alpha
+    provider_type: remote::wolfram-alpha
+    config:
+      api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
+  - provider_id: rag-runtime
+    provider_type: inline::rag-runtime
+    config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
+metadata_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/kvant}/inference_store.db
+models:
+- metadata: {}
+  model_id: Llama-4-Maverick-17B-128E-Instruct-FP8
+  provider_id: kvant
+  provider_model_id: inference-llama4-maverick
+  model_type: llm
+- metadata:
+    embedding_dimension: 1024
+    context_length: 8192
+  model_id: inference-bge-m3
+  provider_id: kvant
+  model_type: embedding
+- metadata:
+    embedding_dimension: 384
+  model_id: all-MiniLM-L6-v2
+  provider_id: sentence-transformers
+  model_type: embedding
+shields:
+- shield_id: meta-llama/Llama-Guard-3-8B
+vector_dbs: []
+# - vector_db_id: test-bge
+#   embedding_model: inference-bge-m3
+#   embedding_dimension: 1024
+#   provider_id: faiss
+# - vector_db_id: test-MiniLM-L6-v2
+#   embedding_model: all-MiniLM-L6-v2
+#   embedding_dimension: 384
+#   provider_id: faiss
+datasets: []
+scoring_fns: []
+benchmarks: []
+tool_groups:
+- toolgroup_id: builtin::websearch
+  provider_id: tavily-search
+- toolgroup_id: builtin::wolfram_alpha
+  provider_id: wolfram-alpha
+- toolgroup_id: builtin::rag
+  provider_id: rag-runtime
+server:
+  port: 8321
+  auth:
+    provider_type: "oauth2_token"
+    config:
+      jwks:
+      introspection:
+        url: ${env.KEYCLOAK_INTROSPECT:https://iam.phoenix-systems.ch/realms/kvant/protocol/openid-connect/token/introspect}
+        client_id: ${env.KEYCLOAK_CLIENT_ID:llama-stack}
+        client_secret: ${env.KEYCLOAK_CLIENT_SECRET}
+      claims_mapping:
+        sub: projects
+        scope: roles
+        #groups: teams
+        customer/id: teams
+        aud: namespaces
diff --git a/llama_stack/templates/llama_api/__init__.py b/llama_stack/templates/llama_api/__init__.py
new file mode 100644
index 000000000..57cc75730
--- /dev/null
+++ b/llama_stack/templates/llama_api/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .llama_api import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/dev/build.yaml b/llama_stack/templates/llama_api/build.yaml
similarity index 83%
rename from llama_stack/templates/dev/build.yaml
rename to llama_stack/templates/llama_api/build.yaml
index 726ebccca..857e5f014 100644
--- a/llama_stack/templates/dev/build.yaml
+++ b/llama_stack/templates/llama_api/build.yaml
@@ -3,11 +3,7 @@ distribution_spec:
   description: Distribution for running e2e tests in CI
   providers:
     inference:
-    - remote::openai
-    - remote::fireworks
-    - remote::anthropic
-    - remote::gemini
-    - remote::groq
+    - remote::llama-openai-compat
     - inline::sentence-transformers
     vector_io:
     - inline::sqlite-vec
@@ -31,7 +27,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/llama_api/llama_api.py b/llama_stack/templates/llama_api/llama_api.py
new file mode 100644
index 000000000..b4641b9da
--- /dev/null
+++ b/llama_stack/templates/llama_api/llama_api.py
@@ -0,0 +1,153 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from llama_stack.apis.models.models import ModelType
+from llama_stack.distribution.datatypes import (
+    ModelInput,
+    Provider,
+    ShieldInput,
+    ToolGroupInput,
+)
+from llama_stack.providers.inline.inference.sentence_transformers import (
+    SentenceTransformersInferenceConfig,
+)
+from llama_stack.providers.inline.vector_io.sqlite_vec.config import (
+    SQLiteVectorIOConfig,
+)
+from llama_stack.providers.remote.inference.llama_openai_compat.config import (
+    LlamaCompatConfig,
+)
+from llama_stack.providers.remote.inference.llama_openai_compat.models import (
+    MODEL_ENTRIES as LLAMA_MODEL_ENTRIES,
+)
+from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
+from llama_stack.providers.remote.vector_io.pgvector.config import (
+    PGVectorVectorIOConfig,
+)
+from llama_stack.templates.template import (
+    DistributionTemplate,
+    RunConfigSettings,
+    get_model_registry,
+)
+
+
+def get_inference_providers() -> tuple[list[Provider], list[ModelInput]]:
+    # in this template, we allow each API key to be optional
+    providers = [
+        (
+            "llama-openai-compat",
+            LLAMA_MODEL_ENTRIES,
+            LlamaCompatConfig.sample_run_config(api_key="${env.LLAMA_API_KEY:}"),
+        ),
+    ]
+    inference_providers = []
+    available_models = {}
+    for provider_id, model_entries, config in providers:
+        inference_providers.append(
+            Provider(
+                provider_id=provider_id,
+                provider_type=f"remote::{provider_id}",
+                config=config,
+            )
+        )
+        available_models[provider_id] = model_entries
+    return inference_providers, available_models
+
+
+def get_distribution_template() -> DistributionTemplate:
+    inference_providers, available_models = get_inference_providers()
+    providers = {
+        "inference": ([p.provider_type for p in inference_providers] + ["inline::sentence-transformers"]),
+        "vector_io": ["inline::sqlite-vec", "remote::chromadb", "remote::pgvector"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+        "eval": ["inline::meta-reference"],
+        "datasetio": ["remote::huggingface", "inline::localfs"],
+        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
+        "tool_runtime": [
+            "remote::brave-search",
+            "remote::tavily-search",
+            "inline::rag-runtime",
+            "remote::model-context-protocol",
+        ],
+    }
+    name = "llama_api"
+
+    vector_io_providers = [
+        Provider(
+            provider_id="sqlite-vec",
+            provider_type="inline::sqlite-vec",
+            config=SQLiteVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
+        ),
+        Provider(
+            provider_id="${env.ENABLE_CHROMADB+chromadb}",
+            provider_type="remote::chromadb",
+            config=ChromaVectorIOConfig.sample_run_config(url="${env.CHROMADB_URL:}"),
+        ),
+        Provider(
+            provider_id="${env.ENABLE_PGVECTOR+pgvector}",
+            provider_type="remote::pgvector",
+            config=PGVectorVectorIOConfig.sample_run_config(
+                db="${env.PGVECTOR_DB:}",
+                user="${env.PGVECTOR_USER:}",
+                password="${env.PGVECTOR_PASSWORD:}",
+            ),
+        ),
+    ]
+    embedding_provider = Provider(
+        provider_id="sentence-transformers",
+        provider_type="inline::sentence-transformers",
+        config=SentenceTransformersInferenceConfig.sample_run_config(),
+    )
+
+    default_tool_groups = [
+        ToolGroupInput(
+            toolgroup_id="builtin::websearch",
+            provider_id="tavily-search",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::rag",
+            provider_id="rag-runtime",
+        ),
+    ]
+    embedding_model = ModelInput(
+        model_id="all-MiniLM-L6-v2",
+        provider_id=embedding_provider.provider_id,
+        model_type=ModelType.embedding,
+        metadata={
+            "embedding_dimension": 384,
+        },
+    )
+
+    default_models = get_model_registry(available_models)
+    return DistributionTemplate(
+        name=name,
+        distro_type="self_hosted",
+        description="Distribution for running e2e tests in CI",
+        container_image=None,
+        template_path=None,
+        providers=providers,
+        available_models_by_provider=available_models,
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": inference_providers + [embedding_provider],
+                    "vector_io": vector_io_providers,
+                },
+                default_models=default_models + [embedding_model],
+                default_tool_groups=default_tool_groups,
+                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
+            ),
+        },
+        run_config_env_vars={
+            "LLAMA_STACK_PORT": (
+                "8321",
+                "Port for the Llama Stack distribution server",
+            ),
+        },
+    )
diff --git a/llama_stack/templates/llama_api/run.yaml b/llama_stack/templates/llama_api/run.yaml
new file mode 100644
index 000000000..a7f2b0769
--- /dev/null
+++ b/llama_stack/templates/llama_api/run.yaml
@@ -0,0 +1,168 @@
+version: '2'
+image_name: llama_api
+apis:
+- agents
+- datasetio
+- eval
+- inference
+- safety
+- scoring
+- telemetry
+- tool_runtime
+- vector_io
+providers:
+  inference:
+  - provider_id: llama-openai-compat
+    provider_type: remote::llama-openai-compat
+    config:
+      openai_compat_api_base: https://api.llama.com/compat/v1/
+      api_key: ${env.LLAMA_API_KEY:}
+  - provider_id: sentence-transformers
+    provider_type: inline::sentence-transformers
+    config: {}
+  vector_io:
+  - provider_id: sqlite-vec
+    provider_type: inline::sqlite-vec
+    config:
+      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/sqlite_vec.db
+  - provider_id: ${env.ENABLE_CHROMADB+chromadb}
+    provider_type: remote::chromadb
+    config:
+      url: ${env.CHROMADB_URL:}
+  - provider_id: ${env.ENABLE_PGVECTOR+pgvector}
+    provider_type: remote::pgvector
+    config:
+      host: ${env.PGVECTOR_HOST:localhost}
+      port: ${env.PGVECTOR_PORT:5432}
+      db: ${env.PGVECTOR_DB:}
+      user: ${env.PGVECTOR_USER:}
+      password: ${env.PGVECTOR_PASSWORD:}
+  safety:
+  - provider_id: llama-guard
+    provider_type: inline::llama-guard
+    config:
+      excluded_categories: []
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/responses_store.db
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/trace_store.db
+  eval:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/meta_reference_eval.db
+  datasetio:
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/huggingface_datasetio.db
+  - provider_id: localfs
+    provider_type: inline::localfs
+    config:
+      kvstore:
+        type: sqlite
+        namespace: null
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/localfs_datasetio.db
+  scoring:
+  - provider_id: basic
+    provider_type: inline::basic
+    config: {}
+  - provider_id: llm-as-judge
+    provider_type: inline::llm-as-judge
+    config: {}
+  - provider_id: braintrust
+    provider_type: inline::braintrust
+    config:
+      openai_api_key: ${env.OPENAI_API_KEY:}
+  tool_runtime:
+  - provider_id: brave-search
+    provider_type: remote::brave-search
+    config:
+      api_key: ${env.BRAVE_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: tavily-search
+    provider_type: remote::tavily-search
+    config:
+      api_key: ${env.TAVILY_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: rag-runtime
+    provider_type: inline::rag-runtime
+    config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
+metadata_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/llama_api}/inference_store.db
+models:
+- metadata: {}
+  model_id: Llama-3.3-70B-Instruct
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-3.3-70B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.3-70B-Instruct
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-3.3-70B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: Llama-4-Scout-17B-16E-Instruct-FP8
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-4-Scout-17B-16E-Instruct-FP8
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-4-Scout-17B-16E-Instruct-FP8
+  model_type: llm
+- metadata: {}
+  model_id: Llama-4-Maverick-17B-128E-Instruct-FP8
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-4-Maverick-17B-128E-Instruct-FP8
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: llama-openai-compat
+  provider_model_id: Llama-4-Maverick-17B-128E-Instruct-FP8
+  model_type: llm
+- metadata:
+    embedding_dimension: 384
+  model_id: all-MiniLM-L6-v2
+  provider_id: sentence-transformers
+  model_type: embedding
+shields:
+- shield_id: meta-llama/Llama-Guard-3-8B
+vector_dbs: []
+datasets: []
+scoring_fns: []
+benchmarks: []
+tool_groups:
+- toolgroup_id: builtin::websearch
+  provider_id: tavily-search
+- toolgroup_id: builtin::rag
+  provider_id: rag-runtime
+server:
+  port: 8321
diff --git a/llama_stack/templates/meta-reference-gpu/build.yaml b/llama_stack/templates/meta-reference-gpu/build.yaml
index b9130fc7d..53ad411e3 100644
--- a/llama_stack/templates/meta-reference-gpu/build.yaml
+++ b/llama_stack/templates/meta-reference-gpu/build.yaml
@@ -26,7 +26,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/meta-reference-gpu/meta_reference.py b/llama_stack/templates/meta-reference-gpu/meta_reference.py
index 8ba9fadca..95d126095 100644
--- a/llama_stack/templates/meta-reference-gpu/meta_reference.py
+++ b/llama_stack/templates/meta-reference-gpu/meta_reference.py
@@ -36,7 +36,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -86,10 +85,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
index 63177ab09..2b751a514 100644
--- a/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
+++ b/llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
@@ -56,13 +56,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/meta-reference-gpu/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -108,9 +111,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -120,6 +120,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -145,7 +148,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/meta-reference-gpu/run.yaml b/llama_stack/templates/meta-reference-gpu/run.yaml
index 380d83060..a24c5fec5 100644
--- a/llama_stack/templates/meta-reference-gpu/run.yaml
+++ b/llama_stack/templates/meta-reference-gpu/run.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/meta-reference-gpu/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -98,9 +101,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -110,6 +110,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -130,7 +133,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/nvidia/build.yaml b/llama_stack/templates/nvidia/build.yaml
index a33fa3737..6bd8a0100 100644
--- a/llama_stack/templates/nvidia/build.yaml
+++ b/llama_stack/templates/nvidia/build.yaml
@@ -18,8 +18,12 @@ distribution_spec:
     - remote::nvidia
     datasetio:
     - inline::localfs
+    - remote::nvidia
     scoring:
     - inline::basic
     tool_runtime:
     - inline::rag-runtime
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/nvidia/doc_template.md b/llama_stack/templates/nvidia/doc_template.md
index 068dd7ac3..50c96802f 100644
--- a/llama_stack/templates/nvidia/doc_template.md
+++ b/llama_stack/templates/nvidia/doc_template.md
@@ -116,7 +116,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
 ```
diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py
index 463c13879..bfd004037 100644
--- a/llama_stack/templates/nvidia/nvidia.py
+++ b/llama_stack/templates/nvidia/nvidia.py
@@ -7,6 +7,7 @@
 from pathlib import Path
 
 from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput, ToolGroupInput
+from llama_stack.providers.remote.datasetio.nvidia import NvidiaDatasetIOConfig
 from llama_stack.providers.remote.eval.nvidia import NVIDIAEvalConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
@@ -23,7 +24,7 @@ def get_distribution_template() -> DistributionTemplate:
         "telemetry": ["inline::meta-reference"],
         "eval": ["remote::nvidia"],
         "post_training": ["remote::nvidia"],
-        "datasetio": ["inline::localfs"],
+        "datasetio": ["inline::localfs", "remote::nvidia"],
         "scoring": ["inline::basic"],
         "tool_runtime": ["inline::rag-runtime"],
     }
@@ -38,6 +39,11 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="remote::nvidia",
         config=NVIDIASafetyConfig.sample_run_config(),
     )
+    datasetio_provider = Provider(
+        provider_id="nvidia",
+        provider_type="remote::nvidia",
+        config=NvidiaDatasetIOConfig.sample_run_config(),
+    )
     eval_provider = Provider(
         provider_id="nvidia",
         provider_type="remote::nvidia",
@@ -75,6 +81,7 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
+                    "datasetio": [datasetio_provider],
                     "eval": [eval_provider],
                 },
                 default_models=default_models,
diff --git a/llama_stack/templates/nvidia/run-with-safety.yaml b/llama_stack/templates/nvidia/run-with-safety.yaml
index a3e5fefa4..c431e12f2 100644
--- a/llama_stack/templates/nvidia/run-with-safety.yaml
+++ b/llama_stack/templates/nvidia/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/nvidia/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/trace_store.db
   eval:
   - provider_id: nvidia
     provider_type: remote::nvidia
@@ -74,6 +77,13 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/localfs_datasetio.db
+  - provider_id: nvidia
+    provider_type: remote::nvidia
+    config:
+      api_key: ${env.NVIDIA_API_KEY:}
+      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
+      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
+      datasets_url: ${env.NVIDIA_DATASETS_URL:http://nemo.test}
   scoring:
   - provider_id: basic
     provider_type: inline::basic
@@ -85,6 +95,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
diff --git a/llama_stack/templates/nvidia/run.yaml b/llama_stack/templates/nvidia/run.yaml
index 271ce1a16..5b244081d 100644
--- a/llama_stack/templates/nvidia/run.yaml
+++ b/llama_stack/templates/nvidia/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/nvidia/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/trace_store.db
   eval:
   - provider_id: nvidia
     provider_type: remote::nvidia
@@ -62,13 +65,13 @@ providers:
       project_id: ${env.NVIDIA_PROJECT_ID:test-project}
       customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:http://nemo.test}
   datasetio:
-  - provider_id: localfs
-    provider_type: inline::localfs
+  - provider_id: nvidia
+    provider_type: remote::nvidia
     config:
-      kvstore:
-        type: sqlite
-        namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/localfs_datasetio.db
+      api_key: ${env.NVIDIA_API_KEY:}
+      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
+      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
+      datasets_url: ${env.NVIDIA_DATASETS_URL:http://nemo.test}
   scoring:
   - provider_id: basic
     provider_type: inline::basic
@@ -80,6 +83,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/inference_store.db
 models:
 - metadata: {}
   model_id: meta/llama3-8b-instruct
diff --git a/llama_stack/templates/ollama/build.yaml b/llama_stack/templates/ollama/build.yaml
index 37b72fc1f..36a120897 100644
--- a/llama_stack/templates/ollama/build.yaml
+++ b/llama_stack/templates/ollama/build.yaml
@@ -23,11 +23,15 @@ distribution_spec:
     - inline::basic
     - inline::llm-as-judge
     - inline::braintrust
+    post_training:
+    - inline::huggingface
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
     - remote::wolfram-alpha
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/ollama/doc_template.md b/llama_stack/templates/ollama/doc_template.md
index f961ab7ed..aaa65bab2 100644
--- a/llama_stack/templates/ollama/doc_template.md
+++ b/llama_stack/templates/ollama/doc_template.md
@@ -86,7 +86,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/ollama/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env SAFETY_MODEL=$SAFETY_MODEL \
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index d9f0960a2..0b4f05128 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -13,6 +13,7 @@ from llama_stack.distribution.datatypes import (
     ShieldInput,
     ToolGroupInput,
 )
+from llama_stack.providers.inline.post_training.huggingface import HuggingFacePostTrainingConfig
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -28,10 +29,10 @@ def get_distribution_template() -> DistributionTemplate:
         "eval": ["inline::meta-reference"],
         "datasetio": ["remote::huggingface", "inline::localfs"],
         "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
+        "post_training": ["inline::huggingface"],
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
             "remote::wolfram-alpha",
@@ -48,7 +49,11 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="inline::faiss",
         config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
     )
-
+    posttraining_provider = Provider(
+        provider_id="huggingface",
+        provider_type="inline::huggingface",
+        config=HuggingFacePostTrainingConfig.sample_run_config(f"~/.llama/distributions/{name}"),
+    )
     inference_model = ModelInput(
         model_id="${env.INFERENCE_MODEL}",
         provider_id="ollama",
@@ -75,10 +80,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
         ToolGroupInput(
             toolgroup_id="builtin::wolfram_alpha",
             provider_id="wolfram-alpha",
@@ -97,6 +98,7 @@ def get_distribution_template() -> DistributionTemplate:
                 provider_overrides={
                     "inference": [inference_provider],
                     "vector_io": [vector_io_provider_faiss],
+                    "post_training": [posttraining_provider],
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
@@ -105,6 +107,7 @@ def get_distribution_template() -> DistributionTemplate:
                 provider_overrides={
                     "inference": [inference_provider],
                     "vector_io": [vector_io_provider_faiss],
+                    "post_training": [posttraining_provider],
                     "safety": [
                         Provider(
                             provider_id="llama-guard",
diff --git a/llama_stack/templates/ollama/report.md b/llama_stack/templates/ollama/report.md
deleted file mode 100644
index 724809a59..000000000
--- a/llama_stack/templates/ollama/report.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Report for ollama distribution
-
-## Supported Models
-| Model Descriptor | ollama |
-|:---|:---|
-| Llama-3-8B-Instruct | ❌ |
-| Llama-3-70B-Instruct | ❌ |
-| Llama3.1-8B-Instruct | ✅ |
-| Llama3.1-70B-Instruct | ✅ |
-| Llama3.1-405B-Instruct | ✅ |
-| Llama3.2-1B-Instruct | ✅ |
-| Llama3.2-3B-Instruct | ✅ |
-| Llama3.2-11B-Vision-Instruct | ✅ |
-| Llama3.2-90B-Vision-Instruct | ✅ |
-| Llama3.3-70B-Instruct | ✅ |
-| Llama-Guard-3-11B-Vision | ❌ |
-| Llama-Guard-3-1B | ✅ |
-| Llama-Guard-3-8B | ✅ |
-| Llama-Guard-2-8B | ❌ |
-
-## Inference
-| Model | API | Capability | Test | Status |
-|:----- |:-----|:-----|:-----|:-----|
-| Llama-3.1-8B-Instruct | /chat_completion | streaming | test_text_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | streaming | test_image_chat_completion_streaming | ❌ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | non_streaming | test_image_chat_completion_non_streaming | ❌ |
-| Llama-3.1-8B-Instruct | /chat_completion | non_streaming | test_text_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | streaming | test_text_completion_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | non_streaming | test_text_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | structured_output | test_text_completion_structured_output | ✅ |
-
-## Vector IO
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /retrieve |  | test_vector_db_retrieve | ✅ |
-
-## Agents
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /create_agent_turn | rag | test_rag_agent | ✅ |
-| /create_agent_turn | custom_tool | test_custom_tool | ✅ |
-| /create_agent_turn | code_execution | test_code_interpreter_for_attachments | ✅ |
diff --git a/llama_stack/templates/ollama/run-with-safety.yaml b/llama_stack/templates/ollama/run-with-safety.yaml
index b43fec6db..d63c5e366 100644
--- a/llama_stack/templates/ollama/run-with-safety.yaml
+++ b/llama_stack/templates/ollama/run-with-safety.yaml
@@ -5,6 +5,7 @@ apis:
 - datasetio
 - eval
 - inference
+- post_training
 - safety
 - scoring
 - telemetry
@@ -39,13 +40,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ollama/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -80,6 +84,13 @@ providers:
     provider_type: inline::braintrust
     config:
       openai_api_key: ${env.OPENAI_API_KEY:}
+  post_training:
+  - provider_id: huggingface
+    provider_type: inline::huggingface
+    config:
+      checkpoint_format: huggingface
+      distributed_backend: null
+      device: cpu
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -91,9 +102,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -107,6 +115,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -136,8 +147,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/ollama/run.yaml b/llama_stack/templates/ollama/run.yaml
index c8f4ad9ad..d208cd7f0 100644
--- a/llama_stack/templates/ollama/run.yaml
+++ b/llama_stack/templates/ollama/run.yaml
@@ -5,6 +5,7 @@ apis:
 - datasetio
 - eval
 - inference
+- post_training
 - safety
 - scoring
 - telemetry
@@ -37,13 +38,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ollama/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -78,6 +82,13 @@ providers:
     provider_type: inline::braintrust
     config:
       openai_api_key: ${env.OPENAI_API_KEY:}
+  post_training:
+  - provider_id: huggingface
+    provider_type: inline::huggingface
+    config:
+      checkpoint_format: huggingface
+      distributed_backend: null
+      device: cpu
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -89,9 +100,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -105,6 +113,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -126,8 +137,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/open-benchmark/build.yaml b/llama_stack/templates/open-benchmark/build.yaml
index 1db90ef27..840f1e1db 100644
--- a/llama_stack/templates/open-benchmark/build.yaml
+++ b/llama_stack/templates/open-benchmark/build.yaml
@@ -30,7 +30,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/open-benchmark/open_benchmark.py b/llama_stack/templates/open-benchmark/open_benchmark.py
index a6a906c6f..d944d4eff 100644
--- a/llama_stack/templates/open-benchmark/open_benchmark.py
+++ b/llama_stack/templates/open-benchmark/open_benchmark.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict, List, Tuple
 
 from llama_stack.apis.datasets import DatasetPurpose, URIDataSource
 from llama_stack.apis.models.models import ModelType
@@ -36,7 +35,7 @@ from llama_stack.templates.template import (
 )
 
 
-def get_inference_providers() -> Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]:
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
@@ -108,7 +107,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -146,10 +144,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     default_models = get_model_registry(available_models) + [
diff --git a/llama_stack/templates/open-benchmark/run.yaml b/llama_stack/templates/open-benchmark/run.yaml
index 5e908b081..0e5edf728 100644
--- a/llama_stack/templates/open-benchmark/run.yaml
+++ b/llama_stack/templates/open-benchmark/run.yaml
@@ -64,13 +64,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/open-benchmark/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -116,9 +119,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -128,6 +128,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/inference_store.db
 models:
 - metadata: {}
   model_id: openai/gpt-4o
@@ -242,7 +245,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/passthrough/build.yaml b/llama_stack/templates/passthrough/build.yaml
index fb1fb1066..46b99cb75 100644
--- a/llama_stack/templates/passthrough/build.yaml
+++ b/llama_stack/templates/passthrough/build.yaml
@@ -28,7 +28,9 @@ distribution_spec:
     - remote::brave-search
     - remote::tavily-search
     - remote::wolfram-alpha
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/passthrough/passthrough.py b/llama_stack/templates/passthrough/passthrough.py
index 8454e49cf..6a30625c5 100644
--- a/llama_stack/templates/passthrough/passthrough.py
+++ b/llama_stack/templates/passthrough/passthrough.py
@@ -38,7 +38,6 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::brave-search",
             "remote::tavily-search",
             "remote::wolfram-alpha",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -100,10 +99,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/passthrough/run-with-safety.yaml b/llama_stack/templates/passthrough/run-with-safety.yaml
index 8ab6b1081..bbf5d9a52 100644
--- a/llama_stack/templates/passthrough/run-with-safety.yaml
+++ b/llama_stack/templates/passthrough/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/passthrough/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -102,9 +105,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -114,6 +114,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/inference_store.db
 models:
 - metadata: {}
   model_id: meta-llama/Llama-3.1-8B-Instruct
@@ -148,7 +151,5 @@ tool_groups:
   provider_id: wolfram-alpha
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/passthrough/run.yaml b/llama_stack/templates/passthrough/run.yaml
index 53e8c8857..146906d9b 100644
--- a/llama_stack/templates/passthrough/run.yaml
+++ b/llama_stack/templates/passthrough/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/passthrough/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -97,9 +100,6 @@ providers:
     provider_type: remote::wolfram-alpha
     config:
       api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -109,6 +109,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/passthrough}/inference_store.db
 models:
 - metadata: {}
   model_id: meta-llama/Llama-3.1-8B-Instruct
@@ -138,7 +141,5 @@ tool_groups:
   provider_id: wolfram-alpha
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/postgres-demo/__init__.py b/llama_stack/templates/postgres-demo/__init__.py
new file mode 100644
index 000000000..81473cb73
--- /dev/null
+++ b/llama_stack/templates/postgres-demo/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .postgres_demo import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/postgres-demo/build.yaml b/llama_stack/templates/postgres-demo/build.yaml
new file mode 100644
index 000000000..8f3648abe
--- /dev/null
+++ b/llama_stack/templates/postgres-demo/build.yaml
@@ -0,0 +1,24 @@
+version: '2'
+distribution_spec:
+  description: Quick start template for running Llama Stack with several popular providers
+  providers:
+    inference:
+    - remote::fireworks
+    - remote::vllm
+    vector_io:
+    - remote::chromadb
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+    tool_runtime:
+    - remote::brave-search
+    - remote::tavily-search
+    - inline::rag-runtime
+    - remote::model-context-protocol
+image_type: conda
+additional_pip_packages:
+- asyncpg
+- sqlalchemy[asyncio]
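The postgres-demo build lists `asyncpg` and `sqlalchemy[asyncio]` because its metadata and inference stores are SQL-backed over SQLAlchemy's asyncio engine. A minimal sketch of that combination is below; the DSN simply mirrors the `POSTGRES_*` defaults used in the postgres-demo run.yaml and is only a placeholder.

```python
# Minimal sketch of the async SQLAlchemy + asyncpg pairing these extra pip
# packages enable; the connection string is a placeholder matching the
# POSTGRES_* defaults, not a required setup.
import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine

async def main() -> None:
    engine = create_async_engine(
        "postgresql+asyncpg://llamastack:llamastack@localhost:5432/llamastack"
    )
    async with engine.connect() as conn:
        result = await conn.execute(text("SELECT 1"))
        print(result.scalar())  # -> 1
    await engine.dispose()

asyncio.run(main())
```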
diff --git a/llama_stack/templates/postgres-demo/postgres_demo.py b/llama_stack/templates/postgres-demo/postgres_demo.py
new file mode 100644
index 000000000..d2e352320
--- /dev/null
+++ b/llama_stack/templates/postgres-demo/postgres_demo.py
@@ -0,0 +1,164 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from llama_stack.distribution.datatypes import (
+    ModelInput,
+    Provider,
+    ShieldInput,
+    ToolGroupInput,
+)
+from llama_stack.providers.remote.inference.fireworks.config import FireworksImplConfig
+from llama_stack.providers.remote.inference.fireworks.models import (
+    MODEL_ENTRIES as FIREWORKS_MODEL_ENTRIES,
+)
+from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
+from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
+from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
+from llama_stack.providers.utils.kvstore.config import PostgresKVStoreConfig
+from llama_stack.providers.utils.sqlstore.sqlstore import PostgresSqlStoreConfig
+from llama_stack.templates.template import (
+    DistributionTemplate,
+    RunConfigSettings,
+    get_model_registry,
+)
+
+
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
+    # in this template, we allow each API key to be optional
+    providers = [
+        (
+            "fireworks",
+            FIREWORKS_MODEL_ENTRIES,
+            FireworksImplConfig.sample_run_config(api_key="${env.FIREWORKS_API_KEY:}"),
+        ),
+    ]
+    inference_providers = []
+    available_models = {}
+    for provider_id, model_entries, config in providers:
+        inference_providers.append(
+            Provider(
+                provider_id=provider_id,
+                provider_type=f"remote::{provider_id}",
+                config=config,
+            )
+        )
+        available_models[provider_id] = model_entries
+    inference_providers.append(
+        Provider(
+            provider_id="vllm-inference",
+            provider_type="remote::vllm",
+            config=VLLMInferenceAdapterConfig.sample_run_config(
+                url="${env.VLLM_URL:http://localhost:8000/v1}",
+            ),
+        )
+    )
+    return inference_providers, available_models
+
+
+def get_distribution_template() -> DistributionTemplate:
+    inference_providers, available_models = get_inference_providers()
+    providers = {
+        "inference": ([p.provider_type for p in inference_providers]),
+        "vector_io": ["remote::chromadb"],
+        "safety": ["inline::llama-guard"],
+        "agents": ["inline::meta-reference"],
+        "telemetry": ["inline::meta-reference"],
+        "tool_runtime": [
+            "remote::brave-search",
+            "remote::tavily-search",
+            "inline::rag-runtime",
+            "remote::model-context-protocol",
+        ],
+    }
+    name = "postgres-demo"
+
+    vector_io_providers = [
+        Provider(
+            provider_id="${env.ENABLE_CHROMADB+chromadb}",
+            provider_type="remote::chromadb",
+            config=ChromaVectorIOConfig.sample_run_config(url="${env.CHROMADB_URL:}"),
+        ),
+    ]
+    default_tool_groups = [
+        ToolGroupInput(
+            toolgroup_id="builtin::websearch",
+            provider_id="tavily-search",
+        ),
+        ToolGroupInput(
+            toolgroup_id="builtin::rag",
+            provider_id="rag-runtime",
+        ),
+    ]
+
+    default_models = get_model_registry(available_models)
+    default_models.append(
+        ModelInput(
+            model_id="${env.INFERENCE_MODEL}",
+            provider_id="vllm-inference",
+        )
+    )
+    postgres_config = {
+        "type": "postgres",
+        "host": "${env.POSTGRES_HOST:localhost}",
+        "port": "${env.POSTGRES_PORT:5432}",
+        "db": "${env.POSTGRES_DB:llamastack}",
+        "user": "${env.POSTGRES_USER:llamastack}",
+        "password": "${env.POSTGRES_PASSWORD:llamastack}",
+    }
+
+    return DistributionTemplate(
+        name=name,
+        distro_type="self_hosted",
+        description="Quick start template for running Llama Stack with several popular providers",
+        container_image=None,
+        template_path=None,
+        providers=providers,
+        available_models_by_provider=available_models,
+        run_configs={
+            "run.yaml": RunConfigSettings(
+                provider_overrides={
+                    "inference": inference_providers,
+                    "vector_io": vector_io_providers,
+                    "agents": [
+                        Provider(
+                            provider_id="meta-reference",
+                            provider_type="inline::meta-reference",
+                            config=dict(
+                                persistence_store=postgres_config,
+                                responses_store=postgres_config,
+                            ),
+                        )
+                    ],
+                    "telemetry": [
+                        Provider(
+                            provider_id="meta-reference",
+                            provider_type="inline::meta-reference",
+                            config=dict(
+                                service_name="${env.OTEL_SERVICE_NAME:}",
+                                sinks="${env.TELEMETRY_SINKS:console}",
+                            ),
+                        )
+                    ],
+                },
+                default_models=default_models,
+                default_tool_groups=default_tool_groups,
+                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
+                metadata_store=PostgresKVStoreConfig.model_validate(postgres_config),
+                inference_store=PostgresSqlStoreConfig.model_validate(postgres_config),
+            ),
+        },
+        run_config_env_vars={
+            "LLAMA_STACK_PORT": (
+                "8321",
+                "Port for the Llama Stack distribution server",
+            ),
+            "FIREWORKS_API_KEY": (
+                "",
+                "Fireworks API Key",
+            ),
+        },
+    )
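Note that postgres_demo.py feeds the single `postgres_config` dict into both `PostgresKVStoreConfig.model_validate` and `PostgresSqlStoreConfig.model_validate`, so one set of connection settings backs both the metadata store and the inference store. The sketch below shows that Pydantic validation step with a simplified stand-in model; the real config classes may define different fields.

```python
# Simplified stand-in for the Postgres store configs, only to illustrate the
# model_validate() call used in the template; not the real
# PostgresKVStoreConfig / PostgresSqlStoreConfig classes.
from pydantic import BaseModel

class DemoPostgresConfig(BaseModel):
    type: str = "postgres"
    host: str
    port: str
    db: str
    user: str
    password: str

postgres_config = {
    "type": "postgres",
    "host": "${env.POSTGRES_HOST:localhost}",
    "port": "${env.POSTGRES_PORT:5432}",
    "db": "${env.POSTGRES_DB:llamastack}",
    "user": "${env.POSTGRES_USER:llamastack}",
    "password": "${env.POSTGRES_PASSWORD:llamastack}",
}

# One dict, validated twice in the template (metadata_store and inference_store).
config = DemoPostgresConfig.model_validate(postgres_config)
print(config.host)  # placeholder string, resolved from the environment at run time
```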
diff --git a/llama_stack/templates/postgres-demo/run.yaml b/llama_stack/templates/postgres-demo/run.yaml
new file mode 100644
index 000000000..889b8eaa7
--- /dev/null
+++ b/llama_stack/templates/postgres-demo/run.yaml
@@ -0,0 +1,224 @@
+version: '2'
+image_name: postgres-demo
+apis:
+- agents
+- inference
+- safety
+- telemetry
+- tool_runtime
+- vector_io
+providers:
+  inference:
+  - provider_id: fireworks
+    provider_type: remote::fireworks
+    config:
+      url: https://api.fireworks.ai/inference/v1
+      api_key: ${env.FIREWORKS_API_KEY:}
+  - provider_id: vllm-inference
+    provider_type: remote::vllm
+    config:
+      url: ${env.VLLM_URL:http://localhost:8000/v1}
+      max_tokens: ${env.VLLM_MAX_TOKENS:4096}
+      api_token: ${env.VLLM_API_TOKEN:fake}
+      tls_verify: ${env.VLLM_TLS_VERIFY:true}
+  vector_io:
+  - provider_id: ${env.ENABLE_CHROMADB+chromadb}
+    provider_type: remote::chromadb
+    config:
+      url: ${env.CHROMADB_URL:}
+  safety:
+  - provider_id: llama-guard
+    provider_type: inline::llama-guard
+    config:
+      excluded_categories: []
+  agents:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      persistence_store:
+        type: postgres
+        host: ${env.POSTGRES_HOST:localhost}
+        port: ${env.POSTGRES_PORT:5432}
+        db: ${env.POSTGRES_DB:llamastack}
+        user: ${env.POSTGRES_USER:llamastack}
+        password: ${env.POSTGRES_PASSWORD:llamastack}
+      responses_store:
+        type: postgres
+        host: ${env.POSTGRES_HOST:localhost}
+        port: ${env.POSTGRES_PORT:5432}
+        db: ${env.POSTGRES_DB:llamastack}
+        user: ${env.POSTGRES_USER:llamastack}
+        password: ${env.POSTGRES_PASSWORD:llamastack}
+  telemetry:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:}
+      sinks: ${env.TELEMETRY_SINKS:console}
+  tool_runtime:
+  - provider_id: brave-search
+    provider_type: remote::brave-search
+    config:
+      api_key: ${env.BRAVE_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: tavily-search
+    provider_type: remote::tavily-search
+    config:
+      api_key: ${env.TAVILY_SEARCH_API_KEY:}
+      max_results: 3
+  - provider_id: rag-runtime
+    provider_type: inline::rag-runtime
+    config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
+metadata_store:
+  type: postgres
+  host: ${env.POSTGRES_HOST:localhost}
+  port: ${env.POSTGRES_PORT:5432}
+  db: ${env.POSTGRES_DB:llamastack}
+  user: ${env.POSTGRES_USER:llamastack}
+  password: ${env.POSTGRES_PASSWORD:llamastack}
+  table_name: llamastack_kvstore
+inference_store:
+  type: postgres
+  host: ${env.POSTGRES_HOST:localhost}
+  port: ${env.POSTGRES_PORT:5432}
+  db: ${env.POSTGRES_DB:llamastack}
+  user: ${env.POSTGRES_USER:llamastack}
+  password: ${env.POSTGRES_PASSWORD:llamastack}
+models:
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-8B-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p1-70b-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-70B-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-3B-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-v3p3-70b-instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.3-70B-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-guard-3-8b
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-guard-3-8b
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-8B
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-guard-3-8b
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama-guard-3-11b-vision
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-11B-Vision
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama4-scout-instruct-basic
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama4-scout-instruct-basic
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama4-scout-instruct-basic
+  model_type: llm
+- metadata: {}
+  model_id: accounts/fireworks/models/llama4-maverick-instruct-basic
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama4-maverick-instruct-basic
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: fireworks
+  provider_model_id: accounts/fireworks/models/llama4-maverick-instruct-basic
+  model_type: llm
+- metadata:
+    embedding_dimension: 768
+    context_length: 8192
+  model_id: nomic-ai/nomic-embed-text-v1.5
+  provider_id: fireworks
+  provider_model_id: nomic-ai/nomic-embed-text-v1.5
+  model_type: embedding
+- metadata: {}
+  model_id: ${env.INFERENCE_MODEL}
+  provider_id: vllm-inference
+  model_type: llm
+shields:
+- shield_id: meta-llama/Llama-Guard-3-8B
+vector_dbs: []
+datasets: []
+scoring_fns: []
+benchmarks: []
+tool_groups:
+- toolgroup_id: builtin::websearch
+  provider_id: tavily-search
+- toolgroup_id: builtin::rag
+  provider_id: rag-runtime
+server:
+  port: 8321
diff --git a/llama_stack/templates/remote-vllm/build.yaml b/llama_stack/templates/remote-vllm/build.yaml
index b2bbf853a..16fe5d4fd 100644
--- a/llama_stack/templates/remote-vllm/build.yaml
+++ b/llama_stack/templates/remote-vllm/build.yaml
@@ -27,8 +27,10 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
     - remote::wolfram-alpha
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/remote-vllm/doc_template.md b/llama_stack/templates/remote-vllm/doc_template.md
index 3cede6080..5684888da 100644
--- a/llama_stack/templates/remote-vllm/doc_template.md
+++ b/llama_stack/templates/remote-vllm/doc_template.md
@@ -220,7 +220,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./llama_stack/templates/remote-vllm/run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1
@@ -242,7 +242,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/remote-vllm/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env VLLM_URL=http://host.docker.internal:$INFERENCE_PORT/v1 \
diff --git a/llama_stack/templates/remote-vllm/run-with-safety.yaml b/llama_stack/templates/remote-vllm/run-with-safety.yaml
index bb69496aa..e83162a4f 100644
--- a/llama_stack/templates/remote-vllm/run-with-safety.yaml
+++ b/llama_stack/templates/remote-vllm/run-with-safety.yaml
@@ -50,6 +50,9 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/responses_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -88,9 +91,9 @@ providers:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/trace_store.db
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -102,9 +105,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -118,6 +118,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -143,8 +146,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/remote-vllm/run.yaml b/llama_stack/templates/remote-vllm/run.yaml
index 14f2da37e..4cdf88c6b 100644
--- a/llama_stack/templates/remote-vllm/run.yaml
+++ b/llama_stack/templates/remote-vllm/run.yaml
@@ -43,6 +43,9 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/responses_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -81,9 +84,9 @@ providers:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/trace_store.db
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -95,9 +98,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -111,6 +111,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -131,8 +134,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/remote-vllm/vllm.py b/llama_stack/templates/remote-vllm/vllm.py
index 0f6c7659e..2782a3ea0 100644
--- a/llama_stack/templates/remote-vllm/vllm.py
+++ b/llama_stack/templates/remote-vllm/vllm.py
@@ -34,7 +34,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
             "remote::wolfram-alpha",
@@ -84,10 +83,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
         ToolGroupInput(
             toolgroup_id="builtin::wolfram_alpha",
             provider_id="wolfram-alpha",
diff --git a/llama_stack/templates/sambanova/build.yaml b/llama_stack/templates/sambanova/build.yaml
index ca5ffe618..14b1c8974 100644
--- a/llama_stack/templates/sambanova/build.yaml
+++ b/llama_stack/templates/sambanova/build.yaml
@@ -1,15 +1,16 @@
 version: '2'
 distribution_spec:
-  description: Use SambaNova.AI for running LLM inference
+  description: Use SambaNova for running LLM inference and safety
   providers:
     inference:
     - remote::sambanova
+    - inline::sentence-transformers
     vector_io:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
     safety:
-    - inline::llama-guard
+    - remote::sambanova
     agents:
     - inline::meta-reference
     telemetry:
@@ -17,6 +18,10 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
+    - remote::model-context-protocol
+    - remote::wolfram-alpha
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/sambanova/doc_template.md b/llama_stack/templates/sambanova/doc_template.md
index 42d9efb66..1dc76fd3f 100644
--- a/llama_stack/templates/sambanova/doc_template.md
+++ b/llama_stack/templates/sambanova/doc_template.md
@@ -37,33 +37,44 @@ The following models are available by default:
 
 ### Prerequisite: API Keys
 
-Make sure you have access to a SambaNova API Key. You can get one by visiting [SambaNova.ai](https://sambanova.ai/).
+Make sure you have access to a SambaNova API Key. You can get one by visiting [SambaNova.ai](http://cloud.sambanova.ai?utm_source=llamastack&utm_medium=external&utm_campaign=cloud_signup).
 
 
 ## Running Llama Stack with SambaNova
 
 You can do this via Conda (build code) or Docker which has a pre-built image.
 
-### Via Docker
 
-This method allows you to get started quickly without having to build the distribution code.
+### Via Docker
 
 ```bash
 LLAMA_STACK_PORT=8321
+llama stack build --template sambanova --image-type container
 docker run \
   -it \
-  --pull always \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-  llamastack/distribution-{{ name }} \
+  -v ~/.llama:/root/.llama \
+  distribution-{{ name }} \
   --port $LLAMA_STACK_PORT \
   --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
 ```
 
+
+### Via Venv
+
+```bash
+llama stack build --template sambanova --image-type venv
+llama stack run --image-type venv ~/.llama/distributions/sambanova/sambanova-run.yaml \
+  --port $LLAMA_STACK_PORT \
+  --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
+```
+
+
 ### Via Conda
 
 ```bash
 llama stack build --template sambanova --image-type conda
-llama stack run ./run.yaml \
+llama stack run --image-type conda ~/.llama/distributions/sambanova/sambanova-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env SAMBANOVA_API_KEY=$SAMBANOVA_API_KEY
 ```
diff --git a/llama_stack/templates/sambanova/run.yaml b/llama_stack/templates/sambanova/run.yaml
index e4e8e4e21..8c2a933ab 100644
--- a/llama_stack/templates/sambanova/run.yaml
+++ b/llama_stack/templates/sambanova/run.yaml
@@ -14,6 +14,9 @@ providers:
     config:
       url: https://api.sambanova.ai/v1
       api_key: ${env.SAMBANOVA_API_KEY}
+  - provider_id: sentence-transformers
+    provider_type: inline::sentence-transformers
+    config: {}
   vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
@@ -35,10 +38,11 @@ providers:
       user: ${env.PGVECTOR_USER:}
       password: ${env.PGVECTOR_PASSWORD:}
   safety:
-  - provider_id: llama-guard
-    provider_type: inline::llama-guard
+  - provider_id: sambanova
+    provider_type: remote::sambanova
     config:
-      excluded_categories: []
+      url: https://api.sambanova.ai/v1
+      api_key: ${env.SAMBANOVA_API_KEY}
   agents:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -47,13 +51,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/sambanova/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/trace_store.db
   tool_runtime:
   - provider_id: brave-search
     provider_type: remote::brave-search
@@ -65,118 +72,133 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
+  - provider_id: wolfram-alpha
+    provider_type: remote::wolfram-alpha
+    config:
+      api_key: ${env.WOLFRAM_ALPHA_API_KEY:}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/inference_store.db
 models:
 - metadata: {}
-  model_id: Meta-Llama-3.1-8B-Instruct
+  model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-8B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.1-8B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-8B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.1-70B-Instruct
+  model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-70B-Instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.1-70B-Instruct
-  provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-70B-Instruct
-  model_type: llm
-- metadata: {}
-  model_id: Meta-Llama-3.1-405B-Instruct
-  provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-405B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.1-405B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.2-1B-Instruct
+  model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.2-1B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-1B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.2-1B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.2-3B-Instruct
+  model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.2-3B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-3B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.2-3B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.3-70B-Instruct
+  model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.3-70B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.3-70B-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-3.3-70B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Llama-3.2-11B-Vision-Instruct
+  model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   provider_id: sambanova
-  provider_model_id: Llama-3.2-11B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
   provider_id: sambanova
-  provider_model_id: Llama-3.2-11B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Llama-3.2-90B-Vision-Instruct
+  model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   provider_id: sambanova
-  provider_model_id: Llama-3.2-90B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
   provider_id: sambanova
-  provider_model_id: Llama-3.2-90B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-Guard-3-8B
+  model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
   provider_id: sambanova
-  provider_model_id: Meta-Llama-Guard-3-8B
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-Guard-3-8B
-  provider_id: sambanova
-  provider_model_id: Meta-Llama-Guard-3-8B
-  model_type: llm
-- metadata: {}
-  model_id: Llama-4-Scout-17B-16E-Instruct
-  provider_id: sambanova
-  provider_model_id: Llama-4-Scout-17B-16E-Instruct
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
   provider_id: sambanova
-  provider_model_id: Llama-4-Scout-17B-16E-Instruct
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
   model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-Guard-3-8B
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-8B
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
+  model_type: llm
+- metadata:
+    embedding_dimension: 384
+  model_id: all-MiniLM-L6-v2
+  provider_id: sentence-transformers
+  model_type: embedding
 shields:
 - shield_id: meta-llama/Llama-Guard-3-8B
+  provider_shield_id: sambanova/Meta-Llama-Guard-3-8B
+- shield_id: sambanova/Meta-Llama-Guard-3-8B
+  provider_shield_id: sambanova/Meta-Llama-Guard-3-8B
 vector_dbs: []
 datasets: []
 scoring_fns: []
@@ -186,7 +208,7 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
+- toolgroup_id: builtin::wolfram_alpha
+  provider_id: wolfram-alpha
 server:
   port: 8321
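The SambaNova run config now registers the guard model under two shield IDs, both resolving to `sambanova/Meta-Llama-Guard-3-8B`. The snippet below is only an illustrative alias table for that mapping, not how Llama Stack stores its shield registry internally.

```python
# Illustrative alias table: both user-facing shield IDs resolve to the same
# provider-side guard model, mirroring the shields section above.
SHIELD_ALIASES = {
    "meta-llama/Llama-Guard-3-8B": "sambanova/Meta-Llama-Guard-3-8B",
    "sambanova/Meta-Llama-Guard-3-8B": "sambanova/Meta-Llama-Guard-3-8B",
}

def provider_shield_for(shield_id: str) -> str:
    """Map a user-facing shield_id to the provider_shield_id it runs on."""
    return SHIELD_ALIASES[shield_id]

assert provider_shield_for("meta-llama/Llama-Guard-3-8B") == "sambanova/Meta-Llama-Guard-3-8B"
```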
diff --git a/llama_stack/templates/sambanova/sambanova.py b/llama_stack/templates/sambanova/sambanova.py
index 8b91f8712..54a49423d 100644
--- a/llama_stack/templates/sambanova/sambanova.py
+++ b/llama_stack/templates/sambanova/sambanova.py
@@ -6,7 +6,16 @@
 
 from pathlib import Path
 
-from llama_stack.distribution.datatypes import Provider, ShieldInput, ToolGroupInput
+from llama_stack.apis.models.models import ModelType
+from llama_stack.distribution.datatypes import (
+    ModelInput,
+    Provider,
+    ShieldInput,
+    ToolGroupInput,
+)
+from llama_stack.providers.inline.inference.sentence_transformers import (
+    SentenceTransformersInferenceConfig,
+)
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
 from llama_stack.providers.remote.inference.sambanova.models import MODEL_ENTRIES
@@ -23,26 +32,38 @@ from llama_stack.templates.template import (
 
 def get_distribution_template() -> DistributionTemplate:
     providers = {
-        "inference": ["remote::sambanova"],
+        "inference": ["remote::sambanova", "inline::sentence-transformers"],
         "vector_io": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
-        "safety": ["inline::llama-guard"],
+        "safety": ["remote::sambanova"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
+            "remote::model-context-protocol",
+            "remote::wolfram-alpha",
         ],
     }
     name = "sambanova"
-
     inference_provider = Provider(
         provider_id=name,
         provider_type=f"remote::{name}",
         config=SambaNovaImplConfig.sample_run_config(),
     )
-
+    embedding_provider = Provider(
+        provider_id="sentence-transformers",
+        provider_type="inline::sentence-transformers",
+        config=SentenceTransformersInferenceConfig.sample_run_config(),
+    )
+    embedding_model = ModelInput(
+        model_id="all-MiniLM-L6-v2",
+        provider_id="sentence-transformers",
+        model_type=ModelType.embedding,
+        metadata={
+            "embedding_dimension": 384,
+        },
+    )
     vector_io_providers = [
         Provider(
             provider_id="faiss",
@@ -81,27 +102,35 @@ def get_distribution_template() -> DistributionTemplate:
             provider_id="rag-runtime",
         ),
         ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
+            toolgroup_id="builtin::wolfram_alpha",
+            provider_id="wolfram-alpha",
         ),
     ]
 
     return DistributionTemplate(
         name=name,
         distro_type="self_hosted",
-        description="Use SambaNova.AI for running LLM inference",
-        docker_image=None,
+        description="Use SambaNova for running LLM inference and safety",
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         available_models_by_provider=available_models,
         run_configs={
             "run.yaml": RunConfigSettings(
                 provider_overrides={
-                    "inference": [inference_provider],
+                    "inference": [inference_provider, embedding_provider],
                     "vector_io": vector_io_providers,
                 },
-                default_models=default_models,
-                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
+                default_models=default_models + [embedding_model],
+                default_shields=[
+                    ShieldInput(
+                        shield_id="meta-llama/Llama-Guard-3-8B", provider_shield_id="sambanova/Meta-Llama-Guard-3-8B"
+                    ),
+                    ShieldInput(
+                        shield_id="sambanova/Meta-Llama-Guard-3-8B",
+                        provider_shield_id="sambanova/Meta-Llama-Guard-3-8B",
+                    ),
+                ],
                 default_tool_groups=default_tool_groups,
             ),
         },
@@ -112,7 +141,7 @@ def get_distribution_template() -> DistributionTemplate:
             ),
             "SAMBANOVA_API_KEY": (
                 "",
-                "SambaNova.AI API Key",
+                "SambaNova API Key",
             ),
         },
     )
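Both the SambaNova and starter templates now register `all-MiniLM-L6-v2` via the inline sentence-transformers provider with `embedding_dimension: 384`. The quick check below confirms that dimension with the sentence-transformers package; it assumes the package is installed locally and is not part of the template code itself.

```python
# Sanity check of the 384-dimension metadata declared for all-MiniLM-L6-v2.
# Requires `pip install sentence-transformers`; illustrative only.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
embedding = model.encode("hello llama stack")
print(embedding.shape)  # -> (384,)
```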
diff --git a/llama_stack/templates/starter/__init__.py b/llama_stack/templates/starter/__init__.py
new file mode 100644
index 000000000..9c0d937ce
--- /dev/null
+++ b/llama_stack/templates/starter/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .starter import get_distribution_template  # noqa: F401
diff --git a/llama_stack/templates/starter/build.yaml b/llama_stack/templates/starter/build.yaml
new file mode 100644
index 000000000..ec97c7d3e
--- /dev/null
+++ b/llama_stack/templates/starter/build.yaml
@@ -0,0 +1,40 @@
+version: '2'
+distribution_spec:
+  description: Quick start template for running Llama Stack with several popular providers
+  providers:
+    inference:
+    - remote::openai
+    - remote::fireworks
+    - remote::anthropic
+    - remote::gemini
+    - remote::groq
+    - remote::sambanova
+    - inline::sentence-transformers
+    vector_io:
+    - inline::sqlite-vec
+    - remote::chromadb
+    - remote::pgvector
+    safety:
+    - inline::llama-guard
+    agents:
+    - inline::meta-reference
+    telemetry:
+    - inline::meta-reference
+    eval:
+    - inline::meta-reference
+    datasetio:
+    - remote::huggingface
+    - inline::localfs
+    scoring:
+    - inline::basic
+    - inline::llm-as-judge
+    - inline::braintrust
+    tool_runtime:
+    - remote::brave-search
+    - remote::tavily-search
+    - inline::rag-runtime
+    - remote::model-context-protocol
+image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/dev/run.yaml b/llama_stack/templates/starter/run.yaml
similarity index 66%
rename from llama_stack/templates/dev/run.yaml
rename to llama_stack/templates/starter/run.yaml
index 0dd056405..04425ed35 100644
--- a/llama_stack/templates/dev/run.yaml
+++ b/llama_stack/templates/starter/run.yaml
@@ -1,5 +1,5 @@
 version: '2'
-image_name: dev
+image_name: starter
 apis:
 - agents
 - datasetio
@@ -34,6 +34,11 @@ providers:
     config:
       url: https://api.groq.com
       api_key: ${env.GROQ_API_KEY:}
+  - provider_id: sambanova
+    provider_type: remote::sambanova
+    config:
+      url: https://api.sambanova.ai/v1
+      api_key: ${env.SAMBANOVA_API_KEY:}
   - provider_id: sentence-transformers
     provider_type: inline::sentence-transformers
     config: {}
@@ -41,7 +46,7 @@ providers:
   - provider_id: sqlite-vec
     provider_type: inline::sqlite-vec
     config:
-      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/sqlite_vec.db
+      db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/sqlite_vec.db
   - provider_id: ${env.ENABLE_CHROMADB+chromadb}
     provider_type: remote::chromadb
     config:
@@ -66,14 +71,17 @@ providers:
       persistence_store:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/agents_store.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/dev/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -81,7 +89,7 @@ providers:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/meta_reference_eval.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/meta_reference_eval.db
   datasetio:
   - provider_id: huggingface
     provider_type: remote::huggingface
@@ -89,14 +97,14 @@ providers:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/huggingface_datasetio.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/huggingface_datasetio.db
   - provider_id: localfs
     provider_type: inline::localfs
     config:
       kvstore:
         type: sqlite
         namespace: null
-        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/localfs_datasetio.db
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/localfs_datasetio.db
   scoring:
   - provider_id: basic
     provider_type: inline::basic
@@ -119,9 +127,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -130,7 +135,10 @@ providers:
     config: {}
 metadata_store:
   type: sqlite
-  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/dev}/registry.db
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/starter}/inference_store.db
 models:
 - metadata: {}
   model_id: openai/gpt-4o
@@ -147,6 +155,76 @@ models:
   provider_id: openai
   provider_model_id: openai/chatgpt-4o-latest
   model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo-0125
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo-0125
+  model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo
+  model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo-instruct
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo-instruct
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4
+  provider_id: openai
+  provider_model_id: gpt-4
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4-turbo
+  provider_id: openai
+  provider_model_id: gpt-4-turbo
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o
+  provider_id: openai
+  provider_model_id: gpt-4o
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-2024-08-06
+  provider_id: openai
+  provider_model_id: gpt-4o-2024-08-06
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-mini
+  provider_id: openai
+  provider_model_id: gpt-4o-mini
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-audio-preview
+  provider_id: openai
+  provider_model_id: gpt-4o-audio-preview
+  model_type: llm
+- metadata: {}
+  model_id: chatgpt-4o-latest
+  provider_id: openai
+  provider_model_id: chatgpt-4o-latest
+  model_type: llm
+- metadata: {}
+  model_id: o1
+  provider_id: openai
+  provider_model_id: o1
+  model_type: llm
+- metadata: {}
+  model_id: o1-mini
+  provider_id: openai
+  provider_model_id: o1-mini
+  model_type: llm
+- metadata: {}
+  model_id: o3-mini
+  provider_id: openai
+  provider_model_id: o3-mini
+  model_type: llm
+- metadata: {}
+  model_id: o4-mini
+  provider_id: openai
+  provider_model_id: o4-mini
+  model_type: llm
 - metadata:
     embedding_dimension: 1536
     context_length: 8192
@@ -161,6 +239,20 @@ models:
   provider_id: openai
   provider_model_id: openai/text-embedding-3-large
   model_type: embedding
+- metadata:
+    embedding_dimension: 1536
+    context_length: 8192
+  model_id: text-embedding-3-small
+  provider_id: openai
+  provider_model_id: text-embedding-3-small
+  model_type: embedding
+- metadata:
+    embedding_dimension: 3072
+    context_length: 8192
+  model_id: text-embedding-3-large
+  provider_id: openai
+  provider_model_id: text-embedding-3-large
+  model_type: embedding
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
   provider_id: fireworks
@@ -416,6 +508,106 @@ models:
   provider_id: groq
   provider_model_id: groq/meta-llama/llama-4-maverick-17b-128e-instruct
   model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-3.1-8B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-8B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-3.1-405B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-3.2-1B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-1B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-3.2-3B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-3B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-3.3-70B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.3-70B-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-3.2-11B-Vision-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-3.2-90B-Vision-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-Guard-3-8B
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-8B
+  provider_id: sambanova
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
+  model_type: llm
 - metadata:
     embedding_dimension: 384
   model_id: all-MiniLM-L6-v2
@@ -432,7 +624,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/dev/dev.py b/llama_stack/templates/starter/starter.py
similarity index 91%
rename from llama_stack/templates/dev/dev.py
rename to llama_stack/templates/starter/starter.py
index 69924acbe..0932bfdfe 100644
--- a/llama_stack/templates/dev/dev.py
+++ b/llama_stack/templates/starter/starter.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import List, Tuple
 
 from llama_stack.apis.models.models import ModelType
 from llama_stack.distribution.datatypes import (
@@ -39,10 +38,15 @@ from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
 from llama_stack.providers.remote.inference.openai.models import (
     MODEL_ENTRIES as OPENAI_MODEL_ENTRIES,
 )
+from llama_stack.providers.remote.inference.sambanova.config import SambaNovaImplConfig
+from llama_stack.providers.remote.inference.sambanova.models import (
+    MODEL_ENTRIES as SAMBANOVA_MODEL_ENTRIES,
+)
 from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
 from llama_stack.providers.remote.vector_io.pgvector.config import (
     PGVectorVectorIOConfig,
 )
+from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
 from llama_stack.templates.template import (
     DistributionTemplate,
     RunConfigSettings,
@@ -50,7 +54,7 @@ from llama_stack.templates.template import (
 )
 
 
-def get_inference_providers() -> Tuple[List[Provider], List[ModelInput]]:
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
@@ -78,6 +82,11 @@ def get_inference_providers() -> Tuple[List[Provider], List[ModelInput]]:
             GROQ_MODEL_ENTRIES,
             GroqConfig.sample_run_config(api_key="${env.GROQ_API_KEY:}"),
         ),
+        (
+            "sambanova",
+            SAMBANOVA_MODEL_ENTRIES,
+            SambaNovaImplConfig.sample_run_config(api_key="${env.SAMBANOVA_API_KEY:}"),
+        ),
     ]
     inference_providers = []
     available_models = {}
@@ -107,12 +116,11 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
     }
-    name = "dev"
+    name = "starter"
 
     vector_io_providers = [
         Provider(
@@ -150,10 +158,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
@@ -168,7 +172,7 @@ def get_distribution_template() -> DistributionTemplate:
     return DistributionTemplate(
         name=name,
         distro_type="self_hosted",
-        description="Distribution for running e2e tests in CI",
+        description="Quick start template for running Llama Stack with several popular providers",
         container_image=None,
         template_path=None,
         providers=providers,
diff --git a/llama_stack/templates/template.py b/llama_stack/templates/template.py
index 92b1b534d..4013f08f9 100644
--- a/llama_stack/templates/template.py
+++ b/llama_stack/templates/template.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from pathlib import Path
-from typing import Dict, List, Literal, Optional, Tuple
+from typing import Literal
 
 import jinja2
 import yaml
@@ -28,12 +28,13 @@ from llama_stack.distribution.datatypes import (
 from llama_stack.distribution.distribution import get_provider_registry
 from llama_stack.distribution.utils.dynamic import instantiate_class_type
 from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
+from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
+from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig
 
 
 def get_model_registry(
-    available_models: Dict[str, List[ProviderModelEntry]],
-) -> List[ModelInput]:
+    available_models: dict[str, list[ProviderModelEntry]],
+) -> list[ModelInput]:
     models = []
     for provider_id, entries in available_models.items():
         for entry in entries:
@@ -57,18 +58,20 @@ class DefaultModel(BaseModel):
 
 
 class RunConfigSettings(BaseModel):
-    provider_overrides: Dict[str, List[Provider]] = Field(default_factory=dict)
-    default_models: Optional[List[ModelInput]] = None
-    default_shields: Optional[List[ShieldInput]] = None
-    default_tool_groups: Optional[List[ToolGroupInput]] = None
-    default_datasets: Optional[List[DatasetInput]] = None
-    default_benchmarks: Optional[List[BenchmarkInput]] = None
+    provider_overrides: dict[str, list[Provider]] = Field(default_factory=dict)
+    default_models: list[ModelInput] | None = None
+    default_shields: list[ShieldInput] | None = None
+    default_tool_groups: list[ToolGroupInput] | None = None
+    default_datasets: list[DatasetInput] | None = None
+    default_benchmarks: list[BenchmarkInput] | None = None
+    metadata_store: KVStoreConfig | None = None
+    inference_store: SqlStoreConfig | None = None
 
     def run_config(
         self,
         name: str,
-        providers: Dict[str, List[str]],
-        container_image: Optional[str] = None,
+        providers: dict[str, list[str]],
+        container_image: str | None = None,
     ) -> StackRunConfig:
         provider_registry = get_provider_registry()
 
@@ -113,10 +116,16 @@ class RunConfigSettings(BaseModel):
             container_image=container_image,
             apis=apis,
             providers=provider_configs,
-            metadata_store=SqliteKVStoreConfig.sample_run_config(
+            metadata_store=self.metadata_store
+            or SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=f"~/.llama/distributions/{name}",
                 db_name="registry.db",
             ),
+            inference_store=self.inference_store
+            or SqliteSqlStoreConfig.sample_run_config(
+                __distro_dir__=f"~/.llama/distributions/{name}",
+                db_name="inference_store.db",
+            ),
             models=self.default_models or [],
             shields=self.default_shields or [],
             tool_groups=self.default_tool_groups or [],
@@ -135,25 +144,31 @@ class DistributionTemplate(BaseModel):
     description: str
     distro_type: Literal["self_hosted", "remote_hosted", "ondevice"]
 
-    providers: Dict[str, List[str]]
-    run_configs: Dict[str, RunConfigSettings]
-    template_path: Optional[Path] = None
+    providers: dict[str, list[str]]
+    run_configs: dict[str, RunConfigSettings]
+    template_path: Path | None = None
 
     # Optional configuration
-    run_config_env_vars: Optional[Dict[str, Tuple[str, str]]] = None
-    container_image: Optional[str] = None
+    run_config_env_vars: dict[str, tuple[str, str]] | None = None
+    container_image: str | None = None
 
-    available_models_by_provider: Optional[Dict[str, List[ProviderModelEntry]]] = None
+    available_models_by_provider: dict[str, list[ProviderModelEntry]] | None = None
 
     def build_config(self) -> BuildConfig:
+        additional_pip_packages: list[str] = []
+        for run_config in self.run_configs.values():
+            run_config_ = run_config.run_config(self.name, self.providers, self.container_image)
+            if run_config_.inference_store:
+                additional_pip_packages.extend(run_config_.inference_store.pip_packages)
+
         return BuildConfig(
-            name=self.name,
             distribution_spec=DistributionSpec(
                 description=self.description,
                 container_image=self.container_image,
                 providers=self.providers,
             ),
             image_type="conda",  # default to conda, can be overridden
+            additional_pip_packages=sorted(set(additional_pip_packages)),
         )
 
     def generate_markdown_docs(self) -> str:
diff --git a/llama_stack/templates/tgi/build.yaml b/llama_stack/templates/tgi/build.yaml
index 9fe79647c..361b0b680 100644
--- a/llama_stack/templates/tgi/build.yaml
+++ b/llama_stack/templates/tgi/build.yaml
@@ -27,7 +27,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/tgi/doc_template.md b/llama_stack/templates/tgi/doc_template.md
index b69ccaa56..68b475893 100644
--- a/llama_stack/templates/tgi/doc_template.md
+++ b/llama_stack/templates/tgi/doc_template.md
@@ -105,7 +105,7 @@ docker run \
   -v ~/.llama:/root/.llama \
   -v ./llama_stack/templates/tgi/run-with-safety.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env INFERENCE_MODEL=$INFERENCE_MODEL \
   --env TGI_URL=http://host.docker.internal:$INFERENCE_PORT \
diff --git a/llama_stack/templates/tgi/report.md b/llama_stack/templates/tgi/report.md
deleted file mode 100644
index b0f5d88a2..000000000
--- a/llama_stack/templates/tgi/report.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Report for tgi distribution
-
-## Supported Models
-| Model Descriptor | tgi |
-|:---|:---|
-| Llama-3-8B-Instruct | ✅ |
-| Llama-3-70B-Instruct | ✅ |
-| Llama3.1-8B-Instruct | ✅ |
-| Llama3.1-70B-Instruct | ✅ |
-| Llama3.1-405B-Instruct | ✅ |
-| Llama3.2-1B-Instruct | ✅ |
-| Llama3.2-3B-Instruct | ✅ |
-| Llama3.2-11B-Vision-Instruct | ✅ |
-| Llama3.2-90B-Vision-Instruct | ✅ |
-| Llama3.3-70B-Instruct | ✅ |
-| Llama-Guard-3-11B-Vision | ✅ |
-| Llama-Guard-3-1B | ✅ |
-| Llama-Guard-3-8B | ✅ |
-| Llama-Guard-2-8B | ✅ |
-
-## Inference
-| Model | API | Capability | Test | Status |
-|:----- |:-----|:-----|:-----|:-----|
-| Llama-3.1-8B-Instruct | /chat_completion | streaming | test_text_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | streaming | test_image_chat_completion_streaming | ❌ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | non_streaming | test_image_chat_completion_non_streaming | ❌ |
-| Llama-3.1-8B-Instruct | /chat_completion | non_streaming | test_text_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | streaming | test_text_completion_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | non_streaming | test_text_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | structured_output | test_text_completion_structured_output | ✅ |
-
-## Vector IO
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /retrieve |  | test_vector_db_retrieve | ✅ |
-
-## Agents
-| API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|
-| /create_agent_turn | rag | test_rag_agent | ✅ |
-| /create_agent_turn | custom_tool | test_custom_tool | ✅ |
-| /create_agent_turn | code_execution | test_code_interpreter_for_attachments | ✅ |
diff --git a/llama_stack/templates/tgi/run-with-safety.yaml b/llama_stack/templates/tgi/run-with-safety.yaml
index 12d6bd284..c797b93aa 100644
--- a/llama_stack/templates/tgi/run-with-safety.yaml
+++ b/llama_stack/templates/tgi/run-with-safety.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/tgi/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,9 +96,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -105,6 +105,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -125,7 +128,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/tgi/run.yaml b/llama_stack/templates/tgi/run.yaml
index 9f05c7584..7e91d20bd 100644
--- a/llama_stack/templates/tgi/run.yaml
+++ b/llama_stack/templates/tgi/run.yaml
@@ -40,13 +40,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/tgi/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -92,9 +95,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -104,6 +104,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -124,7 +127,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py
index 22dcc3995..2c97cbf80 100644
--- a/llama_stack/templates/tgi/tgi.py
+++ b/llama_stack/templates/tgi/tgi.py
@@ -34,7 +34,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -83,10 +82,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/together/build.yaml b/llama_stack/templates/together/build.yaml
index 834a3ecaf..5ffeac873 100644
--- a/llama_stack/templates/together/build.yaml
+++ b/llama_stack/templates/together/build.yaml
@@ -27,8 +27,10 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
     - remote::wolfram-alpha
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/together/report.md b/llama_stack/templates/together/report.md
deleted file mode 100644
index e125d5665..000000000
--- a/llama_stack/templates/together/report.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Report for together distribution
-
-## Supported Models
-| Model Descriptor | together |
-|:---|:---|
-| Llama-3-8B-Instruct | ❌ |
-| Llama-3-70B-Instruct | ❌ |
-| Llama3.1-8B-Instruct | ✅ |
-| Llama3.1-70B-Instruct | ✅ |
-| Llama3.1-405B-Instruct | ✅ |
-| Llama3.2-1B-Instruct | ❌ |
-| Llama3.2-3B-Instruct | ✅ |
-| Llama3.2-11B-Vision-Instruct | ✅ |
-| Llama3.2-90B-Vision-Instruct | ✅ |
-| Llama3.3-70B-Instruct | ✅ |
-| Llama-Guard-3-11B-Vision | ✅ |
-| Llama-Guard-3-1B | ❌ |
-| Llama-Guard-3-8B | ✅ |
-| Llama-Guard-2-8B | ❌ |
-
-## Inference
-| Model | API | Capability | Test | Status |
-|:----- |:-----|:-----|:-----|:-----|
-| Llama-3.1-8B-Instruct | /chat_completion | streaming | test_text_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | streaming | test_image_chat_completion_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | non_streaming | test_image_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | non_streaming | test_text_chat_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /chat_completion | tool_calling | test_text_chat_completion_with_tool_calling_and_non_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | log_probs | test_completion_log_probs_non_streaming | ✅ |
-| Llama-3.2-11B-Vision-Instruct | /chat_completion | log_probs | test_completion_log_probs_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | streaming | test_text_completion_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | non_streaming | test_text_completion_non_streaming | ✅ |
-| Llama-3.1-8B-Instruct | /completion | structured_output | test_text_completion_structured_output | ✅ |
-
-## Vector IO
-| Provider | API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|:-----|
-| inline::faiss | /retrieve |  | test_vector_db_retrieve | ✅ |
-
-## Agents
-| Provider | API | Capability | Test | Status |
-|:-----|:-----|:-----|:-----|:-----|
-| inline::meta-reference | /create_agent_turn | rag | test_rag_agent | ✅ |
-| inline::meta-reference | /create_agent_turn | custom_tool | test_custom_tool | ✅ |
-| inline::meta-reference | /create_agent_turn | code_execution | test_code_interpreter_for_attachments | ✅ |
diff --git a/llama_stack/templates/together/run-with-safety.yaml b/llama_stack/templates/together/run-with-safety.yaml
index 105ce896d..190a0400b 100644
--- a/llama_stack/templates/together/run-with-safety.yaml
+++ b/llama_stack/templates/together/run-with-safety.yaml
@@ -46,13 +46,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/together/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -98,9 +101,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -114,6 +114,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/inference_store.db
 models:
 - metadata: {}
   model_id: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
@@ -270,8 +273,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/together/run.yaml b/llama_stack/templates/together/run.yaml
index 1f1613655..ce9542130 100644
--- a/llama_stack/templates/together/run.yaml
+++ b/llama_stack/templates/together/run.yaml
@@ -41,13 +41,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/together/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -93,9 +96,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -109,6 +109,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/inference_store.db
 models:
 - metadata: {}
   model_id: meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
@@ -260,8 +263,6 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 - toolgroup_id: builtin::wolfram_alpha
   provider_id: wolfram-alpha
 server:
diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py
index a2bd87c97..7761bd9fd 100644
--- a/llama_stack/templates/together/together.py
+++ b/llama_stack/templates/together/together.py
@@ -39,7 +39,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
             "remote::wolfram-alpha",
@@ -74,10 +73,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
         ToolGroupInput(
             toolgroup_id="builtin::wolfram_alpha",
             provider_id="wolfram-alpha",
diff --git a/llama_stack/templates/verification/build.yaml b/llama_stack/templates/verification/build.yaml
index 9f010d651..ce083dbba 100644
--- a/llama_stack/templates/verification/build.yaml
+++ b/llama_stack/templates/verification/build.yaml
@@ -32,7 +32,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/verification/run.yaml b/llama_stack/templates/verification/run.yaml
index 454ecba5b..58b3c576c 100644
--- a/llama_stack/templates/verification/run.yaml
+++ b/llama_stack/templates/verification/run.yaml
@@ -74,13 +74,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/verification}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/verification}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/verification/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/verification}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -126,9 +129,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -138,6 +138,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/verification}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/verification}/inference_store.db
 models:
 - metadata: {}
   model_id: openai/gpt-4o
@@ -154,6 +157,76 @@ models:
   provider_id: openai
   provider_model_id: openai/chatgpt-4o-latest
   model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo-0125
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo-0125
+  model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo
+  model_type: llm
+- metadata: {}
+  model_id: gpt-3.5-turbo-instruct
+  provider_id: openai
+  provider_model_id: gpt-3.5-turbo-instruct
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4
+  provider_id: openai
+  provider_model_id: gpt-4
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4-turbo
+  provider_id: openai
+  provider_model_id: gpt-4-turbo
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o
+  provider_id: openai
+  provider_model_id: gpt-4o
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-2024-08-06
+  provider_id: openai
+  provider_model_id: gpt-4o-2024-08-06
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-mini
+  provider_id: openai
+  provider_model_id: gpt-4o-mini
+  model_type: llm
+- metadata: {}
+  model_id: gpt-4o-audio-preview
+  provider_id: openai
+  provider_model_id: gpt-4o-audio-preview
+  model_type: llm
+- metadata: {}
+  model_id: chatgpt-4o-latest
+  provider_id: openai
+  provider_model_id: chatgpt-4o-latest
+  model_type: llm
+- metadata: {}
+  model_id: o1
+  provider_id: openai
+  provider_model_id: o1
+  model_type: llm
+- metadata: {}
+  model_id: o1-mini
+  provider_id: openai
+  provider_model_id: o1-mini
+  model_type: llm
+- metadata: {}
+  model_id: o3-mini
+  provider_id: openai
+  provider_model_id: o3-mini
+  model_type: llm
+- metadata: {}
+  model_id: o4-mini
+  provider_id: openai
+  provider_model_id: o4-mini
+  model_type: llm
 - metadata:
     embedding_dimension: 1536
     context_length: 8192
@@ -168,6 +241,20 @@ models:
   provider_id: openai
   provider_model_id: openai/text-embedding-3-large
   model_type: embedding
+- metadata:
+    embedding_dimension: 1536
+    context_length: 8192
+  model_id: text-embedding-3-small
+  provider_id: openai
+  provider_model_id: text-embedding-3-small
+  model_type: embedding
+- metadata:
+    embedding_dimension: 3072
+    context_length: 8192
+  model_id: text-embedding-3-large
+  provider_id: openai
+  provider_model_id: text-embedding-3-large
+  model_type: embedding
 - metadata: {}
   model_id: accounts/fireworks/models/llama-v3p1-8b-instruct
   provider_id: fireworks-openai-compat
@@ -505,104 +592,104 @@ models:
   provider_model_id: groq/meta-llama/llama-4-maverick-17b-128e-instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.1-8B-Instruct
+  model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-8B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.1-8B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-8B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-8B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.1-70B-Instruct
+  model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-70B-Instruct
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-3.1-70B-Instruct
-  provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-70B-Instruct
-  model_type: llm
-- metadata: {}
-  model_id: Meta-Llama-3.1-405B-Instruct
-  provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-405B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.1-405B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.1-405B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.2-1B-Instruct
+  model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.2-1B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-1B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.2-1B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-1B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.2-3B-Instruct
+  model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.2-3B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-3B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.2-3B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.2-3B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-3.3-70B-Instruct
+  model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.3-70B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.3-70B-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-3.3-70B-Instruct
+  provider_model_id: sambanova/Meta-Llama-3.3-70B-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Llama-3.2-11B-Vision-Instruct
+  model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Llama-3.2-11B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Llama-3.2-11B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-11B-Vision-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Llama-3.2-90B-Vision-Instruct
+  model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Llama-3.2-90B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Llama-3.2-90B-Vision-Instruct
+  provider_model_id: sambanova/Llama-3.2-90B-Vision-Instruct
   model_type: llm
 - metadata: {}
-  model_id: Meta-Llama-Guard-3-8B
+  model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-Guard-3-8B
-  model_type: llm
-- metadata: {}
-  model_id: meta-llama/Llama-Guard-3-8B
-  provider_id: sambanova-openai-compat
-  provider_model_id: Meta-Llama-Guard-3-8B
-  model_type: llm
-- metadata: {}
-  model_id: Llama-4-Scout-17B-16E-Instruct
-  provider_id: sambanova-openai-compat
-  provider_model_id: Llama-4-Scout-17B-16E-Instruct
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
   model_type: llm
 - metadata: {}
   model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
   provider_id: sambanova-openai-compat
-  provider_model_id: Llama-4-Scout-17B-16E-Instruct
+  provider_model_id: sambanova/Llama-4-Scout-17B-16E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova-openai-compat
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-4-Maverick-17B-128E-Instruct
+  provider_id: sambanova-openai-compat
+  provider_model_id: sambanova/Llama-4-Maverick-17B-128E-Instruct
+  model_type: llm
+- metadata: {}
+  model_id: sambanova/Meta-Llama-Guard-3-8B
+  provider_id: sambanova-openai-compat
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
+  model_type: llm
+- metadata: {}
+  model_id: meta-llama/Llama-Guard-3-8B
+  provider_id: sambanova-openai-compat
+  provider_model_id: sambanova/Meta-Llama-Guard-3-8B
   model_type: llm
 - metadata: {}
   model_id: llama3.1-8b
@@ -640,7 +727,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/verification/verification.py b/llama_stack/templates/verification/verification.py
index e6f74aad8..b58400f26 100644
--- a/llama_stack/templates/verification/verification.py
+++ b/llama_stack/templates/verification/verification.py
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Dict, List, Tuple
 
 from llama_stack.apis.models.models import ModelType
 from llama_stack.distribution.datatypes import (
@@ -51,7 +50,7 @@ from llama_stack.templates.template import (
 )
 
 
-def get_inference_providers() -> Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]:
+def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderModelEntry]]]:
     # in this template, we allow each API key to be optional
     providers = [
         (
@@ -113,7 +112,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -156,10 +154,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
     embedding_model = ModelInput(
         model_id="all-MiniLM-L6-v2",
diff --git a/llama_stack/templates/vllm-gpu/build.yaml b/llama_stack/templates/vllm-gpu/build.yaml
index 8eb44dc1b..d5ff0f1f4 100644
--- a/llama_stack/templates/vllm-gpu/build.yaml
+++ b/llama_stack/templates/vllm-gpu/build.yaml
@@ -27,7 +27,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/vllm-gpu/run.yaml b/llama_stack/templates/vllm-gpu/run.yaml
index a839aa2c5..6937e2bac 100644
--- a/llama_stack/templates/vllm-gpu/run.yaml
+++ b/llama_stack/templates/vllm-gpu/run.yaml
@@ -45,13 +45,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/vllm-gpu/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -97,9 +100,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -109,6 +109,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/inference_store.db
 models:
 - metadata: {}
   model_id: ${env.INFERENCE_MODEL}
@@ -129,7 +132,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/vllm-gpu/vllm.py b/llama_stack/templates/vllm-gpu/vllm.py
index 9bfeadc8d..5775138b1 100644
--- a/llama_stack/templates/vllm-gpu/vllm.py
+++ b/llama_stack/templates/vllm-gpu/vllm.py
@@ -31,7 +31,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -75,10 +74,6 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
     return DistributionTemplate(
diff --git a/llama_stack/templates/watsonx/build.yaml b/llama_stack/templates/watsonx/build.yaml
index badd643ad..e68ace183 100644
--- a/llama_stack/templates/watsonx/build.yaml
+++ b/llama_stack/templates/watsonx/build.yaml
@@ -4,6 +4,7 @@ distribution_spec:
   providers:
     inference:
     - remote::watsonx
+    - inline::sentence-transformers
     vector_io:
     - inline::faiss
     safety:
@@ -24,7 +25,9 @@ distribution_spec:
     tool_runtime:
     - remote::brave-search
     - remote::tavily-search
-    - inline::code-interpreter
     - inline::rag-runtime
     - remote::model-context-protocol
 image_type: conda
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/llama_stack/templates/watsonx/doc_template.md b/llama_stack/templates/watsonx/doc_template.md
index af0ae15a8..f28dbf0bf 100644
--- a/llama_stack/templates/watsonx/doc_template.md
+++ b/llama_stack/templates/watsonx/doc_template.md
@@ -56,7 +56,7 @@ docker run \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-{{ name }} \
-  --yaml-config /root/my-run.yaml \
+  --config /root/my-run.yaml \
   --port $LLAMA_STACK_PORT \
   --env WATSONX_API_KEY=$WATSONX_API_KEY \
   --env WATSONX_PROJECT_ID=$WATSONX_PROJECT_ID \
diff --git a/llama_stack/templates/watsonx/run.yaml b/llama_stack/templates/watsonx/run.yaml
index 1048f7192..e7222fd57 100644
--- a/llama_stack/templates/watsonx/run.yaml
+++ b/llama_stack/templates/watsonx/run.yaml
@@ -18,6 +18,9 @@ providers:
       url: ${env.WATSONX_BASE_URL:https://us-south.ml.cloud.ibm.com}
       api_key: ${env.WATSONX_API_KEY:}
       project_id: ${env.WATSONX_PROJECT_ID:}
+  - provider_id: sentence-transformers
+    provider_type: inline::sentence-transformers
+    config: {}
   vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
@@ -39,13 +42,16 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/watsonx}/agents_store.db
+      responses_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/watsonx}/responses_store.db
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
     config:
-      service_name: "${env.OTEL_SERVICE_NAME:\u200B}"
+      service_name: ${env.OTEL_SERVICE_NAME:}
       sinks: ${env.TELEMETRY_SINKS:console,sqlite}
-      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/watsonx/trace_store.db}
+      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/watsonx}/trace_store.db
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -91,9 +97,6 @@ providers:
     config:
       api_key: ${env.TAVILY_SEARCH_API_KEY:}
       max_results: 3
-  - provider_id: code-interpreter
-    provider_type: inline::code-interpreter
-    config: {}
   - provider_id: rag-runtime
     provider_type: inline::rag-runtime
     config: {}
@@ -103,6 +106,9 @@ providers:
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/watsonx}/registry.db
+inference_store:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/watsonx}/inference_store.db
 models:
 - metadata: {}
   model_id: meta-llama/llama-3-3-70b-instruct
@@ -194,6 +200,11 @@ models:
   provider_id: watsonx
   provider_model_id: meta-llama/llama-guard-3-11b-vision
   model_type: llm
+- metadata:
+    embedding_dimension: 384
+  model_id: all-MiniLM-L6-v2
+  provider_id: sentence-transformers
+  model_type: embedding
 shields: []
 vector_dbs: []
 datasets: []
@@ -204,7 +215,5 @@ tool_groups:
   provider_id: tavily-search
 - toolgroup_id: builtin::rag
   provider_id: rag-runtime
-- toolgroup_id: builtin::code_interpreter
-  provider_id: code-interpreter
 server:
   port: 8321
diff --git a/llama_stack/templates/watsonx/watsonx.py b/llama_stack/templates/watsonx/watsonx.py
index d59bb6f20..802aaf8f1 100644
--- a/llama_stack/templates/watsonx/watsonx.py
+++ b/llama_stack/templates/watsonx/watsonx.py
@@ -6,7 +6,11 @@
 
 from pathlib import Path
 
-from llama_stack.distribution.datatypes import Provider, ToolGroupInput
+from llama_stack.apis.models.models import ModelType
+from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
+from llama_stack.providers.inline.inference.sentence_transformers import (
+    SentenceTransformersInferenceConfig,
+)
 from llama_stack.providers.remote.inference.watsonx import WatsonXConfig
 from llama_stack.providers.remote.inference.watsonx.models import MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings, get_model_registry
@@ -14,7 +18,7 @@ from llama_stack.templates.template import DistributionTemplate, RunConfigSettin
 
 def get_distribution_template() -> DistributionTemplate:
     providers = {
-        "inference": ["remote::watsonx"],
+        "inference": ["remote::watsonx", "inline::sentence-transformers"],
         "vector_io": ["inline::faiss"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
@@ -25,7 +29,6 @@ def get_distribution_template() -> DistributionTemplate:
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
-            "inline::code-interpreter",
             "inline::rag-runtime",
             "remote::model-context-protocol",
         ],
@@ -37,6 +40,12 @@ def get_distribution_template() -> DistributionTemplate:
         config=WatsonXConfig.sample_run_config(),
     )
 
+    embedding_provider = Provider(
+        provider_id="sentence-transformers",
+        provider_type="inline::sentence-transformers",
+        config=SentenceTransformersInferenceConfig.sample_run_config(),
+    )
+
     available_models = {
         "watsonx": MODEL_ENTRIES,
     }
@@ -49,12 +58,17 @@ def get_distribution_template() -> DistributionTemplate:
             toolgroup_id="builtin::rag",
             provider_id="rag-runtime",
         ),
-        ToolGroupInput(
-            toolgroup_id="builtin::code_interpreter",
-            provider_id="code-interpreter",
-        ),
     ]
 
+    embedding_model = ModelInput(
+        model_id="all-MiniLM-L6-v2",
+        provider_id="sentence-transformers",
+        model_type=ModelType.embedding,
+        metadata={
+            "embedding_dimension": 384,
+        },
+    )
+
     default_models = get_model_registry(available_models)
     return DistributionTemplate(
         name="watsonx",
@@ -67,9 +81,9 @@ def get_distribution_template() -> DistributionTemplate:
         run_configs={
             "run.yaml": RunConfigSettings(
                 provider_overrides={
-                    "inference": [inference_provider],
+                    "inference": [inference_provider, embedding_provider],
                 },
-                default_models=default_models,
+                default_models=default_models + [embedding_model],
                 default_tool_groups=default_tool_groups,
             ),
         },
diff --git a/llama_stack/ui/.gitignore b/llama_stack/ui/.gitignore
new file mode 100644
index 000000000..5ef6a5207
--- /dev/null
+++ b/llama_stack/ui/.gitignore
@@ -0,0 +1,41 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# env files (can opt-in for committing if needed)
+.env*
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/llama_stack/ui/.prettierignore b/llama_stack/ui/.prettierignore
new file mode 100644
index 000000000..1b8ac8894
--- /dev/null
+++ b/llama_stack/ui/.prettierignore
@@ -0,0 +1,3 @@
+# Ignore artifacts:
+build
+coverage
diff --git a/llama_stack/ui/.prettierrc b/llama_stack/ui/.prettierrc
new file mode 100644
index 000000000..0967ef424
--- /dev/null
+++ b/llama_stack/ui/.prettierrc
@@ -0,0 +1 @@
+{}
diff --git a/llama_stack/ui/README.md b/llama_stack/ui/README.md
new file mode 100644
index 000000000..b6f803509
--- /dev/null
+++ b/llama_stack/ui/README.md
@@ -0,0 +1,25 @@
+## This is WIP.
+
+We use [shadcn/ui](https://ui.shadcn.com/) for the UI components.
+
+## Getting Started
+
+First, install dependencies:
+
+```bash
+npm install
+```
+
+Then, run the development server:
+
+```bash
+npm run dev
+# or
+yarn dev
+# or
+pnpm dev
+# or
+bun dev
+```
+
+Open [http://localhost:8322](http://localhost:8322) with your browser to see the result.
diff --git a/llama_stack/ui/app/favicon.ico b/llama_stack/ui/app/favicon.ico
new file mode 100644
index 000000000..718d6fea4
Binary files /dev/null and b/llama_stack/ui/app/favicon.ico differ
diff --git a/llama_stack/ui/app/globals.css b/llama_stack/ui/app/globals.css
new file mode 100644
index 000000000..dc98be74c
--- /dev/null
+++ b/llama_stack/ui/app/globals.css
@@ -0,0 +1,122 @@
+@import "tailwindcss";
+@import "tw-animate-css";
+
+@custom-variant dark (&:is(.dark *));
+
+@theme inline {
+  --color-background: var(--background);
+  --color-foreground: var(--foreground);
+  --font-sans: var(--font-geist-sans);
+  --font-mono: var(--font-geist-mono);
+  --color-sidebar-ring: var(--sidebar-ring);
+  --color-sidebar-border: var(--sidebar-border);
+  --color-sidebar-accent-foreground: var(--sidebar-accent-foreground);
+  --color-sidebar-accent: var(--sidebar-accent);
+  --color-sidebar-primary-foreground: var(--sidebar-primary-foreground);
+  --color-sidebar-primary: var(--sidebar-primary);
+  --color-sidebar-foreground: var(--sidebar-foreground);
+  --color-sidebar: var(--sidebar);
+  --color-chart-5: var(--chart-5);
+  --color-chart-4: var(--chart-4);
+  --color-chart-3: var(--chart-3);
+  --color-chart-2: var(--chart-2);
+  --color-chart-1: var(--chart-1);
+  --color-ring: var(--ring);
+  --color-input: var(--input);
+  --color-border: var(--border);
+  --color-destructive: var(--destructive);
+  --color-accent-foreground: var(--accent-foreground);
+  --color-accent: var(--accent);
+  --color-muted-foreground: var(--muted-foreground);
+  --color-muted: var(--muted);
+  --color-secondary-foreground: var(--secondary-foreground);
+  --color-secondary: var(--secondary);
+  --color-primary-foreground: var(--primary-foreground);
+  --color-primary: var(--primary);
+  --color-popover-foreground: var(--popover-foreground);
+  --color-popover: var(--popover);
+  --color-card-foreground: var(--card-foreground);
+  --color-card: var(--card);
+  --radius-sm: calc(var(--radius) - 4px);
+  --radius-md: calc(var(--radius) - 2px);
+  --radius-lg: var(--radius);
+  --radius-xl: calc(var(--radius) + 4px);
+}
+
+:root {
+  --radius: 0.625rem;
+  --background: oklch(1 0 0);
+  --foreground: oklch(0.145 0 0);
+  --card: oklch(1 0 0);
+  --card-foreground: oklch(0.145 0 0);
+  --popover: oklch(1 0 0);
+  --popover-foreground: oklch(0.145 0 0);
+  --primary: oklch(0.205 0 0);
+  --primary-foreground: oklch(0.985 0 0);
+  --secondary: oklch(0.97 0 0);
+  --secondary-foreground: oklch(0.205 0 0);
+  --muted: oklch(0.97 0 0);
+  --muted-foreground: oklch(0.556 0 0);
+  --accent: oklch(0.97 0 0);
+  --accent-foreground: oklch(0.205 0 0);
+  --destructive: oklch(0.577 0.245 27.325);
+  --border: oklch(0.922 0 0);
+  --input: oklch(0.922 0 0);
+  --ring: oklch(0.708 0 0);
+  --chart-1: oklch(0.646 0.222 41.116);
+  --chart-2: oklch(0.6 0.118 184.704);
+  --chart-3: oklch(0.398 0.07 227.392);
+  --chart-4: oklch(0.828 0.189 84.429);
+  --chart-5: oklch(0.769 0.188 70.08);
+  --sidebar: oklch(0.985 0 0);
+  --sidebar-foreground: oklch(0.145 0 0);
+  --sidebar-primary: oklch(0.205 0 0);
+  --sidebar-primary-foreground: oklch(0.985 0 0);
+  --sidebar-accent: oklch(0.97 0 0);
+  --sidebar-accent-foreground: oklch(0.205 0 0);
+  --sidebar-border: oklch(0.922 0 0);
+  --sidebar-ring: oklch(0.708 0 0);
+}
+
+.dark {
+  --background: oklch(0.145 0 0);
+  --foreground: oklch(0.985 0 0);
+  --card: oklch(0.205 0 0);
+  --card-foreground: oklch(0.985 0 0);
+  --popover: oklch(0.205 0 0);
+  --popover-foreground: oklch(0.985 0 0);
+  --primary: oklch(0.922 0 0);
+  --primary-foreground: oklch(0.205 0 0);
+  --secondary: oklch(0.269 0 0);
+  --secondary-foreground: oklch(0.985 0 0);
+  --muted: oklch(0.269 0 0);
+  --muted-foreground: oklch(0.708 0 0);
+  --accent: oklch(0.269 0 0);
+  --accent-foreground: oklch(0.985 0 0);
+  --destructive: oklch(0.704 0.191 22.216);
+  --border: oklch(1 0 0 / 10%);
+  --input: oklch(1 0 0 / 15%);
+  --ring: oklch(0.556 0 0);
+  --chart-1: oklch(0.488 0.243 264.376);
+  --chart-2: oklch(0.696 0.17 162.48);
+  --chart-3: oklch(0.769 0.188 70.08);
+  --chart-4: oklch(0.627 0.265 303.9);
+  --chart-5: oklch(0.645 0.246 16.439);
+  --sidebar: oklch(0.205 0 0);
+  --sidebar-foreground: oklch(0.985 0 0);
+  --sidebar-primary: oklch(0.488 0.243 264.376);
+  --sidebar-primary-foreground: oklch(0.985 0 0);
+  --sidebar-accent: oklch(0.269 0 0);
+  --sidebar-accent-foreground: oklch(0.985 0 0);
+  --sidebar-border: oklch(1 0 0 / 10%);
+  --sidebar-ring: oklch(0.556 0 0);
+}
+
+@layer base {
+  * {
+    @apply border-border outline-ring/50;
+  }
+  body {
+    @apply bg-background text-foreground;
+  }
+}
diff --git a/llama_stack/ui/app/layout.tsx b/llama_stack/ui/app/layout.tsx
new file mode 100644
index 000000000..ed8a6cd5d
--- /dev/null
+++ b/llama_stack/ui/app/layout.tsx
@@ -0,0 +1,55 @@
+import type { Metadata } from "next";
+import { ThemeProvider } from "@/components/ui/theme-provider";
+import { Geist, Geist_Mono } from "next/font/google";
+import { ModeToggle } from "@/components/ui/mode-toggle";
+import "./globals.css";
+
+const geistSans = Geist({
+  variable: "--font-geist-sans",
+  subsets: ["latin"],
+});
+
+const geistMono = Geist_Mono({
+  variable: "--font-geist-mono",
+  subsets: ["latin"],
+});
+
+export const metadata: Metadata = {
+  title: "Llama Stack",
+  description: "Llama Stack UI",
+};
+
+import { SidebarProvider, SidebarTrigger } from "@/components/ui/sidebar";
+import { AppSidebar } from "@/components/layout/app-sidebar";
+
+export default function Layout({ children }: { children: React.ReactNode }) {
+  return (
+    
+      
+        
+          
+            
+            
+ {/* Header with aligned elements */} +
+
+ +
+
+
+ +
+
+
{children}
+
+
+
+ + + ); +} diff --git a/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx new file mode 100644 index 000000000..e6feef363 --- /dev/null +++ b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx @@ -0,0 +1,58 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { useParams } from "next/navigation"; +import { ChatCompletion } from "@/lib/types"; +import { ChatCompletionDetailView } from "@/components/chat-completions/chat-completion-detail"; +import { client } from "@/lib/client"; + +export default function ChatCompletionDetailPage() { + const params = useParams(); + const id = params.id as string; + + const [completionDetail, setCompletionDetail] = + useState(null); + const [isLoading, setIsLoading] = useState(true); + const [error, setError] = useState(null); + + useEffect(() => { + if (!id) { + setError(new Error("Completion ID is missing.")); + setIsLoading(false); + return; + } + + const fetchCompletionDetail = async () => { + setIsLoading(true); + setError(null); + setCompletionDetail(null); + try { + const response = await client.chat.completions.retrieve(id); + setCompletionDetail(response as ChatCompletion); + } catch (err) { + console.error( + `Error fetching chat completion detail for ID ${id}:`, + err, + ); + setError( + err instanceof Error + ? err + : new Error("Failed to fetch completion detail"), + ); + } finally { + setIsLoading(false); + } + }; + + fetchCompletionDetail(); + }, [id]); + + return ( + + ); +} diff --git a/llama_stack/ui/app/logs/chat-completions/layout.tsx b/llama_stack/ui/app/logs/chat-completions/layout.tsx new file mode 100644 index 000000000..f4dbfc782 --- /dev/null +++ b/llama_stack/ui/app/logs/chat-completions/layout.tsx @@ -0,0 +1,19 @@ +"use client"; + +import React from "react"; +import LogsLayout from "@/components/layout/logs-layout"; + +export default function ChatCompletionsLayout({ + children, +}: { + children: React.ReactNode; +}) { + return ( + + {children} + + ); +} diff --git a/llama_stack/ui/app/logs/chat-completions/page.tsx b/llama_stack/ui/app/logs/chat-completions/page.tsx new file mode 100644 index 000000000..5bbfcce94 --- /dev/null +++ b/llama_stack/ui/app/logs/chat-completions/page.tsx @@ -0,0 +1,51 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { ChatCompletion } from "@/lib/types"; +import { ChatCompletionsTable } from "@/components/chat-completions/chat-completions-table"; +import { client } from "@/lib/client"; + +export default function ChatCompletionsPage() { + const [completions, setCompletions] = useState([]); + const [isLoading, setIsLoading] = useState(true); + const [error, setError] = useState(null); + + useEffect(() => { + const fetchCompletions = async () => { + setIsLoading(true); + setError(null); + try { + const response = await client.chat.completions.list(); + const data = Array.isArray(response) + ? response + : (response as { data: ChatCompletion[] }).data; + + if (Array.isArray(data)) { + setCompletions(data); + } else { + console.error("Unexpected response structure:", response); + setError(new Error("Unexpected response structure")); + setCompletions([]); + } + } catch (err) { + console.error("Error fetching chat completions:", err); + setError( + err instanceof Error ? 
err : new Error("Failed to fetch completions"), + ); + setCompletions([]); + } finally { + setIsLoading(false); + } + }; + + fetchCompletions(); + }, []); + + return ( + + ); +} diff --git a/llama_stack/ui/app/logs/responses/[id]/page.tsx b/llama_stack/ui/app/logs/responses/[id]/page.tsx new file mode 100644 index 000000000..efe6f0ff3 --- /dev/null +++ b/llama_stack/ui/app/logs/responses/[id]/page.tsx @@ -0,0 +1,125 @@ +"use client"; + +import { useEffect, useState } from "react"; +import { useParams } from "next/navigation"; +import type { ResponseObject } from "llama-stack-client/resources/responses/responses"; +import { OpenAIResponse, InputItemListResponse } from "@/lib/types"; +import { ResponseDetailView } from "@/components/responses/responses-detail"; +import { client } from "@/lib/client"; + +export default function ResponseDetailPage() { + const params = useParams(); + const id = params.id as string; + + const [responseDetail, setResponseDetail] = useState( + null, + ); + const [inputItems, setInputItems] = useState( + null, + ); + const [isLoading, setIsLoading] = useState(true); + const [isLoadingInputItems, setIsLoadingInputItems] = useState(true); + const [error, setError] = useState(null); + const [inputItemsError, setInputItemsError] = useState(null); + + // Helper function to convert ResponseObject to OpenAIResponse + const convertResponseObject = ( + responseData: ResponseObject, + ): OpenAIResponse => { + return { + id: responseData.id, + created_at: responseData.created_at, + model: responseData.model, + object: responseData.object, + status: responseData.status, + output: responseData.output as OpenAIResponse["output"], + input: [], // ResponseObject doesn't include input; component uses inputItems prop instead + error: responseData.error, + parallel_tool_calls: responseData.parallel_tool_calls, + previous_response_id: responseData.previous_response_id, + temperature: responseData.temperature, + top_p: responseData.top_p, + truncation: responseData.truncation, + user: responseData.user, + }; + }; + + useEffect(() => { + if (!id) { + setError(new Error("Response ID is missing.")); + setIsLoading(false); + return; + } + + const fetchResponseDetail = async () => { + setIsLoading(true); + setIsLoadingInputItems(true); + setError(null); + setInputItemsError(null); + setResponseDetail(null); + setInputItems(null); + + try { + const [responseResult, inputItemsResult] = await Promise.allSettled([ + client.responses.retrieve(id), + client.responses.inputItems.list(id, { order: "asc" }), + ]); + + // Handle response detail result + if (responseResult.status === "fulfilled") { + const convertedResponse = convertResponseObject(responseResult.value); + setResponseDetail(convertedResponse); + } else { + console.error( + `Error fetching response detail for ID ${id}:`, + responseResult.reason, + ); + setError( + responseResult.reason instanceof Error + ? responseResult.reason + : new Error("Failed to fetch response detail"), + ); + } + + // Handle input items result + if (inputItemsResult.status === "fulfilled") { + const inputItemsData = + inputItemsResult.value as unknown as InputItemListResponse; + setInputItems(inputItemsData); + } else { + console.error( + `Error fetching input items for response ID ${id}:`, + inputItemsResult.reason, + ); + setInputItemsError( + inputItemsResult.reason instanceof Error + ? 
inputItemsResult.reason + : new Error("Failed to fetch input items"), + ); + } + } catch (err) { + console.error(`Unexpected error fetching data for ID ${id}:`, err); + setError( + err instanceof Error ? err : new Error("Unexpected error occurred"), + ); + } finally { + setIsLoading(false); + setIsLoadingInputItems(false); + } + }; + + fetchResponseDetail(); + }, [id]); + + return ( + + ); +} diff --git a/llama_stack/ui/app/logs/responses/layout.tsx b/llama_stack/ui/app/logs/responses/layout.tsx new file mode 100644 index 000000000..1fe116e5e --- /dev/null +++ b/llama_stack/ui/app/logs/responses/layout.tsx @@ -0,0 +1,16 @@ +"use client"; + +import React from "react"; +import LogsLayout from "@/components/layout/logs-layout"; + +export default function ResponsesLayout({ + children, +}: { + children: React.ReactNode; +}) { + return ( + + {children} + + ); +} diff --git a/llama_stack/ui/app/logs/responses/page.tsx b/llama_stack/ui/app/logs/responses/page.tsx new file mode 100644 index 000000000..dab0c735f --- /dev/null +++ b/llama_stack/ui/app/logs/responses/page.tsx @@ -0,0 +1,66 @@ +"use client"; + +import { useEffect, useState } from "react"; +import type { ResponseListResponse } from "llama-stack-client/resources/responses/responses"; +import { OpenAIResponse } from "@/lib/types"; +import { ResponsesTable } from "@/components/responses/responses-table"; +import { client } from "@/lib/client"; + +export default function ResponsesPage() { + const [responses, setResponses] = useState([]); + const [isLoading, setIsLoading] = useState(true); + const [error, setError] = useState(null); + + // Helper function to convert ResponseListResponse.Data to OpenAIResponse + const convertResponseListData = ( + responseData: ResponseListResponse.Data, + ): OpenAIResponse => { + return { + id: responseData.id, + created_at: responseData.created_at, + model: responseData.model, + object: responseData.object, + status: responseData.status, + output: responseData.output as OpenAIResponse["output"], + input: responseData.input as OpenAIResponse["input"], + error: responseData.error, + parallel_tool_calls: responseData.parallel_tool_calls, + previous_response_id: responseData.previous_response_id, + temperature: responseData.temperature, + top_p: responseData.top_p, + truncation: responseData.truncation, + user: responseData.user, + }; + }; + + useEffect(() => { + const fetchResponses = async () => { + setIsLoading(true); + setError(null); + try { + const response = await client.responses.list(); + const responseListData = response as ResponseListResponse; + + const convertedResponses: OpenAIResponse[] = responseListData.data.map( + convertResponseListData, + ); + + setResponses(convertedResponses); + } catch (err) { + console.error("Error fetching responses:", err); + setError( + err instanceof Error ? err : new Error("Failed to fetch responses"), + ); + setResponses([]); + } finally { + setIsLoading(false); + } + }; + + fetchResponses(); + }, []); + + return ( + + ); +} diff --git a/llama_stack/ui/app/page.tsx b/llama_stack/ui/app/page.tsx new file mode 100644 index 000000000..d1d781bdb --- /dev/null +++ b/llama_stack/ui/app/page.tsx @@ -0,0 +1,7 @@ +export default function Home() { + return ( +
+    <div>
+      <h1>Welcome to Llama Stack!</h1>
+    </div>
+ ); +} diff --git a/llama_stack/ui/components.json b/llama_stack/ui/components.json new file mode 100644 index 000000000..4ee62ee10 --- /dev/null +++ b/llama_stack/ui/components.json @@ -0,0 +1,21 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "", + "css": "app/globals.css", + "baseColor": "neutral", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" + }, + "iconLibrary": "lucide" +} diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx new file mode 100644 index 000000000..5348dbc3a --- /dev/null +++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx @@ -0,0 +1,193 @@ +import React from "react"; +import { render, screen } from "@testing-library/react"; +import "@testing-library/jest-dom"; +import { ChatCompletionDetailView } from "./chat-completion-detail"; +import { ChatCompletion } from "@/lib/types"; + +// Initial test file setup for ChatCompletionDetailView + +describe("ChatCompletionDetailView", () => { + test("renders skeleton UI when isLoading is true", () => { + const { container } = render( + , + ); + // Use the data-slot attribute for Skeletons + const skeletons = container.querySelectorAll('[data-slot="skeleton"]'); + expect(skeletons.length).toBeGreaterThan(0); + }); + + test("renders error message when error prop is provided", () => { + render( + , + ); + expect( + screen.getByText(/Error loading details for ID err-id: Network Error/), + ).toBeInTheDocument(); + }); + + test("renders default error message when error.message is empty", () => { + render( + , + ); + // Use regex to match the error message regardless of whitespace + expect( + screen.getByText(/Error loading details for ID\s*err-id\s*:/), + ).toBeInTheDocument(); + }); + + test("renders error message when error prop is an object without message", () => { + render( + , + ); + // Use regex to match the error message regardless of whitespace + expect( + screen.getByText(/Error loading details for ID\s*err-id\s*:/), + ).toBeInTheDocument(); + }); + + test("renders not found message when completion is null and not loading/error", () => { + render( + , + ); + expect( + screen.getByText("No details found for ID: notfound-id."), + ).toBeInTheDocument(); + }); + + test("renders input, output, and properties for valid completion", () => { + const mockCompletion: ChatCompletion = { + id: "comp_123", + object: "chat.completion", + created: 1710000000, + model: "llama-test-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: "Test output" }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Test input" }], + }; + render( + , + ); + // Input + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Test input")).toBeInTheDocument(); + // Output + expect(screen.getByText("Output")).toBeInTheDocument(); + expect(screen.getByText("Test output")).toBeInTheDocument(); + // Properties + expect(screen.getByText("Properties")).toBeInTheDocument(); + expect(screen.getByText("Created:")).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710000000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + expect(screen.getByText("ID:")).toBeInTheDocument(); + 
expect(screen.getByText("comp_123")).toBeInTheDocument(); + expect(screen.getByText("Model:")).toBeInTheDocument(); + expect(screen.getByText("llama-test-model")).toBeInTheDocument(); + expect(screen.getByText("Finish Reason:")).toBeInTheDocument(); + expect(screen.getByText("stop")).toBeInTheDocument(); + }); + + test("renders tool call in output and properties when present", () => { + const toolCall = { + function: { name: "search", arguments: '{"query":"llama"}' }, + }; + const mockCompletion: ChatCompletion = { + id: "comp_tool", + object: "chat.completion", + created: 1710001000, + model: "llama-tool-model", + choices: [ + { + index: 0, + message: { + role: "assistant", + content: "Tool output", + tool_calls: [toolCall], + }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Tool input" }], + }; + render( + , + ); + // Output should include the tool call block (should be present twice: input and output) + const toolCallLabels = screen.getAllByText("Tool Call"); + expect(toolCallLabels.length).toBeGreaterThanOrEqual(1); // At least one, but could be two + // The tool call block should contain the formatted tool call string in both input and output + const toolCallBlocks = screen.getAllByText('search({"query":"llama"})'); + expect(toolCallBlocks.length).toBe(2); + // Properties should include the tool call name + expect(screen.getByText("Functions/Tools Called:")).toBeInTheDocument(); + expect(screen.getByText("search")).toBeInTheDocument(); + }); + + test("handles missing/empty fields gracefully", () => { + const mockCompletion: ChatCompletion = { + id: "comp_edge", + object: "chat.completion", + created: 1710002000, + model: "llama-edge-model", + choices: [], // No choices + input_messages: [], // No input messages + }; + render( + , + ); + // Input section should be present but empty + expect(screen.getByText("Input")).toBeInTheDocument(); + // Output section should show fallback message + expect( + screen.getByText("No message found in assistant's choice."), + ).toBeInTheDocument(); + // Properties should show N/A for finish reason + expect(screen.getByText("Finish Reason:")).toBeInTheDocument(); + expect(screen.getByText("N/A")).toBeInTheDocument(); + }); +}); diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx new file mode 100644 index 000000000..200807864 --- /dev/null +++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx @@ -0,0 +1,145 @@ +"use client"; + +import { ChatMessage, ChatCompletion } from "@/lib/types"; +import { ChatMessageItem } from "@/components/chat-completions/chat-messasge-item"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { + DetailLoadingView, + DetailErrorView, + DetailNotFoundView, + DetailLayout, + PropertiesCard, + PropertyItem, +} from "@/components/layout/detail-layout"; + +interface ChatCompletionDetailViewProps { + completion: ChatCompletion | null; + isLoading: boolean; + error: Error | null; + id: string; +} + +export function ChatCompletionDetailView({ + completion, + isLoading, + error, + id, +}: ChatCompletionDetailViewProps) { + const title = "Chat Completion Details"; + + if (error) { + return ; + } + + if (isLoading) { + return ; + } + + if (!completion) { + return ; + } + + // Main content cards + const mainContent = ( + <> + + + Input + + + {completion.input_messages?.map((msg, index) => ( + + ))} + 
{completion.choices?.[0]?.message?.tool_calls && + Array.isArray(completion.choices[0].message.tool_calls) && + !completion.input_messages?.some( + (im) => + im.role === "assistant" && + im.tool_calls && + Array.isArray(im.tool_calls) && + im.tool_calls.length > 0, + ) + ? completion.choices[0].message.tool_calls.map( + (toolCall: any, index: number) => { + const assistantToolCallMessage: ChatMessage = { + role: "assistant", + tool_calls: [toolCall], + content: "", // Ensure content is defined, even if empty + }; + return ( + + ); + }, + ) + : null} + + + + + + Output + + + {completion.choices?.[0]?.message ? ( + + ) : ( +

+              No message found in assistant's choice.
+            </p>
+          )}
+        </CardContent>
+      </Card>
+    </>
+  );
+
+  // Properties sidebar
+  const sidebar = (
+    <PropertiesCard>
+      <PropertyItem
+        label="Created"
+        value={new Date(completion.created * 1000).toLocaleString()}
+      />
+      <PropertyItem label="ID" value={completion.id} />
+      <PropertyItem label="Model" value={completion.model} />
+      <PropertyItem
+        label="Finish Reason"
+        value={completion.choices?.[0]?.finish_reason || "N/A"}
+      />
+      {(() => {
+        const toolCalls = completion.choices?.[0]?.message?.tool_calls;
+        if (toolCalls && Array.isArray(toolCalls) && toolCalls.length > 0) {
+          return (
+            <PropertyItem
+              label="Functions/Tools Called"
+              value={
+                <ul>
+                  {toolCalls.map((toolCall: any, index: number) => (
+                    <li key={index}>{toolCall.function?.name || "N/A"}</li>
+                  ))}
+                </ul>
+              }
+              hasBorder
+            />
+          );
+        }
+        return null;
+      })()}
+    </PropertiesCard>
+ ); + + return ( + + ); +} diff --git a/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx new file mode 100644 index 000000000..c8a55b100 --- /dev/null +++ b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx @@ -0,0 +1,347 @@ +import React from "react"; +import { render, screen, fireEvent } from "@testing-library/react"; +import "@testing-library/jest-dom"; +import { ChatCompletionsTable } from "./chat-completions-table"; +import { ChatCompletion } from "@/lib/types"; + +// Mock next/navigation +const mockPush = jest.fn(); +jest.mock("next/navigation", () => ({ + useRouter: () => ({ + push: mockPush, + }), +})); + +// Mock helper functions +jest.mock("@/lib/truncate-text"); +jest.mock("@/lib/format-message-content"); + +// Import the mocked functions to set up default or specific implementations +import { truncateText as originalTruncateText } from "@/lib/truncate-text"; +import { + extractTextFromContentPart as originalExtractTextFromContentPart, + extractDisplayableText as originalExtractDisplayableText, +} from "@/lib/format-message-content"; + +// Cast to jest.Mock for typings +const truncateText = originalTruncateText as jest.Mock; +const extractTextFromContentPart = + originalExtractTextFromContentPart as jest.Mock; +const extractDisplayableText = originalExtractDisplayableText as jest.Mock; + +describe("ChatCompletionsTable", () => { + const defaultProps = { + data: [] as ChatCompletion[], + isLoading: false, + error: null, + }; + + beforeEach(() => { + // Reset all mocks before each test + mockPush.mockClear(); + truncateText.mockClear(); + extractTextFromContentPart.mockClear(); + extractDisplayableText.mockClear(); + + // Default pass-through implementations + truncateText.mockImplementation((text: string | undefined) => text); + extractTextFromContentPart.mockImplementation((content: unknown) => + typeof content === "string" ? 
content : "extracted text", + ); + extractDisplayableText.mockImplementation( + (message: unknown) => + (message as { content?: string })?.content || "extracted output", + ); + }); + + test("renders without crashing with default props", () => { + render(); + expect(screen.getByText("No chat completions found.")).toBeInTheDocument(); + }); + + test("click on a row navigates to the correct URL", () => { + const mockCompletion: ChatCompletion = { + id: "comp_123", + object: "chat.completion", + created: Math.floor(Date.now() / 1000), + model: "llama-test-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: "Test output" }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Test input" }], + }; + + // Set up mocks to return expected values + extractTextFromContentPart.mockReturnValue("Test input"); + extractDisplayableText.mockReturnValue("Test output"); + + render(); + + const row = screen.getByText("Test input").closest("tr"); + if (row) { + fireEvent.click(row); + expect(mockPush).toHaveBeenCalledWith("/logs/chat-completions/comp_123"); + } else { + throw new Error('Row with "Test input" not found for router mock test.'); + } + }); + + describe("Loading State", () => { + test("renders skeleton UI when isLoading is true", () => { + const { container } = render( + , + ); + + // Check for skeleton in the table caption + const tableCaption = container.querySelector("caption"); + expect(tableCaption).toBeInTheDocument(); + if (tableCaption) { + const captionSkeleton = tableCaption.querySelector( + '[data-slot="skeleton"]', + ); + expect(captionSkeleton).toBeInTheDocument(); + } + + // Check for skeletons in the table body cells + const tableBody = container.querySelector("tbody"); + expect(tableBody).toBeInTheDocument(); + if (tableBody) { + const bodySkeletons = tableBody.querySelectorAll( + '[data-slot="skeleton"]', + ); + expect(bodySkeletons.length).toBeGreaterThan(0); + } + }); + }); + + describe("Error State", () => { + test("renders error message when error prop is provided", () => { + const errorMessage = "Network Error"; + render( + , + ); + expect( + screen.getByText(`Error fetching data: ${errorMessage}`), + ).toBeInTheDocument(); + }); + + test("renders default error message when error.message is not available", () => { + render( + , + ); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + + test("renders default error message when error prop is an object without message", () => { + render(); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + }); + + describe("Empty State", () => { + test('renders "No chat completions found." 
and no table when data array is empty', () => { + render(); + expect( + screen.getByText("No chat completions found."), + ).toBeInTheDocument(); + + // Ensure that the table structure is NOT rendered in the empty state + const table = screen.queryByRole("table"); + expect(table).not.toBeInTheDocument(); + }); + }); + + describe("Data Rendering", () => { + test("renders table caption, headers, and completion data correctly", () => { + const mockCompletions = [ + { + id: "comp_1", + object: "chat.completion", + created: 1710000000, + model: "llama-test-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: "Test output" }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Test input" }], + }, + { + id: "comp_2", + object: "chat.completion", + created: 1710001000, + model: "llama-another-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: "Another output" }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Another input" }], + }, + ]; + + // Set up mocks to return expected values + extractTextFromContentPart.mockImplementation((content: unknown) => { + if (content === "Test input") return "Test input"; + if (content === "Another input") return "Another input"; + return "extracted text"; + }); + extractDisplayableText.mockImplementation((message: unknown) => { + const msg = message as { content?: string }; + if (msg?.content === "Test output") return "Test output"; + if (msg?.content === "Another output") return "Another output"; + return "extracted output"; + }); + + render( + , + ); + + // Table caption + expect( + screen.getByText("A list of your recent chat completions."), + ).toBeInTheDocument(); + + // Table headers + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Output")).toBeInTheDocument(); + expect(screen.getByText("Model")).toBeInTheDocument(); + expect(screen.getByText("Created")).toBeInTheDocument(); + + // Data rows + expect(screen.getByText("Test input")).toBeInTheDocument(); + expect(screen.getByText("Test output")).toBeInTheDocument(); + expect(screen.getByText("llama-test-model")).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710000000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + + expect(screen.getByText("Another input")).toBeInTheDocument(); + expect(screen.getByText("Another output")).toBeInTheDocument(); + expect(screen.getByText("llama-another-model")).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710001000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + }); + }); + + describe("Text Truncation and Content Extraction", () => { + test("truncates long input and output text", () => { + // Specific mock implementation for this test + truncateText.mockImplementation( + (text: string | undefined, maxLength?: number) => { + const defaultTestMaxLength = 10; + const effectiveMaxLength = maxLength ?? defaultTestMaxLength; + return typeof text === "string" && text.length > effectiveMaxLength + ? text.slice(0, effectiveMaxLength) + "..." 
+ : text; + }, + ); + + const longInput = + "This is a very long input message that should be truncated."; + const longOutput = + "This is a very long output message that should also be truncated."; + + extractTextFromContentPart.mockReturnValue(longInput); + extractDisplayableText.mockReturnValue(longOutput); + + const mockCompletions = [ + { + id: "comp_trunc", + object: "chat.completion", + created: 1710002000, + model: "llama-trunc-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: longOutput }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: longInput }], + }, + ]; + + render( + , + ); + + // The truncated text should be present for both input and output + const truncatedTexts = screen.getAllByText( + longInput.slice(0, 10) + "...", + ); + expect(truncatedTexts.length).toBe(2); // one for input, one for output + truncatedTexts.forEach((textElement) => + expect(textElement).toBeInTheDocument(), + ); + }); + + test("uses content extraction functions correctly", () => { + const mockCompletion = { + id: "comp_extract", + object: "chat.completion", + created: 1710003000, + model: "llama-extract-model", + choices: [ + { + index: 0, + message: { role: "assistant", content: "Extracted output" }, + finish_reason: "stop", + }, + ], + input_messages: [{ role: "user", content: "Extracted input" }], + }; + + extractTextFromContentPart.mockReturnValue("Extracted input"); + extractDisplayableText.mockReturnValue("Extracted output"); + + render( + , + ); + + // Verify the extraction functions were called + expect(extractTextFromContentPart).toHaveBeenCalledWith( + "Extracted input", + ); + expect(extractDisplayableText).toHaveBeenCalledWith({ + role: "assistant", + content: "Extracted output", + }); + + // Verify the extracted content is displayed + expect(screen.getByText("Extracted input")).toBeInTheDocument(); + expect(screen.getByText("Extracted output")).toBeInTheDocument(); + }); + }); +}); diff --git a/llama_stack/ui/components/chat-completions/chat-completions-table.tsx b/llama_stack/ui/components/chat-completions/chat-completions-table.tsx new file mode 100644 index 000000000..5f1d2f03d --- /dev/null +++ b/llama_stack/ui/components/chat-completions/chat-completions-table.tsx @@ -0,0 +1,43 @@ +"use client"; + +import { ChatCompletion } from "@/lib/types"; +import { LogsTable, LogTableRow } from "@/components/logs/logs-table"; +import { + extractTextFromContentPart, + extractDisplayableText, +} from "@/lib/format-message-content"; + +interface ChatCompletionsTableProps { + data: ChatCompletion[]; + isLoading: boolean; + error: Error | null; +} + +function formatChatCompletionToRow(completion: ChatCompletion): LogTableRow { + return { + id: completion.id, + input: extractTextFromContentPart(completion.input_messages?.[0]?.content), + output: extractDisplayableText(completion.choices?.[0]?.message), + model: completion.model, + createdTime: new Date(completion.created * 1000).toLocaleString(), + detailPath: `/logs/chat-completions/${completion.id}`, + }; +} + +export function ChatCompletionsTable({ + data, + isLoading, + error, +}: ChatCompletionsTableProps) { + const formattedData = data.map(formatChatCompletionToRow); + + return ( + + ); +} diff --git a/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx new file mode 100644 index 000000000..2e8593bfb --- /dev/null +++ b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx @@ -0,0 +1,76 @@ 
+"use client"; + +import { ChatMessage } from "@/lib/types"; +import React from "react"; +import { formatToolCallToString } from "@/lib/format-tool-call"; +import { extractTextFromContentPart } from "@/lib/format-message-content"; +import { + MessageBlock, + ToolCallBlock, +} from "@/components/ui/message-components"; + +interface ChatMessageItemProps { + message: ChatMessage; +} +export function ChatMessageItem({ message }: ChatMessageItemProps) { + switch (message.role) { + case "system": + return ( + + ); + case "user": + return ( + + ); + + case "assistant": + if ( + message.tool_calls && + Array.isArray(message.tool_calls) && + message.tool_calls.length > 0 + ) { + return ( + <> + {message.tool_calls.map((toolCall: any, index: number) => { + const formattedToolCall = formatToolCallToString(toolCall); + const toolCallContent = ( + + {formattedToolCall || "Error: Could not display tool call"} + + ); + return ( + + ); + })} + + ); + } else { + return ( + + ); + } + case "tool": + const toolOutputContent = ( + + {extractTextFromContentPart(message.content)} + + ); + return ( + + ); + } + return null; +} diff --git a/llama_stack/ui/components/layout/app-sidebar.tsx b/llama_stack/ui/components/layout/app-sidebar.tsx new file mode 100644 index 000000000..1c53d6cc5 --- /dev/null +++ b/llama_stack/ui/components/layout/app-sidebar.tsx @@ -0,0 +1,82 @@ +"use client"; + +import { MessageSquareText, MessagesSquare, MoveUpRight } from "lucide-react"; +import Link from "next/link"; +import { usePathname } from "next/navigation"; +import { cn } from "@/lib/utils"; + +import { + Sidebar, + SidebarContent, + SidebarGroup, + SidebarGroupContent, + SidebarGroupLabel, + SidebarMenu, + SidebarMenuButton, + SidebarMenuItem, + SidebarHeader, +} from "@/components/ui/sidebar"; + +const logItems = [ + { + title: "Chat Completions", + url: "/logs/chat-completions", + icon: MessageSquareText, + }, + { + title: "Responses", + url: "/logs/responses", + icon: MessagesSquare, + }, + { + title: "Documentation", + url: "https://llama-stack.readthedocs.io/en/latest/references/api_reference/index.html", + icon: MoveUpRight, + }, +]; + +export function AppSidebar() { + const pathname = usePathname(); + + return ( + + + Llama Stack + + + + Logs + + + {logItems.map((item) => { + const isActive = pathname.startsWith(item.url); + return ( + + + + + {item.title} + + + + ); + })} + + + + + + ); +} diff --git a/llama_stack/ui/components/layout/detail-layout.tsx b/llama_stack/ui/components/layout/detail-layout.tsx new file mode 100644 index 000000000..58b912703 --- /dev/null +++ b/llama_stack/ui/components/layout/detail-layout.tsx @@ -0,0 +1,141 @@ +import React from "react"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Skeleton } from "@/components/ui/skeleton"; + +export function DetailLoadingView({ title }: { title: string }) { + return ( + <> + {/* Title Skeleton */} +
+
+ {[...Array(2)].map((_, i) => ( + + + + + + + + + + + + + ))} +
+
+
+ {" "} + {/* Properties Title Skeleton */} + {[...Array(5)].map((_, i) => ( +
+ + +
+ ))} +
+
+
+ + ); +} + +export function DetailErrorView({ + title, + id, + error, +}: { + title: string; + id: string; + error: Error; +}) { + return ( + <> +

+      <h1>{title}</h1>
+      <p>
+        Error loading details for ID {id}: {error.message}
+      </p>

+ + ); +} + +export function DetailNotFoundView({ + title, + id, +}: { + title: string; + id: string; +}) { + return ( + <> +

+      <h1>{title}</h1>
+      <p>No details found for ID: {id}.</p>

+ + ); +} + +export interface PropertyItemProps { + label: string; + value: React.ReactNode; + className?: string; + hasBorder?: boolean; +} + +export function PropertyItem({ + label, + value, + className = "", + hasBorder = false, +}: PropertyItemProps) { + return ( +
+    <li className={`${hasBorder ? "border-t" : ""} ${className}`}>
+      <strong>{label}:</strong>{" "}
+      {typeof value === "string" || typeof value === "number" ? (
+        <span>{value}</span>
+      ) : (
+        value
+      )}
+    </li>
+  );
+}
+
+export interface PropertiesCardProps {
+  children: React.ReactNode;
+}
+
+export function PropertiesCard({ children }: PropertiesCardProps) {
+  return (
+    <Card>
+      <CardHeader>
+        <CardTitle>Properties</CardTitle>
+      </CardHeader>
+      <CardContent>
+        <ul>{children}</ul>
+      </CardContent>
+    </Card>
    + ); +} + +export interface DetailLayoutProps { + title: string; + mainContent: React.ReactNode; + sidebar: React.ReactNode; +} + +export function DetailLayout({ + title, + mainContent, + sidebar, +}: DetailLayoutProps) { + return ( + <> +

+      <h1>{title}</h1>
+      <div>
+        <div>{mainContent}</div>
+        <div>{sidebar}</div>
+      </div>
    + + ); +} diff --git a/llama_stack/ui/components/layout/logs-layout.tsx b/llama_stack/ui/components/layout/logs-layout.tsx new file mode 100644 index 000000000..468ad6e9a --- /dev/null +++ b/llama_stack/ui/components/layout/logs-layout.tsx @@ -0,0 +1,49 @@ +"use client"; + +import React from "react"; +import { usePathname, useParams } from "next/navigation"; +import { + PageBreadcrumb, + BreadcrumbSegment, +} from "@/components/layout/page-breadcrumb"; +import { truncateText } from "@/lib/truncate-text"; + +interface LogsLayoutProps { + children: React.ReactNode; + sectionLabel: string; + basePath: string; +} + +export default function LogsLayout({ + children, + sectionLabel, + basePath, +}: LogsLayoutProps) { + const pathname = usePathname(); + const params = useParams(); + + let segments: BreadcrumbSegment[] = []; + + if (pathname === basePath) { + segments = [{ label: sectionLabel }]; + } + + const idParam = params?.id; + if (idParam && typeof idParam === "string") { + segments = [ + { label: sectionLabel, href: basePath }, + { label: `Details (${truncateText(idParam, 20)})` }, + ]; + } + + return ( +
+    <>
+      {segments.length > 0 && (
+        <PageBreadcrumb segments={segments} />
+      )}
+      {children}
+    </>
    + ); +} diff --git a/llama_stack/ui/components/layout/page-breadcrumb.tsx b/llama_stack/ui/components/layout/page-breadcrumb.tsx new file mode 100644 index 000000000..fdb561d68 --- /dev/null +++ b/llama_stack/ui/components/layout/page-breadcrumb.tsx @@ -0,0 +1,49 @@ +"use client"; + +import Link from "next/link"; +import React from "react"; +import { + Breadcrumb, + BreadcrumbItem, + BreadcrumbLink, + BreadcrumbList, + BreadcrumbPage, + BreadcrumbSeparator, +} from "@/components/ui/breadcrumb"; + +export interface BreadcrumbSegment { + label: string; + href?: string; +} + +interface PageBreadcrumbProps { + segments: BreadcrumbSegment[]; + className?: string; +} + +export function PageBreadcrumb({ segments, className }: PageBreadcrumbProps) { + if (!segments || segments.length === 0) { + return null; + } + + return ( + + + {segments.map((segment, index) => ( + + + {segment.href ? ( + + {segment.label} + + ) : ( + {segment.label} + )} + + {index < segments.length - 1 && } + + ))} + + + ); +} diff --git a/llama_stack/ui/components/logs/logs-table.test.tsx b/llama_stack/ui/components/logs/logs-table.test.tsx new file mode 100644 index 000000000..88263b2fc --- /dev/null +++ b/llama_stack/ui/components/logs/logs-table.test.tsx @@ -0,0 +1,350 @@ +import React from "react"; +import { render, screen, fireEvent } from "@testing-library/react"; +import "@testing-library/jest-dom"; +import { LogsTable, LogTableRow } from "./logs-table"; + +// Mock next/navigation +const mockPush = jest.fn(); +jest.mock("next/navigation", () => ({ + useRouter: () => ({ + push: mockPush, + }), +})); + +// Mock helper functions +jest.mock("@/lib/truncate-text"); + +// Import the mocked functions +import { truncateText as originalTruncateText } from "@/lib/truncate-text"; + +// Cast to jest.Mock for typings +const truncateText = originalTruncateText as jest.Mock; + +describe("LogsTable", () => { + const defaultProps = { + data: [] as LogTableRow[], + isLoading: false, + error: null, + caption: "Test table caption", + emptyMessage: "No data found", + }; + + beforeEach(() => { + // Reset all mocks before each test + mockPush.mockClear(); + truncateText.mockClear(); + + // Default pass-through implementation + truncateText.mockImplementation((text: string | undefined) => text); + }); + + test("renders without crashing with default props", () => { + render(); + expect(screen.getByText("No data found")).toBeInTheDocument(); + }); + + test("click on a row navigates to the correct URL", () => { + const mockData: LogTableRow[] = [ + { + id: "row_123", + input: "Test input", + output: "Test output", + model: "test-model", + createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path/row_123", + }, + ]; + + render(); + + const row = screen.getByText("Test input").closest("tr"); + if (row) { + fireEvent.click(row); + expect(mockPush).toHaveBeenCalledWith("/test/path/row_123"); + } else { + throw new Error('Row with "Test input" not found for router mock test.'); + } + }); + + describe("Loading State", () => { + test("renders skeleton UI when isLoading is true", () => { + const { container } = render( + , + ); + + // Check for skeleton in the table caption + const tableCaption = container.querySelector("caption"); + expect(tableCaption).toBeInTheDocument(); + if (tableCaption) { + const captionSkeleton = tableCaption.querySelector( + '[data-slot="skeleton"]', + ); + expect(captionSkeleton).toBeInTheDocument(); + } + + // Check for skeletons in the table body cells + const tableBody = container.querySelector("tbody"); + 
expect(tableBody).toBeInTheDocument(); + if (tableBody) { + const bodySkeletons = tableBody.querySelectorAll( + '[data-slot="skeleton"]', + ); + expect(bodySkeletons.length).toBeGreaterThan(0); + } + + // Check that table headers are still rendered + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Output")).toBeInTheDocument(); + expect(screen.getByText("Model")).toBeInTheDocument(); + expect(screen.getByText("Created")).toBeInTheDocument(); + }); + + test("renders correct number of skeleton rows", () => { + const { container } = render( + , + ); + + const skeletonRows = container.querySelectorAll("tbody tr"); + expect(skeletonRows.length).toBe(3); // Should render 3 skeleton rows + }); + }); + + describe("Error State", () => { + test("renders error message when error prop is provided", () => { + const errorMessage = "Network Error"; + render( + , + ); + expect( + screen.getByText(`Error fetching data: ${errorMessage}`), + ).toBeInTheDocument(); + }); + + test("renders default error message when error.message is not available", () => { + render( + , + ); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + + test("renders default error message when error prop is an object without message", () => { + render(); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + + test("does not render table when in error state", () => { + render( + , + ); + const table = screen.queryByRole("table"); + expect(table).not.toBeInTheDocument(); + }); + }); + + describe("Empty State", () => { + test("renders custom empty message when data array is empty", () => { + render( + , + ); + expect(screen.getByText("Custom empty message")).toBeInTheDocument(); + + // Ensure that the table structure is NOT rendered in the empty state + const table = screen.queryByRole("table"); + expect(table).not.toBeInTheDocument(); + }); + }); + + describe("Data Rendering", () => { + test("renders table caption, headers, and data correctly", () => { + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: "First input", + output: "First output", + model: "model-1", + createdTime: "2024-01-01 12:00:00", + detailPath: "/path/1", + }, + { + id: "row_2", + input: "Second input", + output: "Second output", + model: "model-2", + createdTime: "2024-01-02 13:00:00", + detailPath: "/path/2", + }, + ]; + + render( + , + ); + + // Table caption + expect(screen.getByText("Custom table caption")).toBeInTheDocument(); + + // Table headers + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Output")).toBeInTheDocument(); + expect(screen.getByText("Model")).toBeInTheDocument(); + expect(screen.getByText("Created")).toBeInTheDocument(); + + // Data rows + expect(screen.getByText("First input")).toBeInTheDocument(); + expect(screen.getByText("First output")).toBeInTheDocument(); + expect(screen.getByText("model-1")).toBeInTheDocument(); + expect(screen.getByText("2024-01-01 12:00:00")).toBeInTheDocument(); + + expect(screen.getByText("Second input")).toBeInTheDocument(); + expect(screen.getByText("Second output")).toBeInTheDocument(); + expect(screen.getByText("model-2")).toBeInTheDocument(); + expect(screen.getByText("2024-01-02 13:00:00")).toBeInTheDocument(); + }); + + test("applies correct CSS classes to table rows", () => { + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: "Test input", + output: "Test output", + model: "test-model", + 
createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path", + }, + ]; + + render(); + + const row = screen.getByText("Test input").closest("tr"); + expect(row).toHaveClass("cursor-pointer"); + expect(row).toHaveClass("hover:bg-muted/50"); + }); + + test("applies correct alignment to Created column", () => { + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: "Test input", + output: "Test output", + model: "test-model", + createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path", + }, + ]; + + render(); + + const createdCell = screen.getByText("2024-01-01 12:00:00").closest("td"); + expect(createdCell).toHaveClass("text-right"); + }); + }); + + describe("Text Truncation", () => { + test("truncates input and output text using truncateText function", () => { + // Mock truncateText to return truncated versions + truncateText.mockImplementation((text: string | undefined) => { + if (typeof text === "string" && text.length > 10) { + return text.slice(0, 10) + "..."; + } + return text; + }); + + const longInput = + "This is a very long input text that should be truncated"; + const longOutput = + "This is a very long output text that should be truncated"; + + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: longInput, + output: longOutput, + model: "test-model", + createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path", + }, + ]; + + render(); + + // Verify truncateText was called + expect(truncateText).toHaveBeenCalledWith(longInput); + expect(truncateText).toHaveBeenCalledWith(longOutput); + + // Verify truncated text is displayed + const truncatedTexts = screen.getAllByText("This is a ..."); + expect(truncatedTexts).toHaveLength(2); // one for input, one for output + truncatedTexts.forEach((textElement) => + expect(textElement).toBeInTheDocument(), + ); + }); + + test("does not truncate model names", () => { + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: "Test input", + output: "Test output", + model: "very-long-model-name-that-should-not-be-truncated", + createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path", + }, + ]; + + render(); + + // Model name should not be passed to truncateText + expect(truncateText).not.toHaveBeenCalledWith( + "very-long-model-name-that-should-not-be-truncated", + ); + + // Full model name should be displayed + expect( + screen.getByText("very-long-model-name-that-should-not-be-truncated"), + ).toBeInTheDocument(); + }); + }); + + describe("Accessibility", () => { + test("table has proper role and structure", () => { + const mockData: LogTableRow[] = [ + { + id: "row_1", + input: "Test input", + output: "Test output", + model: "test-model", + createdTime: "2024-01-01 12:00:00", + detailPath: "/test/path", + }, + ]; + + render(); + + const table = screen.getByRole("table"); + expect(table).toBeInTheDocument(); + + const columnHeaders = screen.getAllByRole("columnheader"); + expect(columnHeaders).toHaveLength(4); + + const rows = screen.getAllByRole("row"); + expect(rows).toHaveLength(2); // 1 header row + 1 data row + }); + }); +}); diff --git a/llama_stack/ui/components/logs/logs-table.tsx b/llama_stack/ui/components/logs/logs-table.tsx new file mode 100644 index 000000000..33afea61b --- /dev/null +++ b/llama_stack/ui/components/logs/logs-table.tsx @@ -0,0 +1,113 @@ +"use client"; + +import { useRouter } from "next/navigation"; +import { truncateText } from "@/lib/truncate-text"; +import { + Table, + TableBody, + TableCaption, + TableCell, + TableHead, + TableHeader, + TableRow, +} from 
"@/components/ui/table"; +import { Skeleton } from "@/components/ui/skeleton"; + +// Generic table row data interface +export interface LogTableRow { + id: string; + input: string; + output: string; + model: string; + createdTime: string; + detailPath: string; +} + +interface LogsTableProps { + data: LogTableRow[]; + isLoading: boolean; + error: Error | null; + caption: string; + emptyMessage: string; +} + +export function LogsTable({ + data, + isLoading, + error, + caption, + emptyMessage, +}: LogsTableProps) { + const router = useRouter(); + + const tableHeader = ( + + + Input + Output + Model + Created + + + ); + + if (isLoading) { + return ( + + + + + {tableHeader} + + {[...Array(3)].map((_, i) => ( + + + + + + + + + + + + + + + ))} + +
    + ); + } + + if (error) { + return ( +

+      <div>
+        Error fetching data: {error.message || "An unknown error occurred"}
+      </div>
+    );
+  }
+
+  if (data.length === 0) {
+    return <p>{emptyMessage}</p>;
+  }
+
+  return (
+    <Table>
+      <TableCaption>{caption}</TableCaption>
+      {tableHeader}
+      <TableBody>
+        {data.map((row) => (
+          <TableRow
+            key={row.id}
+            onClick={() => router.push(row.detailPath)}
+            className="cursor-pointer hover:bg-muted/50"
+          >
+            <TableCell>{truncateText(row.input)}</TableCell>
+            <TableCell>{truncateText(row.output)}</TableCell>
+            <TableCell>{row.model}</TableCell>
+            <TableCell className="text-right">{row.createdTime}</TableCell>
+          </TableRow>
+        ))}
+      </TableBody>
+    </Table>
    + ); +} diff --git a/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx b/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx new file mode 100644 index 000000000..6ddc0eacc --- /dev/null +++ b/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx @@ -0,0 +1,56 @@ +import { useFunctionCallGrouping } from "../hooks/function-call-grouping"; +import { ItemRenderer } from "../items/item-renderer"; +import { GroupedFunctionCallItemComponent } from "../items/grouped-function-call-item"; +import { + isFunctionCallItem, + isFunctionCallOutputItem, + AnyResponseItem, +} from "../utils/item-types"; + +interface GroupedItemsDisplayProps { + items: AnyResponseItem[]; + keyPrefix: string; + defaultRole?: string; +} + +export function GroupedItemsDisplay({ + items, + keyPrefix, + defaultRole = "unknown", +}: GroupedItemsDisplayProps) { + const groupedItems = useFunctionCallGrouping(items); + + return ( + <> + {groupedItems.map((groupedItem) => { + // If this is a function call with an output, render the grouped component + if ( + groupedItem.outputItem && + isFunctionCallItem(groupedItem.item) && + isFunctionCallOutputItem(groupedItem.outputItem) + ) { + return ( + + ); + } + + // Otherwise, render the individual item + return ( + + ); + })} + + ); +} diff --git a/llama_stack/ui/components/responses/hooks/function-call-grouping.ts b/llama_stack/ui/components/responses/hooks/function-call-grouping.ts new file mode 100644 index 000000000..2994354d5 --- /dev/null +++ b/llama_stack/ui/components/responses/hooks/function-call-grouping.ts @@ -0,0 +1,92 @@ +import { useMemo } from "react"; +import { + isFunctionCallOutputItem, + AnyResponseItem, + FunctionCallOutputItem, +} from "../utils/item-types"; + +export interface GroupedItem { + item: AnyResponseItem; + index: number; + outputItem?: AnyResponseItem; + outputIndex?: number; +} + +/** + * Hook to group function calls with their corresponding outputs + * @param items Array of items to group + * @returns Array of grouped items with their outputs + */ +export function useFunctionCallGrouping( + items: AnyResponseItem[], +): GroupedItem[] { + return useMemo(() => { + const groupedItems: GroupedItem[] = []; + const processedIndices = new Set(); + + // Build a map of call_id to indices for function_call_output items + const callIdToIndices = new Map(); + + for (let i = 0; i < items.length; i++) { + const item = items[i]; + if (isFunctionCallOutputItem(item)) { + if (!callIdToIndices.has(item.call_id)) { + callIdToIndices.set(item.call_id, []); + } + callIdToIndices.get(item.call_id)!.push(i); + } + } + + // Process items and group function calls with their outputs + for (let i = 0; i < items.length; i++) { + if (processedIndices.has(i)) { + continue; + } + + const currentItem = items[i]; + + if ( + currentItem.type === "function_call" && + "name" in currentItem && + "call_id" in currentItem + ) { + const functionCallId = currentItem.call_id as string; + let outputIndex = -1; + let outputItem: FunctionCallOutputItem | null = null; + + const relatedIndices = callIdToIndices.get(functionCallId) || []; + for (const idx of relatedIndices) { + const potentialOutput = items[idx]; + outputIndex = idx; + outputItem = potentialOutput as FunctionCallOutputItem; + break; + } + + if (outputItem && outputIndex !== -1) { + // Group function call with its function_call_output + groupedItems.push({ + item: currentItem, + index: i, + outputItem, + outputIndex, + }); + + // Mark both items as processed + 
processedIndices.add(i); + processedIndices.add(outputIndex); + + // Matching function call and output found, skip to next item + continue; + } + } + // render normally + groupedItems.push({ + item: currentItem, + index: i, + }); + processedIndices.add(i); + } + + return groupedItems; + }, [items]); +} diff --git a/llama_stack/ui/components/responses/items/function-call-item.tsx b/llama_stack/ui/components/responses/items/function-call-item.tsx new file mode 100644 index 000000000..beca935f0 --- /dev/null +++ b/llama_stack/ui/components/responses/items/function-call-item.tsx @@ -0,0 +1,29 @@ +import { + MessageBlock, + ToolCallBlock, +} from "@/components/ui/message-components"; +import { FunctionCallItem } from "../utils/item-types"; + +interface FunctionCallItemProps { + item: FunctionCallItem; + index: number; + keyPrefix: string; +} + +export function FunctionCallItemComponent({ + item, + index, + keyPrefix, +}: FunctionCallItemProps) { + const name = item.name || "unknown"; + const args = item.arguments || "{}"; + const formattedFunctionCall = `${name}(${args})`; + + return ( + {formattedFunctionCall}} + /> + ); +} diff --git a/llama_stack/ui/components/responses/items/generic-item.tsx b/llama_stack/ui/components/responses/items/generic-item.tsx new file mode 100644 index 000000000..6b6f56603 --- /dev/null +++ b/llama_stack/ui/components/responses/items/generic-item.tsx @@ -0,0 +1,37 @@ +import { + MessageBlock, + ToolCallBlock, +} from "@/components/ui/message-components"; +import { BaseItem } from "../utils/item-types"; + +interface GenericItemProps { + item: BaseItem; + index: number; + keyPrefix: string; +} + +export function GenericItemComponent({ + item, + index, + keyPrefix, +}: GenericItemProps) { + // Handle other types like function calls, tool outputs, etc. + const itemData = item as Record; + + const content = itemData.content + ? typeof itemData.content === "string" + ? itemData.content + : JSON.stringify(itemData.content, null, 2) + : JSON.stringify(itemData, null, 2); + + const label = keyPrefix === "input" ? "Input" : "Output"; + + return ( + {content}} + /> + ); +} diff --git a/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx b/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx new file mode 100644 index 000000000..ded0ced71 --- /dev/null +++ b/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx @@ -0,0 +1,54 @@ +import { + MessageBlock, + ToolCallBlock, +} from "@/components/ui/message-components"; +import { FunctionCallItem, FunctionCallOutputItem } from "../utils/item-types"; + +interface GroupedFunctionCallItemProps { + functionCall: FunctionCallItem; + output: FunctionCallOutputItem; + index: number; + keyPrefix: string; +} + +export function GroupedFunctionCallItemComponent({ + functionCall, + output, + index, + keyPrefix, +}: GroupedFunctionCallItemProps) { + const name = functionCall.name || "unknown"; + const args = functionCall.arguments || "{}"; + + // Extract the output content from function_call_output + let outputContent = ""; + if (output.output) { + outputContent = + typeof output.output === "string" + ? output.output + : JSON.stringify(output.output); + } else { + outputContent = JSON.stringify(output, null, 2); + } + + const functionCallContent = ( +
+    <div>
+      <span>Arguments</span>
+      <ToolCallBlock>{`${name}(${args})`}</ToolCallBlock>
+      <span>Output</span>
+      <ToolCallBlock>{outputContent}</ToolCallBlock>
+    </div>
    + ); + + return ( + + ); +} diff --git a/llama_stack/ui/components/responses/items/index.ts b/llama_stack/ui/components/responses/items/index.ts new file mode 100644 index 000000000..d7bcc2ea4 --- /dev/null +++ b/llama_stack/ui/components/responses/items/index.ts @@ -0,0 +1,6 @@ +export { MessageItemComponent } from "./message-item"; +export { FunctionCallItemComponent } from "./function-call-item"; +export { WebSearchItemComponent } from "./web-search-item"; +export { GenericItemComponent } from "./generic-item"; +export { GroupedFunctionCallItemComponent } from "./grouped-function-call-item"; +export { ItemRenderer } from "./item-renderer"; diff --git a/llama_stack/ui/components/responses/items/item-renderer.tsx b/llama_stack/ui/components/responses/items/item-renderer.tsx new file mode 100644 index 000000000..8f65d50c4 --- /dev/null +++ b/llama_stack/ui/components/responses/items/item-renderer.tsx @@ -0,0 +1,60 @@ +import { + isMessageItem, + isFunctionCallItem, + isWebSearchCallItem, + AnyResponseItem, +} from "../utils/item-types"; +import { MessageItemComponent } from "./message-item"; +import { FunctionCallItemComponent } from "./function-call-item"; +import { WebSearchItemComponent } from "./web-search-item"; +import { GenericItemComponent } from "./generic-item"; + +interface ItemRendererProps { + item: AnyResponseItem; + index: number; + keyPrefix: string; + defaultRole?: string; +} + +export function ItemRenderer({ + item, + index, + keyPrefix, + defaultRole = "unknown", +}: ItemRendererProps) { + if (isMessageItem(item)) { + return ( + + ); + } + + if (isFunctionCallItem(item)) { + return ( + + ); + } + + if (isWebSearchCallItem(item)) { + return ( + + ); + } + + // Fallback to generic item for unknown types + return ( + + ); +} diff --git a/llama_stack/ui/components/responses/items/message-item.tsx b/llama_stack/ui/components/responses/items/message-item.tsx new file mode 100644 index 000000000..532fddfaa --- /dev/null +++ b/llama_stack/ui/components/responses/items/message-item.tsx @@ -0,0 +1,41 @@ +import { MessageBlock } from "@/components/ui/message-components"; +import { MessageItem } from "../utils/item-types"; + +interface MessageItemProps { + item: MessageItem; + index: number; + keyPrefix: string; + defaultRole?: string; +} + +export function MessageItemComponent({ + item, + index, + keyPrefix, + defaultRole = "unknown", +}: MessageItemProps) { + let content = ""; + + if (typeof item.content === "string") { + content = item.content; + } else if (Array.isArray(item.content)) { + content = item.content + .map((c) => { + return c.type === "input_text" || c.type === "output_text" + ? 
c.text + : JSON.stringify(c); + }) + .join(" "); + } + + const role = item.role || defaultRole; + const label = role.charAt(0).toUpperCase() + role.slice(1); + + return ( + + ); +} diff --git a/llama_stack/ui/components/responses/items/web-search-item.tsx b/llama_stack/ui/components/responses/items/web-search-item.tsx new file mode 100644 index 000000000..aaa5741ce --- /dev/null +++ b/llama_stack/ui/components/responses/items/web-search-item.tsx @@ -0,0 +1,28 @@ +import { + MessageBlock, + ToolCallBlock, +} from "@/components/ui/message-components"; +import { WebSearchCallItem } from "../utils/item-types"; + +interface WebSearchItemProps { + item: WebSearchCallItem; + index: number; + keyPrefix: string; +} + +export function WebSearchItemComponent({ + item, + index, + keyPrefix, +}: WebSearchItemProps) { + const formattedWebSearch = `web_search_call(status: ${item.status})`; + + return ( + {formattedWebSearch}} + /> + ); +} diff --git a/llama_stack/ui/components/responses/responses-detail.test.tsx b/llama_stack/ui/components/responses/responses-detail.test.tsx new file mode 100644 index 000000000..f426dc059 --- /dev/null +++ b/llama_stack/ui/components/responses/responses-detail.test.tsx @@ -0,0 +1,777 @@ +import React from "react"; +import { render, screen } from "@testing-library/react"; +import "@testing-library/jest-dom"; +import { ResponseDetailView } from "./responses-detail"; +import { OpenAIResponse, InputItemListResponse } from "@/lib/types"; + +describe("ResponseDetailView", () => { + const defaultProps = { + response: null, + inputItems: null, + isLoading: false, + isLoadingInputItems: false, + error: null, + inputItemsError: null, + id: "test_id", + }; + + describe("Loading State", () => { + test("renders loading skeleton when isLoading is true", () => { + const { container } = render( + , + ); + + // Check for skeleton elements + const skeletons = container.querySelectorAll('[data-slot="skeleton"]'); + expect(skeletons.length).toBeGreaterThan(0); + + // The title is replaced by a skeleton when loading, so we shouldn't expect the text + }); + }); + + describe("Error State", () => { + test("renders error message when error prop is provided", () => { + const errorMessage = "Network Error"; + render( + , + ); + + expect(screen.getByText("Responses Details")).toBeInTheDocument(); + // The error message is split across elements, so we check for parts + expect( + screen.getByText(/Error loading details for ID/), + ).toBeInTheDocument(); + expect(screen.getByText(/test_id/)).toBeInTheDocument(); + expect(screen.getByText(/Network Error/)).toBeInTheDocument(); + }); + + test("renders default error message when error.message is not available", () => { + render( + , + ); + + expect( + screen.getByText(/Error loading details for ID/), + ).toBeInTheDocument(); + expect(screen.getByText(/test_id/)).toBeInTheDocument(); + }); + }); + + describe("Not Found State", () => { + test("renders not found message when response is null and not loading/error", () => { + render(); + + expect(screen.getByText("Responses Details")).toBeInTheDocument(); + // The message is split across elements + expect(screen.getByText(/No details found for ID:/)).toBeInTheDocument(); + expect(screen.getByText(/test_id/)).toBeInTheDocument(); + }); + }); + + describe("Response Data Rendering", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "llama-test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + 
content: "Test response output", + }, + ], + input: [ + { + type: "message", + role: "user", + content: "Test input message", + }, + ], + temperature: 0.7, + top_p: 0.9, + parallel_tool_calls: true, + previous_response_id: "prev_resp_456", + }; + + test("renders response data with input and output sections", () => { + render(); + + // Check main sections + expect(screen.getByText("Responses Details")).toBeInTheDocument(); + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Output")).toBeInTheDocument(); + + // Check input content + expect(screen.getByText("Test input message")).toBeInTheDocument(); + expect(screen.getByText("User")).toBeInTheDocument(); + + // Check output content + expect(screen.getByText("Test response output")).toBeInTheDocument(); + expect(screen.getByText("Assistant")).toBeInTheDocument(); + }); + + test("renders properties sidebar with all response metadata", () => { + render(); + + // Check properties - use regex to handle text split across elements + expect(screen.getByText(/Created/)).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710000000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + + // Check for the specific ID label (not Previous Response ID) + expect( + screen.getByText((content, element) => { + return element?.tagName === "STRONG" && content === "ID:"; + }), + ).toBeInTheDocument(); + expect(screen.getByText("resp_123")).toBeInTheDocument(); + + expect(screen.getByText(/Model/)).toBeInTheDocument(); + expect(screen.getByText("llama-test-model")).toBeInTheDocument(); + + expect(screen.getByText(/Status/)).toBeInTheDocument(); + expect(screen.getByText("completed")).toBeInTheDocument(); + + expect(screen.getByText(/Temperature/)).toBeInTheDocument(); + expect(screen.getByText("0.7")).toBeInTheDocument(); + + expect(screen.getByText(/Top P/)).toBeInTheDocument(); + expect(screen.getByText("0.9")).toBeInTheDocument(); + + expect(screen.getByText(/Parallel Tool Calls/)).toBeInTheDocument(); + expect(screen.getByText("Yes")).toBeInTheDocument(); + + expect(screen.getByText(/Previous Response ID/)).toBeInTheDocument(); + expect(screen.getByText("prev_resp_456")).toBeInTheDocument(); + }); + + test("handles optional properties correctly", () => { + const minimalResponse: OpenAIResponse = { + id: "resp_minimal", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [], + }; + + render( + , + ); + + // Should show required properties + expect(screen.getByText("resp_minimal")).toBeInTheDocument(); + expect(screen.getByText("test-model")).toBeInTheDocument(); + expect(screen.getByText("completed")).toBeInTheDocument(); + + // Should not show optional properties + expect(screen.queryByText("Temperature")).not.toBeInTheDocument(); + expect(screen.queryByText("Top P")).not.toBeInTheDocument(); + expect(screen.queryByText("Parallel Tool Calls")).not.toBeInTheDocument(); + expect( + screen.queryByText("Previous Response ID"), + ).not.toBeInTheDocument(); + }); + + test("renders error information when response has error", () => { + const errorResponse: OpenAIResponse = { + ...mockResponse, + error: { + code: "invalid_request", + message: "The request was invalid", + }, + }; + + render(); + + // The error is shown in the properties sidebar, not as a separate "Error" label + expect( + screen.getByText("invalid_request: The request was invalid"), + ).toBeInTheDocument(); + }); + }); + + describe("Input Items Handling", () => { + const mockResponse: OpenAIResponse 
= { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [{ type: "message", role: "assistant", content: "output" }], + input: [{ type: "message", role: "user", content: "fallback input" }], + }; + + test("shows loading state for input items", () => { + render( + , + ); + + // Check for skeleton loading in input items section + const { container } = render( + , + ); + + const skeletons = container.querySelectorAll('[data-slot="skeleton"]'); + expect(skeletons.length).toBeGreaterThan(0); + }); + + test("shows error message for input items with fallback", () => { + render( + , + ); + + expect( + screen.getByText( + "Error loading input items: Failed to load input items", + ), + ).toBeInTheDocument(); + expect( + screen.getByText("Falling back to response input data."), + ).toBeInTheDocument(); + + // Should still show fallback input data + expect(screen.getByText("fallback input")).toBeInTheDocument(); + }); + + test("uses input items data when available", () => { + const mockInputItems: InputItemListResponse = { + object: "list", + data: [ + { + type: "message", + role: "user", + content: "input from items API", + }, + ], + }; + + render( + , + ); + + // Should show input items data, not response.input + expect(screen.getByText("input from items API")).toBeInTheDocument(); + expect(screen.queryByText("fallback input")).not.toBeInTheDocument(); + }); + + test("falls back to response.input when input items is empty", () => { + const emptyInputItems: InputItemListResponse = { + object: "list", + data: [], + }; + + render( + , + ); + + // Should show fallback input data + expect(screen.getByText("fallback input")).toBeInTheDocument(); + }); + + test("shows no input message when no data available", () => { + const responseWithoutInput: OpenAIResponse = { + ...mockResponse, + input: [], + }; + + render( + , + ); + + expect(screen.getByText("No input data available.")).toBeInTheDocument(); + }); + }); + + describe("Input Display Components", () => { + test("renders string content input correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "message", + role: "user", + content: "Simple string input", + }, + ], + }; + + render(); + + expect(screen.getByText("Simple string input")).toBeInTheDocument(); + expect(screen.getByText("User")).toBeInTheDocument(); + }); + + test("renders array content input correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "message", + role: "user", + content: [ + { type: "input_text", text: "First part" }, + { type: "output_text", text: "Second part" }, + ], + }, + ], + }; + + render(); + + expect(screen.getByText("First part Second part")).toBeInTheDocument(); + expect(screen.getByText("User")).toBeInTheDocument(); + }); + + test("renders non-message input types correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "function_call", + content: "function call content", + }, + ], + }; + + render(); + + expect(screen.getByText("function call content")).toBeInTheDocument(); + // Use getAllByText to find the specific "Input" with the type detail + 
const inputElements = screen.getAllByText("Input"); + expect(inputElements.length).toBeGreaterThan(0); + expect(screen.getByText("(function_call)")).toBeInTheDocument(); + }); + + test("handles input with object content", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "custom_type", + content: JSON.stringify({ key: "value", nested: { data: "test" } }), + }, + ], + }; + + render(); + + // Should show JSON stringified content (without quotes around keys in the rendered output) + expect(screen.getByText(/key.*value/)).toBeInTheDocument(); + // Use getAllByText to find the specific "Input" with the type detail + const inputElements = screen.getAllByText("Input"); + expect(inputElements.length).toBeGreaterThan(0); + expect(screen.getByText("(custom_type)")).toBeInTheDocument(); + }); + + test("renders function call input correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "function_call", + id: "call_456", + status: "completed", + name: "input_function", + arguments: '{"param": "value"}', + }, + ], + }; + + render(); + + expect( + screen.getByText('input_function({"param": "value"})'), + ).toBeInTheDocument(); + expect(screen.getByText("Function Call")).toBeInTheDocument(); + }); + + test("renders web search call input correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "web_search_call", + id: "search_789", + status: "completed", + }, + ], + }; + + render(); + + expect( + screen.getByText("web_search_call(status: completed)"), + ).toBeInTheDocument(); + expect(screen.getByText("Function Call")).toBeInTheDocument(); + expect(screen.getByText("(Web Search)")).toBeInTheDocument(); + }); + }); + + describe("Output Display Components", () => { + test("renders message output with string content", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: "Simple string output", + }, + ], + input: [], + }; + + render(); + + expect(screen.getByText("Simple string output")).toBeInTheDocument(); + expect(screen.getByText("Assistant")).toBeInTheDocument(); + }); + + test("renders message output with array content", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: [ + { type: "output_text", text: "First output" }, + { type: "input_text", text: "Second output" }, + ], + }, + ], + input: [], + }; + + render(); + + expect( + screen.getByText("First output Second output"), + ).toBeInTheDocument(); + expect(screen.getByText("Assistant")).toBeInTheDocument(); + }); + + test("renders function call output correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + name: "search_function", + arguments: '{"query": 
"test"}', + }, + ], + input: [], + }; + + render(); + + expect( + screen.getByText('search_function({"query": "test"})'), + ).toBeInTheDocument(); + expect(screen.getByText("Function Call")).toBeInTheDocument(); + }); + + test("renders function call output without arguments", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + name: "simple_function", + }, + ], + input: [], + }; + + render(); + + expect(screen.getByText("simple_function({})")).toBeInTheDocument(); + expect(screen.getByText(/Function Call/)).toBeInTheDocument(); + }); + + test("renders web search call output correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "web_search_call", + id: "search_123", + status: "completed", + }, + ], + input: [], + }; + + render(); + + expect( + screen.getByText("web_search_call(status: completed)"), + ).toBeInTheDocument(); + expect(screen.getByText(/Function Call/)).toBeInTheDocument(); + expect(screen.getByText("(Web Search)")).toBeInTheDocument(); + }); + + test("renders unknown output types with JSON fallback", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "unknown_type", + custom_field: "custom_value", + data: { nested: "object" }, + } as any, + ], + input: [], + }; + + render(); + + // Should show JSON stringified content + expect( + screen.getByText(/custom_field.*custom_value/), + ).toBeInTheDocument(); + expect(screen.getByText("(unknown_type)")).toBeInTheDocument(); + }); + + test("shows no output message when output array is empty", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [], + }; + + render(); + + expect(screen.getByText("No output data available.")).toBeInTheDocument(); + }); + + test("groups function call with its output correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + name: "get_weather", + arguments: '{"city": "Tokyo"}', + }, + { + type: "message", + role: "assistant", + call_id: "call_123", + content: "sunny and warm", + } as any, // Using any to bypass the type restriction for this test + ], + input: [], + }; + + render(); + + // Should show the function call and message as separate items (not grouped) + expect(screen.getByText("Function Call")).toBeInTheDocument(); + expect( + screen.getByText('get_weather({"city": "Tokyo"})'), + ).toBeInTheDocument(); + expect(screen.getByText("Assistant")).toBeInTheDocument(); + expect(screen.getByText("sunny and warm")).toBeInTheDocument(); + + // Should NOT have the grouped "Arguments" and "Output" labels + expect(screen.queryByText("Arguments")).not.toBeInTheDocument(); + }); + + test("groups function call with function_call_output correctly", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + 
type: "function_call", + call_id: "call_123", + status: "completed", + name: "get_weather", + arguments: '{"city": "Tokyo"}', + }, + { + type: "function_call_output", + id: "fc_68364957013081...", + status: "completed", + call_id: "call_123", + output: "sunny and warm", + } as any, // Using any to bypass the type restriction for this test + ], + input: [], + }; + + render(); + + // Should show the function call grouped with its clean output + expect(screen.getByText("Function Call")).toBeInTheDocument(); + expect(screen.getByText("Arguments")).toBeInTheDocument(); + expect( + screen.getByText('get_weather({"city": "Tokyo"})'), + ).toBeInTheDocument(); + // Use getAllByText since there are multiple "Output" elements (card title and output label) + const outputElements = screen.getAllByText("Output"); + expect(outputElements.length).toBeGreaterThan(0); + expect(screen.getByText("sunny and warm")).toBeInTheDocument(); + }); + }); + + describe("Edge Cases and Error Handling", () => { + test("handles missing role in message input", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [], + input: [ + { + type: "message", + content: "Message without role", + }, + ], + }; + + render(); + + expect(screen.getByText("Message without role")).toBeInTheDocument(); + expect(screen.getByText("Unknown")).toBeInTheDocument(); // Default role + }); + + test("handles missing name in function call output", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + }, + ], + input: [], + }; + + render(); + + // When name is missing, it falls back to JSON.stringify of the entire output + const functionCallElements = screen.getAllByText(/function_call/); + expect(functionCallElements.length).toBeGreaterThan(0); + expect(screen.getByText(/call_123/)).toBeInTheDocument(); + }); + }); +}); diff --git a/llama_stack/ui/components/responses/responses-detail.tsx b/llama_stack/ui/components/responses/responses-detail.tsx new file mode 100644 index 000000000..c8c447ba4 --- /dev/null +++ b/llama_stack/ui/components/responses/responses-detail.tsx @@ -0,0 +1,171 @@ +"use client"; + +import { OpenAIResponse, InputItemListResponse } from "@/lib/types"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Skeleton } from "@/components/ui/skeleton"; +import { + DetailLoadingView, + DetailErrorView, + DetailNotFoundView, + DetailLayout, + PropertiesCard, + PropertyItem, +} from "@/components/layout/detail-layout"; +import { GroupedItemsDisplay } from "./grouping/grouped-items-display"; + +interface ResponseDetailViewProps { + response: OpenAIResponse | null; + inputItems: InputItemListResponse | null; + isLoading: boolean; + isLoadingInputItems: boolean; + error: Error | null; + inputItemsError: Error | null; + id: string; +} + +export function ResponseDetailView({ + response, + inputItems, + isLoading, + isLoadingInputItems, + error, + inputItemsError, + id, +}: ResponseDetailViewProps) { + const title = "Responses Details"; + + if (error) { + return ; + } + + if (isLoading) { + return ; + } + + if (!response) { + return ; + } + + // Main content cards + const mainContent = ( + <> + + + Input + + + {/* Show loading state for input items */} + {isLoadingInputItems ? ( +
    + + + +
    + ) : inputItemsError ? ( +
    + Error loading input items: {inputItemsError.message} +
    + + Falling back to response input data. + +
    + ) : null} + + {/* Display input items if available, otherwise fall back to response.input */} + {(() => { + const dataToDisplay = + inputItems?.data && inputItems.data.length > 0 + ? inputItems.data + : response.input; + + if (dataToDisplay && dataToDisplay.length > 0) { + return ( + + ); + } else { + return ( +

    + No input data available. +

    + ); + } + })()} +
    +
    + + + + Output + + + {response.output?.length > 0 ? ( + + ) : ( +

    + No output data available. +

    + )} +
    +
    + + ); + + // Properties sidebar + const sidebar = ( + + + + + + {response.temperature && ( + + )} + {response.top_p && } + {response.parallel_tool_calls && ( + + )} + {response.previous_response_id && ( + {response.previous_response_id} + } + hasBorder + /> + )} + {response.error && ( + + {response.error.code}: {response.error.message} + + } + className="pt-1 mt-1 border-t border-red-200" + /> + )} + + ); + + return ( + + ); +} diff --git a/llama_stack/ui/components/responses/responses-table.test.tsx b/llama_stack/ui/components/responses/responses-table.test.tsx new file mode 100644 index 000000000..7c45c57d3 --- /dev/null +++ b/llama_stack/ui/components/responses/responses-table.test.tsx @@ -0,0 +1,537 @@ +import React from "react"; +import { render, screen, fireEvent } from "@testing-library/react"; +import "@testing-library/jest-dom"; +import { ResponsesTable } from "./responses-table"; +import { OpenAIResponse } from "@/lib/types"; + +// Mock next/navigation +const mockPush = jest.fn(); +jest.mock("next/navigation", () => ({ + useRouter: () => ({ + push: mockPush, + }), +})); + +// Mock helper functions +jest.mock("@/lib/truncate-text"); + +// Import the mocked functions +import { truncateText as originalTruncateText } from "@/lib/truncate-text"; + +// Cast to jest.Mock for typings +const truncateText = originalTruncateText as jest.Mock; + +describe("ResponsesTable", () => { + const defaultProps = { + data: [] as OpenAIResponse[], + isLoading: false, + error: null, + }; + + beforeEach(() => { + // Reset all mocks before each test + mockPush.mockClear(); + truncateText.mockClear(); + + // Default pass-through implementation + truncateText.mockImplementation((text: string | undefined) => text); + }); + + test("renders without crashing with default props", () => { + render(); + expect(screen.getByText("No responses found.")).toBeInTheDocument(); + }); + + test("click on a row navigates to the correct URL", () => { + const mockResponse: OpenAIResponse = { + id: "resp_123", + object: "response", + created_at: Math.floor(Date.now() / 1000), + model: "llama-test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: "Test output", + }, + ], + input: [ + { + type: "message", + role: "user", + content: "Test input", + }, + ], + }; + + render(); + + const row = screen.getByText("Test input").closest("tr"); + if (row) { + fireEvent.click(row); + expect(mockPush).toHaveBeenCalledWith("/logs/responses/resp_123"); + } else { + throw new Error('Row with "Test input" not found for router mock test.'); + } + }); + + describe("Loading State", () => { + test("renders skeleton UI when isLoading is true", () => { + const { container } = render( + , + ); + + // Check for skeleton in the table caption + const tableCaption = container.querySelector("caption"); + expect(tableCaption).toBeInTheDocument(); + if (tableCaption) { + const captionSkeleton = tableCaption.querySelector( + '[data-slot="skeleton"]', + ); + expect(captionSkeleton).toBeInTheDocument(); + } + + // Check for skeletons in the table body cells + const tableBody = container.querySelector("tbody"); + expect(tableBody).toBeInTheDocument(); + if (tableBody) { + const bodySkeletons = tableBody.querySelectorAll( + '[data-slot="skeleton"]', + ); + expect(bodySkeletons.length).toBeGreaterThan(0); + } + }); + }); + + describe("Error State", () => { + test("renders error message when error prop is provided", () => { + const errorMessage = "Network Error"; + render( + , + ); + expect( + 
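One caveat in the sidebar above: the && guards drop falsy values, so a temperature or top_p of 0 (or an explicit parallel_tool_calls: false) would simply not be rendered. If zero or false values should still show up, an explicit null check is the usual fix. A minimal sketch, with the label/value prop names on PropertyItem assumed since its definition is not part of this diff:

import { PropertyItem } from "@/components/layout/detail-layout";
import type { OpenAIResponse } from "@/lib/types";

// Renders the sampling-related properties while keeping legitimate zero/false values visible.
export function SamplingProperties({ response }: { response: OpenAIResponse }) {
  return (
    <>
      {response.temperature != null && (
        <PropertyItem label="Temperature" value={response.temperature} />
      )}
      {response.top_p != null && (
        <PropertyItem label="Top P" value={response.top_p} />
      )}
      {response.parallel_tool_calls != null && (
        <PropertyItem
          label="Parallel Tool Calls"
          value={response.parallel_tool_calls ? "Yes" : "No"}
        />
      )}
    </>
  );
}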
screen.getByText(`Error fetching data: ${errorMessage}`), + ).toBeInTheDocument(); + }); + + test("renders default error message when error.message is not available", () => { + render( + , + ); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + + test("renders default error message when error prop is an object without message", () => { + render(); + expect( + screen.getByText("Error fetching data: An unknown error occurred"), + ).toBeInTheDocument(); + }); + }); + + describe("Empty State", () => { + test('renders "No responses found." and no table when data array is empty', () => { + render(); + expect(screen.getByText("No responses found.")).toBeInTheDocument(); + + // Ensure that the table structure is NOT rendered in the empty state + const table = screen.queryByRole("table"); + expect(table).not.toBeInTheDocument(); + }); + }); + + describe("Data Rendering", () => { + test("renders table caption, headers, and response data correctly", () => { + const mockResponses = [ + { + id: "resp_1", + object: "response" as const, + created_at: 1710000000, + model: "llama-test-model", + status: "completed", + output: [ + { + type: "message" as const, + role: "assistant" as const, + content: "Test output", + }, + ], + input: [ + { + type: "message", + role: "user", + content: "Test input", + }, + ], + }, + { + id: "resp_2", + object: "response" as const, + created_at: 1710001000, + model: "llama-another-model", + status: "completed", + output: [ + { + type: "message" as const, + role: "assistant" as const, + content: "Another output", + }, + ], + input: [ + { + type: "message", + role: "user", + content: "Another input", + }, + ], + }, + ]; + + render( + , + ); + + // Table caption + expect( + screen.getByText("A list of your recent responses."), + ).toBeInTheDocument(); + + // Table headers + expect(screen.getByText("Input")).toBeInTheDocument(); + expect(screen.getByText("Output")).toBeInTheDocument(); + expect(screen.getByText("Model")).toBeInTheDocument(); + expect(screen.getByText("Created")).toBeInTheDocument(); + + // Data rows + expect(screen.getByText("Test input")).toBeInTheDocument(); + expect(screen.getByText("Test output")).toBeInTheDocument(); + expect(screen.getByText("llama-test-model")).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710000000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + + expect(screen.getByText("Another input")).toBeInTheDocument(); + expect(screen.getByText("Another output")).toBeInTheDocument(); + expect(screen.getByText("llama-another-model")).toBeInTheDocument(); + expect( + screen.getByText(new Date(1710001000 * 1000).toLocaleString()), + ).toBeInTheDocument(); + }); + }); + + describe("Input Text Extraction", () => { + test("extracts text from string content", () => { + const mockResponse: OpenAIResponse = { + id: "resp_string", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [{ type: "message", role: "assistant", content: "output" }], + input: [ + { + type: "message", + role: "user", + content: "Simple string input", + }, + ], + }; + + render( + , + ); + expect(screen.getByText("Simple string input")).toBeInTheDocument(); + }); + + test("extracts text from array content with input_text type", () => { + const mockResponse: OpenAIResponse = { + id: "resp_array", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [{ type: "message", role: "assistant", content: "output" 
}], + input: [ + { + type: "message", + role: "user", + content: [ + { type: "input_text", text: "Array input text" }, + { type: "input_text", text: "Should not be used" }, + ], + }, + ], + }; + + render( + , + ); + expect(screen.getByText("Array input text")).toBeInTheDocument(); + }); + + test("returns empty string when no message input found", () => { + const mockResponse: OpenAIResponse = { + id: "resp_no_input", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [{ type: "message", role: "assistant", content: "output" }], + input: [ + { + type: "other_type", + content: "Not a message", + }, + ], + }; + + const { container } = render( + , + ); + + // Find the input cell (first cell in the data row) and verify it's empty + const inputCell = container.querySelector("tbody tr td:first-child"); + expect(inputCell).toBeInTheDocument(); + expect(inputCell).toHaveTextContent(""); + }); + }); + + describe("Output Text Extraction", () => { + test("extracts text from string message content", () => { + const mockResponse: OpenAIResponse = { + id: "resp_string_output", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: "Simple string output", + }, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + expect(screen.getByText("Simple string output")).toBeInTheDocument(); + }); + + test("extracts text from array message content with output_text type", () => { + const mockResponse: OpenAIResponse = { + id: "resp_array_output", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: [ + { type: "output_text", text: "Array output text" }, + { type: "output_text", text: "Should not be used" }, + ], + }, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + expect(screen.getByText("Array output text")).toBeInTheDocument(); + }); + + test("formats function call output", () => { + const mockResponse: OpenAIResponse = { + id: "resp_function_call", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + name: "search_function", + arguments: '{"query": "test"}', + }, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + expect( + screen.getByText('search_function({"query": "test"})'), + ).toBeInTheDocument(); + }); + + test("formats function call output without arguments", () => { + const mockResponse: OpenAIResponse = { + id: "resp_function_no_args", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "function_call", + id: "call_123", + status: "completed", + name: "simple_function", + }, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + expect(screen.getByText("simple_function({})")).toBeInTheDocument(); + }); + + test("formats web search call output", () => { + const mockResponse: OpenAIResponse = { + id: "resp_web_search", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "web_search_call", + id: "search_123", + status: "completed", + }, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + expect( + screen.getByText("web_search_call(status: 
completed)"), + ).toBeInTheDocument(); + }); + + test("falls back to JSON.stringify for unknown tool call types", () => { + const mockResponse: OpenAIResponse = { + id: "resp_unknown_tool", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "unknown_call", + id: "unknown_123", + status: "completed", + custom_field: "custom_value", + } as any, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + // Should contain the JSON stringified version + expect(screen.getByText(/unknown_call/)).toBeInTheDocument(); + }); + + test("falls back to JSON.stringify for entire output when no message or tool call found", () => { + const mockResponse: OpenAIResponse = { + id: "resp_fallback", + object: "response", + created_at: 1710000000, + model: "test-model", + status: "completed", + output: [ + { + type: "unknown_type", + data: "some data", + } as any, + ], + input: [{ type: "message", content: "input" }], + }; + + render( + , + ); + // Should contain the JSON stringified version of the output array + expect(screen.getByText(/unknown_type/)).toBeInTheDocument(); + }); + }); + + describe("Text Truncation", () => { + test("truncates long input and output text", () => { + // Specific mock implementation for this test + truncateText.mockImplementation( + (text: string | undefined, maxLength?: number) => { + const defaultTestMaxLength = 10; + const effectiveMaxLength = maxLength ?? defaultTestMaxLength; + return typeof text === "string" && text.length > effectiveMaxLength + ? text.slice(0, effectiveMaxLength) + "..." + : text; + }, + ); + + const longInput = + "This is a very long input message that should be truncated."; + const longOutput = + "This is a very long output message that should also be truncated."; + + const mockResponse: OpenAIResponse = { + id: "resp_trunc", + object: "response", + created_at: 1710002000, + model: "llama-trunc-model", + status: "completed", + output: [ + { + type: "message", + role: "assistant", + content: longOutput, + }, + ], + input: [ + { + type: "message", + role: "user", + content: longInput, + }, + ], + }; + + render( + , + ); + + // The truncated text should be present for both input and output + const truncatedTexts = screen.getAllByText( + longInput.slice(0, 10) + "...", + ); + expect(truncatedTexts.length).toBe(2); // one for input, one for output + truncatedTexts.forEach((textElement) => + expect(textElement).toBeInTheDocument(), + ); + }); + }); +}); diff --git a/llama_stack/ui/components/responses/responses-table.tsx b/llama_stack/ui/components/responses/responses-table.tsx new file mode 100644 index 000000000..352450d18 --- /dev/null +++ b/llama_stack/ui/components/responses/responses-table.tsx @@ -0,0 +1,117 @@ +"use client"; + +import { + OpenAIResponse, + ResponseInput, + ResponseInputMessageContent, +} from "@/lib/types"; +import { LogsTable, LogTableRow } from "@/components/logs/logs-table"; +import { + isMessageInput, + isMessageItem, + isFunctionCallItem, + isWebSearchCallItem, + MessageItem, + FunctionCallItem, + WebSearchCallItem, +} from "./utils/item-types"; + +interface ResponsesTableProps { + data: OpenAIResponse[]; + isLoading: boolean; + error: Error | null; +} + +function getInputText(response: OpenAIResponse): string { + const firstInput = response.input.find(isMessageInput); + if (firstInput) { + return extractContentFromItem(firstInput); + } + return ""; +} + +function getOutputText(response: OpenAIResponse): string { + const firstMessage = 
response.output.find((item) => + isMessageItem(item as any), + ); + if (firstMessage) { + const content = extractContentFromItem(firstMessage as MessageItem); + if (content) { + return content; + } + } + + const functionCall = response.output.find((item) => + isFunctionCallItem(item as any), + ); + if (functionCall) { + return formatFunctionCall(functionCall as FunctionCallItem); + } + + const webSearchCall = response.output.find((item) => + isWebSearchCallItem(item as any), + ); + if (webSearchCall) { + return formatWebSearchCall(webSearchCall as WebSearchCallItem); + } + + return JSON.stringify(response.output); +} + +function extractContentFromItem(item: { + content?: string | ResponseInputMessageContent[]; +}): string { + if (!item.content) { + return ""; + } + + if (typeof item.content === "string") { + return item.content; + } else if (Array.isArray(item.content)) { + const textContent = item.content.find( + (c: ResponseInputMessageContent) => + c.type === "input_text" || c.type === "output_text", + ); + return textContent?.text || ""; + } + return ""; +} + +function formatFunctionCall(functionCall: FunctionCallItem): string { + const args = functionCall.arguments || "{}"; + const name = functionCall.name || "unknown"; + return `${name}(${args})`; +} + +function formatWebSearchCall(webSearchCall: WebSearchCallItem): string { + return `web_search_call(status: ${webSearchCall.status})`; +} + +function formatResponseToRow(response: OpenAIResponse): LogTableRow { + return { + id: response.id, + input: getInputText(response), + output: getOutputText(response), + model: response.model, + createdTime: new Date(response.created_at * 1000).toLocaleString(), + detailPath: `/logs/responses/${response.id}`, + }; +} + +export function ResponsesTable({ + data, + isLoading, + error, +}: ResponsesTableProps) { + const formattedData = data.map(formatResponseToRow); + + return ( + + ); +} diff --git a/llama_stack/ui/components/responses/utils/item-types.ts b/llama_stack/ui/components/responses/utils/item-types.ts new file mode 100644 index 000000000..2bde49119 --- /dev/null +++ b/llama_stack/ui/components/responses/utils/item-types.ts @@ -0,0 +1,61 @@ +/** + * Type guards for different item types in responses + */ + +import type { + ResponseInput, + ResponseOutput, + ResponseMessage, + ResponseToolCall, +} from "@/lib/types"; + +export interface BaseItem { + type: string; + [key: string]: unknown; +} + +export type MessageItem = ResponseMessage; +export type FunctionCallItem = ResponseToolCall & { type: "function_call" }; +export type WebSearchCallItem = ResponseToolCall & { type: "web_search_call" }; +export type FunctionCallOutputItem = BaseItem & { + type: "function_call_output"; + call_id: string; + output?: string | object; +}; + +export type AnyResponseItem = + | ResponseInput + | ResponseOutput + | FunctionCallOutputItem; + +export function isMessageInput( + item: ResponseInput, +): item is ResponseInput & { type: "message" } { + return item.type === "message"; +} + +export function isMessageItem(item: AnyResponseItem): item is MessageItem { + return item.type === "message" && "content" in item; +} + +export function isFunctionCallItem( + item: AnyResponseItem, +): item is FunctionCallItem { + return item.type === "function_call" && "name" in item; +} + +export function isWebSearchCallItem( + item: AnyResponseItem, +): item is WebSearchCallItem { + return item.type === "web_search_call"; +} + +export function isFunctionCallOutputItem( + item: AnyResponseItem, +): item is FunctionCallOutputItem 
{ + return ( + item.type === "function_call_output" && + "call_id" in item && + typeof (item as any).call_id === "string" + ); +} diff --git a/llama_stack/ui/components/ui/breadcrumb.tsx b/llama_stack/ui/components/ui/breadcrumb.tsx new file mode 100644 index 000000000..f63ae19af --- /dev/null +++ b/llama_stack/ui/components/ui/breadcrumb.tsx @@ -0,0 +1,109 @@ +import * as React from "react"; +import { Slot } from "@radix-ui/react-slot"; +import { ChevronRight, MoreHorizontal } from "lucide-react"; + +import { cn } from "@/lib/utils"; + +function Breadcrumb({ ...props }: React.ComponentProps<"nav">) { + return
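The guards in utils/item-types.ts are what let ResponsesTable and the detail view walk a loosely typed item list safely. A small illustrative usage follows; the describeItem helper is hypothetical and only mirrors accesses the components above already make:

import {
  isFunctionCallItem,
  isMessageItem,
  isWebSearchCallItem,
  type AnyResponseItem,
} from "@/components/responses/utils/item-types";

// Produces a short human-readable summary for any response item.
function describeItem(item: AnyResponseItem): string {
  if (isMessageItem(item)) {
    // Message content can be a plain string or an array of text parts.
    return typeof item.content === "string" ? item.content : "message";
  }
  if (isFunctionCallItem(item)) {
    return `${item.name || "unknown"}(${item.arguments || "{}"})`;
  }
  if (isWebSearchCallItem(item)) {
    return `web_search_call(status: ${item.status})`;
  }
  return JSON.stringify(item);
}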